Detailed changes
@@ -25,33 +25,6 @@ on:
description: The app secret for the corresponding app ID
required: true
jobs:
- check_extension:
- if: (github.repository_owner == 'zed-industries' || github.repository_owner == 'zed-extensions')
- runs-on: namespace-profile-2x4-ubuntu-2404
- steps:
- - name: steps::checkout_repo
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
- with:
- clean: false
- - id: cache-zed-extension-cli
- name: extension_tests::cache_zed_extension_cli
- uses: actions/cache@0057852bfaa89a56745cba8c7296529d2fc39830
- with:
- path: zed-extension
- key: zed-extension-${{ env.ZED_EXTENSION_CLI_SHA }}
- - name: extension_tests::download_zed_extension_cli
- if: steps.cache-zed-extension-cli.outputs.cache-hit != 'true'
- run: |
- wget --quiet "https://zed-extension-cli.nyc3.digitaloceanspaces.com/$ZED_EXTENSION_CLI_SHA/x86_64-unknown-linux-gnu/zed-extension"
- chmod +x zed-extension
- shell: bash -euxo pipefail {0}
- - name: extension_tests::check
- run: |
- mkdir -p /tmp/ext-scratch
- mkdir -p /tmp/ext-output
- ./zed-extension --source-dir . --scratch-dir /tmp/ext-scratch --output-dir /tmp/ext-output
- shell: bash -euxo pipefail {0}
- timeout-minutes: 2
check_bump_needed:
if: (github.repository_owner == 'zed-industries' || github.repository_owner == 'zed-extensions')
runs-on: namespace-profile-2x4-ubuntu-2404
@@ -89,7 +62,6 @@ jobs:
timeout-minutes: 1
bump_extension_version:
needs:
- - check_extension
- check_bump_needed
if: |-
(github.repository_owner == 'zed-industries' || github.repository_owner == 'zed-extensions') &&
@@ -144,7 +116,6 @@ jobs:
timeout-minutes: 1
create_version_label:
needs:
- - check_extension
- check_bump_needed
if: (github.repository_owner == 'zed-industries' || github.repository_owner == 'zed-extensions') && github.event_name == 'push' && github.ref == 'refs/heads/main' && needs.check_bump_needed.outputs.needs_bump == 'false'
runs-on: namespace-profile-8x16-ubuntu-2204
@@ -84,7 +84,7 @@ jobs:
run: ./script/check-keymaps
shell: bash -euxo pipefail {0}
- name: run_tests::check_style::check_for_typos
- uses: crate-ci/typos@80c8a4945eec0f6d464eaf9e65ed98ef085283d1
+ uses: crate-ci/typos@2d0ce569feab1f8752f1dde43cc2f2aa53236e06
with:
config: ./typos.toml
- name: steps::cargo_fmt
@@ -159,6 +159,7 @@ dependencies = [
"derive_more 0.99.20",
"editor",
"env_logger 0.11.8",
+ "eval_utils",
"fs",
"futures 0.3.31",
"git",
@@ -215,9 +216,9 @@ dependencies = [
[[package]]
name = "agent-client-protocol"
-version = "0.7.0"
+version = "0.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "525705e39c11cd73f7bc784e3681a9386aa30c8d0630808d3dc2237eb4f9cb1b"
+checksum = "3e639d6b544ad39f5b4e05802db5eb04e1518284eb05fda1839931003e0244c8"
dependencies = [
"agent-client-protocol-schema",
"anyhow",
@@ -226,16 +227,15 @@ dependencies = [
"derive_more 2.0.1",
"futures 0.3.31",
"log",
- "parking_lot",
"serde",
"serde_json",
]
[[package]]
name = "agent-client-protocol-schema"
-version = "0.6.2"
+version = "0.9.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ecf16c18fea41282d6bbadd1549a06be6836bddb1893f44a6235f340fa24e2af"
+checksum = "f182f5e14bef8232b239719bd99166bb11e986c08fc211f28e392f880d3093ba"
dependencies = [
"anyhow",
"derive_more 2.0.1",
@@ -328,6 +328,7 @@ dependencies = [
"buffer_diff",
"chrono",
"client",
+ "clock",
"cloud_llm_client",
"collections",
"command_palette_hooks",
@@ -335,6 +336,7 @@ dependencies = [
"context_server",
"db",
"editor",
+ "eval_utils",
"extension",
"extension_host",
"feature_flags",
@@ -343,6 +345,7 @@ dependencies = [
"futures 0.3.31",
"fuzzy",
"gpui",
+ "gpui_tokio",
"html_to_markdown",
"http_client",
"image",
@@ -370,6 +373,7 @@ dependencies = [
"proto",
"rand 0.9.2",
"release_channel",
+ "reqwest_client",
"rope",
"rules_library",
"schemars",
@@ -2126,30 +2130,15 @@ dependencies = [
"syn 2.0.106",
]
-[[package]]
-name = "bit-set"
-version = "0.5.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0700ddab506f33b20a03b13996eccd309a48e5ff77d0d95926aa0210fb4e95f1"
-dependencies = [
- "bit-vec 0.6.3",
-]
-
[[package]]
name = "bit-set"
version = "0.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "08807e080ed7f9d5433fa9b275196cfc35414f66a0c79d864dc51a0d825231a3"
dependencies = [
- "bit-vec 0.8.0",
+ "bit-vec",
]
-[[package]]
-name = "bit-vec"
-version = "0.6.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "349f9b6a179ed607305526ca489b34ad0a41aed5f7980fa90eb03160b69598fb"
-
[[package]]
name = "bit-vec"
version = "0.8.0"
@@ -2328,9 +2317,9 @@ dependencies = [
[[package]]
name = "borrow-or-share"
-version = "0.2.2"
+version = "0.2.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3eeab4423108c5d7c744f4d234de88d18d636100093ae04caf4825134b9c3a32"
+checksum = "dc0b364ead1874514c8c2855ab558056ebfeb775653e7ae45ff72f28f8f3166c"
[[package]]
name = "borsh"
@@ -4184,6 +4173,7 @@ dependencies = [
"serde_json",
"smol",
"system_specs",
+ "windows 0.61.3",
"zstd 0.11.2+zstd.1.5.2",
]
@@ -5775,6 +5765,15 @@ dependencies = [
"watch",
]
+[[package]]
+name = "eval_utils"
+version = "0.1.0"
+dependencies = [
+ "gpui",
+ "serde",
+ "smol",
+]
+
[[package]]
name = "event-listener"
version = "2.5.3"
@@ -6000,22 +5999,11 @@ checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a"
[[package]]
name = "fancy-regex"
-version = "0.13.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "531e46835a22af56d1e3b66f04844bed63158bc094a628bec1d321d9b4c44bf2"
-dependencies = [
- "bit-set 0.5.3",
- "regex-automata",
- "regex-syntax",
-]
-
-[[package]]
-name = "fancy-regex"
-version = "0.14.0"
+version = "0.16.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6e24cb5a94bcae1e5408b0effca5cd7172ea3c5755049c5f3af4cd283a165298"
+checksum = "998b056554fbe42e03ae0e152895cd1a7e1002aec800fdc6635d20270260c46f"
dependencies = [
- "bit-set 0.8.0",
+ "bit-set",
"regex-automata",
"regex-syntax",
]
@@ -6237,9 +6225,9 @@ checksum = "8bf7cc16383c4b8d58b9905a8509f02926ce3058053c056376248d958c9df1e8"
[[package]]
name = "fluent-uri"
-version = "0.3.2"
+version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1918b65d96df47d3591bed19c5cca17e3fa5d0707318e4b5ef2eae01764df7e5"
+checksum = "bc74ac4d8359ae70623506d512209619e5cf8f347124910440dbc221714b328e"
dependencies = [
"borrow-or-share",
"ref-cast",
@@ -6978,7 +6966,7 @@ dependencies = [
[[package]]
name = "gh-workflow"
version = "0.8.0"
-source = "git+https://github.com/zed-industries/gh-workflow?rev=e5f883040530b4df36437f140084ee5cc7c1c9be#e5f883040530b4df36437f140084ee5cc7c1c9be"
+source = "git+https://github.com/zed-industries/gh-workflow?rev=09acfdf2bd5c1d6254abefd609c808ff73547b2c#09acfdf2bd5c1d6254abefd609c808ff73547b2c"
dependencies = [
"async-trait",
"derive_more 2.0.1",
@@ -6995,7 +6983,7 @@ dependencies = [
[[package]]
name = "gh-workflow-macros"
version = "0.8.0"
-source = "git+https://github.com/zed-industries/gh-workflow?rev=e5f883040530b4df36437f140084ee5cc7c1c9be#e5f883040530b4df36437f140084ee5cc7c1c9be"
+source = "git+https://github.com/zed-industries/gh-workflow?rev=09acfdf2bd5c1d6254abefd609c808ff73547b2c#09acfdf2bd5c1d6254abefd609c808ff73547b2c"
dependencies = [
"heck 0.5.0",
"quote",
@@ -7535,6 +7523,17 @@ dependencies = [
"serde",
]
+[[package]]
+name = "hashbrown"
+version = "0.16.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "841d1cc9bed7f9236f321df977030373f4a4163ae1a7dbfe1a51a2c1a51d9100"
+dependencies = [
+ "allocator-api2",
+ "equivalent",
+ "foldhash 0.2.0",
+]
+
[[package]]
name = "hashlink"
version = "0.8.4"
@@ -8624,21 +8623,21 @@ dependencies = [
[[package]]
name = "jsonschema"
-version = "0.30.0"
+version = "0.37.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f1b46a0365a611fbf1d2143104dcf910aada96fafd295bab16c60b802bf6fa1d"
+checksum = "73c9ffb2b5c56d58030e1b532d8e8389da94590515f118cf35b5cb68e4764a7e"
dependencies = [
"ahash 0.8.12",
- "base64 0.22.1",
"bytecount",
+ "data-encoding",
"email_address",
- "fancy-regex 0.14.0",
+ "fancy-regex",
"fraction",
+ "getrandom 0.3.4",
"idna",
"itoa",
"num-cmp",
"num-traits",
- "once_cell",
"percent-encoding",
"referencing",
"regex",
@@ -8646,6 +8645,7 @@ dependencies = [
"reqwest 0.12.24",
"serde",
"serde_json",
+ "unicode-general-category",
"uuid-simd",
]
@@ -10195,7 +10195,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2b977c445f26e49757f9aca3631c3b8b836942cb278d69a92e7b80d3b24da632"
dependencies = [
"arrayvec",
- "bit-set 0.8.0",
+ "bit-set",
"bitflags 2.9.4",
"cfg_aliases 0.2.1",
"codespan-reporting 0.12.0",
@@ -13051,7 +13051,7 @@ dependencies = [
"dap",
"dap_adapters",
"extension",
- "fancy-regex 0.14.0",
+ "fancy-regex",
"fs",
"futures 0.3.31",
"fuzzy",
@@ -13922,13 +13922,14 @@ dependencies = [
[[package]]
name = "referencing"
-version = "0.30.0"
+version = "0.37.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c8eff4fa778b5c2a57e85c5f2fe3a709c52f0e60d23146e2151cbef5893f420e"
+checksum = "4283168a506f0dcbdce31c9f9cce3129c924da4c6bca46e46707fcb746d2d70c"
dependencies = [
"ahash 0.8.12",
"fluent-uri",
- "once_cell",
+ "getrandom 0.3.4",
+ "hashbrown 0.16.1",
"parking_lot",
"percent-encoding",
"serde_json",
@@ -17122,7 +17123,7 @@ dependencies = [
"alacritty_terminal",
"anyhow",
"collections",
- "fancy-regex 0.14.0",
+ "fancy-regex",
"futures 0.3.31",
"gpui",
"itertools 0.14.0",
@@ -17356,12 +17357,12 @@ dependencies = [
[[package]]
name = "tiktoken-rs"
version = "0.9.1"
-source = "git+https://github.com/zed-industries/tiktoken-rs?rev=7249f999c5fdf9bf3cc5c288c964454e4dac0c00#7249f999c5fdf9bf3cc5c288c964454e4dac0c00"
+source = "git+https://github.com/zed-industries/tiktoken-rs?rev=2570c4387a8505fb8f1d3f3557454b474f1e8271#2570c4387a8505fb8f1d3f3557454b474f1e8271"
dependencies = [
"anyhow",
"base64 0.22.1",
"bstr",
- "fancy-regex 0.13.0",
+ "fancy-regex",
"lazy_static",
"regex",
"rustc-hash 1.1.0",
@@ -18013,9 +18014,9 @@ dependencies = [
[[package]]
name = "tree-sitter-bash"
-version = "0.25.0"
+version = "0.25.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "871b0606e667e98a1237ebdc1b0d7056e0aebfdc3141d12b399865d4cb6ed8a6"
+checksum = "9e5ec769279cc91b561d3df0d8a5deb26b0ad40d183127f409494d6d8fc53062"
dependencies = [
"cc",
"tree-sitter-language",
@@ -18493,6 +18494,12 @@ version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ce61d488bcdc9bc8b5d1772c404828b17fc481c0a582b5581e95fb233aef503e"
+[[package]]
+name = "unicode-general-category"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0b993bddc193ae5bd0d623b49ec06ac3e9312875fdae725a975c51db1cc1677f"
+
[[package]]
name = "unicode-ident"
version = "1.0.19"
@@ -18727,7 +18734,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "23b082222b4f6619906941c17eb2297fff4c2fb96cb60164170522942a200bd8"
dependencies = [
"outref",
- "uuid",
"vsimd",
]
@@ -21212,7 +21218,7 @@ dependencies = [
[[package]]
name = "zed"
-version = "0.216.0"
+version = "0.217.0"
dependencies = [
"acp_tools",
"activity_indicator",
@@ -21502,6 +21508,8 @@ dependencies = [
[[package]]
name = "zed_extension_api"
version = "0.7.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0729d50b4ca0a7e28e590bbe32e3ca0194d97ef654961451a424c661a366fca0"
dependencies = [
"serde",
"serde_json",
@@ -21510,9 +21518,7 @@ dependencies = [
[[package]]
name = "zed_extension_api"
-version = "0.7.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0729d50b4ca0a7e28e590bbe32e3ca0194d97ef654961451a424c661a366fca0"
+version = "0.8.0"
dependencies = [
"serde",
"serde_json",
@@ -21530,7 +21536,7 @@ dependencies = [
name = "zed_html"
version = "0.2.3"
dependencies = [
- "zed_extension_api 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "zed_extension_api 0.7.0",
]
[[package]]
@@ -21544,7 +21550,7 @@ dependencies = [
name = "zed_test_extension"
version = "0.1.0"
dependencies = [
- "zed_extension_api 0.7.0",
+ "zed_extension_api 0.8.0",
]
[[package]]
@@ -59,6 +59,7 @@ members = [
"crates/zeta2_tools",
"crates/editor",
"crates/eval",
+ "crates/eval_utils",
"crates/explorer_command_injector",
"crates/extension",
"crates/extension_api",
@@ -288,6 +289,7 @@ deepseek = { path = "crates/deepseek" }
derive_refineable = { path = "crates/refineable/derive_refineable" }
diagnostics = { path = "crates/diagnostics" }
editor = { path = "crates/editor" }
+eval_utils = { path = "crates/eval_utils" }
extension = { path = "crates/extension" }
extension_host = { path = "crates/extension_host" }
extensions_ui = { path = "crates/extensions_ui" }
@@ -439,7 +441,7 @@ zlog_settings = { path = "crates/zlog_settings" }
# External crates
#
-agent-client-protocol = { version = "0.7.0", features = ["unstable"] }
+agent-client-protocol = { version = "=0.8.0", features = ["unstable"] }
aho-corasick = "1.1"
alacritty_terminal = "0.25.1-rc1"
any_vec = "0.14"
@@ -503,12 +505,12 @@ ec4rs = "1.1"
emojis = "0.6.1"
env_logger = "0.11"
exec = "0.3.1"
-fancy-regex = "0.14.0"
+fancy-regex = "0.16.0"
fork = "0.4.0"
futures = "0.3"
futures-batch = "0.6.1"
futures-lite = "1.13"
-gh-workflow = { git = "https://github.com/zed-industries/gh-workflow", rev = "e5f883040530b4df36437f140084ee5cc7c1c9be" }
+gh-workflow = { git = "https://github.com/zed-industries/gh-workflow", rev = "09acfdf2bd5c1d6254abefd609c808ff73547b2c" }
git2 = { version = "0.20.1", default-features = false }
globset = "0.4"
handlebars = "4.3"
@@ -529,7 +531,7 @@ indoc = "2"
inventory = "0.3.19"
itertools = "0.14.0"
json_dotpath = "1.1"
-jsonschema = "0.30.0"
+jsonschema = "0.37.0"
jsonwebtoken = "9.3"
jupyter-protocol = "0.10.0"
jupyter-websocket-client = "0.15.0"
@@ -656,7 +658,7 @@ sysinfo = "0.37.0"
take-until = "0.2.0"
tempfile = "3.20.0"
thiserror = "2.0.12"
-tiktoken-rs = { git = "https://github.com/zed-industries/tiktoken-rs", rev = "7249f999c5fdf9bf3cc5c288c964454e4dac0c00" }
+tiktoken-rs = { git = "https://github.com/zed-industries/tiktoken-rs", rev = "2570c4387a8505fb8f1d3f3557454b474f1e8271" }
time = { version = "0.3", features = [
"macros",
"parsing",
@@ -672,7 +674,7 @@ toml = "0.8"
toml_edit = { version = "0.22", default-features = false, features = ["display", "parse", "serde"] }
tower-http = "0.4.4"
tree-sitter = { version = "0.25.10", features = ["wasm"] }
-tree-sitter-bash = "0.25.0"
+tree-sitter-bash = "0.25.1"
tree-sitter-c = "0.23"
tree-sitter-cpp = { git = "https://github.com/tree-sitter/tree-sitter-cpp", rev = "5cb9b693cfd7bfacab1d9ff4acac1a4150700609" }
tree-sitter-css = "0.23"
@@ -53,6 +53,10 @@ extension
git
= @cole-miller
= @danilo-leal
+ = @dvdsk
+ = @kubkon
+ = @Anthony-Eid
+ = @cameron1024
gpui
= @Anthony-Eid
@@ -1 +0,0 @@
-<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="none"><path stroke="#000" stroke-linecap="round" stroke-linejoin="round" stroke-width="1.2" d="M14 11.333A6 6 0 0 0 4 6.867l-1 .9"/><path stroke="#000" stroke-linecap="round" stroke-linejoin="round" stroke-width="1.333" d="M2 4.667v4h4"/><path fill="#000" stroke="#000" stroke-linecap="round" stroke-linejoin="round" stroke-width="1.2" d="M8 12a.667.667 0 1 0 0-1.333A.667.667 0 0 0 8 12Z"/></svg>
@@ -1 +1,5 @@
-<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="none"><path stroke="#000" stroke-linecap="round" stroke-linejoin="round" stroke-width="1.2" d="M3.333 10 8 14.667 12.667 10M8 5.333v9.334"/><path fill="#000" stroke="#000" stroke-linecap="round" stroke-linejoin="round" stroke-width="1.2" d="M8 2.667a.667.667 0 1 0 0-1.334.667.667 0 0 0 0 1.334Z"/></svg>
+<svg width="16" height="16" viewBox="0 0 16 16" fill="none" xmlns="http://www.w3.org/2000/svg">
+<path d="M2 13H5" stroke="#C6CAD0" stroke-width="1.2" stroke-linecap="round" stroke-linejoin="round"/>
+<path d="M11 13H14" stroke="#C6CAD0" stroke-width="1.2" stroke-linecap="round" stroke-linejoin="round"/>
+<path d="M11.5 8.5L8 12M8 12L4.5 8.5M8 12L8 3" stroke="#C6CAD0" stroke-width="1.2" stroke-linecap="round" stroke-linejoin="round"/>
+</svg>
@@ -1 +1,5 @@
-<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="none"><path stroke="#000" stroke-linecap="round" stroke-linejoin="round" stroke-width="1.2" d="M3.333 6 8 1.333 12.667 6M8 10.667V1.333"/><path fill="#000" stroke="#000" stroke-linecap="round" stroke-linejoin="round" stroke-width="1.2" d="M8 13.333a.667.667 0 1 1 0 1.334.667.667 0 0 1 0-1.334Z"/></svg>
+<svg width="16" height="16" viewBox="0 0 16 16" fill="none" xmlns="http://www.w3.org/2000/svg">
+<path d="M4.5 6.5L8 3M8 3L11.5 6.5M8 3V12" stroke="#C6CAD0" stroke-width="1.2" stroke-linecap="round" stroke-linejoin="round"/>
+<path d="M2 13H5" stroke="#C6CAD0" stroke-width="1.2" stroke-linecap="round" stroke-linejoin="round"/>
+<path d="M11 13H14" stroke="#C6CAD0" stroke-width="1.2" stroke-linecap="round" stroke-linejoin="round"/>
+</svg>
@@ -1 +1,5 @@
-<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" fill="none"><path stroke="#000" stroke-linecap="round" stroke-linejoin="round" stroke-width="1.2" d="M2 11.333a6 6 0 0 1 10-4.466l1 .9"/><path stroke="#000" stroke-linecap="round" stroke-linejoin="round" stroke-width="1.333" d="M14 4.667v4h-4"/><path fill="#000" stroke="#000" stroke-linecap="round" stroke-linejoin="round" stroke-width="1.2" d="M8 12a.667.667 0 1 1 0-1.333A.667.667 0 0 1 8 12Z"/></svg>
+<svg width="16" height="16" viewBox="0 0 16 16" fill="none" xmlns="http://www.w3.org/2000/svg">
+<path d="M2 11.333C2.00118 10.1752 2.33729 9.04258 2.96777 8.07159C3.59826 7.10059 4.49621 6.33274 5.55331 5.86064C6.61041 5.38853 7.78152 5.23235 8.9254 5.41091C10.0693 5.58947 11.1371 6.09516 12 6.86698L13 7.76698" stroke="#C6CAD0" stroke-width="1.2" stroke-linecap="round" stroke-linejoin="round"/>
+<path d="M14 4.66699V8.66699H10" stroke="#C6CAD0" stroke-width="1.2" stroke-linecap="round" stroke-linejoin="round"/>
+<path d="M7 13H10" stroke="#C6CAD0" stroke-width="1.2" stroke-linecap="round" stroke-linejoin="round"/>
+</svg>
@@ -1100,13 +1100,22 @@
"preview_tabs": {
// Whether preview tabs should be enabled.
// Preview tabs allow you to open files in preview mode, where they close automatically
- // when you switch to another file unless you explicitly pin them.
+ // when you open another preview tab.
// This is useful for quickly viewing files without cluttering your workspace.
"enabled": true,
+ // Whether to open tabs in preview mode when opened from the project panel with a single click.
+ "enable_preview_from_project_panel": true,
// Whether to open tabs in preview mode when selected from the file finder.
"enable_preview_from_file_finder": false,
- // Whether a preview tab gets replaced when code navigation is used to navigate away from the tab.
- "enable_preview_from_code_navigation": false
+ // Whether to open tabs in preview mode when opened from a multibuffer.
+ "enable_preview_from_multibuffer": true,
+ // Whether to open tabs in preview mode when code navigation is used to open a multibuffer.
+ "enable_preview_multibuffer_from_code_navigation": false,
+ // Whether to open tabs in preview mode when code navigation is used to open a single file.
+ "enable_preview_file_from_code_navigation": true,
+ // Whether to keep tabs in preview mode when code navigation is used to navigate away from them.
+ // If `enable_preview_file_from_code_navigation` or `enable_preview_multibuffer_from_code_navigation` is also true, the new tab may replace the existing one.
+ "enable_keep_preview_on_code_navigation": false
},
// Settings related to the file finder.
"file_finder": {
@@ -45,6 +45,7 @@
"tab.inactive_background": "#1f2127ff",
"tab.active_background": "#0d1016ff",
"search.match_background": "#5ac2fe66",
+ "search.active_match_background": "#ea570166",
"panel.background": "#1f2127ff",
"panel.focused_border": "#5ac1feff",
"pane.focused_border": null,
@@ -436,6 +437,7 @@
"tab.inactive_background": "#ececedff",
"tab.active_background": "#fcfcfcff",
"search.match_background": "#3b9ee566",
+ "search.active_match_background": "#f88b3666",
"panel.background": "#ececedff",
"panel.focused_border": "#3b9ee5ff",
"pane.focused_border": null,
@@ -827,6 +829,7 @@
"tab.inactive_background": "#353944ff",
"tab.active_background": "#242835ff",
"search.match_background": "#73cffe66",
+ "search.active_match_background": "#fd722b66",
"panel.background": "#353944ff",
"panel.focused_border": null,
"pane.focused_border": null,
@@ -46,6 +46,7 @@
"tab.inactive_background": "#3a3735ff",
"tab.active_background": "#282828ff",
"search.match_background": "#83a59866",
+ "search.active_match_background": "#c09f3f66",
"panel.background": "#3a3735ff",
"panel.focused_border": "#83a598ff",
"pane.focused_border": null,
@@ -452,6 +453,7 @@
"tab.inactive_background": "#393634ff",
"tab.active_background": "#1d2021ff",
"search.match_background": "#83a59866",
+ "search.active_match_background": "#c9653666",
"panel.background": "#393634ff",
"panel.focused_border": "#83a598ff",
"pane.focused_border": null,
@@ -858,6 +860,7 @@
"tab.inactive_background": "#3b3735ff",
"tab.active_background": "#32302fff",
"search.match_background": "#83a59866",
+ "search.active_match_background": "#aea85166",
"panel.background": "#3b3735ff",
"panel.focused_border": null,
"pane.focused_border": null,
@@ -1264,6 +1267,7 @@
"tab.inactive_background": "#ecddb4ff",
"tab.active_background": "#fbf1c7ff",
"search.match_background": "#0b667866",
+ "search.active_match_background": "#ba2d1166",
"panel.background": "#ecddb4ff",
"panel.focused_border": null,
"pane.focused_border": null,
@@ -1670,6 +1674,7 @@
"tab.inactive_background": "#ecddb5ff",
"tab.active_background": "#f9f5d7ff",
"search.match_background": "#0b667866",
+ "search.active_match_background": "#dc351466",
"panel.background": "#ecddb5ff",
"panel.focused_border": null,
"pane.focused_border": null,
@@ -2076,6 +2081,7 @@
"tab.inactive_background": "#ecdcb3ff",
"tab.active_background": "#f2e5bcff",
"search.match_background": "#0b667866",
+ "search.active_match_background": "#d7331466",
"panel.background": "#ecdcb3ff",
"panel.focused_border": null,
"pane.focused_border": null,
@@ -45,6 +45,7 @@
"tab.inactive_background": "#2f343eff",
"tab.active_background": "#282c33ff",
"search.match_background": "#74ade866",
+ "search.active_match_background": "#e8af7466",
"panel.background": "#2f343eff",
"panel.focused_border": null,
"pane.focused_border": null,
@@ -448,6 +449,7 @@
"tab.inactive_background": "#ebebecff",
"tab.active_background": "#fafafaff",
"search.match_background": "#5c79e266",
+ "search.active_match_background": "#d0a92366",
"panel.background": "#ebebecff",
"panel.focused_border": null,
"pane.focused_border": null,
@@ -201,17 +201,19 @@ impl ToolCall {
};
let mut content = Vec::with_capacity(tool_call.content.len());
for item in tool_call.content {
- content.push(ToolCallContent::from_acp(
+ if let Some(item) = ToolCallContent::from_acp(
item,
language_registry.clone(),
path_style,
terminals,
cx,
- )?);
+ )? {
+ content.push(item);
+ }
}
let result = Self {
- id: tool_call.id,
+ id: tool_call.tool_call_id,
label: cx
.new(|cx| Markdown::new(title.into(), Some(language_registry.clone()), None, cx)),
kind: tool_call.kind,
@@ -241,6 +243,7 @@ impl ToolCall {
locations,
raw_input,
raw_output,
+ ..
} = fields;
if let Some(kind) = kind {
@@ -262,21 +265,29 @@ impl ToolCall {
}
if let Some(content) = content {
- let new_content_len = content.len();
+ let mut new_content_len = content.len();
let mut content = content.into_iter();
// Reuse existing content if we can
for (old, new) in self.content.iter_mut().zip(content.by_ref()) {
- old.update_from_acp(new, language_registry.clone(), path_style, terminals, cx)?;
+ let valid_content =
+ old.update_from_acp(new, language_registry.clone(), path_style, terminals, cx)?;
+ if !valid_content {
+ new_content_len -= 1;
+ }
}
for new in content {
- self.content.push(ToolCallContent::from_acp(
+ if let Some(new) = ToolCallContent::from_acp(
new,
language_registry.clone(),
path_style,
terminals,
cx,
- )?)
+ )? {
+ self.content.push(new);
+ } else {
+ new_content_len -= 1;
+ }
}
self.content.truncate(new_content_len);
}
@@ -425,6 +436,7 @@ impl From<acp::ToolCallStatus> for ToolCallStatus {
acp::ToolCallStatus::InProgress => Self::InProgress,
acp::ToolCallStatus::Completed => Self::Completed,
acp::ToolCallStatus::Failed => Self::Failed,
+ _ => Self::Pending,
}
}
}
@@ -537,7 +549,7 @@ impl ContentBlock {
..
}) => Self::resource_link_md(&uri, path_style),
acp::ContentBlock::Image(image) => Self::image_md(&image),
- acp::ContentBlock::Audio(_) | acp::ContentBlock::Resource(_) => String::new(),
+ _ => String::new(),
}
}
@@ -591,15 +603,17 @@ impl ToolCallContent {
path_style: PathStyle,
terminals: &HashMap<acp::TerminalId, Entity<Terminal>>,
cx: &mut App,
- ) -> Result<Self> {
+ ) -> Result<Option<Self>> {
match content {
- acp::ToolCallContent::Content { content } => Ok(Self::ContentBlock(ContentBlock::new(
- content,
- &language_registry,
- path_style,
- cx,
- ))),
- acp::ToolCallContent::Diff { diff } => Ok(Self::Diff(cx.new(|cx| {
+ acp::ToolCallContent::Content(acp::Content { content, .. }) => {
+ Ok(Some(Self::ContentBlock(ContentBlock::new(
+ content,
+ &language_registry,
+ path_style,
+ cx,
+ ))))
+ }
+ acp::ToolCallContent::Diff(diff) => Ok(Some(Self::Diff(cx.new(|cx| {
Diff::finalized(
diff.path.to_string_lossy().into_owned(),
diff.old_text,
@@ -607,12 +621,13 @@ impl ToolCallContent {
language_registry,
cx,
)
- }))),
- acp::ToolCallContent::Terminal { terminal_id } => terminals
+ })))),
+ acp::ToolCallContent::Terminal(acp::Terminal { terminal_id, .. }) => terminals
.get(&terminal_id)
.cloned()
- .map(Self::Terminal)
+ .map(|terminal| Some(Self::Terminal(terminal)))
.ok_or_else(|| anyhow::anyhow!("Terminal with id `{}` not found", terminal_id)),
+ _ => Ok(None),
}
}
@@ -623,9 +638,9 @@ impl ToolCallContent {
path_style: PathStyle,
terminals: &HashMap<acp::TerminalId, Entity<Terminal>>,
cx: &mut App,
- ) -> Result<()> {
+ ) -> Result<bool> {
let needs_update = match (&self, &new) {
- (Self::Diff(old_diff), acp::ToolCallContent::Diff { diff: new_diff }) => {
+ (Self::Diff(old_diff), acp::ToolCallContent::Diff(new_diff)) => {
old_diff.read(cx).needs_update(
new_diff.old_text.as_deref().unwrap_or(""),
&new_diff.new_text,
@@ -635,10 +650,14 @@ impl ToolCallContent {
_ => true,
};
- if needs_update {
- *self = Self::from_acp(new, language_registry, path_style, terminals, cx)?;
+ if let Some(update) = Self::from_acp(new, language_registry, path_style, terminals, cx)? {
+ if needs_update {
+ *self = update;
+ }
+ Ok(true)
+ } else {
+ Ok(false)
}
- Ok(())
}
pub fn to_markdown(&self, cx: &App) -> String {
@@ -660,7 +679,7 @@ pub enum ToolCallUpdate {
impl ToolCallUpdate {
fn id(&self) -> &acp::ToolCallId {
match self {
- Self::UpdateFields(update) => &update.id,
+ Self::UpdateFields(update) => &update.tool_call_id,
Self::UpdateDiff(diff) => &diff.id,
Self::UpdateTerminal(terminal) => &terminal.id,
}
@@ -732,6 +751,7 @@ impl Plan {
acp::PlanEntryStatus::Completed => {
stats.completed += 1;
}
+ _ => {}
}
}
@@ -1154,6 +1174,7 @@ impl AcpThread {
current_mode_id,
..
}) => cx.emit(AcpThreadEvent::ModeUpdated(current_mode_id)),
+ _ => {}
}
Ok(())
}
@@ -1287,11 +1308,7 @@ impl AcpThread {
label: cx.new(|cx| Markdown::new("Tool call not found".into(), None, None, cx)),
kind: acp::ToolKind::Fetch,
content: vec![ToolCallContent::ContentBlock(ContentBlock::new(
- acp::ContentBlock::Text(acp::TextContent {
- text: "Tool call not found".to_string(),
- annotations: None,
- meta: None,
- }),
+ "Tool call not found".into(),
&languages,
path_style,
cx,
@@ -1315,7 +1332,7 @@ impl AcpThread {
let location_updated = update.fields.locations.is_some();
call.update_fields(update.fields, languages, path_style, &self.terminals, cx)?;
if location_updated {
- self.resolve_locations(update.id, cx);
+ self.resolve_locations(update.tool_call_id, cx);
}
}
ToolCallUpdate::UpdateDiff(update) => {
@@ -1353,7 +1370,7 @@ impl AcpThread {
) -> Result<(), acp::Error> {
let language_registry = self.project.read(cx).languages().clone();
let path_style = self.project.read(cx).path_style(cx);
- let id = update.id.clone();
+ let id = update.tool_call_id.clone();
let agent = self.connection().telemetry_id();
let session = self.session_id();
@@ -1518,16 +1535,16 @@ impl AcpThread {
// some tools would (incorrectly) continue to auto-accept.
if let Some(allow_once_option) = options.iter().find_map(|option| {
if matches!(option.kind, acp::PermissionOptionKind::AllowOnce) {
- Some(option.id.clone())
+ Some(option.option_id.clone())
} else {
None
}
}) {
self.upsert_tool_call_inner(tool_call, ToolCallStatus::Pending, cx)?;
return Ok(async {
- acp::RequestPermissionOutcome::Selected {
- option_id: allow_once_option,
- }
+ acp::RequestPermissionOutcome::Selected(acp::SelectedPermissionOutcome::new(
+ allow_once_option,
+ ))
}
.boxed());
}
@@ -1543,7 +1560,9 @@ impl AcpThread {
let fut = async {
match rx.await {
- Ok(option) => acp::RequestPermissionOutcome::Selected { option_id: option },
+ Ok(option) => acp::RequestPermissionOutcome::Selected(
+ acp::SelectedPermissionOutcome::new(option),
+ ),
Err(oneshot::Canceled) => acp::RequestPermissionOutcome::Cancelled,
}
}
@@ -1570,6 +1589,7 @@ impl AcpThread {
acp::PermissionOptionKind::AllowOnce | acp::PermissionOptionKind::AllowAlways => {
ToolCallStatus::InProgress
}
+ _ => ToolCallStatus::InProgress,
};
let curr_status = mem::replace(&mut call.status, new_status);
@@ -1648,14 +1668,7 @@ impl AcpThread {
message: &str,
cx: &mut Context<Self>,
) -> BoxFuture<'static, Result<()>> {
- self.send(
- vec![acp::ContentBlock::Text(acp::TextContent {
- text: message.to_string(),
- annotations: None,
- meta: None,
- })],
- cx,
- )
+ self.send(vec![message.into()], cx)
}
pub fn send(
@@ -1669,11 +1682,7 @@ impl AcpThread {
self.project.read(cx).path_style(cx),
cx,
);
- let request = acp::PromptRequest {
- prompt: message.clone(),
- session_id: self.session_id.clone(),
- meta: None,
- };
+ let request = acp::PromptRequest::new(self.session_id.clone(), message.clone());
let git_store = self.project.read(cx).git_store().clone();
let message_id = if self.connection.truncate(&self.session_id, cx).is_some() {
@@ -1765,7 +1774,7 @@ impl AcpThread {
result,
Ok(Ok(acp::PromptResponse {
stop_reason: acp::StopReason::Cancelled,
- meta: None,
+ ..
}))
);
@@ -1781,7 +1790,7 @@ impl AcpThread {
// Handle refusal - distinguish between user prompt and tool call refusals
if let Ok(Ok(acp::PromptResponse {
stop_reason: acp::StopReason::Refusal,
- meta: _,
+ ..
})) = result
{
if let Some((user_msg_ix, _)) = this.last_user_message() {
@@ -2017,7 +2026,7 @@ impl AcpThread {
})?;
Ok(project.open_buffer(path, cx))
})
- .map_err(|e| acp::Error::internal_error().with_data(e.to_string()))
+ .map_err(|e| acp::Error::internal_error().data(e.to_string()))
.flatten()?;
let buffer = load.await?;
@@ -2050,7 +2059,7 @@ impl AcpThread {
let start_position = Point::new(line, 0);
if start_position > max_point {
- return Err(acp::Error::invalid_params().with_data(format!(
+ return Err(acp::Error::invalid_params().data(format!(
"Attempting to read beyond the end of the file, line {}:{}",
max_point.row + 1,
max_point.column
@@ -2202,7 +2211,7 @@ impl AcpThread {
let language_registry = project.read(cx).languages().clone();
let is_windows = project.read(cx).path_style(cx).is_windows();
- let terminal_id = acp::TerminalId(Uuid::new_v4().to_string().into());
+ let terminal_id = acp::TerminalId::new(Uuid::new_v4().to_string());
let terminal_task = cx.spawn({
let terminal_id = terminal_id.clone();
async move |_this, cx| {
@@ -2412,7 +2421,7 @@ mod tests {
.await
.unwrap();
- let terminal_id = acp::TerminalId(uuid::Uuid::new_v4().to_string().into());
+ let terminal_id = acp::TerminalId::new(uuid::Uuid::new_v4().to_string());
// Send Output BEFORE Created - should be buffered by acp_thread
thread.update(cx, |thread, cx| {
@@ -2474,7 +2483,7 @@ mod tests {
.await
.unwrap();
- let terminal_id = acp::TerminalId(uuid::Uuid::new_v4().to_string().into());
+ let terminal_id = acp::TerminalId::new(uuid::Uuid::new_v4().to_string());
// Send Output BEFORE Created
thread.update(cx, |thread, cx| {
@@ -2492,11 +2501,7 @@ mod tests {
thread.on_terminal_provider_event(
TerminalProviderEvent::Exit {
terminal_id: terminal_id.clone(),
- status: acp::TerminalExitStatus {
- exit_code: Some(0),
- signal: None,
- meta: None,
- },
+ status: acp::TerminalExitStatus::new().exit_code(0),
},
cx,
);
@@ -2553,15 +2558,7 @@ mod tests {
// Test creating a new user message
thread.update(cx, |thread, cx| {
- thread.push_user_content_block(
- None,
- acp::ContentBlock::Text(acp::TextContent {
- annotations: None,
- text: "Hello, ".to_string(),
- meta: None,
- }),
- cx,
- );
+ thread.push_user_content_block(None, "Hello, ".into(), cx);
});
thread.update(cx, |thread, cx| {
@@ -2577,15 +2574,7 @@ mod tests {
// Test appending to existing user message
let message_1_id = UserMessageId::new();
thread.update(cx, |thread, cx| {
- thread.push_user_content_block(
- Some(message_1_id.clone()),
- acp::ContentBlock::Text(acp::TextContent {
- annotations: None,
- text: "world!".to_string(),
- meta: None,
- }),
- cx,
- );
+ thread.push_user_content_block(Some(message_1_id.clone()), "world!".into(), cx);
});
thread.update(cx, |thread, cx| {
@@ -2600,26 +2589,14 @@ mod tests {
// Test creating new user message after assistant message
thread.update(cx, |thread, cx| {
- thread.push_assistant_content_block(
- acp::ContentBlock::Text(acp::TextContent {
- annotations: None,
- text: "Assistant response".to_string(),
- meta: None,
- }),
- false,
- cx,
- );
+ thread.push_assistant_content_block("Assistant response".into(), false, cx);
});
let message_2_id = UserMessageId::new();
thread.update(cx, |thread, cx| {
thread.push_user_content_block(
Some(message_2_id.clone()),
- acp::ContentBlock::Text(acp::TextContent {
- annotations: None,
- text: "New user message".to_string(),
- meta: None,
- }),
+ "New user message".into(),
cx,
);
});
@@ -2647,27 +2624,22 @@ mod tests {
thread.update(&mut cx, |thread, cx| {
thread
.handle_session_update(
- acp::SessionUpdate::AgentThoughtChunk(acp::ContentChunk {
- content: "Thinking ".into(),
- meta: None,
- }),
+ acp::SessionUpdate::AgentThoughtChunk(acp::ContentChunk::new(
+ "Thinking ".into(),
+ )),
cx,
)
.unwrap();
thread
.handle_session_update(
- acp::SessionUpdate::AgentThoughtChunk(acp::ContentChunk {
- content: "hard!".into(),
- meta: None,
- }),
+ acp::SessionUpdate::AgentThoughtChunk(acp::ContentChunk::new(
+ "hard!".into(),
+ )),
cx,
)
.unwrap();
})?;
- Ok(acp::PromptResponse {
- stop_reason: acp::StopReason::EndTurn,
- meta: None,
- })
+ Ok(acp::PromptResponse::new(acp::StopReason::EndTurn))
}
.boxed_local()
},
@@ -2735,10 +2707,7 @@ mod tests {
.unwrap()
.await
.unwrap();
- Ok(acp::PromptResponse {
- stop_reason: acp::StopReason::EndTurn,
- meta: None,
- })
+ Ok(acp::PromptResponse::new(acp::StopReason::EndTurn))
}
.boxed_local()
},
@@ -2969,7 +2938,7 @@ mod tests {
let fs = FakeFs::new(cx.executor());
let project = Project::test(fs, [], cx).await;
- let id = acp::ToolCallId("test".into());
+ let id = acp::ToolCallId::new("test");
let connection = Rc::new(FakeAgentConnection::new().on_user_message({
let id = id.clone();
@@ -2979,26 +2948,17 @@ mod tests {
thread
.update(&mut cx, |thread, cx| {
thread.handle_session_update(
- acp::SessionUpdate::ToolCall(acp::ToolCall {
- id: id.clone(),
- title: "Label".into(),
- kind: acp::ToolKind::Fetch,
- status: acp::ToolCallStatus::InProgress,
- content: vec![],
- locations: vec![],
- raw_input: None,
- raw_output: None,
- meta: None,
- }),
+ acp::SessionUpdate::ToolCall(
+ acp::ToolCall::new(id.clone(), "Label")
+ .kind(acp::ToolKind::Fetch)
+ .status(acp::ToolCallStatus::InProgress),
+ ),
cx,
)
})
.unwrap()
.unwrap();
- Ok(acp::PromptResponse {
- stop_reason: acp::StopReason::EndTurn,
- meta: None,
- })
+ Ok(acp::PromptResponse::new(acp::StopReason::EndTurn))
}
.boxed_local()
}
@@ -3040,14 +3000,10 @@ mod tests {
thread
.update(cx, |thread, cx| {
thread.handle_session_update(
- acp::SessionUpdate::ToolCallUpdate(acp::ToolCallUpdate {
+ acp::SessionUpdate::ToolCallUpdate(acp::ToolCallUpdate::new(
id,
- fields: acp::ToolCallUpdateFields {
- status: Some(acp::ToolCallStatus::Completed),
- ..Default::default()
- },
- meta: None,
- }),
+ acp::ToolCallUpdateFields::new().status(acp::ToolCallStatus::Completed),
+ )),
cx,
)
})
@@ -3079,33 +3035,21 @@ mod tests {
thread
.update(&mut cx, |thread, cx| {
thread.handle_session_update(
- acp::SessionUpdate::ToolCall(acp::ToolCall {
- id: acp::ToolCallId("test".into()),
- title: "Label".into(),
- kind: acp::ToolKind::Edit,
- status: acp::ToolCallStatus::Completed,
- content: vec![acp::ToolCallContent::Diff {
- diff: acp::Diff {
- path: "/test/test.txt".into(),
- old_text: None,
- new_text: "foo".into(),
- meta: None,
- },
- }],
- locations: vec![],
- raw_input: None,
- raw_output: None,
- meta: None,
- }),
+ acp::SessionUpdate::ToolCall(
+ acp::ToolCall::new("test", "Label")
+ .kind(acp::ToolKind::Edit)
+ .status(acp::ToolCallStatus::Completed)
+ .content(vec![acp::ToolCallContent::Diff(acp::Diff::new(
+ "/test/test.txt",
+ "foo",
+ ))]),
+ ),
cx,
)
})
.unwrap()
.unwrap();
- Ok(acp::PromptResponse {
- stop_reason: acp::StopReason::EndTurn,
- meta: None,
- })
+ Ok(acp::PromptResponse::new(acp::StopReason::EndTurn))
}
.boxed_local()
}
@@ -3158,18 +3102,14 @@ mod tests {
thread.update(&mut cx, |thread, cx| {
thread
.handle_session_update(
- acp::SessionUpdate::AgentMessageChunk(acp::ContentChunk {
- content: content.text.to_uppercase().into(),
- meta: None,
- }),
+ acp::SessionUpdate::AgentMessageChunk(acp::ContentChunk::new(
+ content.text.to_uppercase().into(),
+ )),
cx,
)
.unwrap();
})?;
- Ok(acp::PromptResponse {
- stop_reason: acp::StopReason::EndTurn,
- meta: None,
- })
+ Ok(acp::PromptResponse::new(acp::StopReason::EndTurn))
}
.boxed_local()
}
@@ -3325,34 +3265,22 @@ mod tests {
thread.update(&mut cx, |thread, cx| {
thread
.handle_session_update(
- acp::SessionUpdate::ToolCall(acp::ToolCall {
- id: acp::ToolCallId("tool1".into()),
- title: "Test Tool".into(),
- kind: acp::ToolKind::Fetch,
- status: acp::ToolCallStatus::Completed,
- content: vec![],
- locations: vec![],
- raw_input: Some(serde_json::json!({"query": "test"})),
- raw_output: Some(
- serde_json::json!({"result": "inappropriate content"}),
- ),
- meta: None,
- }),
+ acp::SessionUpdate::ToolCall(
+ acp::ToolCall::new("tool1", "Test Tool")
+ .kind(acp::ToolKind::Fetch)
+ .status(acp::ToolCallStatus::Completed)
+ .raw_input(serde_json::json!({"query": "test"}))
+ .raw_output(serde_json::json!({"result": "inappropriate content"})),
+ ),
cx,
)
.unwrap();
})?;
// Now return refusal because of the tool result
- Ok(acp::PromptResponse {
- stop_reason: acp::StopReason::Refusal,
- meta: None,
- })
+ Ok(acp::PromptResponse::new(acp::StopReason::Refusal))
} else {
- Ok(acp::PromptResponse {
- stop_reason: acp::StopReason::EndTurn,
- meta: None,
- })
+ Ok(acp::PromptResponse::new(acp::StopReason::EndTurn))
}
}
.boxed_local()
@@ -3380,16 +3308,7 @@ mod tests {
});
// Send a user message - this will trigger tool call and then refusal
- let send_task = thread.update(cx, |thread, cx| {
- thread.send(
- vec![acp::ContentBlock::Text(acp::TextContent {
- text: "Hello".into(),
- annotations: None,
- meta: None,
- })],
- cx,
- )
- });
+ let send_task = thread.update(cx, |thread, cx| thread.send(vec!["Hello".into()], cx));
cx.background_executor.spawn(send_task).detach();
cx.run_until_parked();
@@ -3435,21 +3354,11 @@ mod tests {
let refuse_next = refuse_next.clone();
move |_request, _thread, _cx| {
if refuse_next.load(SeqCst) {
- async move {
- Ok(acp::PromptResponse {
- stop_reason: acp::StopReason::Refusal,
- meta: None,
- })
- }
- .boxed_local()
+ async move { Ok(acp::PromptResponse::new(acp::StopReason::Refusal)) }
+ .boxed_local()
} else {
- async move {
- Ok(acp::PromptResponse {
- stop_reason: acp::StopReason::EndTurn,
- meta: None,
- })
- }
- .boxed_local()
+ async move { Ok(acp::PromptResponse::new(acp::StopReason::EndTurn)) }
+ .boxed_local()
}
}
}));
@@ -3506,10 +3415,7 @@ mod tests {
let refuse_next = refuse_next.clone();
async move {
if refuse_next.load(SeqCst) {
- return Ok(acp::PromptResponse {
- stop_reason: acp::StopReason::Refusal,
- meta: None,
- });
+ return Ok(acp::PromptResponse::new(acp::StopReason::Refusal));
}
let acp::ContentBlock::Text(content) = &request.prompt[0] else {
@@ -3518,18 +3424,14 @@ mod tests {
thread.update(&mut cx, |thread, cx| {
thread
.handle_session_update(
- acp::SessionUpdate::AgentMessageChunk(acp::ContentChunk {
- content: content.text.to_uppercase().into(),
- meta: None,
- }),
+ acp::SessionUpdate::AgentMessageChunk(acp::ContentChunk::new(
+ content.text.to_uppercase().into(),
+ )),
cx,
)
.unwrap();
})?;
- Ok(acp::PromptResponse {
- stop_reason: acp::StopReason::EndTurn,
- meta: None,
- })
+ Ok(acp::PromptResponse::new(acp::StopReason::EndTurn))
}
.boxed_local()
}
@@ -3668,13 +3570,12 @@ mod tests {
_cwd: &Path,
cx: &mut App,
) -> Task<gpui::Result<Entity<AcpThread>>> {
- let session_id = acp::SessionId(
+ let session_id = acp::SessionId::new(
rand::rng()
.sample_iter(&distr::Alphanumeric)
.take(7)
.map(char::from)
- .collect::<String>()
- .into(),
+ .collect::<String>(),
);
let action_log = cx.new(|_| ActionLog::new(project.clone()));
let thread = cx.new(|cx| {
@@ -3684,12 +3585,12 @@ mod tests {
project,
action_log,
session_id.clone(),
- watch::Receiver::constant(acp::PromptCapabilities {
- image: true,
- audio: true,
- embedded_context: true,
- meta: None,
- }),
+ watch::Receiver::constant(
+ acp::PromptCapabilities::new()
+ .image(true)
+ .audio(true)
+ .embedded_context(true),
+ ),
cx,
)
});
@@ -3718,10 +3619,7 @@ mod tests {
let thread = thread.clone();
cx.spawn(async move |cx| handler(params, thread, cx.clone()).await)
} else {
- Task::ready(Ok(acp::PromptResponse {
- stop_reason: acp::StopReason::EndTurn,
- meta: None,
- }))
+ Task::ready(Ok(acp::PromptResponse::new(acp::StopReason::EndTurn)))
}
}
@@ -3776,17 +3674,13 @@ mod tests {
.unwrap();
// Try to update a tool call that doesn't exist
- let nonexistent_id = acp::ToolCallId("nonexistent-tool-call".into());
+ let nonexistent_id = acp::ToolCallId::new("nonexistent-tool-call");
thread.update(cx, |thread, cx| {
let result = thread.handle_session_update(
- acp::SessionUpdate::ToolCallUpdate(acp::ToolCallUpdate {
- id: nonexistent_id.clone(),
- fields: acp::ToolCallUpdateFields {
- status: Some(acp::ToolCallStatus::Completed),
- ..Default::default()
- },
- meta: None,
- }),
+ acp::SessionUpdate::ToolCallUpdate(acp::ToolCallUpdate::new(
+ nonexistent_id.clone(),
+ acp::ToolCallUpdateFields::new().status(acp::ToolCallStatus::Completed),
+ )),
cx,
);
@@ -3861,7 +3755,7 @@ mod tests {
.unwrap();
// Create 2 terminals BEFORE the checkpoint that have completed running
- let terminal_id_1 = acp::TerminalId(uuid::Uuid::new_v4().to_string().into());
+ let terminal_id_1 = acp::TerminalId::new(uuid::Uuid::new_v4().to_string());
let mock_terminal_1 = cx.new(|cx| {
let builder = ::terminal::TerminalBuilder::new_display_only(
::terminal::terminal_settings::CursorShape::default(),
@@ -3900,17 +3794,13 @@ mod tests {
thread.on_terminal_provider_event(
TerminalProviderEvent::Exit {
terminal_id: terminal_id_1.clone(),
- status: acp::TerminalExitStatus {
- exit_code: Some(0),
- signal: None,
- meta: None,
- },
+ status: acp::TerminalExitStatus::new().exit_code(0),
},
cx,
);
});
- let terminal_id_2 = acp::TerminalId(uuid::Uuid::new_v4().to_string().into());
+ let terminal_id_2 = acp::TerminalId::new(uuid::Uuid::new_v4().to_string());
let mock_terminal_2 = cx.new(|cx| {
let builder = ::terminal::TerminalBuilder::new_display_only(
::terminal::terminal_settings::CursorShape::default(),
@@ -3949,11 +3839,7 @@ mod tests {
thread.on_terminal_provider_event(
TerminalProviderEvent::Exit {
terminal_id: terminal_id_2.clone(),
- status: acp::TerminalExitStatus {
- exit_code: Some(0),
- signal: None,
- meta: None,
- },
+ status: acp::TerminalExitStatus::new().exit_code(0),
},
cx,
);
@@ -3973,7 +3859,7 @@ mod tests {
// Create a terminal AFTER the checkpoint we'll restore to.
// This simulates the AI agent starting a long-running terminal command.
- let terminal_id = acp::TerminalId(uuid::Uuid::new_v4().to_string().into());
+ let terminal_id = acp::TerminalId::new(uuid::Uuid::new_v4().to_string());
let mock_terminal = cx.new(|cx| {
let builder = ::terminal::TerminalBuilder::new_display_only(
::terminal::terminal_settings::CursorShape::default(),
@@ -4015,21 +3901,15 @@ mod tests {
thread.update(cx, |thread, cx| {
thread
.handle_session_update(
- acp::SessionUpdate::ToolCall(acp::ToolCall {
- id: acp::ToolCallId("terminal-tool-1".into()),
- title: "Running command".into(),
- kind: acp::ToolKind::Execute,
- status: acp::ToolCallStatus::InProgress,
- content: vec![acp::ToolCallContent::Terminal {
- terminal_id: terminal_id.clone(),
- }],
- locations: vec![],
- raw_input: Some(
- serde_json::json!({"command": "sleep 1000", "cd": "/test"}),
- ),
- raw_output: None,
- meta: None,
- }),
+ acp::SessionUpdate::ToolCall(
+ acp::ToolCall::new("terminal-tool-1", "Running command")
+ .kind(acp::ToolKind::Execute)
+ .status(acp::ToolCallStatus::InProgress)
+ .content(vec![acp::ToolCallContent::Terminal(acp::Terminal::new(
+ terminal_id.clone(),
+ ))])
+ .raw_input(serde_json::json!({"command": "sleep 1000", "cd": "/test"})),
+ ),
cx,
)
.unwrap();
@@ -336,7 +336,7 @@ mod test_support {
_cwd: &Path,
cx: &mut gpui::App,
) -> Task<gpui::Result<Entity<AcpThread>>> {
- let session_id = acp::SessionId(self.sessions.lock().len().to_string().into());
+ let session_id = acp::SessionId::new(self.sessions.lock().len().to_string());
let action_log = cx.new(|_| ActionLog::new(project.clone()));
let thread = cx.new(|cx| {
AcpThread::new(
@@ -345,12 +345,12 @@ mod test_support {
project,
action_log,
session_id.clone(),
- watch::Receiver::constant(acp::PromptCapabilities {
- image: true,
- audio: true,
- embedded_context: true,
- meta: None,
- }),
+ watch::Receiver::constant(
+ acp::PromptCapabilities::new()
+ .image(true)
+ .audio(true)
+ .embedded_context(true),
+ ),
cx,
)
});
@@ -389,10 +389,7 @@ mod test_support {
response_tx.replace(tx);
cx.spawn(async move |_| {
let stop_reason = rx.await?;
- Ok(acp::PromptResponse {
- stop_reason,
- meta: None,
- })
+ Ok(acp::PromptResponse::new(stop_reason))
})
} else {
for update in self.next_prompt_updates.lock().drain(..) {
@@ -400,7 +397,7 @@ mod test_support {
let update = update.clone();
let permission_request = if let acp::SessionUpdate::ToolCall(tool_call) =
&update
- && let Some(options) = self.permission_requests.get(&tool_call.id)
+ && let Some(options) = self.permission_requests.get(&tool_call.tool_call_id)
{
Some((tool_call.clone(), options.clone()))
} else {
@@ -429,10 +426,7 @@ mod test_support {
cx.spawn(async move |_| {
try_join_all(tasks).await?;
- Ok(acp::PromptResponse {
- stop_reason: acp::StopReason::EndTurn,
- meta: None,
- })
+ Ok(acp::PromptResponse::new(acp::StopReason::EndTurn))
})
}
}
@@ -108,7 +108,7 @@ impl MentionUri {
if let Some(thread_id) = path.strip_prefix("/agent/thread/") {
let name = single_query_param(&url, "name")?.context("Missing thread name")?;
Ok(Self::Thread {
- id: acp::SessionId(thread_id.into()),
+ id: acp::SessionId::new(thread_id),
name,
})
} else if let Some(path) = path.strip_prefix("/agent/text-thread/") {
@@ -75,11 +75,15 @@ impl Terminal {
let exit_status = exit_status.map(portable_pty::ExitStatus::from);
- acp::TerminalExitStatus {
- exit_code: exit_status.as_ref().map(|e| e.exit_code()),
- signal: exit_status.and_then(|e| e.signal().map(Into::into)),
- meta: None,
+ let mut status = acp::TerminalExitStatus::new();
+
+ if let Some(exit_status) = exit_status.as_ref() {
+ status = status.exit_code(exit_status.exit_code());
+ if let Some(signal) = exit_status.signal() {
+ status = status.signal(signal);
+ }
}
+ status
})
.shared(),
}
@@ -101,27 +105,23 @@ impl Terminal {
pub fn current_output(&self, cx: &App) -> acp::TerminalOutputResponse {
if let Some(output) = self.output.as_ref() {
- let exit_status = output.exit_status.map(portable_pty::ExitStatus::from);
-
- acp::TerminalOutputResponse {
- output: output.content.clone(),
- truncated: output.original_content_len > output.content.len(),
- exit_status: Some(acp::TerminalExitStatus {
- exit_code: exit_status.as_ref().map(|e| e.exit_code()),
- signal: exit_status.and_then(|e| e.signal().map(Into::into)),
- meta: None,
- }),
- meta: None,
+ let mut exit_status = acp::TerminalExitStatus::new();
+ if let Some(status) = output.exit_status.map(portable_pty::ExitStatus::from) {
+ exit_status = exit_status.exit_code(status.exit_code());
+ if let Some(signal) = status.signal() {
+ exit_status = exit_status.signal(signal);
+ }
}
+
+ acp::TerminalOutputResponse::new(
+ output.content.clone(),
+ output.original_content_len > output.content.len(),
+ )
+ .exit_status(exit_status)
} else {
let (current_content, original_len) = self.truncated_output(cx);
-
- acp::TerminalOutputResponse {
- truncated: current_content.len() < original_len,
- output: current_content,
- exit_status: None,
- meta: None,
- }
+ let truncated = current_content.len() < original_len;
+ acp::TerminalOutputResponse::new(current_content, truncated)
}
}
@@ -83,6 +83,7 @@ ctor.workspace = true
db = { workspace = true, "features" = ["test-support"] }
editor = { workspace = true, "features" = ["test-support"] }
env_logger.workspace = true
+eval_utils.workspace = true
fs = { workspace = true, "features" = ["test-support"] }
git = { workspace = true, "features" = ["test-support"] }
gpui = { workspace = true, "features" = ["test-support"] }
@@ -189,7 +189,7 @@ impl LanguageModels {
}
fn model_id(model: &Arc<dyn LanguageModel>) -> acp::ModelId {
- acp::ModelId(format!("{}/{}", model.provider_id().0, model.id().0).into())
+ acp::ModelId::new(format!("{}/{}", model.provider_id().0, model.id().0))
}
fn authenticate_all_language_model_providers(cx: &mut App) -> Task<()> {
@@ -816,28 +816,12 @@ impl NativeAgentConnection {
}
ThreadEvent::AgentText(text) => {
acp_thread.update(cx, |thread, cx| {
- thread.push_assistant_content_block(
- acp::ContentBlock::Text(acp::TextContent {
- text,
- annotations: None,
- meta: None,
- }),
- false,
- cx,
- )
+ thread.push_assistant_content_block(text.into(), false, cx)
})?;
}
ThreadEvent::AgentThinking(text) => {
acp_thread.update(cx, |thread, cx| {
- thread.push_assistant_content_block(
- acp::ContentBlock::Text(acp::TextContent {
- text,
- annotations: None,
- meta: None,
- }),
- true,
- cx,
- )
+ thread.push_assistant_content_block(text.into(), true, cx)
})?;
}
ThreadEvent::ToolCallAuthorization(ToolCallAuthorization {
@@ -851,8 +835,9 @@ impl NativeAgentConnection {
)
})??;
cx.background_spawn(async move {
- if let acp::RequestPermissionOutcome::Selected { option_id } =
- outcome_task.await
+ if let acp::RequestPermissionOutcome::Selected(
+ acp::SelectedPermissionOutcome { option_id, .. },
+ ) = outcome_task.await
{
response
.send(option_id)
@@ -879,10 +864,7 @@ impl NativeAgentConnection {
}
ThreadEvent::Stop(stop_reason) => {
log::debug!("Assistant message complete: {:?}", stop_reason);
- return Ok(acp::PromptResponse {
- stop_reason,
- meta: None,
- });
+ return Ok(acp::PromptResponse::new(stop_reason));
}
}
}
@@ -894,10 +876,7 @@ impl NativeAgentConnection {
}
log::debug!("Response stream completed");
- anyhow::Ok(acp::PromptResponse {
- stop_reason: acp::StopReason::EndTurn,
- meta: None,
- })
+ anyhow::Ok(acp::PromptResponse::new(acp::StopReason::EndTurn))
})
}
}
@@ -1401,7 +1380,7 @@ mod internal_tests {
IndexMap::from_iter([(
AgentModelGroupName("Fake".into()),
vec![AgentModelInfo {
- id: acp::ModelId("fake/fake".into()),
+ id: acp::ModelId::new("fake/fake"),
name: "Fake".into(),
description: None,
icon: Some(ui::IconName::ZedAssistant),
@@ -1462,7 +1441,7 @@ mod internal_tests {
// Select a model
let selector = connection.model_selector(&session_id).unwrap();
- let model_id = acp::ModelId("fake/fake".into());
+ let model_id = acp::ModelId::new("fake/fake");
cx.update(|cx| selector.select_model(model_id.clone(), cx))
.await
.unwrap();
@@ -1548,20 +1527,14 @@ mod internal_tests {
thread.send(
vec![
"What does ".into(),
- acp::ContentBlock::ResourceLink(acp::ResourceLink {
- name: "b.md".into(),
- uri: MentionUri::File {
+ acp::ContentBlock::ResourceLink(acp::ResourceLink::new(
+ "b.md",
+ MentionUri::File {
abs_path: path!("/a/b.md").into(),
}
.to_uri()
.to_string(),
- annotations: None,
- description: None,
- mime_type: None,
- size: None,
- title: None,
- meta: None,
- }),
+ )),
" mean?".into(),
],
cx,
@@ -366,7 +366,7 @@ impl ThreadsDatabase {
for (id, summary, updated_at) in rows {
threads.push(DbThreadMetadata {
- id: acp::SessionId(id),
+ id: acp::SessionId::new(id),
title: summary.into(),
updated_at: DateTime::parse_from_rfc3339(&updated_at)?.with_timezone(&Utc),
});
@@ -4,7 +4,7 @@ use crate::{
};
use Role::*;
use client::{Client, UserStore};
-use collections::HashMap;
+use eval_utils::{EvalOutput, EvalOutputProcessor, OutcomeKind};
use fs::FakeFs;
use futures::{FutureExt, future::LocalBoxFuture};
use gpui::{AppContext, TestAppContext, Timer};
@@ -20,16 +20,62 @@ use rand::prelude::*;
use reqwest_client::ReqwestClient;
use serde_json::json;
use std::{
- cmp::Reverse,
fmt::{self, Display},
- io::Write as _,
path::Path,
str::FromStr,
- sync::mpsc,
time::Duration,
};
use util::path;
+#[derive(Default, Clone, Debug)]
+struct EditAgentOutputProcessor {
+ mismatched_tag_threshold: f32,
+ cumulative_tags: usize,
+ cumulative_mismatched_tags: usize,
+ eval_outputs: Vec<EvalOutput<EditEvalMetadata>>,
+}
+
+fn mismatched_tag_threshold(mismatched_tag_threshold: f32) -> EditAgentOutputProcessor {
+ EditAgentOutputProcessor {
+ mismatched_tag_threshold,
+ cumulative_tags: 0,
+ cumulative_mismatched_tags: 0,
+ eval_outputs: Vec::new(),
+ }
+}
+
+#[derive(Clone, Debug)]
+struct EditEvalMetadata {
+ tags: usize,
+ mismatched_tags: usize,
+}
+
+impl EvalOutputProcessor for EditAgentOutputProcessor {
+ type Metadata = EditEvalMetadata;
+
+ fn process(&mut self, output: &EvalOutput<Self::Metadata>) {
+ if matches!(output.outcome, OutcomeKind::Passed | OutcomeKind::Failed) {
+ self.cumulative_mismatched_tags += output.metadata.mismatched_tags;
+ self.cumulative_tags += output.metadata.tags;
+ self.eval_outputs.push(output.clone());
+ }
+ }
+
+ fn assert(&mut self) {
+ let mismatched_tag_ratio =
+ self.cumulative_mismatched_tags as f32 / self.cumulative_tags as f32;
+ if mismatched_tag_ratio > self.mismatched_tag_threshold {
+ for eval_output in &self.eval_outputs {
+ println!("{}", eval_output.data);
+ }
+ panic!(
+ "Too many mismatched tags: {:?}",
+ self.cumulative_mismatched_tags
+ );
+ }
+ }
+}
+
#[test]
#[cfg_attr(not(feature = "unit-eval"), ignore)]
fn eval_extract_handle_command_output() {
@@ -55,22 +101,19 @@ fn eval_extract_handle_command_output() {
include_str!("evals/fixtures/extract_handle_command_output/possible-07.diff"),
];
let edit_description = "Extract `handle_command_output` method from `run_git_blame`.";
- eval(
- 100,
- 0.95,
- 0.05,
- EvalInput::from_conversation(
+ eval_utils::eval(100, 0.95, mismatched_tag_threshold(0.05), move || {
+ run_eval(EvalInput::from_conversation(
vec![
message(
User,
[text(formatdoc! {"
- Read the `{input_file_path}` file and extract a method in
- the final stanza of `run_git_blame` to deal with command failures,
- call it `handle_command_output` and take the std::process::Output as the only parameter.
- Do not document the method and do not add any comments.
+ Read the `{input_file_path}` file and extract a method in
+ the final stanza of `run_git_blame` to deal with command failures,
+ call it `handle_command_output` and take the std::process::Output as the only parameter.
+ Do not document the method and do not add any comments.
- Add it right next to `run_git_blame` and copy it verbatim from `run_git_blame`.
- "})],
+ Add it right next to `run_git_blame` and copy it verbatim from `run_git_blame`.
+ "})],
),
message(
Assistant,
@@ -102,9 +145,9 @@ fn eval_extract_handle_command_output() {
),
],
Some(input_file_content.into()),
- EvalAssertion::assert_diff_any(possible_diffs),
- ),
- );
+ EvalAssertion::assert_diff_any(possible_diffs.clone()),
+ ))
+ });
}
#[test]
@@ -122,18 +165,16 @@ fn eval_delete_run_git_blame() {
let input_file_content = include_str!("evals/fixtures/delete_run_git_blame/before.rs");
let output_file_content = include_str!("evals/fixtures/delete_run_git_blame/after.rs");
let edit_description = "Delete the `run_git_blame` function.";
- eval(
- 100,
- 0.95,
- 0.05,
- EvalInput::from_conversation(
+
+ eval_utils::eval(100, 0.95, mismatched_tag_threshold(0.05), move || {
+ run_eval(EvalInput::from_conversation(
vec![
message(
User,
[text(formatdoc! {"
- Read the `{input_file_path}` file and delete `run_git_blame`. Just that
- one function, not its usages.
- "})],
+ Read the `{input_file_path}` file and delete `run_git_blame`. Just that
+ one function, not its usages.
+ "})],
),
message(
Assistant,
@@ -166,8 +207,8 @@ fn eval_delete_run_git_blame() {
],
Some(input_file_content.into()),
EvalAssertion::assert_eq(output_file_content),
- ),
- );
+ ))
+ });
}
#[test]
@@ -185,18 +226,16 @@ fn eval_translate_doc_comments() {
let input_file_path = "root/canvas.rs";
let input_file_content = include_str!("evals/fixtures/translate_doc_comments/before.rs");
let edit_description = "Translate all doc comments to Italian";
- eval(
- 200,
- 1.,
- 0.05,
- EvalInput::from_conversation(
+
+ eval_utils::eval(200, 1., mismatched_tag_threshold(0.05), move || {
+ run_eval(EvalInput::from_conversation(
vec![
message(
User,
[text(formatdoc! {"
- Read the {input_file_path} file and edit it (without overwriting it),
- translating all the doc comments to italian.
- "})],
+ Read the {input_file_path} file and edit it (without overwriting it),
+ translating all the doc comments to italian.
+ "})],
),
message(
Assistant,
@@ -229,8 +268,8 @@ fn eval_translate_doc_comments() {
],
Some(input_file_content.into()),
EvalAssertion::judge_diff("Doc comments were translated to Italian"),
- ),
- );
+ ))
+ });
}
#[test]
@@ -249,33 +288,31 @@ fn eval_use_wasi_sdk_in_compile_parser_to_wasm() {
let input_file_content =
include_str!("evals/fixtures/use_wasi_sdk_in_compile_parser_to_wasm/before.rs");
let edit_description = "Update compile_parser_to_wasm to use wasi-sdk instead of emscripten";
- eval(
- 100,
- 0.95,
- 0.05,
- EvalInput::from_conversation(
+
+ eval_utils::eval(100, 0.95, mismatched_tag_threshold(0.05), move || {
+ run_eval(EvalInput::from_conversation(
vec![
message(
User,
[text(formatdoc! {"
- Read the `{input_file_path}` file and change `compile_parser_to_wasm` to use `wasi-sdk` instead of emscripten.
- Use `ureq` to download the SDK for the current platform and architecture.
- Extract the archive into a sibling of `lib` inside the `tree-sitter` directory in the cache_dir.
- Compile the parser to wasm using the `bin/clang` executable (or `bin/clang.exe` on windows)
- that's inside of the archive.
- Don't re-download the SDK if that executable already exists.
-
- Use these clang flags: -fPIC -shared -Os -Wl,--export=tree_sitter_{{language_name}}
-
- Here are the available wasi-sdk assets:
- - wasi-sdk-25.0-x86_64-macos.tar.gz
- - wasi-sdk-25.0-arm64-macos.tar.gz
- - wasi-sdk-25.0-x86_64-linux.tar.gz
- - wasi-sdk-25.0-arm64-linux.tar.gz
- - wasi-sdk-25.0-x86_64-linux.tar.gz
- - wasi-sdk-25.0-arm64-linux.tar.gz
- - wasi-sdk-25.0-x86_64-windows.tar.gz
- "})],
+ Read the `{input_file_path}` file and change `compile_parser_to_wasm` to use `wasi-sdk` instead of emscripten.
+ Use `ureq` to download the SDK for the current platform and architecture.
+ Extract the archive into a sibling of `lib` inside the `tree-sitter` directory in the cache_dir.
+ Compile the parser to wasm using the `bin/clang` executable (or `bin/clang.exe` on windows)
+ that's inside of the archive.
+ Don't re-download the SDK if that executable already exists.
+
+ Use these clang flags: -fPIC -shared -Os -Wl,--export=tree_sitter_{{language_name}}
+
+ Here are the available wasi-sdk assets:
+ - wasi-sdk-25.0-x86_64-macos.tar.gz
+ - wasi-sdk-25.0-arm64-macos.tar.gz
+ - wasi-sdk-25.0-x86_64-linux.tar.gz
+ - wasi-sdk-25.0-arm64-linux.tar.gz
+ - wasi-sdk-25.0-x86_64-linux.tar.gz
+ - wasi-sdk-25.0-arm64-linux.tar.gz
+ - wasi-sdk-25.0-x86_64-windows.tar.gz
+ "})],
),
message(
Assistant,
@@ -352,11 +389,11 @@ fn eval_use_wasi_sdk_in_compile_parser_to_wasm() {
],
Some(input_file_content.into()),
EvalAssertion::judge_diff(indoc! {"
- - The compile_parser_to_wasm method has been changed to use wasi-sdk
- - ureq is used to download the SDK for current platform and architecture
- "}),
- ),
- );
+ - The compile_parser_to_wasm method has been changed to use wasi-sdk
+ - ureq is used to download the SDK for current platform and architecture
+ "}),
+ ))
+ });
}
#[test]
@@ -380,11 +417,8 @@ fn eval_disable_cursor_blinking() {
include_str!("evals/fixtures/disable_cursor_blinking/possible-03.diff"),
include_str!("evals/fixtures/disable_cursor_blinking/possible-04.diff"),
];
- eval(
- 100,
- 0.51,
- 0.05,
- EvalInput::from_conversation(
+ eval_utils::eval(100, 0.51, mismatched_tag_threshold(0.05), move || {
+ run_eval(EvalInput::from_conversation(
vec![
message(User, [text("Let's research how to cursor blinking works.")]),
message(
@@ -421,10 +455,10 @@ fn eval_disable_cursor_blinking() {
message(
User,
[text(indoc! {"
- Comment out the lines that interact with the BlinkManager.
- Keep the outer `update` blocks, but comments everything that's inside (including if statements).
- Don't add additional comments.
- "})],
+ Comment out the lines that interact with the BlinkManager.
+ Keep the outer `update` blocks, but comments everything that's inside (including if statements).
+ Don't add additional comments.
+ "})],
),
message(
Assistant,
@@ -440,9 +474,9 @@ fn eval_disable_cursor_blinking() {
),
],
Some(input_file_content.into()),
- EvalAssertion::assert_diff_any(possible_diffs),
- ),
- );
+ EvalAssertion::assert_diff_any(possible_diffs.clone()),
+ ))
+ });
}
#[test]
@@ -467,20 +501,16 @@ fn eval_from_pixels_constructor() {
let input_file_path = "root/canvas.rs";
let input_file_content = include_str!("evals/fixtures/from_pixels_constructor/before.rs");
let edit_description = "Implement from_pixels constructor and add tests.";
- eval(
- 100,
- 0.95,
- // For whatever reason, this eval produces more mismatched tags.
- // Increasing for now, let's see if we can bring this down.
- 0.25,
- EvalInput::from_conversation(
+
+ eval_utils::eval(100, 0.95, mismatched_tag_threshold(0.25), move || {
+ run_eval(EvalInput::from_conversation(
vec![
message(
User,
[text(indoc! {"
- Introduce a new `from_pixels` constructor in Canvas and
- also add tests for it in the same file.
- "})],
+ Introduce a new `from_pixels` constructor in Canvas and
+ also add tests for it in the same file.
+ "})],
),
message(
Assistant,
@@ -545,92 +575,92 @@ fn eval_from_pixels_constructor() {
"tool_4",
"grep",
indoc! {"
- Found 6 matches:
+ Found 6 matches:
- ## Matches in font-kit/src/loaders/core_text.rs
+ ## Matches in font-kit/src/loaders/core_text.rs
- ### mod test › L926-936
- ```
- mod test {
- use super::Font;
- use crate::properties::{Stretch, Weight};
+ ### mod test › L926-936
+ ```
+ mod test {
+ use super::Font;
+ use crate::properties::{Stretch, Weight};
- #[cfg(feature = \"source\")]
- use crate::source::SystemSource;
+ #[cfg(feature = \"source\")]
+ use crate::source::SystemSource;
- static TEST_FONT_POSTSCRIPT_NAME: &'static str = \"ArialMT\";
+ static TEST_FONT_POSTSCRIPT_NAME: &'static str = \"ArialMT\";
- #[cfg(feature = \"source\")]
- #[test]
- ```
+ #[cfg(feature = \"source\")]
+ #[test]
+ ```
- 55 lines remaining in ancestor node. Read the file to see all.
+ 55 lines remaining in ancestor node. Read the file to see all.
- ### mod test › L947-951
- ```
- }
+ ### mod test › L947-951
+ ```
+ }
- #[test]
- fn test_core_text_to_css_font_weight() {
- // Exact matches
- ```
+ #[test]
+ fn test_core_text_to_css_font_weight() {
+ // Exact matches
+ ```
- ### mod test › L959-963
- ```
- }
+ ### mod test › L959-963
+ ```
+ }
- #[test]
- fn test_core_text_to_css_font_stretch() {
- // Exact matches
- ```
+ #[test]
+ fn test_core_text_to_css_font_stretch() {
+ // Exact matches
+ ```
- ## Matches in font-kit/src/loaders/freetype.rs
+ ## Matches in font-kit/src/loaders/freetype.rs
- ### mod test › L1238-1248
- ```
- mod test {
- use crate::loaders::freetype::Font;
+ ### mod test › L1238-1248
+ ```
+ mod test {
+ use crate::loaders::freetype::Font;
- static PCF_FONT_PATH: &str = \"resources/tests/times-roman-pcf/timR12.pcf\";
- static PCF_FONT_POSTSCRIPT_NAME: &str = \"Times-Roman\";
+ static PCF_FONT_PATH: &str = \"resources/tests/times-roman-pcf/timR12.pcf\";
+ static PCF_FONT_POSTSCRIPT_NAME: &str = \"Times-Roman\";
- #[test]
- fn get_pcf_postscript_name() {
- let font = Font::from_path(PCF_FONT_PATH, 0).unwrap();
- assert_eq!(font.postscript_name().unwrap(), PCF_FONT_POSTSCRIPT_NAME);
- }
- ```
+ #[test]
+ fn get_pcf_postscript_name() {
+ let font = Font::from_path(PCF_FONT_PATH, 0).unwrap();
+ assert_eq!(font.postscript_name().unwrap(), PCF_FONT_POSTSCRIPT_NAME);
+ }
+ ```
- 1 lines remaining in ancestor node. Read the file to see all.
+ 1 lines remaining in ancestor node. Read the file to see all.
- ## Matches in font-kit/src/sources/core_text.rs
+ ## Matches in font-kit/src/sources/core_text.rs
- ### mod test › L265-275
- ```
- mod test {
- use crate::properties::{Stretch, Weight};
+ ### mod test › L265-275
+ ```
+ mod test {
+ use crate::properties::{Stretch, Weight};
- #[test]
- fn test_css_to_core_text_font_weight() {
- // Exact matches
- assert_eq!(super::css_to_core_text_font_weight(Weight(100.0)), -0.7);
- assert_eq!(super::css_to_core_text_font_weight(Weight(400.0)), 0.0);
- assert_eq!(super::css_to_core_text_font_weight(Weight(700.0)), 0.4);
- assert_eq!(super::css_to_core_text_font_weight(Weight(900.0)), 0.8);
+ #[test]
+ fn test_css_to_core_text_font_weight() {
+ // Exact matches
+ assert_eq!(super::css_to_core_text_font_weight(Weight(100.0)), -0.7);
+ assert_eq!(super::css_to_core_text_font_weight(Weight(400.0)), 0.0);
+ assert_eq!(super::css_to_core_text_font_weight(Weight(700.0)), 0.4);
+ assert_eq!(super::css_to_core_text_font_weight(Weight(900.0)), 0.8);
- ```
+ ```
- 27 lines remaining in ancestor node. Read the file to see all.
+ 27 lines remaining in ancestor node. Read the file to see all.
- ### mod test › L278-282
- ```
- }
+ ### mod test › L278-282
+ ```
+ }
- #[test]
- fn test_css_to_core_text_font_stretch() {
- // Exact matches
- ```
- "},
+ #[test]
+ fn test_css_to_core_text_font_stretch() {
+ // Exact matches
+ ```
+ "},
)],
),
message(
@@ -648,11 +678,11 @@ fn eval_from_pixels_constructor() {
],
Some(input_file_content.into()),
EvalAssertion::judge_diff(indoc! {"
- - The diff contains a new `from_pixels` constructor
- - The diff contains new tests for the `from_pixels` constructor
- "}),
- ),
- );
+ - The diff contains a new `from_pixels` constructor
+ - The diff contains new tests for the `from_pixels` constructor
+ "}),
+ ))
+ });
}
#[test]
@@ -670,11 +700,9 @@ fn eval_zode() {
let input_file_path = "root/zode.py";
let input_content = None;
let edit_description = "Create the main Zode CLI script";
- eval(
- 50,
- 1.,
- 0.05,
- EvalInput::from_conversation(
+
+ eval_utils::eval(50, 1., mismatched_tag_threshold(0.05), move || {
+ run_eval(EvalInput::from_conversation(
vec![
message(User, [text(include_str!("evals/fixtures/zode/prompt.md"))]),
message(
@@ -733,7 +761,7 @@ fn eval_zode() {
],
),
],
- input_content,
+ input_content.clone(),
EvalAssertion::new(async move |sample, _, _cx| {
let invalid_starts = [' ', '`', '\n'];
let mut message = String::new();
@@ -758,8 +786,8 @@ fn eval_zode() {
})
}
}),
- ),
- );
+ ))
+ });
}
#[test]
@@ -777,19 +805,17 @@ fn eval_add_overwrite_test() {
let input_file_path = "root/action_log.rs";
let input_file_content = include_str!("evals/fixtures/add_overwrite_test/before.rs");
let edit_description = "Add a new test for overwriting a file in action_log.rs";
- eval(
- 200,
- 0.5, // TODO: make this eval better
- 0.05,
- EvalInput::from_conversation(
+
+ eval_utils::eval(200, 0.5, mismatched_tag_threshold(0.05), move || {
+ run_eval(EvalInput::from_conversation(
vec![
message(
User,
[text(indoc! {"
- Introduce a new test in `action_log.rs` to test overwriting a file.
- That is, a file already exists, but we call `buffer_created` as if the file were new.
- Take inspiration from all the other tests in the file.
- "})],
+ Introduce a new test in `action_log.rs` to test overwriting a file.
+ That is, a file already exists, but we call `buffer_created` as if the file were new.
+ Take inspiration from all the other tests in the file.
+ "})],
),
message(
Assistant,
@@ -809,81 +835,81 @@ fn eval_add_overwrite_test() {
"tool_1",
"read_file",
indoc! {"
- pub struct ActionLog [L13-20]
- tracked_buffers [L15]
- edited_since_project_diagnostics_check [L17]
- project [L19]
- impl ActionLog [L22-498]
- pub fn new [L24-30]
- pub fn project [L32-34]
- pub fn checked_project_diagnostics [L37-39]
- pub fn has_edited_files_since_project_diagnostics_check [L42-44]
- fn track_buffer_internal [L46-101]
- fn handle_buffer_event [L103-116]
- fn handle_buffer_edited [L118-123]
- fn handle_buffer_file_changed [L125-158]
- async fn maintain_diff [L160-264]
- pub fn buffer_read [L267-269]
- pub fn buffer_created [L272-276]
- pub fn buffer_edited [L279-287]
- pub fn will_delete_buffer [L289-304]
- pub fn keep_edits_in_range [L306-364]
- pub fn reject_edits_in_ranges [L366-459]
- pub fn keep_all_edits [L461-473]
- pub fn changed_buffers [L476-482]
- pub fn stale_buffers [L485-497]
- fn apply_non_conflicting_edits [L500-561]
- fn diff_snapshots [L563-585]
- fn point_to_row_edit [L587-614]
- enum ChangeAuthor [L617-620]
- User [L618]
- Agent [L619]
- enum TrackedBufferStatus [L623-627]
- Created [L624]
- Modified [L625]
- Deleted [L626]
- struct TrackedBuffer [L629-641]
- buffer [L630]
- base_text [L631]
- unreviewed_changes [L632]
- status [L633]
- version [L634]
- diff [L635]
- snapshot [L636]
- diff_update [L637]
- _open_lsp_handle [L638]
- _maintain_diff [L639]
- _subscription [L640]
- impl TrackedBuffer [L643-657]
- fn has_changes [L644-650]
- fn schedule_diff_update [L652-656]
- pub struct ChangedBuffer [L659-661]
- pub diff [L660]
- mod tests [L664-1574]
- fn init_logger [L678-682]
- fn init_test [L684-691]
- async fn test_keep_edits [L694-769]
- async fn test_deletions [L772-854]
- async fn test_overlapping_user_edits [L857-951]
- async fn test_creating_files [L954-1010]
- async fn test_deleting_files [L1013-1120]
- async fn test_reject_edits [L1123-1255]
- async fn test_reject_multiple_edits [L1258-1331]
- async fn test_reject_deleted_file [L1334-1388]
- async fn test_reject_created_file [L1391-1443]
- async fn test_random_diffs [L1446-1535]
- fn quiesce [L1510-1534]
- struct HunkStatus [L1538-1542]
- range [L1539]
- diff_status [L1540]
- old_text [L1541]
- fn unreviewed_hunks [L1544-1573]
-
- Showing symbols 1-69 (total symbols: 69)
-
- Using the line numbers in this outline, you can call this tool again while specifying
- the start_line and end_line fields to see the implementations of symbols in the outline.
- "},
+ pub struct ActionLog [L13-20]
+ tracked_buffers [L15]
+ edited_since_project_diagnostics_check [L17]
+ project [L19]
+ impl ActionLog [L22-498]
+ pub fn new [L24-30]
+ pub fn project [L32-34]
+ pub fn checked_project_diagnostics [L37-39]
+ pub fn has_edited_files_since_project_diagnostics_check [L42-44]
+ fn track_buffer_internal [L46-101]
+ fn handle_buffer_event [L103-116]
+ fn handle_buffer_edited [L118-123]
+ fn handle_buffer_file_changed [L125-158]
+ async fn maintain_diff [L160-264]
+ pub fn buffer_read [L267-269]
+ pub fn buffer_created [L272-276]
+ pub fn buffer_edited [L279-287]
+ pub fn will_delete_buffer [L289-304]
+ pub fn keep_edits_in_range [L306-364]
+ pub fn reject_edits_in_ranges [L366-459]
+ pub fn keep_all_edits [L461-473]
+ pub fn changed_buffers [L476-482]
+ pub fn stale_buffers [L485-497]
+ fn apply_non_conflicting_edits [L500-561]
+ fn diff_snapshots [L563-585]
+ fn point_to_row_edit [L587-614]
+ enum ChangeAuthor [L617-620]
+ User [L618]
+ Agent [L619]
+ enum TrackedBufferStatus [L623-627]
+ Created [L624]
+ Modified [L625]
+ Deleted [L626]
+ struct TrackedBuffer [L629-641]
+ buffer [L630]
+ base_text [L631]
+ unreviewed_changes [L632]
+ status [L633]
+ version [L634]
+ diff [L635]
+ snapshot [L636]
+ diff_update [L637]
+ _open_lsp_handle [L638]
+ _maintain_diff [L639]
+ _subscription [L640]
+ impl TrackedBuffer [L643-657]
+ fn has_changes [L644-650]
+ fn schedule_diff_update [L652-656]
+ pub struct ChangedBuffer [L659-661]
+ pub diff [L660]
+ mod tests [L664-1574]
+ fn init_logger [L678-682]
+ fn init_test [L684-691]
+ async fn test_keep_edits [L694-769]
+ async fn test_deletions [L772-854]
+ async fn test_overlapping_user_edits [L857-951]
+ async fn test_creating_files [L954-1010]
+ async fn test_deleting_files [L1013-1120]
+ async fn test_reject_edits [L1123-1255]
+ async fn test_reject_multiple_edits [L1258-1331]
+ async fn test_reject_deleted_file [L1334-1388]
+ async fn test_reject_created_file [L1391-1443]
+ async fn test_random_diffs [L1446-1535]
+ fn quiesce [L1510-1534]
+ struct HunkStatus [L1538-1542]
+ range [L1539]
+ diff_status [L1540]
+ old_text [L1541]
+ fn unreviewed_hunks [L1544-1573]
+
+ Showing symbols 1-69 (total symbols: 69)
+
+ Using the line numbers in this outline, you can call this tool again while specifying
+ the start_line and end_line fields to see the implementations of symbols in the outline.
+ "},
)],
),
message(
@@ -963,17 +989,17 @@ fn eval_add_overwrite_test() {
Assistant,
[
text(indoc! {"
- Based on my understanding of the existing tests and code, I'll now create a new test for overwriting a file. In this test, we'll:
+ Based on my understanding of the existing tests and code, I'll now create a new test for overwriting a file. In this test, we'll:
- 1. Create a file
- 2. Open a buffer for it
- 3. Track it in the action log
- 4. Modify the buffer
- 5. Call `buffer_created` again as if the file were new
- 6. Check that the changes are properly tracked
+ 1. Create a file
+ 2. Open a buffer for it
+ 3. Track it in the action log
+ 4. Modify the buffer
+ 5. Call `buffer_created` again as if the file were new
+ 6. Check that the changes are properly tracked
- Let's write the test:
- "}),
+ Let's write the test:
+ "}),
tool_use(
"tool_5",
"edit_file",
@@ -990,8 +1016,8 @@ fn eval_add_overwrite_test() {
EvalAssertion::judge_diff(
"A new test for overwritten files was created, without changing any previous test",
),
- ),
- );
+ ))
+ });
}
#[test]
@@ -1016,20 +1042,18 @@ fn eval_create_empty_file() {
let input_file_content = None;
let expected_output_content = String::new();
- eval(
- 100,
- 0.99,
- 0.05,
- EvalInput::from_conversation(
+
+ eval_utils::eval(100, 0.99, mismatched_tag_threshold(0.05), move || {
+ run_eval(EvalInput::from_conversation(
vec![
message(User, [text("Create a second empty todo file ")]),
message(
Assistant,
[
text(formatdoc! {"
- I'll help you create a second empty todo file.
- First, let me examine the project structure to see if there's already a todo file, which will help me determine the appropriate name and location for the second one.
- "}),
+ I'll help you create a second empty todo file.
+ First, let me examine the project structure to see if there's already a todo file, which will help me determine the appropriate name and location for the second one.
+ "}),
tool_use(
"toolu_01GAF8TtsgpjKxCr8fgQLDgR",
"list_directory",
@@ -1051,8 +1075,8 @@ fn eval_create_empty_file() {
Assistant,
[
text(formatdoc! {"
- I can see there's already a `TODO` file in the `root` directory. Let me create a second empty todo file called `TODO3` in the same directory:
- "}),
+ I can see there's already a `TODO` file in the `root` directory. Let me create a second empty todo file called `TODO3` in the same directory:
+ "}),
tool_use(
"toolu_01Tb3iQ9griqSYMmVuykQPWU",
"edit_file",
@@ -1065,12 +1089,12 @@ fn eval_create_empty_file() {
],
),
],
- input_file_content,
+ input_file_content.clone(),
// Bad behavior is to write something like
// "I'll create an empty TODO3 file as requested."
- EvalAssertion::assert_eq(expected_output_content),
- ),
- );
+ EvalAssertion::assert_eq(expected_output_content.clone()),
+ ))
+ });
}
fn message(
@@ -1312,115 +1336,44 @@ impl EvalAssertion {
}
}
-fn eval(
- iterations: usize,
- expected_pass_ratio: f32,
- mismatched_tag_threshold: f32,
- mut eval: EvalInput,
-) {
- let mut evaluated_count = 0;
- let mut failed_count = 0;
- report_progress(evaluated_count, failed_count, iterations);
-
- let (tx, rx) = mpsc::channel();
-
- // Cache the last message in the conversation, and run one instance of the eval so that
- // all the next ones are cached.
- eval.conversation.last_mut().unwrap().cache = true;
- run_eval(eval.clone(), tx.clone());
-
- let executor = gpui::background_executor();
- let semaphore = Arc::new(smol::lock::Semaphore::new(32));
- for _ in 1..iterations {
- let eval = eval.clone();
- let tx = tx.clone();
- let semaphore = semaphore.clone();
- executor
- .spawn(async move {
- let _guard = semaphore.acquire().await;
- run_eval(eval, tx)
- })
- .detach();
- }
- drop(tx);
-
- let mut failed_evals = HashMap::default();
- let mut errored_evals = HashMap::default();
- let mut eval_outputs = Vec::new();
- let mut cumulative_parser_metrics = EditParserMetrics::default();
- while let Ok(output) = rx.recv() {
- match output {
- Ok(output) => {
- cumulative_parser_metrics += output.sample.edit_output.parser_metrics.clone();
- eval_outputs.push(output.clone());
- if output.assertion.score < 80 {
- failed_count += 1;
- failed_evals
- .entry(output.sample.text_after.clone())
- .or_insert(Vec::new())
- .push(output);
- }
- }
- Err(error) => {
- failed_count += 1;
- *errored_evals.entry(format!("{:?}", error)).or_insert(0) += 1;
- }
- }
-
- evaluated_count += 1;
- report_progress(evaluated_count, failed_count, iterations);
- }
-
- let actual_pass_ratio = (iterations - failed_count) as f32 / iterations as f32;
- println!("Actual pass ratio: {}\n", actual_pass_ratio);
- if actual_pass_ratio < expected_pass_ratio {
- let mut errored_evals = errored_evals.into_iter().collect::<Vec<_>>();
- errored_evals.sort_by_key(|(_, count)| Reverse(*count));
- for (error, count) in errored_evals {
- println!("Eval errored {} times. Error: {}", count, error);
- }
-
- let mut failed_evals = failed_evals.into_iter().collect::<Vec<_>>();
- failed_evals.sort_by_key(|(_, evals)| Reverse(evals.len()));
- for (_buffer_output, failed_evals) in failed_evals {
- let eval_output = failed_evals.first().unwrap();
- println!("Eval failed {} times", failed_evals.len());
- println!("{}", eval_output);
- }
-
- panic!(
- "Actual pass ratio: {}\nExpected pass ratio: {}",
- actual_pass_ratio, expected_pass_ratio
- );
- }
-
- let mismatched_tag_ratio =
- cumulative_parser_metrics.mismatched_tags as f32 / cumulative_parser_metrics.tags as f32;
- if mismatched_tag_ratio > mismatched_tag_threshold {
- for eval_output in eval_outputs {
- println!("{}", eval_output);
- }
- panic!("Too many mismatched tags: {:?}", cumulative_parser_metrics);
- }
-}
-
-fn run_eval(eval: EvalInput, tx: mpsc::Sender<Result<EvalOutput>>) {
+fn run_eval(eval: EvalInput) -> eval_utils::EvalOutput<EditEvalMetadata> {
let dispatcher = gpui::TestDispatcher::new(StdRng::from_os_rng());
let mut cx = TestAppContext::build(dispatcher, None);
- let output = cx.executor().block_test(async {
+ let result = cx.executor().block_test(async {
let test = EditAgentTest::new(&mut cx).await;
test.eval(eval, &mut cx).await
});
- tx.send(output).unwrap();
+ match result {
+ Ok(output) => eval_utils::EvalOutput {
+ data: output.to_string(),
+ outcome: if output.assertion.score < 80 {
+ eval_utils::OutcomeKind::Failed
+ } else {
+ eval_utils::OutcomeKind::Passed
+ },
+ metadata: EditEvalMetadata {
+ tags: output.sample.edit_output.parser_metrics.tags,
+ mismatched_tags: output.sample.edit_output.parser_metrics.mismatched_tags,
+ },
+ },
+ Err(e) => eval_utils::EvalOutput {
+ data: format!("{e:?}"),
+ outcome: eval_utils::OutcomeKind::Error,
+ metadata: EditEvalMetadata {
+ tags: 0,
+ mismatched_tags: 0,
+ },
+ },
+ }
}
#[derive(Clone)]
-struct EvalOutput {
+struct EditEvalOutput {
sample: EvalSample,
assertion: EvalAssertionOutcome,
}
-impl Display for EvalOutput {
+impl Display for EditEvalOutput {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln!(f, "Score: {:?}", self.assertion.score)?;
if let Some(message) = self.assertion.message.as_ref() {
@@ -1439,22 +1392,6 @@ impl Display for EvalOutput {
}
}
-fn report_progress(evaluated_count: usize, failed_count: usize, iterations: usize) {
- let passed_count = evaluated_count - failed_count;
- let passed_ratio = if evaluated_count == 0 {
- 0.0
- } else {
- passed_count as f64 / evaluated_count as f64
- };
- print!(
- "\r\x1b[KEvaluated {}/{} ({:.2}% passed)",
- evaluated_count,
- iterations,
- passed_ratio * 100.0
- );
- std::io::stdout().flush().unwrap();
-}
-
struct EditAgentTest {
agent: EditAgent,
project: Entity<Project>,
@@ -1550,7 +1487,10 @@ impl EditAgentTest {
})
}
- async fn eval(&self, eval: EvalInput, cx: &mut TestAppContext) -> Result<EvalOutput> {
+ async fn eval(&self, mut eval: EvalInput, cx: &mut TestAppContext) -> Result<EditEvalOutput> {
+ // Make sure the last message in the conversation is cached.
+ eval.conversation.last_mut().unwrap().cache = true;
+
let path = self
.project
.read_with(cx, |project, cx| {
@@ -1656,7 +1596,7 @@ impl EditAgentTest {
.run(&sample, self.judge_model.clone(), cx)
.await?;
- Ok(EvalOutput { assertion, sample })
+ Ok(EditEvalOutput { assertion, sample })
}
}
@@ -354,9 +354,9 @@ impl HistoryStore {
.into_iter()
.take(MAX_RECENTLY_OPENED_ENTRIES)
.flat_map(|entry| match entry {
- SerializedRecentOpen::AcpThread(id) => Some(HistoryEntryId::AcpThread(
- acp::SessionId(id.as_str().into()),
- )),
+ SerializedRecentOpen::AcpThread(id) => {
+ Some(HistoryEntryId::AcpThread(acp::SessionId::new(id.as_str())))
+ }
SerializedRecentOpen::TextThread(file_name) => Some(
HistoryEntryId::TextThread(text_threads_dir().join(file_name).into()),
),
@@ -493,14 +493,14 @@ async fn test_tool_authorization(cx: &mut TestAppContext) {
// Approve the first
tool_call_auth_1
.response
- .send(tool_call_auth_1.options[1].id.clone())
+ .send(tool_call_auth_1.options[1].option_id.clone())
.unwrap();
cx.run_until_parked();
// Reject the second
tool_call_auth_2
.response
- .send(tool_call_auth_1.options[2].id.clone())
+ .send(tool_call_auth_1.options[2].option_id.clone())
.unwrap();
cx.run_until_parked();
@@ -510,14 +510,14 @@ async fn test_tool_authorization(cx: &mut TestAppContext) {
message.content,
vec![
language_model::MessageContent::ToolResult(LanguageModelToolResult {
- tool_use_id: tool_call_auth_1.tool_call.id.0.to_string().into(),
+ tool_use_id: tool_call_auth_1.tool_call.tool_call_id.0.to_string().into(),
tool_name: ToolRequiringPermission::name().into(),
is_error: false,
content: "Allowed".into(),
output: Some("Allowed".into())
}),
language_model::MessageContent::ToolResult(LanguageModelToolResult {
- tool_use_id: tool_call_auth_2.tool_call.id.0.to_string().into(),
+ tool_use_id: tool_call_auth_2.tool_call.tool_call_id.0.to_string().into(),
tool_name: ToolRequiringPermission::name().into(),
is_error: true,
content: "Permission to run tool denied by user".into(),
@@ -543,7 +543,7 @@ async fn test_tool_authorization(cx: &mut TestAppContext) {
let tool_call_auth_3 = next_tool_call_authorization(&mut events).await;
tool_call_auth_3
.response
- .send(tool_call_auth_3.options[0].id.clone())
+ .send(tool_call_auth_3.options[0].option_id.clone())
.unwrap();
cx.run_until_parked();
let completion = fake_model.pending_completions().pop().unwrap();
@@ -552,7 +552,7 @@ async fn test_tool_authorization(cx: &mut TestAppContext) {
message.content,
vec![language_model::MessageContent::ToolResult(
LanguageModelToolResult {
- tool_use_id: tool_call_auth_3.tool_call.id.0.to_string().into(),
+ tool_use_id: tool_call_auth_3.tool_call.tool_call_id.0.to_string().into(),
tool_name: ToolRequiringPermission::name().into(),
is_error: false,
content: "Allowed".into(),
@@ -1353,20 +1353,20 @@ async fn test_cancellation(cx: &mut TestAppContext) {
ThreadEvent::ToolCall(tool_call) => {
assert_eq!(tool_call.title, expected_tools.remove(0));
if tool_call.title == "Echo" {
- echo_id = Some(tool_call.id);
+ echo_id = Some(tool_call.tool_call_id);
}
}
ThreadEvent::ToolCallUpdate(acp_thread::ToolCallUpdate::UpdateFields(
acp::ToolCallUpdate {
- id,
+ tool_call_id,
fields:
acp::ToolCallUpdateFields {
status: Some(acp::ToolCallStatus::Completed),
..
},
- meta: None,
+ ..
},
- )) if Some(&id) == echo_id.as_ref() => {
+ )) if Some(&tool_call_id) == echo_id.as_ref() => {
echo_completed = true;
}
_ => {}
@@ -1995,11 +1995,7 @@ async fn test_agent_connection(cx: &mut TestAppContext) {
.update(|cx| {
connection.prompt(
Some(acp_thread::UserMessageId::new()),
- acp::PromptRequest {
- session_id: session_id.clone(),
- prompt: vec!["ghi".into()],
- meta: None,
- },
+ acp::PromptRequest::new(session_id.clone(), vec!["ghi".into()]),
cx,
)
})
@@ -2056,68 +2052,50 @@ async fn test_tool_updates_to_completion(cx: &mut TestAppContext) {
let tool_call = expect_tool_call(&mut events).await;
assert_eq!(
tool_call,
- acp::ToolCall {
- id: acp::ToolCallId("1".into()),
- title: "Thinking".into(),
- kind: acp::ToolKind::Think,
- status: acp::ToolCallStatus::Pending,
- content: vec![],
- locations: vec![],
- raw_input: Some(json!({})),
- raw_output: None,
- meta: Some(json!({ "tool_name": "thinking" })),
- }
+ acp::ToolCall::new("1", "Thinking")
+ .kind(acp::ToolKind::Think)
+ .raw_input(json!({}))
+ .meta(acp::Meta::from_iter([(
+ "tool_name".into(),
+ "thinking".into()
+ )]))
);
let update = expect_tool_call_update_fields(&mut events).await;
assert_eq!(
update,
- acp::ToolCallUpdate {
- id: acp::ToolCallId("1".into()),
- fields: acp::ToolCallUpdateFields {
- title: Some("Thinking".into()),
- kind: Some(acp::ToolKind::Think),
- raw_input: Some(json!({ "content": "Thinking hard!" })),
- ..Default::default()
- },
- meta: None,
- }
+ acp::ToolCallUpdate::new(
+ "1",
+ acp::ToolCallUpdateFields::new()
+ .title("Thinking")
+ .kind(acp::ToolKind::Think)
+ .raw_input(json!({ "content": "Thinking hard!"}))
+ )
);
let update = expect_tool_call_update_fields(&mut events).await;
assert_eq!(
update,
- acp::ToolCallUpdate {
- id: acp::ToolCallId("1".into()),
- fields: acp::ToolCallUpdateFields {
- status: Some(acp::ToolCallStatus::InProgress),
- ..Default::default()
- },
- meta: None,
- }
+ acp::ToolCallUpdate::new(
+ "1",
+ acp::ToolCallUpdateFields::new().status(acp::ToolCallStatus::InProgress)
+ )
);
let update = expect_tool_call_update_fields(&mut events).await;
assert_eq!(
update,
- acp::ToolCallUpdate {
- id: acp::ToolCallId("1".into()),
- fields: acp::ToolCallUpdateFields {
- content: Some(vec!["Thinking hard!".into()]),
- ..Default::default()
- },
- meta: None,
- }
+ acp::ToolCallUpdate::new(
+ "1",
+ acp::ToolCallUpdateFields::new().content(vec!["Thinking hard!".into()])
+ )
);
let update = expect_tool_call_update_fields(&mut events).await;
assert_eq!(
update,
- acp::ToolCallUpdate {
- id: acp::ToolCallId("1".into()),
- fields: acp::ToolCallUpdateFields {
- status: Some(acp::ToolCallStatus::Completed),
- raw_output: Some("Finished thinking.".into()),
- ..Default::default()
- },
- meta: None,
- }
+ acp::ToolCallUpdate::new(
+ "1",
+ acp::ToolCallUpdateFields::new()
+ .status(acp::ToolCallStatus::Completed)
+ .raw_output("Finished thinking.".into())
+ )
);
}
@@ -619,12 +619,9 @@ pub struct Thread {
impl Thread {
fn prompt_capabilities(model: Option<&dyn LanguageModel>) -> acp::PromptCapabilities {
let image = model.map_or(true, |model| model.supports_images());
- acp::PromptCapabilities {
- meta: None,
- image,
- audio: false,
- embedded_context: true,
- }
+ acp::PromptCapabilities::new()
+ .image(image)
+ .embedded_context(true)
}
pub fn new(
@@ -640,7 +637,7 @@ impl Thread {
let (prompt_capabilities_tx, prompt_capabilities_rx) =
watch::channel(Self::prompt_capabilities(model.as_deref()));
Self {
- id: acp::SessionId(uuid::Uuid::new_v4().to_string().into()),
+ id: acp::SessionId::new(uuid::Uuid::new_v4().to_string()),
prompt_id: PromptId::new(),
updated_at: Utc::now(),
title: None,
@@ -737,17 +734,11 @@ impl Thread {
let Some(tool) = tool else {
stream
.0
- .unbounded_send(Ok(ThreadEvent::ToolCall(acp::ToolCall {
- meta: None,
- id: acp::ToolCallId(tool_use.id.to_string().into()),
- title: tool_use.name.to_string(),
- kind: acp::ToolKind::Other,
- status: acp::ToolCallStatus::Failed,
- content: Vec::new(),
- locations: Vec::new(),
- raw_input: Some(tool_use.input.clone()),
- raw_output: None,
- })))
+ .unbounded_send(Ok(ThreadEvent::ToolCall(
+ acp::ToolCall::new(tool_use.id.to_string(), tool_use.name.to_string())
+ .status(acp::ToolCallStatus::Failed)
+ .raw_input(tool_use.input.clone()),
+ )))
.ok();
return;
};
@@ -775,24 +766,20 @@ impl Thread {
.log_err();
}
- stream.update_tool_call_fields(
- &tool_use.id,
- acp::ToolCallUpdateFields {
- status: Some(
- tool_result
- .as_ref()
- .map_or(acp::ToolCallStatus::Failed, |result| {
- if result.is_error {
- acp::ToolCallStatus::Failed
- } else {
- acp::ToolCallStatus::Completed
- }
- }),
- ),
- raw_output: output,
- ..Default::default()
+ let mut fields = acp::ToolCallUpdateFields::new().status(tool_result.as_ref().map_or(
+ acp::ToolCallStatus::Failed,
+ |result| {
+ if result.is_error {
+ acp::ToolCallStatus::Failed
+ } else {
+ acp::ToolCallStatus::Completed
+ }
},
- );
+ ));
+ if let Some(output) = output {
+ fields = fields.raw_output(output);
+ }
+ stream.update_tool_call_fields(&tool_use.id, fields);
}
pub fn from_db(
@@ -1272,18 +1259,15 @@ impl Thread {
while let Some(tool_result) = tool_results.next().await {
log::debug!("Tool finished {:?}", tool_result);
- event_stream.update_tool_call_fields(
- &tool_result.tool_use_id,
- acp::ToolCallUpdateFields {
- status: Some(if tool_result.is_error {
- acp::ToolCallStatus::Failed
- } else {
- acp::ToolCallStatus::Completed
- }),
- raw_output: tool_result.output.clone(),
- ..Default::default()
- },
- );
+ let mut fields = acp::ToolCallUpdateFields::new().status(if tool_result.is_error {
+ acp::ToolCallStatus::Failed
+ } else {
+ acp::ToolCallStatus::Completed
+ });
+ if let Some(output) = &tool_result.output {
+ fields = fields.raw_output(output.clone());
+ }
+ event_stream.update_tool_call_fields(&tool_result.tool_use_id, fields);
this.update(cx, |this, _cx| {
this.pending_message()
.tool_results
@@ -1560,12 +1544,10 @@ impl Thread {
} else {
event_stream.update_tool_call_fields(
&tool_use.id,
- acp::ToolCallUpdateFields {
- title: Some(title.into()),
- kind: Some(kind),
- raw_input: Some(tool_use.input.clone()),
- ..Default::default()
- },
+ acp::ToolCallUpdateFields::new()
+ .title(title)
+ .kind(kind)
+ .raw_input(tool_use.input.clone()),
);
}
@@ -1587,10 +1569,9 @@ impl Thread {
let fs = self.project.read(cx).fs().clone();
let tool_event_stream =
ToolCallEventStream::new(tool_use.id.clone(), event_stream.clone(), Some(fs));
- tool_event_stream.update_fields(acp::ToolCallUpdateFields {
- status: Some(acp::ToolCallStatus::InProgress),
- ..Default::default()
- });
+ tool_event_stream.update_fields(
+ acp::ToolCallUpdateFields::new().status(acp::ToolCallStatus::InProgress),
+ );
let supports_images = self.model().is_some_and(|model| model.supports_images());
let tool_result = tool.run(tool_use.input, tool_event_stream, cx);
log::debug!("Running tool {}", tool_use.name);
@@ -2381,19 +2362,13 @@ impl ThreadEventStream {
kind: acp::ToolKind,
input: serde_json::Value,
) -> acp::ToolCall {
- acp::ToolCall {
- meta: Some(serde_json::json!({
- "tool_name": tool_name
- })),
- id: acp::ToolCallId(id.to_string().into()),
- title,
- kind,
- status: acp::ToolCallStatus::Pending,
- content: vec![],
- locations: vec![],
- raw_input: Some(input),
- raw_output: None,
- }
+ acp::ToolCall::new(id.to_string(), title)
+ .kind(kind)
+ .raw_input(input)
+ .meta(acp::Meta::from_iter([(
+ "tool_name".into(),
+ tool_name.into(),
+ )]))
}
fn update_tool_call_fields(
@@ -2403,12 +2378,7 @@ impl ThreadEventStream {
) {
self.0
.unbounded_send(Ok(ThreadEvent::ToolCallUpdate(
- acp::ToolCallUpdate {
- meta: None,
- id: acp::ToolCallId(tool_use_id.to_string().into()),
- fields,
- }
- .into(),
+ acp::ToolCallUpdate::new(tool_use_id.to_string(), fields).into(),
)))
.ok();
}
@@ -2471,7 +2441,7 @@ impl ToolCallEventStream {
.0
.unbounded_send(Ok(ThreadEvent::ToolCallUpdate(
acp_thread::ToolCallUpdateDiff {
- id: acp::ToolCallId(self.tool_use_id.to_string().into()),
+ id: acp::ToolCallId::new(self.tool_use_id.to_string()),
diff,
}
.into(),
@@ -2489,33 +2459,26 @@ impl ToolCallEventStream {
.0
.unbounded_send(Ok(ThreadEvent::ToolCallAuthorization(
ToolCallAuthorization {
- tool_call: acp::ToolCallUpdate {
- meta: None,
- id: acp::ToolCallId(self.tool_use_id.to_string().into()),
- fields: acp::ToolCallUpdateFields {
- title: Some(title.into()),
- ..Default::default()
- },
- },
+ tool_call: acp::ToolCallUpdate::new(
+ self.tool_use_id.to_string(),
+ acp::ToolCallUpdateFields::new().title(title),
+ ),
options: vec![
- acp::PermissionOption {
- id: acp::PermissionOptionId("always_allow".into()),
- name: "Always Allow".into(),
- kind: acp::PermissionOptionKind::AllowAlways,
- meta: None,
- },
- acp::PermissionOption {
- id: acp::PermissionOptionId("allow".into()),
- name: "Allow".into(),
- kind: acp::PermissionOptionKind::AllowOnce,
- meta: None,
- },
- acp::PermissionOption {
- id: acp::PermissionOptionId("deny".into()),
- name: "Deny".into(),
- kind: acp::PermissionOptionKind::RejectOnce,
- meta: None,
- },
+ acp::PermissionOption::new(
+ acp::PermissionOptionId::new("always_allow"),
+ "Always Allow",
+ acp::PermissionOptionKind::AllowAlways,
+ ),
+ acp::PermissionOption::new(
+ acp::PermissionOptionId::new("allow"),
+ "Allow",
+ acp::PermissionOptionKind::AllowOnce,
+ ),
+ acp::PermissionOption::new(
+ acp::PermissionOptionId::new("deny"),
+ "Deny",
+ acp::PermissionOptionKind::RejectOnce,
+ ),
],
response: response_tx,
},
@@ -2660,7 +2623,15 @@ impl UserMessageContent {
// TODO
Self::Text("[blob]".to_string())
}
+ other => {
+ log::warn!("Unexpected content type: {:?}", other);
+ Self::Text("[unknown]".to_string())
+ }
},
+ other => {
+ log::warn!("Unexpected content type: {:?}", other);
+ Self::Text("[unknown]".to_string())
+ }
}
}
}
@@ -2668,32 +2639,15 @@ impl UserMessageContent {
impl From<UserMessageContent> for acp::ContentBlock {
fn from(content: UserMessageContent) -> Self {
match content {
- UserMessageContent::Text(text) => acp::ContentBlock::Text(acp::TextContent {
- text,
- annotations: None,
- meta: None,
- }),
- UserMessageContent::Image(image) => acp::ContentBlock::Image(acp::ImageContent {
- data: image.source.to_string(),
- mime_type: "image/png".to_string(),
- meta: None,
- annotations: None,
- uri: None,
- }),
- UserMessageContent::Mention { uri, content } => {
- acp::ContentBlock::Resource(acp::EmbeddedResource {
- meta: None,
- resource: acp::EmbeddedResourceResource::TextResourceContents(
- acp::TextResourceContents {
- meta: None,
- mime_type: None,
- text: content,
- uri: uri.to_uri().to_string(),
- },
- ),
- annotations: None,
- })
+ UserMessageContent::Text(text) => text.into(),
+ UserMessageContent::Image(image) => {
+ acp::ContentBlock::Image(acp::ImageContent::new(image.source, "image/png"))
}
+ UserMessageContent::Mention { uri, content } => acp::ContentBlock::Resource(
+ acp::EmbeddedResource::new(acp::EmbeddedResourceResource::TextResourceContents(
+ acp::TextResourceContents::new(content, uri.to_uri().to_string()),
+ )),
+ ),
}
}
}
@@ -273,14 +273,9 @@ impl AgentTool for EditFileTool {
};
let abs_path = project.read(cx).absolute_path(&project_path, cx);
if let Some(abs_path) = abs_path.clone() {
- event_stream.update_fields(ToolCallUpdateFields {
- locations: Some(vec![acp::ToolCallLocation {
- path: abs_path,
- line: None,
- meta: None,
- }]),
- ..Default::default()
- });
+ event_stream.update_fields(
+ ToolCallUpdateFields::new().locations(vec![acp::ToolCallLocation::new(abs_path)]),
+ );
}
let authorize = self.authorize(&input, &event_stream, cx);
@@ -389,10 +384,11 @@ impl AgentTool for EditFileTool {
range.start.to_point(&buffer.snapshot()).row
}).ok();
if let Some(abs_path) = abs_path.clone() {
- event_stream.update_fields(ToolCallUpdateFields {
- locations: Some(vec![ToolCallLocation { path: abs_path, line, meta: None }]),
- ..Default::default()
- });
+ let mut location = ToolCallLocation::new(abs_path);
+ if let Some(line) = line {
+ location = location.line(line);
+ }
+ event_stream.update_fields(ToolCallUpdateFields::new().locations(vec![location]));
}
emitted_location = true;
}
@@ -118,33 +118,29 @@ impl AgentTool for FindPathTool {
let paginated_matches: &[PathBuf] = &matches[cmp::min(input.offset, matches.len())
..cmp::min(input.offset + RESULTS_PER_PAGE, matches.len())];
- event_stream.update_fields(acp::ToolCallUpdateFields {
- title: Some(if paginated_matches.is_empty() {
- "No matches".into()
- } else if paginated_matches.len() == 1 {
- "1 match".into()
- } else {
- format!("{} matches", paginated_matches.len())
- }),
- content: Some(
- paginated_matches
- .iter()
- .map(|path| acp::ToolCallContent::Content {
- content: acp::ContentBlock::ResourceLink(acp::ResourceLink {
- uri: format!("file://{}", path.display()),
- name: path.to_string_lossy().into(),
- annotations: None,
- description: None,
- mime_type: None,
- size: None,
- title: None,
- meta: None,
- }),
- })
- .collect(),
- ),
- ..Default::default()
- });
+ event_stream.update_fields(
+ acp::ToolCallUpdateFields::new()
+ .title(if paginated_matches.is_empty() {
+ "No matches".into()
+ } else if paginated_matches.len() == 1 {
+ "1 match".into()
+ } else {
+ format!("{} matches", paginated_matches.len())
+ })
+ .content(
+ paginated_matches
+ .iter()
+ .map(|path| {
+ acp::ToolCallContent::Content(acp::Content::new(
+ acp::ContentBlock::ResourceLink(acp::ResourceLink::new(
+ path.to_string_lossy(),
+ format!("file://{}", path.display()),
+ )),
+ ))
+ })
+ .collect(),
+ ),
+ );
Ok(FindPathToolOutput {
offset: input.offset,
@@ -152,15 +152,12 @@ impl AgentTool for ReadFileTool {
}
let file_path = input.path.clone();
+ let mut location = acp::ToolCallLocation::new(&abs_path);
+ if let Some(line) = input.start_line {
+ location = location.line(line.saturating_sub(1));
+ }
- event_stream.update_fields(ToolCallUpdateFields {
- locations: Some(vec![acp::ToolCallLocation {
- path: abs_path.clone(),
- line: input.start_line.map(|line| line.saturating_sub(1)),
- meta: None,
- }]),
- ..Default::default()
- });
+ event_stream.update_fields(ToolCallUpdateFields::new().locations(vec![location]));
if image_store::is_image_file(&self.project, &project_path, cx) {
return cx.spawn(async move |cx| {
@@ -289,12 +286,9 @@ impl AgentTool for ReadFileTool {
text,
}
.to_string();
- event_stream.update_fields(ToolCallUpdateFields {
- content: Some(vec![acp::ToolCallContent::Content {
- content: markdown.into(),
- }]),
- ..Default::default()
- })
+ event_stream.update_fields(ToolCallUpdateFields::new().content(vec![
+ acp::ToolCallContent::Content(acp::Content::new(markdown)),
+ ]));
}
})?;
@@ -112,10 +112,9 @@ impl AgentTool for TerminalTool {
.await?;
let terminal_id = terminal.id(cx)?;
- event_stream.update_fields(acp::ToolCallUpdateFields {
- content: Some(vec![acp::ToolCallContent::Terminal { terminal_id }]),
- ..Default::default()
- });
+ event_stream.update_fields(acp::ToolCallUpdateFields::new().content(vec![
+ acp::ToolCallContent::Terminal(acp::Terminal::new(terminal_id)),
+ ]));
let exit_status = terminal.wait_for_exit(cx)?.await;
let output = terminal.current_output(cx)?;
@@ -43,10 +43,8 @@ impl AgentTool for ThinkingTool {
event_stream: ToolCallEventStream,
_cx: &mut App,
) -> Task<Result<String>> {
- event_stream.update_fields(acp::ToolCallUpdateFields {
- content: Some(vec![input.content.into()]),
- ..Default::default()
- });
+ event_stream
+ .update_fields(acp::ToolCallUpdateFields::new().content(vec![input.content.into()]));
Task::ready(Ok("Finished thinking.".to_string()))
}
}
@@ -76,10 +76,8 @@ impl AgentTool for WebSearchTool {
let response = match search_task.await {
Ok(response) => response,
Err(err) => {
- event_stream.update_fields(acp::ToolCallUpdateFields {
- title: Some("Web Search Failed".to_string()),
- ..Default::default()
- });
+ event_stream
+ .update_fields(acp::ToolCallUpdateFields::new().title("Web Search Failed"));
return Err(err);
}
};
@@ -107,26 +105,23 @@ fn emit_update(response: &WebSearchResponse, event_stream: &ToolCallEventStream)
} else {
format!("{} results", response.results.len())
};
- event_stream.update_fields(acp::ToolCallUpdateFields {
- title: Some(format!("Searched the web: {result_text}")),
- content: Some(
- response
- .results
- .iter()
- .map(|result| acp::ToolCallContent::Content {
- content: acp::ContentBlock::ResourceLink(acp::ResourceLink {
- name: result.title.clone(),
- uri: result.url.clone(),
- title: Some(result.title.clone()),
- description: Some(result.text.clone()),
- mime_type: None,
- annotations: None,
- size: None,
- meta: None,
- }),
- })
- .collect(),
- ),
- ..Default::default()
- });
+ event_stream.update_fields(
+ acp::ToolCallUpdateFields::new()
+ .title(format!("Searched the web: {result_text}"))
+ .content(
+ response
+ .results
+ .iter()
+ .map(|result| {
+ acp::ToolCallContent::Content(acp::Content::new(
+ acp::ContentBlock::ResourceLink(
+ acp::ResourceLink::new(result.title.clone(), result.url.clone())
+ .title(result.title.clone())
+ .description(result.text.clone()),
+ ),
+ ))
+ })
+ .collect(),
+ ),
+ );
}
@@ -76,7 +76,7 @@ pub async fn connect(
Ok(Rc::new(conn) as _)
}
-const MINIMUM_SUPPORTED_VERSION: acp::ProtocolVersion = acp::V1;
+const MINIMUM_SUPPORTED_VERSION: acp::ProtocolVersion = acp::ProtocolVersion::V1;
impl AcpConnection {
pub async fn stdio(
@@ -173,29 +173,27 @@ impl AcpConnection {
});
})?;
+ let mut client_info = acp::Implementation::new("zed", version);
+ if let Some(release_channel) = release_channel {
+ client_info = client_info.title(release_channel);
+ }
let response = connection
- .initialize(acp::InitializeRequest {
- protocol_version: acp::VERSION,
- client_capabilities: acp::ClientCapabilities {
- fs: acp::FileSystemCapability {
- read_text_file: true,
- write_text_file: true,
- meta: None,
- },
- terminal: true,
- meta: Some(serde_json::json!({
- // Experimental: Allow for rendering terminal output from the agents
- "terminal_output": true,
- "terminal-auth": true,
- })),
- },
- client_info: Some(acp::Implementation {
- name: "zed".to_owned(),
- title: release_channel.map(|c| c.to_owned()),
- version,
- }),
- meta: None,
- })
+ .initialize(
+ acp::InitializeRequest::new(acp::ProtocolVersion::V1)
+ .client_capabilities(
+ acp::ClientCapabilities::new()
+ .fs(acp::FileSystemCapability::new()
+ .read_text_file(true)
+ .write_text_file(true))
+ .terminal(true)
+ // Experimental: Allow for rendering terminal output from the agents
+ .meta(acp::Meta::from_iter([
+ ("terminal_output".into(), true.into()),
+ ("terminal-auth".into(), true.into()),
+ ])),
+ )
+ .client_info(client_info),
+ )
.await?;
if response.protocol_version < MINIMUM_SUPPORTED_VERSION {
@@ -253,14 +251,13 @@ impl AgentConnection for AcpConnection {
let default_model = self.default_model.clone();
let cwd = cwd.to_path_buf();
let context_server_store = project.read(cx).context_server_store().read(cx);
- let mcp_servers =
- if project.read(cx).is_local() {
- context_server_store
- .configured_server_ids()
- .iter()
- .filter_map(|id| {
- let configuration = context_server_store.configuration_for_server(id)?;
- match &*configuration {
+ let mcp_servers = if project.read(cx).is_local() {
+ context_server_store
+ .configured_server_ids()
+ .iter()
+ .filter_map(|id| {
+ let configuration = context_server_store.configuration_for_server(id)?;
+ match &*configuration {
project::context_server_store::ContextServerConfiguration::Custom {
command,
..
@@ -268,47 +265,41 @@ impl AgentConnection for AcpConnection {
| project::context_server_store::ContextServerConfiguration::Extension {
command,
..
- } => Some(acp::McpServer::Stdio {
- name: id.0.to_string(),
- command: command.path.clone(),
- args: command.args.clone(),
- env: if let Some(env) = command.env.as_ref() {
- env.iter()
- .map(|(name, value)| acp::EnvVariable {
- name: name.clone(),
- value: value.clone(),
- meta: None,
- })
- .collect()
- } else {
- vec![]
- },
- }),
+ } => Some(acp::McpServer::Stdio(
+ acp::McpServerStdio::new(id.0.to_string(), &command.path)
+ .args(command.args.clone())
+ .env(if let Some(env) = command.env.as_ref() {
+ env.iter()
+ .map(|(name, value)| acp::EnvVariable::new(name, value))
+ .collect()
+ } else {
+ vec![]
+ }),
+ )),
project::context_server_store::ContextServerConfiguration::Http {
url,
headers,
- } => Some(acp::McpServer::Http {
- name: id.0.to_string(),
- url: url.to_string(),
- headers: headers.iter().map(|(name, value)| acp::HttpHeader {
- name: name.clone(),
- value: value.clone(),
- meta: None,
- }).collect(),
- }),
+ } => Some(acp::McpServer::Http(
+ acp::McpServerHttp::new(id.0.to_string(), url.to_string()).headers(
+ headers
+ .iter()
+ .map(|(name, value)| acp::HttpHeader::new(name, value))
+ .collect(),
+ ),
+ )),
}
- })
- .collect()
- } else {
- // In SSH projects, the external agent is running on the remote
- // machine, and currently we only run MCP servers on the local
- // machine. So don't pass any MCP servers to the agent in that case.
- Vec::new()
- };
+ })
+ .collect()
+ } else {
+ // In SSH projects, the external agent is running on the remote
+ // machine, and currently we only run MCP servers on the local
+ // machine. So don't pass any MCP servers to the agent in that case.
+ Vec::new()
+ };
cx.spawn(async move |cx| {
let response = conn
- .new_session(acp::NewSessionRequest { mcp_servers, cwd, meta: None })
+ .new_session(acp::NewSessionRequest::new(cwd).mcp_servers(mcp_servers))
.await
.map_err(|err| {
if err.code == acp::ErrorCode::AUTH_REQUIRED.code {
@@ -341,11 +332,7 @@ impl AgentConnection for AcpConnection {
let modes = modes.clone();
let conn = conn.clone();
async move |_| {
- let result = conn.set_session_mode(acp::SetSessionModeRequest {
- session_id,
- mode_id: default_mode,
- meta: None,
- })
+ let result = conn.set_session_mode(acp::SetSessionModeRequest::new(session_id, default_mode))
.await.log_err();
if result.is_none() {
@@ -388,11 +375,7 @@ impl AgentConnection for AcpConnection {
let models = models.clone();
let conn = conn.clone();
async move |_| {
- let result = conn.set_session_model(acp::SetSessionModelRequest {
- session_id,
- model_id: default_model,
- meta: None,
- })
+ let result = conn.set_session_model(acp::SetSessionModelRequest::new(session_id, default_model))
.await.log_err();
if result.is_none() {
@@ -456,12 +439,8 @@ impl AgentConnection for AcpConnection {
fn authenticate(&self, method_id: acp::AuthMethodId, cx: &mut App) -> Task<Result<()>> {
let conn = self.connection.clone();
cx.foreground_executor().spawn(async move {
- conn.authenticate(acp::AuthenticateRequest {
- method_id: method_id.clone(),
- meta: None,
- })
- .await?;
-
+ conn.authenticate(acp::AuthenticateRequest::new(method_id))
+ .await?;
Ok(())
})
}
@@ -515,10 +494,7 @@ impl AgentConnection for AcpConnection {
&& (details.contains("This operation was aborted")
|| details.contains("The user aborted a request"))
{
- Ok(acp::PromptResponse {
- stop_reason: acp::StopReason::Cancelled,
- meta: None,
- })
+ Ok(acp::PromptResponse::new(acp::StopReason::Cancelled))
} else {
Err(anyhow!(details))
}
@@ -535,10 +511,7 @@ impl AgentConnection for AcpConnection {
session.suppress_abort_err = true;
}
let conn = self.connection.clone();
- let params = acp::CancelNotification {
- session_id: session_id.clone(),
- meta: None,
- };
+ let params = acp::CancelNotification::new(session_id.clone());
cx.foreground_executor()
.spawn(async move { conn.cancel(params).await })
.detach();
@@ -619,11 +592,7 @@ impl acp_thread::AgentSessionModes for AcpSessionModes {
let state = self.state.clone();
cx.foreground_executor().spawn(async move {
let result = connection
- .set_session_mode(acp::SetSessionModeRequest {
- session_id,
- mode_id,
- meta: None,
- })
+ .set_session_mode(acp::SetSessionModeRequest::new(session_id, mode_id))
.await;
if result.is_err() {
@@ -682,11 +651,7 @@ impl acp_thread::AgentModelSelector for AcpModelSelector {
let state = self.state.clone();
cx.foreground_executor().spawn(async move {
let result = connection
- .set_session_model(acp::SetSessionModelRequest {
- session_id,
- model_id,
- meta: None,
- })
+ .set_session_model(acp::SetSessionModelRequest::new(session_id, model_id))
.await;
if result.is_err() {
@@ -748,10 +713,7 @@ impl acp::Client for ClientDelegate {
let outcome = task.await;
- Ok(acp::RequestPermissionResponse {
- outcome,
- meta: None,
- })
+ Ok(acp::RequestPermissionResponse::new(outcome))
}
async fn write_text_file(
@@ -783,10 +745,7 @@ impl acp::Client for ClientDelegate {
let content = task.await?;
- Ok(acp::ReadTextFileResponse {
- content,
- meta: None,
- })
+ Ok(acp::ReadTextFileResponse::new(content))
}
async fn session_notification(
@@ -821,7 +780,7 @@ impl acp::Client for ClientDelegate {
if let Some(terminal_info) = meta.get("terminal_info") {
if let Some(id_str) = terminal_info.get("terminal_id").and_then(|v| v.as_str())
{
- let terminal_id = acp::TerminalId(id_str.into());
+ let terminal_id = acp::TerminalId::new(id_str);
let cwd = terminal_info
.get("cwd")
.and_then(|v| v.as_str().map(PathBuf::from));
@@ -837,7 +796,7 @@ impl acp::Client for ClientDelegate {
let lower = cx.new(|cx| builder.subscribe(cx));
thread.on_terminal_provider_event(
TerminalProviderEvent::Created {
- terminal_id: terminal_id.clone(),
+ terminal_id,
label: tc.title.clone(),
cwd,
output_byte_limit: None,
@@ -862,15 +821,12 @@ impl acp::Client for ClientDelegate {
if let Some(meta) = &tcu.meta {
if let Some(term_out) = meta.get("terminal_output") {
if let Some(id_str) = term_out.get("terminal_id").and_then(|v| v.as_str()) {
- let terminal_id = acp::TerminalId(id_str.into());
+ let terminal_id = acp::TerminalId::new(id_str);
if let Some(s) = term_out.get("data").and_then(|v| v.as_str()) {
let data = s.as_bytes().to_vec();
let _ = session.thread.update(&mut self.cx.clone(), |thread, cx| {
thread.on_terminal_provider_event(
- TerminalProviderEvent::Output {
- terminal_id: terminal_id.clone(),
- data,
- },
+ TerminalProviderEvent::Output { terminal_id, data },
cx,
);
});
@@ -881,21 +837,19 @@ impl acp::Client for ClientDelegate {
// terminal_exit
if let Some(term_exit) = meta.get("terminal_exit") {
if let Some(id_str) = term_exit.get("terminal_id").and_then(|v| v.as_str()) {
- let terminal_id = acp::TerminalId(id_str.into());
- let status = acp::TerminalExitStatus {
- exit_code: term_exit
- .get("exit_code")
- .and_then(|v| v.as_u64())
- .map(|i| i as u32),
- signal: term_exit
- .get("signal")
- .and_then(|v| v.as_str().map(|s| s.to_string())),
- meta: None,
- };
+ let terminal_id = acp::TerminalId::new(id_str);
+ let mut status = acp::TerminalExitStatus::new();
+ if let Some(code) = term_exit.get("exit_code").and_then(|v| v.as_u64()) {
+ status = status.exit_code(code as u32)
+ }
+ if let Some(signal) = term_exit.get("signal").and_then(|v| v.as_str()) {
+ status = status.signal(signal);
+ }
+
let _ = session.thread.update(&mut self.cx.clone(), |thread, cx| {
thread.on_terminal_provider_event(
TerminalProviderEvent::Exit {
- terminal_id: terminal_id.clone(),
+ terminal_id,
status,
},
cx,
@@ -932,7 +886,7 @@ impl acp::Client for ClientDelegate {
// Register with renderer
let terminal_entity = thread.update(&mut self.cx.clone(), |thread, cx| {
thread.register_terminal_created(
- acp::TerminalId(uuid::Uuid::new_v4().to_string().into()),
+ acp::TerminalId::new(uuid::Uuid::new_v4().to_string()),
format!("{} {}", args.command, args.args.join(" ")),
args.cwd.clone(),
args.output_byte_limit,
@@ -942,10 +896,7 @@ impl acp::Client for ClientDelegate {
})?;
let terminal_id =
terminal_entity.read_with(&self.cx, |terminal, _| terminal.id().clone())?;
- Ok(acp::CreateTerminalResponse {
- terminal_id,
- meta: None,
- })
+ Ok(acp::CreateTerminalResponse::new(terminal_id))
}
async fn kill_terminal_command(
@@ -1006,10 +957,7 @@ impl acp::Client for ClientDelegate {
})??
.await;
- Ok(acp::WaitForTerminalExitResponse {
- exit_status,
- meta: None,
- })
+ Ok(acp::WaitForTerminalExitResponse::new(exit_status))
}
}
@@ -41,7 +41,7 @@ impl AgentServer for ClaudeCode {
settings
.as_ref()
- .and_then(|s| s.default_mode.clone().map(|m| acp::SessionModeId(m.into())))
+ .and_then(|s| s.default_mode.clone().map(acp::SessionModeId::new))
}
fn set_default_mode(&self, mode_id: Option<acp::SessionModeId>, fs: Arc<dyn Fs>, cx: &mut App) {
@@ -62,7 +62,7 @@ impl AgentServer for ClaudeCode {
settings
.as_ref()
- .and_then(|s| s.default_model.clone().map(|m| acp::ModelId(m.into())))
+ .and_then(|s| s.default_model.clone().map(acp::ModelId::new))
}
fn set_default_model(&self, model_id: Option<acp::ModelId>, fs: Arc<dyn Fs>, cx: &mut App) {
@@ -42,7 +42,7 @@ impl AgentServer for Codex {
settings
.as_ref()
- .and_then(|s| s.default_mode.clone().map(|m| acp::SessionModeId(m.into())))
+ .and_then(|s| s.default_mode.clone().map(acp::SessionModeId::new))
}
fn set_default_mode(&self, mode_id: Option<acp::SessionModeId>, fs: Arc<dyn Fs>, cx: &mut App) {
@@ -63,7 +63,7 @@ impl AgentServer for Codex {
settings
.as_ref()
- .and_then(|s| s.default_model.clone().map(|m| acp::ModelId(m.into())))
+ .and_then(|s| s.default_model.clone().map(acp::ModelId::new))
}
fn set_default_model(&self, model_id: Option<acp::ModelId>, fs: Arc<dyn Fs>, cx: &mut App) {
@@ -44,7 +44,7 @@ impl crate::AgentServer for CustomAgentServer {
settings
.as_ref()
- .and_then(|s| s.default_mode().map(|m| acp::SessionModeId(m.into())))
+ .and_then(|s| s.default_mode().map(acp::SessionModeId::new))
}
fn set_default_mode(&self, mode_id: Option<acp::SessionModeId>, fs: Arc<dyn Fs>, cx: &mut App) {
@@ -80,7 +80,7 @@ impl crate::AgentServer for CustomAgentServer {
settings
.as_ref()
- .and_then(|s| s.default_model().map(|m| acp::ModelId(m.into())))
+ .and_then(|s| s.default_model().map(acp::ModelId::new))
}
fn set_default_model(&self, model_id: Option<acp::ModelId>, fs: Arc<dyn Fs>, cx: &mut App) {
@@ -82,26 +82,9 @@ where
.update(cx, |thread, cx| {
thread.send(
vec![
- acp::ContentBlock::Text(acp::TextContent {
- text: "Read the file ".into(),
- annotations: None,
- meta: None,
- }),
- acp::ContentBlock::ResourceLink(acp::ResourceLink {
- uri: "foo.rs".into(),
- name: "foo.rs".into(),
- annotations: None,
- description: None,
- mime_type: None,
- size: None,
- title: None,
- meta: None,
- }),
- acp::ContentBlock::Text(acp::TextContent {
- text: " and tell me what the content of the println! is".into(),
- annotations: None,
- meta: None,
- }),
+ "Read the file ".into(),
+ acp::ContentBlock::ResourceLink(acp::ResourceLink::new("foo.rs", "foo.rs")),
+ " and tell me what the content of the println! is".into(),
],
cx,
)
@@ -429,7 +412,7 @@ macro_rules! common_e2e_tests {
async fn tool_call_with_permission(cx: &mut ::gpui::TestAppContext) {
$crate::e2e_tests::test_tool_call_with_permission(
$server,
- ::agent_client_protocol::PermissionOptionId($allow_option_id.into()),
+ ::agent_client_protocol::PermissionOptionId::new($allow_option_id),
cx,
)
.await;
@@ -13,7 +13,8 @@ path = "src/agent_ui.rs"
doctest = false
[features]
-test-support = ["gpui/test-support", "language/test-support"]
+test-support = ["gpui/test-support", "language/test-support", "reqwest_client"]
+unit-eval = []
[dependencies]
acp_thread.workspace = true
@@ -47,6 +48,7 @@ fs.workspace = true
futures.workspace = true
fuzzy.workspace = true
gpui.workspace = true
+gpui_tokio.workspace = true
html_to_markdown.workspace = true
http_client.workspace = true
indoc.workspace = true
@@ -98,14 +100,17 @@ workspace.workspace = true
zed_actions.workspace = true
image.workspace = true
async-fs.workspace = true
+reqwest_client = { workspace = true, optional = true }
[dev-dependencies]
acp_thread = { workspace = true, features = ["test-support"] }
agent = { workspace = true, features = ["test-support"] }
assistant_text_thread = { workspace = true, features = ["test-support"] }
buffer_diff = { workspace = true, features = ["test-support"] }
+clock.workspace = true
db = { workspace = true, features = ["test-support"] }
editor = { workspace = true, features = ["test-support"] }
+eval_utils.workspace = true
gpui = { workspace = true, "features" = ["test-support"] }
indoc.workspace = true
language = { workspace = true, "features" = ["test-support"] }
@@ -115,5 +120,6 @@ pretty_assertions.workspace = true
project = { workspace = true, features = ["test-support"] }
semver.workspace = true
rand.workspace = true
+reqwest_client.workspace = true
tree-sitter-md.workspace = true
unindent.workspace = true
@@ -432,24 +432,11 @@ mod tests {
let (workspace, cx) =
cx.add_window_view(|window, cx| Workspace::test_new(project.clone(), window, cx));
- let tool_call = acp::ToolCall {
- id: acp::ToolCallId("tool".into()),
- title: "Tool call".into(),
- kind: acp::ToolKind::Other,
- status: acp::ToolCallStatus::InProgress,
- content: vec![acp::ToolCallContent::Diff {
- diff: acp::Diff {
- path: "/project/hello.txt".into(),
- old_text: Some("hi world".into()),
- new_text: "hello world".into(),
- meta: None,
- },
- }],
- locations: vec![],
- raw_input: None,
- raw_output: None,
- meta: None,
- };
+ let tool_call = acp::ToolCall::new("tool", "Tool call")
+ .status(acp::ToolCallStatus::InProgress)
+ .content(vec![acp::ToolCallContent::Diff(
+ acp::Diff::new("/project/hello.txt", "hello world").old_text("hi world"),
+ )]);
let connection = Rc::new(StubAgentConnection::new());
let thread = cx
.update(|_, cx| {
@@ -225,8 +225,13 @@ impl MessageEditor {
.iter()
.find(|command| command.name == command_name)?;
- let acp::AvailableCommandInput::Unstructured { mut hint } =
- available_command.input.clone()?;
+ let acp::AvailableCommandInput::Unstructured(acp::UnstructuredCommandInput {
+ mut hint,
+ ..
+ }) = available_command.input.clone()?
+ else {
+ return None;
+ };
let mut hint_pos = MultiBufferOffset(parsed_command.source_range.end) + 1usize;
if hint_pos > snapshot.len() {
@@ -403,34 +408,28 @@ impl MessageEditor {
} => {
all_tracked_buffers.extend(tracked_buffers.iter().cloned());
if supports_embedded_context {
- acp::ContentBlock::Resource(acp::EmbeddedResource {
- annotations: None,
- resource:
- acp::EmbeddedResourceResource::TextResourceContents(
- acp::TextResourceContents {
- mime_type: None,
- text: content.clone(),
- uri: uri.to_uri().to_string(),
- meta: None,
- },
+ acp::ContentBlock::Resource(acp::EmbeddedResource::new(
+ acp::EmbeddedResourceResource::TextResourceContents(
+ acp::TextResourceContents::new(
+ content.clone(),
+ uri.to_uri().to_string(),
),
- meta: None,
- })
+ ),
+ ))
} else {
- acp::ContentBlock::ResourceLink(acp::ResourceLink {
- name: uri.name(),
- uri: uri.to_uri().to_string(),
- annotations: None,
- description: None,
- mime_type: None,
- size: None,
- title: None,
- meta: None,
- })
+ acp::ContentBlock::ResourceLink(acp::ResourceLink::new(
+ uri.name(),
+ uri.to_uri().to_string(),
+ ))
}
}
Mention::Image(mention_image) => {
- let uri = match uri {
+ let mut image = acp::ImageContent::new(
+ mention_image.data.clone(),
+ mention_image.format.mime_type(),
+ );
+
+ if let Some(uri) = match uri {
MentionUri::File { .. } => Some(uri.to_uri().to_string()),
MentionUri::PastedImage => None,
other => {
@@ -440,25 +439,14 @@ impl MessageEditor {
);
None
}
+ } {
+ image = image.uri(uri)
};
- acp::ContentBlock::Image(acp::ImageContent {
- annotations: None,
- data: mention_image.data.to_string(),
- mime_type: mention_image.format.mime_type().into(),
- uri,
- meta: None,
- })
+ acp::ContentBlock::Image(image)
}
- Mention::Link => acp::ContentBlock::ResourceLink(acp::ResourceLink {
- name: uri.name(),
- uri: uri.to_uri().to_string(),
- annotations: None,
- description: None,
- mime_type: None,
- size: None,
- title: None,
- meta: None,
- }),
+ Mention::Link => acp::ContentBlock::ResourceLink(
+ acp::ResourceLink::new(uri.name(), uri.to_uri().to_string()),
+ ),
};
chunks.push(chunk);
ix = crease_range.end.0;
@@ -746,8 +734,7 @@ impl MessageEditor {
uri,
data,
mime_type,
- annotations: _,
- meta: _,
+ ..
}) => {
let mention_uri = if let Some(uri) = uri {
MentionUri::parse(&uri, path_style)
@@ -773,7 +760,7 @@ impl MessageEditor {
}),
));
}
- acp::ContentBlock::Audio(_) | acp::ContentBlock::Resource(_) => {}
+ _ => {}
}
}
@@ -1092,12 +1079,7 @@ mod tests {
assert!(error_message.contains("Available commands: none"));
// Now simulate Claude providing its list of available commands (which doesn't include file)
- available_commands.replace(vec![acp::AvailableCommand {
- name: "help".to_string(),
- description: "Get help".to_string(),
- input: None,
- meta: None,
- }]);
+ available_commands.replace(vec![acp::AvailableCommand::new("help", "Get help")]);
// Test that unsupported slash commands trigger an error when we have a list of available commands
editor.update_in(cx, |editor, window, cx| {
@@ -1211,20 +1193,12 @@ mod tests {
let history_store = cx.new(|cx| HistoryStore::new(text_thread_store, cx));
let prompt_capabilities = Rc::new(RefCell::new(acp::PromptCapabilities::default()));
let available_commands = Rc::new(RefCell::new(vec![
- acp::AvailableCommand {
- name: "quick-math".to_string(),
- description: "2 + 2 = 4 - 1 = 3".to_string(),
- input: None,
- meta: None,
- },
- acp::AvailableCommand {
- name: "say-hello".to_string(),
- description: "Say hello to whoever you want".to_string(),
- input: Some(acp::AvailableCommandInput::Unstructured {
- hint: "<name>".to_string(),
- }),
- meta: None,
- },
+ acp::AvailableCommand::new("quick-math", "2 + 2 = 4 - 1 = 3"),
+ acp::AvailableCommand::new("say-hello", "Say hello to whoever you want").input(
+ acp::AvailableCommandInput::Unstructured(acp::UnstructuredCommandInput::new(
+ "<name>",
+ )),
+ ),
]));
let editor = workspace.update_in(&mut cx, |workspace, window, cx| {
@@ -1504,12 +1478,12 @@ mod tests {
editor.set_text("", window, cx);
});
- prompt_capabilities.replace(acp::PromptCapabilities {
- image: true,
- audio: true,
- embedded_context: true,
- meta: None,
- });
+ prompt_capabilities.replace(
+ acp::PromptCapabilities::new()
+ .image(true)
+ .audio(true)
+ .embedded_context(true),
+ );
cx.simulate_input("Lorem ");
@@ -1960,11 +1934,9 @@ mod tests {
cx,
);
// Enable embedded context so files are actually included
- editor.prompt_capabilities.replace(acp::PromptCapabilities {
- embedded_context: true,
- meta: None,
- ..Default::default()
- });
+ editor
+ .prompt_capabilities
+ .replace(acp::PromptCapabilities::new().embedded_context(true));
editor
})
});
@@ -2043,7 +2015,7 @@ mod tests {
// Create a thread metadata to insert as summary
let thread_metadata = agent::DbThreadMetadata {
- id: acp::SessionId("thread-123".into()),
+ id: acp::SessionId::new("thread-123"),
title: "Previous Conversation".into(),
updated_at: chrono::Utc::now(),
};
@@ -2150,14 +2122,7 @@ mod tests {
.await
.unwrap();
- assert_eq!(
- content,
- vec![acp::ContentBlock::Text(acp::TextContent {
- text: "してhello world".into(),
- annotations: None,
- meta: None
- })]
- );
+ assert_eq!(content, vec!["してhello world".into()]);
}
#[gpui::test]
@@ -2236,38 +2201,24 @@ mod tests {
.0;
let main_rs_uri = if cfg!(windows) {
- "file:///C:/project/src/main.rs".to_string()
+ "file:///C:/project/src/main.rs"
} else {
- "file:///project/src/main.rs".to_string()
+ "file:///project/src/main.rs"
};
// When embedded context is `false` we should get a resource link
pretty_assertions::assert_eq!(
content,
vec![
- acp::ContentBlock::Text(acp::TextContent {
- text: "What is in ".to_string(),
- annotations: None,
- meta: None
- }),
- acp::ContentBlock::ResourceLink(acp::ResourceLink {
- uri: main_rs_uri.clone(),
- name: "main.rs".to_string(),
- annotations: None,
- meta: None,
- description: None,
- mime_type: None,
- size: None,
- title: None,
- })
+ "What is in ".into(),
+ acp::ContentBlock::ResourceLink(acp::ResourceLink::new("main.rs", main_rs_uri))
]
);
message_editor.update(cx, |editor, _cx| {
- editor.prompt_capabilities.replace(acp::PromptCapabilities {
- embedded_context: true,
- ..Default::default()
- })
+ editor
+ .prompt_capabilities
+ .replace(acp::PromptCapabilities::new().embedded_context(true))
});
let content = message_editor
@@ -2280,23 +2231,12 @@ mod tests {
pretty_assertions::assert_eq!(
content,
vec![
- acp::ContentBlock::Text(acp::TextContent {
- text: "What is in ".to_string(),
- annotations: None,
- meta: None
- }),
- acp::ContentBlock::Resource(acp::EmbeddedResource {
- resource: acp::EmbeddedResourceResource::TextResourceContents(
- acp::TextResourceContents {
- text: file_content.to_string(),
- uri: main_rs_uri,
- mime_type: None,
- meta: None
- }
- ),
- annotations: None,
- meta: None
- })
+ "What is in ".into(),
+ acp::ContentBlock::Resource(acp::EmbeddedResource::new(
+ acp::EmbeddedResourceResource::TextResourceContents(
+ acp::TextResourceContents::new(file_content, main_rs_uri)
+ )
+ ))
]
);
}
@@ -161,7 +161,7 @@ impl Render for ModeSelector {
.map(|mode| mode.name.clone())
.unwrap_or_else(|| "Unknown".into());
- let this = cx.entity();
+ let this = cx.weak_entity();
let icon = if self.menu_handle.is_deployed() {
IconName::ChevronUp
@@ -222,7 +222,8 @@ impl Render for ModeSelector {
y: px(-2.0),
})
.menu(move |window, cx| {
- Some(this.update(cx, |this, cx| this.build_context_menu(window, cx)))
+ this.update(cx, |this, cx| this.build_context_menu(window, cx))
+ .ok()
})
}
}
@@ -464,7 +464,7 @@ mod tests {
models
.into_iter()
.map(|model| acp_thread::AgentModelInfo {
- id: acp::ModelId(model.to_string().into()),
+ id: acp::ModelId::new(model.to_string()),
name: model.to_string().into(),
description: None,
icon: None,
@@ -498,17 +498,7 @@ impl AcpThreadView {
Some(new_version_available_tx),
);
- let agent_name = agent.name();
- let timeout = cx.background_executor().timer(Duration::from_secs(30));
- let connect_task = smol::future::or(
- agent.connect(root_dir.as_deref(), delegate, cx),
- async move {
- timeout.await;
- Err(anyhow::Error::new(LoadError::Other(
- format!("{agent_name} is unable to initialize after 30 seconds.").into(),
- )))
- },
- );
+ let connect_task = agent.connect(root_dir.as_deref(), delegate, cx);
let load_task = cx.spawn_in(window, async move |this, cx| {
let connection = match connect_task.await {
Ok((connection, login)) => {
@@ -1486,18 +1476,8 @@ impl AcpThreadView {
.iter()
.any(|method| method.id.0.as_ref() == "claude-login")
{
- available_commands.push(acp::AvailableCommand {
- name: "login".to_owned(),
- description: "Authenticate".to_owned(),
- input: None,
- meta: None,
- });
- available_commands.push(acp::AvailableCommand {
- name: "logout".to_owned(),
- description: "Authenticate".to_owned(),
- input: None,
- meta: None,
- });
+ available_commands.push(acp::AvailableCommand::new("login", "Authenticate"));
+ available_commands.push(acp::AvailableCommand::new("logout", "Authenticate"));
}
let has_commands = !available_commands.is_empty();
@@ -2572,7 +2552,7 @@ impl AcpThreadView {
acp::ToolKind::Think => IconName::ToolThink,
acp::ToolKind::Fetch => IconName::ToolWeb,
acp::ToolKind::SwitchMode => IconName::ArrowRightLeft,
- acp::ToolKind::Other => IconName::ToolHammer,
+ acp::ToolKind::Other | _ => IconName::ToolHammer,
})
}
.size(IconSize::Small)
@@ -2824,7 +2804,7 @@ impl AcpThreadView {
})
.gap_0p5()
.children(options.iter().map(move |option| {
- let option_id = SharedString::from(option.id.0.clone());
+ let option_id = SharedString::from(option.option_id.0.clone());
Button::new((option_id, entry_ix), option.name.clone())
.map(|this| {
let (this, action) = match option.kind {
@@ -2840,7 +2820,7 @@ impl AcpThreadView {
this.icon(IconName::Close).icon_color(Color::Error),
Some(&RejectOnce as &dyn Action),
),
- acp::PermissionOptionKind::RejectAlways => {
+ acp::PermissionOptionKind::RejectAlways | _ => {
(this.icon(IconName::Close).icon_color(Color::Error), None)
}
};
@@ -2865,7 +2845,7 @@ impl AcpThreadView {
.label_size(LabelSize::Small)
.on_click(cx.listener({
let tool_call_id = tool_call_id.clone();
- let option_id = option.id.clone();
+ let option_id = option.option_id.clone();
let option_kind = option.kind;
move |this, _, window, cx| {
this.authorize_tool_call(
@@ -3553,7 +3533,7 @@ impl AcpThreadView {
);
this.authenticate(
- acp::AuthMethodId(method_id.clone()),
+ acp::AuthMethodId::new(method_id.clone()),
window,
cx,
)
@@ -3847,10 +3827,6 @@ impl AcpThreadView {
.text_xs()
.text_color(cx.theme().colors().text_muted)
.child(match entry.status {
- acp::PlanEntryStatus::Pending => Icon::new(IconName::TodoPending)
- .size(IconSize::Small)
- .color(Color::Muted)
- .into_any_element(),
acp::PlanEntryStatus::InProgress => {
Icon::new(IconName::TodoProgress)
.size(IconSize::Small)
@@ -3864,6 +3840,12 @@ impl AcpThreadView {
.color(Color::Success)
.into_any_element()
}
+ acp::PlanEntryStatus::Pending | _ => {
+ Icon::new(IconName::TodoPending)
+ .size(IconSize::Small)
+ .color(Color::Muted)
+ .into_any_element()
+ }
})
.child(MarkdownElement::new(
entry.content.clone(),
@@ -4437,7 +4419,7 @@ impl AcpThreadView {
self.authorize_tool_call(
tool_call.id.clone(),
- option.id.clone(),
+ option.option_id.clone(),
option.kind,
window,
cx,
@@ -6253,27 +6235,18 @@ pub(crate) mod tests {
async fn test_notification_for_tool_authorization(cx: &mut TestAppContext) {
init_test(cx);
- let tool_call_id = acp::ToolCallId("1".into());
- let tool_call = acp::ToolCall {
- id: tool_call_id.clone(),
- title: "Label".into(),
- kind: acp::ToolKind::Edit,
- status: acp::ToolCallStatus::Pending,
- content: vec!["hi".into()],
- locations: vec![],
- raw_input: None,
- raw_output: None,
- meta: None,
- };
+ let tool_call_id = acp::ToolCallId::new("1");
+ let tool_call = acp::ToolCall::new(tool_call_id.clone(), "Label")
+ .kind(acp::ToolKind::Edit)
+ .content(vec!["hi".into()]);
let connection =
StubAgentConnection::new().with_permission_requests(HashMap::from_iter([(
tool_call_id,
- vec![acp::PermissionOption {
- id: acp::PermissionOptionId("1".into()),
- name: "Allow".into(),
- kind: acp::PermissionOptionKind::AllowOnce,
- meta: None,
- }],
+ vec![acp::PermissionOption::new(
+ "1".into(),
+ "Allow",
+ acp::PermissionOptionKind::AllowOnce,
+ )],
)]));
connection.set_next_prompt_updates(vec![acp::SessionUpdate::ToolCall(tool_call)]);
@@ -6492,10 +6465,7 @@ pub(crate) mod tests {
fn default_response() -> Self {
let conn = StubAgentConnection::new();
conn.set_next_prompt_updates(vec![acp::SessionUpdate::AgentMessageChunk(
- acp::ContentChunk {
- content: "Default response".into(),
- meta: None,
- },
+ acp::ContentChunk::new("Default response".into()),
)]);
Self::new(conn)
}
@@ -6552,13 +6522,13 @@ pub(crate) mod tests {
self,
project,
action_log,
- SessionId("test".into()),
- watch::Receiver::constant(acp::PromptCapabilities {
- image: true,
- audio: true,
- embedded_context: true,
- meta: None,
- }),
+ SessionId::new("test"),
+ watch::Receiver::constant(
+ acp::PromptCapabilities::new()
+ .image(true)
+ .audio(true)
+ .embedded_context(true),
+ ),
cx,
)
})))
@@ -6616,13 +6586,13 @@ pub(crate) mod tests {
self,
project,
action_log,
- SessionId("test".into()),
- watch::Receiver::constant(acp::PromptCapabilities {
- image: true,
- audio: true,
- embedded_context: true,
- meta: None,
- }),
+ SessionId::new("test"),
+ watch::Receiver::constant(
+ acp::PromptCapabilities::new()
+ .image(true)
+ .audio(true)
+ .embedded_context(true),
+ ),
cx,
)
})))
@@ -6646,10 +6616,7 @@ pub(crate) mod tests {
_params: acp::PromptRequest,
_cx: &mut App,
) -> Task<gpui::Result<acp::PromptResponse>> {
- Task::ready(Ok(acp::PromptResponse {
- stop_reason: acp::StopReason::Refusal,
- meta: None,
- }))
+ Task::ready(Ok(acp::PromptResponse::new(acp::StopReason::Refusal)))
}
fn cancel(&self, _session_id: &acp::SessionId, _cx: &mut App) {
@@ -6717,24 +6684,14 @@ pub(crate) mod tests {
.unwrap();
// First user message
- connection.set_next_prompt_updates(vec![acp::SessionUpdate::ToolCall(acp::ToolCall {
- id: acp::ToolCallId("tool1".into()),
- title: "Edit file 1".into(),
- kind: acp::ToolKind::Edit,
- status: acp::ToolCallStatus::Completed,
- content: vec![acp::ToolCallContent::Diff {
- diff: acp::Diff {
- path: "/project/test1.txt".into(),
- old_text: Some("old content 1".into()),
- new_text: "new content 1".into(),
- meta: None,
- },
- }],
- locations: vec![],
- raw_input: None,
- raw_output: None,
- meta: None,
- })]);
+ connection.set_next_prompt_updates(vec![acp::SessionUpdate::ToolCall(
+ acp::ToolCall::new("tool1", "Edit file 1")
+ .kind(acp::ToolKind::Edit)
+ .status(acp::ToolCallStatus::Completed)
+ .content(vec![acp::ToolCallContent::Diff(
+ acp::Diff::new("/project/test1.txt", "new content 1").old_text("old content 1"),
+ )]),
+ )]);
thread
.update(cx, |thread, cx| thread.send_raw("Give me a diff", cx))
@@ -6760,24 +6717,14 @@ pub(crate) mod tests {
});
// Second user message
- connection.set_next_prompt_updates(vec![acp::SessionUpdate::ToolCall(acp::ToolCall {
- id: acp::ToolCallId("tool2".into()),
- title: "Edit file 2".into(),
- kind: acp::ToolKind::Edit,
- status: acp::ToolCallStatus::Completed,
- content: vec![acp::ToolCallContent::Diff {
- diff: acp::Diff {
- path: "/project/test2.txt".into(),
- old_text: Some("old content 2".into()),
- new_text: "new content 2".into(),
- meta: None,
- },
- }],
- locations: vec![],
- raw_input: None,
- raw_output: None,
- meta: None,
- })]);
+ connection.set_next_prompt_updates(vec![acp::SessionUpdate::ToolCall(
+ acp::ToolCall::new("tool2", "Edit file 2")
+ .kind(acp::ToolKind::Edit)
+ .status(acp::ToolCallStatus::Completed)
+ .content(vec![acp::ToolCallContent::Diff(
+ acp::Diff::new("/project/test2.txt", "new content 2").old_text("old content 2"),
+ )]),
+ )]);
thread
.update(cx, |thread, cx| thread.send_raw("Another one", cx))
@@ -6851,14 +6798,7 @@ pub(crate) mod tests {
let connection = StubAgentConnection::new();
connection.set_next_prompt_updates(vec![acp::SessionUpdate::AgentMessageChunk(
- acp::ContentChunk {
- content: acp::ContentBlock::Text(acp::TextContent {
- text: "Response".into(),
- annotations: None,
- meta: None,
- }),
- meta: None,
- },
+ acp::ContentChunk::new("Response".into()),
)]);
let (thread_view, cx) = setup_thread_view(StubAgentServer::new(connection), cx).await;
@@ -6944,14 +6884,7 @@ pub(crate) mod tests {
let connection = StubAgentConnection::new();
connection.set_next_prompt_updates(vec![acp::SessionUpdate::AgentMessageChunk(
- acp::ContentChunk {
- content: acp::ContentBlock::Text(acp::TextContent {
- text: "Response".into(),
- annotations: None,
- meta: None,
- }),
- meta: None,
- },
+ acp::ContentChunk::new("Response".into()),
)]);
let (thread_view, cx) =
@@ -6991,14 +6924,7 @@ pub(crate) mod tests {
// Send
connection.set_next_prompt_updates(vec![acp::SessionUpdate::AgentMessageChunk(
- acp::ContentChunk {
- content: acp::ContentBlock::Text(acp::TextContent {
- text: "New Response".into(),
- annotations: None,
- meta: None,
- }),
- meta: None,
- },
+ acp::ContentChunk::new("New Response".into()),
)]);
user_message_editor.update_in(cx, |_editor, window, cx| {
@@ -7086,14 +7012,7 @@ pub(crate) mod tests {
cx.update(|_, cx| {
connection.send_update(
session_id.clone(),
- acp::SessionUpdate::AgentMessageChunk(acp::ContentChunk {
- content: acp::ContentBlock::Text(acp::TextContent {
- text: "Response".into(),
- annotations: None,
- meta: None,
- }),
- meta: None,
- }),
+ acp::SessionUpdate::AgentMessageChunk(acp::ContentChunk::new("Response".into())),
cx,
);
connection.end_turn(session_id, acp::StopReason::EndTurn);
@@ -7145,10 +7064,9 @@ pub(crate) mod tests {
cx.update(|_, cx| {
connection.send_update(
session_id.clone(),
- acp::SessionUpdate::AgentMessageChunk(acp::ContentChunk {
- content: "Message 1 resp".into(),
- meta: None,
- }),
+ acp::SessionUpdate::AgentMessageChunk(acp::ContentChunk::new(
+ "Message 1 resp".into(),
+ )),
cx,
);
});
@@ -7182,10 +7100,7 @@ pub(crate) mod tests {
// Simulate a response sent after beginning to cancel
connection.send_update(
session_id.clone(),
- acp::SessionUpdate::AgentMessageChunk(acp::ContentChunk {
- content: "onse".into(),
- meta: None,
- }),
+ acp::SessionUpdate::AgentMessageChunk(acp::ContentChunk::new("onse".into())),
cx,
);
});
@@ -7216,10 +7131,9 @@ pub(crate) mod tests {
cx.update(|_, cx| {
connection.send_update(
session_id.clone(),
- acp::SessionUpdate::AgentMessageChunk(acp::ContentChunk {
- content: "Message 2 response".into(),
- meta: None,
- }),
+ acp::SessionUpdate::AgentMessageChunk(acp::ContentChunk::new(
+ "Message 2 response".into(),
+ )),
cx,
);
connection.end_turn(session_id.clone(), acp::StopReason::EndTurn);
@@ -7258,14 +7172,7 @@ pub(crate) mod tests {
let connection = StubAgentConnection::new();
connection.set_next_prompt_updates(vec![acp::SessionUpdate::AgentMessageChunk(
- acp::ContentChunk {
- content: acp::ContentBlock::Text(acp::TextContent {
- text: "Response".into(),
- annotations: None,
- meta: None,
- }),
- meta: None,
- },
+ acp::ContentChunk::new("Response".into()),
)]);
let (thread_view, cx) = setup_thread_view(StubAgentServer::new(connection), cx).await;
@@ -7344,14 +7251,7 @@ pub(crate) mod tests {
let connection = StubAgentConnection::new();
connection.set_next_prompt_updates(vec![acp::SessionUpdate::AgentMessageChunk(
- acp::ContentChunk {
- content: acp::ContentBlock::Text(acp::TextContent {
- text: "Response".into(),
- annotations: None,
- meta: None,
- }),
- meta: None,
- },
+ acp::ContentChunk::new("Response".into()),
)]);
let (thread_view, cx) = setup_thread_view(StubAgentServer::new(connection), cx).await;
@@ -7399,54 +7299,4 @@ pub(crate) mod tests {
assert_eq!(text, expected_txt);
})
}
-
- #[gpui::test]
- async fn test_initialize_timeout(cx: &mut TestAppContext) {
- init_test(cx);
-
- struct InfiniteInitialize;
-
- impl AgentServer for InfiniteInitialize {
- fn telemetry_id(&self) -> &'static str {
- "test"
- }
-
- fn logo(&self) -> ui::IconName {
- ui::IconName::Ai
- }
-
- fn name(&self) -> SharedString {
- "Test".into()
- }
-
- fn connect(
- &self,
- _root_dir: Option<&Path>,
- _delegate: AgentServerDelegate,
- cx: &mut App,
- ) -> Task<gpui::Result<(Rc<dyn AgentConnection>, Option<task::SpawnInTerminal>)>>
- {
- cx.spawn(async |_| futures::future::pending().await)
- }
-
- fn into_any(self: Rc<Self>) -> Rc<dyn Any> {
- self
- }
- }
-
- let (thread_view, cx) = setup_thread_view(InfiniteInitialize, cx).await;
-
- cx.executor().advance_clock(Duration::from_secs(31));
- cx.run_until_parked();
-
- let error = thread_view.read_with(cx, |thread_view, _| match &thread_view.thread_state {
- ThreadState::LoadError(err) => err.clone(),
- _ => panic!("Incorrect thread state"),
- });
-
- match error {
- LoadError::Other(str) => assert!(str.contains("initialize")),
- _ => panic!("Unexpected load error"),
- }
- }
}
@@ -2685,16 +2685,17 @@ impl rules_library::InlineAssistDelegate for PromptLibraryInlineAssist {
return;
};
let project = workspace.read(cx).project().downgrade();
+ let thread_store = panel.read(cx).thread_store().clone();
assistant.assist(
prompt_editor,
self.workspace.clone(),
project,
- panel.read(cx).thread_store().clone(),
+ thread_store,
None,
initial_prompt,
window,
cx,
- )
+ );
})
}
@@ -7,6 +7,8 @@ mod buffer_codegen;
mod completion_provider;
mod context;
mod context_server_configuration;
+#[cfg(test)]
+mod evals;
mod inline_assistant;
mod inline_prompt_editor;
mod language_model_selector;
@@ -719,6 +719,7 @@ impl CodegenAlternative {
output_tokens = usage.output_tokens,
)
}
+
cx.emit(CodegenEvent::Finished);
cx.notify();
})
@@ -1114,7 +1114,6 @@ impl<T: PromptCompletionProviderDelegate> CompletionProvider for PromptCompletio
position: language::Anchor,
_text: &str,
_trigger_in_words: bool,
- _menu_is_open: bool,
cx: &mut Context<Editor>,
) -> bool {
let buffer = buffer.read(cx);
@@ -0,0 +1,89 @@
+use std::str::FromStr;
+
+use crate::inline_assistant::test::run_inline_assistant_test;
+
+use eval_utils::{EvalOutput, NoProcessor};
+use gpui::TestAppContext;
+use language_model::{LanguageModelRegistry, SelectedModel};
+use rand::{SeedableRng as _, rngs::StdRng};
+
+#[test]
+#[cfg_attr(not(feature = "unit-eval"), ignore)]
+fn eval_single_cursor_edit() {
+ eval_utils::eval(20, 1.0, NoProcessor, move || {
+ run_eval(
+ &EvalInput {
+ prompt: "Rename this variable to buffer_text".to_string(),
+ buffer: indoc::indoc! {"
+ struct EvalExampleStruct {
+ text: Strˇing,
+ prompt: String,
+ }
+ "}
+ .to_string(),
+ },
+ &|_, output| {
+ let expected = indoc::indoc! {"
+ struct EvalExampleStruct {
+ buffer_text: String,
+ prompt: String,
+ }
+ "};
+ if output == expected {
+ EvalOutput {
+ outcome: eval_utils::OutcomeKind::Passed,
+ data: "Passed!".to_string(),
+ metadata: (),
+ }
+ } else {
+ EvalOutput {
+ outcome: eval_utils::OutcomeKind::Failed,
+ data: format!("Failed to rename variable, output: {}", output),
+ metadata: (),
+ }
+ }
+ },
+ )
+ });
+}
+
+struct EvalInput {
+ buffer: String,
+ prompt: String,
+}
+
+fn run_eval(
+ input: &EvalInput,
+ judge: &dyn Fn(&EvalInput, &str) -> eval_utils::EvalOutput<()>,
+) -> eval_utils::EvalOutput<()> {
+ let dispatcher = gpui::TestDispatcher::new(StdRng::from_os_rng());
+ let mut cx = TestAppContext::build(dispatcher, None);
+ cx.skip_drawing();
+
+ let buffer_text = run_inline_assistant_test(
+ input.buffer.clone(),
+ input.prompt.clone(),
+ |cx| {
+ // Reconfigure to use a real model instead of the fake one
+ let model_name = std::env::var("ZED_AGENT_MODEL")
+ .unwrap_or("anthropic/claude-sonnet-4-latest".into());
+
+ let selected_model = SelectedModel::from_str(&model_name)
+ .expect("Invalid model format. Use 'provider/model-id'");
+
+ log::info!("Selected model: {selected_model:?}");
+
+ cx.update(|_, cx| {
+ LanguageModelRegistry::global(cx).update(cx, |registry, cx| {
+ registry.select_inline_assistant_model(Some(&selected_model), cx);
+ });
+ });
+ },
+ |_cx| {
+ log::info!("Waiting for actual response from the LLM...");
+ },
+ &mut cx,
+ );
+
+ judge(input, &buffer_text)
+}
@@ -32,7 +32,7 @@ use editor::{
},
};
use fs::Fs;
-use futures::FutureExt;
+use futures::{FutureExt, channel::mpsc};
use gpui::{
App, Context, Entity, Focusable, Global, HighlightStyle, Subscription, Task, UpdateGlobal,
WeakEntity, Window, point,
@@ -102,6 +102,7 @@ pub struct InlineAssistant {
prompt_builder: Arc<PromptBuilder>,
telemetry: Arc<Telemetry>,
fs: Arc<dyn Fs>,
+ _inline_assistant_completions: Option<mpsc::UnboundedSender<anyhow::Result<InlineAssistId>>>,
}
impl Global for InlineAssistant {}
@@ -123,9 +124,18 @@ impl InlineAssistant {
prompt_builder,
telemetry,
fs,
+ _inline_assistant_completions: None,
}
}
+ #[cfg(any(test, feature = "test-support"))]
+ pub fn set_completion_receiver(
+ &mut self,
+ sender: mpsc::UnboundedSender<anyhow::Result<InlineAssistId>>,
+ ) {
+ self._inline_assistant_completions = Some(sender);
+ }
+
pub fn register_workspace(
&mut self,
workspace: &Entity<Workspace>,
@@ -287,7 +297,7 @@ impl InlineAssistant {
action.prompt.clone(),
window,
cx,
- )
+ );
})
}
InlineAssistTarget::Terminal(active_terminal) => {
@@ -301,8 +311,8 @@ impl InlineAssistant {
action.prompt.clone(),
window,
cx,
- )
- })
+ );
+ });
}
};
@@ -598,13 +608,13 @@ impl InlineAssistant {
initial_prompt: Option<String>,
window: &mut Window,
cx: &mut App,
- ) {
+ ) -> Option<InlineAssistId> {
let snapshot = editor.update(cx, |editor, cx| editor.snapshot(window, cx));
let Some((codegen_ranges, newest_selection)) =
self.codegen_ranges(editor, &snapshot, window, cx)
else {
- return;
+ return None;
};
let assist_to_focus = self.batch_assist(
@@ -624,6 +634,8 @@ impl InlineAssistant {
if let Some(assist_id) = assist_to_focus {
self.focus_assist(assist_id, window, cx);
}
+
+ assist_to_focus
}
pub fn suggest_assist(
@@ -1740,6 +1752,16 @@ impl InlineAssist {
&& assist.decorations.is_none()
&& let Some(workspace) = assist.workspace.upgrade()
{
+ #[cfg(any(test, feature = "test-support"))]
+ if let Some(sender) = &mut this._inline_assistant_completions {
+ sender
+ .unbounded_send(Err(anyhow::anyhow!(
+ "Inline assistant error: {}",
+ error
+ )))
+ .ok();
+ }
+
let error = format!("Inline assistant error: {}", error);
workspace.update(cx, |workspace, cx| {
struct InlineAssistantError;
@@ -1750,6 +1772,11 @@ impl InlineAssist {
workspace.show_toast(Toast::new(id, error), cx);
})
+ } else {
+ #[cfg(any(test, feature = "test-support"))]
+ if let Some(sender) = &mut this._inline_assistant_completions {
+ sender.unbounded_send(Ok(assist_id)).ok();
+ }
}
if assist.decorations.is_none() {
@@ -1943,3 +1970,160 @@ fn merge_ranges(ranges: &mut Vec<Range<Anchor>>, buffer: &MultiBufferSnapshot) {
}
}
}
+
+#[cfg(any(test, feature = "test-support"))]
+pub mod test {
+ use std::sync::Arc;
+
+ use agent::HistoryStore;
+ use assistant_text_thread::TextThreadStore;
+ use client::{Client, UserStore};
+ use editor::{Editor, MultiBuffer, MultiBufferOffset};
+ use fs::FakeFs;
+ use futures::channel::mpsc;
+ use gpui::{AppContext, TestAppContext, UpdateGlobal as _};
+ use language::Buffer;
+ use language_model::LanguageModelRegistry;
+ use project::Project;
+ use prompt_store::PromptBuilder;
+ use smol::stream::StreamExt as _;
+ use util::test::marked_text_ranges;
+ use workspace::Workspace;
+
+ use crate::InlineAssistant;
+
+ pub fn run_inline_assistant_test<SetupF, TestF>(
+ base_buffer: String,
+ prompt: String,
+ setup: SetupF,
+ test: TestF,
+ cx: &mut TestAppContext,
+ ) -> String
+ where
+ SetupF: FnOnce(&mut gpui::VisualTestContext),
+ TestF: FnOnce(&mut gpui::VisualTestContext),
+ {
+ let fs = FakeFs::new(cx.executor());
+ let app_state = cx.update(|cx| workspace::AppState::test(cx));
+ let prompt_builder = Arc::new(PromptBuilder::new(None).unwrap());
+ let http = Arc::new(reqwest_client::ReqwestClient::user_agent("agent tests").unwrap());
+ let client = cx.update(|cx| {
+ cx.set_http_client(http);
+ Client::production(cx)
+ });
+ let mut inline_assistant =
+ InlineAssistant::new(fs.clone(), prompt_builder, client.telemetry().clone());
+
+ let (tx, mut completion_rx) = mpsc::unbounded();
+ inline_assistant.set_completion_receiver(tx);
+
+ // Initialize settings and client
+ cx.update(|cx| {
+ gpui_tokio::init(cx);
+ settings::init(cx);
+ client::init(&client, cx);
+ workspace::init(app_state.clone(), cx);
+ let user_store = cx.new(|cx| UserStore::new(client.clone(), cx));
+ language_model::init(client.clone(), cx);
+ language_models::init(user_store, client.clone(), cx);
+
+ cx.set_global(inline_assistant);
+ });
+
+ let project = cx
+ .executor()
+ .block_test(async { Project::test(fs.clone(), [], cx).await });
+
+ // Create workspace with window
+ let (workspace, cx) = cx.add_window_view(|window, cx| {
+ window.activate_window();
+ Workspace::new(None, project.clone(), app_state.clone(), window, cx)
+ });
+
+ setup(cx);
+
+ let (_editor, buffer) = cx.update(|window, cx| {
+ let buffer = cx.new(|cx| Buffer::local("", cx));
+ let multibuffer = cx.new(|cx| MultiBuffer::singleton(buffer.clone(), cx));
+ let editor = cx.new(|cx| Editor::for_multibuffer(multibuffer, None, window, cx));
+ editor.update(cx, |editor, cx| {
+ let (unmarked_text, selection_ranges) = marked_text_ranges(&base_buffer, true);
+ editor.set_text(unmarked_text, window, cx);
+ editor.change_selections(Default::default(), window, cx, |s| {
+ s.select_ranges(
+ selection_ranges.into_iter().map(|range| {
+ MultiBufferOffset(range.start)..MultiBufferOffset(range.end)
+ }),
+ )
+ })
+ });
+
+ let text_thread_store = cx.new(|cx| TextThreadStore::fake(project.clone(), cx));
+ let history_store = cx.new(|cx| HistoryStore::new(text_thread_store, cx));
+
+ // Add editor to workspace
+ workspace.update(cx, |workspace, cx| {
+ workspace.add_item_to_active_pane(Box::new(editor.clone()), None, true, window, cx);
+ });
+
+ // Call assist method
+ InlineAssistant::update_global(cx, |inline_assistant, cx| {
+ let assist_id = inline_assistant
+ .assist(
+ &editor,
+ workspace.downgrade(),
+ project.downgrade(),
+ history_store, // thread_store
+ None, // prompt_store
+ Some(prompt),
+ window,
+ cx,
+ )
+ .unwrap();
+
+ inline_assistant.start_assist(assist_id, window, cx);
+ });
+
+ (editor, buffer)
+ });
+
+ cx.run_until_parked();
+
+ test(cx);
+
+ cx.executor()
+ .block_test(async { completion_rx.next().await });
+
+ buffer.read_with(cx, |buffer, _| buffer.text())
+ }
+
+ #[allow(unused)]
+ pub fn test_inline_assistant(
+ base_buffer: &'static str,
+ llm_output: &'static str,
+ cx: &mut TestAppContext,
+ ) -> String {
+ run_inline_assistant_test(
+ base_buffer.to_string(),
+ "Prompt doesn't matter because we're using a fake model".to_string(),
+ |cx| {
+ cx.update(|_, cx| LanguageModelRegistry::test(cx));
+ },
+ |cx| {
+ let fake_model = cx.update(|_, cx| {
+ LanguageModelRegistry::global(cx)
+ .update(cx, |registry, _| registry.fake_model())
+ });
+ let fake = fake_model.as_fake();
+
+ // let fake = fake_model;
+ fake.send_last_completion_stream_text_chunk(llm_output.to_string());
+ fake.end_last_completion_stream();
+
+ // Run again to process the model's response
+ cx.run_until_parked();
+ },
+ cx,
+ )
+ }
+}
@@ -341,7 +341,6 @@ impl CompletionProvider for SlashCommandCompletionProvider {
position: language::Anchor,
_text: &str,
_trigger_in_words: bool,
- _menu_is_open: bool,
cx: &mut Context<Editor>,
) -> bool {
let buffer = buffer.read(cx);
@@ -2622,11 +2622,13 @@ impl SearchableItem for TextThreadEditor {
fn update_matches(
&mut self,
matches: &[Self::Match],
+ active_match_index: Option<usize>,
window: &mut Window,
cx: &mut Context<Self>,
) {
- self.editor
- .update(cx, |editor, cx| editor.update_matches(matches, window, cx));
+ self.editor.update(cx, |editor, cx| {
+ editor.update_matches(matches, active_match_index, window, cx)
+ });
}
fn query_suggestion(&mut self, window: &mut Window, cx: &mut Context<Self>) -> String {
@@ -584,41 +584,100 @@ impl Model {
}
}
- pub fn cross_region_inference_id(&self, region: &str) -> anyhow::Result<String> {
+ pub fn cross_region_inference_id(
+ &self,
+ region: &str,
+ allow_global: bool,
+ ) -> anyhow::Result<String> {
+ // List derived from here:
+ // https://docs.aws.amazon.com/bedrock/latest/userguide/inference-profiles-support.html#inference-profiles-support-system
+ let model_id = self.request_id();
+
+ let supports_global = matches!(
+ self,
+ Model::ClaudeOpus4_5
+ | Model::ClaudeOpus4_5Thinking
+ | Model::ClaudeHaiku4_5
+ | Model::ClaudeSonnet4
+ | Model::ClaudeSonnet4Thinking
+ | Model::ClaudeSonnet4_5
+ | Model::ClaudeSonnet4_5Thinking
+ );
+
let region_group = if region.starts_with("us-gov-") {
"us-gov"
- } else if region.starts_with("us-") {
- "us"
+ } else if region.starts_with("us-")
+ || region.starts_with("ca-")
+ || region.starts_with("sa-")
+ {
+ if allow_global && supports_global {
+ "global"
+ } else {
+ "us"
+ }
} else if region.starts_with("eu-") {
- "eu"
+ if allow_global && supports_global {
+ "global"
+ } else {
+ "eu"
+ }
} else if region.starts_with("ap-") || region == "me-central-1" || region == "me-south-1" {
- "apac"
- } else if region.starts_with("ca-") || region.starts_with("sa-") {
- // Canada and South America regions - default to US profiles
- "us"
+ if allow_global && supports_global {
+ "global"
+ } else {
+ "apac"
+ }
} else {
anyhow::bail!("Unsupported Region {region}");
};
- let model_id = self.request_id();
+ match (self, region_group, region) {
+ (Model::Custom { .. }, _, _) => Ok(self.request_id().into()),
- match (self, region_group) {
- // Custom models can't have CRI IDs
- (Model::Custom { .. }, _) => Ok(self.request_id().into()),
+ (
+ Model::ClaudeOpus4_5
+ | Model::ClaudeOpus4_5Thinking
+ | Model::ClaudeHaiku4_5
+ | Model::ClaudeSonnet4
+ | Model::ClaudeSonnet4Thinking
+ | Model::ClaudeSonnet4_5
+ | Model::ClaudeSonnet4_5Thinking,
+ "global",
+ _,
+ ) => Ok(format!("{}.{}", region_group, model_id)),
- // Models with US Gov only
- (Model::Claude3_5Sonnet, "us-gov") | (Model::Claude3Haiku, "us-gov") => {
- Ok(format!("{}.{}", region_group, model_id))
- }
+ (
+ Model::Claude3Haiku
+ | Model::Claude3_5Sonnet
+ | Model::Claude3_7Sonnet
+ | Model::Claude3_7SonnetThinking
+ | Model::ClaudeSonnet4_5
+ | Model::ClaudeSonnet4_5Thinking,
+ "us-gov",
+ _,
+ ) => Ok(format!("{}.{}", region_group, model_id)),
- // Available everywhere
- (Model::AmazonNovaLite | Model::AmazonNovaMicro | Model::AmazonNovaPro, _) => {
- Ok(format!("{}.{}", region_group, model_id))
+ (
+ Model::ClaudeHaiku4_5 | Model::ClaudeSonnet4_5 | Model::ClaudeSonnet4_5Thinking,
+ "apac",
+ "ap-southeast-2" | "ap-southeast-4",
+ ) => Ok(format!("au.{}", model_id)),
+
+ (
+ Model::ClaudeHaiku4_5 | Model::ClaudeSonnet4_5 | Model::ClaudeSonnet4_5Thinking,
+ "apac",
+ "ap-northeast-1" | "ap-northeast-3",
+ ) => Ok(format!("jp.{}", model_id)),
+
+ (Model::AmazonNovaLite, "us", r) if r.starts_with("ca-") => {
+ Ok(format!("ca.{}", model_id))
}
- // Models in US
(
Model::AmazonNovaPremier
+ | Model::AmazonNovaLite
+ | Model::AmazonNovaMicro
+ | Model::AmazonNovaPro
| Model::Claude3_5Haiku
| Model::ClaudeHaiku4_5
| Model::Claude3_5Sonnet
@@ -655,16 +714,18 @@ impl Model {
| Model::PalmyraWriterX4
| Model::PalmyraWriterX5,
"us",
+ _,
) => Ok(format!("{}.{}", region_group, model_id)),
- // Models available in EU
(
- Model::Claude3_5Sonnet
+ Model::AmazonNovaLite
+ | Model::AmazonNovaMicro
+ | Model::AmazonNovaPro
+ | Model::Claude3_5Sonnet
| Model::ClaudeHaiku4_5
| Model::Claude3_7Sonnet
| Model::Claude3_7SonnetThinking
| Model::ClaudeSonnet4
- | Model::ClaudeSonnet4Thinking
| Model::ClaudeSonnet4_5
| Model::ClaudeSonnet4_5Thinking
| Model::Claude3Haiku
@@ -673,26 +734,26 @@ impl Model {
| Model::MetaLlama323BInstructV1
| Model::MistralPixtralLarge2502V1,
"eu",
+ _,
) => Ok(format!("{}.{}", region_group, model_id)),
- // Models available in APAC
(
- Model::Claude3_5Sonnet
+ Model::AmazonNovaLite
+ | Model::AmazonNovaMicro
+ | Model::AmazonNovaPro
+ | Model::Claude3_5Sonnet
| Model::Claude3_5SonnetV2
| Model::ClaudeHaiku4_5
- | Model::Claude3Haiku
- | Model::Claude3Sonnet
| Model::Claude3_7Sonnet
| Model::Claude3_7SonnetThinking
| Model::ClaudeSonnet4
- | Model::ClaudeSonnet4Thinking
- | Model::ClaudeSonnet4_5
- | Model::ClaudeSonnet4_5Thinking,
+ | Model::Claude3Haiku
+ | Model::Claude3Sonnet,
"apac",
+ _,
) => Ok(format!("{}.{}", region_group, model_id)),
- // Any other combination is not supported
- _ => Ok(self.request_id().into()),
+ _ => Ok(model_id.into()),
}
}
}
@@ -705,15 +766,15 @@ mod tests {
fn test_us_region_inference_ids() -> anyhow::Result<()> {
// Test US regions
assert_eq!(
- Model::Claude3_5SonnetV2.cross_region_inference_id("us-east-1")?,
+ Model::Claude3_5SonnetV2.cross_region_inference_id("us-east-1", false)?,
"us.anthropic.claude-3-5-sonnet-20241022-v2:0"
);
assert_eq!(
- Model::Claude3_5SonnetV2.cross_region_inference_id("us-west-2")?,
+ Model::Claude3_5SonnetV2.cross_region_inference_id("us-west-2", false)?,
"us.anthropic.claude-3-5-sonnet-20241022-v2:0"
);
assert_eq!(
- Model::AmazonNovaPro.cross_region_inference_id("us-east-2")?,
+ Model::AmazonNovaPro.cross_region_inference_id("us-east-2", false)?,
"us.amazon.nova-pro-v1:0"
);
Ok(())
@@ -723,19 +784,19 @@ mod tests {
fn test_eu_region_inference_ids() -> anyhow::Result<()> {
// Test European regions
assert_eq!(
- Model::ClaudeSonnet4.cross_region_inference_id("eu-west-1")?,
+ Model::ClaudeSonnet4.cross_region_inference_id("eu-west-1", false)?,
"eu.anthropic.claude-sonnet-4-20250514-v1:0"
);
assert_eq!(
- Model::ClaudeSonnet4_5.cross_region_inference_id("eu-west-1")?,
+ Model::ClaudeSonnet4_5.cross_region_inference_id("eu-west-1", false)?,
"eu.anthropic.claude-sonnet-4-5-20250929-v1:0"
);
assert_eq!(
- Model::Claude3Sonnet.cross_region_inference_id("eu-west-1")?,
+ Model::Claude3Sonnet.cross_region_inference_id("eu-west-1", false)?,
"eu.anthropic.claude-3-sonnet-20240229-v1:0"
);
assert_eq!(
- Model::AmazonNovaMicro.cross_region_inference_id("eu-north-1")?,
+ Model::AmazonNovaMicro.cross_region_inference_id("eu-north-1", false)?,
"eu.amazon.nova-micro-v1:0"
);
Ok(())
@@ -745,15 +806,15 @@ mod tests {
fn test_apac_region_inference_ids() -> anyhow::Result<()> {
// Test Asia-Pacific regions
assert_eq!(
- Model::Claude3_5SonnetV2.cross_region_inference_id("ap-northeast-1")?,
+ Model::Claude3_5SonnetV2.cross_region_inference_id("ap-northeast-1", false)?,
"apac.anthropic.claude-3-5-sonnet-20241022-v2:0"
);
assert_eq!(
- Model::Claude3_5SonnetV2.cross_region_inference_id("ap-southeast-2")?,
+ Model::Claude3_5SonnetV2.cross_region_inference_id("ap-southeast-2", false)?,
"apac.anthropic.claude-3-5-sonnet-20241022-v2:0"
);
assert_eq!(
- Model::AmazonNovaLite.cross_region_inference_id("ap-south-1")?,
+ Model::AmazonNovaLite.cross_region_inference_id("ap-south-1", false)?,
"apac.amazon.nova-lite-v1:0"
);
Ok(())
@@ -763,11 +824,11 @@ mod tests {
fn test_gov_region_inference_ids() -> anyhow::Result<()> {
// Test Government regions
assert_eq!(
- Model::Claude3_5Sonnet.cross_region_inference_id("us-gov-east-1")?,
+ Model::Claude3_5Sonnet.cross_region_inference_id("us-gov-east-1", false)?,
"us-gov.anthropic.claude-3-5-sonnet-20240620-v1:0"
);
assert_eq!(
- Model::Claude3Haiku.cross_region_inference_id("us-gov-west-1")?,
+ Model::Claude3Haiku.cross_region_inference_id("us-gov-west-1", false)?,
"us-gov.anthropic.claude-3-haiku-20240307-v1:0"
);
Ok(())
@@ -777,15 +838,15 @@ mod tests {
fn test_meta_models_inference_ids() -> anyhow::Result<()> {
// Test Meta models
assert_eq!(
- Model::MetaLlama370BInstructV1.cross_region_inference_id("us-east-1")?,
+ Model::MetaLlama370BInstructV1.cross_region_inference_id("us-east-1", false)?,
"meta.llama3-70b-instruct-v1:0"
);
assert_eq!(
- Model::MetaLlama3170BInstructV1.cross_region_inference_id("us-east-1")?,
+ Model::MetaLlama3170BInstructV1.cross_region_inference_id("us-east-1", false)?,
"us.meta.llama3-1-70b-instruct-v1:0"
);
assert_eq!(
- Model::MetaLlama321BInstructV1.cross_region_inference_id("eu-west-1")?,
+ Model::MetaLlama321BInstructV1.cross_region_inference_id("eu-west-1", false)?,
"eu.meta.llama3-2-1b-instruct-v1:0"
);
Ok(())
@@ -796,11 +857,11 @@ mod tests {
// Mistral models don't follow the regional prefix pattern,
// so they should return their original IDs
assert_eq!(
- Model::MistralMistralLarge2402V1.cross_region_inference_id("us-east-1")?,
+ Model::MistralMistralLarge2402V1.cross_region_inference_id("us-east-1", false)?,
"mistral.mistral-large-2402-v1:0"
);
assert_eq!(
- Model::MistralMixtral8x7BInstructV0.cross_region_inference_id("eu-west-1")?,
+ Model::MistralMixtral8x7BInstructV0.cross_region_inference_id("eu-west-1", false)?,
"mistral.mixtral-8x7b-instruct-v0:1"
);
Ok(())
@@ -811,11 +872,11 @@ mod tests {
// AI21 models don't follow the regional prefix pattern,
// so they should return their original IDs
assert_eq!(
- Model::AI21J2UltraV1.cross_region_inference_id("us-east-1")?,
+ Model::AI21J2UltraV1.cross_region_inference_id("us-east-1", false)?,
"ai21.j2-ultra-v1"
);
assert_eq!(
- Model::AI21JambaInstructV1.cross_region_inference_id("eu-west-1")?,
+ Model::AI21JambaInstructV1.cross_region_inference_id("eu-west-1", false)?,
"ai21.jamba-instruct-v1:0"
);
Ok(())
@@ -826,11 +887,11 @@ mod tests {
// Cohere models don't follow the regional prefix pattern,
// so they should return their original IDs
assert_eq!(
- Model::CohereCommandRV1.cross_region_inference_id("us-east-1")?,
+ Model::CohereCommandRV1.cross_region_inference_id("us-east-1", false)?,
"cohere.command-r-v1:0"
);
assert_eq!(
- Model::CohereCommandTextV14_4k.cross_region_inference_id("ap-southeast-1")?,
+ Model::CohereCommandTextV14_4k.cross_region_inference_id("ap-southeast-1", false)?,
"cohere.command-text-v14:7:4k"
);
Ok(())
@@ -850,10 +911,17 @@ mod tests {
// Custom model should return its name unchanged
assert_eq!(
- custom_model.cross_region_inference_id("us-east-1")?,
+ custom_model.cross_region_inference_id("us-east-1", false)?,
"custom.my-model-v1:0"
);
+ // Test that models without global support fall back to regional when allow_global is true
+ assert_eq!(
+ Model::AmazonNovaPro.cross_region_inference_id("us-east-1", true)?,
+ "us.amazon.nova-pro-v1:0",
+ "Nova Pro should fall back to regional profile even when allow_global is true"
+ );
+
Ok(())
}
@@ -892,3 +960,28 @@ mod tests {
);
}
}
+
+#[test]
+fn test_global_inference_ids() -> anyhow::Result<()> {
+ // Test global inference for models that support it when allow_global is true
+ assert_eq!(
+ Model::ClaudeSonnet4.cross_region_inference_id("us-east-1", true)?,
+ "global.anthropic.claude-sonnet-4-20250514-v1:0"
+ );
+ assert_eq!(
+ Model::ClaudeSonnet4_5.cross_region_inference_id("eu-west-1", true)?,
+ "global.anthropic.claude-sonnet-4-5-20250929-v1:0"
+ );
+ assert_eq!(
+ Model::ClaudeHaiku4_5.cross_region_inference_id("ap-south-1", true)?,
+ "global.anthropic.claude-haiku-4-5-20251001-v1:0"
+ );
+
+ // Test that regional prefix is used when allow_global is false
+ assert_eq!(
+ Model::ClaudeSonnet4.cross_region_inference_id("us-east-1", false)?,
+ "us.anthropic.claude-sonnet-4-20250514-v1:0"
+ );
+
+ Ok(())
+}
@@ -524,6 +524,16 @@ impl Room {
self.id
}
+ pub fn room_id(&self) -> impl Future<Output = Option<String>> + 'static {
+ let room = self.live_kit.as_ref().map(|lk| lk.room.clone());
+ async move {
+ let room = room?;
+ let sid = room.sid().await;
+ let name = room.name();
+ Some(format!("{} (sid: {sid})", name))
+ }
+ }
+
pub fn status(&self) -> RoomStatus {
self.status
}
@@ -1723,6 +1723,10 @@ impl ProtoClient for Client {
fn is_via_collab(&self) -> bool {
true
}
+
+ fn has_wsl_interop(&self) -> bool {
+ false
+ }
}
/// prefix for the zed:// url scheme
@@ -206,11 +206,16 @@ pub struct AcceptEditPredictionBody {
pub request_id: String,
}
-#[derive(Debug, Clone, Serialize, Deserialize)]
+#[derive(Debug, Clone, Deserialize)]
pub struct RejectEditPredictionsBody {
pub rejections: Vec<EditPredictionRejection>,
}
+#[derive(Debug, Clone, Serialize)]
+pub struct RejectEditPredictionsBodyRef<'a> {
+ pub rejections: &'a [EditPredictionRejection],
+}
+
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub struct EditPredictionRejection {
pub request_id: String,
@@ -469,6 +469,8 @@ impl Server {
.add_request_handler(forward_mutating_project_request::<proto::GetBlobContent>)
.add_request_handler(forward_mutating_project_request::<proto::GitCreateBranch>)
.add_request_handler(forward_mutating_project_request::<proto::GitChangeBranch>)
+ .add_request_handler(forward_mutating_project_request::<proto::GitCreateRemote>)
+ .add_request_handler(forward_mutating_project_request::<proto::GitRemoveRemote>)
.add_request_handler(forward_mutating_project_request::<proto::CheckForPushedCommits>)
.add_message_handler(broadcast_project_message_from_host::<proto::AdvertiseContexts>)
.add_message_handler(update_context)
@@ -25,6 +25,7 @@ use gpui::{
use indoc::indoc;
use language::FakeLspAdapter;
use lsp::LSP_REQUEST_TIMEOUT;
+use pretty_assertions::assert_eq;
use project::{
ProgressToken, ProjectPath, SERVER_PROGRESS_THROTTLE_TIMEOUT,
lsp_store::lsp_ext_command::{ExpandedMacro, LspExtExpandMacro},
@@ -3192,13 +3193,12 @@ async fn test_lsp_pull_diagnostics(
.collect::<Vec<_>>();
let expected_messages = [
expected_pull_diagnostic_lib_message,
- // TODO bug: the pushed diagnostics are not being sent to the client when they open the corresponding buffer.
- // expected_push_diagnostic_lib_message,
+ expected_push_diagnostic_lib_message,
];
assert_eq!(
all_diagnostics.len(),
- 1,
- "Expected pull diagnostics, but got: {all_diagnostics:?}"
+ 2,
+ "Expected pull and push diagnostics, but got: {all_diagnostics:?}"
);
for diagnostic in all_diagnostics {
assert!(
@@ -3258,14 +3258,15 @@ async fn test_lsp_pull_diagnostics(
.diagnostics_in_range(MultiBufferOffset(0)..snapshot.len())
.collect::<Vec<_>>();
let expected_messages = [
- expected_workspace_pull_diagnostics_lib_message,
- // TODO bug: the pushed diagnostics are not being sent to the client when they open the corresponding buffer.
- // expected_push_diagnostic_lib_message,
+ // Despite workspace diagnostics being provided,
+ // the currently open file's diagnostics should be preferred, as LSP suggests.
+ expected_pull_diagnostic_lib_message,
+ expected_push_diagnostic_lib_message,
];
assert_eq!(
all_diagnostics.len(),
- 1,
- "Expected pull diagnostics, but got: {all_diagnostics:?}"
+ 2,
+ "Expected pull and push diagnostics, but got: {all_diagnostics:?}"
);
for diagnostic in all_diagnostics {
assert!(
@@ -3378,8 +3379,9 @@ async fn test_lsp_pull_diagnostics(
"Another workspace diagnostics pull should happen after the diagnostics refresh server request"
);
{
- assert!(
- diagnostics_pulls_result_ids.lock().await.len() == diagnostic_pulls_result_ids,
+ assert_eq!(
+ diagnostics_pulls_result_ids.lock().await.len(),
+ diagnostic_pulls_result_ids,
"Pulls should not happen hence no extra ids should appear"
);
assert!(
@@ -3397,7 +3399,7 @@ async fn test_lsp_pull_diagnostics(
expected_pull_diagnostic_lib_message,
expected_push_diagnostic_lib_message,
];
- assert_eq!(all_diagnostics.len(), 1);
+ assert_eq!(all_diagnostics.len(), 2);
for diagnostic in &all_diagnostics {
assert!(
expected_messages.contains(&diagnostic.diagnostic.message.as_str()),
@@ -37,7 +37,7 @@ use ui::{
};
use util::{ResultExt, TryFutureExt, maybe};
use workspace::{
- Deafen, LeaveCall, Mute, OpenChannelNotes, ScreenShare, ShareProject, Workspace,
+ CopyRoomId, Deafen, LeaveCall, Mute, OpenChannelNotes, ScreenShare, ShareProject, Workspace,
dock::{DockPosition, Panel, PanelEvent},
notifications::{DetachAndPromptErr, NotifyResultExt},
};
@@ -128,6 +128,32 @@ pub fn init(cx: &mut App) {
workspace.register_action(|_, _: &LeaveCall, window, cx| {
CollabPanel::leave_call(window, cx);
});
+ workspace.register_action(|workspace, _: &CopyRoomId, window, cx| {
+ use workspace::notifications::{NotificationId, NotifyTaskExt as _};
+
+ struct RoomIdCopiedToast;
+
+ if let Some(room) = ActiveCall::global(cx).read(cx).room() {
+ let room_id_fut = room.read(cx).room_id();
+ cx.spawn(async move |workspace, cx| {
+ let room_id = room_id_fut.await.context("Failed to get livekit room")?;
+ workspace.update(cx, |workspace, cx| {
+ cx.write_to_clipboard(ClipboardItem::new_string(room_id));
+ workspace.show_toast(
+ workspace::Toast::new(
+ NotificationId::unique::<RoomIdCopiedToast>(),
+ "Room ID copied to clipboard",
+ )
+ .autohide(),
+ cx,
+ );
+ })
+ })
+ .detach_and_notify_err(window, cx);
+ } else {
+ workspace.show_error(&"There’s no active call; join one first.", cx);
+ }
+ });
workspace.register_action(|workspace, _: &ShareProject, window, cx| {
let project = workspace.project().clone();
println!("{project:?}");
@@ -23,6 +23,9 @@ zstd.workspace = true
[target.'cfg(target_os = "macos")'.dependencies]
mach2.workspace = true
+[target.'cfg(target_os = "windows")'.dependencies]
+windows.workspace = true
+
[lints]
workspace = true
@@ -3,6 +3,8 @@ use log::info;
use minidumper::{Client, LoopAction, MinidumpBinary};
use release_channel::{RELEASE_CHANNEL, ReleaseChannel};
use serde::{Deserialize, Serialize};
+
+#[cfg(not(target_os = "windows"))]
use smol::process::Command;
#[cfg(target_os = "macos")]
@@ -70,11 +72,16 @@ pub async fn init(crash_init: InitCrashHandler) {
// used by the crash handler isn't destroyed correctly which causes it to stay on the file
// system and block further attempts to initialize crash handlers with that socket path.
let socket_name = paths::temp_dir().join(format!("zed-crash-handler-{zed_pid}"));
+ #[cfg(not(target_os = "windows"))]
let _crash_handler = Command::new(exe)
.arg("--crash-handler")
.arg(&socket_name)
.spawn()
.expect("unable to spawn server process");
+
+ #[cfg(target_os = "windows")]
+ spawn_crash_handler_windows(&exe, &socket_name);
+
#[cfg(target_os = "linux")]
let server_pid = _crash_handler.id();
info!("spawning crash handler process");
@@ -342,6 +349,57 @@ pub fn panic_hook(info: &PanicHookInfo) {
}
}
+#[cfg(target_os = "windows")]
+fn spawn_crash_handler_windows(exe: &Path, socket_name: &Path) {
+ use std::ffi::OsStr;
+ use std::iter::once;
+ use std::os::windows::ffi::OsStrExt;
+ use windows::Win32::System::Threading::{
+ CreateProcessW, PROCESS_CREATION_FLAGS, PROCESS_INFORMATION, STARTF_FORCEOFFFEEDBACK,
+ STARTUPINFOW,
+ };
+ use windows::core::PWSTR;
+
+ let mut command_line: Vec<u16> = OsStr::new(&format!(
+ "\"{}\" --crash-handler \"{}\"",
+ exe.display(),
+ socket_name.display()
+ ))
+ .encode_wide()
+ .chain(once(0))
+ .collect();
+
+ let mut startup_info = STARTUPINFOW::default();
+ startup_info.cb = std::mem::size_of::<STARTUPINFOW>() as u32;
+
+ // By default, Windows enables a "busy" cursor when a GUI application is launched.
+ // This cursor is disabled once the application starts processing window messages.
+ // Since the crash handler process doesn't process messages, this "busy" cursor stays enabled for a long time.
+ // Disable the cursor feedback to prevent this from happening.
+ startup_info.dwFlags = STARTF_FORCEOFFFEEDBACK;
+
+ let mut process_info = PROCESS_INFORMATION::default();
+
+ unsafe {
+ CreateProcessW(
+ None,
+ Some(PWSTR(command_line.as_mut_ptr())),
+ None,
+ None,
+ false,
+ PROCESS_CREATION_FLAGS(0),
+ None,
+ None,
+ &startup_info,
+ &mut process_info,
+ )
+ .expect("unable to spawn server process");
+
+ windows::Win32::Foundation::CloseHandle(process_info.hProcess).ok();
+ windows::Win32::Foundation::CloseHandle(process_info.hThread).ok();
+ }
+}
+
pub fn crash_server(socket: &Path) {
let Ok(mut server) = minidumper::Server::with_name(socket) else {
log::info!("Couldn't create socket, there may already be a running crash server");
@@ -1017,11 +1017,13 @@ impl SearchableItem for DapLogView {
fn update_matches(
&mut self,
matches: &[Self::Match],
+ active_match_index: Option<usize>,
window: &mut Window,
cx: &mut Context<Self>,
) {
- self.editor
- .update(cx, |e, cx| e.update_matches(matches, window, cx))
+ self.editor.update(cx, |e, cx| {
+ e.update_matches(matches, active_match_index, window, cx)
+ })
}
fn query_suggestion(&mut self, window: &mut Window, cx: &mut Context<Self>) -> String {
@@ -740,7 +740,7 @@ impl DebugPanel {
}
})
.child(
- IconButton::new("debug-step-over", IconName::ArrowRight)
+ IconButton::new("step-over", IconName::DebugStepOver)
.icon_size(IconSize::Small)
.on_click(window.listener_for(
running_state,
@@ -762,32 +762,29 @@ impl DebugPanel {
}),
)
.child(
- IconButton::new(
- "debug-step-into",
- IconName::ArrowDownRight,
- )
- .icon_size(IconSize::Small)
- .on_click(window.listener_for(
- running_state,
- |this, _, _window, cx| {
- this.step_in(cx);
- },
- ))
- .disabled(thread_status != ThreadStatus::Stopped)
- .tooltip({
- let focus_handle = focus_handle.clone();
- move |_window, cx| {
- Tooltip::for_action_in(
- "Step In",
- &StepInto,
- &focus_handle,
- cx,
- )
- }
- }),
+ IconButton::new("step-into", IconName::DebugStepInto)
+ .icon_size(IconSize::Small)
+ .on_click(window.listener_for(
+ running_state,
+ |this, _, _window, cx| {
+ this.step_in(cx);
+ },
+ ))
+ .disabled(thread_status != ThreadStatus::Stopped)
+ .tooltip({
+ let focus_handle = focus_handle.clone();
+ move |_window, cx| {
+ Tooltip::for_action_in(
+ "Step In",
+ &StepInto,
+ &focus_handle,
+ cx,
+ )
+ }
+ }),
)
.child(
- IconButton::new("debug-step-out", IconName::ArrowUpRight)
+ IconButton::new("step-out", IconName::DebugStepOut)
.icon_size(IconSize::Small)
.on_click(window.listener_for(
running_state,
@@ -18,14 +18,14 @@ use gpui::{
use language::{Anchor, Buffer, CharScopeContext, CodeLabel, TextBufferSnapshot, ToOffset};
use menu::{Confirm, SelectNext, SelectPrevious};
use project::{
- Completion, CompletionDisplayOptions, CompletionResponse,
+ CompletionDisplayOptions, CompletionResponse,
debugger::session::{CompletionsQuery, OutputToken, Session},
lsp_store::CompletionDocumentation,
search_history::{SearchHistory, SearchHistoryCursor},
};
use settings::Settings;
use std::fmt::Write;
-use std::{cell::RefCell, ops::Range, rc::Rc, usize};
+use std::{ops::Range, rc::Rc, usize};
use theme::{Theme, ThemeSettings};
use ui::{ContextMenu, Divider, PopoverMenu, SplitButton, Tooltip, prelude::*};
use util::ResultExt;
@@ -252,10 +252,11 @@ impl Console {
let start_offset = range.start;
let range = buffer.anchor_after(MultiBufferOffset(range.start))
..buffer.anchor_before(MultiBufferOffset(range.end));
+ let color_fn = color_fetcher(color);
console.highlight_background_key::<ConsoleAnsiHighlight>(
start_offset,
&[range],
- color_fetcher(color),
+ move |_, theme| color_fn(theme),
cx,
);
}
@@ -553,24 +554,12 @@ impl CompletionProvider for ConsoleQueryBarCompletionProvider {
}
}
- fn apply_additional_edits_for_completion(
- &self,
- _buffer: Entity<Buffer>,
- _completions: Rc<RefCell<Box<[Completion]>>>,
- _completion_index: usize,
- _push_to_history: bool,
- _cx: &mut Context<Editor>,
- ) -> gpui::Task<anyhow::Result<Option<language::Transaction>>> {
- Task::ready(Ok(None))
- }
-
fn is_completion_trigger(
&self,
buffer: &Entity<Buffer>,
position: language::Anchor,
text: &str,
trigger_in_words: bool,
- menu_is_open: bool,
cx: &mut Context<Editor>,
) -> bool {
let mut chars = text.chars();
@@ -581,9 +570,6 @@ impl CompletionProvider for ConsoleQueryBarCompletionProvider {
};
let snapshot = buffer.read(cx).snapshot();
- if !menu_is_open && !snapshot.settings_at(position, cx).show_completions_on_input {
- return false;
- }
let classifier = snapshot
.char_classifier_at(position)
@@ -333,6 +333,19 @@ where
&bracket_colors_markup(&mut cx),
"All markdown brackets should be colored based on their depth"
);
+
+ cx.set_state(indoc! {r#"ˇ{{}}"#});
+ cx.executor().advance_clock(Duration::from_millis(100));
+ cx.executor().run_until_parked();
+
+ assert_eq!(
+ r#"«1{«2{}2»}1»
+1 hsla(207.80, 16.20%, 69.19%, 1.00)
+2 hsla(29.00, 54.00%, 65.88%, 1.00)
+"#,
+ &bracket_colors_markup(&mut cx),
+ "All markdown brackets should be colored based on their depth, again"
+ );
}
#[gpui::test]
@@ -146,8 +146,8 @@ use persistence::DB;
use project::{
BreakpointWithPosition, CodeAction, Completion, CompletionDisplayOptions, CompletionIntent,
CompletionResponse, CompletionSource, DisableAiSettings, DocumentHighlight, InlayHint, InlayId,
- InvalidationStrategy, Location, LocationLink, PrepareRenameResponse, Project, ProjectItem,
- ProjectPath, ProjectTransaction, TaskSourceKind,
+ InvalidationStrategy, Location, LocationLink, LspAction, PrepareRenameResponse, Project,
+ ProjectItem, ProjectPath, ProjectTransaction, TaskSourceKind,
debugger::{
breakpoint_store::{
Breakpoint, BreakpointEditAction, BreakpointSessionState, BreakpointState,
@@ -182,7 +182,7 @@ use std::{
iter::{self, Peekable},
mem,
num::NonZeroU32,
- ops::{Deref, DerefMut, Not, Range, RangeInclusive},
+ ops::{ControlFlow, Deref, DerefMut, Not, Range, RangeInclusive},
path::{Path, PathBuf},
rc::Rc,
sync::Arc,
@@ -191,7 +191,7 @@ use std::{
use task::{ResolvedTask, RunnableTag, TaskTemplate, TaskVariables};
use text::{BufferId, FromAnchor, OffsetUtf16, Rope, ToOffset as _};
use theme::{
- ActiveTheme, PlayerColor, StatusColors, SyntaxTheme, Theme, ThemeSettings,
+ AccentColors, ActiveTheme, PlayerColor, StatusColors, SyntaxTheme, Theme, ThemeSettings,
observe_buffer_font_size_adjustment,
};
use ui::{
@@ -726,7 +726,10 @@ impl EditorActionId {
// type GetFieldEditorTheme = dyn Fn(&theme::Theme) -> theme::FieldEditor;
// type OverrideTextStyle = dyn Fn(&EditorStyle) -> Option<HighlightStyle>;
-type BackgroundHighlight = (fn(&Theme) -> Hsla, Arc<[Range<Anchor>]>);
+type BackgroundHighlight = (
+ Arc<dyn Fn(&usize, &Theme) -> Hsla + Send + Sync>,
+ Arc<[Range<Anchor>]>,
+);
type GutterHighlight = (fn(&App) -> Hsla, Vec<Range<Anchor>>);
#[derive(Default)]
@@ -1172,6 +1175,7 @@ pub struct Editor {
gutter_breakpoint_indicator: (Option<PhantomBreakpointIndicator>, Option<Task<()>>),
hovered_diff_hunk_row: Option<DisplayRow>,
pull_diagnostics_task: Task<()>,
+ pull_diagnostics_background_task: Task<()>,
in_project_search: bool,
previous_search_ranges: Option<Arc<[Range<Anchor>]>>,
breadcrumb_header: Option<String>,
@@ -1202,11 +1206,17 @@ pub struct Editor {
select_next_is_case_sensitive: Option<bool>,
pub lookup_key: Option<Box<dyn Any + Send + Sync>>,
applicable_language_settings: HashMap<Option<LanguageName>, LanguageSettings>,
- accent_overrides: Vec<SharedString>,
+ accent_data: Option<AccentData>,
fetched_tree_sitter_chunks: HashMap<ExcerptId, HashSet<Range<BufferRow>>>,
use_base_text_line_numbers: bool,
}
+#[derive(Debug, PartialEq)]
+struct AccentData {
+ colors: AccentColors,
+ overrides: Vec<SharedString>,
+}
+
fn debounce_value(debounce_ms: u64) -> Option<Duration> {
if debounce_ms > 0 {
Some(Duration::from_millis(debounce_ms))
@@ -2316,6 +2326,7 @@ impl Editor {
.unwrap_or_default(),
tasks_update_task: None,
pull_diagnostics_task: Task::ready(()),
+ pull_diagnostics_background_task: Task::ready(()),
colors: None,
refresh_colors_task: Task::ready(()),
inlay_hints: None,
@@ -2349,7 +2360,7 @@ impl Editor {
lookup_key: None,
select_next_is_case_sensitive: None,
applicable_language_settings: HashMap::default(),
- accent_overrides: Vec::new(),
+ accent_data: None,
fetched_tree_sitter_chunks: HashMap::default(),
use_base_text_line_numbers: false,
};
@@ -2359,7 +2370,7 @@ impl Editor {
}
editor.applicable_language_settings = editor.fetch_applicable_language_settings(cx);
- editor.accent_overrides = editor.fetch_accent_overrides(cx);
+ editor.accent_data = editor.fetch_accent_data(cx);
if let Some(breakpoints) = editor.breakpoint_store.as_ref() {
editor
@@ -2492,7 +2503,6 @@ impl Editor {
if let Some(buffer) = multi_buffer.read(cx).as_singleton() {
editor.register_buffer(buffer.read(cx).remote_id(), cx);
}
- editor.update_lsp_data(None, window, cx);
editor.report_editor_event(ReportEditorEvent::EditorOpened, None, cx);
}
@@ -5509,6 +5519,22 @@ impl Editor {
};
let buffer_snapshot = buffer.read(cx).snapshot();
+ let menu_is_open = matches!(
+ self.context_menu.borrow().as_ref(),
+ Some(CodeContextMenu::Completions(_))
+ );
+
+ let language = buffer_snapshot
+ .language_at(buffer_position.text_anchor)
+ .map(|language| language.name());
+
+ let language_settings = language_settings(language.clone(), buffer_snapshot.file(), cx);
+ let completion_settings = language_settings.completions.clone();
+
+ if !menu_is_open && trigger.is_some() && !language_settings.show_completions_on_input {
+ return;
+ }
+
let query: Option<Arc<String>> =
Self::completion_query(&multibuffer_snapshot, buffer_position)
.map(|query| query.into());
@@ -5517,14 +5543,8 @@ impl Editor {
// Hide the current completions menu when query is empty. Without this, cached
// completions from before the trigger char may be reused (#32774).
- if query.is_none() {
- let menu_is_open = matches!(
- self.context_menu.borrow().as_ref(),
- Some(CodeContextMenu::Completions(_))
- );
- if menu_is_open {
- self.hide_context_menu(window, cx);
- }
+ if query.is_none() && menu_is_open {
+ self.hide_context_menu(window, cx);
}
let mut ignore_word_threshold = false;
@@ -5613,14 +5633,6 @@ impl Editor {
(buffer_position..buffer_position, None)
};
- let language = buffer_snapshot
- .language_at(buffer_position)
- .map(|language| language.name());
-
- let completion_settings = language_settings(language.clone(), buffer_snapshot.file(), cx)
- .completions
- .clone();
-
let show_completion_documentation = buffer_snapshot
.settings_at(buffer_position, cx)
.show_completion_documentation;
@@ -5651,7 +5663,6 @@ impl Editor {
position.text_anchor,
trigger,
trigger_in_words,
- completions_source.is_some(),
cx,
)
})
@@ -6151,9 +6162,43 @@ impl Editor {
}
let provider = self.completion_provider.as_ref()?;
+
+ let lsp_store = self.project().map(|project| project.read(cx).lsp_store());
+ let command = lsp_store.as_ref().and_then(|lsp_store| {
+ let CompletionSource::Lsp {
+ lsp_completion,
+ server_id,
+ ..
+ } = &completion.source
+ else {
+ return None;
+ };
+ let lsp_command = lsp_completion.command.as_ref()?;
+ let available_commands = lsp_store
+ .read(cx)
+ .lsp_server_capabilities
+ .get(server_id)
+ .and_then(|server_capabilities| {
+ server_capabilities
+ .execute_command_provider
+ .as_ref()
+ .map(|options| options.commands.as_slice())
+ })?;
+ if available_commands.contains(&lsp_command.command) {
+ Some(CodeAction {
+ server_id: *server_id,
+ range: language::Anchor::MIN..language::Anchor::MIN,
+ lsp_action: LspAction::Command(lsp_command.clone()),
+ resolved: false,
+ })
+ } else {
+ None
+ }
+ });
+
drop(completion);
let apply_edits = provider.apply_additional_edits_for_completion(
- buffer_handle,
+ buffer_handle.clone(),
completions_menu.completions.clone(),
candidate_id,
true,
@@ -6167,8 +6212,29 @@ impl Editor {
self.show_signature_help(&ShowSignatureHelp, window, cx);
}
- Some(cx.foreground_executor().spawn(async move {
+ Some(cx.spawn_in(window, async move |editor, cx| {
apply_edits.await?;
+
+ if let Some((lsp_store, command)) = lsp_store.zip(command) {
+ let title = command.lsp_action.title().to_owned();
+ let project_transaction = lsp_store
+ .update(cx, |lsp_store, cx| {
+ lsp_store.apply_code_action(buffer_handle, command, false, cx)
+ })?
+ .await
+ .context("applying post-completion command")?;
+ if let Some(workspace) = editor.read_with(cx, |editor, _| editor.workspace())? {
+ Self::open_project_transaction(
+ &editor,
+ workspace.downgrade(),
+ project_transaction,
+ title,
+ cx,
+ )
+ .await?;
+ }
+ }
+
Ok(())
}))
}
@@ -6553,7 +6619,7 @@ impl Editor {
editor.update(cx, |editor, cx| {
editor.highlight_background::<Self>(
&ranges_to_highlight,
- |theme| theme.colors().editor_highlighted_line_background,
+ |_, theme| theme.colors().editor_highlighted_line_background,
cx,
);
});
@@ -6754,6 +6820,9 @@ impl Editor {
return;
};
+ if self.blame.is_none() {
+ self.start_git_blame(true, window, cx);
+ }
let Some(blame) = self.blame.as_ref() else {
return;
};
@@ -6952,12 +7021,12 @@ impl Editor {
this.highlight_background::<DocumentHighlightRead>(
&read_ranges,
- |theme| theme.colors().editor_document_highlight_read_background,
+ |_, theme| theme.colors().editor_document_highlight_read_background,
cx,
);
this.highlight_background::<DocumentHighlightWrite>(
&write_ranges,
- |theme| theme.colors().editor_document_highlight_write_background,
+ |_, theme| theme.colors().editor_document_highlight_write_background,
cx,
);
cx.notify();
@@ -7065,7 +7134,7 @@ impl Editor {
if !match_ranges.is_empty() {
editor.highlight_background::<SelectedTextHighlight>(
&match_ranges,
- |theme| theme.colors().editor_document_highlight_bracket_background,
+ |_, theme| theme.colors().editor_document_highlight_bracket_background,
cx,
)
}
@@ -8004,10 +8073,17 @@ impl Editor {
if self.edit_prediction_indent_conflict {
let cursor_point = cursor.to_point(&multibuffer);
+ let mut suggested_indent = None;
+ multibuffer.suggested_indents_callback(
+ cursor_point.row..cursor_point.row + 1,
+ |_, indent| {
+ suggested_indent = Some(indent);
+ ControlFlow::Break(())
+ },
+ cx,
+ );
- let indents = multibuffer.suggested_indents(cursor_point.row..cursor_point.row + 1, cx);
-
- if let Some((_, indent)) = indents.iter().next()
+ if let Some(indent) = suggested_indent
&& indent.len == cursor_point.column
{
self.edit_prediction_indent_conflict = false;
@@ -16952,7 +17028,9 @@ impl Editor {
})
.collect();
- let workspace = self.workspace();
+ let Some(workspace) = self.workspace() else {
+ return Task::ready(Ok(Navigated::No));
+ };
cx.spawn_in(window, async move |editor, cx| {
let locations: Vec<Location> = future::join_all(definitions)
@@ -16978,10 +17056,6 @@ impl Editor {
}
if num_locations > 1 {
- let Some(workspace) = workspace else {
- return Ok(Navigated::No);
- };
-
let tab_kind = match kind {
Some(GotoDefinitionKind::Implementation) => "Implementations",
Some(GotoDefinitionKind::Symbol) | None => "Definitions",
@@ -17013,11 +17087,14 @@ impl Editor {
let opened = workspace
.update_in(cx, |workspace, window, cx| {
+ let allow_preview = PreviewTabsSettings::get_global(cx)
+ .enable_preview_multibuffer_from_code_navigation;
Self::open_locations_in_multibuffer(
workspace,
locations,
title,
split,
+ allow_preview,
MultibufferSelectionMode::First,
window,
cx,
@@ -17034,10 +17111,9 @@ impl Editor {
Ok(Navigated::Yes)
}
Some(Either::Right(path)) => {
- let Some(workspace) = workspace else {
- return Ok(Navigated::No);
- };
-
+ // TODO(andrew): respect preview tab settings
+ // `enable_keep_preview_on_code_navigation` and
+ // `enable_preview_file_from_code_navigation`
workspace
.update_in(cx, |workspace, window, cx| {
workspace.open_resolved_path(path, window, cx)
@@ -17048,10 +17124,6 @@ impl Editor {
None => Ok(Navigated::No),
}
} else {
- let Some(workspace) = workspace else {
- return Ok(Navigated::No);
- };
-
let (target_buffer, target_ranges) = locations.into_iter().next().unwrap();
let target_range = target_ranges.first().unwrap().clone();
@@ -17075,11 +17147,19 @@ impl Editor {
workspace.active_pane().clone()
};
+ let preview_tabs_settings = PreviewTabsSettings::get_global(cx);
+ let keep_old_preview = preview_tabs_settings
+ .enable_keep_preview_on_code_navigation;
+ let allow_new_preview = preview_tabs_settings
+ .enable_preview_file_from_code_navigation;
+
workspace.open_project_item(
pane,
target_buffer.clone(),
true,
true,
+ keep_old_preview,
+ allow_new_preview,
window,
cx,
)
@@ -17356,11 +17436,14 @@ impl Editor {
} else {
format!("References to {target}")
};
+ let allow_preview = PreviewTabsSettings::get_global(cx)
+ .enable_preview_multibuffer_from_code_navigation;
Self::open_locations_in_multibuffer(
workspace,
locations,
title,
false,
+ allow_preview,
MultibufferSelectionMode::First,
window,
cx,
@@ -17376,6 +17459,7 @@ impl Editor {
locations: std::collections::HashMap<Entity<Buffer>, Vec<Range<Point>>>,
title: String,
split: bool,
+ allow_preview: bool,
multibuffer_selection_mode: MultibufferSelectionMode,
window: &mut Window,
cx: &mut Context<Workspace>,
@@ -17423,6 +17507,7 @@ impl Editor {
.is_some_and(|it| *it == key)
})
});
+ let was_existing = existing.is_some();
let editor = existing.unwrap_or_else(|| {
cx.new(|cx| {
let mut editor = Editor::for_multibuffer(
@@ -17450,7 +17535,7 @@ impl Editor {
}
editor.highlight_background::<Self>(
&ranges,
- |theme| theme.colors().editor_highlighted_line_background,
+ |_, theme| theme.colors().editor_highlighted_line_background,
cx,
);
}
@@ -17463,29 +17548,23 @@ impl Editor {
});
let item = Box::new(editor);
- let item_id = item.item_id();
-
- if split {
- let pane = workspace.adjacent_pane(window, cx);
- workspace.add_item(pane, item, None, true, true, window, cx);
- } else if PreviewTabsSettings::get_global(cx).enable_preview_from_code_navigation {
- let (preview_item_id, preview_item_idx) =
- workspace.active_pane().read_with(cx, |pane, _| {
- (pane.preview_item_id(), pane.preview_item_idx())
- });
- workspace.add_item_to_active_pane(item, preview_item_idx, true, window, cx);
+ let pane = if split {
+ workspace.adjacent_pane(window, cx)
+ } else {
+ workspace.active_pane().clone()
+ };
+ let activate_pane = split;
- if let Some(preview_item_id) = preview_item_id {
- workspace.active_pane().update(cx, |pane, cx| {
- pane.remove_item(preview_item_id, false, false, window, cx);
- });
+ let mut destination_index = None;
+ pane.update(cx, |pane, cx| {
+ if allow_preview && !was_existing {
+ destination_index = pane.replace_preview_item_id(item.item_id(), window, cx);
}
- } else {
- workspace.add_item_to_active_pane(item, None, true, window, cx);
- }
- workspace.active_pane().update(cx, |pane, cx| {
- pane.set_preview_item_id(Some(item_id), cx);
+ if was_existing && !allow_preview {
+ pane.unpreview_item_if_preview(item.item_id());
+ }
+ pane.add_item(item, activate_pane, true, destination_index, window, cx);
});
}
@@ -18341,54 +18420,101 @@ impl Editor {
return None;
}
let project = self.project()?.downgrade();
- let debounce = Duration::from_millis(pull_diagnostics_settings.debounce_ms);
- let mut buffers = self.buffer.read(cx).all_buffers();
- buffers.retain(|buffer| {
- let buffer_id_to_retain = buffer.read(cx).remote_id();
- buffer_id.is_none_or(|buffer_id| buffer_id == buffer_id_to_retain)
- && self.registered_buffers.contains_key(&buffer_id_to_retain)
- });
- if buffers.is_empty() {
+
+ let mut edited_buffer_ids = HashSet::default();
+ let mut edited_worktree_ids = HashSet::default();
+ let edited_buffers = match buffer_id {
+ Some(buffer_id) => {
+ let buffer = self.buffer().read(cx).buffer(buffer_id)?;
+ let worktree_id = buffer.read(cx).file().map(|f| f.worktree_id(cx))?;
+ edited_buffer_ids.insert(buffer.read(cx).remote_id());
+ edited_worktree_ids.insert(worktree_id);
+ vec![buffer]
+ }
+ None => self
+ .buffer()
+ .read(cx)
+ .all_buffers()
+ .into_iter()
+ .filter(|buffer| {
+ let buffer = buffer.read(cx);
+ match buffer.file().map(|f| f.worktree_id(cx)) {
+ Some(worktree_id) => {
+ edited_buffer_ids.insert(buffer.remote_id());
+ edited_worktree_ids.insert(worktree_id);
+ true
+ }
+ None => false,
+ }
+ })
+ .collect::<Vec<_>>(),
+ };
+
+ if edited_buffers.is_empty() {
self.pull_diagnostics_task = Task::ready(());
+ self.pull_diagnostics_background_task = Task::ready(());
return None;
}
- self.pull_diagnostics_task = cx.spawn_in(window, async move |editor, cx| {
- cx.background_executor().timer(debounce).await;
+ let mut already_used_buffers = HashSet::default();
+ let related_open_buffers = self
+ .workspace
+ .as_ref()
+ .and_then(|(workspace, _)| workspace.upgrade())
+ .into_iter()
+ .flat_map(|workspace| workspace.read(cx).panes())
+ .flat_map(|pane| pane.read(cx).items_of_type::<Editor>())
+ .filter(|editor| editor != &cx.entity())
+ .flat_map(|editor| editor.read(cx).buffer().read(cx).all_buffers())
+ .filter(|buffer| {
+ let buffer = buffer.read(cx);
+ let buffer_id = buffer.remote_id();
+ if already_used_buffers.insert(buffer_id) {
+ if let Some(worktree_id) = buffer.file().map(|f| f.worktree_id(cx)) {
+ return !edited_buffer_ids.contains(&buffer_id)
+ && !edited_worktree_ids.contains(&worktree_id);
+ }
+ }
+ false
+ })
+ .collect::<Vec<_>>();
+
+ let debounce = Duration::from_millis(pull_diagnostics_settings.debounce_ms);
+ let make_spawn = |buffers: Vec<Entity<Buffer>>, delay: Duration| {
+ if buffers.is_empty() {
+ return Task::ready(());
+ }
+ let project_weak = project.clone();
+ cx.spawn_in(window, async move |_, cx| {
+ cx.background_executor().timer(delay).await;
- let Ok(mut pull_diagnostics_tasks) = cx.update(|_, cx| {
- buffers
- .into_iter()
- .filter_map(|buffer| {
- project
- .update(cx, |project, cx| {
- project.lsp_store().update(cx, |lsp_store, cx| {
- lsp_store.pull_diagnostics_for_buffer(buffer, cx)
+ let Ok(mut pull_diagnostics_tasks) = cx.update(|_, cx| {
+ buffers
+ .into_iter()
+ .filter_map(|buffer| {
+ project_weak
+ .update(cx, |project, cx| {
+ project.lsp_store().update(cx, |lsp_store, cx| {
+ lsp_store.pull_diagnostics_for_buffer(buffer, cx)
+ })
})
- })
- .ok()
- })
- .collect::<FuturesUnordered<_>>()
- }) else {
- return;
- };
+ .ok()
+ })
+ .collect::<FuturesUnordered<_>>()
+ }) else {
+ return;
+ };
- while let Some(pull_task) = pull_diagnostics_tasks.next().await {
- match pull_task {
- Ok(()) => {
- if editor
- .update_in(cx, |editor, window, cx| {
- editor.update_diagnostics_state(window, cx);
- })
- .is_err()
- {
- return;
- }
+ while let Some(pull_task) = pull_diagnostics_tasks.next().await {
+ if let Err(e) = pull_task {
+ log::error!("Failed to update project diagnostics: {e:#}");
}
- Err(e) => log::error!("Failed to update project diagnostics: {e:#}"),
}
- }
- });
+ })
+ };
+
+ self.pull_diagnostics_task = make_spawn(edited_buffers, debounce);
+ self.pull_diagnostics_background_task = make_spawn(related_open_buffers, debounce * 2);
Some(())
}
@@ -20676,6 +20802,7 @@ impl Editor {
locations,
format!("Selections for '{title}'"),
false,
+ false,
MultibufferSelectionMode::All,
window,
cx,
@@ -20878,7 +21005,7 @@ impl Editor {
pub fn set_search_within_ranges(&mut self, ranges: &[Range<Anchor>], cx: &mut Context<Self>) {
self.highlight_background::<SearchWithinRange>(
ranges,
- |colors| colors.colors().editor_document_highlight_read_background,
+ |_, colors| colors.colors().editor_document_highlight_read_background,
cx,
)
}
@@ -20894,12 +21021,12 @@ impl Editor {
pub fn highlight_background<T: 'static>(
&mut self,
ranges: &[Range<Anchor>],
- color_fetcher: fn(&Theme) -> Hsla,
+ color_fetcher: impl Fn(&usize, &Theme) -> Hsla + Send + Sync + 'static,
cx: &mut Context<Self>,
) {
self.background_highlights.insert(
HighlightKey::Type(TypeId::of::<T>()),
- (color_fetcher, Arc::from(ranges)),
+ (Arc::new(color_fetcher), Arc::from(ranges)),
);
self.scrollbar_marker_state.dirty = true;
cx.notify();
@@ -20909,12 +21036,12 @@ impl Editor {
&mut self,
key: usize,
ranges: &[Range<Anchor>],
- color_fetcher: fn(&Theme) -> Hsla,
+ color_fetcher: impl Fn(&usize, &Theme) -> Hsla + Send + Sync + 'static,
cx: &mut Context<Self>,
) {
self.background_highlights.insert(
HighlightKey::TypePlus(TypeId::of::<T>(), key),
- (color_fetcher, Arc::from(ranges)),
+ (Arc::new(color_fetcher), Arc::from(ranges)),
);
self.scrollbar_marker_state.dirty = true;
cx.notify();
@@ -21139,7 +21266,6 @@ impl Editor {
) -> Vec<(Range<DisplayPoint>, Hsla)> {
let mut results = Vec::new();
for (color_fetcher, ranges) in self.background_highlights.values() {
- let color = color_fetcher(theme);
let start_ix = match ranges.binary_search_by(|probe| {
let cmp = probe
.end
@@ -21152,7 +21278,7 @@ impl Editor {
}) {
Ok(i) | Err(i) => i,
};
- for range in &ranges[start_ix..] {
+ for (index, range) in ranges[start_ix..].iter().enumerate() {
if range
.start
.cmp(&search_range.end, &display_snapshot.buffer_snapshot())
@@ -21161,6 +21287,7 @@ impl Editor {
break;
}
+ let color = color_fetcher(&(start_ix + index), theme);
let start = range.start.to_display_point(display_snapshot);
let end = range.end.to_display_point(display_snapshot);
results.push((start..end, color))
@@ -21592,16 +21719,18 @@ impl Editor {
cx.notify();
}
- fn fetch_accent_overrides(&self, cx: &App) -> Vec<SharedString> {
+ fn fetch_accent_data(&self, cx: &App) -> Option<AccentData> {
if !self.mode.is_full() {
- return Vec::new();
+ return None;
}
let theme_settings = theme::ThemeSettings::get_global(cx);
+ let theme = cx.theme();
+ let accent_colors = theme.accents().clone();
- theme_settings
+ let accent_overrides = theme_settings
.theme_overrides
- .get(cx.theme().name.as_ref())
+ .get(theme.name.as_ref())
.map(|theme_style| &theme_style.accents)
.into_iter()
.flatten()
@@ -21614,7 +21743,12 @@ impl Editor {
.flatten(),
)
.flat_map(|accent| accent.0.clone())
- .collect()
+ .collect();
+
+ Some(AccentData {
+ colors: accent_colors,
+ overrides: accent_overrides,
+ })
}
fn fetch_applicable_language_settings(
@@ -21644,9 +21778,9 @@ impl Editor {
let language_settings_changed = new_language_settings != self.applicable_language_settings;
self.applicable_language_settings = new_language_settings;
- let new_accent_overrides = self.fetch_accent_overrides(cx);
- let accent_overrides_changed = new_accent_overrides != self.accent_overrides;
- self.accent_overrides = new_accent_overrides;
+ let new_accents = self.fetch_accent_data(cx);
+ let accents_changed = new_accents != self.accent_data;
+ self.accent_data = new_accents;
if self.diagnostics_enabled() {
let new_severity = EditorSettings::get_global(cx)
@@ -21720,7 +21854,7 @@ impl Editor {
}
}
- if language_settings_changed || accent_overrides_changed {
+ if language_settings_changed || accents_changed {
self.colorize_brackets(true, cx);
}
@@ -21880,43 +22014,64 @@ impl Editor {
};
for (buffer, (ranges, scroll_offset)) in new_selections_by_buffer {
- let editor = buffer
- .read(cx)
- .file()
- .is_none()
+ let buffer_read = buffer.read(cx);
+ let (has_file, is_project_file) = if let Some(file) = buffer_read.file() {
+ (true, project::File::from_dyn(Some(file)).is_some())
+ } else {
+ (false, false)
+ };
+
+            // If the project file is `None`, `workspace.open_project_item` will fail to open
+            // the excerpt in a pre-existing workspace item (if one exists), because the
+            // buffer's entity id will be `None` — so in that case we first check for a matching tab.
+ let editor = (!has_file || !is_project_file)
.then(|| {
// Handle file-less buffers separately: those are not really the project items, so won't have a project path or entity id,
// so `workspace.open_project_item` will never find them, always opening a new editor.
// Instead, we try to activate the existing editor in the pane first.
- let (editor, pane_item_index) =
+ let (editor, pane_item_index, pane_item_id) =
pane.read(cx).items().enumerate().find_map(|(i, item)| {
let editor = item.downcast::<Editor>()?;
let singleton_buffer =
editor.read(cx).buffer().read(cx).as_singleton()?;
if singleton_buffer == buffer {
- Some((editor, i))
+ Some((editor, i, item.item_id()))
} else {
None
}
})?;
pane.update(cx, |pane, cx| {
- pane.activate_item(pane_item_index, true, true, window, cx)
+ pane.activate_item(pane_item_index, true, true, window, cx);
+ if !PreviewTabsSettings::get_global(cx)
+ .enable_preview_from_multibuffer
+ {
+ pane.unpreview_item_if_preview(pane_item_id);
+ }
});
Some(editor)
})
.flatten()
.unwrap_or_else(|| {
+ let keep_old_preview = PreviewTabsSettings::get_global(cx)
+ .enable_keep_preview_on_code_navigation;
+ let allow_new_preview =
+ PreviewTabsSettings::get_global(cx).enable_preview_from_multibuffer;
workspace.open_project_item::<Self>(
pane.clone(),
buffer,
true,
true,
+ keep_old_preview,
+ allow_new_preview,
window,
cx,
)
});
editor.update(cx, |editor, cx| {
+ if has_file && !is_project_file {
+ editor.set_read_only(true);
+ }
let autoscroll = match scroll_offset {
Some(scroll_offset) => Autoscroll::top_relative(scroll_offset as usize),
None => Autoscroll::newest(),
@@ -21940,10 +22095,11 @@ impl Editor {
});
}
- // For now, don't allow opening excerpts in buffers that aren't backed by
- // regular project files.
+ // Allow opening excerpts for buffers that either belong to the current project
+ // or represent synthetic/non-local files (e.g., git blobs). File-less buffers
+ // are also supported so tests and other in-memory views keep working.
fn can_open_excerpts_in_file(file: Option<&Arc<dyn language::File>>) -> bool {
- file.is_none_or(|file| project::File::from_dyn(Some(file)).is_some())
+ file.is_none_or(|file| project::File::from_dyn(Some(file)).is_some() || !file.is_local())
}
fn marked_text_ranges(&self, cx: &App) -> Option<Vec<Range<MultiBufferOffsetUtf16>>> {
@@ -22542,6 +22698,10 @@ impl Editor {
}
}
+ pub fn last_gutter_dimensions(&self) -> &GutterDimensions {
+ &self.gutter_dimensions
+ }
+
pub fn wait_for_diff_to_load(&self) -> Option<Shared<Task<()>>> {
self.load_diff_task.clone()
}
@@ -23431,7 +23591,6 @@ pub trait CompletionProvider {
position: language::Anchor,
text: &str,
trigger_in_words: bool,
- menu_is_open: bool,
cx: &mut Context<Editor>,
) -> bool;
@@ -23810,7 +23969,6 @@ impl CompletionProvider for Entity<Project> {
position: language::Anchor,
text: &str,
trigger_in_words: bool,
- menu_is_open: bool,
cx: &mut Context<Editor>,
) -> bool {
let mut chars = text.chars();
@@ -23825,9 +23983,6 @@ impl CompletionProvider for Entity<Project> {
let buffer = buffer.read(cx);
let snapshot = buffer.snapshot();
- if !menu_is_open && !snapshot.settings_at(position, cx).show_completions_on_input {
- return false;
- }
let classifier = snapshot
.char_classifier_at(position)
.scope_context(Some(CharScopeContext::Completion));
@@ -14755,6 +14755,180 @@ async fn test_completion(cx: &mut TestAppContext) {
apply_additional_edits.await.unwrap();
}
+#[gpui::test]
+async fn test_completion_can_run_commands(cx: &mut TestAppContext) {
+ init_test(cx, |_| {});
+
+ let fs = FakeFs::new(cx.executor());
+ fs.insert_tree(
+ path!("/a"),
+ json!({
+ "main.rs": "",
+ }),
+ )
+ .await;
+
+ let project = Project::test(fs, [path!("/a").as_ref()], cx).await;
+ let language_registry = project.read_with(cx, |project, _| project.languages().clone());
+ language_registry.add(rust_lang());
+ let command_calls = Arc::new(AtomicUsize::new(0));
+ let registered_command = "_the/command";
+
+ let closure_command_calls = command_calls.clone();
+ let mut fake_servers = language_registry.register_fake_lsp(
+ "Rust",
+ FakeLspAdapter {
+ capabilities: lsp::ServerCapabilities {
+ completion_provider: Some(lsp::CompletionOptions {
+ trigger_characters: Some(vec![".".to_string(), ":".to_string()]),
+ ..lsp::CompletionOptions::default()
+ }),
+ execute_command_provider: Some(lsp::ExecuteCommandOptions {
+ commands: vec![registered_command.to_owned()],
+ ..lsp::ExecuteCommandOptions::default()
+ }),
+ ..lsp::ServerCapabilities::default()
+ },
+ initializer: Some(Box::new(move |fake_server| {
+ fake_server.set_request_handler::<lsp::request::Completion, _, _>(
+ move |params, _| async move {
+ Ok(Some(lsp::CompletionResponse::Array(vec![
+ lsp::CompletionItem {
+ label: "registered_command".to_owned(),
+ text_edit: gen_text_edit(¶ms, ""),
+ command: Some(lsp::Command {
+ title: registered_command.to_owned(),
+ command: "_the/command".to_owned(),
+ arguments: Some(vec![serde_json::Value::Bool(true)]),
+ }),
+ ..lsp::CompletionItem::default()
+ },
+ lsp::CompletionItem {
+ label: "unregistered_command".to_owned(),
+ text_edit: gen_text_edit(¶ms, ""),
+ command: Some(lsp::Command {
+ title: "????????????".to_owned(),
+ command: "????????????".to_owned(),
+ arguments: Some(vec![serde_json::Value::Null]),
+ }),
+ ..lsp::CompletionItem::default()
+ },
+ ])))
+ },
+ );
+ fake_server.set_request_handler::<lsp::request::ExecuteCommand, _, _>({
+ let command_calls = closure_command_calls.clone();
+ move |params, _| {
+ assert_eq!(params.command, registered_command);
+ let command_calls = command_calls.clone();
+ async move {
+ command_calls.fetch_add(1, atomic::Ordering::Release);
+ Ok(Some(json!(null)))
+ }
+ }
+ });
+ })),
+ ..FakeLspAdapter::default()
+ },
+ );
+ let workspace = cx.add_window(|window, cx| Workspace::test_new(project.clone(), window, cx));
+ let cx = &mut VisualTestContext::from_window(*workspace, cx);
+ let editor = workspace
+ .update(cx, |workspace, window, cx| {
+ workspace.open_abs_path(
+ PathBuf::from(path!("/a/main.rs")),
+ OpenOptions::default(),
+ window,
+ cx,
+ )
+ })
+ .unwrap()
+ .await
+ .unwrap()
+ .downcast::<Editor>()
+ .unwrap();
+ let _fake_server = fake_servers.next().await.unwrap();
+
+ editor.update_in(cx, |editor, window, cx| {
+ cx.focus_self(window);
+ editor.move_to_end(&MoveToEnd, window, cx);
+ editor.handle_input(".", window, cx);
+ });
+ cx.run_until_parked();
+ editor.update(cx, |editor, _| {
+ assert!(editor.context_menu_visible());
+ if let Some(CodeContextMenu::Completions(menu)) = editor.context_menu.borrow_mut().as_ref()
+ {
+ let completion_labels = menu
+ .completions
+ .borrow()
+ .iter()
+ .map(|c| c.label.text.clone())
+ .collect::<Vec<_>>();
+ assert_eq!(
+ completion_labels,
+ &["registered_command", "unregistered_command",],
+ );
+ } else {
+ panic!("expected completion menu to be open");
+ }
+ });
+
+ editor
+ .update_in(cx, |editor, window, cx| {
+ editor
+ .confirm_completion(&ConfirmCompletion::default(), window, cx)
+ .unwrap()
+ })
+ .await
+ .unwrap();
+ cx.run_until_parked();
+ assert_eq!(
+ command_calls.load(atomic::Ordering::Acquire),
+ 1,
+ "For completion with a registered command, Zed should send a command execution request",
+ );
+
+ editor.update_in(cx, |editor, window, cx| {
+ cx.focus_self(window);
+ editor.handle_input(".", window, cx);
+ });
+ cx.run_until_parked();
+ editor.update(cx, |editor, _| {
+ assert!(editor.context_menu_visible());
+ if let Some(CodeContextMenu::Completions(menu)) = editor.context_menu.borrow_mut().as_ref()
+ {
+ let completion_labels = menu
+ .completions
+ .borrow()
+ .iter()
+ .map(|c| c.label.text.clone())
+ .collect::<Vec<_>>();
+ assert_eq!(
+ completion_labels,
+ &["registered_command", "unregistered_command",],
+ );
+ } else {
+ panic!("expected completion menu to be open");
+ }
+ });
+ editor
+ .update_in(cx, |editor, window, cx| {
+ editor.context_menu_next(&Default::default(), window, cx);
+ editor
+ .confirm_completion(&ConfirmCompletion::default(), window, cx)
+ .unwrap()
+ })
+ .await
+ .unwrap();
+ cx.run_until_parked();
+ assert_eq!(
+ command_calls.load(atomic::Ordering::Acquire),
+ 1,
+ "For completion with an unregistered command, Zed should not send a command execution request",
+ );
+}
+
#[gpui::test]
async fn test_completion_reuse(cx: &mut TestAppContext) {
init_test(cx, |_| {});
@@ -16804,7 +16978,7 @@ fn test_highlighted_ranges(cx: &mut TestAppContext) {
anchor_range(Point::new(6, 3)..Point::new(6, 5)),
anchor_range(Point::new(8, 4)..Point::new(8, 6)),
],
- |_| Hsla::red(),
+ |_, _| Hsla::red(),
cx,
);
editor.highlight_background::<Type2>(
@@ -16814,7 +16988,7 @@ fn test_highlighted_ranges(cx: &mut TestAppContext) {
anchor_range(Point::new(7, 4)..Point::new(7, 7)),
anchor_range(Point::new(9, 5)..Point::new(9, 8)),
],
- |_| Hsla::green(),
+ |_, _| Hsla::green(),
cx,
);
@@ -18921,6 +19095,109 @@ async fn test_document_format_with_prettier(cx: &mut TestAppContext) {
);
}
+#[gpui::test]
+async fn test_document_format_with_prettier_explicit_language(cx: &mut TestAppContext) {
+ init_test(cx, |settings| {
+ settings.defaults.formatter = Some(FormatterList::Single(Formatter::Prettier))
+ });
+
+ let fs = FakeFs::new(cx.executor());
+ fs.insert_file(path!("/file.settings"), Default::default())
+ .await;
+
+ let project = Project::test(fs, [path!("/file.settings").as_ref()], cx).await;
+ let language_registry = project.read_with(cx, |project, _| project.languages().clone());
+
+ let ts_lang = Arc::new(Language::new(
+ LanguageConfig {
+ name: "TypeScript".into(),
+ matcher: LanguageMatcher {
+ path_suffixes: vec!["ts".to_string()],
+ ..LanguageMatcher::default()
+ },
+ prettier_parser_name: Some("typescript".to_string()),
+ ..LanguageConfig::default()
+ },
+ Some(tree_sitter_typescript::LANGUAGE_TYPESCRIPT.into()),
+ ));
+
+ language_registry.add(ts_lang.clone());
+
+ update_test_language_settings(cx, |settings| {
+ settings.defaults.prettier.get_or_insert_default().allowed = Some(true);
+ });
+
+ let test_plugin = "test_plugin";
+ let _ = language_registry.register_fake_lsp(
+ "TypeScript",
+ FakeLspAdapter {
+ prettier_plugins: vec![test_plugin],
+ ..Default::default()
+ },
+ );
+
+ let prettier_format_suffix = project::TEST_PRETTIER_FORMAT_SUFFIX;
+ let buffer = project
+ .update(cx, |project, cx| {
+ project.open_local_buffer(path!("/file.settings"), cx)
+ })
+ .await
+ .unwrap();
+
+ project.update(cx, |project, cx| {
+ project.set_language_for_buffer(&buffer, ts_lang, cx)
+ });
+
+ let buffer_text = "one\ntwo\nthree\n";
+ let buffer = cx.new(|cx| MultiBuffer::singleton(buffer, cx));
+ let (editor, cx) = cx.add_window_view(|window, cx| build_editor(buffer, window, cx));
+ editor.update_in(cx, |editor, window, cx| {
+ editor.set_text(buffer_text, window, cx)
+ });
+
+ editor
+ .update_in(cx, |editor, window, cx| {
+ editor.perform_format(
+ project.clone(),
+ FormatTrigger::Manual,
+ FormatTarget::Buffers(editor.buffer().read(cx).all_buffers()),
+ window,
+ cx,
+ )
+ })
+ .unwrap()
+ .await;
+ assert_eq!(
+ editor.update(cx, |editor, cx| editor.text(cx)),
+ buffer_text.to_string() + prettier_format_suffix + "\ntypescript",
+ "Test prettier formatting was not applied to the original buffer text",
+ );
+
+ update_test_language_settings(cx, |settings| {
+ settings.defaults.formatter = Some(FormatterList::default())
+ });
+ let format = editor.update_in(cx, |editor, window, cx| {
+ editor.perform_format(
+ project.clone(),
+ FormatTrigger::Manual,
+ FormatTarget::Buffers(editor.buffer().read(cx).all_buffers()),
+ window,
+ cx,
+ )
+ });
+ format.await.unwrap();
+
+ assert_eq!(
+ editor.update(cx, |editor, cx| editor.text(cx)),
+ buffer_text.to_string()
+ + prettier_format_suffix
+ + "\ntypescript\n"
+ + prettier_format_suffix
+ + "\ntypescript",
+ "Autoformatting (via test prettier) was not applied to the original buffer text",
+ );
+}
+
#[gpui::test]
async fn test_addition_reverts(cx: &mut TestAppContext) {
init_test(cx, |_| {});
@@ -23799,7 +24076,7 @@ async fn test_rename_with_duplicate_edits(cx: &mut TestAppContext) {
let highlight_range = highlight_range.to_anchors(&editor.buffer().read(cx).snapshot(cx));
editor.highlight_background::<DocumentHighlightRead>(
&[highlight_range],
- |theme| theme.colors().editor_document_highlight_read_background,
+ |_, theme| theme.colors().editor_document_highlight_read_background,
cx,
);
});
@@ -23877,7 +24154,7 @@ async fn test_rename_without_prepare(cx: &mut TestAppContext) {
let highlight_range = highlight_range.to_anchors(&editor.buffer().read(cx).snapshot(cx));
editor.highlight_background::<DocumentHighlightRead>(
&[highlight_range],
- |theme| theme.colors().editor_document_highlight_read_background,
+ |_, theme| theme.colors().editor_document_highlight_read_background,
cx,
);
});
@@ -26415,7 +26692,7 @@ async fn test_pulling_diagnostics(cx: &mut TestAppContext) {
}
});
- let ensure_result_id = |expected: Option<String>, cx: &mut TestAppContext| {
+ let ensure_result_id = |expected: Option<SharedString>, cx: &mut TestAppContext| {
project.update(cx, |project, cx| {
let buffer_id = editor
.read(cx)
@@ -26428,7 +26705,7 @@ async fn test_pulling_diagnostics(cx: &mut TestAppContext) {
let buffer_result_id = project
.lsp_store()
.read(cx)
- .result_id(server_id, buffer_id, cx);
+ .result_id_for_buffer_pull(server_id, buffer_id, &None, cx);
assert_eq!(expected, buffer_result_id);
});
};
@@ -26445,7 +26722,7 @@ async fn test_pulling_diagnostics(cx: &mut TestAppContext) {
.next()
.await
.expect("should have sent the first diagnostics pull request");
- ensure_result_id(Some("1".to_string()), cx);
+ ensure_result_id(Some(SharedString::new("1")), cx);
// Editing should trigger diagnostics
editor.update_in(cx, |editor, window, cx| {
@@ -26458,7 +26735,7 @@ async fn test_pulling_diagnostics(cx: &mut TestAppContext) {
2,
"Editing should trigger diagnostic request"
);
- ensure_result_id(Some("2".to_string()), cx);
+ ensure_result_id(Some(SharedString::new("2")), cx);
// Moving cursor should not trigger diagnostic request
editor.update_in(cx, |editor, window, cx| {
@@ -26473,7 +26750,7 @@ async fn test_pulling_diagnostics(cx: &mut TestAppContext) {
2,
"Cursor movement should not trigger diagnostic request"
);
- ensure_result_id(Some("2".to_string()), cx);
+ ensure_result_id(Some(SharedString::new("2")), cx);
// Multiple rapid edits should be debounced
for _ in 0..5 {
editor.update_in(cx, |editor, window, cx| {
@@ -26488,7 +26765,7 @@ async fn test_pulling_diagnostics(cx: &mut TestAppContext) {
final_requests <= 4,
"Multiple rapid edits should be debounced (got {final_requests} requests)",
);
- ensure_result_id(Some(final_requests.to_string()), cx);
+ ensure_result_id(Some(SharedString::new(final_requests.to_string())), cx);
}
#[gpui::test]
@@ -27125,7 +27402,7 @@ let result = variable * 2;",
editor.highlight_background::<DocumentHighlightRead>(
&anchor_ranges,
- |theme| theme.colors().editor_document_highlight_read_background,
+ |_, theme| theme.colors().editor_document_highlight_read_background,
cx,
);
});
@@ -1227,7 +1227,13 @@ impl EditorElement {
editor.hide_blame_popover(false, cx);
}
} else {
- editor.hide_blame_popover(false, cx);
+ let keyboard_grace = editor
+ .inline_blame_popover
+ .as_ref()
+ .is_some_and(|state| state.keyboard_grace);
+ if !keyboard_grace {
+ editor.hide_blame_popover(false, cx);
+ }
}
let breakpoint_indicator = if gutter_hovered {
@@ -2511,7 +2517,6 @@ impl EditorElement {
scroll_position: gpui::Point<ScrollOffset>,
scroll_pixel_position: gpui::Point<ScrollPixelOffset>,
line_height: Pixels,
- text_hitbox: &Hitbox,
window: &mut Window,
cx: &mut App,
) -> Option<InlineBlameLayout> {
@@ -2580,16 +2585,6 @@ impl EditorElement {
let size = element.layout_as_root(AvailableSpace::min_size(), window, cx);
let bounds = Bounds::new(absolute_offset, size);
- self.layout_blame_entry_popover(
- entry.clone(),
- blame,
- line_height,
- text_hitbox,
- row_info.buffer_id?,
- window,
- cx,
- );
-
element.prepaint_as_root(absolute_offset, AvailableSpace::min_size(), window, cx);
Some(InlineBlameLayout {
@@ -2600,16 +2595,48 @@ impl EditorElement {
})
}
- fn layout_blame_entry_popover(
+ fn layout_blame_popover(
&self,
- blame_entry: BlameEntry,
- blame: Entity<GitBlame>,
- line_height: Pixels,
+ editor_snapshot: &EditorSnapshot,
text_hitbox: &Hitbox,
- buffer: BufferId,
+ line_height: Pixels,
window: &mut Window,
cx: &mut App,
) {
+ if !self.editor.read(cx).inline_blame_popover.is_some() {
+ return;
+ }
+
+ let Some(blame) = self.editor.read(cx).blame.clone() else {
+ return;
+ };
+ let cursor_point = self
+ .editor
+ .read(cx)
+ .selections
+ .newest::<language::Point>(&editor_snapshot.display_snapshot)
+ .head();
+
+ let Some((buffer, buffer_point, _)) = editor_snapshot
+ .buffer_snapshot()
+ .point_to_buffer_point(cursor_point)
+ else {
+ return;
+ };
+
+ let row_info = RowInfo {
+ buffer_id: Some(buffer.remote_id()),
+ buffer_row: Some(buffer_point.row),
+ ..Default::default()
+ };
+
+ let Some((buffer_id, blame_entry)) = blame
+ .update(cx, |blame, cx| blame.blame_for_rows(&[row_info], cx).next())
+ .flatten()
+ else {
+ return;
+ };
+
let Some((popover_state, target_point)) = self.editor.read_with(cx, |editor, _| {
editor
.inline_blame_popover
@@ -2631,7 +2658,7 @@ impl EditorElement {
popover_state.markdown,
workspace,
&blame,
- buffer,
+ buffer_id,
window,
cx,
)
@@ -9813,7 +9840,6 @@ impl Element for EditorElement {
scroll_position,
scroll_pixel_position,
line_height,
- &text_hitbox,
window,
cx,
) {
@@ -10011,6 +10037,8 @@ impl Element for EditorElement {
window,
cx,
);
+
+ self.layout_blame_popover(&snapshot, &hitbox, line_height, window, cx);
}
let mouse_context_menu = self.layout_mouse_context_menu(
@@ -518,7 +518,7 @@ fn show_hover(
// Highlight the selected symbol using a background highlight
editor.highlight_background::<HoverState>(
&hover_highlights,
- |theme| theme.colors().element_hover, // todo update theme
+ |_, theme| theme.colors().element_hover, // todo update theme
cx,
);
}
@@ -1487,6 +1487,7 @@ impl SearchableItem for Editor {
fn update_matches(
&mut self,
matches: &[Range<Anchor>],
+ active_match_index: Option<usize>,
_: &mut Window,
cx: &mut Context<Self>,
) {
@@ -1497,7 +1498,13 @@ impl SearchableItem for Editor {
let updated = existing_range != Some(matches);
self.highlight_background::<BufferSearchHighlights>(
matches,
- |theme| theme.colors().search_match_background,
+ move |index, theme| {
+ if active_match_index == Some(*index) {
+ theme.colors().search_active_match_background
+ } else {
+ theme.colors().search_match_background
+ }
+ },
cx,
);
if updated {
@@ -1891,15 +1898,20 @@ fn path_for_buffer<'a>(
cx: &'a App,
) -> Option<Cow<'a, str>> {
let file = buffer.read(cx).as_singleton()?.read(cx).file()?;
- path_for_file(file.as_ref(), height, include_filename, cx)
+ path_for_file(file, height, include_filename, cx)
}
fn path_for_file<'a>(
- file: &'a dyn language::File,
+ file: &'a Arc<dyn language::File>,
mut height: usize,
include_filename: bool,
cx: &'a App,
) -> Option<Cow<'a, str>> {
+ if project::File::from_dyn(Some(file)).is_none() {
+ return None;
+ }
+
+ let file = file.as_ref();
// Ensure we always render at least the filename.
height += 1;
@@ -1946,11 +1958,11 @@ mod tests {
#[gpui::test]
fn test_path_for_file(cx: &mut App) {
- let file = TestFile {
+ let file: Arc<dyn language::File> = Arc::new(TestFile {
path: RelPath::empty().into(),
root_name: String::new(),
local_root: None,
- };
+ });
assert_eq!(path_for_file(&file, 0, false, cx), None);
}
@@ -261,7 +261,7 @@ impl ExampleContext {
.expect("Unknown tool_name content in meta");
tool_uses_by_id.insert(
- tool_call.id,
+ tool_call.tool_call_id,
ToolUse {
name: tool_name.to_string(),
value: tool_call.raw_input.unwrap_or_default(),
@@ -277,7 +277,9 @@ impl ExampleContext {
ThreadEvent::ToolCallUpdate(tool_call_update) => {
if let acp_thread::ToolCallUpdate::UpdateFields(update) = tool_call_update {
if let Some(raw_input) = update.fields.raw_input {
- if let Some(tool_use) = tool_uses_by_id.get_mut(&update.id) {
+ if let Some(tool_use) =
+ tool_uses_by_id.get_mut(&update.tool_call_id)
+ {
tool_use.value = raw_input;
}
}
@@ -290,7 +292,7 @@ impl ExampleContext {
update.fields.status == Some(acp::ToolCallStatus::Completed);
let tool_use = tool_uses_by_id
- .remove(&update.id)
+ .remove(&update.tool_call_id)
.expect("Unrecognized tool call completed");
let log_message = if succeeded {
@@ -337,10 +339,7 @@ impl ExampleContext {
acp::StopReason::MaxTurnRequests => {
return Err(anyhow!("Exceeded maximum turn requests"));
}
- acp::StopReason::Refusal => {
- return Err(anyhow!("Refusal"));
- }
- acp::StopReason::Cancelled => return Err(anyhow!("Cancelled")),
+ stop_reason => return Err(anyhow!("{stop_reason:?}")),
},
}
}
@@ -303,13 +303,12 @@ impl ExampleInstance {
let context_server_registry = cx.new(|cx| ContextServerRegistry::new(project.read(cx).context_server_store(), cx));
let thread = if let Some(json) = &meta.existing_thread_json {
- let session_id = acp::SessionId(
+ let session_id = acp::SessionId::new(
rand::rng()
.sample_iter(&distr::Alphanumeric)
.take(7)
.map(char::from)
- .collect::<String>()
- .into(),
+ .collect::<String>(),
);
let db_thread = agent::DbThread::from_json(json.as_bytes()).expect("Can't read serialized thread");
@@ -640,7 +639,7 @@ impl agent::ThreadEnvironment for EvalThreadEnvironment {
cx.spawn(async move |cx| {
let language_registry =
project.read_with(cx, |project, _cx| project.languages().clone())?;
- let id = acp::TerminalId(uuid::Uuid::new_v4().to_string().into());
+ let id = acp::TerminalId::new(uuid::Uuid::new_v4().to_string());
let terminal =
acp_thread::create_terminal_entity(command, &[], vec![], cwd.clone(), &project, cx)
.await?;
@@ -0,0 +1,18 @@
+[package]
+name = "eval_utils"
+version = "0.1.0"
+edition.workspace = true
+publish.workspace = true
+license = "GPL-3.0-or-later"
+
+[lints]
+workspace = true
+
+[lib]
+path = "src/eval_utils.rs"
+doctest = false
+
+[dependencies]
+gpui.workspace = true
+serde.workspace = true
+smol.workspace = true
@@ -0,0 +1 @@
+LICENSE-GPL
@@ -0,0 +1,3 @@
+# eval_utils
+
+Utilities for evals of agents.
@@ -0,0 +1,128 @@
+//! Utilities for evaluation and benchmarking.
+
+use std::{
+ collections::HashMap,
+ sync::{Arc, mpsc},
+};
+
+fn report_progress(evaluated_count: usize, failed_count: usize, iterations: usize) {
+ let passed_count = evaluated_count - failed_count;
+ let passed_ratio = if evaluated_count == 0 {
+ 0.0
+ } else {
+ passed_count as f64 / evaluated_count as f64
+ };
+ println!(
+ "\r\x1b[KEvaluated {}/{} ({:.2}% passed)",
+ evaluated_count,
+ iterations,
+ passed_ratio * 100.0
+ )
+}
+
+#[derive(Clone, Debug, Eq, PartialEq)]
+pub enum OutcomeKind {
+ Passed,
+ Failed,
+ Error,
+}
+
+pub trait EvalOutputProcessor {
+ type Metadata: 'static + Send;
+ fn process(&mut self, output: &EvalOutput<Self::Metadata>);
+ fn assert(&mut self);
+}
+
+#[derive(Clone, Debug)]
+pub struct EvalOutput<M> {
+ pub outcome: OutcomeKind,
+ pub data: String,
+ pub metadata: M,
+}
+
+pub struct NoProcessor;
+impl EvalOutputProcessor for NoProcessor {
+ type Metadata = ();
+
+ fn process(&mut self, _output: &EvalOutput<Self::Metadata>) {}
+
+ fn assert(&mut self) {}
+}
+
+pub fn eval<P>(
+ iterations: usize,
+ expected_pass_ratio: f32,
+ mut processor: P,
+ evalf: impl Fn() -> EvalOutput<P::Metadata> + Send + Sync + 'static,
+) where
+ P: EvalOutputProcessor,
+{
+ let mut evaluated_count = 0;
+ let mut failed_count = 0;
+ let evalf = Arc::new(evalf);
+ report_progress(evaluated_count, failed_count, iterations);
+
+ let (tx, rx) = mpsc::channel();
+
+ let executor = gpui::background_executor();
+ let semaphore = Arc::new(smol::lock::Semaphore::new(32));
+ let evalf = Arc::new(evalf);
+ // Warm the cache once
+ let first_output = evalf();
+ tx.send(first_output).ok();
+
+ for _ in 1..iterations {
+ let tx = tx.clone();
+ let semaphore = semaphore.clone();
+ let evalf = evalf.clone();
+ executor
+ .spawn(async move {
+ let _guard = semaphore.acquire().await;
+ let output = evalf();
+ tx.send(output).ok();
+ })
+ .detach();
+ }
+ drop(tx);
+
+ let mut failed_evals = Vec::new();
+ let mut errored_evals = HashMap::new();
+ while let Ok(output) = rx.recv() {
+ processor.process(&output);
+
+ match output.outcome {
+ OutcomeKind::Passed => {}
+ OutcomeKind::Failed => {
+ failed_count += 1;
+ failed_evals.push(output);
+ }
+ OutcomeKind::Error => {
+ failed_count += 1;
+ *errored_evals.entry(output.data).or_insert(0) += 1;
+ }
+ }
+
+ evaluated_count += 1;
+ report_progress(evaluated_count, failed_count, iterations);
+ }
+
+ let actual_pass_ratio = (iterations - failed_count) as f32 / iterations as f32;
+ println!("Actual pass ratio: {}\n", actual_pass_ratio);
+ if actual_pass_ratio < expected_pass_ratio {
+ for (error, count) in errored_evals {
+ println!("Eval errored {} times. Error: {}", count, error);
+ }
+
+ for failed in failed_evals {
+ println!("Eval failed");
+ println!("{}", failed.data);
+ }
+
+ panic!(
+ "Actual pass ratio: {}\nExpected pass ratio: {}",
+ actual_pass_ratio, expected_pass_ratio
+ );
+ }
+
+ processor.assert();
+}
@@ -1,12 +1,13 @@
[package]
name = "zed_extension_api"
-version = "0.7.0"
+version = "0.8.0"
description = "APIs for creating Zed extensions in Rust"
repository = "https://github.com/zed-industries/zed"
documentation = "https://docs.rs/zed_extension_api"
keywords = ["zed", "extension"]
edition.workspace = true
-publish = true
+# Change back to `true` when we're ready to publish v0.8.0.
+publish = false
license = "Apache-2.0"
[lints]
@@ -274,10 +274,6 @@ pub trait Extension: Send + Sync {
Err("`run_dap_locator` not implemented".to_string())
}
- // =========================================================================
- // Language Model Provider Methods
- // =========================================================================
-
/// Returns information about language model providers offered by this extension.
fn llm_providers(&self) -> Vec<LlmProviderInfo> {
Vec::new()
@@ -427,7 +423,7 @@ mod wit {
wit_bindgen::generate!({
skip: ["init-extension"],
- path: "./wit/since_v0.7.0",
+ path: "./wit/since_v0.8.0",
});
}
@@ -612,10 +608,6 @@ impl wit::Guest for Component {
extension().run_dap_locator(locator_name, build_task)
}
- // =========================================================================
- // Language Model Provider Methods
- // =========================================================================
-
fn llm_providers() -> Vec<LlmProviderInfo> {
extension().llm_providers()
}
@@ -0,0 +1,12 @@
+interface common {
+ /// A (half-open) range (`[start, end)`).
+ record range {
+ /// The start of the range (inclusive).
+ start: u32,
+ /// The end of the range (exclusive).
+ end: u32,
+ }
+
+ /// A list of environment variables.
+ type env-vars = list<tuple<string, string>>;
+}
@@ -0,0 +1,11 @@
+interface context-server {
+ /// Configuration for context server setup and installation.
+ record context-server-configuration {
+ /// Installation instructions in Markdown format.
+ installation-instructions: string,
+ /// JSON schema for settings validation.
+ settings-schema: string,
+ /// Default settings template.
+ default-settings: string,
+ }
+}
@@ -0,0 +1,123 @@
+interface dap {
+ use common.{env-vars};
+
+ /// Resolves a specified TcpArgumentsTemplate into TcpArguments
+ resolve-tcp-template: func(template: tcp-arguments-template) -> result<tcp-arguments, string>;
+
+ record launch-request {
+ program: string,
+ cwd: option<string>,
+ args: list<string>,
+ envs: env-vars,
+ }
+
+ record attach-request {
+ process-id: option<u32>,
+ }
+
+ variant debug-request {
+ launch(launch-request),
+ attach(attach-request)
+ }
+
+ record tcp-arguments {
+ port: u16,
+ host: u32,
+ timeout: option<u64>,
+ }
+
+ record tcp-arguments-template {
+ port: option<u16>,
+ host: option<u32>,
+ timeout: option<u64>,
+ }
+
+ /// Debug Config is the "highest-level" configuration for a debug session.
+ /// It comes from a new process modal UI; thus, it is essentially debug-adapter-agnostic.
+    /// The extension is expected to translate this generic configuration into something the adapter can debug (a debug scenario).
+ record debug-config {
+ /// Name of the debug task
+ label: string,
+ /// The debug adapter to use
+ adapter: string,
+ request: debug-request,
+ stop-on-entry: option<bool>,
+ }
+
+ record task-template {
+ /// Human readable name of the task to display in the UI.
+ label: string,
+ /// Executable command to spawn.
+ command: string,
+ args: list<string>,
+ env: env-vars,
+ cwd: option<string>,
+ }
+
+ /// A task template with substituted task variables.
+ type resolved-task = task-template;
+
+ /// A task template for building a debug target.
+ type build-task-template = task-template;
+
+ variant build-task-definition {
+ by-name(string),
+ template(build-task-definition-template-payload )
+ }
+ record build-task-definition-template-payload {
+ locator-name: option<string>,
+ template: build-task-template
+ }
+
+ /// Debug Scenario is the user-facing configuration type (used in debug.json). It is still concerned with what to debug and not necessarily how to do it (except for any
+ /// debug-adapter-specific configuration options).
+ record debug-scenario {
+        /// Unsubstituted label for the task.
+ label: string,
+ /// Name of the Debug Adapter this configuration is intended for.
+ adapter: string,
+        /// An optional build step to be run prior to starting a debug session. Build steps are used by Zed's locators to locate the executable to debug.
+ build: option<build-task-definition>,
+ /// JSON-encoded configuration for a given debug adapter.
+ config: string,
+ /// TCP connection parameters (if they were specified by user)
+ tcp-connection: option<tcp-arguments-template>,
+ }
+
+ enum start-debugging-request-arguments-request {
+ launch,
+ attach,
+ }
+
+ record debug-task-definition {
+        /// Unsubstituted label for the task.
+ label: string,
+ /// Name of the Debug Adapter this configuration is intended for.
+ adapter: string,
+ /// JSON-encoded configuration for a given debug adapter.
+ config: string,
+ /// TCP connection parameters (if they were specified by user)
+ tcp-connection: option<tcp-arguments-template>,
+ }
+
+ record start-debugging-request-arguments {
+ /// JSON-encoded configuration for a given debug adapter. It is specific to each debug adapter.
+        /// `configuration` will have its Zed variable references substituted prior to being passed to the debug adapter.
+ configuration: string,
+ request: start-debugging-request-arguments-request,
+ }
+
+ /// The lowest-level representation of a debug session, which specifies:
+ /// - How to start a debug adapter process
+ /// - How to start a debug session with it (using DAP protocol)
+ /// for a given debug scenario.
+ record debug-adapter-binary {
+ command: option<string>,
+ arguments: list<string>,
+ envs: env-vars,
+ cwd: option<string>,
+ /// Zed will use TCP transport if `connection` is specified.
+ connection: option<tcp-arguments>,
+ request-args: start-debugging-request-arguments
+ }
+}
@@ -0,0 +1,252 @@
+package zed:extension;
+
+world extension {
+ import context-server;
+ import dap;
+ import github;
+ import http-client;
+ import platform;
+ import process;
+ import nodejs;
+ import llm-provider;
+
+ use common.{env-vars, range};
+ use context-server.{context-server-configuration};
+ use dap.{attach-request, build-task-template, debug-config, debug-adapter-binary, debug-task-definition, debug-request, debug-scenario, launch-request, resolved-task, start-debugging-request-arguments-request};
+ use lsp.{completion, symbol};
+ use process.{command};
+ use slash-command.{slash-command, slash-command-argument-completion, slash-command-output};
+ use llm-provider.{
+ provider-info, model-info, completion-request,
+ credential-type, cache-configuration, completion-event, token-usage
+ };
+
+ /// Initializes the extension.
+ export init-extension: func();
+
+ /// The type of a downloaded file.
+ enum downloaded-file-type {
+ /// A gzipped file (`.gz`).
+ gzip,
+ /// A gzipped tar archive (`.tar.gz`).
+ gzip-tar,
+ /// A ZIP file (`.zip`).
+ zip,
+ /// An uncompressed file.
+ uncompressed,
+ }
+
+ /// The installation status for a language server.
+ variant language-server-installation-status {
+ /// The language server has no installation status.
+ none,
+ /// The language server is being downloaded.
+ downloading,
+ /// The language server is checking for updates.
+ checking-for-update,
+ /// The language server installation failed for specified reason.
+ failed(string),
+ }
+
+ record settings-location {
+ worktree-id: u64,
+ path: string,
+ }
+
+ import get-settings: func(path: option<settings-location>, category: string, key: option<string>) -> result<string, string>;
+
+ /// Downloads a file from the given URL and saves it to the given path within the extension's
+ /// working directory.
+ ///
+ /// The file will be extracted according to the given file type.
+ import download-file: func(url: string, file-path: string, file-type: downloaded-file-type) -> result<_, string>;
+
+ /// Makes the file at the given path executable.
+ import make-file-executable: func(filepath: string) -> result<_, string>;
+
+ /// Updates the installation status for the given language server.
+ import set-language-server-installation-status: func(language-server-name: string, status: language-server-installation-status);
+
+ /// A Zed worktree.
+ resource worktree {
+ /// Returns the ID of the worktree.
+ id: func() -> u64;
+ /// Returns the root path of the worktree.
+ root-path: func() -> string;
+ /// Returns the textual contents of the specified file in the worktree.
+ read-text-file: func(path: string) -> result<string, string>;
+ /// Returns the path to the given binary name, if one is present on the `$PATH`.
+ which: func(binary-name: string) -> option<string>;
+ /// Returns the current shell environment.
+ shell-env: func() -> env-vars;
+ }
+
+ /// A Zed project.
+ resource project {
+ /// Returns the IDs of all of the worktrees in this project.
+ worktree-ids: func() -> list<u64>;
+ }
+
+ /// A key-value store.
+ resource key-value-store {
+ /// Inserts an entry under the specified key.
+ insert: func(key: string, value: string) -> result<_, string>;
+ }
+
+ /// Returns the command used to start up the language server.
+ export language-server-command: func(language-server-id: string, worktree: borrow<worktree>) -> result<command, string>;
+
+ /// Returns the initialization options to pass to the language server on startup.
+ ///
+ /// The initialization options are represented as a JSON string.
+ export language-server-initialization-options: func(language-server-id: string, worktree: borrow<worktree>) -> result<option<string>, string>;
+
+ /// Returns the workspace configuration options to pass to the language server.
+ export language-server-workspace-configuration: func(language-server-id: string, worktree: borrow<worktree>) -> result<option<string>, string>;
+
+ /// Returns the initialization options to pass to the other language server.
+ export language-server-additional-initialization-options: func(language-server-id: string, target-language-server-id: string, worktree: borrow<worktree>) -> result<option<string>, string>;
+
+ /// Returns the workspace configuration options to pass to the other language server.
+ export language-server-additional-workspace-configuration: func(language-server-id: string, target-language-server-id: string, worktree: borrow<worktree>) -> result<option<string>, string>;
+
+ /// A label containing some code.
+ record code-label {
+ /// The source code to parse with Tree-sitter.
+ code: string,
+ /// The spans to display in the label.
+ spans: list<code-label-span>,
+ /// The range of the displayed label to include when filtering.
+ filter-range: range,
+ }
+
+ /// A span within a code label.
+ variant code-label-span {
+ /// A range into the parsed code.
+ code-range(range),
+ /// A span containing a code literal.
+ literal(code-label-span-literal),
+ }
+
+ /// A span containing a code literal.
+ record code-label-span-literal {
+ /// The literal text.
+ text: string,
+ /// The name of the highlight to use for this literal.
+ highlight-name: option<string>,
+ }
+
+ export labels-for-completions: func(language-server-id: string, completions: list<completion>) -> result<list<option<code-label>>, string>;
+ export labels-for-symbols: func(language-server-id: string, symbols: list<symbol>) -> result<list<option<code-label>>, string>;
+
+
+ /// Returns the completions that should be shown when completing the provided slash command with the given query.
+ export complete-slash-command-argument: func(command: slash-command, args: list<string>) -> result<list<slash-command-argument-completion>, string>;
+
+ /// Returns the output from running the provided slash command.
+ export run-slash-command: func(command: slash-command, args: list<string>, worktree: option<borrow<worktree>>) -> result<slash-command-output, string>;
+
+ /// Returns the command used to start up a context server.
+ export context-server-command: func(context-server-id: string, project: borrow<project>) -> result<command, string>;
+
+ /// Returns the configuration for a context server.
+ export context-server-configuration: func(context-server-id: string, project: borrow<project>) -> result<option<context-server-configuration>, string>;
+
+ /// Returns a list of packages as suggestions to be included in the `/docs`
+ /// search results.
+ ///
+ /// This can be used to provide completions for known packages (e.g., from the
+ /// local project or a registry) before a package has been indexed.
+ export suggest-docs-packages: func(provider-name: string) -> result<list<string>, string>;
+
+ /// Indexes the docs for the specified package.
+ export index-docs: func(provider-name: string, package-name: string, database: borrow<key-value-store>) -> result<_, string>;
+
+ /// Returns a configured debug adapter binary for a given debug task.
+ export get-dap-binary: func(adapter-name: string, config: debug-task-definition, user-installed-path: option<string>, worktree: borrow<worktree>) -> result<debug-adapter-binary, string>;
+ /// Returns the kind of a debug scenario (launch or attach).
+ export dap-request-kind: func(adapter-name: string, config: string) -> result<start-debugging-request-arguments-request, string>;
+ export dap-config-to-scenario: func(config: debug-config) -> result<debug-scenario, string>;
+ export dap-locator-create-scenario: func(locator-name: string, build-config-template: build-task-template, resolved-label: string, debug-adapter-name: string) -> option<debug-scenario>;
+ export run-dap-locator: func(locator-name: string, config: resolved-task) -> result<debug-request, string>;
+
+ // =========================================================================
+ // Language Model Provider Extension API
+ // =========================================================================
+
+ /// Returns information about language model providers offered by this extension.
+ export llm-providers: func() -> list<provider-info>;
+
+ /// Returns the models available for a provider.
+ export llm-provider-models: func(provider-id: string) -> result<list<model-info>, string>;
+
+ /// Returns markdown content to display in the provider's settings UI.
+ /// This can include setup instructions, links to documentation, etc.
+ export llm-provider-settings-markdown: func(provider-id: string) -> option<string>;
+
+ /// Check if the provider is authenticated.
+ export llm-provider-is-authenticated: func(provider-id: string) -> bool;
+
+ /// Attempt to authenticate the provider.
+ export llm-provider-authenticate: func(provider-id: string) -> result<_, string>;
+
+ /// Reset credentials for the provider.
+ export llm-provider-reset-credentials: func(provider-id: string) -> result<_, string>;
+
+ /// Count tokens for a request.
+ export llm-count-tokens: func(
+ provider-id: string,
+ model-id: string,
+ request: completion-request
+ ) -> result<u64, string>;
+
+ /// Start streaming a completion from the model.
+ /// Returns a stream ID that can be used with llm-stream-next and llm-stream-close.
+ export llm-stream-completion-start: func(
+ provider-id: string,
+ model-id: string,
+ request: completion-request
+ ) -> result<string, string>;
+
+ /// Get the next event from a completion stream.
+ /// Returns None when the stream is complete.
+ export llm-stream-completion-next: func(
+ stream-id: string
+ ) -> result<option<completion-event>, string>;
+
+ /// Close a completion stream and release its resources.
+ export llm-stream-completion-close: func(
+ stream-id: string
+ );
+
+ /// Get cache configuration for a model (if prompt caching is supported).
+ export llm-cache-configuration: func(
+ provider-id: string,
+ model-id: string
+ ) -> option<cache-configuration>;
+
+ // =========================================================================
+ // Language Model Provider Imports (callable by extensions)
+ // =========================================================================
+
+ /// Request a credential from the user.
+ /// Returns true if the credential was provided, false if the user cancelled.
+ import llm-request-credential: func(
+ provider-id: string,
+ credential-type: credential-type,
+ label: string,
+ placeholder: string
+ ) -> result<bool, string>;
+
+ /// Get a stored credential for this provider.
+ import llm-get-credential: func(provider-id: string) -> option<string>;
+
+ /// Store a credential for this provider.
+ import llm-store-credential: func(provider-id: string, value: string) -> result<_, string>;
+
+ /// Delete a stored credential for this provider.
+ import llm-delete-credential: func(provider-id: string) -> result<_, string>;
+
+ /// Read an environment variable.
+ import llm-get-env-var: func(name: string) -> option<string>;
+}
@@ -0,0 +1,35 @@
+interface github {
+ /// A GitHub release.
+ record github-release {
+ /// The version of the release.
+ version: string,
+ /// The list of assets attached to the release.
+ assets: list<github-release-asset>,
+ }
+
+ /// An asset from a GitHub release.
+ record github-release-asset {
+ /// The name of the asset.
+ name: string,
+ /// The download URL for the asset.
+ download-url: string,
+ }
+
+ /// The options used to filter down GitHub releases.
+ record github-release-options {
+ /// Whether releases without assets should be included.
+ require-assets: bool,
+ /// Whether pre-releases should be included.
+ pre-release: bool,
+ }
+
+ /// Returns the latest release for the given GitHub repository.
+ ///
+ /// Takes repo as a string in the form "<owner-name>/<repo-name>", for example: "zed-industries/zed".
+ latest-github-release: func(repo: string, options: github-release-options) -> result<github-release, string>;
+
+ /// Returns the GitHub release with the specified tag name for the given GitHub repository.
+ ///
+ /// Returns an error if a release with the given tag name does not exist.
+ github-release-by-tag-name: func(repo: string, tag: string) -> result<github-release, string>;
+}
@@ -0,0 +1,67 @@
+interface http-client {
+ /// An HTTP request.
+ record http-request {
+ /// The HTTP method for the request.
+ method: http-method,
+ /// The URL to which the request should be made.
+ url: string,
+ /// The headers for the request.
+ headers: list<tuple<string, string>>,
+ /// The request body.
+ body: option<list<u8>>,
+ /// The policy to use for redirects.
+ redirect-policy: redirect-policy,
+ }
+
+ /// HTTP methods.
+ enum http-method {
+ /// `GET`
+ get,
+ /// `HEAD`
+ head,
+ /// `POST`
+ post,
+ /// `PUT`
+ put,
+ /// `DELETE`
+ delete,
+ /// `OPTIONS`
+ options,
+ /// `PATCH`
+ patch,
+ }
+
+ /// The policy for dealing with redirects received from the server.
+ variant redirect-policy {
+ /// Redirects from the server will not be followed.
+ ///
+ /// This is the default behavior.
+ no-follow,
+ /// Redirects from the server will be followed up to the specified limit.
+ follow-limit(u32),
+ /// All redirects from the server will be followed.
+ follow-all,
+ }
+
+ /// An HTTP response.
+ record http-response {
+ /// The response headers.
+ headers: list<tuple<string, string>>,
+ /// The response body.
+ body: list<u8>,
+ }
+
+ /// Performs an HTTP request and returns the response.
+ fetch: func(req: http-request) -> result<http-response, string>;
+
+ /// An HTTP response stream.
+ resource http-response-stream {
+ /// Retrieves the next chunk of data from the response stream.
+ ///
+ /// Returns `Ok(None)` if the stream has ended.
+ next-chunk: func() -> result<option<list<u8>>, string>;
+ }
+
+ /// Performs an HTTP request and returns a response stream.
+ fetch-stream: func(req: http-request) -> result<http-response-stream, string>;
+}
@@ -0,0 +1,255 @@
+interface llm-provider {
+ /// Information about a language model provider.
+ record provider-info {
+ /// Unique identifier for the provider (e.g., "my-extension.my-provider").
+ id: string,
+ /// Display name for the provider.
+ name: string,
+ /// Icon name from Zed's icon set (optional).
+ icon: option<string>,
+ }
+
+ /// Capabilities of a language model.
+ record model-capabilities {
+ /// Whether the model supports image inputs.
+ supports-images: bool,
+ /// Whether the model supports tool/function calling.
+ supports-tools: bool,
+ /// Whether the model supports the "auto" tool choice.
+ supports-tool-choice-auto: bool,
+ /// Whether the model supports the "any" tool choice.
+ supports-tool-choice-any: bool,
+ /// Whether the model supports the "none" tool choice.
+ supports-tool-choice-none: bool,
+ /// Whether the model supports extended thinking/reasoning.
+ supports-thinking: bool,
+ /// The format for tool input schemas.
+ tool-input-format: tool-input-format,
+ }
+
+ /// Format for tool input schemas.
+ enum tool-input-format {
+ /// Standard JSON Schema format.
+ json-schema,
+ /// Simplified schema format for certain providers.
+ simplified,
+ }
+
+ /// Information about a specific model.
+ record model-info {
+ /// Unique identifier for the model.
+ id: string,
+ /// Display name for the model.
+ name: string,
+ /// Maximum input token count.
+ max-token-count: u64,
+ /// Maximum output tokens (optional).
+ max-output-tokens: option<u64>,
+ /// Model capabilities.
+ capabilities: model-capabilities,
+ /// Whether this is the default model for the provider.
+ is-default: bool,
+ /// Whether this is the default fast model.
+ is-default-fast: bool,
+ }
+
+ /// The role of a message participant.
+ enum message-role {
+ /// User message.
+ user,
+ /// Assistant message.
+ assistant,
+ /// System message.
+ system,
+ }
+
+ /// A message in a completion request.
+ record request-message {
+ /// The role of the message sender.
+ role: message-role,
+ /// The content of the message.
+ content: list<message-content>,
+ /// Whether to cache this message for prompt caching.
+ cache: bool,
+ }
+
+ /// Content within a message.
+ variant message-content {
+ /// Plain text content.
+ text(string),
+ /// Image content.
+ image(image-data),
+ /// A tool use request from the assistant.
+ tool-use(tool-use),
+ /// A tool result from the user.
+ tool-result(tool-result),
+ /// Thinking/reasoning content.
+ thinking(thinking-content),
+ /// Redacted/encrypted thinking content.
+ redacted-thinking(string),
+ }
+
+ /// Image data for vision models.
+ record image-data {
+ /// Base64-encoded image data.
+ source: string,
+ /// Image width in pixels (optional).
+ width: option<u32>,
+ /// Image height in pixels (optional).
+ height: option<u32>,
+ }
+
+ /// A tool use request from the model.
+ record tool-use {
+ /// Unique identifier for this tool use.
+ id: string,
+ /// The name of the tool being used.
+ name: string,
+ /// JSON string of the tool input arguments.
+ input: string,
+ /// Thought signature for providers that support it (e.g., Anthropic).
+ thought-signature: option<string>,
+ }
+
+ /// A tool result to send back to the model.
+ record tool-result {
+ /// The ID of the tool use this is a result for.
+ tool-use-id: string,
+ /// The name of the tool.
+ tool-name: string,
+ /// Whether this result represents an error.
+ is-error: bool,
+ /// The content of the result.
+ content: tool-result-content,
+ }
+
+ /// Content of a tool result.
+ variant tool-result-content {
+ /// Text result.
+ text(string),
+ /// Image result.
+ image(image-data),
+ }
+
+ /// Thinking/reasoning content from models that support extended thinking.
+ record thinking-content {
+ /// The thinking text.
+ text: string,
+ /// Signature for the thinking block (provider-specific).
+ signature: option<string>,
+ }
+
+ /// A tool definition for function calling.
+ record tool-definition {
+ /// The name of the tool.
+ name: string,
+ /// Description of what the tool does.
+ description: string,
+ /// JSON Schema for input parameters.
+ input-schema: string,
+ }
+
+ /// Tool choice preference for the model.
+ enum tool-choice {
+ /// Let the model decide whether to use tools.
+ auto,
+ /// Force the model to use at least one tool.
+ any,
+ /// Prevent the model from using tools.
+ none,
+ }
+
+ /// A completion request to send to the model.
+ record completion-request {
+ /// The messages in the conversation.
+ messages: list<request-message>,
+ /// Available tools for the model to use.
+ tools: list<tool-definition>,
+ /// Tool choice preference.
+ tool-choice: option<tool-choice>,
+ /// Stop sequences to end generation.
+ stop-sequences: list<string>,
+ /// Temperature for sampling (0.0-1.0).
+ temperature: option<f32>,
+ /// Whether thinking/reasoning is allowed.
+ thinking-allowed: bool,
+ /// Maximum tokens to generate.
+ max-tokens: option<u64>,
+ }
+
+ /// Events emitted during completion streaming.
+ variant completion-event {
+ /// Completion has started.
+ started,
+ /// Text content chunk.
+ text(string),
+ /// Thinking/reasoning content chunk.
+ thinking(thinking-content),
+ /// Redacted thinking (encrypted) chunk.
+ redacted-thinking(string),
+ /// Tool use request from the model.
+ tool-use(tool-use),
+ /// JSON parse error when parsing tool input.
+ tool-use-json-parse-error(tool-use-json-parse-error),
+ /// Completion stopped.
+ stop(stop-reason),
+ /// Token usage update.
+ usage(token-usage),
+ /// Reasoning details (provider-specific JSON).
+ reasoning-details(string),
+ }
+
+ /// Error information when tool use JSON parsing fails.
+ record tool-use-json-parse-error {
+ /// The tool use ID.
+ id: string,
+ /// The tool name.
+ tool-name: string,
+ /// The raw input that failed to parse.
+ raw-input: string,
+ /// The parse error message.
+ error: string,
+ }
+
+ /// Reason the completion stopped.
+ enum stop-reason {
+ /// The model finished generating.
+ end-turn,
+ /// Maximum tokens reached.
+ max-tokens,
+ /// The model wants to use a tool.
+ tool-use,
+ /// The model refused to respond.
+ refusal,
+ }
+
+ /// Token usage statistics.
+ record token-usage {
+ /// Number of input tokens used.
+ input-tokens: u64,
+ /// Number of output tokens generated.
+ output-tokens: u64,
+ /// Tokens used for cache creation (if supported).
+ cache-creation-input-tokens: option<u64>,
+ /// Tokens read from cache (if supported).
+ cache-read-input-tokens: option<u64>,
+ }
+
+ /// Credential types that can be requested.
+ enum credential-type {
+ /// An API key.
+ api-key,
+ /// An OAuth token.
+ oauth-token,
+ }
+
+ /// Cache configuration for prompt caching.
+ record cache-configuration {
+ /// Maximum number of cache anchors.
+ max-cache-anchors: u32,
+ /// Whether caching should be applied to tool definitions.
+ should-cache-tool-definitions: bool,
+ /// Minimum token count for a message to be cached.
+ min-total-token-count: u64,
+ }
+}
@@ -0,0 +1,90 @@
+interface lsp {
+ /// An LSP completion.
+ record completion {
+ label: string,
+ label-details: option<completion-label-details>,
+ detail: option<string>,
+ kind: option<completion-kind>,
+ insert-text-format: option<insert-text-format>,
+ }
+
+ /// The kind of an LSP completion.
+ variant completion-kind {
+ text,
+ method,
+ function,
+ %constructor,
+ field,
+ variable,
+ class,
+ %interface,
+ module,
+ property,
+ unit,
+ value,
+ %enum,
+ keyword,
+ snippet,
+ color,
+ file,
+ reference,
+ folder,
+ enum-member,
+ constant,
+ struct,
+ event,
+ operator,
+ type-parameter,
+ other(s32),
+ }
+
+ /// Label details for an LSP completion.
+ record completion-label-details {
+ detail: option<string>,
+ description: option<string>,
+ }
+
+ /// Defines how to interpret the insert text in a completion item.
+ variant insert-text-format {
+ plain-text,
+ snippet,
+ other(s32),
+ }
+
+ /// An LSP symbol.
+ record symbol {
+ kind: symbol-kind,
+ name: string,
+ }
+
+ /// The kind of an LSP symbol.
+ variant symbol-kind {
+ file,
+ module,
+ namespace,
+ %package,
+ class,
+ method,
+ property,
+ field,
+ %constructor,
+ %enum,
+ %interface,
+ function,
+ variable,
+ constant,
+ %string,
+ number,
+ boolean,
+ array,
+ object,
+ key,
+ null,
+ enum-member,
+ struct,
+ event,
+ operator,
+ type-parameter,
+ other(s32),
+ }
+}
@@ -0,0 +1,13 @@
+interface nodejs {
+ /// Returns the path to the Node binary used by Zed.
+ node-binary-path: func() -> result<string, string>;
+
+ /// Returns the latest version of the given NPM package.
+ npm-package-latest-version: func(package-name: string) -> result<string, string>;
+
+ /// Returns the installed version of the given NPM package, if it exists.
+ npm-package-installed-version: func(package-name: string) -> result<option<string>, string>;
+
+ /// Installs the specified NPM package.
+ npm-install-package: func(package-name: string, version: string) -> result<_, string>;
+}
@@ -0,0 +1,24 @@
+interface platform {
+ /// An operating system.
+ enum os {
+ /// macOS.
+ mac,
+ /// Linux.
+ linux,
+ /// Windows.
+ windows,
+ }
+
+ /// A platform architecture.
+ enum architecture {
+ /// AArch64 (e.g., Apple Silicon).
+ aarch64,
+ /// x86.
+ x86,
+ /// x86-64.
+ x8664,
+ }
+
+ /// Gets the current operating system and architecture.
+ current-platform: func() -> tuple<os, architecture>;
+}
@@ -0,0 +1,29 @@
+interface process {
+ use common.{env-vars};
+
+ /// A command.
+ record command {
+ /// The command to execute.
+ command: string,
+ /// The arguments to pass to the command.
+ args: list<string>,
+ /// The environment variables to set for the command.
+ env: env-vars,
+ }
+
+ /// The output of a finished process.
+ record output {
+ /// The status (exit code) of the process.
+ ///
+ /// On Unix, this will be `None` if the process was terminated by a signal.
+ status: option<s32>,
+ /// The data that the process wrote to stdout.
+ stdout: list<u8>,
+ /// The data that the process wrote to stderr.
+ stderr: list<u8>,
+ }
+
+ /// Executes the given command as a child process, waiting for it to finish
+ /// and collecting all of its output.
+ run-command: func(command: command) -> result<output, string>;
+}
@@ -0,0 +1,40 @@
+use serde::{Deserialize, Serialize};
+use std::{collections::HashMap, num::NonZeroU32};
+
+/// The settings for a particular language.
+#[derive(Debug, Serialize, Deserialize)]
+pub struct LanguageSettings {
+ /// How many columns a tab should occupy.
+ pub tab_size: NonZeroU32,
+}
+
+/// The settings for a particular language server.
+#[derive(Default, Debug, Serialize, Deserialize)]
+pub struct LspSettings {
+ /// The settings for the language server binary.
+ pub binary: Option<CommandSettings>,
+ /// The initialization options to pass to the language server.
+ pub initialization_options: Option<serde_json::Value>,
+ /// The settings to pass to language server.
+ pub settings: Option<serde_json::Value>,
+}
+
+/// The settings for a particular context server.
+#[derive(Default, Debug, Serialize, Deserialize, PartialEq, Eq)]
+pub struct ContextServerSettings {
+ /// The settings for the context server binary.
+ pub command: Option<CommandSettings>,
+ /// The settings to pass to the context server.
+ pub settings: Option<serde_json::Value>,
+}
+
+/// The settings for a command.
+#[derive(Debug, Serialize, Deserialize, PartialEq, Eq)]
+pub struct CommandSettings {
+ /// The path to the command.
+ pub path: Option<String>,
+ /// The arguments to pass to the command.
+ pub arguments: Option<Vec<String>>,
+ /// The environment variables.
+ pub env: Option<HashMap<String, String>>,
+}
@@ -0,0 +1,41 @@
+interface slash-command {
+ use common.{range};
+
+ /// A slash command for use in the Assistant.
+ record slash-command {
+ /// The name of the slash command.
+ name: string,
+ /// The description of the slash command.
+ description: string,
+ /// The tooltip text to display for the run button.
+ tooltip-text: string,
+ /// Whether this slash command requires an argument.
+ requires-argument: bool,
+ }
+
+ /// The output of a slash command.
+ record slash-command-output {
+ /// The text produced by the slash command.
+ text: string,
+ /// The list of sections to show in the slash command placeholder.
+ sections: list<slash-command-output-section>,
+ }
+
+ /// A section in the slash command output.
+ record slash-command-output-section {
+ /// The range this section occupies.
+ range: range,
+ /// The label to display in the placeholder for this section.
+ label: string,
+ }
+
+ /// A completion for a slash command argument.
+ record slash-command-argument-completion {
+ /// The label to display for this completion.
+ label: string,
+ /// The new text that should be inserted into the command when this completion is accepted.
+ new-text: string,
+ /// Whether the command should be run when accepting this completion.
+ run-command: bool,
+ }
+}
@@ -8,6 +8,7 @@ mod since_v0_4_0;
mod since_v0_5_0;
mod since_v0_6_0;
mod since_v0_7_0;
+mod since_v0_8_0;
use dap::DebugRequest;
use extension::{DebugTaskDefinition, KeyValueStoreDelegate, WorktreeDelegate};
use gpui::BackgroundExecutor;
@@ -16,12 +17,12 @@ use lsp::LanguageServerName;
use release_channel::ReleaseChannel;
use task::{DebugScenario, SpawnInTerminal, TaskTemplate, ZedDebugConfig};
-use crate::wasm_host::wit::since_v0_7_0::dap::StartDebuggingRequestArgumentsRequest;
+use crate::wasm_host::wit::since_v0_8_0::dap::StartDebuggingRequestArgumentsRequest;
use super::{WasmState, wasm_engine};
use anyhow::{Context as _, Result, anyhow};
use semver::Version;
-use since_v0_7_0 as latest;
+use since_v0_8_0 as latest;
use std::{ops::RangeInclusive, path::PathBuf, sync::Arc};
use wasmtime::{
Store,
@@ -109,6 +110,7 @@ pub fn authorize_access_to_unreleased_wasm_api_version(
}
pub enum Extension {
+ V0_8_0(since_v0_8_0::Extension),
V0_7_0(since_v0_7_0::Extension),
V0_6_0(since_v0_6_0::Extension),
V0_5_0(since_v0_5_0::Extension),
@@ -133,10 +135,21 @@ impl Extension {
let _ = release_channel;
if version >= latest::MIN_VERSION {
+ authorize_access_to_unreleased_wasm_api_version(release_channel)?;
+
let extension =
latest::Extension::instantiate_async(store, component, latest::linker(executor))
.await
.context("failed to instantiate wasm extension")?;
+ Ok(Self::V0_8_0(extension))
+ } else if version >= since_v0_7_0::MIN_VERSION {
+ let extension = since_v0_7_0::Extension::instantiate_async(
+ store,
+ component,
+ since_v0_7_0::linker(executor),
+ )
+ .await
+ .context("failed to instantiate wasm extension")?;
Ok(Self::V0_7_0(extension))
} else if version >= since_v0_6_0::MIN_VERSION {
let extension = since_v0_6_0::Extension::instantiate_async(
@@ -224,6 +237,7 @@ impl Extension {
pub async fn call_init_extension(&self, store: &mut Store<WasmState>) -> Result<()> {
match self {
+ Extension::V0_8_0(ext) => ext.call_init_extension(store).await,
Extension::V0_7_0(ext) => ext.call_init_extension(store).await,
Extension::V0_6_0(ext) => ext.call_init_extension(store).await,
Extension::V0_5_0(ext) => ext.call_init_extension(store).await,
@@ -245,10 +259,14 @@ impl Extension {
resource: Resource<Arc<dyn WorktreeDelegate>>,
) -> Result<Result<Command, String>> {
match self {
- Extension::V0_7_0(ext) => {
+ Extension::V0_8_0(ext) => {
ext.call_language_server_command(store, &language_server_id.0, resource)
.await
}
+ Extension::V0_7_0(ext) => Ok(ext
+ .call_language_server_command(store, &language_server_id.0, resource)
+ .await?
+ .map(Into::into)),
Extension::V0_6_0(ext) => {
ext.call_language_server_command(store, &language_server_id.0, resource)
.await
@@ -311,6 +329,14 @@ impl Extension {
resource: Resource<Arc<dyn WorktreeDelegate>>,
) -> Result<Result<Option<String>, String>> {
match self {
+ Extension::V0_8_0(ext) => {
+ ext.call_language_server_initialization_options(
+ store,
+ &language_server_id.0,
+ resource,
+ )
+ .await
+ }
Extension::V0_7_0(ext) => {
ext.call_language_server_initialization_options(
store,
@@ -408,6 +434,14 @@ impl Extension {
resource: Resource<Arc<dyn WorktreeDelegate>>,
) -> Result<Result<Option<String>, String>> {
match self {
+ Extension::V0_8_0(ext) => {
+ ext.call_language_server_workspace_configuration(
+ store,
+ &language_server_id.0,
+ resource,
+ )
+ .await
+ }
Extension::V0_7_0(ext) => {
ext.call_language_server_workspace_configuration(
store,
@@ -484,6 +518,15 @@ impl Extension {
resource: Resource<Arc<dyn WorktreeDelegate>>,
) -> Result<Result<Option<String>, String>> {
match self {
+ Extension::V0_8_0(ext) => {
+ ext.call_language_server_additional_initialization_options(
+ store,
+ &language_server_id.0,
+ &target_language_server_id.0,
+ resource,
+ )
+ .await
+ }
Extension::V0_7_0(ext) => {
ext.call_language_server_additional_initialization_options(
store,
@@ -537,6 +580,15 @@ impl Extension {
resource: Resource<Arc<dyn WorktreeDelegate>>,
) -> Result<Result<Option<String>, String>> {
match self {
+ Extension::V0_8_0(ext) => {
+ ext.call_language_server_additional_workspace_configuration(
+ store,
+ &language_server_id.0,
+ &target_language_server_id.0,
+ resource,
+ )
+ .await
+ }
Extension::V0_7_0(ext) => {
ext.call_language_server_additional_workspace_configuration(
store,
@@ -589,10 +641,24 @@ impl Extension {
completions: Vec<latest::Completion>,
) -> Result<Result<Vec<Option<CodeLabel>>, String>> {
match self {
- Extension::V0_7_0(ext) => {
+ Extension::V0_8_0(ext) => {
ext.call_labels_for_completions(store, &language_server_id.0, &completions)
.await
}
+ Extension::V0_7_0(ext) => ext
+ .call_labels_for_completions(
+ store,
+ &language_server_id.0,
+ &completions
+ .iter()
+ .cloned()
+ .map(Into::into)
+ .collect::<Vec<_>>(),
+ )
+ .await
+ .map(|res| {
+ res.map(|labels| labels.into_iter().map(|l| l.map(Into::into)).collect())
+ }),
Extension::V0_6_0(ext) => Ok(ext
.call_labels_for_completions(
store,
@@ -695,10 +761,20 @@ impl Extension {
symbols: Vec<latest::Symbol>,
) -> Result<Result<Vec<Option<CodeLabel>>, String>> {
match self {
- Extension::V0_7_0(ext) => {
+ Extension::V0_8_0(ext) => {
ext.call_labels_for_symbols(store, &language_server_id.0, &symbols)
.await
}
+ Extension::V0_7_0(ext) => ext
+ .call_labels_for_symbols(
+ store,
+ &language_server_id.0,
+ &symbols.iter().cloned().map(Into::into).collect::<Vec<_>>(),
+ )
+ .await
+ .map(|res| {
+ res.map(|labels| labels.into_iter().map(|l| l.map(Into::into)).collect())
+ }),
Extension::V0_6_0(ext) => Ok(ext
.call_labels_for_symbols(
store,
@@ -801,10 +877,17 @@ impl Extension {
arguments: &[String],
) -> Result<Result<Vec<SlashCommandArgumentCompletion>, String>> {
match self {
- Extension::V0_7_0(ext) => {
+ Extension::V0_8_0(ext) => {
ext.call_complete_slash_command_argument(store, command, arguments)
.await
}
+ Extension::V0_7_0(ext) => {
+ let command: since_v0_7_0::slash_command::SlashCommand = command.into();
+ Ok(ext
+ .call_complete_slash_command_argument(store, &command, arguments)
+ .await?
+ .map(|completions| completions.into_iter().map(Into::into).collect()))
+ }
Extension::V0_6_0(ext) => {
ext.call_complete_slash_command_argument(store, command, arguments)
.await
@@ -843,10 +926,17 @@ impl Extension {
resource: Option<Resource<Arc<dyn WorktreeDelegate>>>,
) -> Result<Result<SlashCommandOutput, String>> {
match self {
- Extension::V0_7_0(ext) => {
+ Extension::V0_8_0(ext) => {
ext.call_run_slash_command(store, command, arguments, resource)
.await
}
+ Extension::V0_7_0(ext) => {
+ let command: since_v0_7_0::slash_command::SlashCommand = command.into();
+ Ok(ext
+ .call_run_slash_command(store, &command, arguments, resource)
+ .await?
+ .map(Into::into))
+ }
Extension::V0_6_0(ext) => {
ext.call_run_slash_command(store, command, arguments, resource)
.await
@@ -884,10 +974,14 @@ impl Extension {
project: Resource<ExtensionProject>,
) -> Result<Result<Command, String>> {
match self {
- Extension::V0_7_0(ext) => {
+ Extension::V0_8_0(ext) => {
ext.call_context_server_command(store, &context_server_id, project)
.await
}
+ Extension::V0_7_0(ext) => Ok(ext
+ .call_context_server_command(store, &context_server_id, project)
+ .await?
+ .map(Into::into)),
Extension::V0_6_0(ext) => {
ext.call_context_server_command(store, &context_server_id, project)
.await
@@ -924,10 +1018,14 @@ impl Extension {
project: Resource<ExtensionProject>,
) -> Result<Result<Option<ContextServerConfiguration>, String>> {
match self {
- Extension::V0_7_0(ext) => {
+ Extension::V0_8_0(ext) => {
ext.call_context_server_configuration(store, &context_server_id, project)
.await
}
+ Extension::V0_7_0(ext) => Ok(ext
+ .call_context_server_configuration(store, &context_server_id, project)
+ .await?
+ .map(|opt| opt.map(Into::into))),
Extension::V0_6_0(ext) => {
ext.call_context_server_configuration(store, &context_server_id, project)
.await
@@ -954,6 +1052,7 @@ impl Extension {
provider: &str,
) -> Result<Result<Vec<String>, String>> {
match self {
+ Extension::V0_8_0(ext) => ext.call_suggest_docs_packages(store, provider).await,
Extension::V0_7_0(ext) => ext.call_suggest_docs_packages(store, provider).await,
Extension::V0_6_0(ext) => ext.call_suggest_docs_packages(store, provider).await,
Extension::V0_5_0(ext) => ext.call_suggest_docs_packages(store, provider).await,
@@ -975,6 +1074,10 @@ impl Extension {
kv_store: Resource<Arc<dyn KeyValueStoreDelegate>>,
) -> Result<Result<(), String>> {
match self {
+ Extension::V0_8_0(ext) => {
+ ext.call_index_docs(store, provider, package_name, kv_store)
+ .await
+ }
Extension::V0_7_0(ext) => {
ext.call_index_docs(store, provider, package_name, kv_store)
.await
@@ -1017,7 +1120,7 @@ impl Extension {
resource: Resource<Arc<dyn WorktreeDelegate>>,
) -> Result<Result<DebugAdapterBinary, String>> {
match self {
- Extension::V0_7_0(ext) => {
+ Extension::V0_8_0(ext) => {
let dap_binary = ext
.call_get_dap_binary(
store,
@@ -1031,6 +1134,20 @@ impl Extension {
Ok(Ok(dap_binary))
}
+ Extension::V0_7_0(ext) => {
+ let dap_binary = ext
+ .call_get_dap_binary(
+ store,
+ &adapter_name,
+ &task.try_into()?,
+ user_installed_path.as_ref().and_then(|p| p.to_str()),
+ resource,
+ )
+ .await?
+ .map_err(|e| anyhow!("{e:?}"))?;
+
+ Ok(Ok(dap_binary.into()))
+ }
Extension::V0_6_0(ext) => {
let dap_binary = ext
.call_get_dap_binary(
@@ -1055,15 +1172,25 @@ impl Extension {
config: serde_json::Value,
) -> Result<Result<StartDebuggingRequestArgumentsRequest, String>> {
match self {
+ Extension::V0_8_0(ext) => {
+ let config =
+ serde_json::to_string(&config).context("Adapter config is not a valid JSON")?;
+ let result = ext
+ .call_dap_request_kind(store, &adapter_name, &config)
+ .await?
+ .map_err(|e| anyhow!("{e:?}"))?;
+
+ Ok(Ok(result))
+ }
Extension::V0_7_0(ext) => {
let config =
serde_json::to_string(&config).context("Adapter config is not a valid JSON")?;
- let dap_binary = ext
+ let result = ext
.call_dap_request_kind(store, &adapter_name, &config)
.await?
.map_err(|e| anyhow!("{e:?}"))?;
- Ok(Ok(dap_binary))
+ Ok(Ok(result.into()))
}
Extension::V0_6_0(ext) => {
let config =
@@ -1084,14 +1211,23 @@ impl Extension {
config: ZedDebugConfig,
) -> Result<Result<DebugScenario, String>> {
match self {
- Extension::V0_7_0(ext) => {
+ Extension::V0_8_0(ext) => {
let config = config.into();
- let dap_binary = ext
+ let result = ext
.call_dap_config_to_scenario(store, &config)
.await?
.map_err(|e| anyhow!("{e:?}"))?;
- Ok(Ok(dap_binary.try_into()?))
+ Ok(Ok(result.try_into()?))
+ }
+ Extension::V0_7_0(ext) => {
+ let config: since_v0_7_0::dap::DebugConfig = config.into();
+ let result = ext
+ .call_dap_config_to_scenario(store, &config)
+ .await?
+ .map_err(|e| anyhow!("{e:?}"))?;
+
+ Ok(Ok(result.try_into()?))
}
Extension::V0_6_0(ext) => {
let config = config.into();
@@ -1114,9 +1250,9 @@ impl Extension {
debug_adapter_name: String,
) -> Result<Option<DebugScenario>> {
match self {
- Extension::V0_7_0(ext) => {
+ Extension::V0_8_0(ext) => {
let build_config_template = build_config_template.into();
- let dap_binary = ext
+ let result = ext
.call_dap_locator_create_scenario(
store,
&locator_name,
@@ -1126,7 +1262,22 @@ impl Extension {
)
.await?;
- Ok(dap_binary.map(TryInto::try_into).transpose()?)
+ Ok(result.map(TryInto::try_into).transpose()?)
+ }
+ Extension::V0_7_0(ext) => {
+ let build_config_template: since_v0_7_0::dap::BuildTaskTemplate =
+ build_config_template.into();
+ let result = ext
+ .call_dap_locator_create_scenario(
+ store,
+ &locator_name,
+ &build_config_template,
+ &resolved_label,
+ &debug_adapter_name,
+ )
+ .await?;
+
+ Ok(result.map(TryInto::try_into).transpose()?)
}
Extension::V0_6_0(ext) => {
let build_config_template = build_config_template.into();
@@ -1183,7 +1334,13 @@ impl Extension {
store: &mut Store<WasmState>,
) -> Result<Vec<latest::llm_provider::ProviderInfo>> {
match self {
- Extension::V0_7_0(ext) => ext.call_llm_providers(store).await,
+ Extension::V0_8_0(ext) => ext.call_llm_providers(store).await,
+ Extension::V0_7_0(ext) => Ok(ext
+ .call_llm_providers(store)
+ .await?
+ .into_iter()
+ .map(Into::into)
+ .collect()),
_ => Ok(Vec::new()),
}
}
@@ -1194,7 +1351,11 @@ impl Extension {
provider_id: &str,
) -> Result<Result<Vec<latest::llm_provider::ModelInfo>, String>> {
match self {
- Extension::V0_7_0(ext) => ext.call_llm_provider_models(store, provider_id).await,
+ Extension::V0_8_0(ext) => ext.call_llm_provider_models(store, provider_id).await,
+ Extension::V0_7_0(ext) => Ok(ext
+ .call_llm_provider_models(store, provider_id)
+ .await?
+ .map(|models| models.into_iter().map(Into::into).collect())),
_ => anyhow::bail!("`llm_provider_models` not available prior to v0.7.0"),
}
}
@@ -1205,6 +1366,10 @@ impl Extension {
provider_id: &str,
) -> Result<Option<String>> {
match self {
+ Extension::V0_8_0(ext) => {
+ ext.call_llm_provider_settings_markdown(store, provider_id)
+ .await
+ }
Extension::V0_7_0(ext) => {
ext.call_llm_provider_settings_markdown(store, provider_id)
.await
@@ -1219,6 +1384,10 @@ impl Extension {
provider_id: &str,
) -> Result<bool> {
match self {
+ Extension::V0_8_0(ext) => {
+ ext.call_llm_provider_is_authenticated(store, provider_id)
+ .await
+ }
Extension::V0_7_0(ext) => {
ext.call_llm_provider_is_authenticated(store, provider_id)
.await
@@ -1233,6 +1402,7 @@ impl Extension {
provider_id: &str,
) -> Result<Result<(), String>> {
match self {
+ Extension::V0_8_0(ext) => ext.call_llm_provider_authenticate(store, provider_id).await,
Extension::V0_7_0(ext) => ext.call_llm_provider_authenticate(store, provider_id).await,
_ => anyhow::bail!("`llm_provider_authenticate` not available prior to v0.7.0"),
}
@@ -1244,6 +1414,10 @@ impl Extension {
provider_id: &str,
) -> Result<Result<(), String>> {
match self {
+ Extension::V0_8_0(ext) => {
+ ext.call_llm_provider_reset_credentials(store, provider_id)
+ .await
+ }
Extension::V0_7_0(ext) => {
ext.call_llm_provider_reset_credentials(store, provider_id)
.await
@@ -1260,10 +1434,15 @@ impl Extension {
request: &latest::llm_provider::CompletionRequest,
) -> Result<Result<u64, String>> {
match self {
- Extension::V0_7_0(ext) => {
+ Extension::V0_8_0(ext) => {
ext.call_llm_count_tokens(store, provider_id, model_id, request)
.await
}
+ Extension::V0_7_0(ext) => {
+ let request: since_v0_7_0::llm_provider::CompletionRequest = request.clone().into();
+ ext.call_llm_count_tokens(store, provider_id, model_id, &request)
+ .await
+ }
_ => anyhow::bail!("`llm_count_tokens` not available prior to v0.7.0"),
}
}
@@ -1276,10 +1455,15 @@ impl Extension {
request: &latest::llm_provider::CompletionRequest,
) -> Result<Result<String, String>> {
match self {
- Extension::V0_7_0(ext) => {
+ Extension::V0_8_0(ext) => {
ext.call_llm_stream_completion_start(store, provider_id, model_id, request)
.await
}
+ Extension::V0_7_0(ext) => {
+ let request: since_v0_7_0::llm_provider::CompletionRequest = request.clone().into();
+ ext.call_llm_stream_completion_start(store, provider_id, model_id, &request)
+ .await
+ }
_ => anyhow::bail!("`llm_stream_completion_start` not available prior to v0.7.0"),
}
}
@@ -1290,7 +1474,11 @@ impl Extension {
stream_id: &str,
) -> Result<Result<Option<latest::llm_provider::CompletionEvent>, String>> {
match self {
- Extension::V0_7_0(ext) => ext.call_llm_stream_completion_next(store, stream_id).await,
+ Extension::V0_8_0(ext) => ext.call_llm_stream_completion_next(store, stream_id).await,
+ Extension::V0_7_0(ext) => Ok(ext
+ .call_llm_stream_completion_next(store, stream_id)
+ .await?
+ .map(|opt| opt.map(Into::into))),
_ => anyhow::bail!("`llm_stream_completion_next` not available prior to v0.7.0"),
}
}
@@ -1301,6 +1489,7 @@ impl Extension {
stream_id: &str,
) -> Result<()> {
match self {
+ Extension::V0_8_0(ext) => ext.call_llm_stream_completion_close(store, stream_id).await,
Extension::V0_7_0(ext) => ext.call_llm_stream_completion_close(store, stream_id).await,
_ => anyhow::bail!("`llm_stream_completion_close` not available prior to v0.7.0"),
}
@@ -1313,10 +1502,14 @@ impl Extension {
model_id: &str,
) -> Result<Option<latest::llm_provider::CacheConfiguration>> {
match self {
- Extension::V0_7_0(ext) => {
+ Extension::V0_8_0(ext) => {
ext.call_llm_cache_configuration(store, provider_id, model_id)
.await
}
+ Extension::V0_7_0(ext) => Ok(ext
+ .call_llm_cache_configuration(store, provider_id, model_id)
+ .await?
+ .map(Into::into)),
_ => Ok(None),
}
}
@@ -38,6 +38,7 @@ use util::{
use wasmtime::component::{Linker, Resource};
pub const MIN_VERSION: Version = Version::new(0, 7, 0);
+#[allow(dead_code)]
pub const MAX_VERSION: Version = Version::new(0, 8, 0);
wasmtime::component::bindgen!({
@@ -1203,3 +1204,581 @@ impl ExtensionImports for WasmState {
// =============================================================================
impl llm_provider::Host for WasmState {}
+
+// =============================================================================
+// LLM Provider Type Conversions between v0.7.0 and latest/v0.8.0 (both directions)
+// =============================================================================
+
+use super::since_v0_8_0 as latest;
+
+impl From<llm_provider::ProviderInfo> for latest::llm_provider::ProviderInfo {
+ fn from(value: llm_provider::ProviderInfo) -> Self {
+ Self {
+ id: value.id,
+ name: value.name,
+ icon: value.icon,
+ }
+ }
+}
+
+impl From<llm_provider::ModelInfo> for latest::llm_provider::ModelInfo {
+ fn from(value: llm_provider::ModelInfo) -> Self {
+ Self {
+ id: value.id,
+ name: value.name,
+ max_token_count: value.max_token_count,
+ max_output_tokens: value.max_output_tokens,
+ capabilities: value.capabilities.into(),
+ is_default: value.is_default,
+ is_default_fast: value.is_default_fast,
+ }
+ }
+}
+
+impl From<llm_provider::ModelCapabilities> for latest::llm_provider::ModelCapabilities {
+ fn from(value: llm_provider::ModelCapabilities) -> Self {
+ Self {
+ supports_images: value.supports_images,
+ supports_tools: value.supports_tools,
+ supports_tool_choice_auto: value.supports_tool_choice_auto,
+ supports_tool_choice_any: value.supports_tool_choice_any,
+ supports_tool_choice_none: value.supports_tool_choice_none,
+ supports_thinking: value.supports_thinking,
+ tool_input_format: value.tool_input_format.into(),
+ }
+ }
+}
+
+impl From<llm_provider::ToolInputFormat> for latest::llm_provider::ToolInputFormat {
+ fn from(value: llm_provider::ToolInputFormat) -> Self {
+ match value {
+ llm_provider::ToolInputFormat::JsonSchema => Self::JsonSchema,
+ llm_provider::ToolInputFormat::Simplified => Self::Simplified,
+ }
+ }
+}
+
+impl From<llm_provider::CompletionEvent> for latest::llm_provider::CompletionEvent {
+ fn from(value: llm_provider::CompletionEvent) -> Self {
+ match value {
+ llm_provider::CompletionEvent::Started => Self::Started,
+ llm_provider::CompletionEvent::Text(s) => Self::Text(s),
+ llm_provider::CompletionEvent::Thinking(t) => Self::Thinking(t.into()),
+ llm_provider::CompletionEvent::RedactedThinking(s) => Self::RedactedThinking(s),
+ llm_provider::CompletionEvent::ToolUse(t) => Self::ToolUse(t.into()),
+ llm_provider::CompletionEvent::ToolUseJsonParseError(e) => {
+ Self::ToolUseJsonParseError(e.into())
+ }
+ llm_provider::CompletionEvent::Stop(r) => Self::Stop(r.into()),
+ llm_provider::CompletionEvent::Usage(u) => Self::Usage(u.into()),
+ llm_provider::CompletionEvent::ReasoningDetails(s) => Self::ReasoningDetails(s),
+ }
+ }
+}
+
+impl From<llm_provider::ThinkingContent> for latest::llm_provider::ThinkingContent {
+ fn from(value: llm_provider::ThinkingContent) -> Self {
+ Self {
+ text: value.text,
+ signature: value.signature,
+ }
+ }
+}
+
+impl From<llm_provider::ToolUse> for latest::llm_provider::ToolUse {
+ fn from(value: llm_provider::ToolUse) -> Self {
+ Self {
+ id: value.id,
+ name: value.name,
+ input: value.input,
+ thought_signature: value.thought_signature,
+ }
+ }
+}
+
+impl From<llm_provider::ToolUseJsonParseError> for latest::llm_provider::ToolUseJsonParseError {
+ fn from(value: llm_provider::ToolUseJsonParseError) -> Self {
+ Self {
+ id: value.id,
+ tool_name: value.tool_name,
+ raw_input: value.raw_input,
+ error: value.error,
+ }
+ }
+}
+
+impl From<llm_provider::StopReason> for latest::llm_provider::StopReason {
+ fn from(value: llm_provider::StopReason) -> Self {
+ match value {
+ llm_provider::StopReason::EndTurn => Self::EndTurn,
+ llm_provider::StopReason::MaxTokens => Self::MaxTokens,
+ llm_provider::StopReason::ToolUse => Self::ToolUse,
+ llm_provider::StopReason::Refusal => Self::Refusal,
+ }
+ }
+}
+
+impl From<llm_provider::TokenUsage> for latest::llm_provider::TokenUsage {
+ fn from(value: llm_provider::TokenUsage) -> Self {
+ Self {
+ input_tokens: value.input_tokens,
+ output_tokens: value.output_tokens,
+ cache_creation_input_tokens: value.cache_creation_input_tokens,
+ cache_read_input_tokens: value.cache_read_input_tokens,
+ }
+ }
+}
+
+impl From<llm_provider::CacheConfiguration> for latest::llm_provider::CacheConfiguration {
+ fn from(value: llm_provider::CacheConfiguration) -> Self {
+ Self {
+ max_cache_anchors: value.max_cache_anchors,
+ should_cache_tool_definitions: value.should_cache_tool_definitions,
+ min_total_token_count: value.min_total_token_count,
+ }
+ }
+}
+
+// Conversions from latest (v0.8.0) -> v0.7.0, used when forwarding latest-shaped requests down to v0.7.0 extensions
+
+impl From<latest::llm_provider::CompletionRequest> for llm_provider::CompletionRequest {
+ fn from(value: latest::llm_provider::CompletionRequest) -> Self {
+ Self {
+ messages: value.messages.into_iter().map(Into::into).collect(),
+ tools: value.tools.into_iter().map(Into::into).collect(),
+ tool_choice: value.tool_choice.map(Into::into),
+ stop_sequences: value.stop_sequences,
+ temperature: value.temperature,
+ thinking_allowed: value.thinking_allowed,
+ max_tokens: value.max_tokens,
+ }
+ }
+}
+
+impl From<latest::llm_provider::RequestMessage> for llm_provider::RequestMessage {
+ fn from(value: latest::llm_provider::RequestMessage) -> Self {
+ Self {
+ role: value.role.into(),
+ content: value.content.into_iter().map(Into::into).collect(),
+ cache: value.cache,
+ }
+ }
+}
+
+impl From<latest::llm_provider::MessageRole> for llm_provider::MessageRole {
+ fn from(value: latest::llm_provider::MessageRole) -> Self {
+ match value {
+ latest::llm_provider::MessageRole::User => Self::User,
+ latest::llm_provider::MessageRole::Assistant => Self::Assistant,
+ latest::llm_provider::MessageRole::System => Self::System,
+ }
+ }
+}
+
+impl From<latest::llm_provider::MessageContent> for llm_provider::MessageContent {
+ fn from(value: latest::llm_provider::MessageContent) -> Self {
+ match value {
+ latest::llm_provider::MessageContent::Text(s) => Self::Text(s),
+ latest::llm_provider::MessageContent::Image(i) => Self::Image(i.into()),
+ latest::llm_provider::MessageContent::ToolUse(t) => Self::ToolUse(t.into()),
+ latest::llm_provider::MessageContent::ToolResult(t) => Self::ToolResult(t.into()),
+ latest::llm_provider::MessageContent::Thinking(t) => Self::Thinking(t.into()),
+ latest::llm_provider::MessageContent::RedactedThinking(s) => Self::RedactedThinking(s),
+ }
+ }
+}
+
+impl From<latest::llm_provider::ImageData> for llm_provider::ImageData {
+ fn from(value: latest::llm_provider::ImageData) -> Self {
+ Self {
+ source: value.source,
+ width: value.width,
+ height: value.height,
+ }
+ }
+}
+
+impl From<latest::llm_provider::ToolUse> for llm_provider::ToolUse {
+ fn from(value: latest::llm_provider::ToolUse) -> Self {
+ Self {
+ id: value.id,
+ name: value.name,
+ input: value.input,
+ thought_signature: value.thought_signature,
+ }
+ }
+}
+
+impl From<latest::llm_provider::ToolResult> for llm_provider::ToolResult {
+ fn from(value: latest::llm_provider::ToolResult) -> Self {
+ Self {
+ tool_use_id: value.tool_use_id,
+ tool_name: value.tool_name,
+ is_error: value.is_error,
+ content: value.content.into(),
+ }
+ }
+}
+
+impl From<latest::llm_provider::ToolResultContent> for llm_provider::ToolResultContent {
+ fn from(value: latest::llm_provider::ToolResultContent) -> Self {
+ match value {
+ latest::llm_provider::ToolResultContent::Text(s) => Self::Text(s),
+ latest::llm_provider::ToolResultContent::Image(i) => Self::Image(i.into()),
+ }
+ }
+}
+
+impl From<latest::llm_provider::ThinkingContent> for llm_provider::ThinkingContent {
+ fn from(value: latest::llm_provider::ThinkingContent) -> Self {
+ Self {
+ text: value.text,
+ signature: value.signature,
+ }
+ }
+}
+
+impl From<latest::llm_provider::ToolDefinition> for llm_provider::ToolDefinition {
+ fn from(value: latest::llm_provider::ToolDefinition) -> Self {
+ Self {
+ name: value.name,
+ description: value.description,
+ input_schema: value.input_schema,
+ }
+ }
+}
+
+impl From<latest::llm_provider::ToolChoice> for llm_provider::ToolChoice {
+ fn from(value: latest::llm_provider::ToolChoice) -> Self {
+ match value {
+ latest::llm_provider::ToolChoice::Auto => Self::Auto,
+ latest::llm_provider::ToolChoice::Any => Self::Any,
+ latest::llm_provider::ToolChoice::None => Self::None,
+ }
+ }
+}
+
+// =============================================================================
+// Command Type Conversions (v0.7.0 -> latest/v0.8.0)
+// =============================================================================
+
+impl From<Command> for latest::Command {
+ fn from(value: Command) -> Self {
+ Self {
+ command: value.command,
+ args: value.args,
+ env: value.env,
+ }
+ }
+}
+
+// =============================================================================
+// LSP Type Conversions (latest/v0.8.0 -> v0.7.0)
+// =============================================================================
+
+impl From<latest::lsp::Completion> for lsp::Completion {
+ fn from(value: latest::lsp::Completion) -> Self {
+ Self {
+ label: value.label,
+ label_details: value.label_details.map(Into::into),
+ detail: value.detail,
+ kind: value.kind.map(Into::into),
+ insert_text_format: value.insert_text_format.map(Into::into),
+ }
+ }
+}
+
+impl From<latest::lsp::CompletionLabelDetails> for lsp::CompletionLabelDetails {
+ fn from(value: latest::lsp::CompletionLabelDetails) -> Self {
+ Self {
+ detail: value.detail,
+ description: value.description,
+ }
+ }
+}
+
+impl From<latest::lsp::CompletionKind> for lsp::CompletionKind {
+ fn from(value: latest::lsp::CompletionKind) -> Self {
+ match value {
+ latest::lsp::CompletionKind::Text => Self::Text,
+ latest::lsp::CompletionKind::Method => Self::Method,
+ latest::lsp::CompletionKind::Function => Self::Function,
+ latest::lsp::CompletionKind::Constructor => Self::Constructor,
+ latest::lsp::CompletionKind::Field => Self::Field,
+ latest::lsp::CompletionKind::Variable => Self::Variable,
+ latest::lsp::CompletionKind::Class => Self::Class,
+ latest::lsp::CompletionKind::Interface => Self::Interface,
+ latest::lsp::CompletionKind::Module => Self::Module,
+ latest::lsp::CompletionKind::Property => Self::Property,
+ latest::lsp::CompletionKind::Unit => Self::Unit,
+ latest::lsp::CompletionKind::Value => Self::Value,
+ latest::lsp::CompletionKind::Enum => Self::Enum,
+ latest::lsp::CompletionKind::Keyword => Self::Keyword,
+ latest::lsp::CompletionKind::Snippet => Self::Snippet,
+ latest::lsp::CompletionKind::Color => Self::Color,
+ latest::lsp::CompletionKind::File => Self::File,
+ latest::lsp::CompletionKind::Reference => Self::Reference,
+ latest::lsp::CompletionKind::Folder => Self::Folder,
+ latest::lsp::CompletionKind::EnumMember => Self::EnumMember,
+ latest::lsp::CompletionKind::Constant => Self::Constant,
+ latest::lsp::CompletionKind::Struct => Self::Struct,
+ latest::lsp::CompletionKind::Event => Self::Event,
+ latest::lsp::CompletionKind::Operator => Self::Operator,
+ latest::lsp::CompletionKind::TypeParameter => Self::TypeParameter,
+ latest::lsp::CompletionKind::Other(n) => Self::Other(n),
+ }
+ }
+}
+
+impl From<latest::lsp::InsertTextFormat> for lsp::InsertTextFormat {
+ fn from(value: latest::lsp::InsertTextFormat) -> Self {
+ match value {
+ latest::lsp::InsertTextFormat::PlainText => Self::PlainText,
+ latest::lsp::InsertTextFormat::Snippet => Self::Snippet,
+ latest::lsp::InsertTextFormat::Other(n) => Self::Other(n),
+ }
+ }
+}
+
+impl From<latest::lsp::Symbol> for lsp::Symbol {
+ fn from(value: latest::lsp::Symbol) -> Self {
+ Self {
+ kind: value.kind.into(),
+ name: value.name,
+ }
+ }
+}
+
+impl From<latest::lsp::SymbolKind> for lsp::SymbolKind {
+ fn from(value: latest::lsp::SymbolKind) -> Self {
+ match value {
+ latest::lsp::SymbolKind::File => Self::File,
+ latest::lsp::SymbolKind::Module => Self::Module,
+ latest::lsp::SymbolKind::Namespace => Self::Namespace,
+ latest::lsp::SymbolKind::Package => Self::Package,
+ latest::lsp::SymbolKind::Class => Self::Class,
+ latest::lsp::SymbolKind::Method => Self::Method,
+ latest::lsp::SymbolKind::Property => Self::Property,
+ latest::lsp::SymbolKind::Field => Self::Field,
+ latest::lsp::SymbolKind::Constructor => Self::Constructor,
+ latest::lsp::SymbolKind::Enum => Self::Enum,
+ latest::lsp::SymbolKind::Interface => Self::Interface,
+ latest::lsp::SymbolKind::Function => Self::Function,
+ latest::lsp::SymbolKind::Variable => Self::Variable,
+ latest::lsp::SymbolKind::Constant => Self::Constant,
+ latest::lsp::SymbolKind::String => Self::String,
+ latest::lsp::SymbolKind::Number => Self::Number,
+ latest::lsp::SymbolKind::Boolean => Self::Boolean,
+ latest::lsp::SymbolKind::Array => Self::Array,
+ latest::lsp::SymbolKind::Object => Self::Object,
+ latest::lsp::SymbolKind::Key => Self::Key,
+ latest::lsp::SymbolKind::Null => Self::Null,
+ latest::lsp::SymbolKind::EnumMember => Self::EnumMember,
+ latest::lsp::SymbolKind::Struct => Self::Struct,
+ latest::lsp::SymbolKind::Event => Self::Event,
+ latest::lsp::SymbolKind::Operator => Self::Operator,
+ latest::lsp::SymbolKind::TypeParameter => Self::TypeParameter,
+ latest::lsp::SymbolKind::Other(n) => Self::Other(n),
+ }
+ }
+}
+
+// =============================================================================
+// CodeLabel Type Conversions (v0.7.0 -> latest/v0.8.0)
+// =============================================================================
+
+impl From<CodeLabel> for latest::CodeLabel {
+ fn from(value: CodeLabel) -> Self {
+ Self {
+ code: value.code,
+ spans: value.spans.into_iter().map(Into::into).collect(),
+ filter_range: value.filter_range.into(),
+ }
+ }
+}
+
+impl From<CodeLabelSpan> for latest::CodeLabelSpan {
+ fn from(value: CodeLabelSpan) -> Self {
+ match value {
+ CodeLabelSpan::CodeRange(r) => Self::CodeRange(r.into()),
+ CodeLabelSpan::Literal(l) => Self::Literal(l.into()),
+ }
+ }
+}
+
+impl From<CodeLabelSpanLiteral> for latest::CodeLabelSpanLiteral {
+ fn from(value: CodeLabelSpanLiteral) -> Self {
+ Self {
+ text: value.text,
+ highlight_name: value.highlight_name,
+ }
+ }
+}
+
+impl From<Range> for latest::Range {
+ fn from(value: Range) -> Self {
+ Self {
+ start: value.start,
+ end: value.end,
+ }
+ }
+}
+
+// =============================================================================
+// SlashCommand Type Conversions (latest/v0.8.0 -> v0.7.0)
+// =============================================================================
+
+impl From<&latest::SlashCommand> for slash_command::SlashCommand {
+ fn from(value: &latest::SlashCommand) -> Self {
+ Self {
+ name: value.name.clone(),
+ description: value.description.clone(),
+ tooltip_text: value.tooltip_text.clone(),
+ requires_argument: value.requires_argument,
+ }
+ }
+}
+
+// =============================================================================
+// SlashCommand Type Conversions (v0.7.0 -> latest/v0.8.0)
+// =============================================================================
+
+impl From<slash_command::SlashCommandArgumentCompletion>
+ for latest::SlashCommandArgumentCompletion
+{
+ fn from(value: slash_command::SlashCommandArgumentCompletion) -> Self {
+ Self {
+ label: value.label,
+ new_text: value.new_text,
+ run_command: value.run_command,
+ }
+ }
+}
+
+impl From<slash_command::SlashCommandOutput> for latest::SlashCommandOutput {
+ fn from(value: slash_command::SlashCommandOutput) -> Self {
+ Self {
+ sections: value.sections.into_iter().map(Into::into).collect(),
+ text: value.text,
+ }
+ }
+}
+
+impl From<SlashCommandOutputSection> for latest::slash_command::SlashCommandOutputSection {
+ fn from(value: SlashCommandOutputSection) -> Self {
+ Self {
+ range: value.range.into(),
+ label: value.label,
+ }
+ }
+}
+
+// =============================================================================
+// ContextServer Type Conversions (v0.7.0 -> latest/v0.8.0)
+// =============================================================================
+
+impl From<context_server::ContextServerConfiguration>
+ for latest::context_server::ContextServerConfiguration
+{
+ fn from(value: context_server::ContextServerConfiguration) -> Self {
+ Self {
+ installation_instructions: value.installation_instructions,
+ settings_schema: value.settings_schema,
+ default_settings: value.default_settings,
+ }
+ }
+}
+
+// =============================================================================
+// DAP Type Conversions (v0.7.0 -> latest/v0.8.0)
+// =============================================================================
+
+impl From<dap::DebugAdapterBinary> for latest::dap::DebugAdapterBinary {
+ fn from(value: dap::DebugAdapterBinary) -> Self {
+ Self {
+ command: value.command,
+ arguments: value.arguments,
+ envs: value.envs,
+ cwd: value.cwd,
+ connection: value.connection.map(|c| latest::dap::TcpArguments {
+ host: c.host,
+ port: c.port,
+ timeout: c.timeout,
+ }),
+ request_args: latest::dap::StartDebuggingRequestArguments {
+ configuration: value.request_args.configuration,
+ request: match value.request_args.request {
+ dap::StartDebuggingRequestArgumentsRequest::Launch => {
+ latest::dap::StartDebuggingRequestArgumentsRequest::Launch
+ }
+ dap::StartDebuggingRequestArgumentsRequest::Attach => {
+ latest::dap::StartDebuggingRequestArgumentsRequest::Attach
+ }
+ },
+ },
+ }
+ }
+}
+
+impl From<dap::StartDebuggingRequestArgumentsRequest>
+ for latest::dap::StartDebuggingRequestArgumentsRequest
+{
+ fn from(value: dap::StartDebuggingRequestArgumentsRequest) -> Self {
+ match value {
+ dap::StartDebuggingRequestArgumentsRequest::Launch => Self::Launch,
+ dap::StartDebuggingRequestArgumentsRequest::Attach => Self::Attach,
+ }
+ }
+}
+
+impl From<dap::DebugScenario> for latest::dap::DebugScenario {
+ fn from(value: dap::DebugScenario) -> Self {
+ Self {
+ adapter: value.adapter,
+ label: value.label,
+ build: value.build.map(|b| match b {
+ dap::BuildTaskDefinition::ByName(name) => {
+ latest::dap::BuildTaskDefinition::ByName(name)
+ }
+ dap::BuildTaskDefinition::Template(t) => {
+ latest::dap::BuildTaskDefinition::Template(
+ latest::dap::BuildTaskDefinitionTemplatePayload {
+ locator_name: t.locator_name,
+ template: latest::dap::BuildTaskTemplate {
+ label: t.template.label,
+ command: t.template.command,
+ args: t.template.args,
+ env: t.template.env,
+ cwd: t.template.cwd,
+ },
+ },
+ )
+ }
+ }),
+ config: value.config,
+ tcp_connection: value
+ .tcp_connection
+ .map(|t| latest::dap::TcpArgumentsTemplate {
+ host: t.host,
+ port: t.port,
+ timeout: t.timeout,
+ }),
+ }
+ }
+}
+
+impl From<dap::DebugRequest> for latest::dap::DebugRequest {
+ fn from(value: dap::DebugRequest) -> Self {
+ match value {
+ dap::DebugRequest::Attach(a) => Self::Attach(latest::dap::AttachRequest {
+ process_id: a.process_id,
+ }),
+ dap::DebugRequest::Launch(l) => Self::Launch(latest::dap::LaunchRequest {
+ program: l.program,
+ cwd: l.cwd,
+ args: l.args,
+ envs: l.envs,
+ }),
+ }
+ }
+}
@@ -0,0 +1,1203 @@
+use crate::wasm_host::wit::since_v0_8_0::{
+ dap::{
+ AttachRequest, BuildTaskDefinition, BuildTaskDefinitionTemplatePayload, LaunchRequest,
+ StartDebuggingRequestArguments, TcpArguments, TcpArgumentsTemplate,
+ },
+ lsp::{CompletionKind, CompletionLabelDetails, InsertTextFormat, SymbolKind},
+ slash_command::SlashCommandOutputSection,
+};
+use crate::wasm_host::{WasmState, wit::ToWasmtimeResult};
+use ::http_client::{AsyncBody, HttpRequestExt};
+use ::settings::{Settings, WorktreeId};
+use anyhow::{Context as _, Result, bail};
+use async_compression::futures::bufread::GzipDecoder;
+use async_tar::Archive;
+use async_trait::async_trait;
+use credentials_provider::CredentialsProvider;
+use extension::{
+ ExtensionLanguageServerProxy, KeyValueStoreDelegate, ProjectDelegate, WorktreeDelegate,
+};
+use futures::{AsyncReadExt, lock::Mutex};
+use futures::{FutureExt as _, io::BufReader};
+use gpui::{BackgroundExecutor, SharedString};
+use language::{BinaryStatus, LanguageName, language_settings::AllLanguageSettings};
+use project::project_settings::ProjectSettings;
+use semver::Version;
+use std::{
+ env,
+ net::Ipv4Addr,
+ path::{Path, PathBuf},
+ str::FromStr,
+ sync::{Arc, OnceLock},
+};
+use task::{SpawnInTerminal, ZedDebugConfig};
+use url::Url;
+use util::{
+ archive::extract_zip, fs::make_file_executable, maybe, paths::PathStyle, rel_path::RelPath,
+};
+use wasmtime::component::{Linker, Resource};
+
+// This module binds exactly one WIT schema version: 0.8.0.
+pub const MIN_VERSION: Version = Version::new(0, 8, 0);
+pub const MAX_VERSION: Version = Version::new(0, 8, 0);
+
+// Generates the host-side bindings for the `since_v0.8.0` WIT package. The
+// `with` table maps WIT resources onto the host delegate types below.
+wasmtime::component::bindgen!({
+    async: true,
+    trappable_imports: true,
+    path: "../extension_api/wit/since_v0.8.0",
+    with: {
+        "worktree": ExtensionWorktree,
+        "project": ExtensionProject,
+        "key-value-store": ExtensionKeyValueStore,
+        "zed:extension/http-client/http-response-stream": ExtensionHttpResponseStream
+    },
+});
+
+pub use self::zed::extension::*;
+
+// Build-script-generated serde types mirroring the WIT settings records.
+mod settings {
+    #![allow(dead_code)]
+    include!(concat!(env!("OUT_DIR"), "/since_v0.8.0/settings.rs"));
+}
+
+// Host-side representations backing the WIT resources declared above.
+pub type ExtensionWorktree = Arc<dyn WorktreeDelegate>;
+pub type ExtensionProject = Arc<dyn ProjectDelegate>;
+pub type ExtensionKeyValueStore = Arc<dyn KeyValueStoreDelegate>;
+pub type ExtensionHttpResponseStream = Arc<Mutex<::http_client::Response<AsyncBody>>>;
+
+// Returns the process-wide linker for this schema version, built lazily on
+// first use and shared by every extension instantiated against 0.8.0.
+pub fn linker(executor: &BackgroundExecutor) -> &'static Linker<WasmState> {
+    static LINKER: OnceLock<Linker<WasmState>> = OnceLock::new();
+    LINKER.get_or_init(|| super::new_linker(executor, Extension::add_to_linker))
+}
+
+// Widens a WIT `Range` (u32 endpoints) into a native half-open usize range.
+impl From<Range> for std::ops::Range<usize> {
+    fn from(range: Range) -> Self {
+        let start = range.start as usize;
+        let end = range.end as usize;
+        start..end
+    }
+}
+
+// Converts a WIT `Command` into the host-side `extension::Command`.
+impl From<Command> for extension::Command {
+    fn from(value: Command) -> Self {
+        Self {
+            command: value.command.into(),
+            args: value.args,
+            env: value.env,
+        }
+    }
+}
+
+// WIT -> host conversion for the launch/attach request discriminant.
+impl From<StartDebuggingRequestArgumentsRequest>
+    for extension::StartDebuggingRequestArgumentsRequest
+{
+    fn from(value: StartDebuggingRequestArgumentsRequest) -> Self {
+        match value {
+            StartDebuggingRequestArgumentsRequest::Launch => Self::Launch,
+            StartDebuggingRequestArgumentsRequest::Attach => Self::Attach,
+        }
+    }
+}
+// Fallible because the WIT side carries the configuration as a JSON string,
+// which must parse to a `serde_json::Value` on the host.
+impl TryFrom<StartDebuggingRequestArguments> for extension::StartDebuggingRequestArguments {
+    type Error = anyhow::Error;
+
+    fn try_from(value: StartDebuggingRequestArguments) -> Result<Self, Self::Error> {
+        Ok(Self {
+            configuration: serde_json::from_str(&value.configuration)?,
+            request: value.request.into(),
+        })
+    }
+}
+// WIT -> host TCP arguments; the WIT host field is an IPv4 address packed
+// into a u32 (see the `from_bits`/`to_bits` conversions below).
+impl From<TcpArguments> for extension::TcpArguments {
+    fn from(value: TcpArguments) -> Self {
+        Self {
+            host: value.host.into(),
+            port: value.port,
+            timeout: value.timeout,
+        }
+    }
+}
+
+// Host -> WIT: pack the optional IPv4 host into its u32 bit representation.
+impl From<extension::TcpArgumentsTemplate> for TcpArgumentsTemplate {
+    fn from(value: extension::TcpArgumentsTemplate) -> Self {
+        Self {
+            host: value.host.map(Ipv4Addr::to_bits),
+            port: value.port,
+            timeout: value.timeout,
+        }
+    }
+}
+
+// WIT -> host: unpack the u32 bit representation back into an `Ipv4Addr`.
+impl From<TcpArgumentsTemplate> for extension::TcpArgumentsTemplate {
+    fn from(value: TcpArgumentsTemplate) -> Self {
+        Self {
+            host: value.host.map(Ipv4Addr::from_bits),
+            port: value.port,
+            timeout: value.timeout,
+        }
+    }
+}
+
+// Host -> WIT debug task definition. Currently infallible in practice (the
+// TryFrom signature is kept for interface stability): shared strings are
+// stringified and the optional TCP template converted structurally.
+impl TryFrom<extension::DebugTaskDefinition> for DebugTaskDefinition {
+    type Error = anyhow::Error;
+    fn try_from(value: extension::DebugTaskDefinition) -> Result<Self, Self::Error> {
+        Ok(Self {
+            label: value.label.to_string(),
+            adapter: value.adapter.to_string(),
+            config: value.config.to_string(),
+            tcp_connection: value.tcp_connection.map(Into::into),
+        })
+    }
+}
+
+// Host task -> WIT debug request.
+impl From<task::DebugRequest> for DebugRequest {
+    fn from(value: task::DebugRequest) -> Self {
+        match value {
+            task::DebugRequest::Launch(launch_request) => Self::Launch(launch_request.into()),
+            task::DebugRequest::Attach(attach_request) => Self::Attach(attach_request.into()),
+        }
+    }
+}
+
+// WIT -> host task debug request.
+impl From<DebugRequest> for task::DebugRequest {
+    fn from(value: DebugRequest) -> Self {
+        match value {
+            DebugRequest::Launch(launch_request) => Self::Launch(launch_request.into()),
+            DebugRequest::Attach(attach_request) => Self::Attach(attach_request.into()),
+        }
+    }
+}
+
+// Host -> WIT launch request; the cwd path becomes a (lossy) UTF-8 string and
+// the env map is flattened into a list of pairs.
+impl From<task::LaunchRequest> for LaunchRequest {
+    fn from(value: task::LaunchRequest) -> Self {
+        Self {
+            program: value.program,
+            cwd: value.cwd.map(|p| p.to_string_lossy().into_owned()),
+            args: value.args,
+            envs: value.env.into_iter().collect(),
+        }
+    }
+}
+
+impl From<task::AttachRequest> for AttachRequest {
+    fn from(value: task::AttachRequest) -> Self {
+        Self {
+            process_id: value.process_id,
+        }
+    }
+}
+
+// WIT -> host launch request; inverse of the conversion above.
+impl From<LaunchRequest> for task::LaunchRequest {
+    fn from(value: LaunchRequest) -> Self {
+        Self {
+            program: value.program,
+            cwd: value.cwd.map(|p| p.into()),
+            args: value.args,
+            env: value.envs.into_iter().collect(),
+        }
+    }
+}
+impl From<AttachRequest> for task::AttachRequest {
+    fn from(value: AttachRequest) -> Self {
+        Self {
+            process_id: value.process_id,
+        }
+    }
+}
+
+// Host `ZedDebugConfig` -> WIT `DebugConfig`.
+impl From<ZedDebugConfig> for DebugConfig {
+    fn from(value: ZedDebugConfig) -> Self {
+        Self {
+            label: value.label.into(),
+            adapter: value.adapter.into(),
+            request: value.request.into(),
+            stop_on_entry: value.stop_on_entry,
+        }
+    }
+}
+// WIT -> host adapter binary. Fallible because `request_args` embeds a JSON
+// configuration string that must parse (see TryFrom<StartDebuggingRequestArguments>).
+impl TryFrom<DebugAdapterBinary> for extension::DebugAdapterBinary {
+    type Error = anyhow::Error;
+    fn try_from(value: DebugAdapterBinary) -> Result<Self, Self::Error> {
+        Ok(Self {
+            command: value.command,
+            arguments: value.arguments,
+            envs: value.envs.into_iter().collect(),
+            cwd: value.cwd.map(|s| s.into()),
+            connection: value.connection.map(Into::into),
+            request_args: value.request_args.try_into()?,
+        })
+    }
+}
+
+// WIT -> host build-task definition. The WIT side uses a payload struct for
+// the template variant; the host side uses struct-variant fields.
+impl From<BuildTaskDefinition> for extension::BuildTaskDefinition {
+    fn from(value: BuildTaskDefinition) -> Self {
+        match value {
+            BuildTaskDefinition::ByName(name) => Self::ByName(name.into()),
+            BuildTaskDefinition::Template(build_task_template) => Self::Template {
+                task_template: build_task_template.template.into(),
+                locator_name: build_task_template.locator_name.map(SharedString::from),
+            },
+        }
+    }
+}
+
+// Host -> WIT build-task definition; inverse of the conversion above.
+impl From<extension::BuildTaskDefinition> for BuildTaskDefinition {
+    fn from(value: extension::BuildTaskDefinition) -> Self {
+        match value {
+            extension::BuildTaskDefinition::ByName(name) => Self::ByName(name.into()),
+            extension::BuildTaskDefinition::Template {
+                task_template,
+                locator_name,
+            } => Self::Template(BuildTaskDefinitionTemplatePayload {
+                template: task_template.into(),
+                locator_name: locator_name.map(String::from),
+            }),
+        }
+    }
+}
+// WIT -> host build-task template; host-only fields not present in WIT are
+// filled from `Default`.
+impl From<BuildTaskTemplate> for extension::BuildTaskTemplate {
+    fn from(value: BuildTaskTemplate) -> Self {
+        Self {
+            label: value.label,
+            command: value.command,
+            args: value.args,
+            env: value.env.into_iter().collect(),
+            cwd: value.cwd,
+            ..Default::default()
+        }
+    }
+}
+// Host -> WIT build-task template; extra host-only fields are dropped.
+impl From<extension::BuildTaskTemplate> for BuildTaskTemplate {
+    fn from(value: extension::BuildTaskTemplate) -> Self {
+        Self {
+            label: value.label,
+            command: value.command,
+            args: value.args,
+            env: value.env.into_iter().collect(),
+            cwd: value.cwd,
+        }
+    }
+}
+
+// WIT -> host debug scenario. Fallible: `config` arrives as a JSON string and
+// must parse to a `serde_json::Value`.
+impl TryFrom<DebugScenario> for extension::DebugScenario {
+    type Error = anyhow::Error;
+
+    fn try_from(value: DebugScenario) -> std::result::Result<Self, Self::Error> {
+        Ok(Self {
+            adapter: value.adapter.into(),
+            label: value.label.into(),
+            build: value.build.map(Into::into),
+            config: serde_json::Value::from_str(&value.config)?,
+            tcp_connection: value.tcp_connection.map(Into::into),
+        })
+    }
+}
+
+// Host -> WIT debug scenario; the JSON config is serialized back to a string.
+impl From<extension::DebugScenario> for DebugScenario {
+    fn from(value: extension::DebugScenario) -> Self {
+        Self {
+            adapter: value.adapter.into(),
+            label: value.label.into(),
+            build: value.build.map(Into::into),
+            config: value.config.to_string(),
+            tcp_connection: value.tcp_connection.map(Into::into),
+        }
+    }
+}
+
+// Host terminal spawn -> WIT resolved task. Fails when no command is present.
+// On Windows the cwd's backslashes are normalized to forward slashes so the
+// extension sees a consistent path separator.
+impl TryFrom<SpawnInTerminal> for ResolvedTask {
+    type Error = anyhow::Error;
+
+    fn try_from(value: SpawnInTerminal) -> Result<Self, Self::Error> {
+        Ok(Self {
+            label: value.label,
+            command: value.command.context("missing command")?,
+            args: value.args,
+            env: value.env.into_iter().collect(),
+            cwd: value.cwd.map(|s| {
+                let s = s.to_string_lossy();
+                if cfg!(target_os = "windows") {
+                    s.replace('\\', "/")
+                } else {
+                    s.into_owned()
+                }
+            }),
+        })
+    }
+}
+
+// WIT -> host code label, converting each span and the filter range.
+impl From<CodeLabel> for extension::CodeLabel {
+    fn from(value: CodeLabel) -> Self {
+        Self {
+            code: value.code,
+            spans: value.spans.into_iter().map(Into::into).collect(),
+            filter_range: value.filter_range.into(),
+        }
+    }
+}
+
+// WIT -> host code label span (range into the code vs. literal text).
+impl From<CodeLabelSpan> for extension::CodeLabelSpan {
+    fn from(value: CodeLabelSpan) -> Self {
+        match value {
+            CodeLabelSpan::CodeRange(range) => Self::CodeRange(range.into()),
+            CodeLabelSpan::Literal(literal) => Self::Literal(literal.into()),
+        }
+    }
+}
+
+impl From<CodeLabelSpanLiteral> for extension::CodeLabelSpanLiteral {
+    fn from(value: CodeLabelSpanLiteral) -> Self {
+        Self {
+            text: value.text,
+            highlight_name: value.highlight_name,
+        }
+    }
+}
+
+// Host -> WIT LSP completion item.
+impl From<extension::Completion> for Completion {
+    fn from(value: extension::Completion) -> Self {
+        Self {
+            label: value.label,
+            label_details: value.label_details.map(Into::into),
+            detail: value.detail,
+            kind: value.kind.map(Into::into),
+            insert_text_format: value.insert_text_format.map(Into::into),
+        }
+    }
+}
+
+impl From<extension::CompletionLabelDetails> for CompletionLabelDetails {
+    fn from(value: extension::CompletionLabelDetails) -> Self {
+        Self {
+            detail: value.detail,
+            description: value.description,
+        }
+    }
+}
+
+// Exhaustive one-to-one mapping of completion kinds; unknown numeric kinds
+// travel through the `Other` escape hatch.
+impl From<extension::CompletionKind> for CompletionKind {
+    fn from(value: extension::CompletionKind) -> Self {
+        match value {
+            extension::CompletionKind::Text => Self::Text,
+            extension::CompletionKind::Method => Self::Method,
+            extension::CompletionKind::Function => Self::Function,
+            extension::CompletionKind::Constructor => Self::Constructor,
+            extension::CompletionKind::Field => Self::Field,
+            extension::CompletionKind::Variable => Self::Variable,
+            extension::CompletionKind::Class => Self::Class,
+            extension::CompletionKind::Interface => Self::Interface,
+            extension::CompletionKind::Module => Self::Module,
+            extension::CompletionKind::Property => Self::Property,
+            extension::CompletionKind::Unit => Self::Unit,
+            extension::CompletionKind::Value => Self::Value,
+            extension::CompletionKind::Enum => Self::Enum,
+            extension::CompletionKind::Keyword => Self::Keyword,
+            extension::CompletionKind::Snippet => Self::Snippet,
+            extension::CompletionKind::Color => Self::Color,
+            extension::CompletionKind::File => Self::File,
+            extension::CompletionKind::Reference => Self::Reference,
+            extension::CompletionKind::Folder => Self::Folder,
+            extension::CompletionKind::EnumMember => Self::EnumMember,
+            extension::CompletionKind::Constant => Self::Constant,
+            extension::CompletionKind::Struct => Self::Struct,
+            extension::CompletionKind::Event => Self::Event,
+            extension::CompletionKind::Operator => Self::Operator,
+            extension::CompletionKind::TypeParameter => Self::TypeParameter,
+            extension::CompletionKind::Other(value) => Self::Other(value),
+        }
+    }
+}
+
+impl From<extension::InsertTextFormat> for InsertTextFormat {
+    fn from(value: extension::InsertTextFormat) -> Self {
+        match value {
+            extension::InsertTextFormat::PlainText => Self::PlainText,
+            extension::InsertTextFormat::Snippet => Self::Snippet,
+            extension::InsertTextFormat::Other(value) => Self::Other(value),
+        }
+    }
+}
+
+// Host -> WIT LSP symbol.
+impl From<extension::Symbol> for Symbol {
+    fn from(value: extension::Symbol) -> Self {
+        Self {
+            kind: value.kind.into(),
+            name: value.name,
+        }
+    }
+}
+
+// Exhaustive one-to-one mapping of symbol kinds; unknown numeric kinds travel
+// through the `Other` escape hatch.
+impl From<extension::SymbolKind> for SymbolKind {
+    fn from(value: extension::SymbolKind) -> Self {
+        match value {
+            extension::SymbolKind::File => Self::File,
+            extension::SymbolKind::Module => Self::Module,
+            extension::SymbolKind::Namespace => Self::Namespace,
+            extension::SymbolKind::Package => Self::Package,
+            extension::SymbolKind::Class => Self::Class,
+            extension::SymbolKind::Method => Self::Method,
+            extension::SymbolKind::Property => Self::Property,
+            extension::SymbolKind::Field => Self::Field,
+            extension::SymbolKind::Constructor => Self::Constructor,
+            extension::SymbolKind::Enum => Self::Enum,
+            extension::SymbolKind::Interface => Self::Interface,
+            extension::SymbolKind::Function => Self::Function,
+            extension::SymbolKind::Variable => Self::Variable,
+            extension::SymbolKind::Constant => Self::Constant,
+            extension::SymbolKind::String => Self::String,
+            extension::SymbolKind::Number => Self::Number,
+            extension::SymbolKind::Boolean => Self::Boolean,
+            extension::SymbolKind::Array => Self::Array,
+            extension::SymbolKind::Object => Self::Object,
+            extension::SymbolKind::Key => Self::Key,
+            extension::SymbolKind::Null => Self::Null,
+            extension::SymbolKind::EnumMember => Self::EnumMember,
+            extension::SymbolKind::Struct => Self::Struct,
+            extension::SymbolKind::Event => Self::Event,
+            extension::SymbolKind::Operator => Self::Operator,
+            extension::SymbolKind::TypeParameter => Self::TypeParameter,
+            extension::SymbolKind::Other(value) => Self::Other(value),
+        }
+    }
+}
+
+// Host -> WIT slash command descriptor.
+impl From<extension::SlashCommand> for SlashCommand {
+    fn from(value: extension::SlashCommand) -> Self {
+        Self {
+            name: value.name,
+            description: value.description,
+            tooltip_text: value.tooltip_text,
+            requires_argument: value.requires_argument,
+        }
+    }
+}
+
+// WIT -> host slash command output, converting each section.
+impl From<SlashCommandOutput> for extension::SlashCommandOutput {
+    fn from(value: SlashCommandOutput) -> Self {
+        Self {
+            text: value.text,
+            sections: value.sections.into_iter().map(Into::into).collect(),
+        }
+    }
+}
+
+// WIT -> host output section; the u32 WIT range is widened to usize.
+impl From<SlashCommandOutputSection> for extension::SlashCommandOutputSection {
+    fn from(value: SlashCommandOutputSection) -> Self {
+        Self {
+            range: value.range.start as usize..value.range.end as usize,
+            label: value.label,
+        }
+    }
+}
+
+impl From<SlashCommandArgumentCompletion> for extension::SlashCommandArgumentCompletion {
+    fn from(value: SlashCommandArgumentCompletion) -> Self {
+        Self {
+            label: value.label,
+            new_text: value.new_text,
+            run_command: value.run_command,
+        }
+    }
+}
+
+// WIT -> host context-server configuration. Fallible: the settings schema
+// arrives as a JSON string and must parse to a `serde_json::Value`.
+impl TryFrom<ContextServerConfiguration> for extension::ContextServerConfiguration {
+    type Error = anyhow::Error;
+
+    fn try_from(value: ContextServerConfiguration) -> Result<Self, Self::Error> {
+        let settings_schema: serde_json::Value = serde_json::from_str(&value.settings_schema)
+            .context("Failed to parse settings_schema")?;
+
+        Ok(Self {
+            installation_instructions: value.installation_instructions,
+            default_settings: value.default_settings,
+            settings_schema,
+        })
+    }
+}
+
+// Host implementation of the WIT `key-value-store` resource: forwards writes
+// to the delegate stored in the resource table.
+impl HostKeyValueStore for WasmState {
+    async fn insert(
+        &mut self,
+        kv_store: Resource<ExtensionKeyValueStore>,
+        key: String,
+        value: String,
+    ) -> wasmtime::Result<Result<(), String>> {
+        let kv_store = self.table.get(&kv_store)?;
+        kv_store.insert(key, value).await.to_wasmtime_result()
+    }
+
+    async fn drop(&mut self, _worktree: Resource<ExtensionKeyValueStore>) -> Result<()> {
+        // We only ever hand out borrows of key-value stores.
+        Ok(())
+    }
+}
+
+// Host implementation of the WIT `project` resource.
+impl HostProject for WasmState {
+    async fn worktree_ids(
+        &mut self,
+        project: Resource<ExtensionProject>,
+    ) -> wasmtime::Result<Vec<u64>> {
+        let project = self.table.get(&project)?;
+        Ok(project.worktree_ids())
+    }
+
+    async fn drop(&mut self, _project: Resource<Project>) -> Result<()> {
+        // We only ever hand out borrows of projects.
+        Ok(())
+    }
+}
+
+// Host implementation of the WIT `worktree` resource: each method resolves
+// the resource handle to the `WorktreeDelegate` and forwards the call.
+impl HostWorktree for WasmState {
+    async fn id(&mut self, delegate: Resource<Arc<dyn WorktreeDelegate>>) -> wasmtime::Result<u64> {
+        let delegate = self.table.get(&delegate)?;
+        Ok(delegate.id())
+    }
+
+    async fn root_path(
+        &mut self,
+        delegate: Resource<Arc<dyn WorktreeDelegate>>,
+    ) -> wasmtime::Result<String> {
+        let delegate = self.table.get(&delegate)?;
+        Ok(delegate.root_path())
+    }
+
+    async fn read_text_file(
+        &mut self,
+        delegate: Resource<Arc<dyn WorktreeDelegate>>,
+        path: String,
+    ) -> wasmtime::Result<Result<String, String>> {
+        let delegate = self.table.get(&delegate)?;
+        // Extension paths are always POSIX-style relative paths.
+        Ok(delegate
+            .read_text_file(&RelPath::new(Path::new(&path), PathStyle::Posix)?)
+            .await
+            .map_err(|error| error.to_string()))
+    }
+
+    async fn shell_env(
+        &mut self,
+        delegate: Resource<Arc<dyn WorktreeDelegate>>,
+    ) -> wasmtime::Result<EnvVars> {
+        let delegate = self.table.get(&delegate)?;
+        Ok(delegate.shell_env().await.into_iter().collect())
+    }
+
+    async fn which(
+        &mut self,
+        delegate: Resource<Arc<dyn WorktreeDelegate>>,
+        binary_name: String,
+    ) -> wasmtime::Result<Option<String>> {
+        let delegate = self.table.get(&delegate)?;
+        Ok(delegate.which(binary_name).await)
+    }
+
+    async fn drop(&mut self, _worktree: Resource<Worktree>) -> Result<()> {
+        // We only ever hand out borrows of worktrees.
+        Ok(())
+    }
+}
+
+// The `common` interface declares no host functions; the empty impl satisfies
+// the generated trait.
+impl common::Host for WasmState {}
+
+// Host implementation of the WIT `http-client` interface, backed by the
+// host's shared HTTP client.
+impl http_client::Host for WasmState {
+    async fn fetch(
+        &mut self,
+        request: http_client::HttpRequest,
+    ) -> wasmtime::Result<Result<http_client::HttpResponse, String>> {
+        maybe!(async {
+            let url = &request.url;
+            let request = convert_request(&request)?;
+            let mut response = self.host.http_client.send(request).await?;
+
+            // 4xx/5xx responses are surfaced to the extension as errors rather
+            // than as successful responses with an error status.
+            if response.status().is_client_error() || response.status().is_server_error() {
+                bail!("failed to fetch '{url}': status code {}", response.status())
+            }
+            convert_response(&mut response).await
+        })
+        .await
+        .to_wasmtime_result()
+    }
+
+    async fn fetch_stream(
+        &mut self,
+        request: http_client::HttpRequest,
+    ) -> wasmtime::Result<Result<Resource<ExtensionHttpResponseStream>, String>> {
+        let request = convert_request(&request)?;
+        let response = self.host.http_client.send(request);
+        maybe!(async {
+            // The response body is kept open and handed to the guest as a
+            // stream resource it can poll chunk by chunk.
+            let response = response.await?;
+            let stream = Arc::new(Mutex::new(response));
+            let resource = self.table.push(stream)?;
+            Ok(resource)
+        })
+        .await
+        .to_wasmtime_result()
+    }
+}
+
+// Host implementation of the streaming HTTP response resource: each call
+// reads the next chunk of the body, returning `None` at end of stream.
+impl http_client::HostHttpResponseStream for WasmState {
+    async fn next_chunk(
+        &mut self,
+        resource: Resource<ExtensionHttpResponseStream>,
+    ) -> wasmtime::Result<Result<Option<Vec<u8>>, String>> {
+        let stream = self.table.get(&resource)?.clone();
+        maybe!(async move {
+            let mut response = stream.lock().await;
+            let mut buffer = vec![0; 8192]; // 8KB buffer
+            let bytes_read = response.body_mut().read(&mut buffer).await?;
+            if bytes_read == 0 {
+                Ok(None)
+            } else {
+                buffer.truncate(bytes_read);
+                Ok(Some(buffer))
+            }
+        })
+        .await
+        .to_wasmtime_result()
+    }
+
+    async fn drop(&mut self, _resource: Resource<ExtensionHttpResponseStream>) -> Result<()> {
+        Ok(())
+    }
+}
+
+// WIT HTTP method -> host HTTP method; one-to-one mapping.
+impl From<http_client::HttpMethod> for ::http_client::Method {
+    fn from(value: http_client::HttpMethod) -> Self {
+        match value {
+            http_client::HttpMethod::Get => Self::GET,
+            http_client::HttpMethod::Post => Self::POST,
+            http_client::HttpMethod::Put => Self::PUT,
+            http_client::HttpMethod::Delete => Self::DELETE,
+            http_client::HttpMethod::Head => Self::HEAD,
+            http_client::HttpMethod::Options => Self::OPTIONS,
+            http_client::HttpMethod::Patch => Self::PATCH,
+        }
+    }
+}
+
+// Builds a host HTTP request from the extension's WIT request: method, URI,
+// redirect policy, headers, and an optional body (empty body if absent).
+fn convert_request(
+    extension_request: &http_client::HttpRequest,
+) -> anyhow::Result<::http_client::Request<AsyncBody>> {
+    let mut request = ::http_client::Request::builder()
+        .method(::http_client::Method::from(extension_request.method))
+        .uri(&extension_request.url)
+        .follow_redirects(match extension_request.redirect_policy {
+            http_client::RedirectPolicy::NoFollow => ::http_client::RedirectPolicy::NoFollow,
+            http_client::RedirectPolicy::FollowLimit(limit) => {
+                ::http_client::RedirectPolicy::FollowLimit(limit)
+            }
+            http_client::RedirectPolicy::FollowAll => ::http_client::RedirectPolicy::FollowAll,
+        });
+    for (key, value) in &extension_request.headers {
+        request = request.header(key, value);
+    }
+    let body = extension_request
+        .body
+        .clone()
+        .map(AsyncBody::from)
+        .unwrap_or_default();
+    request.body(body).map_err(anyhow::Error::from)
+}
+
+// Converts a host HTTP response into the WIT response shape, copying headers
+// (non-UTF-8 header values become empty strings) and buffering the full body.
+async fn convert_response(
+    response: &mut ::http_client::Response<AsyncBody>,
+) -> anyhow::Result<http_client::HttpResponse> {
+    let mut extension_response = http_client::HttpResponse {
+        body: Vec::new(),
+        headers: Vec::new(),
+    };
+
+    for (key, value) in response.headers() {
+        extension_response
+            .headers
+            .push((key.to_string(), value.to_str().unwrap_or("").to_string()));
+    }
+
+    response
+        .body_mut()
+        .read_to_end(&mut extension_response.body)
+        .await?;
+
+    Ok(extension_response)
+}
+
+// Host implementation of the WIT `nodejs` interface, delegating to the host's
+// managed Node.js runtime. npm installs go into the extension's work dir and
+// require a capability grant.
+impl nodejs::Host for WasmState {
+    async fn node_binary_path(&mut self) -> wasmtime::Result<Result<String, String>> {
+        self.host
+            .node_runtime
+            .binary_path()
+            .await
+            .map(|path| path.to_string_lossy().into_owned())
+            .to_wasmtime_result()
+    }
+
+    async fn npm_package_latest_version(
+        &mut self,
+        package_name: String,
+    ) -> wasmtime::Result<Result<String, String>> {
+        self.host
+            .node_runtime
+            .npm_package_latest_version(&package_name)
+            .await
+            .to_wasmtime_result()
+    }
+
+    async fn npm_package_installed_version(
+        &mut self,
+        package_name: String,
+    ) -> wasmtime::Result<Result<Option<String>, String>> {
+        self.host
+            .node_runtime
+            .npm_package_installed_version(&self.work_dir(), &package_name)
+            .await
+            .to_wasmtime_result()
+    }
+
+    async fn npm_install_package(
+        &mut self,
+        package_name: String,
+        version: String,
+    ) -> wasmtime::Result<Result<(), String>> {
+        // Installing packages is gated by the extension's declared capabilities.
+        self.capability_granter
+            .grant_npm_install_package(&package_name)?;
+
+        self.host
+            .node_runtime
+            .npm_install_packages(&self.work_dir(), &[(&package_name, &version)])
+            .await
+            .to_wasmtime_result()
+    }
+}
+
+// The `lsp` interface declares no host functions; the empty impl satisfies
+// the generated trait.
+#[async_trait]
+impl lsp::Host for WasmState {}
+
+// Host GitHub release -> WIT release; the tag name is exposed as `version`.
+impl From<::http_client::github::GithubRelease> for github::GithubRelease {
+    fn from(value: ::http_client::github::GithubRelease) -> Self {
+        Self {
+            version: value.tag_name,
+            assets: value.assets.into_iter().map(Into::into).collect(),
+        }
+    }
+}
+
+impl From<::http_client::github::GithubReleaseAsset> for github::GithubReleaseAsset {
+    fn from(value: ::http_client::github::GithubReleaseAsset) -> Self {
+        Self {
+            name: value.name,
+            download_url: value.browser_download_url,
+        }
+    }
+}
+
+// Host implementation of the WIT `github` interface: release lookups via the
+// host's HTTP client.
+impl github::Host for WasmState {
+    async fn latest_github_release(
+        &mut self,
+        repo: String,
+        options: github::GithubReleaseOptions,
+    ) -> wasmtime::Result<Result<github::GithubRelease, String>> {
+        maybe!(async {
+            let release = ::http_client::github::latest_github_release(
+                &repo,
+                options.require_assets,
+                options.pre_release,
+                self.host.http_client.clone(),
+            )
+            .await?;
+            Ok(release.into())
+        })
+        .await
+        .to_wasmtime_result()
+    }
+
+    async fn github_release_by_tag_name(
+        &mut self,
+        repo: String,
+        tag: String,
+    ) -> wasmtime::Result<Result<github::GithubRelease, String>> {
+        maybe!(async {
+            let release = ::http_client::github::get_release_by_tag_name(
+                &repo,
+                &tag,
+                self.host.http_client.clone(),
+            )
+            .await?;
+            Ok(release.into())
+        })
+        .await
+        .to_wasmtime_result()
+    }
+}
+
+// Host implementation of the WIT `platform` interface: reports the compile-
+// time OS and architecture. Panics on targets outside the supported set,
+// which aborts the host call rather than returning an error to the guest.
+impl platform::Host for WasmState {
+    async fn current_platform(&mut self) -> Result<(platform::Os, platform::Architecture)> {
+        Ok((
+            match env::consts::OS {
+                "macos" => platform::Os::Mac,
+                "linux" => platform::Os::Linux,
+                "windows" => platform::Os::Windows,
+                _ => panic!("unsupported os"),
+            },
+            match env::consts::ARCH {
+                "aarch64" => platform::Architecture::Aarch64,
+                "x86" => platform::Architecture::X86,
+                "x86_64" => platform::Architecture::X8664,
+                _ => panic!("unsupported architecture"),
+            },
+        ))
+    }
+}
+
+// Host process output -> WIT output; `status` is `None` when the process was
+// terminated by a signal (no exit code).
+impl From<std::process::Output> for process::Output {
+    fn from(output: std::process::Output) -> Self {
+        Self {
+            status: output.status.code(),
+            stdout: output.stdout,
+            stderr: output.stderr,
+        }
+    }
+}
+
+// Host implementation of the WIT `process` interface. Command execution is
+// gated by the extension's declared capabilities.
+impl process::Host for WasmState {
+    async fn run_command(
+        &mut self,
+        command: process::Command,
+    ) -> wasmtime::Result<Result<process::Output, String>> {
+        maybe!(async {
+            self.capability_granter
+                .grant_exec(&command.command, &command.args)?;
+
+            let output = util::command::new_smol_command(command.command.as_str())
+                .args(&command.args)
+                .envs(command.env)
+                .output()
+                .await?;
+
+            Ok(output.into())
+        })
+        .await
+        .to_wasmtime_result()
+    }
+}
+
+// These interfaces declare no host functions; the empty impls satisfy the
+// generated traits.
+#[async_trait]
+impl slash_command::Host for WasmState {}
+
+#[async_trait]
+impl context_server::Host for WasmState {}
+
+// Host implementation of the WIT `dap` interface: resolves a TCP connection
+// template (host/port/timeout, with the host packed as u32 bits) into
+// concrete connection arguments via the debugger infrastructure.
+impl dap::Host for WasmState {
+    async fn resolve_tcp_template(
+        &mut self,
+        template: TcpArgumentsTemplate,
+    ) -> wasmtime::Result<Result<TcpArguments, String>> {
+        maybe!(async {
+            let (host, port, timeout) =
+                ::dap::configure_tcp_connection(task::TcpArgumentsTemplate {
+                    port: template.port,
+                    host: template.host.map(Ipv4Addr::from_bits),
+                    timeout: template.timeout,
+                })
+                .await?;
+            Ok(TcpArguments {
+                port,
+                host: host.to_bits(),
+                timeout,
+            })
+        })
+        .await
+        .to_wasmtime_result()
+    }
+}
+
+// Implementation of the top-level `zed:extension` host imports: settings
+// access, language-server status reporting, file downloads, and LLM-provider
+// credential storage.
+impl ExtensionImports for WasmState {
+    // Serializes the requested settings category ("language", "lsp", or
+    // "context_servers") to a JSON string for the extension, optionally
+    // scoped to a worktree location.
+    async fn get_settings(
+        &mut self,
+        location: Option<self::SettingsLocation>,
+        category: String,
+        key: Option<String>,
+    ) -> wasmtime::Result<Result<String, String>> {
+        self.on_main_thread(|cx| {
+            async move {
+                // Extension-supplied paths are POSIX-style relative paths.
+                let path = location.as_ref().and_then(|location| {
+                    RelPath::new(Path::new(&location.path), PathStyle::Posix).ok()
+                });
+                let location = path
+                    .as_ref()
+                    .zip(location.as_ref())
+                    .map(|(path, location)| ::settings::SettingsLocation {
+                        worktree_id: WorktreeId::from_proto(location.worktree_id),
+                        path,
+                    });
+
+                cx.update(|cx| match category.as_str() {
+                    "language" => {
+                        let key = key.map(|k| LanguageName::new(&k));
+                        let settings = AllLanguageSettings::get(location, cx).language(
+                            location,
+                            key.as_ref(),
+                            cx,
+                        );
+                        // Only the fields mirrored in the generated
+                        // `settings::LanguageSettings` are exposed.
+                        Ok(serde_json::to_string(&settings::LanguageSettings {
+                            tab_size: settings.tab_size,
+                        })?)
+                    }
+                    "lsp" => {
+                        let settings = key
+                            .and_then(|key| {
+                                ProjectSettings::get(location, cx)
+                                    .lsp
+                                    .get(&::lsp::LanguageServerName::from_proto(key))
+                            })
+                            .cloned()
+                            .unwrap_or_default();
+                        Ok(serde_json::to_string(&settings::LspSettings {
+                            binary: settings.binary.map(|binary| settings::CommandSettings {
+                                path: binary.path,
+                                arguments: binary.arguments,
+                                env: binary.env.map(|env| env.into_iter().collect()),
+                            }),
+                            settings: settings.settings,
+                            initialization_options: settings.initialization_options,
+                        })?)
+                    }
+                    "context_servers" => {
+                        let settings = key
+                            .and_then(|key| {
+                                ProjectSettings::get(location, cx)
+                                    .context_servers
+                                    .get(key.as_str())
+                            })
+                            .cloned()
+                            .unwrap_or_else(|| {
+                                project::project_settings::ContextServerSettings::default_extension(
+                                )
+                            });
+
+                        match settings {
+                            project::project_settings::ContextServerSettings::Stdio {
+                                enabled: _,
+                                command,
+                            } => Ok(serde_json::to_string(&settings::ContextServerSettings {
+                                command: Some(settings::CommandSettings {
+                                    path: command.path.to_str().map(|path| path.to_string()),
+                                    arguments: Some(command.args),
+                                    env: command.env.map(|env| env.into_iter().collect()),
+                                }),
+                                settings: None,
+                            })?),
+                            project::project_settings::ContextServerSettings::Extension {
+                                enabled: _,
+                                settings,
+                            } => Ok(serde_json::to_string(&settings::ContextServerSettings {
+                                command: None,
+                                settings: Some(settings),
+                            })?),
+                            // Remote (HTTP) context servers have no
+                            // representation in this WIT schema version.
+                            project::project_settings::ContextServerSettings::Http { .. } => {
+                                bail!("remote context server settings not supported in 0.8.0")
+                            }
+                        }
+                    }
+                    _ => {
+                        bail!("Unknown settings category: {}", category);
+                    }
+                })
+            }
+            .boxed_local()
+        })
+        .await?
+        .to_wasmtime_result()
+    }
+
+    // Maps the WIT installation status onto the host's `BinaryStatus` and
+    // forwards it to the language-server proxy for UI display.
+    async fn set_language_server_installation_status(
+        &mut self,
+        server_name: String,
+        status: LanguageServerInstallationStatus,
+    ) -> wasmtime::Result<()> {
+        let status = match status {
+            LanguageServerInstallationStatus::CheckingForUpdate => BinaryStatus::CheckingForUpdate,
+            LanguageServerInstallationStatus::Downloading => BinaryStatus::Downloading,
+            LanguageServerInstallationStatus::None => BinaryStatus::None,
+            LanguageServerInstallationStatus::Failed(error) => BinaryStatus::Failed { error },
+        };
+
+        self.host
+            .proxy
+            .update_language_server_status(::lsp::LanguageServerName(server_name.into()), status);
+
+        Ok(())
+    }
+
+    // Downloads `url` into the extension's work directory (capability-gated),
+    // decompressing/unpacking according to `file_type`.
+    async fn download_file(
+        &mut self,
+        url: String,
+        path: String,
+        file_type: DownloadedFileType,
+    ) -> wasmtime::Result<Result<(), String>> {
+        maybe!(async {
+            let parsed_url = Url::parse(&url)?;
+            self.capability_granter.grant_download_file(&parsed_url)?;
+
+            let path = PathBuf::from(path);
+            let extension_work_dir = self.host.work_dir.join(self.manifest.id.as_ref());
+
+            self.host.fs.create_dir(&extension_work_dir).await?;
+
+            // Rejects destinations that escape the extension's own directory.
+            let destination_path = self
+                .host
+                .writeable_path_from_extension(&self.manifest.id, &path)?;
+
+            let mut response = self
+                .host
+                .http_client
+                .get(&url, Default::default(), true)
+                .await
+                .context("downloading release")?;
+
+            anyhow::ensure!(
+                response.status().is_success(),
+                "download failed with status {}",
+                response.status()
+            );
+            let body = BufReader::new(response.body_mut());
+
+            // The body is streamed straight into the destination; archives
+            // are unpacked on the fly without an intermediate file.
+            match file_type {
+                DownloadedFileType::Uncompressed => {
+                    futures::pin_mut!(body);
+                    self.host
+                        .fs
+                        .create_file_with(&destination_path, body)
+                        .await?;
+                }
+                DownloadedFileType::Gzip => {
+                    let body = GzipDecoder::new(body);
+                    futures::pin_mut!(body);
+                    self.host
+                        .fs
+                        .create_file_with(&destination_path, body)
+                        .await?;
+                }
+                DownloadedFileType::GzipTar => {
+                    let body = GzipDecoder::new(body);
+                    futures::pin_mut!(body);
+                    self.host
+                        .fs
+                        .extract_tar_file(&destination_path, Archive::new(body))
+                        .await?;
+                }
+                DownloadedFileType::Zip => {
+                    futures::pin_mut!(body);
+                    extract_zip(&destination_path, body)
+                        .await
+                        .with_context(|| format!("unzipping {path:?} archive"))?;
+                }
+            }
+
+            Ok(())
+        })
+        .await
+        .to_wasmtime_result()
+    }
+
+    // Marks a file inside the extension's work directory as executable.
+    async fn make_file_executable(&mut self, path: String) -> wasmtime::Result<Result<(), String>> {
+        let path = self
+            .host
+            .writeable_path_from_extension(&self.manifest.id, Path::new(&path))?;
+
+        make_file_executable(&path)
+            .await
+            .with_context(|| format!("setting permissions for path {path:?}"))
+            .to_wasmtime_result()
+    }
+
+    // =========================================================================
+    // LLM Provider Import Implementations
+    // =========================================================================
+
+    async fn llm_request_credential(
+        &mut self,
+        _provider_id: String,
+        _credential_type: llm_provider::CredentialType,
+        _label: String,
+        _placeholder: String,
+    ) -> wasmtime::Result<Result<bool, String>> {
+        // For now, credential requests return false (not provided)
+        // Extensions should use llm_get_env_var to check for env vars first,
+        // then llm_store_credential/llm_get_credential for manual storage
+        // Full UI credential prompting will be added in a future phase
+        Ok(Ok(false))
+    }
+
+    // Reads a credential previously stored for this extension/provider pair.
+    // Failures are flattened to `None` rather than surfaced as errors.
+    async fn llm_get_credential(
+        &mut self,
+        provider_id: String,
+    ) -> wasmtime::Result<Option<String>> {
+        let extension_id = self.manifest.id.clone();
+        // Credentials are namespaced per extension so providers cannot read
+        // each other's secrets.
+        let credential_key = format!("extension-llm-{}:{}", extension_id, provider_id);
+
+        self.on_main_thread(move |cx| {
+            async move {
+                let credentials_provider = cx.update(|cx| <dyn CredentialsProvider>::global(cx))?;
+                let result = credentials_provider
+                    .read_credentials(&credential_key, cx)
+                    .await
+                    .ok()
+                    .flatten();
+                Ok(result.map(|(_, password)| String::from_utf8_lossy(&password).to_string()))
+            }
+            .boxed_local()
+        })
+        .await
+    }
+
+    // Stores a credential for this extension/provider pair in the global
+    // credentials provider (e.g. the OS keychain).
+    async fn llm_store_credential(
+        &mut self,
+        provider_id: String,
+        value: String,
+    ) -> wasmtime::Result<Result<(), String>> {
+        let extension_id = self.manifest.id.clone();
+        let credential_key = format!("extension-llm-{}:{}", extension_id, provider_id);
+
+        self.on_main_thread(move |cx| {
+            async move {
+                let credentials_provider = cx.update(|cx| <dyn CredentialsProvider>::global(cx))?;
+                credentials_provider
+                    .write_credentials(&credential_key, "api_key", value.as_bytes(), cx)
+                    .await
+                    .map_err(|e| anyhow::anyhow!("{}", e))
+            }
+            .boxed_local()
+        })
+        .await
+        .to_wasmtime_result()
+    }
+
+    // Deletes the stored credential for this extension/provider pair.
+    async fn llm_delete_credential(
+        &mut self,
+        provider_id: String,
+    ) -> wasmtime::Result<Result<(), String>> {
+        let extension_id = self.manifest.id.clone();
+        let credential_key = format!("extension-llm-{}:{}", extension_id, provider_id);
+
+        self.on_main_thread(move |cx| {
+            async move {
+                let credentials_provider = cx.update(|cx| <dyn CredentialsProvider>::global(cx))?;
+                credentials_provider
+                    .delete_credentials(&credential_key, cx)
+                    .await
+                    .map_err(|e| anyhow::anyhow!("{}", e))
+            }
+            .boxed_local()
+        })
+        .await
+        .to_wasmtime_result()
+    }
+
+    // Exposes host environment variables to the extension (read-only).
+    async fn llm_get_env_var(&mut self, name: String) -> wasmtime::Result<Option<String>> {
+        Ok(env::var(&name).ok())
+    }
+}
+
+// =============================================================================
+// LLM Provider Host Implementations
+// =============================================================================
+
+// The `llm-provider` interface declares no host functions beyond the
+// top-level imports above; the empty impl satisfies the generated trait.
+impl llm_provider::Host for WasmState {}
@@ -229,8 +229,10 @@ enum Feature {
AgentClaude,
AgentCodex,
AgentGemini,
+ ExtensionBasedpyright,
ExtensionRuff,
ExtensionTailwind,
+ ExtensionTy,
Git,
LanguageBash,
LanguageC,
@@ -251,8 +253,13 @@ fn keywords_by_feature() -> &'static BTreeMap<Feature, Vec<&'static str>> {
(Feature::AgentClaude, vec!["claude", "claude code"]),
(Feature::AgentCodex, vec!["codex", "codex cli"]),
(Feature::AgentGemini, vec!["gemini", "gemini cli"]),
+ (
+ Feature::ExtensionBasedpyright,
+ vec!["basedpyright", "pyright"],
+ ),
(Feature::ExtensionRuff, vec!["ruff"]),
(Feature::ExtensionTailwind, vec!["tail", "tailwind"]),
+ (Feature::ExtensionTy, vec!["ty"]),
(Feature::Git, vec!["git"]),
(Feature::LanguageBash, vec!["sh", "bash"]),
(Feature::LanguageC, vec!["c", "clang"]),
@@ -732,7 +739,7 @@ impl ExtensionsPage {
extension: &ExtensionMetadata,
cx: &mut Context<Self>,
) -> ExtensionCard {
- let this = cx.entity();
+ let this = cx.weak_entity();
let status = Self::extension_status(&extension.id, cx);
let has_dev_extension = Self::dev_extension_exists(&extension.id, cx);
@@ -882,13 +889,15 @@ impl ExtensionsPage {
y: px(2.0),
})
.menu(move |window, cx| {
- Some(Self::render_remote_extension_context_menu(
- &this,
- extension_id.clone(),
- authors.clone(),
- window,
- cx,
- ))
+ this.upgrade().map(|this| {
+ Self::render_remote_extension_context_menu(
+ &this,
+ extension_id.clone(),
+ authors.clone(),
+ window,
+ cx,
+ )
+ })
}),
),
),
@@ -1364,6 +1373,23 @@ impl ExtensionsPage {
return;
};
+ if let Some(id) = search.strip_prefix("id:") {
+ self.upsells.clear();
+
+ let upsell = match id.to_lowercase().as_str() {
+ "ruff" => Some(Feature::ExtensionRuff),
+ "basedpyright" => Some(Feature::ExtensionBasedpyright),
+ "ty" => Some(Feature::ExtensionTy),
+ _ => None,
+ };
+
+ if let Some(upsell) = upsell {
+ self.upsells.insert(upsell);
+ }
+
+ return;
+ }
+
let search = search.to_lowercase();
let search_terms = search
.split_whitespace()
@@ -1482,6 +1508,12 @@ impl ExtensionsPage {
false,
cx,
),
+ Feature::ExtensionBasedpyright => self.render_feature_upsell_banner(
+ "Basedpyright (Python language server) support is built-in to Zed!".into(),
+ "https://zed.dev/docs/languages/python#basedpyright".into(),
+ false,
+ cx,
+ ),
Feature::ExtensionRuff => self.render_feature_upsell_banner(
"Ruff (linter for Python) support is built-in to Zed!".into(),
"https://zed.dev/docs/languages/python#code-formatting--linting".into(),
@@ -1494,6 +1526,12 @@ impl ExtensionsPage {
false,
cx,
),
+ Feature::ExtensionTy => self.render_feature_upsell_banner(
+ "Ty (Python language server) support is built-in to Zed!".into(),
+ "https://zed.dev/docs/languages/python".into(),
+ false,
+ cx,
+ ),
Feature::Git => self.render_feature_upsell_banner(
"Zed comes with basic Git support—more features are coming in the future."
.into(),
@@ -50,6 +50,8 @@ pub struct FakeGitRepositoryState {
pub blames: HashMap<RepoPath, Blame>,
pub current_branch_name: Option<String>,
pub branches: HashSet<String>,
+    /// List of remotes; keys are names and values are URLs.
+ pub remotes: HashMap<String, String>,
pub simulated_index_write_error_message: Option<String>,
pub refs: HashMap<String, String>,
}
@@ -68,6 +70,7 @@ impl FakeGitRepositoryState {
refs: HashMap::from_iter([("HEAD".into(), "abc".into())]),
merge_base_contents: Default::default(),
oids: Default::default(),
+ remotes: HashMap::default(),
}
}
}
@@ -152,8 +155,8 @@ impl GitRepository for FakeGitRepository {
})
}
- fn remote_url(&self, _name: &str) -> Option<String> {
- None
+ fn remote_url(&self, _name: &str) -> BoxFuture<'_, Option<String>> {
+ async move { None }.boxed()
}
fn diff_tree(&self, _request: DiffTreeType) -> BoxFuture<'_, Result<TreeDiff>> {
@@ -432,8 +435,13 @@ impl GitRepository for FakeGitRepository {
})
}
- fn delete_branch(&self, _name: String) -> BoxFuture<'_, Result<()>> {
- unimplemented!()
+ fn delete_branch(&self, name: String) -> BoxFuture<'_, Result<()>> {
+ self.with_state_async(true, move |state| {
+ if !state.branches.remove(&name) {
+ bail!("no such branch: {name}");
+ }
+ Ok(())
+ })
}
fn blame(&self, path: RepoPath, _content: Rope) -> BoxFuture<'_, Result<git::blame::Blame>> {
@@ -598,15 +606,24 @@ impl GitRepository for FakeGitRepository {
unimplemented!()
}
- fn get_push_remote(&self, _branch: String) -> BoxFuture<'_, Result<Option<Remote>>> {
- unimplemented!()
+ fn get_all_remotes(&self) -> BoxFuture<'_, Result<Vec<Remote>>> {
+ self.with_state_async(false, move |state| {
+ let remotes = state
+ .remotes
+ .keys()
+ .map(|r| Remote {
+ name: r.clone().into(),
+ })
+ .collect::<Vec<_>>();
+ Ok(remotes)
+ })
}
- fn get_branch_remote(&self, _branch: String) -> BoxFuture<'_, Result<Option<Remote>>> {
+ fn get_push_remote(&self, _branch: String) -> BoxFuture<'_, Result<Option<Remote>>> {
unimplemented!()
}
- fn get_all_remotes(&self) -> BoxFuture<'_, Result<Vec<Remote>>> {
+ fn get_branch_remote(&self, _branch: String) -> BoxFuture<'_, Result<Option<Remote>>> {
unimplemented!()
}
@@ -683,6 +700,20 @@ impl GitRepository for FakeGitRepository {
fn default_branch(&self) -> BoxFuture<'_, Result<Option<SharedString>>> {
async { Ok(Some("main".into())) }.boxed()
}
+
+ fn create_remote(&self, name: String, url: String) -> BoxFuture<'_, Result<()>> {
+ self.with_state_async(true, move |state| {
+ state.remotes.insert(name, url);
+ Ok(())
+ })
+ }
+
+ fn remove_remote(&self, name: String) -> BoxFuture<'_, Result<()>> {
+ self.with_state_async(true, move |state| {
+ state.remotes.remove(&name);
+ Ok(())
+ })
+ }
}
#[cfg(test)]
@@ -1,3 +1,4 @@
+use std::str::FromStr;
use std::sync::LazyLock;
use derive_more::Deref;
@@ -11,7 +12,7 @@ pub struct RemoteUrl(Url);
static USERNAME_REGEX: LazyLock<Regex> =
LazyLock::new(|| Regex::new(r"^[0-9a-zA-Z\-_]+@").expect("Failed to create USERNAME_REGEX"));
-impl std::str::FromStr for RemoteUrl {
+impl FromStr for RemoteUrl {
type Err = url::ParseError;
fn from_str(input: &str) -> Result<Self, Self::Err> {
@@ -7,13 +7,15 @@ use collections::HashMap;
use futures::future::BoxFuture;
use futures::io::BufWriter;
use futures::{AsyncWriteExt, FutureExt as _, select_biased};
-use git2::BranchType;
+use git2::{BranchType, ErrorCode};
use gpui::{AppContext as _, AsyncApp, BackgroundExecutor, SharedString, Task};
use parking_lot::Mutex;
use rope::Rope;
use schemars::JsonSchema;
use serde::Deserialize;
use smol::io::{AsyncBufReadExt, AsyncReadExt, BufReader};
+
+use std::collections::HashSet;
use std::ffi::{OsStr, OsString};
use std::process::{ExitStatus, Stdio};
use std::{
@@ -55,6 +57,12 @@ impl Branch {
self.ref_name.starts_with("refs/remotes/")
}
+ pub fn remote_name(&self) -> Option<&str> {
+ self.ref_name
+ .strip_prefix("refs/remotes/")
+ .and_then(|stripped| stripped.split("/").next())
+ }
+
pub fn tracking_status(&self) -> Option<UpstreamTrackingStatus> {
self.upstream
.as_ref()
@@ -420,7 +428,7 @@ pub trait GitRepository: Send + Sync {
) -> BoxFuture<'_, anyhow::Result<()>>;
/// Returns the URL of the remote with the given name.
- fn remote_url(&self, name: &str) -> Option<String>;
+ fn remote_url(&self, name: &str) -> BoxFuture<'_, Option<String>>;
/// Resolve a list of refs to SHAs.
fn revparse_batch(&self, revs: Vec<String>) -> BoxFuture<'_, Result<Vec<Option<String>>>>;
@@ -590,6 +598,10 @@ pub trait GitRepository: Send + Sync {
fn get_all_remotes(&self) -> BoxFuture<'_, Result<Vec<Remote>>>;
+ fn remove_remote(&self, name: String) -> BoxFuture<'_, Result<()>>;
+
+ fn create_remote(&self, name: String, url: String) -> BoxFuture<'_, Result<()>>;
+
/// returns a list of remote branches that contain HEAD
fn check_for_pushed_commit(&self) -> BoxFuture<'_, Result<Vec<SharedString>>>;
@@ -967,7 +979,15 @@ impl GitRepository for RealGitRepository {
index.read(false)?;
const STAGE_NORMAL: i32 = 0;
- let oid = match index.get_path(path.as_std_path(), STAGE_NORMAL) {
+ let path = path.as_std_path();
+ // `RepoPath` contains a `RelPath` which normalizes `.` into an empty path
+ // `get_path` unwraps on empty paths though, so undo that normalization here
+ let path = if path.components().next().is_none() {
+ ".".as_ref()
+ } else {
+ path
+ };
+ let oid = match index.get_path(path, STAGE_NORMAL) {
Some(entry) if entry.mode != GIT_MODE_SYMLINK => entry.id,
_ => return Ok(None),
};
@@ -1077,10 +1097,16 @@ impl GitRepository for RealGitRepository {
.boxed()
}
- fn remote_url(&self, name: &str) -> Option<String> {
- let repo = self.repository.lock();
- let remote = repo.find_remote(name).ok()?;
- remote.url().map(|url| url.to_string())
+ fn remote_url(&self, name: &str) -> BoxFuture<'_, Option<String>> {
+ let repo = self.repository.clone();
+ let name = name.to_owned();
+ self.executor
+ .spawn(async move {
+ let repo = repo.lock();
+ let remote = repo.find_remote(&name).ok()?;
+ remote.url().map(|url| url.to_string())
+ })
+ .boxed()
}
fn revparse_batch(&self, revs: Vec<String>) -> BoxFuture<'_, Result<Vec<Option<String>>>> {
@@ -1371,9 +1397,19 @@ impl GitRepository for RealGitRepository {
branch
} else if let Ok(revision) = repo.find_branch(&name, BranchType::Remote) {
let (_, branch_name) = name.split_once("/").context("Unexpected branch format")?;
+
let revision = revision.get();
let branch_commit = revision.peel_to_commit()?;
- let mut branch = repo.branch(&branch_name, &branch_commit, false)?;
+ let mut branch = match repo.branch(&branch_name, &branch_commit, false) {
+ Ok(branch) => branch,
+ Err(err) if err.code() == ErrorCode::Exists => {
+ repo.find_branch(&branch_name, BranchType::Local)?
+ }
+ Err(err) => {
+ return Err(err.into());
+ }
+ };
+
branch.set_upstream(Some(&name))?;
branch
} else {
@@ -1389,7 +1425,6 @@ impl GitRepository for RealGitRepository {
self.executor
.spawn(async move {
let branch = branch.await?;
-
GitBinary::new(git_binary_path, working_directory?, executor)
.run(&["checkout", &branch])
.await?;
@@ -1457,23 +1492,30 @@ impl GitRepository for RealGitRepository {
fn blame(&self, path: RepoPath, content: Rope) -> BoxFuture<'_, Result<crate::blame::Blame>> {
let working_directory = self.working_directory();
let git_binary_path = self.any_git_binary_path.clone();
+ let executor = self.executor.clone();
- let remote_url = self
- .remote_url("upstream")
- .or_else(|| self.remote_url("origin"));
-
- self.executor
- .spawn(async move {
- crate::blame::Blame::for_path(
- &git_binary_path,
- &working_directory?,
- &path,
- &content,
- remote_url,
- )
+ async move {
+ let remote_url = if let Some(remote_url) = self.remote_url("upstream").await {
+ Some(remote_url)
+ } else if let Some(remote_url) = self.remote_url("origin").await {
+ Some(remote_url)
+ } else {
+ None
+ };
+ executor
+ .spawn(async move {
+ crate::blame::Blame::for_path(
+ &git_binary_path,
+ &working_directory?,
+ &path,
+ &content,
+ remote_url,
+ )
+ .await
+ })
.await
- })
- .boxed()
+ }
+ .boxed()
}
fn file_history(&self, path: RepoPath) -> BoxFuture<'_, Result<FileHistory>> {
@@ -1972,7 +2014,7 @@ impl GitRepository for RealGitRepository {
let working_directory = working_directory?;
let output = new_smol_command(&git_binary_path)
.current_dir(&working_directory)
- .args(["remote"])
+ .args(["remote", "-v"])
.output()
.await?;
@@ -1981,14 +2023,43 @@ impl GitRepository for RealGitRepository {
"Failed to get all remotes:\n{}",
String::from_utf8_lossy(&output.stderr)
);
- let remote_names = String::from_utf8_lossy(&output.stdout)
- .split('\n')
- .filter(|name| !name.is_empty())
- .map(|name| Remote {
- name: name.trim().to_string().into(),
+ let remote_names: HashSet<Remote> = String::from_utf8_lossy(&output.stdout)
+ .lines()
+ .filter(|line| !line.is_empty())
+ .filter_map(|line| {
+ let mut split_line = line.split_whitespace();
+ let remote_name = split_line.next()?;
+
+ Some(Remote {
+ name: remote_name.trim().to_string().into(),
+ })
})
.collect();
- Ok(remote_names)
+
+ Ok(remote_names.into_iter().collect())
+ })
+ .boxed()
+ }
+
+ fn remove_remote(&self, name: String) -> BoxFuture<'_, Result<()>> {
+ let repo = self.repository.clone();
+ self.executor
+ .spawn(async move {
+ let repo = repo.lock();
+ repo.remote_delete(&name)?;
+
+ Ok(())
+ })
+ .boxed()
+ }
+
+ fn create_remote(&self, name: String, url: String) -> BoxFuture<'_, Result<()>> {
+ let repo = self.repository.clone();
+ self.executor
+ .spawn(async move {
+ let repo = repo.lock();
+ repo.remote(&name, url.as_ref())?;
+ Ok(())
})
.boxed()
}
@@ -33,11 +33,11 @@ pub fn init(cx: &mut App) {
///
/// These require information from the Git repository to construct, so their
/// registration is deferred until we have a Git repository initialized.
-pub fn register_additional_providers(
+pub async fn register_additional_providers(
provider_registry: Arc<GitHostingProviderRegistry>,
repository: Arc<dyn GitRepository>,
) {
- let Some(origin_url) = repository.remote_url("origin") else {
+ let Some(origin_url) = repository.remote_url("origin").await else {
return;
};
@@ -198,9 +198,6 @@ impl BlameRenderer for GitBlameRenderer {
let link_color = cx.theme().colors().text_accent;
let markdown_style = {
let mut style = hover_markdown_style(window, cx);
- if let Some(code_block) = &style.code_block.text {
- style.base_text_style.refine(code_block);
- }
style.link.refine(&TextStyleRefinement {
color: Some(link_color),
underline: Some(UnderlineStyle {
@@ -1,10 +1,12 @@
use anyhow::Context as _;
+use editor::Editor;
use fuzzy::StringMatchCandidate;
use collections::HashSet;
use git::repository::Branch;
+use gpui::http_client::Url;
use gpui::{
- Action, App, Context, DismissEvent, Entity, EventEmitter, FocusHandle, Focusable,
+ Action, App, AsyncApp, Context, DismissEvent, Entity, EventEmitter, FocusHandle, Focusable,
InteractiveElement, IntoElement, Modifiers, ModifiersChangedEvent, ParentElement, Render,
SharedString, Styled, Subscription, Task, WeakEntity, Window, actions, rems,
};
@@ -14,7 +16,10 @@ use project::project_settings::ProjectSettings;
use settings::Settings;
use std::sync::Arc;
use time::OffsetDateTime;
-use ui::{HighlightedLabel, KeyBinding, ListItem, ListItemSpacing, Tooltip, prelude::*};
+use ui::{
+ CommonAnimationExt, Divider, HighlightedLabel, KeyBinding, ListItem, ListItemSpacing, Tooltip,
+ prelude::*,
+};
use util::ResultExt;
use workspace::notifications::DetachAndPromptErr;
use workspace::{ModalView, Workspace};
@@ -24,8 +29,10 @@ use crate::{branch_picker, git_panel::show_error_toast};
actions!(
branch_picker,
[
- /// Deletes the selected git branch.
- DeleteBranch
+ /// Deletes the selected git branch or remote.
+ DeleteBranch,
+        /// Filters the list of remotes.
+ FilterRemotes
]
);
@@ -206,7 +213,7 @@ impl BranchList {
.update(cx, |picker, _| picker.delegate.modifiers = ev.modifiers)
}
- fn handle_delete_branch(
+ fn handle_delete(
&mut self,
_: &branch_picker::DeleteBranch,
window: &mut Window,
@@ -215,9 +222,32 @@ impl BranchList {
self.picker.update(cx, |picker, cx| {
picker
.delegate
- .delete_branch_at(picker.delegate.selected_index, window, cx)
+ .delete_at(picker.delegate.selected_index, window, cx)
})
}
+
+ fn handle_filter(
+ &mut self,
+ _: &branch_picker::FilterRemotes,
+ window: &mut Window,
+ cx: &mut Context<Self>,
+ ) {
+ self.picker.update(cx, |this, cx| {
+ this.delegate.display_remotes = !this.delegate.display_remotes;
+ cx.spawn_in(window, async move |this, cx| {
+ this.update_in(cx, |picker, window, cx| {
+ let last_query = picker.delegate.last_query.clone();
+ picker.delegate.update_matches(last_query, window, cx)
+ })?
+ .await;
+
+ Result::Ok::<_, anyhow::Error>(())
+ })
+ .detach_and_log_err(cx);
+ });
+
+ cx.notify();
+ }
}
impl ModalView for BranchList {}
impl EventEmitter<DismissEvent> for BranchList {}
@@ -234,7 +264,8 @@ impl Render for BranchList {
.key_context("GitBranchSelector")
.w(self.width)
.on_modifiers_changed(cx.listener(Self::handle_modifiers_changed))
- .on_action(cx.listener(Self::handle_delete_branch))
+ .on_action(cx.listener(Self::handle_delete))
+ .on_action(cx.listener(Self::handle_filter))
.child(self.picker.clone())
.on_mouse_down_out({
cx.listener(move |this, _, window, cx| {
@@ -246,16 +277,50 @@ impl Render for BranchList {
}
}
-#[derive(Debug, Clone)]
-struct BranchEntry {
- branch: Branch,
- positions: Vec<usize>,
- is_new: bool,
+#[derive(Debug, Clone, PartialEq)]
+enum Entry {
+ Branch {
+ branch: Branch,
+ positions: Vec<usize>,
+ },
+ NewUrl {
+ url: String,
+ },
+ NewBranch {
+ name: String,
+ },
+}
+
+impl Entry {
+ fn as_branch(&self) -> Option<&Branch> {
+ match self {
+ Entry::Branch { branch, .. } => Some(branch),
+ _ => None,
+ }
+ }
+
+ fn name(&self) -> &str {
+ match self {
+ Entry::Branch { branch, .. } => branch.name(),
+ Entry::NewUrl { url, .. } => url.as_str(),
+ Entry::NewBranch { name, .. } => name.as_str(),
+ }
+ }
+
+ #[cfg(test)]
+ fn is_new_url(&self) -> bool {
+ matches!(self, Self::NewUrl { .. })
+ }
+
+ #[cfg(test)]
+ fn is_new_branch(&self) -> bool {
+ matches!(self, Self::NewBranch { .. })
+ }
}
pub struct BranchListDelegate {
workspace: Option<WeakEntity<Workspace>>,
- matches: Vec<BranchEntry>,
+ matches: Vec<Entry>,
all_branches: Option<Vec<Branch>>,
default_branch: Option<SharedString>,
repo: Option<Entity<Repository>>,
@@ -263,9 +328,24 @@ pub struct BranchListDelegate {
selected_index: usize,
last_query: String,
modifiers: Modifiers,
+ display_remotes: bool,
+ state: PickerState,
+ loading: bool,
focus_handle: FocusHandle,
}
+#[derive(Debug)]
+enum PickerState {
+    /// When we display the list of branches/remotes
+ List,
+    /// When we set a URL to create a new remote
+ NewRemote,
+    /// When we confirm the new remote URL (after NewRemote)
+ CreateRemote(SharedString),
+ /// When we set a new branch to create
+ NewBranch,
+}
+
impl BranchListDelegate {
fn new(
workspace: Option<WeakEntity<Workspace>>,
@@ -283,6 +363,9 @@ impl BranchListDelegate {
selected_index: 0,
last_query: Default::default(),
modifiers: Default::default(),
+ display_remotes: false,
+ state: PickerState::List,
+ loading: false,
focus_handle: cx.focus_handle(),
}
}
@@ -313,8 +396,59 @@ impl BranchListDelegate {
cx.emit(DismissEvent);
}
- fn delete_branch_at(&self, idx: usize, window: &mut Window, cx: &mut Context<Picker<Self>>) {
- let Some(branch_entry) = self.matches.get(idx) else {
+ fn create_remote(
+ &self,
+ remote_name: String,
+ remote_url: String,
+ window: &mut Window,
+ cx: &mut Context<Picker<Self>>,
+ ) {
+ let Some(repo) = self.repo.clone() else {
+ return;
+ };
+ cx.spawn(async move |this, cx| {
+ this.update(cx, |picker, cx| {
+ picker.delegate.loading = true;
+ cx.notify();
+ })
+ .log_err();
+
+ let stop_loader = |this: &WeakEntity<Picker<BranchListDelegate>>, cx: &mut AsyncApp| {
+ this.update(cx, |picker, cx| {
+ picker.delegate.loading = false;
+ cx.notify();
+ })
+ .log_err();
+ };
+ repo.update(cx, |repo, _| repo.create_remote(remote_name, remote_url))
+ .inspect_err(|_err| {
+ stop_loader(&this, cx);
+ })?
+ .await
+ .inspect_err(|_err| {
+ stop_loader(&this, cx);
+ })?
+ .inspect_err(|_err| {
+ stop_loader(&this, cx);
+ })?;
+ stop_loader(&this, cx);
+ Ok(())
+ })
+ .detach_and_prompt_err("Failed to create remote", window, cx, |e, _, _cx| {
+ Some(e.to_string())
+ });
+ cx.emit(DismissEvent);
+ }
+
+ fn loader(&self) -> AnyElement {
+ Icon::new(IconName::LoadCircle)
+ .size(IconSize::Small)
+ .with_rotate_animation(3)
+ .into_any_element()
+ }
+
+ fn delete_at(&self, idx: usize, window: &mut Window, cx: &mut Context<Picker<Self>>) {
+ let Some(entry) = self.matches.get(idx).cloned() else {
return;
};
let Some(repo) = self.repo.clone() else {
@@ -322,20 +456,51 @@ impl BranchListDelegate {
};
let workspace = self.workspace.clone();
- let branch_name = branch_entry.branch.name().to_string();
- let branch_ref = branch_entry.branch.ref_name.clone();
cx.spawn_in(window, async move |picker, cx| {
- let result = repo
- .update(cx, |repo, _| repo.delete_branch(branch_name.clone()))?
- .await?;
+ let mut is_remote = false;
+ let result = match &entry {
+ Entry::Branch { branch, .. } => match branch.remote_name() {
+ Some(remote_name) => {
+ is_remote = true;
+ repo.update(cx, |repo, _| repo.remove_remote(remote_name.to_string()))?
+ .await?
+ }
+ None => {
+ repo.update(cx, |repo, _| repo.delete_branch(branch.name().to_string()))?
+ .await?
+ }
+ },
+ _ => {
+ log::error!("Failed to delete remote: wrong entry to delete");
+ return Ok(());
+ }
+ };
if let Err(e) = result {
- log::error!("Failed to delete branch: {}", e);
+ if is_remote {
+ log::error!("Failed to delete remote: {}", e);
+ } else {
+ log::error!("Failed to delete branch: {}", e);
+ }
if let Some(workspace) = workspace.and_then(|w| w.upgrade()) {
cx.update(|_window, cx| {
- show_error_toast(workspace, format!("branch -d {branch_name}"), e, cx)
+ if is_remote {
+ show_error_toast(
+ workspace,
+ format!("remote remove {}", entry.name()),
+ e,
+ cx,
+ )
+ } else {
+ show_error_toast(
+ workspace,
+ format!("branch -d {}", entry.name()),
+ e,
+ cx,
+ )
+ }
})?;
}
@@ -343,13 +508,12 @@ impl BranchListDelegate {
}
picker.update_in(cx, |picker, _, cx| {
- picker
- .delegate
- .matches
- .retain(|entry| entry.branch.ref_name != branch_ref);
+ picker.delegate.matches.retain(|e| e != &entry);
- if let Some(all_branches) = &mut picker.delegate.all_branches {
- all_branches.retain(|branch| branch.ref_name != branch_ref);
+ if let Entry::Branch { branch, .. } = &entry {
+ if let Some(all_branches) = &mut picker.delegate.all_branches {
+ all_branches.retain(|e| e.ref_name != branch.ref_name);
+ }
}
if picker.delegate.matches.is_empty() {
@@ -374,6 +538,45 @@ impl PickerDelegate for BranchListDelegate {
"Select branch…".into()
}
+ fn render_editor(
+ &self,
+ editor: &Entity<Editor>,
+ window: &mut Window,
+ cx: &mut Context<Picker<Self>>,
+ ) -> Div {
+ cx.update_entity(editor, move |editor, cx| {
+ let placeholder = match self.state {
+ PickerState::List | PickerState::NewRemote | PickerState::NewBranch => {
+ if self.display_remotes {
+ "Select remote…"
+ } else {
+ "Select branch…"
+ }
+ }
+ PickerState::CreateRemote(_) => "Choose a name…",
+ };
+ editor.set_placeholder_text(placeholder, window, cx);
+ });
+
+ v_flex()
+ .when(
+ self.editor_position() == PickerEditorPosition::End,
+ |this| this.child(Divider::horizontal()),
+ )
+ .child(
+ h_flex()
+ .overflow_hidden()
+ .flex_none()
+ .h_9()
+ .px_2p5()
+ .child(editor.clone()),
+ )
+ .when(
+ self.editor_position() == PickerEditorPosition::Start,
+ |this| this.child(Divider::horizontal()),
+ )
+ }
+
fn editor_position(&self) -> PickerEditorPosition {
match self.style {
BranchListStyle::Modal => PickerEditorPosition::Start,
@@ -409,20 +612,36 @@ impl PickerDelegate for BranchListDelegate {
};
const RECENT_BRANCHES_COUNT: usize = 10;
+ let display_remotes = self.display_remotes;
cx.spawn_in(window, async move |picker, cx| {
- let mut matches: Vec<BranchEntry> = if query.is_empty() {
+ let mut matches: Vec<Entry> = if query.is_empty() {
all_branches
.into_iter()
- .filter(|branch| !branch.is_remote())
+ .filter(|branch| {
+ if display_remotes {
+ branch.is_remote()
+ } else {
+ !branch.is_remote()
+ }
+ })
.take(RECENT_BRANCHES_COUNT)
- .map(|branch| BranchEntry {
+ .map(|branch| Entry::Branch {
branch,
positions: Vec::new(),
- is_new: false,
})
.collect()
} else {
- let candidates = all_branches
+ let branches = all_branches
+ .iter()
+ .filter(|branch| {
+ if display_remotes {
+ branch.is_remote()
+ } else {
+ !branch.is_remote()
+ }
+ })
+ .collect::<Vec<_>>();
+ let candidates = branches
.iter()
.enumerate()
.map(|(ix, branch)| StringMatchCandidate::new(ix, branch.name()))
@@ -438,31 +657,40 @@ impl PickerDelegate for BranchListDelegate {
)
.await
.into_iter()
- .map(|candidate| BranchEntry {
- branch: all_branches[candidate.candidate_id].clone(),
+ .map(|candidate| Entry::Branch {
+ branch: branches[candidate.candidate_id].clone(),
positions: candidate.positions,
- is_new: false,
})
.collect()
};
picker
.update(cx, |picker, _| {
+ if matches!(picker.delegate.state, PickerState::CreateRemote(_)) {
+ picker.delegate.last_query = query;
+ picker.delegate.matches = Vec::new();
+ picker.delegate.selected_index = 0;
+
+ return;
+ }
+
if !query.is_empty()
- && !matches
- .first()
- .is_some_and(|entry| entry.branch.name() == query)
+ && !matches.first().is_some_and(|entry| entry.name() == query)
{
let query = query.replace(' ', "-");
- matches.push(BranchEntry {
- branch: Branch {
- ref_name: format!("refs/heads/{query}").into(),
- is_head: false,
- upstream: None,
- most_recent_commit: None,
- },
- positions: Vec::new(),
- is_new: true,
- })
+ let is_url = query.trim_start_matches("git@").parse::<Url>().is_ok();
+ let entry = if is_url {
+ Entry::NewUrl { url: query }
+ } else {
+ Entry::NewBranch { name: query }
+ };
+ picker.delegate.state = if is_url {
+ PickerState::NewRemote
+ } else {
+ PickerState::NewBranch
+ };
+ matches.push(entry);
+ } else {
+ picker.delegate.state = PickerState::List;
}
let delegate = &mut picker.delegate;
delegate.matches = matches;
@@ -479,56 +707,78 @@ impl PickerDelegate for BranchListDelegate {
}
fn confirm(&mut self, secondary: bool, window: &mut Window, cx: &mut Context<Picker<Self>>) {
- let Some(entry) = self.matches.get(self.selected_index()) else {
- return;
- };
-
- if entry.is_new {
- let from_branch = if secondary {
- self.default_branch.clone()
- } else {
- None
- };
- self.create_branch(
- from_branch,
- entry.branch.name().to_owned().into(),
- window,
- cx,
- );
- return;
- }
-
- let current_branch = self.repo.as_ref().map(|repo| {
- repo.read_with(cx, |repo, _| {
- repo.branch.as_ref().map(|branch| branch.ref_name.clone())
- })
- });
-
- if current_branch
- .flatten()
- .is_some_and(|current_branch| current_branch == entry.branch.ref_name)
- {
- cx.emit(DismissEvent);
+ if let PickerState::CreateRemote(remote_url) = &self.state {
+ self.create_remote(self.last_query.clone(), remote_url.to_string(), window, cx);
+ self.state = PickerState::List;
+ cx.notify();
return;
}
- let Some(repo) = self.repo.clone() else {
+ let Some(entry) = self.matches.get(self.selected_index()) else {
return;
};
- let branch = entry.branch.clone();
- cx.spawn(async move |_, cx| {
- repo.update(cx, |repo, _| repo.change_branch(branch.name().to_string()))?
- .await??;
+ match entry {
+ Entry::Branch { branch, .. } => {
+ let current_branch = self.repo.as_ref().map(|repo| {
+ repo.read_with(cx, |repo, _| {
+ repo.branch.as_ref().map(|branch| branch.ref_name.clone())
+ })
+ });
+
+ if current_branch
+ .flatten()
+ .is_some_and(|current_branch| current_branch == branch.ref_name)
+ {
+ cx.emit(DismissEvent);
+ return;
+ }
- anyhow::Ok(())
- })
- .detach_and_prompt_err("Failed to change branch", window, cx, |_, _, _| None);
+ let Some(repo) = self.repo.clone() else {
+ return;
+ };
+
+ let branch = branch.clone();
+ cx.spawn(async move |_, cx| {
+ repo.update(cx, |repo, _| repo.change_branch(branch.name().to_string()))?
+ .await??;
+
+ anyhow::Ok(())
+ })
+ .detach_and_prompt_err(
+ "Failed to change branch",
+ window,
+ cx,
+ |_, _, _| None,
+ );
+ }
+ Entry::NewUrl { url } => {
+ self.state = PickerState::CreateRemote(url.clone().into());
+ self.matches = Vec::new();
+ self.selected_index = 0;
+ cx.spawn_in(window, async move |this, cx| {
+ this.update_in(cx, |picker, window, cx| {
+ picker.set_query("", window, cx);
+ })
+ })
+ .detach_and_log_err(cx);
+ cx.notify();
+ }
+ Entry::NewBranch { name } => {
+ let from_branch = if secondary {
+ self.default_branch.clone()
+ } else {
+ None
+ };
+ self.create_branch(from_branch, format!("refs/heads/{name}").into(), window, cx);
+ }
+ }
cx.emit(DismissEvent);
}
fn dismissed(&mut self, _: &mut Window, cx: &mut Context<Picker<Self>>) {
+ self.state = PickerState::List;
cx.emit(DismissEvent);
}
@@ -542,49 +792,60 @@ impl PickerDelegate for BranchListDelegate {
let entry = &self.matches.get(ix)?;
let (commit_time, author_name, subject) = entry
- .branch
- .most_recent_commit
- .as_ref()
- .map(|commit| {
- let subject = commit.subject.clone();
- let commit_time = OffsetDateTime::from_unix_timestamp(commit.commit_timestamp)
- .unwrap_or_else(|_| OffsetDateTime::now_utc());
- let local_offset =
- time::UtcOffset::current_local_offset().unwrap_or(time::UtcOffset::UTC);
- let formatted_time = time_format::format_localized_timestamp(
- commit_time,
- OffsetDateTime::now_utc(),
- local_offset,
- time_format::TimestampFormat::Relative,
- );
- let author = commit.author_name.clone();
- (Some(formatted_time), Some(author), Some(subject))
+ .as_branch()
+ .and_then(|branch| {
+ branch.most_recent_commit.as_ref().map(|commit| {
+ let subject = commit.subject.clone();
+ let commit_time = OffsetDateTime::from_unix_timestamp(commit.commit_timestamp)
+ .unwrap_or_else(|_| OffsetDateTime::now_utc());
+ let local_offset =
+ time::UtcOffset::current_local_offset().unwrap_or(time::UtcOffset::UTC);
+ let formatted_time = time_format::format_localized_timestamp(
+ commit_time,
+ OffsetDateTime::now_utc(),
+ local_offset,
+ time_format::TimestampFormat::Relative,
+ );
+ let author = commit.author_name.clone();
+ (Some(formatted_time), Some(author), Some(subject))
+ })
})
.unwrap_or_else(|| (None, None, None));
- let icon = if let Some(default_branch) = self.default_branch.clone()
- && entry.is_new
- {
- Some(
- IconButton::new("branch-from-default", IconName::GitBranchAlt)
+ let icon = if let Some(default_branch) = self.default_branch.clone() {
+ let icon = match &entry {
+ Entry::Branch { .. } => Some((
+ IconName::GitBranchAlt,
+ format!("Create branch based off default: {default_branch}"),
+ )),
+ Entry::NewUrl { url } => {
+ Some((IconName::Screen, format!("Create remote based off {url}")))
+ }
+ Entry::NewBranch { .. } => None,
+ };
+
+ icon.map(|(icon, tooltip_text)| {
+ IconButton::new("branch-from-default", icon)
.on_click(cx.listener(move |this, _, window, cx| {
this.delegate.set_selected_index(ix, window, cx);
this.delegate.confirm(true, window, cx);
}))
.tooltip(move |_window, cx| {
- Tooltip::for_action(
- format!("Create branch based off default: {default_branch}"),
- &menu::SecondaryConfirm,
- cx,
- )
- }),
- )
+ Tooltip::for_action(tooltip_text.clone(), &menu::SecondaryConfirm, cx)
+ })
+ })
} else {
None
};
- let branch_name = if entry.is_new {
- h_flex()
+ let icon_element = if self.display_remotes {
+ Icon::new(IconName::Screen)
+ } else {
+ Icon::new(IconName::GitBranchAlt)
+ };
+
+ let entry_name = match entry {
+ Entry::NewUrl { .. } => h_flex()
.gap_1()
.child(
Icon::new(IconName::Plus)
@@ -592,19 +853,31 @@ impl PickerDelegate for BranchListDelegate {
.color(Color::Muted),
)
.child(
- Label::new(format!("Create branch \"{}\"…", entry.branch.name()))
+ Label::new("Create remote repository".to_string())
.single_line()
.truncate(),
)
- .into_any_element()
- } else {
- h_flex()
- .max_w_48()
+ .into_any_element(),
+ Entry::NewBranch { name } => h_flex()
+ .gap_1()
.child(
- HighlightedLabel::new(entry.branch.name().to_owned(), entry.positions.clone())
+ Icon::new(IconName::Plus)
+ .size(IconSize::Small)
+ .color(Color::Muted),
+ )
+ .child(
+ Label::new(format!("Create branch \"{name}\"…"))
+ .single_line()
.truncate(),
)
- .into_any_element()
+ .into_any_element(),
+ Entry::Branch { branch, positions } => h_flex()
+ .max_w_48()
+ .child(h_flex().mr_1().child(icon_element))
+ .child(
+ HighlightedLabel::new(branch.name().to_string(), positions.clone()).truncate(),
+ )
+ .into_any_element(),
};
Some(
@@ -613,11 +886,14 @@ impl PickerDelegate for BranchListDelegate {
.spacing(ListItemSpacing::Sparse)
.toggle_state(selected)
.tooltip({
- let branch_name = entry.branch.name().to_string();
- if entry.is_new {
- Tooltip::text(format!("Create branch \"{}\"", branch_name))
- } else {
- Tooltip::text(branch_name)
+ match entry {
+ Entry::Branch { branch, .. } => Tooltip::text(branch.name().to_string()),
+ Entry::NewUrl { .. } => {
+ Tooltip::text("Create remote repository".to_string())
+ }
+ Entry::NewBranch { name } => {
+ Tooltip::text(format!("Create branch \"{name}\""))
+ }
}
})
.child(
@@ -629,7 +905,7 @@ impl PickerDelegate for BranchListDelegate {
.gap_6()
.justify_between()
.overflow_x_hidden()
- .child(branch_name)
+ .child(entry_name)
.when_some(commit_time, |label, commit_time| {
label.child(
Label::new(commit_time)
@@ -641,30 +917,35 @@ impl PickerDelegate for BranchListDelegate {
)
.when(self.style == BranchListStyle::Modal, |el| {
el.child(div().max_w_96().child({
- let message = if entry.is_new {
- if let Some(current_branch) =
- self.repo.as_ref().and_then(|repo| {
- repo.read(cx).branch.as_ref().map(|b| b.name())
- })
- {
- format!("based off {}", current_branch)
- } else {
- "based off the current branch".to_string()
- }
- } else {
- let show_author_name = ProjectSettings::get_global(cx)
- .git
- .branch_picker
- .show_author_name;
-
- subject.map_or("no commits found".into(), |subject| {
- if show_author_name && author_name.is_some() {
- format!("{} • {}", author_name.unwrap(), subject)
+ let message = match entry {
+ Entry::NewUrl { url } => format!("based off {url}"),
+ Entry::NewBranch { .. } => {
+ if let Some(current_branch) =
+ self.repo.as_ref().and_then(|repo| {
+ repo.read(cx).branch.as_ref().map(|b| b.name())
+ })
+ {
+ format!("based off {}", current_branch)
} else {
- subject.to_string()
+ "based off the current branch".to_string()
}
- })
+ }
+ Entry::Branch { .. } => {
+ let show_author_name = ProjectSettings::get_global(cx)
+ .git
+ .branch_picker
+ .show_author_name;
+
+ subject.map_or("no commits found".into(), |subject| {
+ if show_author_name && author_name.is_some() {
+ format!("{} • {}", author_name.unwrap(), subject)
+ } else {
+ subject.to_string()
+ }
+ })
+ }
};
+
Label::new(message)
.size(LabelSize::Small)
.truncate()
@@ -676,40 +957,715 @@ impl PickerDelegate for BranchListDelegate {
)
}
- fn render_footer(
+ fn render_header(
&self,
_window: &mut Window,
cx: &mut Context<Picker<Self>>,
) -> Option<AnyElement> {
- let focus_handle = self.focus_handle.clone();
-
+ if matches!(
+ self.state,
+ PickerState::CreateRemote(_) | PickerState::NewRemote | PickerState::NewBranch
+ ) {
+ return None;
+ }
+ let label = if self.display_remotes {
+ "Remote"
+ } else {
+ "Local"
+ };
Some(
h_flex()
.w_full()
.p_1p5()
- .gap_0p5()
- .justify_end()
+ .gap_1()
.border_t_1()
.border_color(cx.theme().colors().border_variant)
- .child(
- Button::new("delete-branch", "Delete")
- .key_binding(
- KeyBinding::for_action_in(
- &branch_picker::DeleteBranch,
- &focus_handle,
- cx,
- )
- .map(|kb| kb.size(rems_from_px(12.))),
- )
- .on_click(|_, window, cx| {
- window.dispatch_action(branch_picker::DeleteBranch.boxed_clone(), cx);
- }),
- )
+ .child(Label::new(label).size(LabelSize::Small).color(Color::Muted))
.into_any(),
)
}
+ fn render_footer(&self, _: &mut Window, cx: &mut Context<Picker<Self>>) -> Option<AnyElement> {
+ let focus_handle = self.focus_handle.clone();
+
+ if self.loading {
+ return Some(
+ h_flex()
+ .w_full()
+ .p_1p5()
+ .gap_1()
+ .justify_end()
+ .border_t_1()
+ .border_color(cx.theme().colors().border_variant)
+ .child(self.loader())
+ .into_any(),
+ );
+ }
+ match self.state {
+ PickerState::List => Some(
+ h_flex()
+ .w_full()
+ .p_1p5()
+ .gap_0p5()
+ .border_t_1()
+ .border_color(cx.theme().colors().border_variant)
+ .justify_between()
+ .child(
+ Button::new("filter-remotes", "Filter remotes")
+ .key_binding(
+ KeyBinding::for_action_in(
+ &branch_picker::FilterRemotes,
+ &focus_handle,
+ cx,
+ )
+ .map(|kb| kb.size(rems_from_px(12.))),
+ )
+ .on_click(|_click, window, cx| {
+ window.dispatch_action(
+ branch_picker::FilterRemotes.boxed_clone(),
+ cx,
+ );
+ })
+ .disabled(self.loading)
+ .style(ButtonStyle::Subtle)
+ .toggle_state(self.display_remotes),
+ )
+ .child(
+ Button::new("delete-branch", "Delete")
+ .key_binding(
+ KeyBinding::for_action_in(
+ &branch_picker::DeleteBranch,
+ &focus_handle,
+ cx,
+ )
+ .map(|kb| kb.size(rems_from_px(12.))),
+ )
+ .disabled(self.loading)
+ .on_click(|_, window, cx| {
+ window
+ .dispatch_action(branch_picker::DeleteBranch.boxed_clone(), cx);
+ }),
+ )
+ .when(self.loading, |this| this.child(self.loader()))
+ .into_any(),
+ ),
+ PickerState::CreateRemote(_) => Some(
+ h_flex()
+ .w_full()
+ .p_1p5()
+ .gap_1()
+ .border_t_1()
+ .border_color(cx.theme().colors().border_variant)
+ .child(
+ Label::new("Choose a name for this remote repository")
+ .size(LabelSize::Small)
+ .color(Color::Muted),
+ )
+ .child(
+ h_flex().w_full().justify_end().child(
+ Label::new("Save")
+ .size(LabelSize::Small)
+ .color(Color::Muted),
+ ),
+ )
+ .into_any(),
+ ),
+ PickerState::NewRemote | PickerState::NewBranch => None,
+ }
+ }
+
fn no_matches_text(&self, _window: &mut Window, _cx: &mut App) -> Option<SharedString> {
None
}
}
+
+#[cfg(test)]
+mod tests {
+ use std::collections::HashSet;
+
+ use super::*;
+ use git::repository::{CommitSummary, Remote};
+ use gpui::{TestAppContext, VisualTestContext};
+ use project::{FakeFs, Project};
+ use serde_json::json;
+ use settings::SettingsStore;
+ use util::path;
+
+ fn init_test(cx: &mut TestAppContext) {
+ cx.update(|cx| {
+ let settings_store = SettingsStore::test(cx);
+ cx.set_global(settings_store);
+ theme::init(theme::LoadThemes::JustBase, cx);
+ });
+ }
+
+ fn create_test_branch(
+ name: &str,
+ is_head: bool,
+ remote_name: Option<&str>,
+ timestamp: Option<i64>,
+ ) -> Branch {
+ let ref_name = match remote_name {
+ Some(remote_name) => format!("refs/remotes/{remote_name}/{name}"),
+ None => format!("refs/heads/{name}"),
+ };
+
+ Branch {
+ is_head,
+ ref_name: ref_name.into(),
+ upstream: None,
+ most_recent_commit: timestamp.map(|ts| CommitSummary {
+ sha: "abc123".into(),
+ commit_timestamp: ts,
+ author_name: "Test Author".into(),
+ subject: "Test commit".into(),
+ has_parent: true,
+ }),
+ }
+ }
+
+ fn create_test_branches() -> Vec<Branch> {
+ vec![
+ create_test_branch("main", true, None, Some(1000)),
+ create_test_branch("feature-auth", false, None, Some(900)),
+ create_test_branch("feature-ui", false, None, Some(800)),
+ create_test_branch("develop", false, None, Some(700)),
+ ]
+ }
+
+ fn init_branch_list_test(
+ cx: &mut TestAppContext,
+ repository: Option<Entity<Repository>>,
+ branches: Vec<Branch>,
+ ) -> (VisualTestContext, Entity<BranchList>) {
+ let window = cx.add_window(|window, cx| {
+ let mut delegate =
+ BranchListDelegate::new(None, repository, BranchListStyle::Modal, cx);
+ delegate.all_branches = Some(branches);
+ let picker = cx.new(|cx| Picker::uniform_list(delegate, window, cx));
+ let picker_focus_handle = picker.focus_handle(cx);
+ picker.update(cx, |picker, _| {
+ picker.delegate.focus_handle = picker_focus_handle.clone();
+ });
+
+ let _subscription = cx.subscribe(&picker, |_, _, _, cx| {
+ cx.emit(DismissEvent);
+ });
+
+ BranchList {
+ picker,
+ picker_focus_handle,
+ width: rems(34.),
+ _subscription,
+ }
+ });
+
+ let branch_list = window.root(cx).unwrap();
+ let cx = VisualTestContext::from_window(*window, cx);
+
+ (cx, branch_list)
+ }
+
+ async fn init_fake_repository(cx: &mut TestAppContext) -> Entity<Repository> {
+ let fs = FakeFs::new(cx.executor());
+ fs.insert_tree(
+ path!("/dir"),
+ json!({
+ ".git": {},
+ "file.txt": "buffer_text".to_string()
+ }),
+ )
+ .await;
+ fs.set_head_for_repo(
+ path!("/dir/.git").as_ref(),
+ &[("file.txt", "test".to_string())],
+ "deadbeef",
+ );
+ fs.set_index_for_repo(
+ path!("/dir/.git").as_ref(),
+ &[("file.txt", "index_text".to_string())],
+ );
+
+ let project = Project::test(fs.clone(), [path!("/dir").as_ref()], cx).await;
+ let repository = cx.read(|cx| project.read(cx).active_repository(cx));
+
+ repository.unwrap()
+ }
+
+ #[gpui::test]
+ async fn test_update_branch_matches_with_query(cx: &mut TestAppContext) {
+ init_test(cx);
+
+ let branches = create_test_branches();
+ let (mut ctx, branch_list) = init_branch_list_test(cx, None, branches);
+ let cx = &mut ctx;
+
+ branch_list
+ .update_in(cx, |branch_list, window, cx| {
+ let query = "feature".to_string();
+ branch_list.picker.update(cx, |picker, cx| {
+ picker.delegate.update_matches(query, window, cx)
+ })
+ })
+ .await;
+ cx.run_until_parked();
+
+ branch_list.update(cx, |branch_list, cx| {
+ branch_list.picker.update(cx, |picker, _cx| {
+ // Should have 2 existing branches + 1 "create new branch" entry = 3 total
+ assert_eq!(picker.delegate.matches.len(), 3);
+ assert!(
+ picker
+ .delegate
+ .matches
+ .iter()
+ .any(|m| m.name() == "feature-auth")
+ );
+ assert!(
+ picker
+ .delegate
+ .matches
+ .iter()
+ .any(|m| m.name() == "feature-ui")
+ );
+ // Verify the last entry is the "create new branch" option
+ let last_match = picker.delegate.matches.last().unwrap();
+ assert!(last_match.is_new_branch());
+ })
+ });
+ }
+
+ async fn update_branch_list_matches_with_empty_query(
+ branch_list: &Entity<BranchList>,
+ cx: &mut VisualTestContext,
+ ) {
+ branch_list
+ .update_in(cx, |branch_list, window, cx| {
+ branch_list.picker.update(cx, |picker, cx| {
+ picker.delegate.update_matches(String::new(), window, cx)
+ })
+ })
+ .await;
+ cx.run_until_parked();
+ }
+
+ #[gpui::test]
+ async fn test_delete_branch(cx: &mut TestAppContext) {
+ init_test(cx);
+ let repository = init_fake_repository(cx).await;
+
+ let branches = create_test_branches();
+
+ let branch_names = branches
+ .iter()
+ .map(|branch| branch.name().to_string())
+ .collect::<Vec<String>>();
+ let repo = repository.clone();
+ cx.spawn(async move |mut cx| {
+ for branch in branch_names {
+ repo.update(&mut cx, |repo, _| repo.create_branch(branch, None))
+ .unwrap()
+ .await
+ .unwrap()
+ .unwrap();
+ }
+ })
+ .await;
+ cx.run_until_parked();
+
+ let (mut ctx, branch_list) = init_branch_list_test(cx, repository.into(), branches);
+ let cx = &mut ctx;
+
+ update_branch_list_matches_with_empty_query(&branch_list, cx).await;
+
+ let branch_to_delete = branch_list.update_in(cx, |branch_list, window, cx| {
+ branch_list.picker.update(cx, |picker, cx| {
+ assert_eq!(picker.delegate.matches.len(), 4);
+ let branch_to_delete = picker.delegate.matches.get(1).unwrap().name().to_string();
+ picker.delegate.delete_at(1, window, cx);
+ branch_to_delete
+ })
+ });
+ cx.run_until_parked();
+
+ branch_list.update(cx, move |branch_list, cx| {
+ branch_list.picker.update(cx, move |picker, _cx| {
+ assert_eq!(picker.delegate.matches.len(), 3);
+ let branches = picker
+ .delegate
+ .matches
+ .iter()
+ .map(|be| be.name())
+ .collect::<HashSet<_>>();
+ assert_eq!(
+ branches,
+ ["main", "feature-auth", "feature-ui", "develop"]
+ .into_iter()
+ .filter(|name| name != &branch_to_delete)
+ .collect::<HashSet<_>>()
+ );
+ })
+ });
+ }
+
+ #[gpui::test]
+ async fn test_delete_remote(cx: &mut TestAppContext) {
+ init_test(cx);
+ let repository = init_fake_repository(cx).await;
+ let branches = vec![
+ create_test_branch("main", true, Some("origin"), Some(1000)),
+ create_test_branch("feature-auth", false, Some("origin"), Some(900)),
+ create_test_branch("feature-ui", false, Some("fork"), Some(800)),
+ create_test_branch("develop", false, Some("private"), Some(700)),
+ ];
+
+ let remote_names = branches
+ .iter()
+ .filter_map(|branch| branch.remote_name().map(|r| r.to_string()))
+ .collect::<Vec<String>>();
+ let repo = repository.clone();
+ cx.spawn(async move |mut cx| {
+ for branch in remote_names {
+ repo.update(&mut cx, |repo, _| {
+ repo.create_remote(branch, String::from("test"))
+ })
+ .unwrap()
+ .await
+ .unwrap()
+ .unwrap();
+ }
+ })
+ .await;
+ cx.run_until_parked();
+
+ let (mut ctx, branch_list) = init_branch_list_test(cx, repository.into(), branches);
+ let cx = &mut ctx;
+ // Enable remote filter
+ branch_list.update(cx, |branch_list, cx| {
+ branch_list.picker.update(cx, |picker, _cx| {
+ picker.delegate.display_remotes = true;
+ });
+ });
+ update_branch_list_matches_with_empty_query(&branch_list, cx).await;
+
+ // Check matches, it should match all existing branches and no option to create new branch
+ let branch_to_delete = branch_list.update_in(cx, |branch_list, window, cx| {
+ branch_list.picker.update(cx, |picker, cx| {
+ assert_eq!(picker.delegate.matches.len(), 4);
+ let branch_to_delete = picker.delegate.matches.get(1).unwrap().name().to_string();
+ picker.delegate.delete_at(1, window, cx);
+ branch_to_delete
+ })
+ });
+ cx.run_until_parked();
+
+ // Check matches, it should match one less branch than before
+ branch_list.update(cx, move |branch_list, cx| {
+ branch_list.picker.update(cx, move |picker, _cx| {
+ assert_eq!(picker.delegate.matches.len(), 3);
+ let branches = picker
+ .delegate
+ .matches
+ .iter()
+ .map(|be| be.name())
+ .collect::<HashSet<_>>();
+ assert_eq!(
+ branches,
+ [
+ "origin/main",
+ "origin/feature-auth",
+ "fork/feature-ui",
+ "private/develop"
+ ]
+ .into_iter()
+ .filter(|name| name != &branch_to_delete)
+ .collect::<HashSet<_>>()
+ );
+ })
+ });
+ }
+
+ #[gpui::test]
+ async fn test_update_remote_matches_with_query(cx: &mut TestAppContext) {
+ init_test(cx);
+
+ let branches = vec![
+ create_test_branch("main", true, Some("origin"), Some(1000)),
+ create_test_branch("feature-auth", false, Some("fork"), Some(900)),
+ create_test_branch("feature-ui", false, None, Some(800)),
+ create_test_branch("develop", false, None, Some(700)),
+ ];
+
+ let (mut ctx, branch_list) = init_branch_list_test(cx, None, branches);
+ let cx = &mut ctx;
+
+ update_branch_list_matches_with_empty_query(&branch_list, cx).await;
+
+ // Check matches, it should match all existing branches and no option to create new branch
+ branch_list
+ .update_in(cx, |branch_list, window, cx| {
+ branch_list.picker.update(cx, |picker, cx| {
+ assert_eq!(picker.delegate.matches.len(), 2);
+ let branches = picker
+ .delegate
+ .matches
+ .iter()
+ .map(|be| be.name())
+ .collect::<HashSet<_>>();
+ assert_eq!(
+ branches,
+ ["feature-ui", "develop"]
+ .into_iter()
+ .collect::<HashSet<_>>()
+ );
+
+ // Verify the last entry is NOT the "create new branch" option
+ let last_match = picker.delegate.matches.last().unwrap();
+ assert!(!last_match.is_new_branch());
+ assert!(!last_match.is_new_url());
+ picker.delegate.display_remotes = true;
+ picker.delegate.update_matches(String::new(), window, cx)
+ })
+ })
+ .await;
+ cx.run_until_parked();
+
+ branch_list
+ .update_in(cx, |branch_list, window, cx| {
+ branch_list.picker.update(cx, |picker, cx| {
+ assert_eq!(picker.delegate.matches.len(), 2);
+ let branches = picker
+ .delegate
+ .matches
+ .iter()
+ .map(|be| be.name())
+ .collect::<HashSet<_>>();
+ assert_eq!(
+ branches,
+ ["origin/main", "fork/feature-auth"]
+ .into_iter()
+ .collect::<HashSet<_>>()
+ );
+
+ // Verify the last entry is NOT the "create new branch" option
+ let last_match = picker.delegate.matches.last().unwrap();
+ assert!(!last_match.is_new_url());
+ picker.delegate.display_remotes = true;
+ picker
+ .delegate
+ .update_matches(String::from("fork"), window, cx)
+ })
+ })
+ .await;
+ cx.run_until_parked();
+
+ branch_list.update(cx, |branch_list, cx| {
+ branch_list.picker.update(cx, |picker, _cx| {
+ // Should have 1 existing branch + 1 "create new branch" entry = 2 total
+ assert_eq!(picker.delegate.matches.len(), 2);
+ assert!(
+ picker
+ .delegate
+ .matches
+ .iter()
+ .any(|m| m.name() == "fork/feature-auth")
+ );
+ // Verify the last entry is the "create new branch" option
+ let last_match = picker.delegate.matches.last().unwrap();
+ assert!(last_match.is_new_branch());
+ })
+ });
+ }
+
+ #[gpui::test]
+ async fn test_new_branch_creation_with_query(test_cx: &mut TestAppContext) {
+ init_test(test_cx);
+ let repository = init_fake_repository(test_cx).await;
+
+ let branches = vec![
+ create_test_branch("main", true, None, Some(1000)),
+ create_test_branch("feature", false, None, Some(900)),
+ ];
+
+ let (mut ctx, branch_list) = init_branch_list_test(test_cx, repository.into(), branches);
+ let cx = &mut ctx;
+
+ branch_list
+ .update_in(cx, |branch_list, window, cx| {
+ branch_list.picker.update(cx, |picker, cx| {
+ let query = "new-feature-branch".to_string();
+ picker.delegate.update_matches(query, window, cx)
+ })
+ })
+ .await;
+
+ cx.run_until_parked();
+
+ branch_list.update_in(cx, |branch_list, window, cx| {
+ branch_list.picker.update(cx, |picker, cx| {
+ let last_match = picker.delegate.matches.last().unwrap();
+ assert!(last_match.is_new_branch());
+ assert_eq!(last_match.name(), "new-feature-branch");
+ assert!(matches!(picker.delegate.state, PickerState::NewBranch));
+ picker.delegate.confirm(false, window, cx);
+ })
+ });
+ cx.run_until_parked();
+
+ let branches = branch_list
+ .update(cx, |branch_list, cx| {
+ branch_list.picker.update(cx, |picker, cx| {
+ picker
+ .delegate
+ .repo
+ .as_ref()
+ .unwrap()
+ .update(cx, |repo, _cx| repo.branches())
+ })
+ })
+ .await
+ .unwrap()
+ .unwrap();
+
+ assert!(
+ branches
+ .into_iter()
+ .any(|branch| branch.name() == "new-feature-branch")
+ );
+ }
+
+ #[gpui::test]
+ async fn test_remote_url_detection_https(cx: &mut TestAppContext) {
+ init_test(cx);
+ let repository = init_fake_repository(cx).await;
+ let branches = vec![create_test_branch("main", true, None, Some(1000))];
+
+ let (mut ctx, branch_list) = init_branch_list_test(cx, repository.into(), branches);
+ let cx = &mut ctx;
+
+ branch_list
+ .update_in(cx, |branch_list, window, cx| {
+ branch_list.picker.update(cx, |picker, cx| {
+ let query = "https://github.com/user/repo.git".to_string();
+ picker.delegate.update_matches(query, window, cx)
+ })
+ })
+ .await;
+
+ cx.run_until_parked();
+
+ branch_list
+ .update_in(cx, |branch_list, window, cx| {
+ branch_list.picker.update(cx, |picker, cx| {
+ let last_match = picker.delegate.matches.last().unwrap();
+ assert!(last_match.is_new_url());
+ assert!(matches!(picker.delegate.state, PickerState::NewRemote));
+ picker.delegate.confirm(false, window, cx);
+ assert_eq!(picker.delegate.matches.len(), 0);
+ if let PickerState::CreateRemote(remote_url) = &picker.delegate.state
+ && remote_url.as_ref() == "https://github.com/user/repo.git"
+ {
+ } else {
+ panic!("wrong picker state");
+ }
+ picker
+ .delegate
+ .update_matches("my_new_remote".to_string(), window, cx)
+ })
+ })
+ .await;
+
+ cx.run_until_parked();
+
+ branch_list.update_in(cx, |branch_list, window, cx| {
+ branch_list.picker.update(cx, |picker, cx| {
+ picker.delegate.confirm(false, window, cx);
+ assert_eq!(picker.delegate.matches.len(), 0);
+ })
+ });
+ cx.run_until_parked();
+
+ // List remotes
+ let remotes = branch_list
+ .update(cx, |branch_list, cx| {
+ branch_list.picker.update(cx, |picker, cx| {
+ picker
+ .delegate
+ .repo
+ .as_ref()
+ .unwrap()
+ .update(cx, |repo, _cx| repo.get_remotes(None, false))
+ })
+ })
+ .await
+ .unwrap()
+ .unwrap();
+ assert_eq!(
+ remotes,
+ vec![Remote {
+ name: SharedString::from("my_new_remote".to_string())
+ }]
+ );
+ }
+
+ #[gpui::test]
+ async fn test_confirm_remote_url_transitions(cx: &mut TestAppContext) {
+ init_test(cx);
+
+ let branches = vec![create_test_branch("main_branch", true, None, Some(1000))];
+ let (mut ctx, branch_list) = init_branch_list_test(cx, None, branches);
+ let cx = &mut ctx;
+
+ branch_list
+ .update_in(cx, |branch_list, window, cx| {
+ branch_list.picker.update(cx, |picker, cx| {
+ let query = "https://github.com/user/repo.git".to_string();
+ picker.delegate.update_matches(query, window, cx)
+ })
+ })
+ .await;
+ cx.run_until_parked();
+
+ // Try to create a new remote but cancel in the middle of the process
+ branch_list
+ .update_in(cx, |branch_list, window, cx| {
+ branch_list.picker.update(cx, |picker, cx| {
+ picker.delegate.selected_index = picker.delegate.matches.len() - 1;
+ picker.delegate.confirm(false, window, cx);
+
+ assert!(matches!(
+ picker.delegate.state,
+ PickerState::CreateRemote(_)
+ ));
+ if let PickerState::CreateRemote(ref url) = picker.delegate.state {
+ assert_eq!(url.as_ref(), "https://github.com/user/repo.git");
+ }
+ assert_eq!(picker.delegate.matches.len(), 0);
+ picker.delegate.dismissed(window, cx);
+ assert!(matches!(picker.delegate.state, PickerState::List));
+ let query = "main".to_string();
+ picker.delegate.update_matches(query, window, cx)
+ })
+ })
+ .await;
+ cx.run_until_parked();
+
+ // Try to search a branch again to see if the state is restored properly
+ branch_list.update(cx, |branch_list, cx| {
+ branch_list.picker.update(cx, |picker, _cx| {
+ // Should have 1 existing branch + 1 "create new branch" entry = 2 total
+ assert_eq!(picker.delegate.matches.len(), 2);
+ assert!(
+ picker
+ .delegate
+ .matches
+ .iter()
+ .any(|m| m.name() == "main_branch")
+ );
+ // Verify the last entry is the "create new branch" option
+ let last_match = picker.delegate.matches.last().unwrap();
+ assert!(last_match.is_new_branch());
+ })
+ });
+ }
+}
@@ -197,10 +197,7 @@ impl Render for CommitTooltip {
time_format::TimestampFormat::MediumAbsolute,
);
let markdown_style = {
- let mut style = hover_markdown_style(window, cx);
- if let Some(code_block) = &style.code_block.text {
- style.base_text_style.refine(code_block);
- }
+ let style = hover_markdown_style(window, cx);
style
};
@@ -1,19 +1,18 @@
use anyhow::{Context as _, Result};
use buffer_diff::{BufferDiff, BufferDiffSnapshot};
-use editor::{Addon, Editor, EditorEvent, MultiBuffer};
+use editor::display_map::{BlockPlacement, BlockProperties, BlockStyle};
+use editor::{Addon, Editor, EditorEvent, ExcerptId, ExcerptRange, MultiBuffer};
use git::repository::{CommitDetails, CommitDiff, RepoPath};
use git::{GitHostingProviderRegistry, GitRemote, parse_git_remote_url};
use gpui::{
AnyElement, App, AppContext as _, Asset, AsyncApp, AsyncWindowContext, Context, Element,
Entity, EventEmitter, FocusHandle, Focusable, InteractiveElement, IntoElement, ParentElement,
- PromptLevel, Render, Styled, Task, TextStyleRefinement, UnderlineStyle, WeakEntity, Window,
- actions, px,
+ PromptLevel, Render, Styled, Task, WeakEntity, Window, actions,
};
use language::{
- Buffer, Capability, DiskState, File, LanguageRegistry, LineEnding, ReplicaId, Rope, TextBuffer,
- ToPoint,
+ Anchor, Buffer, Capability, DiskState, File, LanguageRegistry, LineEnding, ReplicaId, Rope,
+ TextBuffer, ToPoint,
};
-use markdown::{Markdown, MarkdownElement, MarkdownStyle};
use multi_buffer::ExcerptInfo;
use multi_buffer::PathKey;
use project::{Project, WorktreeId, git_store::Repository};
@@ -63,13 +62,13 @@ pub struct CommitView {
multibuffer: Entity<MultiBuffer>,
repository: Entity<Repository>,
remote: Option<GitRemote>,
- markdown: Entity<Markdown>,
}
struct GitBlob {
path: RepoPath,
worktree_id: WorktreeId,
is_deleted: bool,
+ display_name: Arc<str>,
}
const FILE_NAMESPACE_SORT_PREFIX: u64 = 1;
@@ -159,6 +158,7 @@ impl CommitView {
});
editor
});
+ let commit_sha = Arc::<str>::from(commit.sha.as_ref());
let first_worktree_id = project
.read(cx)
@@ -167,6 +167,8 @@ impl CommitView {
.map(|worktree| worktree.read(cx).id());
let repository_clone = repository.clone();
+ let commit_message = commit.message.clone();
+
cx.spawn(async move |this, cx| {
for file in commit_diff.files {
let is_deleted = file.new_text.is_none();
@@ -180,10 +182,20 @@ impl CommitView {
.or(first_worktree_id)
})?
.context("project has no worktrees")?;
+ let short_sha = commit_sha.get(0..7).unwrap_or(&commit_sha);
+ let file_name = file
+ .path
+ .file_name()
+ .map(|name| name.to_string())
+ .unwrap_or_else(|| file.path.display(PathStyle::Posix).to_string());
+ let display_name: Arc<str> =
+ Arc::from(format!("{short_sha} - {file_name}").into_boxed_str());
+
let file = Arc::new(GitBlob {
path: file.path.clone(),
is_deleted,
worktree_id,
+ display_name,
}) as Arc<dyn language::File>;
let buffer = build_buffer(new_text, file, &language_registry, cx).await?;
@@ -227,6 +239,58 @@ impl CommitView {
});
})?;
}
+
+ let message_buffer = cx.new(|cx| {
+ let mut buffer = Buffer::local(commit_message, cx);
+ buffer.set_capability(Capability::ReadOnly, cx);
+ buffer
+ })?;
+
+ this.update(cx, |this, cx| {
+ this.multibuffer.update(cx, |multibuffer, cx| {
+ let range = ExcerptRange {
+ context: Anchor::MIN..Anchor::MAX,
+ primary: Anchor::MIN..Anchor::MAX,
+ };
+ multibuffer.insert_excerpts_after(
+ ExcerptId::min(),
+ message_buffer.clone(),
+ [range],
+ cx,
+ )
+ });
+
+ this.editor.update(cx, |editor, cx| {
+ editor.disable_header_for_buffer(message_buffer.read(cx).remote_id(), cx);
+
+ editor.insert_blocks(
+ [BlockProperties {
+ placement: BlockPlacement::Above(editor::Anchor::min()),
+ height: Some(1),
+ style: BlockStyle::Sticky,
+ render: Arc::new(|_| gpui::Empty.into_any_element()),
+ priority: 0,
+ }]
+ .into_iter()
+ .chain(
+ editor
+ .buffer()
+ .read(cx)
+ .buffer_anchor_to_anchor(&message_buffer, Anchor::MAX, cx)
+ .map(|anchor| BlockProperties {
+ placement: BlockPlacement::Below(anchor),
+ height: Some(1),
+ style: BlockStyle::Sticky,
+ render: Arc::new(|_| gpui::Empty.into_any_element()),
+ priority: 0,
+ }),
+ ),
+ None,
+ cx,
+ )
+ });
+ })?;
+
anyhow::Ok(())
})
.detach();
@@ -246,14 +310,6 @@ impl CommitView {
})
});
- let processed_message = if let Some(ref remote) = remote {
- Self::process_github_issues(&commit.message, remote)
- } else {
- commit.message.to_string()
- };
-
- let markdown = cx.new(|cx| Markdown::new(processed_message.into(), None, None, cx));
-
Self {
commit,
editor,
@@ -261,18 +317,9 @@ impl CommitView {
stash,
repository,
remote,
- markdown,
}
}
- fn fallback_commit_avatar() -> AnyElement {
- Icon::new(IconName::Person)
- .color(Color::Muted)
- .size(IconSize::Medium)
- .into_element()
- .into_any()
- }
-
fn render_commit_avatar(
&self,
sha: &SharedString,
@@ -280,21 +327,34 @@ impl CommitView {
window: &mut Window,
cx: &mut App,
) -> AnyElement {
+ let size = size.into();
let remote = self.remote.as_ref().filter(|r| r.host_supports_avatars());
if let Some(remote) = remote {
let avatar_asset = CommitAvatarAsset::new(remote.clone(), sha.clone());
if let Some(Some(url)) = window.use_asset::<CommitAvatarAsset>(&avatar_asset, cx) {
- Avatar::new(url.to_string())
+ return Avatar::new(url.to_string())
.size(size)
.into_element()
- .into_any()
- } else {
- Self::fallback_commit_avatar()
+ .into_any();
}
- } else {
- Self::fallback_commit_avatar()
}
+
+ v_flex()
+ .w(size)
+ .h(size)
+ .border_1()
+ .border_color(cx.theme().colors().border)
+ .rounded_full()
+ .justify_center()
+ .items_center()
+ .child(
+ Icon::new(IconName::Person)
+ .color(Color::Muted)
+ .size(IconSize::Medium)
+ .into_element(),
+ )
+ .into_any()
}
fn render_header(&self, window: &mut Window, cx: &mut Context<Self>) -> impl IntoElement {
@@ -322,14 +382,24 @@ impl CommitView {
v_flex()
.p_4()
+ .pl_0()
.gap_4()
.border_b_1()
.border_color(cx.theme().colors().border)
.child(
h_flex()
.items_start()
- .gap_3()
- .child(self.render_commit_avatar(&commit.sha, gpui::rems(3.0), window, cx))
+ .child(
+ h_flex()
+ .w(self.editor.read(cx).last_gutter_dimensions().full_width())
+ .justify_center()
+ .child(self.render_commit_avatar(
+ &commit.sha,
+ gpui::rems(3.0),
+ window,
+ cx,
+ )),
+ )
.child(
v_flex()
.gap_1()
@@ -353,66 +423,6 @@ impl CommitView {
.on_click(move |_, _, cx| cx.open_url(&url))
})),
)
- .child(self.render_commit_message(window, cx))
- }
-
- fn process_github_issues(message: &str, remote: &GitRemote) -> String {
- let mut result = String::new();
- let chars: Vec<char> = message.chars().collect();
- let mut i = 0;
-
- while i < chars.len() {
- if chars[i] == '#' && i + 1 < chars.len() && chars[i + 1].is_ascii_digit() {
- let mut j = i + 1;
- while j < chars.len() && chars[j].is_ascii_digit() {
- j += 1;
- }
- let issue_number = &message[i + 1..i + (j - i)];
- let url = format!(
- "{}/{}/{}/issues/{}",
- remote.host.base_url().as_str().trim_end_matches('/'),
- remote.owner,
- remote.repo,
- issue_number
- );
- result.push_str(&format!("[#{}]({})", issue_number, url));
- i = j;
- } else if i + 3 < chars.len()
- && chars[i] == 'G'
- && chars[i + 1] == 'H'
- && chars[i + 2] == '-'
- && chars[i + 3].is_ascii_digit()
- {
- let mut j = i + 3;
- while j < chars.len() && chars[j].is_ascii_digit() {
- j += 1;
- }
- let issue_number = &message[i + 3..i + (j - i)];
- let url = format!(
- "{}/{}/{}/issues/{}",
- remote.host.base_url().as_str().trim_end_matches('/'),
- remote.owner,
- remote.repo,
- issue_number
- );
- result.push_str(&format!("[GH-{}]({})", issue_number, url));
- i = j;
- } else {
- result.push(chars[i]);
- i += 1;
- }
- }
-
- result
- }
-
- fn render_commit_message(
- &self,
- window: &mut Window,
- cx: &mut Context<Self>,
- ) -> impl IntoElement {
- let style = hover_markdown_style(window, cx);
- MarkdownElement::new(self.markdown.clone(), style)
}
fn apply_stash(workspace: &mut Workspace, window: &mut Window, cx: &mut App) {
@@ -649,7 +659,7 @@ impl language::File for GitBlob {
}
fn file_name<'a>(&'a self, _: &'a App) -> &'a str {
- self.path.file_name().unwrap()
+ self.display_name.as_ref()
}
fn worktree_id(&self, _: &App) -> WorktreeId {
@@ -963,12 +973,6 @@ impl Item for CommitView {
.update(cx, |editor, cx| editor.clone(window, cx))
});
let multibuffer = editor.read(cx).buffer().clone();
- let processed_message = if let Some(ref remote) = self.remote {
- Self::process_github_issues(&self.commit.message, remote)
- } else {
- self.commit.message.to_string()
- };
- let markdown = cx.new(|cx| Markdown::new(processed_message.into(), None, None, cx));
Self {
editor,
multibuffer,
@@ -976,7 +980,6 @@ impl Item for CommitView {
stash: self.stash,
repository: self.repository.clone(),
remote: self.remote.clone(),
- markdown,
}
})))
}
@@ -1046,117 +1049,3 @@ fn stash_matches_index(sha: &str, stash_index: usize, repo: &Repository) -> bool
.map(|entry| entry.oid.to_string() == sha)
.unwrap_or(false)
}
-
-fn hover_markdown_style(window: &Window, cx: &App) -> MarkdownStyle {
- let colors = cx.theme().colors();
- let mut style = MarkdownStyle::default();
- style.base_text_style = window.text_style();
- style.syntax = cx.theme().syntax().clone();
- style.selection_background_color = colors.element_selection_background;
- style.link = TextStyleRefinement {
- color: Some(colors.text_accent),
- underline: Some(UnderlineStyle {
- thickness: px(1.0),
- color: Some(colors.text_accent),
- wavy: false,
- }),
- ..Default::default()
- };
- style
-}
-
-#[cfg(test)]
-mod tests {
- use super::*;
- use git_hosting_providers::Github;
-
- fn create_test_remote() -> GitRemote {
- GitRemote {
- host: Arc::new(Github::public_instance()),
- owner: "zed-industries".into(),
- repo: "zed".into(),
- }
- }
-
- #[test]
- fn test_process_github_issues_simple_issue_number() {
- let remote = create_test_remote();
- let message = "Fix bug #123";
- let result = CommitView::process_github_issues(message, &remote);
- assert_eq!(
- result,
- "Fix bug [#123](https://github.com/zed-industries/zed/issues/123)"
- );
- }
-
- #[test]
- fn test_process_github_issues_multiple_issue_numbers() {
- let remote = create_test_remote();
- let message = "Fix #123 and #456";
- let result = CommitView::process_github_issues(message, &remote);
- assert_eq!(
- result,
- "Fix [#123](https://github.com/zed-industries/zed/issues/123) and [#456](https://github.com/zed-industries/zed/issues/456)"
- );
- }
-
- #[test]
- fn test_process_github_issues_gh_format() {
- let remote = create_test_remote();
- let message = "Fix GH-789";
- let result = CommitView::process_github_issues(message, &remote);
- assert_eq!(
- result,
- "Fix [GH-789](https://github.com/zed-industries/zed/issues/789)"
- );
- }
-
- #[test]
- fn test_process_github_issues_mixed_formats() {
- let remote = create_test_remote();
- let message = "Fix #123 and GH-456";
- let result = CommitView::process_github_issues(message, &remote);
- assert_eq!(
- result,
- "Fix [#123](https://github.com/zed-industries/zed/issues/123) and [GH-456](https://github.com/zed-industries/zed/issues/456)"
- );
- }
-
- #[test]
- fn test_process_github_issues_no_issues() {
- let remote = create_test_remote();
- let message = "This is a commit message without any issues";
- let result = CommitView::process_github_issues(message, &remote);
- assert_eq!(result, message);
- }
-
- #[test]
- fn test_process_github_issues_hash_without_number() {
- let remote = create_test_remote();
- let message = "Use # for comments";
- let result = CommitView::process_github_issues(message, &remote);
- assert_eq!(result, message);
- }
-
- #[test]
- fn test_process_github_issues_consecutive_issues() {
- let remote = create_test_remote();
- let message = "#123#456";
- let result = CommitView::process_github_issues(message, &remote);
- assert_eq!(
- result,
- "[#123](https://github.com/zed-industries/zed/issues/123)[#456](https://github.com/zed-industries/zed/issues/456)"
- );
- }
-
- #[test]
- fn test_process_github_issues_multiline() {
- let remote = create_test_remote();
- let message = "Fix #123\n\nThis also fixes #456";
- let result = CommitView::process_github_issues(message, &remote);
- assert_eq!(
- result,
- "Fix [#123](https://github.com/zed-industries/zed/issues/123)\n\nThis also fixes [#456](https://github.com/zed-industries/zed/issues/456)"
- );
- }
-}
@@ -6,7 +6,8 @@ use crate::project_diff::{self, Diff, ProjectDiff};
use crate::remote_output::{self, RemoteAction, SuccessMessage};
use crate::{branch_picker, picker_prompt, render_remote_button};
use crate::{
- git_panel_settings::GitPanelSettings, git_status_icon, repository_selector::RepositorySelector,
+ file_history_view::FileHistoryView, git_panel_settings::GitPanelSettings, git_status_icon,
+ repository_selector::RepositorySelector,
};
use agent_settings::AgentSettings;
use anyhow::Context as _;
@@ -842,6 +843,26 @@ impl GitPanel {
});
}
+ fn file_history(&mut self, _: &git::FileHistory, window: &mut Window, cx: &mut Context<Self>) {
+ maybe!({
+ let entry = self.entries.get(self.selected_entry?)?.status_entry()?;
+ let active_repo = self.active_repository.as_ref()?;
+ let repo_path = entry.repo_path.clone();
+ let git_store = self.project.read(cx).git_store();
+
+ FileHistoryView::open(
+ repo_path,
+ git_store.downgrade(),
+ active_repo.downgrade(),
+ self.workspace.clone(),
+ window,
+ cx,
+ );
+
+ Some(())
+ });
+ }
+
fn open_file(
&mut self,
_: &menu::SecondaryConfirm,
@@ -3442,7 +3463,6 @@ impl GitPanel {
) -> Option<impl IntoElement> {
let active_repository = self.active_repository.clone()?;
let panel_editor_style = panel_editor_style(true, window, cx);
-
let enable_coauthors = self.render_co_authors(cx);
let editor_focus_handle = self.commit_editor.focus_handle(cx);
@@ -3983,20 +4003,21 @@ impl GitPanel {
"Restore File"
};
let context_menu = ContextMenu::build(window, cx, |context_menu, _, _| {
- let mut context_menu = context_menu
+ let is_created = entry.status.is_created();
+ context_menu
.context(self.focus_handle.clone())
.action(stage_title, ToggleStaged.boxed_clone())
- .action(restore_title, git::RestoreFile::default().boxed_clone());
-
- if entry.status.is_created() {
- context_menu =
- context_menu.action("Add to .gitignore", git::AddToGitignore.boxed_clone());
- }
-
- context_menu
+ .action(restore_title, git::RestoreFile::default().boxed_clone())
+ .action_disabled_when(
+ !is_created,
+ "Add to .gitignore",
+ git::AddToGitignore.boxed_clone(),
+ )
.separator()
.action("Open Diff", Confirm.boxed_clone())
.action("Open File", SecondaryConfirm.boxed_clone())
+ .separator()
+ .action_disabled_when(is_created, "File History", Box::new(git::FileHistory))
});
self.selected_entry = Some(ix);
self.set_context_menu(context_menu, position, window, cx);
@@ -4499,6 +4520,7 @@ impl Render for GitPanel {
.on_action(cx.listener(Self::close_panel))
.on_action(cx.listener(Self::open_diff))
.on_action(cx.listener(Self::open_file))
+ .on_action(cx.listener(Self::file_history))
.on_action(cx.listener(Self::focus_changes_list))
.on_action(cx.listener(Self::focus_editor))
.on_action(cx.listener(Self::expand_commit_editor))
@@ -4749,7 +4771,6 @@ impl RenderOnce for PanelRepoFooter {
const MAX_REPO_LEN: usize = 16;
const LABEL_CHARACTER_BUDGET: usize = MAX_BRANCH_LEN + MAX_REPO_LEN;
const MAX_SHORT_SHA_LEN: usize = 8;
-
let branch_name = self
.branch
.as_ref()
@@ -1,4 +1,5 @@
use anyhow::Context as _;
+
use git::repository::{Remote, RemoteCommandOutput};
use linkify::{LinkFinder, LinkKind};
use ui::SharedString;
@@ -551,12 +551,39 @@ impl SystemWindowTabController {
}
}
+pub(crate) enum GpuiMode {
+ #[cfg(any(test, feature = "test-support"))]
+ Test {
+ skip_drawing: bool,
+ },
+ Production,
+}
+
+impl GpuiMode {
+ #[cfg(any(test, feature = "test-support"))]
+ pub fn test() -> Self {
+ GpuiMode::Test {
+ skip_drawing: false,
+ }
+ }
+
+ #[inline]
+ pub(crate) fn skip_drawing(&self) -> bool {
+ match self {
+ #[cfg(any(test, feature = "test-support"))]
+ GpuiMode::Test { skip_drawing } => *skip_drawing,
+ GpuiMode::Production => false,
+ }
+ }
+}
+
/// Contains the state of the full application, and passed as a reference to a variety of callbacks.
/// Other [Context] derefs to this type.
/// You need a reference to an `App` to access the state of a [Entity].
pub struct App {
pub(crate) this: Weak<AppCell>,
pub(crate) platform: Rc<dyn Platform>,
+ pub(crate) mode: GpuiMode,
text_system: Arc<TextSystem>,
flushing_effects: bool,
pending_updates: usize,
@@ -635,6 +662,7 @@ impl App {
this: this.clone(),
platform: platform.clone(),
text_system,
+ mode: GpuiMode::Production,
actions: Rc::new(ActionRegistry::default()),
flushing_effects: false,
pending_updates: 0,
@@ -5,7 +5,7 @@ use crate::{
ModifiersChangedEvent, MouseButton, MouseDownEvent, MouseMoveEvent, MouseUpEvent, Pixels,
Platform, Point, Render, Result, Size, Task, TestDispatcher, TestPlatform,
TestScreenCaptureSource, TestWindow, TextSystem, VisualContext, Window, WindowBounds,
- WindowHandle, WindowOptions,
+ WindowHandle, WindowOptions, app::GpuiMode,
};
use anyhow::{anyhow, bail};
use futures::{Stream, StreamExt, channel::oneshot};
@@ -132,8 +132,11 @@ impl TestAppContext {
let http_client = http_client::FakeHttpClient::with_404_response();
let text_system = Arc::new(TextSystem::new(platform.text_system()));
+ let mut app = App::new_app(platform.clone(), asset_source, http_client);
+ app.borrow_mut().mode = GpuiMode::test();
+
Self {
- app: App::new_app(platform.clone(), asset_source, http_client),
+ app,
background_executor,
foreground_executor,
dispatcher,
@@ -144,6 +147,11 @@ impl TestAppContext {
}
}
+ /// Skip all drawing operations for the duration of this test.
+ pub fn skip_drawing(&mut self) {
+ self.app.borrow_mut().mode = GpuiMode::Test { skip_drawing: true };
+ }
+
/// Create a single TestAppContext, for non-multi-client tests
pub fn single() -> Self {
let dispatcher = TestDispatcher::new(StdRng::seed_from_u64(0));
@@ -748,7 +748,7 @@ impl Size<Length> {
/// assert_eq!(bounds.origin, origin);
/// assert_eq!(bounds.size, size);
/// ```
-#[derive(Refineable, Clone, Default, Debug, Eq, PartialEq, Serialize, Deserialize, Hash)]
+#[derive(Refineable, Copy, Clone, Default, Debug, Eq, PartialEq, Serialize, Deserialize, Hash)]
#[refineable(Debug)]
#[repr(C)]
pub struct Bounds<T: Clone + Debug + Default + PartialEq> {
@@ -1676,8 +1676,6 @@ impl Bounds<DevicePixels> {
}
}
-impl<T: Copy + Clone + Debug + Default + PartialEq> Copy for Bounds<T> {}
-
/// Represents the edges of a box in a 2D space, such as padding or margin.
///
/// Each field represents the size of the edge on one side of the box: `top`, `right`, `bottom`, and `left`.
@@ -26,12 +26,13 @@ pub(crate) struct LinuxDispatcher {
main_thread_id: thread::ThreadId,
}
+const MIN_THREADS: usize = 2;
+
impl LinuxDispatcher {
pub fn new(main_sender: Sender<RunnableVariant>) -> Self {
let (background_sender, background_receiver) = flume::unbounded::<RunnableVariant>();
- let thread_count = std::thread::available_parallelism()
- .map(|i| i.get())
- .unwrap_or(1);
+ let thread_count =
+ std::thread::available_parallelism().map_or(MIN_THREADS, |i| i.get().max(MIN_THREADS));
let mut background_threads = (0..thread_count)
.map(|i| {
@@ -1419,7 +1419,7 @@ impl Dispatch<wl_keyboard::WlKeyboard, ()> for WaylandClientStatePtr {
state.repeat.current_keycode = Some(keycode);
let rate = state.repeat.characters_per_second;
- let repeat_interval = Duration::from_secs(1) / rate;
+ let repeat_interval = Duration::from_secs(1) / rate.max(1);
let id = state.repeat.current_id;
state
.loop_handle
@@ -7,9 +7,7 @@ use std::{
use flume::Sender;
use util::ResultExt;
use windows::{
- System::Threading::{
- ThreadPool, ThreadPoolTimer, TimerElapsedHandler, WorkItemHandler, WorkItemPriority,
- },
+ System::Threading::{ThreadPool, ThreadPoolTimer, TimerElapsedHandler, WorkItemHandler},
Win32::{
Foundation::{LPARAM, WPARAM},
UI::WindowsAndMessaging::PostMessageW,
@@ -55,7 +53,7 @@ impl WindowsDispatcher {
Ok(())
})
};
- ThreadPool::RunWithPriorityAsync(&handler, WorkItemPriority::High).log_err();
+ ThreadPool::RunAsync(&handler).log_err();
}
fn dispatch_on_threadpool_after(&self, runnable: RunnableVariant, duration: Duration) {
@@ -51,7 +51,7 @@ impl WindowsWindowInner {
WM_NCCALCSIZE => self.handle_calc_client_size(handle, wparam, lparam),
WM_DPICHANGED => self.handle_dpi_changed_msg(handle, wparam, lparam),
WM_DISPLAYCHANGE => self.handle_display_change_msg(handle),
- WM_NCHITTEST => self.handle_hit_test_msg(handle, msg, wparam, lparam),
+ WM_NCHITTEST => self.handle_hit_test_msg(handle, lparam),
WM_PAINT => self.handle_paint_msg(handle),
WM_CLOSE => self.handle_close_msg(),
WM_DESTROY => self.handle_destroy_msg(handle),
@@ -116,17 +116,16 @@ impl WindowsWindowInner {
}
fn handle_move_msg(&self, handle: HWND, lparam: LPARAM) -> Option<isize> {
- let mut lock = self.state.borrow_mut();
let origin = logical_point(
lparam.signed_loword() as f32,
lparam.signed_hiword() as f32,
- lock.scale_factor,
+ self.state.scale_factor.get(),
);
- lock.origin = origin;
- let size = lock.logical_size;
+ self.state.origin.set(origin);
+ let size = self.state.logical_size.get();
let center_x = origin.x.0 + size.width.0 / 2.;
let center_y = origin.y.0 + size.height.0 / 2.;
- let monitor_bounds = lock.display.bounds();
+ let monitor_bounds = self.state.display.get().bounds();
if center_x < monitor_bounds.left().0
|| center_x > monitor_bounds.right().0
|| center_y < monitor_bounds.top().0
@@ -136,42 +135,42 @@ impl WindowsWindowInner {
let monitor = unsafe { MonitorFromWindow(handle, MONITOR_DEFAULTTONULL) };
// minimize the window can trigger this event too, in this case,
// monitor is invalid, we do nothing.
- if !monitor.is_invalid() && lock.display.handle != monitor {
+ if !monitor.is_invalid() && self.state.display.get().handle != monitor {
// we will get the same monitor if we only have one
- lock.display = WindowsDisplay::new_with_handle(monitor).log_err()?;
+ self.state
+ .display
+ .set(WindowsDisplay::new_with_handle(monitor).log_err()?);
}
}
- if let Some(mut callback) = lock.callbacks.moved.take() {
- drop(lock);
+ if let Some(mut callback) = self.state.callbacks.moved.take() {
callback();
- self.state.borrow_mut().callbacks.moved = Some(callback);
+ self.state.callbacks.moved.set(Some(callback));
}
Some(0)
}
fn handle_get_min_max_info_msg(&self, lparam: LPARAM) -> Option<isize> {
- let lock = self.state.borrow();
- let min_size = lock.min_size?;
- let scale_factor = lock.scale_factor;
- let boarder_offset = lock.border_offset;
- drop(lock);
+ let min_size = self.state.min_size?;
+ let scale_factor = self.state.scale_factor.get();
+ let boarder_offset = &self.state.border_offset;
+
unsafe {
let minmax_info = &mut *(lparam.0 as *mut MINMAXINFO);
minmax_info.ptMinTrackSize.x =
- min_size.width.scale(scale_factor).0 as i32 + boarder_offset.width_offset;
+ min_size.width.scale(scale_factor).0 as i32 + boarder_offset.width_offset.get();
minmax_info.ptMinTrackSize.y =
- min_size.height.scale(scale_factor).0 as i32 + boarder_offset.height_offset;
+ min_size.height.scale(scale_factor).0 as i32 + boarder_offset.height_offset.get();
}
Some(0)
}
fn handle_size_msg(&self, wparam: WPARAM, lparam: LPARAM) -> Option<isize> {
- let mut lock = self.state.borrow_mut();
-
// Don't resize the renderer when the window is minimized, but record that it was minimized so
// that on restore the swap chain can be recreated via `update_drawable_size_even_if_unchanged`.
if wparam.0 == SIZE_MINIMIZED as usize {
- lock.restore_from_minimized = lock.callbacks.request_frame.take();
+ self.state
+ .restore_from_minimized
+ .set(self.state.callbacks.request_frame.take());
return Some(0);
}
@@ -179,14 +178,16 @@ impl WindowsWindowInner {
let height = lparam.hiword().max(1) as i32;
let new_size = size(DevicePixels(width), DevicePixels(height));
- let scale_factor = lock.scale_factor;
+ let scale_factor = self.state.scale_factor.get();
let mut should_resize_renderer = false;
- if lock.restore_from_minimized.is_some() {
- lock.callbacks.request_frame = lock.restore_from_minimized.take();
+ if let Some(restore_from_minimized) = self.state.restore_from_minimized.take() {
+ self.state
+ .callbacks
+ .request_frame
+ .set(Some(restore_from_minimized));
} else {
should_resize_renderer = true;
}
- drop(lock);
self.handle_size_change(new_size, scale_factor, should_resize_renderer);
Some(0)
@@ -199,17 +200,19 @@ impl WindowsWindowInner {
should_resize_renderer: bool,
) {
let new_logical_size = device_size.to_pixels(scale_factor);
- let mut lock = self.state.borrow_mut();
- lock.logical_size = new_logical_size;
- if should_resize_renderer && let Err(e) = lock.renderer.resize(device_size) {
+
+ self.state.logical_size.set(new_logical_size);
+ if should_resize_renderer
+ && let Err(e) = self.state.renderer.borrow_mut().resize(device_size)
+ {
log::error!("Failed to resize renderer, invalidating devices: {}", e);
- lock.invalidate_devices
+ self.state
+ .invalidate_devices
.store(true, std::sync::atomic::Ordering::Release);
}
- if let Some(mut callback) = lock.callbacks.resize.take() {
- drop(lock);
+ if let Some(mut callback) = self.state.callbacks.resize.take() {
callback(new_logical_size, scale_factor);
- self.state.borrow_mut().callbacks.resize = Some(callback);
+ self.state.callbacks.resize.set(Some(callback));
}
}
@@ -254,17 +257,14 @@ impl WindowsWindowInner {
}
fn handle_close_msg(&self) -> Option<isize> {
- let mut callback = self.state.borrow_mut().callbacks.should_close.take()?;
+ let mut callback = self.state.callbacks.should_close.take()?;
let should_close = callback();
- self.state.borrow_mut().callbacks.should_close = Some(callback);
+ self.state.callbacks.should_close.set(Some(callback));
if should_close { None } else { Some(0) }
}
fn handle_destroy_msg(&self, handle: HWND) -> Option<isize> {
- let callback = {
- let mut lock = self.state.borrow_mut();
- lock.callbacks.close.take()
- };
+ let callback = { self.state.callbacks.close.take() };
if let Some(callback) = callback {
callback();
}
@@ -283,12 +283,10 @@ impl WindowsWindowInner {
fn handle_mouse_move_msg(&self, handle: HWND, lparam: LPARAM, wparam: WPARAM) -> Option<isize> {
self.start_tracking_mouse(handle, TME_LEAVE);
- let mut lock = self.state.borrow_mut();
- let Some(mut func) = lock.callbacks.input.take() else {
+ let Some(mut func) = self.state.callbacks.input.take() else {
return Some(1);
};
- let scale_factor = lock.scale_factor;
- drop(lock);
+ let scale_factor = self.state.scale_factor.get();
let pressed_button = match MODIFIERKEYS_FLAGS(wparam.loword() as u32) {
flags if flags.contains(MK_LBUTTON) => Some(MouseButton::Left),
@@ -310,32 +308,32 @@ impl WindowsWindowInner {
modifiers: current_modifiers(),
});
let handled = !func(input).propagate;
- self.state.borrow_mut().callbacks.input = Some(func);
+ self.state.callbacks.input.set(Some(func));
if handled { Some(0) } else { Some(1) }
}
fn handle_mouse_leave_msg(&self) -> Option<isize> {
- let mut lock = self.state.borrow_mut();
- lock.hovered = false;
- if let Some(mut callback) = lock.callbacks.hovered_status_change.take() {
- drop(lock);
+ self.state.hovered.set(false);
+ if let Some(mut callback) = self.state.callbacks.hovered_status_change.take() {
callback(false);
- self.state.borrow_mut().callbacks.hovered_status_change = Some(callback);
+ self.state
+ .callbacks
+ .hovered_status_change
+ .set(Some(callback));
}
Some(0)
}
fn handle_syskeyup_msg(&self, wparam: WPARAM, lparam: LPARAM) -> Option<isize> {
- let mut lock = self.state.borrow_mut();
- let input = handle_key_event(wparam, lparam, &mut lock, |keystroke, _| {
+ let input = handle_key_event(wparam, lparam, &self.state, |keystroke, _| {
PlatformInput::KeyUp(KeyUpEvent { keystroke })
})?;
- let mut func = lock.callbacks.input.take()?;
- drop(lock);
+ let mut func = self.state.callbacks.input.take()?;
+
func(input);
- self.state.borrow_mut().callbacks.input = Some(func);
+ self.state.callbacks.input.set(Some(func));
// Always return 0 to indicate that the message was handled, so we could properly handle `ModifiersChanged` event.
Some(0)
@@ -344,11 +342,10 @@ impl WindowsWindowInner {
// It's a known bug that you can't trigger `ctrl-shift-0`. See:
// https://superuser.com/questions/1455762/ctrl-shift-number-key-combination-has-stopped-working-for-a-few-numbers
fn handle_keydown_msg(&self, wparam: WPARAM, lparam: LPARAM) -> Option<isize> {
- let mut lock = self.state.borrow_mut();
let Some(input) = handle_key_event(
wparam,
lparam,
- &mut lock,
+ &self.state,
|keystroke, prefer_character_input| {
PlatformInput::KeyDown(KeyDownEvent {
keystroke,
@@ -359,34 +356,31 @@ impl WindowsWindowInner {
) else {
return Some(1);
};
- drop(lock);
- let Some(mut func) = self.state.borrow_mut().callbacks.input.take() else {
+ let Some(mut func) = self.state.callbacks.input.take() else {
return Some(1);
};
let handled = !func(input).propagate;
- self.state.borrow_mut().callbacks.input = Some(func);
+ self.state.callbacks.input.set(Some(func));
if handled { Some(0) } else { Some(1) }
}
fn handle_keyup_msg(&self, wparam: WPARAM, lparam: LPARAM) -> Option<isize> {
- let mut lock = self.state.borrow_mut();
- let Some(input) = handle_key_event(wparam, lparam, &mut lock, |keystroke, _| {
+ let Some(input) = handle_key_event(wparam, lparam, &self.state, |keystroke, _| {
PlatformInput::KeyUp(KeyUpEvent { keystroke })
}) else {
return Some(1);
};
- let Some(mut func) = lock.callbacks.input.take() else {
+ let Some(mut func) = self.state.callbacks.input.take() else {
return Some(1);
};
- drop(lock);
let handled = !func(input).propagate;
- self.state.borrow_mut().callbacks.input = Some(func);
+ self.state.callbacks.input.set(Some(func));
if handled { Some(0) } else { Some(1) }
}
@@ -407,16 +401,15 @@ impl WindowsWindowInner {
lparam: LPARAM,
) -> Option<isize> {
unsafe { SetCapture(handle) };
- let mut lock = self.state.borrow_mut();
- let Some(mut func) = lock.callbacks.input.take() else {
+
+ let Some(mut func) = self.state.callbacks.input.take() else {
return Some(1);
};
let x = lparam.signed_loword();
let y = lparam.signed_hiword();
let physical_point = point(DevicePixels(x as i32), DevicePixels(y as i32));
- let click_count = lock.click_state.update(button, physical_point);
- let scale_factor = lock.scale_factor;
- drop(lock);
+ let click_count = self.state.click_state.update(button, physical_point);
+ let scale_factor = self.state.scale_factor.get();
let input = PlatformInput::MouseDown(MouseDownEvent {
button,
@@ -426,7 +419,7 @@ impl WindowsWindowInner {
first_mouse: false,
});
let handled = !func(input).propagate;
- self.state.borrow_mut().callbacks.input = Some(func);
+ self.state.callbacks.input.set(Some(func));
if handled { Some(0) } else { Some(1) }
}
@@ -438,15 +431,14 @@ impl WindowsWindowInner {
lparam: LPARAM,
) -> Option<isize> {
unsafe { ReleaseCapture().log_err() };
- let mut lock = self.state.borrow_mut();
- let Some(mut func) = lock.callbacks.input.take() else {
+
+ let Some(mut func) = self.state.callbacks.input.take() else {
return Some(1);
};
let x = lparam.signed_loword() as f32;
let y = lparam.signed_hiword() as f32;
- let click_count = lock.click_state.current_count;
- let scale_factor = lock.scale_factor;
- drop(lock);
+ let click_count = self.state.click_state.current_count.get();
+ let scale_factor = self.state.scale_factor.get();
let input = PlatformInput::MouseUp(MouseUpEvent {
button,
@@ -455,7 +447,7 @@ impl WindowsWindowInner {
click_count,
});
let handled = !func(input).propagate;
- self.state.borrow_mut().callbacks.input = Some(func);
+ self.state.callbacks.input.set(Some(func));
if handled { Some(0) } else { Some(1) }
}
@@ -482,24 +474,23 @@ impl WindowsWindowInner {
lparam: LPARAM,
) -> Option<isize> {
let modifiers = current_modifiers();
- let mut lock = self.state.borrow_mut();
- let Some(mut func) = lock.callbacks.input.take() else {
+
+ let Some(mut func) = self.state.callbacks.input.take() else {
return Some(1);
};
- let scale_factor = lock.scale_factor;
+ let scale_factor = self.state.scale_factor.get();
let wheel_scroll_amount = match modifiers.shift {
- true => {
- self.system_settings()
- .mouse_wheel_settings
- .wheel_scroll_chars
- }
- false => {
- self.system_settings()
- .mouse_wheel_settings
- .wheel_scroll_lines
- }
+ true => self
+ .system_settings()
+ .mouse_wheel_settings
+ .wheel_scroll_chars
+ .get(),
+ false => self
+ .system_settings()
+ .mouse_wheel_settings
+ .wheel_scroll_lines
+ .get(),
};
- drop(lock);
let wheel_distance =
(wparam.signed_hiword() as f32 / WHEEL_DELTA as f32) * wheel_scroll_amount as f32;
@@ -524,7 +515,7 @@ impl WindowsWindowInner {
touch_phase: TouchPhase::Moved,
});
let handled = !func(input).propagate;
- self.state.borrow_mut().callbacks.input = Some(func);
+ self.state.callbacks.input.set(Some(func));
if handled { Some(0) } else { Some(1) }
}
@@ -535,16 +526,15 @@ impl WindowsWindowInner {
wparam: WPARAM,
lparam: LPARAM,
) -> Option<isize> {
- let mut lock = self.state.borrow_mut();
- let Some(mut func) = lock.callbacks.input.take() else {
+ let Some(mut func) = self.state.callbacks.input.take() else {
return Some(1);
};
- let scale_factor = lock.scale_factor;
+ let scale_factor = self.state.scale_factor.get();
let wheel_scroll_chars = self
.system_settings()
.mouse_wheel_settings
- .wheel_scroll_chars;
- drop(lock);
+ .wheel_scroll_chars
+ .get();
let wheel_distance =
(-wparam.signed_hiword() as f32 / WHEEL_DELTA as f32) * wheel_scroll_chars as f32;
@@ -563,7 +553,7 @@ impl WindowsWindowInner {
touch_phase: TouchPhase::Moved,
});
let handled = !func(event).propagate;
- self.state.borrow_mut().callbacks.input = Some(func);
+ self.state.callbacks.input.set(Some(func));
if handled { Some(0) } else { Some(1) }
}
@@ -657,11 +647,11 @@ impl WindowsWindowInner {
wparam: WPARAM,
lparam: LPARAM,
) -> Option<isize> {
- if !self.hide_title_bar || self.state.borrow().is_fullscreen() || wparam.0 == 0 {
+ if !self.hide_title_bar || self.state.is_fullscreen() || wparam.0 == 0 {
return None;
}
- let is_maximized = self.state.borrow().is_maximized();
+ let is_maximized = self.state.is_maximized();
let insets = get_client_area_insets(handle, is_maximized, self.windows_version);
// wparam is TRUE so lparam points to an NCCALCSIZE_PARAMS structure
let mut params = lparam.0 as *mut NCCALCSIZE_PARAMS;
@@ -676,7 +666,7 @@ impl WindowsWindowInner {
// used by Chrome. However, it may result in one row of pixels being obscured
// in our client area. But as Chrome says, "there seems to be no better solution."
if is_maximized
- && let Some(ref taskbar_position) = self.system_settings().auto_hide_taskbar_position
+ && let Some(taskbar_position) = self.system_settings().auto_hide_taskbar_position.get()
{
// For the auto-hide taskbar, adjust in by 1 pixel on taskbar edge,
// so the window isn't treated as a "fullscreen app", which would cause
@@ -705,11 +695,9 @@ impl WindowsWindowInner {
let this = self.clone();
self.executor
.spawn(async move {
- let mut lock = this.state.borrow_mut();
- if let Some(mut func) = lock.callbacks.active_status_change.take() {
- drop(lock);
+ if let Some(mut func) = this.state.callbacks.active_status_change.take() {
func(activated);
- this.state.borrow_mut().callbacks.active_status_change = Some(func);
+ this.state.callbacks.active_status_change.set(Some(func));
}
})
.detach();
@@ -733,12 +721,11 @@ impl WindowsWindowInner {
lparam: LPARAM,
) -> Option<isize> {
let new_dpi = wparam.loword() as f32;
- let mut lock = self.state.borrow_mut();
- let is_maximized = lock.is_maximized();
+
+ let is_maximized = self.state.is_maximized();
let new_scale_factor = new_dpi / USER_DEFAULT_SCREEN_DPI as f32;
- lock.scale_factor = new_scale_factor;
- lock.border_offset.update(handle).log_err();
- drop(lock);
+ self.state.scale_factor.set(new_scale_factor);
+ self.state.border_offset.update(handle).log_err();
if is_maximized {
// Get the monitor and its work area at the new DPI
@@ -812,7 +799,7 @@ impl WindowsWindowInner {
// Because WM_DPICHANGED, WM_MOVE, WM_SIZE will come first, window reposition and resize
// are handled there.
// So we only care about if monitor is disconnected.
- let previous_monitor = self.state.borrow().display;
+ let previous_monitor = self.state.display.get();
if WindowsDisplay::is_connected(previous_monitor.handle) {
// we are fine, other display changed
return None;
@@ -830,86 +817,78 @@ impl WindowsWindowInner {
return None;
}
let new_display = WindowsDisplay::new_with_handle(new_monitor).log_err()?;
- self.state.borrow_mut().display = new_display;
+ self.state.display.set(new_display);
Some(0)
}
- fn handle_hit_test_msg(
- &self,
- handle: HWND,
- msg: u32,
- wparam: WPARAM,
- lparam: LPARAM,
- ) -> Option<isize> {
- if !self.is_movable || self.state.borrow().is_fullscreen() {
+ fn handle_hit_test_msg(&self, handle: HWND, lparam: LPARAM) -> Option<isize> {
+ if !self.is_movable || self.state.is_fullscreen() {
return None;
}
- let mut lock = self.state.borrow_mut();
- if let Some(mut callback) = lock.callbacks.hit_test_window_control.take() {
- drop(lock);
+ let callback = self.state.callbacks.hit_test_window_control.take();
+ let drag_area = if let Some(mut callback) = callback {
let area = callback();
- self.state.borrow_mut().callbacks.hit_test_window_control = Some(callback);
+ self.state
+ .callbacks
+ .hit_test_window_control
+ .set(Some(callback));
if let Some(area) = area {
- return match area {
+ match area {
WindowControlArea::Drag => Some(HTCAPTION as _),
- WindowControlArea::Close => Some(HTCLOSE as _),
- WindowControlArea::Max => Some(HTMAXBUTTON as _),
- WindowControlArea::Min => Some(HTMINBUTTON as _),
- };
+ WindowControlArea::Close => return Some(HTCLOSE as _),
+ WindowControlArea::Max => return Some(HTMAXBUTTON as _),
+ WindowControlArea::Min => return Some(HTMINBUTTON as _),
+ }
+ } else {
+ None
}
} else {
- drop(lock);
- }
+ None
+ };
if !self.hide_title_bar {
// If the OS draws the title bar, we don't need to handle hit test messages.
- return None;
- }
-
- // default handler for resize areas
- let hit = unsafe { DefWindowProcW(handle, msg, wparam, lparam) };
- if matches!(
- hit.0 as u32,
- HTNOWHERE
- | HTRIGHT
- | HTLEFT
- | HTTOPLEFT
- | HTTOP
- | HTTOPRIGHT
- | HTBOTTOMRIGHT
- | HTBOTTOM
- | HTBOTTOMLEFT
- ) {
- return Some(hit.0);
- }
-
- if self.state.borrow().is_fullscreen() {
- return Some(HTCLIENT as _);
+ return drag_area;
}
let dpi = unsafe { GetDpiForWindow(handle) };
- let frame_y = unsafe { GetSystemMetricsForDpi(SM_CYFRAME, dpi) };
-
+ // We do not use the OS title bar, so the default `DefWindowProcW` will only register a 1px edge for resizes
+ // We need to calculate the frame thickness ourselves and do the hit test manually.
+ let frame_y = get_frame_thicknessx(dpi);
+ let frame_x = get_frame_thicknessy(dpi);
let mut cursor_point = POINT {
x: lparam.signed_loword().into(),
y: lparam.signed_hiword().into(),
};
+
unsafe { ScreenToClient(handle, &mut cursor_point).ok().log_err() };
- if !self.state.borrow().is_maximized() && cursor_point.y >= 0 && cursor_point.y <= frame_y {
- return Some(HTTOP as _);
+ if !self.state.is_maximized() && 0 <= cursor_point.y && cursor_point.y <= frame_y {
+ // x-axis actually goes from -frame_x to 0
+ return Some(if cursor_point.x <= 0 {
+ HTTOPLEFT
+ } else {
+ let mut rect = Default::default();
+ unsafe { GetWindowRect(handle, &mut rect) }.log_err();
+ // right and bottom bounds of RECT are exclusive, thus `-1`
+ let right = rect.right - rect.left - 1;
+                // the bounds include the padding frames, so accommodate for both of them
+ if right - 2 * frame_x <= cursor_point.x {
+ HTTOPRIGHT
+ } else {
+ HTTOP
+ }
+ } as _);
}
- Some(HTCLIENT as _)
+ drag_area
}
fn handle_nc_mouse_move_msg(&self, handle: HWND, lparam: LPARAM) -> Option<isize> {
self.start_tracking_mouse(handle, TME_LEAVE | TME_NONCLIENT);
- let mut lock = self.state.borrow_mut();
- let mut func = lock.callbacks.input.take()?;
- let scale_factor = lock.scale_factor;
- drop(lock);
+ let mut func = self.state.callbacks.input.take()?;
+ let scale_factor = self.state.scale_factor.get();
let mut cursor_point = POINT {
x: lparam.signed_loword().into(),
@@ -922,7 +901,7 @@ impl WindowsWindowInner {
modifiers: current_modifiers(),
});
let handled = !func(input).propagate;
- self.state.borrow_mut().callbacks.input = Some(func);
+ self.state.callbacks.input.set(Some(func));
if handled { Some(0) } else { None }
}
@@ -934,17 +913,15 @@ impl WindowsWindowInner {
wparam: WPARAM,
lparam: LPARAM,
) -> Option<isize> {
- let mut lock = self.state.borrow_mut();
- if let Some(mut func) = lock.callbacks.input.take() {
- let scale_factor = lock.scale_factor;
+ if let Some(mut func) = self.state.callbacks.input.take() {
+ let scale_factor = self.state.scale_factor.get();
let mut cursor_point = POINT {
x: lparam.signed_loword().into(),
y: lparam.signed_hiword().into(),
};
unsafe { ScreenToClient(handle, &mut cursor_point).ok().log_err() };
let physical_point = point(DevicePixels(cursor_point.x), DevicePixels(cursor_point.y));
- let click_count = lock.click_state.update(button, physical_point);
- drop(lock);
+ let click_count = self.state.click_state.update(button, physical_point);
let input = PlatformInput::MouseDown(MouseDownEvent {
button,
@@ -955,21 +932,20 @@ impl WindowsWindowInner {
});
let result = func(input);
let handled = !result.propagate || result.default_prevented;
- self.state.borrow_mut().callbacks.input = Some(func);
+ self.state.callbacks.input.set(Some(func));
if handled {
return Some(0);
}
} else {
- drop(lock);
};
// Since these are handled in handle_nc_mouse_up_msg we must prevent the default window proc
if button == MouseButton::Left {
match wparam.0 as u32 {
- HTMINBUTTON => self.state.borrow_mut().nc_button_pressed = Some(HTMINBUTTON),
- HTMAXBUTTON => self.state.borrow_mut().nc_button_pressed = Some(HTMAXBUTTON),
- HTCLOSE => self.state.borrow_mut().nc_button_pressed = Some(HTCLOSE),
+ HTMINBUTTON => self.state.nc_button_pressed.set(Some(HTMINBUTTON)),
+ HTMAXBUTTON => self.state.nc_button_pressed.set(Some(HTMAXBUTTON)),
+ HTCLOSE => self.state.nc_button_pressed.set(Some(HTCLOSE)),
_ => return None,
};
Some(0)
@@ -985,10 +961,8 @@ impl WindowsWindowInner {
wparam: WPARAM,
lparam: LPARAM,
) -> Option<isize> {
- let mut lock = self.state.borrow_mut();
- if let Some(mut func) = lock.callbacks.input.take() {
- let scale_factor = lock.scale_factor;
- drop(lock);
+ if let Some(mut func) = self.state.callbacks.input.take() {
+ let scale_factor = self.state.scale_factor.get();
let mut cursor_point = POINT {
x: lparam.signed_loword().into(),
@@ -1002,16 +976,15 @@ impl WindowsWindowInner {
click_count: 1,
});
let handled = !func(input).propagate;
- self.state.borrow_mut().callbacks.input = Some(func);
+ self.state.callbacks.input.set(Some(func));
if handled {
return Some(0);
}
} else {
- drop(lock);
}
- let last_pressed = self.state.borrow_mut().nc_button_pressed.take();
+ let last_pressed = self.state.nc_button_pressed.take();
if button == MouseButton::Left
&& let Some(last_pressed) = last_pressed
{
@@ -1021,7 +994,7 @@ impl WindowsWindowInner {
true
}
(HTMAXBUTTON, HTMAXBUTTON) => {
- if self.state.borrow().is_maximized() {
+ if self.state.is_maximized() {
unsafe { ShowWindowAsync(handle, SW_NORMAL).ok().log_err() };
} else {
unsafe { ShowWindowAsync(handle, SW_MAXIMIZE).ok().log_err() };
@@ -1046,17 +1019,16 @@ impl WindowsWindowInner {
}
fn handle_cursor_changed(&self, lparam: LPARAM) -> Option<isize> {
- let mut state = self.state.borrow_mut();
- let had_cursor = state.current_cursor.is_some();
+ let had_cursor = self.state.current_cursor.get().is_some();
- state.current_cursor = if lparam.0 == 0 {
+ self.state.current_cursor.set(if lparam.0 == 0 {
None
} else {
Some(HCURSOR(lparam.0 as _))
- };
+ });
- if had_cursor != state.current_cursor.is_some() {
- unsafe { SetCursor(state.current_cursor) };
+ if had_cursor != self.state.current_cursor.get().is_some() {
+ unsafe { SetCursor(self.state.current_cursor.get()) };
}
Some(0)
@@ -1079,7 +1051,7 @@ impl WindowsWindowInner {
return None;
}
unsafe {
- SetCursor(self.state.borrow().current_cursor);
+ SetCursor(self.state.current_cursor.get());
};
Some(0)
}
@@ -1091,13 +1063,12 @@ impl WindowsWindowInner {
lparam: LPARAM,
) -> Option<isize> {
if wparam.0 != 0 {
- let mut lock = self.state.borrow_mut();
- let display = lock.display;
- lock.click_state.system_update(wparam.0);
- lock.border_offset.update(handle).log_err();
- // system settings may emit a window message which wants to take the refcell lock, so drop it
- drop(lock);
- self.system_settings_mut().update(display, wparam.0);
+ let display = self.state.display.get();
+ self.state.click_state.system_update(wparam.0);
+ self.state.border_offset.update(handle).log_err();
+ // System settings may emit a window message that re-enters this window state; per-field Cell/RefCell access holds no long-lived borrow, so nothing needs dropping here.
+ self.system_settings().update(display, wparam.0);
} else {
self.handle_system_theme_changed(handle, lparam)?;
};
@@ -1120,13 +1091,13 @@ impl WindowsWindowInner {
let new_appearance = system_appearance()
.context("unable to get system appearance when handling ImmersiveColorSet")
.log_err()?;
- let mut lock = self.state.borrow_mut();
- if new_appearance != lock.appearance {
- lock.appearance = new_appearance;
- let mut callback = lock.callbacks.appearance_changed.take()?;
- drop(lock);
+
+ if new_appearance != self.state.appearance.get() {
+ self.state.appearance.set(new_appearance);
+ let mut callback = self.state.callbacks.appearance_changed.take()?;
+
callback();
- self.state.borrow_mut().callbacks.appearance_changed = Some(callback);
+ self.state.callbacks.appearance_changed.set(Some(callback));
configure_dwm_dark_mode(handle, new_appearance);
}
}
@@ -1155,10 +1126,14 @@ impl WindowsWindowInner {
}
fn handle_device_lost(&self, lparam: LPARAM) -> Option<isize> {
- let mut lock = self.state.borrow_mut();
let devices = lparam.0 as *const DirectXDevices;
let devices = unsafe { &*devices };
- if let Err(err) = lock.renderer.handle_device_lost(&devices) {
+ if let Err(err) = self
+ .state
+ .renderer
+ .borrow_mut()
+ .handle_device_lost(&devices)
+ {
panic!("Device lost: {err}");
}
Some(0)
@@ -1166,18 +1141,18 @@ impl WindowsWindowInner {
#[inline]
fn draw_window(&self, handle: HWND, force_render: bool) -> Option<isize> {
- let mut request_frame = self.state.borrow_mut().callbacks.request_frame.take()?;
+ let mut request_frame = self.state.callbacks.request_frame.take()?;
// we are instructing gpui to force render a frame, this will
// re-populate all the gpu textures for us so we can resume drawing in
// case we disabled drawing earlier due to a device loss
- self.state.borrow_mut().renderer.mark_drawable();
+ self.state.renderer.borrow_mut().mark_drawable();
request_frame(RequestFrameOptions {
require_presentation: false,
force_render,
});
- self.state.borrow_mut().callbacks.request_frame = Some(request_frame);
+ self.state.callbacks.request_frame.set(Some(request_frame));
unsafe { ValidateRect(Some(handle), None).ok().log_err() };
Some(0)
@@ -1186,16 +1161,16 @@ impl WindowsWindowInner {
#[inline]
fn parse_char_message(&self, wparam: WPARAM) -> Option<String> {
let code_point = wparam.loword();
- let mut lock = self.state.borrow_mut();
+
// https://www.unicode.org/versions/Unicode16.0.0/core-spec/chapter-3/#G2630
match code_point {
0xD800..=0xDBFF => {
// High surrogate, wait for low surrogate
- lock.pending_surrogate = Some(code_point);
+ self.state.pending_surrogate.set(Some(code_point));
None
}
0xDC00..=0xDFFF => {
- if let Some(high_surrogate) = lock.pending_surrogate.take() {
+ if let Some(high_surrogate) = self.state.pending_surrogate.take() {
// Low surrogate, combine with pending high surrogate
String::from_utf16(&[high_surrogate, code_point]).ok()
} else {
@@ -1207,7 +1182,7 @@ impl WindowsWindowInner {
}
}
_ => {
- lock.pending_surrogate = None;
+ self.state.pending_surrogate.set(None);
char::from_u32(code_point as u32)
.filter(|c| !c.is_control())
.map(|c| c.to_string())
@@ -1216,9 +1191,8 @@ impl WindowsWindowInner {
}
fn start_tracking_mouse(&self, handle: HWND, flags: TRACKMOUSEEVENT_FLAGS) {
- let mut lock = self.state.borrow_mut();
- if !lock.hovered {
- lock.hovered = true;
+ if !self.state.hovered.get() {
+ self.state.hovered.set(true);
unsafe {
TrackMouseEvent(&mut TRACKMOUSEEVENT {
cbSize: std::mem::size_of::<TRACKMOUSEEVENT>() as u32,
@@ -1228,10 +1202,12 @@ impl WindowsWindowInner {
})
.log_err()
};
- if let Some(mut callback) = lock.callbacks.hovered_status_change.take() {
- drop(lock);
+ if let Some(mut callback) = self.state.callbacks.hovered_status_change.take() {
callback(true);
- self.state.borrow_mut().callbacks.hovered_status_change = Some(callback);
+ self.state
+ .callbacks
+ .hovered_status_change
+ .set(Some(callback));
}
}
}
@@ -1240,9 +1216,9 @@ impl WindowsWindowInner {
where
F: FnOnce(&mut PlatformInputHandler) -> R,
{
- let mut input_handler = self.state.borrow_mut().input_handler.take()?;
+ let mut input_handler = self.state.input_handler.take()?;
let result = f(&mut input_handler);
- self.state.borrow_mut().input_handler = Some(input_handler);
+ self.state.input_handler.set(Some(input_handler));
Some(result)
}
@@ -1250,12 +1226,11 @@ impl WindowsWindowInner {
where
F: FnOnce(&mut PlatformInputHandler, f32) -> Option<R>,
{
- let mut lock = self.state.borrow_mut();
- let mut input_handler = lock.input_handler.take()?;
- let scale_factor = lock.scale_factor;
- drop(lock);
+ let mut input_handler = self.state.input_handler.take()?;
+ let scale_factor = self.state.scale_factor.get();
+
let result = f(&mut input_handler, scale_factor);
- self.state.borrow_mut().input_handler = Some(input_handler);
+ self.state.input_handler.set(Some(input_handler));
result
}
}
@@ -1263,7 +1238,7 @@ impl WindowsWindowInner {
fn handle_key_event<F>(
wparam: WPARAM,
lparam: LPARAM,
- state: &mut WindowsWindowState,
+ state: &WindowsWindowState,
f: F,
) -> Option<PlatformInput>
where
@@ -1276,11 +1251,12 @@ where
VK_SHIFT | VK_CONTROL | VK_MENU | VK_LMENU | VK_RMENU | VK_LWIN | VK_RWIN => {
if state
.last_reported_modifiers
+ .get()
.is_some_and(|prev_modifiers| prev_modifiers == modifiers)
{
return None;
}
- state.last_reported_modifiers = Some(modifiers);
+ state.last_reported_modifiers.set(Some(modifiers));
Some(PlatformInput::ModifiersChanged(ModifiersChangedEvent {
modifiers,
capslock: current_capslock(),
@@ -1291,11 +1267,12 @@ where
let capslock = current_capslock();
if state
.last_reported_capslock
+ .get()
.is_some_and(|prev_capslock| prev_capslock == capslock)
{
return None;
}
- state.last_reported_capslock = Some(capslock);
+ state.last_reported_capslock.set(Some(capslock));
Some(PlatformInput::ModifiersChanged(ModifiersChangedEvent {
modifiers,
capslock,
@@ -1527,7 +1504,7 @@ fn get_client_area_insets(
// The top inset is calculated using an empirical formula that I derived through various
// tests. Without this, the top 1-2 rows of pixels in our window would be obscured.
let dpi = unsafe { GetDpiForWindow(handle) };
- let frame_thickness = get_frame_thickness(dpi);
+ let frame_thickness = get_frame_thicknessy(dpi);
let top_insets = if is_maximized {
frame_thickness
} else {
@@ -1548,12 +1525,18 @@ fn get_client_area_insets(
// borders on Windows:
// - SM_CXSIZEFRAME: The resize handle.
// - SM_CXPADDEDBORDER: Additional border space that isn't part of the resize handle.
-fn get_frame_thickness(dpi: u32) -> i32 {
+fn get_frame_thicknessx(dpi: u32) -> i32 {
let resize_frame_thickness = unsafe { GetSystemMetricsForDpi(SM_CXSIZEFRAME, dpi) };
let padding_thickness = unsafe { GetSystemMetricsForDpi(SM_CXPADDEDBORDER, dpi) };
resize_frame_thickness + padding_thickness
}
+fn get_frame_thicknessy(dpi: u32) -> i32 {
+ let resize_frame_thickness = unsafe { GetSystemMetricsForDpi(SM_CYSIZEFRAME, dpi) };
+ let padding_thickness = unsafe { GetSystemMetricsForDpi(SM_CXPADDEDBORDER, dpi) };
+ resize_frame_thickness + padding_thickness
+}
+
fn notify_frame_changed(handle: HWND) {
unsafe {
SetWindowPos(
@@ -1,5 +1,5 @@
use std::{
- cell::RefCell,
+ cell::{Cell, RefCell},
ffi::OsStr,
path::{Path, PathBuf},
rc::{Rc, Weak},
@@ -47,7 +47,7 @@ pub(crate) struct WindowsPlatform {
}
struct WindowsPlatformInner {
- state: RefCell<WindowsPlatformState>,
+ state: WindowsPlatformState,
raw_window_handles: std::sync::Weak<RwLock<SmallVec<[SafeHwnd; 4]>>>,
// The below members will never change throughout the entire lifecycle of the app.
validation_number: usize,
@@ -57,22 +57,22 @@ struct WindowsPlatformInner {
pub(crate) struct WindowsPlatformState {
callbacks: PlatformCallbacks,
- menus: Vec<OwnedMenu>,
- jump_list: JumpList,
+ menus: RefCell<Vec<OwnedMenu>>,
+ jump_list: RefCell<JumpList>,
// NOTE: standard cursor handles don't need to close.
- pub(crate) current_cursor: Option<HCURSOR>,
- directx_devices: Option<DirectXDevices>,
+ pub(crate) current_cursor: Cell<Option<HCURSOR>>,
+ directx_devices: RefCell<Option<DirectXDevices>>,
}
#[derive(Default)]
struct PlatformCallbacks {
- open_urls: Option<Box<dyn FnMut(Vec<String>)>>,
- quit: Option<Box<dyn FnMut()>>,
- reopen: Option<Box<dyn FnMut()>>,
- app_menu_action: Option<Box<dyn FnMut(&dyn Action)>>,
- will_open_app_menu: Option<Box<dyn FnMut()>>,
- validate_app_menu_command: Option<Box<dyn FnMut(&dyn Action) -> bool>>,
- keyboard_layout_change: Option<Box<dyn FnMut()>>,
+ open_urls: Cell<Option<Box<dyn FnMut(Vec<String>)>>>,
+ quit: Cell<Option<Box<dyn FnMut()>>>,
+ reopen: Cell<Option<Box<dyn FnMut()>>>,
+ app_menu_action: Cell<Option<Box<dyn FnMut(&dyn Action)>>>,
+ will_open_app_menu: Cell<Option<Box<dyn FnMut()>>>,
+ validate_app_menu_command: Cell<Option<Box<dyn FnMut(&dyn Action) -> bool>>>,
+ keyboard_layout_change: Cell<Option<Box<dyn FnMut()>>>,
}
impl WindowsPlatformState {
@@ -84,10 +84,10 @@ impl WindowsPlatformState {
Self {
callbacks,
- jump_list,
- current_cursor,
- directx_devices,
- menus: Vec::new(),
+ jump_list: RefCell::new(jump_list),
+ current_cursor: Cell::new(current_cursor),
+ directx_devices: RefCell::new(directx_devices),
+ menus: RefCell::new(Vec::new()),
}
}
}
@@ -194,14 +194,14 @@ impl WindowsPlatform {
WindowCreationInfo {
icon: self.icon,
executor: self.foreground_executor.clone(),
- current_cursor: self.inner.state.borrow().current_cursor,
+ current_cursor: self.inner.state.current_cursor.get(),
windows_version: self.windows_version,
drop_target_helper: self.drop_target_helper.clone(),
validation_number: self.inner.validation_number,
main_receiver: self.inner.main_receiver.clone(),
platform_window_handle: self.handle,
disable_direct_composition: self.disable_direct_composition,
- directx_devices: self.inner.state.borrow().directx_devices.clone().unwrap(),
+ directx_devices: self.inner.state.directx_devices.borrow().clone().unwrap(),
invalidate_devices: self.invalidate_devices.clone(),
}
}
@@ -213,9 +213,8 @@ impl WindowsPlatform {
actions.push(dock_menu);
}
});
- let mut lock = self.inner.state.borrow_mut();
- lock.jump_list.dock_menus = actions;
- update_jump_list(&lock.jump_list).log_err();
+ self.inner.state.jump_list.borrow_mut().dock_menus = actions;
+ update_jump_list(&self.inner.state.jump_list.borrow()).log_err();
}
fn update_jump_list(
@@ -229,12 +228,10 @@ impl WindowsPlatform {
actions.push(dock_menu);
}
});
- let mut lock = self.inner.state.borrow_mut();
- lock.jump_list.dock_menus = actions;
- lock.jump_list.recent_workspaces = entries;
- update_jump_list(&lock.jump_list)
- .log_err()
- .unwrap_or_default()
+ let mut jump_list = self.inner.state.jump_list.borrow_mut();
+ jump_list.dock_menus = actions;
+ jump_list.recent_workspaces = entries;
+ update_jump_list(&jump_list).log_err().unwrap_or_default()
}
fn find_current_active_window(&self) -> Option<HWND> {
@@ -250,7 +247,7 @@ impl WindowsPlatform {
}
fn begin_vsync_thread(&self) {
- let mut directx_device = self.inner.state.borrow().directx_devices.clone().unwrap();
+ let mut directx_device = self.inner.state.directx_devices.borrow().clone().unwrap();
let platform_window: SafeHwnd = self.handle.into();
let validation_number = self.inner.validation_number;
let all_windows = Arc::downgrade(&self.raw_window_handles);
@@ -334,9 +331,9 @@ impl Platform for WindowsPlatform {
fn on_keyboard_layout_change(&self, callback: Box<dyn FnMut()>) {
self.inner
.state
- .borrow_mut()
.callbacks
- .keyboard_layout_change = Some(callback);
+ .keyboard_layout_change
+ .set(Some(callback));
}
fn run(&self, on_finish_launching: Box<dyn 'static + FnOnce()>) {
@@ -354,7 +351,7 @@ impl Platform for WindowsPlatform {
}
self.inner
- .with_callback(|callbacks| &mut callbacks.quit, |callback| callback());
+ .with_callback(|callbacks| &callbacks.quit, |callback| callback());
}
fn quit(&self) {
@@ -473,7 +470,7 @@ impl Platform for WindowsPlatform {
}
fn on_open_urls(&self, callback: Box<dyn FnMut(Vec<String>)>) {
- self.inner.state.borrow_mut().callbacks.open_urls = Some(callback);
+ self.inner.state.callbacks.open_urls.set(Some(callback));
}
fn prompt_for_paths(
@@ -543,19 +540,19 @@ impl Platform for WindowsPlatform {
}
fn on_quit(&self, callback: Box<dyn FnMut()>) {
- self.inner.state.borrow_mut().callbacks.quit = Some(callback);
+ self.inner.state.callbacks.quit.set(Some(callback));
}
fn on_reopen(&self, callback: Box<dyn FnMut()>) {
- self.inner.state.borrow_mut().callbacks.reopen = Some(callback);
+ self.inner.state.callbacks.reopen.set(Some(callback));
}
fn set_menus(&self, menus: Vec<Menu>, _keymap: &Keymap) {
- self.inner.state.borrow_mut().menus = menus.into_iter().map(|menu| menu.owned()).collect();
+ *self.inner.state.menus.borrow_mut() = menus.into_iter().map(|menu| menu.owned()).collect();
}
fn get_menus(&self) -> Option<Vec<OwnedMenu>> {
- Some(self.inner.state.borrow().menus.clone())
+ Some(self.inner.state.menus.borrow().clone())
}
fn set_dock_menu(&self, menus: Vec<MenuItem>, _keymap: &Keymap) {
@@ -563,19 +560,27 @@ impl Platform for WindowsPlatform {
}
fn on_app_menu_action(&self, callback: Box<dyn FnMut(&dyn Action)>) {
- self.inner.state.borrow_mut().callbacks.app_menu_action = Some(callback);
+ self.inner
+ .state
+ .callbacks
+ .app_menu_action
+ .set(Some(callback));
}
fn on_will_open_app_menu(&self, callback: Box<dyn FnMut()>) {
- self.inner.state.borrow_mut().callbacks.will_open_app_menu = Some(callback);
+ self.inner
+ .state
+ .callbacks
+ .will_open_app_menu
+ .set(Some(callback));
}
fn on_validate_app_menu_command(&self, callback: Box<dyn FnMut(&dyn Action) -> bool>) {
self.inner
.state
- .borrow_mut()
.callbacks
- .validate_app_menu_command = Some(callback);
+ .validate_app_menu_command
+ .set(Some(callback));
}
fn app_path(&self) -> Result<PathBuf> {
@@ -589,13 +594,13 @@ impl Platform for WindowsPlatform {
fn set_cursor_style(&self, style: CursorStyle) {
let hcursor = load_cursor(style);
- if self.inner.state.borrow_mut().current_cursor.map(|c| c.0) != hcursor.map(|c| c.0) {
+ if self.inner.state.current_cursor.get().map(|c| c.0) != hcursor.map(|c| c.0) {
self.post_message(
WM_GPUI_CURSOR_STYLE_CHANGED,
WPARAM(0),
LPARAM(hcursor.map_or(0, |c| c.0 as isize)),
);
- self.inner.state.borrow_mut().current_cursor = hcursor;
+ self.inner.state.current_cursor.set(hcursor);
}
}
@@ -721,12 +726,12 @@ impl Platform for WindowsPlatform {
impl WindowsPlatformInner {
fn new(context: &mut PlatformWindowCreateContext) -> Result<Rc<Self>> {
- let state = RefCell::new(WindowsPlatformState::new(
+ let state = WindowsPlatformState::new(
context
.directx_devices
.take()
.context("missing directx devices")?,
- ));
+ );
Ok(Rc::new(Self {
state,
raw_window_handles: context.raw_window_handles.clone(),
@@ -746,13 +751,13 @@ impl WindowsPlatformInner {
/// Calls `project` to project to the corresponding callback field, removes it from callbacks, calls `f` with the callback and then puts the callback back.
fn with_callback<T>(
&self,
- project: impl Fn(&mut PlatformCallbacks) -> &mut Option<T>,
+ project: impl Fn(&PlatformCallbacks) -> &Cell<Option<T>>,
f: impl FnOnce(&mut T),
) {
- let callback = project(&mut self.state.borrow_mut().callbacks).take();
+ let callback = project(&self.state.callbacks).take();
if let Some(mut callback) = callback {
f(&mut callback);
- *project(&mut self.state.borrow_mut().callbacks) = Some(callback)
+ project(&self.state.callbacks).set(Some(callback));
}
}
@@ -877,8 +882,8 @@ impl WindowsPlatformInner {
fn handle_dock_action_event(&self, action_idx: usize) -> Option<isize> {
let Some(action) = self
.state
- .borrow_mut()
.jump_list
+ .borrow()
.dock_menus
.get(action_idx)
.map(|dock_menu| dock_menu.action.boxed_clone())
@@ -887,7 +892,7 @@ impl WindowsPlatformInner {
return Some(1);
};
self.with_callback(
- |callbacks| &mut callbacks.app_menu_action,
+ |callbacks| &callbacks.app_menu_action,
|callback| callback(&*action),
);
Some(0)
@@ -895,7 +900,7 @@ impl WindowsPlatformInner {
fn handle_keyboard_layout_change(&self) -> Option<isize> {
self.with_callback(
- |callbacks| &mut callbacks.keyboard_layout_change,
+ |callbacks| &callbacks.keyboard_layout_change,
|callback| callback(),
);
Some(0)
@@ -904,9 +909,8 @@ impl WindowsPlatformInner {
fn handle_device_lost(&self, lparam: LPARAM) -> Option<isize> {
let directx_devices = lparam.0 as *const DirectXDevices;
let directx_devices = unsafe { &*directx_devices };
- let mut lock = self.state.borrow_mut();
- lock.directx_devices.take();
- lock.directx_devices = Some(directx_devices.clone());
+ self.state.directx_devices.borrow_mut().take();
+ *self.state.directx_devices.borrow_mut() = Some(directx_devices.clone());
Some(0)
}
@@ -1,4 +1,7 @@
-use std::ffi::{c_uint, c_void};
+use std::{
+ cell::Cell,
+ ffi::{c_uint, c_void},
+};
use ::util::ResultExt;
use windows::Win32::UI::{
@@ -15,18 +18,18 @@ use super::WindowsDisplay;
/// Windows settings pulled from SystemParametersInfo
/// https://learn.microsoft.com/en-us/windows/win32/api/winuser/nf-winuser-systemparametersinfow
-#[derive(Default, Debug, Clone, Copy)]
+#[derive(Default, Debug, Clone)]
pub(crate) struct WindowsSystemSettings {
pub(crate) mouse_wheel_settings: MouseWheelSettings,
- pub(crate) auto_hide_taskbar_position: Option<AutoHideTaskbarPosition>,
+ pub(crate) auto_hide_taskbar_position: Cell<Option<AutoHideTaskbarPosition>>,
}
-#[derive(Default, Debug, Clone, Copy)]
+#[derive(Default, Debug, Clone)]
pub(crate) struct MouseWheelSettings {
/// SEE: SPI_GETWHEELSCROLLCHARS
- pub(crate) wheel_scroll_chars: u32,
+ pub(crate) wheel_scroll_chars: Cell<u32>,
/// SEE: SPI_GETWHEELSCROLLLINES
- pub(crate) wheel_scroll_lines: u32,
+ pub(crate) wheel_scroll_lines: Cell<u32>,
}
impl WindowsSystemSettings {
@@ -36,12 +39,13 @@ impl WindowsSystemSettings {
settings
}
- fn init(&mut self, display: WindowsDisplay) {
+ fn init(&self, display: WindowsDisplay) {
self.mouse_wheel_settings.update();
- self.auto_hide_taskbar_position = AutoHideTaskbarPosition::new(display).log_err().flatten();
+ self.auto_hide_taskbar_position
+ .set(AutoHideTaskbarPosition::new(display).log_err().flatten());
}
- pub(crate) fn update(&mut self, display: WindowsDisplay, wparam: usize) {
+ pub(crate) fn update(&self, display: WindowsDisplay, wparam: usize) {
match wparam {
// SPI_SETWORKAREA
47 => self.update_taskbar_position(display),
@@ -51,22 +55,23 @@ impl WindowsSystemSettings {
}
}
- fn update_mouse_wheel_settings(&mut self) {
+ fn update_mouse_wheel_settings(&self) {
self.mouse_wheel_settings.update();
}
- fn update_taskbar_position(&mut self, display: WindowsDisplay) {
- self.auto_hide_taskbar_position = AutoHideTaskbarPosition::new(display).log_err().flatten();
+ fn update_taskbar_position(&self, display: WindowsDisplay) {
+ self.auto_hide_taskbar_position
+ .set(AutoHideTaskbarPosition::new(display).log_err().flatten());
}
}
impl MouseWheelSettings {
- fn update(&mut self) {
+ fn update(&self) {
self.update_wheel_scroll_chars();
self.update_wheel_scroll_lines();
}
- fn update_wheel_scroll_chars(&mut self) {
+ fn update_wheel_scroll_chars(&self) {
let mut value = c_uint::default();
let result = unsafe {
SystemParametersInfoW(
@@ -77,12 +82,12 @@ impl MouseWheelSettings {
)
};
- if result.log_err() != None && self.wheel_scroll_chars != value {
- self.wheel_scroll_chars = value;
+ if result.log_err() != None && self.wheel_scroll_chars.get() != value {
+ self.wheel_scroll_chars.set(value);
}
}
- fn update_wheel_scroll_lines(&mut self) {
+ fn update_wheel_scroll_lines(&self) {
let mut value = c_uint::default();
let result = unsafe {
SystemParametersInfoW(
@@ -93,8 +98,8 @@ impl MouseWheelSettings {
)
};
- if result.log_err() != None && self.wheel_scroll_lines != value {
- self.wheel_scroll_lines = value;
+ if result.log_err() != None && self.wheel_scroll_lines.get() != value {
+ self.wheel_scroll_lines.set(value);
}
}
}
@@ -1,7 +1,7 @@
#![deny(unsafe_op_in_unsafe_fn)]
use std::{
- cell::RefCell,
+ cell::{Cell, RefCell},
num::NonZeroIsize,
path::PathBuf,
rc::{Rc, Weak},
@@ -30,43 +30,51 @@ use crate::*;
pub(crate) struct WindowsWindow(pub Rc<WindowsWindowInner>);
+impl std::ops::Deref for WindowsWindow {
+ type Target = WindowsWindowInner;
+
+ fn deref(&self) -> &Self::Target {
+ &self.0
+ }
+}
+
pub struct WindowsWindowState {
- pub origin: Point<Pixels>,
- pub logical_size: Size<Pixels>,
+ pub origin: Cell<Point<Pixels>>,
+ pub logical_size: Cell<Size<Pixels>>,
pub min_size: Option<Size<Pixels>>,
- pub fullscreen_restore_bounds: Bounds<Pixels>,
+ pub fullscreen_restore_bounds: Cell<Bounds<Pixels>>,
pub border_offset: WindowBorderOffset,
- pub appearance: WindowAppearance,
- pub scale_factor: f32,
- pub restore_from_minimized: Option<Box<dyn FnMut(RequestFrameOptions)>>,
+ pub appearance: Cell<WindowAppearance>,
+ pub scale_factor: Cell<f32>,
+ pub restore_from_minimized: Cell<Option<Box<dyn FnMut(RequestFrameOptions)>>>,
pub callbacks: Callbacks,
- pub input_handler: Option<PlatformInputHandler>,
- pub pending_surrogate: Option<u16>,
- pub last_reported_modifiers: Option<Modifiers>,
- pub last_reported_capslock: Option<Capslock>,
- pub hovered: bool,
+ pub input_handler: Cell<Option<PlatformInputHandler>>,
+ pub pending_surrogate: Cell<Option<u16>>,
+ pub last_reported_modifiers: Cell<Option<Modifiers>>,
+ pub last_reported_capslock: Cell<Option<Capslock>>,
+ pub hovered: Cell<bool>,
- pub renderer: DirectXRenderer,
+ pub renderer: RefCell<DirectXRenderer>,
pub click_state: ClickState,
- pub current_cursor: Option<HCURSOR>,
- pub nc_button_pressed: Option<u32>,
+ pub current_cursor: Cell<Option<HCURSOR>>,
+ pub nc_button_pressed: Cell<Option<u32>>,
- pub display: WindowsDisplay,
+ pub display: Cell<WindowsDisplay>,
/// Flag to instruct the `VSyncProvider` thread to invalidate the directx devices
/// as resizing them has failed, causing us to have lost at least the render target.
pub invalidate_devices: Arc<AtomicBool>,
- fullscreen: Option<StyleAndBounds>,
- initial_placement: Option<WindowOpenStatus>,
+ fullscreen: Cell<Option<StyleAndBounds>>,
+ initial_placement: Cell<Option<WindowOpenStatus>>,
hwnd: HWND,
}
pub(crate) struct WindowsWindowInner {
hwnd: HWND,
drop_target_helper: IDropTargetHelper,
- pub(crate) state: RefCell<WindowsWindowState>,
- system_settings: RefCell<WindowsSystemSettings>,
+ pub(crate) state: WindowsWindowState,
+ system_settings: WindowsSystemSettings,
pub(crate) handle: AnyWindowHandle,
pub(crate) hide_title_bar: bool,
pub(crate) is_movable: bool,
@@ -121,27 +129,27 @@ impl WindowsWindowState {
let initial_placement = None;
Ok(Self {
- origin,
- logical_size,
- fullscreen_restore_bounds,
+ origin: Cell::new(origin),
+ logical_size: Cell::new(logical_size),
+ fullscreen_restore_bounds: Cell::new(fullscreen_restore_bounds),
border_offset,
- appearance,
- scale_factor,
- restore_from_minimized,
+ appearance: Cell::new(appearance),
+ scale_factor: Cell::new(scale_factor),
+ restore_from_minimized: Cell::new(restore_from_minimized),
min_size,
callbacks,
- input_handler,
- pending_surrogate,
- last_reported_modifiers,
- last_reported_capslock,
- hovered,
- renderer,
+ input_handler: Cell::new(input_handler),
+ pending_surrogate: Cell::new(pending_surrogate),
+ last_reported_modifiers: Cell::new(last_reported_modifiers),
+ last_reported_capslock: Cell::new(last_reported_capslock),
+ hovered: Cell::new(hovered),
+ renderer: RefCell::new(renderer),
click_state,
- current_cursor,
- nc_button_pressed,
- display,
- fullscreen,
- initial_placement,
+ current_cursor: Cell::new(current_cursor),
+ nc_button_pressed: Cell::new(nc_button_pressed),
+ display: Cell::new(display),
+ fullscreen: Cell::new(fullscreen),
+ initial_placement: Cell::new(initial_placement),
hwnd,
invalidate_devices,
})
@@ -149,7 +157,7 @@ impl WindowsWindowState {
#[inline]
pub(crate) fn is_fullscreen(&self) -> bool {
- self.fullscreen.is_some()
+ self.fullscreen.get().is_some()
}
pub(crate) fn is_maximized(&self) -> bool {
@@ -158,8 +166,8 @@ impl WindowsWindowState {
fn bounds(&self) -> Bounds<Pixels> {
Bounds {
- origin: self.origin,
- size: self.logical_size,
+ origin: self.origin.get(),
+ size: self.logical_size.get(),
}
}
@@ -178,8 +186,8 @@ impl WindowsWindowState {
(
calculate_client_rect(
placement.rcNormalPosition,
- self.border_offset,
- self.scale_factor,
+ &self.border_offset,
+ self.scale_factor.get(),
),
placement.showCmd == SW_SHOWMAXIMIZED.0 as u32,
)
@@ -189,7 +197,7 @@ impl WindowsWindowState {
let (bounds, maximized) = self.calculate_window_bounds();
if self.is_fullscreen() {
- WindowBounds::Fullscreen(self.fullscreen_restore_bounds)
+ WindowBounds::Fullscreen(self.fullscreen_restore_bounds.get())
} else if maximized {
WindowBounds::Maximized(bounds)
} else {
@@ -202,13 +210,13 @@ impl WindowsWindowState {
/// Currently, GPUI uses the logical size of the app to handle mouse interactions (such as
/// whether the mouse collides with other elements of GPUI).
fn content_size(&self) -> Size<Pixels> {
- self.logical_size
+ self.logical_size.get()
}
}
impl WindowsWindowInner {
fn new(context: &mut WindowCreateContext, hwnd: HWND, cs: &CREATESTRUCTW) -> Result<Rc<Self>> {
- let state = RefCell::new(WindowsWindowState::new(
+ let state = WindowsWindowState::new(
hwnd,
&context.directx_devices,
cs,
@@ -218,7 +226,7 @@ impl WindowsWindowInner {
context.appearance,
context.disable_direct_composition,
context.invalidate_devices.clone(),
- )?);
+ )?;
Ok(Rc::new(Self {
hwnd,
@@ -232,7 +240,7 @@ impl WindowsWindowInner {
validation_number: context.validation_number,
main_receiver: context.main_receiver.clone(),
platform_window_handle: context.platform_window_handle,
- system_settings: RefCell::new(WindowsSystemSettings::new(context.display)),
+ system_settings: WindowsSystemSettings::new(context.display),
}))
}
@@ -240,19 +248,17 @@ impl WindowsWindowInner {
let this = self.clone();
self.executor
.spawn(async move {
- let mut lock = this.state.borrow_mut();
let StyleAndBounds {
style,
x,
y,
cx,
cy,
- } = match lock.fullscreen.take() {
+ } = match this.state.fullscreen.take() {
Some(state) => state,
None => {
- let (window_bounds, _) = lock.calculate_window_bounds();
- lock.fullscreen_restore_bounds = window_bounds;
- drop(lock);
+ let (window_bounds, _) = this.state.calculate_window_bounds();
+ this.state.fullscreen_restore_bounds.set(window_bounds);
let style =
WINDOW_STYLE(unsafe { get_window_long(this.hwnd, GWL_STYLE) } as _);
@@ -260,22 +266,20 @@ impl WindowsWindowInner {
unsafe { GetWindowRect(this.hwnd, &mut rc) }
.context("failed to get window rect")
.log_err();
-
- lock = this.state.borrow_mut();
- let _ = lock.fullscreen.insert(StyleAndBounds {
+ this.state.fullscreen.set(Some(StyleAndBounds {
style,
x: rc.left,
y: rc.top,
cx: rc.right - rc.left,
cy: rc.bottom - rc.top,
- });
+ }));
let style = style
& !(WS_THICKFRAME
| WS_SYSMENU
| WS_MAXIMIZEBOX
| WS_MINIMIZEBOX
| WS_CAPTION);
- let physical_bounds = lock.display.physical_bounds();
+ let physical_bounds = this.state.display.get().physical_bounds();
StyleAndBounds {
style,
x: physical_bounds.left().0,
@@ -285,7 +289,6 @@ impl WindowsWindowInner {
}
}
};
- drop(lock);
unsafe { set_window_long(this.hwnd, GWL_STYLE, style.0 as isize) };
unsafe {
SetWindowPos(
@@ -304,7 +307,7 @@ impl WindowsWindowInner {
}
fn set_window_placement(self: &Rc<Self>) -> Result<()> {
- let Some(open_status) = self.state.borrow_mut().initial_placement.take() else {
+ let Some(open_status) = self.state.initial_placement.take() else {
return Ok(());
};
match open_status.state {
@@ -328,27 +331,23 @@ impl WindowsWindowInner {
Ok(())
}
- pub(crate) fn system_settings(&self) -> std::cell::Ref<'_, WindowsSystemSettings> {
- self.system_settings.borrow()
- }
-
- pub(crate) fn system_settings_mut(&self) -> std::cell::RefMut<'_, WindowsSystemSettings> {
- self.system_settings.borrow_mut()
+ pub(crate) fn system_settings(&self) -> &WindowsSystemSettings {
+ &self.system_settings
}
}
#[derive(Default)]
pub(crate) struct Callbacks {
- pub(crate) request_frame: Option<Box<dyn FnMut(RequestFrameOptions)>>,
- pub(crate) input: Option<Box<dyn FnMut(crate::PlatformInput) -> DispatchEventResult>>,
- pub(crate) active_status_change: Option<Box<dyn FnMut(bool)>>,
- pub(crate) hovered_status_change: Option<Box<dyn FnMut(bool)>>,
- pub(crate) resize: Option<Box<dyn FnMut(Size<Pixels>, f32)>>,
- pub(crate) moved: Option<Box<dyn FnMut()>>,
- pub(crate) should_close: Option<Box<dyn FnMut() -> bool>>,
- pub(crate) close: Option<Box<dyn FnOnce()>>,
- pub(crate) hit_test_window_control: Option<Box<dyn FnMut() -> Option<WindowControlArea>>>,
- pub(crate) appearance_changed: Option<Box<dyn FnMut()>>,
+ pub(crate) request_frame: Cell<Option<Box<dyn FnMut(RequestFrameOptions)>>>,
+ pub(crate) input: Cell<Option<Box<dyn FnMut(crate::PlatformInput) -> DispatchEventResult>>>,
+ pub(crate) active_status_change: Cell<Option<Box<dyn FnMut(bool)>>>,
+ pub(crate) hovered_status_change: Cell<Option<Box<dyn FnMut(bool)>>>,
+ pub(crate) resize: Cell<Option<Box<dyn FnMut(Size<Pixels>, f32)>>>,
+ pub(crate) moved: Cell<Option<Box<dyn FnMut()>>>,
+ pub(crate) should_close: Cell<Option<Box<dyn FnMut() -> bool>>>,
+ pub(crate) close: Cell<Option<Box<dyn FnOnce()>>>,
+ pub(crate) hit_test_window_control: Cell<Option<Box<dyn FnMut() -> Option<WindowControlArea>>>>,
+ pub(crate) appearance_changed: Cell<Option<Box<dyn FnMut()>>>,
}
struct WindowCreateContext {
@@ -476,21 +475,21 @@ impl WindowsWindow {
register_drag_drop(&this)?;
configure_dwm_dark_mode(hwnd, appearance);
- this.state.borrow_mut().border_offset.update(hwnd)?;
+ this.state.border_offset.update(hwnd)?;
let placement = retrieve_window_placement(
hwnd,
display,
params.bounds,
- this.state.borrow().scale_factor,
- this.state.borrow().border_offset,
+ this.state.scale_factor.get(),
+ &this.state.border_offset,
)?;
if params.show {
unsafe { SetWindowPlacement(hwnd, &placement)? };
} else {
- this.state.borrow_mut().initial_placement = Some(WindowOpenStatus {
+ this.state.initial_placement.set(Some(WindowOpenStatus {
placement,
state: WindowOpenState::Windowed,
- });
+ }));
}
Ok(Self(this))
@@ -533,15 +532,15 @@ impl Drop for WindowsWindow {
impl PlatformWindow for WindowsWindow {
fn bounds(&self) -> Bounds<Pixels> {
- self.0.state.borrow().bounds()
+ self.state.bounds()
}
fn is_maximized(&self) -> bool {
- self.0.state.borrow().is_maximized()
+ self.state.is_maximized()
}
fn window_bounds(&self) -> WindowBounds {
- self.0.state.borrow().window_bounds()
+ self.state.window_bounds()
}
/// get the logical size of the app's drawable area.
@@ -549,14 +548,14 @@ impl PlatformWindow for WindowsWindow {
/// Currently, GPUI uses the logical size of the app to handle mouse interactions (such as
/// whether the mouse collides with other elements of GPUI).
fn content_size(&self) -> Size<Pixels> {
- self.0.state.borrow().content_size()
+ self.state.content_size()
}
fn resize(&mut self, size: Size<Pixels>) {
let hwnd = self.0.hwnd;
let bounds =
crate::bounds(self.bounds().origin, size).to_device_pixels(self.scale_factor());
- let rect = calculate_window_rect(bounds, self.0.state.borrow().border_offset);
+ let rect = calculate_window_rect(bounds, &self.state.border_offset);
self.0
.executor
@@ -579,15 +578,15 @@ impl PlatformWindow for WindowsWindow {
}
fn scale_factor(&self) -> f32 {
- self.0.state.borrow().scale_factor
+ self.state.scale_factor.get()
}
fn appearance(&self) -> WindowAppearance {
- self.0.state.borrow().appearance
+ self.state.appearance.get()
}
fn display(&self) -> Option<Rc<dyn PlatformDisplay>> {
- Some(Rc::new(self.0.state.borrow().display))
+ Some(Rc::new(self.state.display.get()))
}
fn mouse_position(&self) -> Point<Pixels> {
@@ -612,11 +611,11 @@ impl PlatformWindow for WindowsWindow {
}
fn set_input_handler(&mut self, input_handler: PlatformInputHandler) {
- self.0.state.borrow_mut().input_handler = Some(input_handler);
+ self.state.input_handler.set(Some(input_handler));
}
fn take_input_handler(&mut self) -> Option<PlatformInputHandler> {
- self.0.state.borrow_mut().input_handler.take()
+ self.state.input_handler.take()
}
fn prompt(
@@ -762,7 +761,7 @@ impl PlatformWindow for WindowsWindow {
}
fn is_hovered(&self) -> bool {
- self.0.state.borrow().hovered
+ self.state.hovered.get()
}
fn set_title(&mut self, title: &str) {
@@ -805,8 +804,9 @@ impl PlatformWindow for WindowsWindow {
unsafe {
if IsWindowVisible(self.0.hwnd).as_bool() {
ShowWindowAsync(self.0.hwnd, SW_MAXIMIZE).ok().log_err();
- } else if let Some(status) = self.0.state.borrow_mut().initial_placement.as_mut() {
+ } else if let Some(mut status) = self.state.initial_placement.take() {
status.state = WindowOpenState::Maximized;
+ self.state.initial_placement.set(Some(status));
}
}
}
@@ -814,61 +814,78 @@ impl PlatformWindow for WindowsWindow {
fn toggle_fullscreen(&self) {
if unsafe { IsWindowVisible(self.0.hwnd).as_bool() } {
self.0.toggle_fullscreen();
- } else if let Some(status) = self.0.state.borrow_mut().initial_placement.as_mut() {
+ } else if let Some(mut status) = self.state.initial_placement.take() {
status.state = WindowOpenState::Fullscreen;
+ self.state.initial_placement.set(Some(status));
}
}
fn is_fullscreen(&self) -> bool {
- self.0.state.borrow().is_fullscreen()
+ self.state.is_fullscreen()
}
fn on_request_frame(&self, callback: Box<dyn FnMut(RequestFrameOptions)>) {
- self.0.state.borrow_mut().callbacks.request_frame = Some(callback);
+ self.state.callbacks.request_frame.set(Some(callback));
}
fn on_input(&self, callback: Box<dyn FnMut(PlatformInput) -> DispatchEventResult>) {
- self.0.state.borrow_mut().callbacks.input = Some(callback);
+ self.state.callbacks.input.set(Some(callback));
}
fn on_active_status_change(&self, callback: Box<dyn FnMut(bool)>) {
- self.0.state.borrow_mut().callbacks.active_status_change = Some(callback);
+ self.0
+ .state
+ .callbacks
+ .active_status_change
+ .set(Some(callback));
}
fn on_hover_status_change(&self, callback: Box<dyn FnMut(bool)>) {
- self.0.state.borrow_mut().callbacks.hovered_status_change = Some(callback);
+ self.0
+ .state
+ .callbacks
+ .hovered_status_change
+ .set(Some(callback));
}
fn on_resize(&self, callback: Box<dyn FnMut(Size<Pixels>, f32)>) {
- self.0.state.borrow_mut().callbacks.resize = Some(callback);
+ self.state.callbacks.resize.set(Some(callback));
}
fn on_moved(&self, callback: Box<dyn FnMut()>) {
- self.0.state.borrow_mut().callbacks.moved = Some(callback);
+ self.state.callbacks.moved.set(Some(callback));
}
fn on_should_close(&self, callback: Box<dyn FnMut() -> bool>) {
- self.0.state.borrow_mut().callbacks.should_close = Some(callback);
+ self.state.callbacks.should_close.set(Some(callback));
}
fn on_close(&self, callback: Box<dyn FnOnce()>) {
- self.0.state.borrow_mut().callbacks.close = Some(callback);
+ self.state.callbacks.close.set(Some(callback));
}
fn on_hit_test_window_control(&self, callback: Box<dyn FnMut() -> Option<WindowControlArea>>) {
- self.0.state.borrow_mut().callbacks.hit_test_window_control = Some(callback);
+ self.0
+ .state
+ .callbacks
+ .hit_test_window_control
+ .set(Some(callback));
}
fn on_appearance_changed(&self, callback: Box<dyn FnMut()>) {
- self.0.state.borrow_mut().callbacks.appearance_changed = Some(callback);
+ self.0
+ .state
+ .callbacks
+ .appearance_changed
+ .set(Some(callback));
}
fn draw(&self, scene: &Scene) {
- self.0.state.borrow_mut().renderer.draw(scene).log_err();
+ self.state.renderer.borrow_mut().draw(scene).log_err();
}
fn sprite_atlas(&self) -> Arc<dyn PlatformAtlas> {
- self.0.state.borrow().renderer.sprite_atlas()
+ self.state.renderer.borrow().sprite_atlas()
}
fn get_raw_handle(&self) -> HWND {
@@ -876,7 +893,7 @@ impl PlatformWindow for WindowsWindow {
}
fn gpu_specs(&self) -> Option<GpuSpecs> {
- self.0.state.borrow().renderer.gpu_specs().log_err()
+ self.state.renderer.borrow().gpu_specs().log_err()
}
fn update_ime_position(&self, _bounds: Bounds<Pixels>) {
@@ -889,11 +906,9 @@ struct WindowsDragDropHandler(pub Rc<WindowsWindowInner>);
impl WindowsDragDropHandler {
fn handle_drag_drop(&self, input: PlatformInput) {
- let mut lock = self.0.state.borrow_mut();
- if let Some(mut func) = lock.callbacks.input.take() {
- drop(lock);
+ if let Some(mut func) = self.0.state.callbacks.input.take() {
func(input);
- self.0.state.borrow_mut().callbacks.input = Some(func);
+ self.0.state.callbacks.input.set(Some(func));
}
}
}
@@ -937,7 +952,7 @@ impl IDropTarget_Impl for WindowsDragDropHandler_Impl {
ScreenToClient(self.0.hwnd, &mut cursor_position)
.ok()
.log_err();
- let scale_factor = self.0.state.borrow().scale_factor;
+ let scale_factor = self.0.state.scale_factor.get();
let input = PlatformInput::FileDrop(FileDropEvent::Entered {
position: logical_point(
cursor_position.x as f32,
@@ -975,7 +990,7 @@ impl IDropTarget_Impl for WindowsDragDropHandler_Impl {
.ok()
.log_err();
}
- let scale_factor = self.0.state.borrow().scale_factor;
+ let scale_factor = self.0.state.scale_factor.get();
let input = PlatformInput::FileDrop(FileDropEvent::Pending {
position: logical_point(
cursor_position.x as f32,
@@ -1017,7 +1032,7 @@ impl IDropTarget_Impl for WindowsDragDropHandler_Impl {
.ok()
.log_err();
}
- let scale_factor = self.0.state.borrow().scale_factor;
+ let scale_factor = self.0.state.scale_factor.get();
let input = PlatformInput::FileDrop(FileDropEvent::Submit {
position: logical_point(
cursor_position.x as f32,
@@ -1031,15 +1046,15 @@ impl IDropTarget_Impl for WindowsDragDropHandler_Impl {
}
}
-#[derive(Debug, Clone, Copy)]
+#[derive(Debug, Clone)]
pub(crate) struct ClickState {
- button: MouseButton,
- last_click: Instant,
- last_position: Point<DevicePixels>,
- double_click_spatial_tolerance_width: i32,
- double_click_spatial_tolerance_height: i32,
- double_click_interval: Duration,
- pub(crate) current_count: usize,
+ button: Cell<MouseButton>,
+ last_click: Cell<Instant>,
+ last_position: Cell<Point<DevicePixels>>,
+ double_click_spatial_tolerance_width: Cell<i32>,
+ double_click_spatial_tolerance_height: Cell<i32>,
+ double_click_interval: Cell<Duration>,
+ pub(crate) current_count: Cell<usize>,
}
impl ClickState {
@@ -1049,61 +1064,59 @@ impl ClickState {
let double_click_interval = Duration::from_millis(unsafe { GetDoubleClickTime() } as u64);
ClickState {
- button: MouseButton::Left,
- last_click: Instant::now(),
- last_position: Point::default(),
- double_click_spatial_tolerance_width,
- double_click_spatial_tolerance_height,
- double_click_interval,
- current_count: 0,
+ button: Cell::new(MouseButton::Left),
+ last_click: Cell::new(Instant::now()),
+ last_position: Cell::new(Point::default()),
+ double_click_spatial_tolerance_width: Cell::new(double_click_spatial_tolerance_width),
+ double_click_spatial_tolerance_height: Cell::new(double_click_spatial_tolerance_height),
+ double_click_interval: Cell::new(double_click_interval),
+ current_count: Cell::new(0),
}
}
/// update self and return the needed click count
- pub fn update(&mut self, button: MouseButton, new_position: Point<DevicePixels>) -> usize {
- if self.button == button && self.is_double_click(new_position) {
- self.current_count += 1;
+ pub fn update(&self, button: MouseButton, new_position: Point<DevicePixels>) -> usize {
+ if self.button.get() == button && self.is_double_click(new_position) {
+ self.current_count.update(|it| it + 1);
} else {
- self.current_count = 1;
+ self.current_count.set(1);
}
- self.last_click = Instant::now();
- self.last_position = new_position;
- self.button = button;
+ self.last_click.set(Instant::now());
+ self.last_position.set(new_position);
+ self.button.set(button);
- self.current_count
+ self.current_count.get()
}
- pub fn system_update(&mut self, wparam: usize) {
+ pub fn system_update(&self, wparam: usize) {
match wparam {
// SPI_SETDOUBLECLKWIDTH
- 29 => {
- self.double_click_spatial_tolerance_width =
- unsafe { GetSystemMetrics(SM_CXDOUBLECLK) }
- }
+ 29 => self
+ .double_click_spatial_tolerance_width
+ .set(unsafe { GetSystemMetrics(SM_CXDOUBLECLK) }),
// SPI_SETDOUBLECLKHEIGHT
- 30 => {
- self.double_click_spatial_tolerance_height =
- unsafe { GetSystemMetrics(SM_CYDOUBLECLK) }
- }
+ 30 => self
+ .double_click_spatial_tolerance_height
+ .set(unsafe { GetSystemMetrics(SM_CYDOUBLECLK) }),
// SPI_SETDOUBLECLICKTIME
- 32 => {
- self.double_click_interval =
- Duration::from_millis(unsafe { GetDoubleClickTime() } as u64)
- }
+ 32 => self
+ .double_click_interval
+ .set(Duration::from_millis(unsafe { GetDoubleClickTime() } as u64)),
_ => {}
}
}
#[inline]
fn is_double_click(&self, new_position: Point<DevicePixels>) -> bool {
- let diff = self.last_position - new_position;
+ let diff = self.last_position.get() - new_position;
- self.last_click.elapsed() < self.double_click_interval
- && diff.x.0.abs() <= self.double_click_spatial_tolerance_width
- && diff.y.0.abs() <= self.double_click_spatial_tolerance_height
+ self.last_click.get().elapsed() < self.double_click_interval.get()
+ && diff.x.0.abs() <= self.double_click_spatial_tolerance_width.get()
+ && diff.y.0.abs() <= self.double_click_spatial_tolerance_height.get()
}
}
+#[derive(Copy, Clone)]
struct StyleAndBounds {
style: WINDOW_STYLE,
x: i32,
@@ -1129,14 +1142,14 @@ struct AccentPolicy {
type Color = (u8, u8, u8, u8);
-#[derive(Debug, Default, Clone, Copy)]
+#[derive(Debug, Default, Clone)]
pub(crate) struct WindowBorderOffset {
- pub(crate) width_offset: i32,
- pub(crate) height_offset: i32,
+ pub(crate) width_offset: Cell<i32>,
+ pub(crate) height_offset: Cell<i32>,
}
impl WindowBorderOffset {
- pub(crate) fn update(&mut self, hwnd: HWND) -> anyhow::Result<()> {
+ pub(crate) fn update(&self, hwnd: HWND) -> anyhow::Result<()> {
let window_rect = unsafe {
let mut rect = std::mem::zeroed();
GetWindowRect(hwnd, &mut rect)?;
@@ -1147,19 +1160,21 @@ impl WindowBorderOffset {
GetClientRect(hwnd, &mut rect)?;
rect
};
- self.width_offset =
- (window_rect.right - window_rect.left) - (client_rect.right - client_rect.left);
- self.height_offset =
- (window_rect.bottom - window_rect.top) - (client_rect.bottom - client_rect.top);
+ self.width_offset
+ .set((window_rect.right - window_rect.left) - (client_rect.right - client_rect.left));
+ self.height_offset
+ .set((window_rect.bottom - window_rect.top) - (client_rect.bottom - client_rect.top));
Ok(())
}
}
+#[derive(Clone)]
struct WindowOpenStatus {
placement: WINDOWPLACEMENT,
state: WindowOpenState,
}
+#[derive(Clone, Copy)]
enum WindowOpenState {
Maximized,
Fullscreen,
@@ -1269,7 +1284,7 @@ fn register_drag_drop(window: &Rc<WindowsWindowInner>) -> Result<()> {
Ok(())
}
-fn calculate_window_rect(bounds: Bounds<DevicePixels>, border_offset: WindowBorderOffset) -> RECT {
+fn calculate_window_rect(bounds: Bounds<DevicePixels>, border_offset: &WindowBorderOffset) -> RECT {
// NOTE:
// The reason we're not using `AdjustWindowRectEx()` here is
// that the size reported by this function is incorrect.
@@ -1283,10 +1298,10 @@ fn calculate_window_rect(bounds: Bounds<DevicePixels>, border_offset: WindowBord
right: bounds.right().0,
bottom: bounds.bottom().0,
};
- let left_offset = border_offset.width_offset / 2;
- let top_offset = border_offset.height_offset / 2;
- let right_offset = border_offset.width_offset - left_offset;
- let bottom_offset = border_offset.height_offset - top_offset;
+ let left_offset = border_offset.width_offset.get() / 2;
+ let top_offset = border_offset.height_offset.get() / 2;
+ let right_offset = border_offset.width_offset.get() - left_offset;
+ let bottom_offset = border_offset.height_offset.get() - top_offset;
rect.left -= left_offset;
rect.top -= top_offset;
rect.right += right_offset;
@@ -1296,13 +1311,13 @@ fn calculate_window_rect(bounds: Bounds<DevicePixels>, border_offset: WindowBord
fn calculate_client_rect(
rect: RECT,
- border_offset: WindowBorderOffset,
+ border_offset: &WindowBorderOffset,
scale_factor: f32,
) -> Bounds<Pixels> {
- let left_offset = border_offset.width_offset / 2;
- let top_offset = border_offset.height_offset / 2;
- let right_offset = border_offset.width_offset - left_offset;
- let bottom_offset = border_offset.height_offset - top_offset;
+ let left_offset = border_offset.width_offset.get() / 2;
+ let top_offset = border_offset.height_offset.get() / 2;
+ let right_offset = border_offset.width_offset.get() - left_offset;
+ let bottom_offset = border_offset.height_offset.get() - top_offset;
let left = rect.left + left_offset;
let top = rect.top + top_offset;
let right = rect.right - right_offset;
@@ -1319,7 +1334,7 @@ fn retrieve_window_placement(
display: WindowsDisplay,
initial_bounds: Bounds<Pixels>,
scale_factor: f32,
- border_offset: WindowBorderOffset,
+ border_offset: &WindowBorderOffset,
) -> Result<WINDOWPLACEMENT> {
let mut placement = WINDOWPLACEMENT {
length: std::mem::size_of::<WINDOWPLACEMENT>() as u32,
@@ -1429,7 +1444,9 @@ mod tests {
state.update(MouseButton::Left, point(DevicePixels(0), DevicePixels(0))),
2
);
- state.last_click -= Duration::from_millis(700);
+ state
+ .last_click
+ .update(|it| it - Duration::from_millis(700));
assert_eq!(
state.update(MouseButton::Left, point(DevicePixels(0), DevicePixels(0))),
1
@@ -8,6 +8,7 @@ use std::{fmt::Debug, ops::Range};
use taffy::{
TaffyTree, TraversePartialTree as _,
geometry::{Point as TaffyPoint, Rect as TaffyRect, Size as TaffySize},
+ prelude::min_content,
style::AvailableSpace as TaffyAvailableSpace,
tree::NodeId,
};
@@ -295,7 +296,7 @@ trait ToTaffy<Output> {
impl ToTaffy<taffy::style::Style> for Style {
fn to_taffy(&self, rem_size: Pixels, scale_factor: f32) -> taffy::style::Style {
- use taffy::style_helpers::{fr, length, minmax, repeat};
+ use taffy::style_helpers::{length, minmax, repeat};
fn to_grid_line(
placement: &Range<crate::GridPlacement>,
@@ -309,8 +310,8 @@ impl ToTaffy<taffy::style::Style> for Style {
fn to_grid_repeat<T: taffy::style::CheapCloneStr>(
unit: &Option<u16>,
) -> Vec<taffy::GridTemplateComponent<T>> {
- // grid-template-columns: repeat(<number>, minmax(0, 1fr));
- unit.map(|count| vec![repeat(count, vec![minmax(length(0.0), fr(1.0))])])
+ // grid-template-columns: repeat(<number>, minmax(0, min-content));
+ unit.map(|count| vec![repeat(count, vec![minmax(length(0.0), min_content())])])
.unwrap_or_default()
}
@@ -596,7 +596,7 @@ pub enum HitboxBehavior {
/// ```
///
/// This has effects beyond event handling - any use of hitbox checking, such as hover
- /// styles and tooltops. These other behaviors are the main point of this mechanism. An
+ /// styles and tooltips. These other behaviors are the main point of this mechanism. An
/// alternative might be to not affect mouse event handling - but this would allow
/// inconsistent UI where clicks and moves interact with elements that are not considered to
/// be hovered.
@@ -624,7 +624,7 @@ pub enum HitboxBehavior {
/// desired, then a `cx.stop_propagation()` handler like the one above can be used.
///
/// This has effects beyond event handling - this affects any use of `is_hovered`, such as
- /// hover styles and tooltops. These other behaviors are the main point of this mechanism.
+ /// hover styles and tooltips. These other behaviors are the main point of this mechanism.
/// An alternative might be to not affect mouse event handling - but this would allow
/// inconsistent UI where clicks and moves interact with elements that are not considered to
/// be hovered.
@@ -2006,7 +2006,9 @@ impl Window {
if let Some(input_handler) = self.platform_window.take_input_handler() {
self.rendered_frame.input_handlers.push(Some(input_handler));
}
- self.draw_roots(cx);
+ if !cx.mode.skip_drawing() {
+ self.draw_roots(cx);
+ }
self.dirty_views.clear();
self.next_frame.window_active = self.active.get();
@@ -1,4 +1,4 @@
-use std::{future::Future, path::Path, pin::Pin, task::Poll};
+use std::{path::Path, pin::Pin, task::Poll};
use anyhow::{Context, Result};
use async_compression::futures::bufread::GzipDecoder;
@@ -85,65 +85,6 @@ pub async fn download_server_binary(
Ok(())
}
-pub async fn fetch_github_binary_with_digest_check<ValidityCheck, ValidityCheckFuture>(
- binary_path: &Path,
- metadata_path: &Path,
- expected_digest: Option<String>,
- url: &str,
- asset_kind: AssetKind,
- download_destination: &Path,
- http_client: &dyn HttpClient,
- validity_check: ValidityCheck,
-) -> Result<()>
-where
- ValidityCheck: FnOnce() -> ValidityCheckFuture,
- ValidityCheckFuture: Future<Output = Result<()>>,
-{
- let metadata = GithubBinaryMetadata::read_from_file(metadata_path)
- .await
- .ok();
-
- if let Some(metadata) = metadata {
- let validity_check_result = validity_check().await;
-
- if let (Some(actual_digest), Some(expected_digest_ref)) =
- (&metadata.digest, &expected_digest)
- {
- if actual_digest == expected_digest_ref {
- if validity_check_result.is_ok() {
- return Ok(());
- }
- } else {
- log::info!(
- "SHA-256 mismatch for {binary_path:?} asset, downloading new asset. Expected: {expected_digest_ref}, Got: {actual_digest}"
- );
- }
- } else if validity_check_result.is_ok() {
- return Ok(());
- }
- }
-
- download_server_binary(
- http_client,
- url,
- expected_digest.as_deref(),
- download_destination,
- asset_kind,
- )
- .await?;
-
- GithubBinaryMetadata::write_to_file(
- &GithubBinaryMetadata {
- metadata_version: 1,
- digest: expected_digest,
- },
- metadata_path,
- )
- .await?;
-
- Ok(())
-}
-
async fn stream_response_archive(
response: impl AsyncRead + Unpin,
url: &str,
@@ -408,6 +408,7 @@ impl FakeHttpClient {
}
pub fn with_404_response() -> Arc<HttpClientWithUrl> {
+ log::warn!("Using fake HTTP client with 404 response");
Self::create(|_| async move {
Ok(Response::builder()
.status(404)
@@ -417,6 +418,7 @@ impl FakeHttpClient {
}
pub fn with_200_response() -> Arc<HttpClientWithUrl> {
+ log::warn!("Using fake HTTP client with 200 response");
Self::create(|_| async move {
Ok(Response::builder()
.status(200)
@@ -86,7 +86,6 @@ pub enum IconName {
DebugIgnoreBreakpoints,
DebugLogBreakpoint,
DebugPause,
- DebugStepBack,
DebugStepInto,
DebugStepOut,
DebugStepOver,
@@ -686,7 +686,6 @@ impl CompletionProvider for RustStyleCompletionProvider {
position: language::Anchor,
_text: &str,
_trigger_in_words: bool,
- _menu_is_open: bool,
cx: &mut Context<Editor>,
) -> bool {
completion_replace_range(&buffer.read(cx).snapshot(), &position).is_some()
@@ -3001,7 +3001,6 @@ impl CompletionProvider for KeyContextCompletionProvider {
_position: language::Anchor,
text: &str,
_trigger_in_words: bool,
- _menu_is_open: bool,
_cx: &mut Context<Editor>,
) -> bool {
text.chars()
@@ -32,7 +32,6 @@ use gpui::{
Task, TaskLabel, TextStyle,
};
-use itertools::Itertools;
use lsp::{LanguageServerId, NumberOrString};
use parking_lot::{Mutex, RawMutex, lock_api::MutexGuard};
use serde::{Deserialize, Serialize};
@@ -45,7 +44,7 @@ use std::{
borrow::Cow,
cell::Cell,
cmp::{self, Ordering, Reverse},
- collections::{BTreeMap, BTreeSet, hash_map},
+ collections::{BTreeMap, BTreeSet},
future::Future,
iter::{self, Iterator, Peekable},
mem,
@@ -238,6 +237,8 @@ struct SelectionSet {
pub struct Diagnostic {
/// The name of the service that produced this diagnostic.
pub source: Option<String>,
+ /// The ID provided by the dynamic registration that produced this diagnostic.
+ pub registration_id: Option<SharedString>,
/// A machine-readable code that identifies this diagnostic.
pub code: Option<NumberOrString>,
pub code_description: Option<lsp::Uri>,
@@ -4284,7 +4285,6 @@ impl BufferSnapshot {
let mut new_bracket_matches = HashMap::default();
let mut all_bracket_matches = HashMap::default();
- let mut bracket_matches_to_color = HashMap::default();
for chunk in tree_sitter_data
.chunks
@@ -4301,7 +4301,10 @@ impl BufferSnapshot {
let bracket_matches = match tree_sitter_data.brackets_by_chunks[chunk.id].take() {
Some(cached_brackets) => cached_brackets,
None => {
- let mut bracket_pairs_ends = Vec::new();
+ let mut all_brackets = Vec::new();
+ let mut opens = Vec::new();
+ let mut color_pairs = Vec::new();
+
let mut matches =
self.syntax
.matches(chunk_range.clone(), &self.text, |grammar| {
@@ -4313,100 +4316,76 @@ impl BufferSnapshot {
.map(|grammar| grammar.brackets_config.as_ref().unwrap())
.collect::<Vec<_>>();
- let chunk_range = chunk_range.clone();
- let tree_sitter_matches = iter::from_fn(|| {
- while let Some(mat) = matches.peek() {
- let mut open = None;
- let mut close = None;
- let depth = mat.depth;
- let config = configs[mat.grammar_index];
- let pattern = &config.patterns[mat.pattern_index];
- for capture in mat.captures {
- if capture.index == config.open_capture_ix {
- open = Some(capture.node.byte_range());
- } else if capture.index == config.close_capture_ix {
- close = Some(capture.node.byte_range());
- }
+ while let Some(mat) = matches.peek() {
+ let mut open = None;
+ let mut close = None;
+ let syntax_layer_depth = mat.depth;
+ let config = configs[mat.grammar_index];
+ let pattern = &config.patterns[mat.pattern_index];
+ for capture in mat.captures {
+ if capture.index == config.open_capture_ix {
+ open = Some(capture.node.byte_range());
+ } else if capture.index == config.close_capture_ix {
+ close = Some(capture.node.byte_range());
}
+ }
- matches.advance();
+ matches.advance();
- let Some((open_range, close_range)) = open.zip(close) else {
- continue;
- };
+ let Some((open_range, close_range)) = open.zip(close) else {
+ continue;
+ };
- let bracket_range = open_range.start..=close_range.end;
- if !bracket_range.overlaps(&chunk_range) {
- continue;
- }
+ let bracket_range = open_range.start..=close_range.end;
+ if !bracket_range.overlaps(&chunk_range) {
+ continue;
+ }
- if !pattern.rainbow_exclude
- // Also, certain languages have "brackets" that are not brackets, e.g. tags. and such
- // bracket will match the entire tag with all text inside.
- // For now, avoid highlighting any pair that has more than single char in each bracket.
- // We need to colorize `<Element/>` bracket pairs, so cannot make this check stricter.
- && (open_range.len() == 1 || close_range.len() == 1)
- {
- // Certain tree-sitter grammars may return more bracket pairs than needed:
- // see `test_markdown_bracket_colorization` for a set-up that returns pairs with the same start bracket and different end one.
- // Pick the pair with the shortest range in case of ambiguity.
- match bracket_matches_to_color.entry(open_range.clone()) {
- hash_map::Entry::Vacant(v) => {
- v.insert(close_range.clone());
- }
- hash_map::Entry::Occupied(mut o) => {
- let previous_close_range = o.get();
- let previous_length =
- previous_close_range.end - open_range.start;
- let new_length = close_range.end - open_range.start;
- if new_length < previous_length {
- o.insert(close_range.clone());
- }
- }
- }
- }
- return Some((open_range, close_range, pattern, depth));
+ let index = all_brackets.len();
+ all_brackets.push(BracketMatch {
+ open_range: open_range.clone(),
+ close_range: close_range.clone(),
+ newline_only: pattern.newline_only,
+ syntax_layer_depth,
+ color_index: None,
+ });
+
+ // Certain languages have "brackets" that are not brackets, e.g. tags, and such
+ // a bracket will match the entire tag with all text inside.
+ // For now, avoid highlighting any pair that has more than single char in each bracket.
+ // We need to colorize `<Element/>` bracket pairs, so cannot make this check stricter.
+ let should_color = !pattern.rainbow_exclude
+ && (open_range.len() == 1 || close_range.len() == 1);
+ if should_color {
+ opens.push(open_range.clone());
+ color_pairs.push((open_range, close_range, index));
}
- None
- })
- .sorted_by_key(|(open_range, _, _, _)| open_range.start)
- .collect::<Vec<_>>();
+ }
- let new_matches = tree_sitter_matches
- .into_iter()
- .map(|(open_range, close_range, pattern, syntax_layer_depth)| {
- let participates_in_colorizing =
- bracket_matches_to_color.get(&open_range).is_some_and(
- |close_range_to_color| close_range_to_color == &close_range,
- );
- let color_index = if participates_in_colorizing {
- while let Some(&last_bracket_end) = bracket_pairs_ends.last() {
- if last_bracket_end <= open_range.start {
- bracket_pairs_ends.pop();
- } else {
- break;
- }
- }
+ opens.sort_by_key(|r| (r.start, r.end));
+ opens.dedup_by(|a, b| a.start == b.start && a.end == b.end);
+ color_pairs.sort_by_key(|(_, close, _)| close.end);
- let bracket_depth = bracket_pairs_ends.len();
- bracket_pairs_ends.push(close_range.end);
- Some(bracket_depth)
- } else {
- None
- };
+ let mut open_stack = Vec::new();
+ let mut open_index = 0;
+ for (open, close, index) in color_pairs {
+ while open_index < opens.len() && opens[open_index].start < close.start {
+ open_stack.push(opens[open_index].clone());
+ open_index += 1;
+ }
- BracketMatch {
- open_range,
- close_range,
- syntax_layer_depth,
- newline_only: pattern.newline_only,
- color_index,
- }
- })
- .collect::<Vec<_>>();
+ if open_stack.last() == Some(&open) {
+ let depth_index = open_stack.len() - 1;
+ all_brackets[index].color_index = Some(depth_index);
+ open_stack.pop();
+ }
+ }
- new_bracket_matches.insert(chunk.id, new_matches.clone());
- new_matches
+ all_brackets.sort_by_key(|bracket_match| {
+ (bracket_match.open_range.start, bracket_match.open_range.end)
+ });
+ new_bracket_matches.insert(chunk.id, all_brackets.clone());
+ all_brackets
}
};
all_bracket_matches.insert(chunk.row_range(), bracket_matches);
@@ -5413,6 +5392,7 @@ impl Default for Diagnostic {
is_unnecessary: false,
underline: true,
data: None,
+ registration_id: None,
}
}
}
@@ -3,6 +3,7 @@
use crate::{CursorShape, Diagnostic, DiagnosticSourceKind, diagnostic_set::DiagnosticEntry};
use anyhow::{Context as _, Result};
use clock::ReplicaId;
+use gpui::SharedString;
use lsp::{DiagnosticSeverity, LanguageServerId};
use rpc::proto;
use serde_json::Value;
@@ -239,6 +240,11 @@ pub fn serialize_diagnostics<'a>(
is_disk_based: entry.diagnostic.is_disk_based,
is_unnecessary: entry.diagnostic.is_unnecessary,
data: entry.diagnostic.data.as_ref().map(|data| data.to_string()),
+ registration_id: entry
+ .diagnostic
+ .registration_id
+ .as_ref()
+ .map(ToString::to_string),
})
.collect()
}
@@ -457,6 +463,7 @@ pub fn deserialize_diagnostics(
is_disk_based: diagnostic.is_disk_based,
is_unnecessary: diagnostic.is_unnecessary,
underline: diagnostic.underline,
+ registration_id: diagnostic.registration_id.map(SharedString::from),
source_kind: match proto::diagnostic::SourceKind::from_i32(
diagnostic.source_kind,
)? {
@@ -135,6 +135,11 @@ impl LanguageModelRegistry {
fake_provider
}
+ #[cfg(any(test, feature = "test-support"))]
+ pub fn fake_model(&self) -> Arc<dyn LanguageModel> {
+ self.default_model.as_ref().unwrap().model.clone()
+ }
+
pub fn register_provider<T: LanguageModelProvider + LanguageModelProviderState>(
&mut self,
provider: Arc<T>,
@@ -71,6 +71,7 @@ pub struct AmazonBedrockSettings {
pub profile_name: Option<String>,
pub role_arn: Option<String>,
pub authentication_method: Option<BedrockAuthMethod>,
+ pub allow_global: Option<bool>,
}
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, EnumIter, IntoStaticStr, JsonSchema)]
@@ -239,6 +240,13 @@ impl State {
.or(settings_region)
.unwrap_or(String::from("us-east-1"))
}
+
+ fn get_allow_global(&self) -> bool {
+ self.settings
+ .as_ref()
+ .and_then(|s| s.allow_global)
+ .unwrap_or(false)
+ }
}
pub struct BedrockLanguageModelProvider {
@@ -545,11 +553,13 @@ impl LanguageModel for BedrockModel {
LanguageModelCompletionError,
>,
> {
- let Ok(region) = cx.read_entity(&self.state, |state, _cx| state.get_region()) else {
+ let Ok((region, allow_global)) = cx.read_entity(&self.state, |state, _cx| {
+ (state.get_region(), state.get_allow_global())
+ }) else {
return async move { Err(anyhow::anyhow!("App State Dropped").into()) }.boxed();
};
- let model_id = match self.model.cross_region_inference_id(®ion) {
+ let model_id = match self.model.cross_region_inference_id(®ion, allow_global) {
Ok(s) => s,
Err(e) => {
return async move { Err(e.into()) }.boxed();
@@ -58,6 +58,7 @@ impl settings::Settings for AllLanguageModelSettings {
profile_name: bedrock.profile,
role_arn: None, // todo(was never a setting for this...)
authentication_method: bedrock.authentication_method.map(Into::into),
+ allow_global: bedrock.allow_global,
},
deepseek: DeepSeekSettings {
api_url: deepseek.api_url.unwrap(),
@@ -805,11 +805,13 @@ impl SearchableItem for LspLogView {
fn update_matches(
&mut self,
matches: &[Self::Match],
+ active_match_index: Option<usize>,
window: &mut Window,
cx: &mut Context<Self>,
) {
- self.editor
- .update(cx, |e, cx| e.update_matches(matches, window, cx))
+ self.editor.update(cx, |e, cx| {
+ e.update_matches(matches, active_match_index, window, cx)
+ })
}
fn query_suggestion(&mut self, window: &mut Window, cx: &mut Context<Self>) -> String {
@@ -937,7 +939,7 @@ impl Render for LspLogToolbarItemView {
})
.collect();
- let log_toolbar_view = cx.entity();
+ let log_toolbar_view = cx.weak_entity();
let lsp_menu = PopoverMenu::new("LspLogView")
.anchor(Corner::TopLeft)
@@ -1021,7 +1023,7 @@ impl Render for LspLogToolbarItemView {
.icon_color(Color::Muted),
)
.menu(move |window, cx| {
- let log_toolbar_view = log_toolbar_view.clone();
+ let log_toolbar_view = log_toolbar_view.upgrade()?;
let log_view = log_view.clone();
Some(ContextMenu::build(window, cx, move |this, window, _| {
this.entry(
@@ -459,7 +459,7 @@ impl SyntaxTreeView {
editor.clear_background_highlights::<Self>(cx);
editor.highlight_background::<Self>(
&[range],
- |theme| {
+ |_, theme| {
theme
.colors()
.editor_document_highlight_write_background
@@ -614,13 +614,14 @@ impl SyntaxTreeToolbarItemView {
let active_layer = buffer_state.active_layer.clone()?;
let active_buffer = buffer_state.buffer.read(cx).snapshot();
- let view = cx.entity();
+ let view = cx.weak_entity();
Some(
PopoverMenu::new("Syntax Tree")
.trigger(Self::render_header(&active_layer))
.menu(move |window, cx| {
- ContextMenu::build(window, cx, |mut menu, window, _| {
+ ContextMenu::build(window, cx, |mut menu, _, _| {
for (layer_ix, layer) in active_buffer.syntax_layers().enumerate() {
+ let view = view.clone();
menu = menu.entry(
format!(
"{} {}",
@@ -628,9 +629,12 @@ impl SyntaxTreeToolbarItemView {
format_node_range(layer.node())
),
None,
- window.handler_for(&view, move |view, window, cx| {
- view.select_layer(layer_ix, window, cx);
- }),
+ move |window, cx| {
+ view.update(cx, |view, cx| {
+ view.select_layer(layer_ix, window, cx);
+ })
+ .ok();
+ },
);
}
menu
@@ -3,7 +3,7 @@ use async_trait::async_trait;
use futures::StreamExt;
use gpui::{App, AsyncApp};
use http_client::github::{AssetKind, GitHubLspBinaryVersion, latest_github_release};
-use http_client::github_download::fetch_github_binary_with_digest_check;
+use http_client::github_download::{GithubBinaryMetadata, download_server_binary};
pub use language::*;
use lsp::{InitializeParams, LanguageServerBinary, LanguageServerName};
use project::lsp_store::clangd_ext;
@@ -85,32 +85,55 @@ impl LspInstaller for CLspAdapter {
};
let metadata_path = version_dir.join("metadata");
-
- let binary_path_for_check = binary_path.clone();
- fetch_github_binary_with_digest_check(
- &binary_path,
- &metadata_path,
- expected_digest,
- &url,
- AssetKind::Zip,
- &container_dir,
- &*delegate.http_client(),
- || async move {
+ let metadata = GithubBinaryMetadata::read_from_file(&metadata_path)
+ .await
+ .ok();
+ if let Some(metadata) = metadata {
+ let validity_check = async || {
delegate
.try_exec(LanguageServerBinary {
- path: binary_path_for_check,
+ path: binary_path.clone(),
arguments: vec!["--version".into()],
env: None,
})
.await
.inspect_err(|err| {
- log::warn!("Unable to run clangd asset, redownloading: {err:#}")
+ log::warn!("Unable to run {binary_path:?} asset, redownloading: {err:#}",)
})
- },
+ };
+ if let (Some(actual_digest), Some(expected_digest)) =
+ (&metadata.digest, &expected_digest)
+ {
+ if actual_digest == expected_digest {
+ if validity_check().await.is_ok() {
+ return Ok(binary);
+ }
+ } else {
+ log::info!(
+ "SHA-256 mismatch for {binary_path:?} asset, downloading new asset. Expected: {expected_digest}, Got: {actual_digest}"
+ );
+ }
+ } else if validity_check().await.is_ok() {
+ return Ok(binary);
+ }
+ }
+ download_server_binary(
+ &*delegate.http_client(),
+ &url,
+ expected_digest.as_deref(),
+ &container_dir,
+ AssetKind::Zip,
)
.await?;
-
remove_matching(&container_dir, |entry| entry != version_dir).await;
+ GithubBinaryMetadata::write_to_file(
+ &GithubBinaryMetadata {
+ metadata_version: 1,
+ digest: expected_digest,
+ },
+ &metadata_path,
+ )
+ .await?;
Ok(binary)
}
@@ -36,7 +36,7 @@
"#ifndef"
"#include"
(preproc_directive)
-] @keyword
+] @keyword.directive
[
"="
@@ -19,360 +19,717 @@
; INJECT SQL
(
- [
- ; var, const or short declaration of raw or interpreted string literal
- ((comment) @comment
- .
- (expression_list
- [
- (interpreted_string_literal)
- (raw_string_literal)
- ] @injection.content
- ))
-
- ; when passing as a literal element (to struct field eg.)
- ((comment) @comment
- .
- (literal_element
- [
- (interpreted_string_literal)
- (raw_string_literal)
- ] @injection.content
- ))
-
- ; when passing as a function parameter
- ((comment) @comment
- .
- [
- (interpreted_string_literal)
- (raw_string_literal)
- ] @injection.content)
- ]
+ [
+ (const_spec
+ name: (identifier)
+ "="
+ (comment) @_comment
+ value: (expression_list
+ [
+ (interpreted_string_literal (interpreted_string_literal_content) @injection.content)
+ (raw_string_literal (raw_string_literal_content) @injection.content)
+ ]
+ ))
+
+ (var_spec
+ name: (identifier)
+ "="
+ (comment) @_comment
+ value: (expression_list
+ [
+ (interpreted_string_literal (interpreted_string_literal_content) @injection.content)
+ (raw_string_literal (raw_string_literal_content) @injection.content)
+ ]
+ ))
+
+ (assignment_statement
+ left: (expression_list)
+ "="
+ (comment) @_comment
+ right: (expression_list
+ [
+ (interpreted_string_literal (interpreted_string_literal_content) @injection.content)
+ (raw_string_literal (raw_string_literal_content) @injection.content)
+ ]
+ ))
- (#match? @comment "^\\/\\*\\s*sql\\s*\\*\\/") ; /* sql */ or /*sql*/
- (#set! injection.language "sql")
+ (short_var_declaration
+ left: (expression_list)
+ ":="
+ (comment) @_comment
+ right: (expression_list
+ [
+ (interpreted_string_literal (interpreted_string_literal_content) @injection.content)
+ (raw_string_literal (raw_string_literal_content) @injection.content)
+ ]
+ ))
+
+ (composite_literal
+ body: (literal_value
+ (keyed_element
+ (comment) @_comment
+ value: (literal_element
+ [
+ (interpreted_string_literal (interpreted_string_literal_content) @injection.content)
+ (raw_string_literal (raw_string_literal_content) @injection.content)
+ ]
+ ))))
+
+ (expression_statement
+ (call_expression
+ (argument_list
+ (comment) @_comment
+ [
+ (interpreted_string_literal (interpreted_string_literal_content) @injection.content)
+ (raw_string_literal (raw_string_literal_content) @injection.content)
+ ]
+ )))
+ ]
+  (#match? @_comment "^\\/\\*\\s*sql\\s*\\*\\/$") ; /* sql */ or /*sql*/
+ (#set! injection.language "sql")
)
; INJECT JSON
(
- [
- ; var, const or short declaration of raw or interpreted string literal
- ((comment) @comment
- .
- (expression_list
- [
- (interpreted_string_literal)
- (raw_string_literal)
- ] @injection.content
- ))
-
- ; when passing as a literal element (to struct field eg.)
- ((comment) @comment
- .
- (literal_element
- [
- (interpreted_string_literal)
- (raw_string_literal)
- ] @injection.content
- ))
-
- ; when passing as a function parameter
- ((comment) @comment
- .
- [
- (interpreted_string_literal)
- (raw_string_literal)
- ] @injection.content)
- ]
+ [
+ (const_spec
+ name: (identifier)
+ "="
+ (comment) @_comment
+ value: (expression_list
+ [
+ (interpreted_string_literal (interpreted_string_literal_content) @injection.content)
+ (raw_string_literal (raw_string_literal_content) @injection.content)
+ ]
+ ))
- (#match? @comment "^\\/\\*\\s*json\\s*\\*\\/") ; /* json */ or /*json*/
+ (var_spec
+ name: (identifier)
+ "="
+ (comment) @_comment
+ value: (expression_list
+ [
+ (interpreted_string_literal (interpreted_string_literal_content) @injection.content)
+ (raw_string_literal (raw_string_literal_content) @injection.content)
+ ]
+ ))
+
+ (assignment_statement
+ left: (expression_list)
+ "="
+ (comment) @_comment
+ right: (expression_list
+ [
+ (interpreted_string_literal (interpreted_string_literal_content) @injection.content)
+ (raw_string_literal (raw_string_literal_content) @injection.content)
+ ]
+ ))
+
+ (short_var_declaration
+ left: (expression_list)
+ ":="
+ (comment) @_comment
+ right: (expression_list
+ [
+ (interpreted_string_literal (interpreted_string_literal_content) @injection.content)
+ (raw_string_literal (raw_string_literal_content) @injection.content)
+ ]
+ ))
+
+ (composite_literal
+ body: (literal_value
+ (keyed_element
+ (comment) @_comment
+ value: (literal_element
+ [
+ (interpreted_string_literal (interpreted_string_literal_content) @injection.content)
+ (raw_string_literal (raw_string_literal_content) @injection.content)
+ ]
+ ))))
+
+ (expression_statement
+ (call_expression
+ (argument_list
+ (comment) @_comment
+ [
+ (interpreted_string_literal (interpreted_string_literal_content) @injection.content)
+ (raw_string_literal (raw_string_literal_content) @injection.content)
+ ]
+ )))
+ ]
+ (#match? @_comment "^\\/\\*\\s*json\\s*\\*\\/") ; /* json */ or /*json*/
(#set! injection.language "json")
)
; INJECT YAML
(
- [
- ; var, const or short declaration of raw or interpreted string literal
- ((comment) @comment
- .
- (expression_list
- [
- (interpreted_string_literal)
- (raw_string_literal)
- ] @injection.content
- ))
-
- ; when passing as a literal element (to struct field eg.)
- ((comment) @comment
- .
- (literal_element
- [
- (interpreted_string_literal)
- (raw_string_literal)
- ] @injection.content
- ))
-
- ; when passing as a function parameter
- ((comment) @comment
- .
- [
- (interpreted_string_literal)
- (raw_string_literal)
- ] @injection.content)
- ]
+ [
+ (const_spec
+ name: (identifier)
+ "="
+ (comment) @_comment
+ value: (expression_list
+ [
+ (interpreted_string_literal (interpreted_string_literal_content) @injection.content)
+ (raw_string_literal (raw_string_literal_content) @injection.content)
+ ]
+ ))
+
+ (var_spec
+ name: (identifier)
+ "="
+ (comment) @_comment
+ value: (expression_list
+ [
+ (interpreted_string_literal (interpreted_string_literal_content) @injection.content)
+ (raw_string_literal (raw_string_literal_content) @injection.content)
+ ]
+ ))
+
+ (assignment_statement
+ left: (expression_list)
+ "="
+ (comment) @_comment
+ right: (expression_list
+ [
+ (interpreted_string_literal (interpreted_string_literal_content) @injection.content)
+ (raw_string_literal (raw_string_literal_content) @injection.content)
+ ]
+ ))
+
+ (short_var_declaration
+ left: (expression_list)
+ ":="
+ (comment) @_comment
+ right: (expression_list
+ [
+ (interpreted_string_literal (interpreted_string_literal_content) @injection.content)
+ (raw_string_literal (raw_string_literal_content) @injection.content)
+ ]
+ ))
- (#match? @comment "^\\/\\*\\s*yaml\\s*\\*\\/") ; /* yaml */ or /*yaml*/
+ (composite_literal
+ body: (literal_value
+ (keyed_element
+ (comment) @_comment
+ value: (literal_element
+ [
+ (interpreted_string_literal (interpreted_string_literal_content) @injection.content)
+ (raw_string_literal (raw_string_literal_content) @injection.content)
+ ]
+ ))))
+
+ (expression_statement
+ (call_expression
+ (argument_list
+ (comment) @_comment
+ [
+ (interpreted_string_literal (interpreted_string_literal_content) @injection.content)
+ (raw_string_literal (raw_string_literal_content) @injection.content)
+ ]
+ )))
+ ]
+ (#match? @_comment "^\\/\\*\\s*yaml\\s*\\*\\/") ; /* yaml */ or /*yaml*/
(#set! injection.language "yaml")
)
; INJECT XML
(
- [
- ; var, const or short declaration of raw or interpreted string literal
- ((comment) @comment
- .
- (expression_list
- [
- (interpreted_string_literal)
- (raw_string_literal)
- ] @injection.content
- ))
-
- ; when passing as a literal element (to struct field eg.)
- ((comment) @comment
- .
- (literal_element
- [
- (interpreted_string_literal)
- (raw_string_literal)
- ] @injection.content
- ))
-
- ; when passing as a function parameter
- ((comment) @comment
- .
- [
- (interpreted_string_literal)
- (raw_string_literal)
- ] @injection.content)
- ]
+ [
+ (const_spec
+ name: (identifier)
+ "="
+ (comment) @_comment
+ value: (expression_list
+ [
+ (interpreted_string_literal (interpreted_string_literal_content) @injection.content)
+ (raw_string_literal (raw_string_literal_content) @injection.content)
+ ]
+ ))
+
+ (var_spec
+ name: (identifier)
+ "="
+ (comment) @_comment
+ value: (expression_list
+ [
+ (interpreted_string_literal (interpreted_string_literal_content) @injection.content)
+ (raw_string_literal (raw_string_literal_content) @injection.content)
+ ]
+ ))
+
+ (assignment_statement
+ left: (expression_list)
+ "="
+ (comment) @_comment
+ right: (expression_list
+ [
+ (interpreted_string_literal (interpreted_string_literal_content) @injection.content)
+ (raw_string_literal (raw_string_literal_content) @injection.content)
+ ]
+ ))
+
+ (short_var_declaration
+ left: (expression_list)
+ ":="
+ (comment) @_comment
+ right: (expression_list
+ [
+ (interpreted_string_literal (interpreted_string_literal_content) @injection.content)
+ (raw_string_literal (raw_string_literal_content) @injection.content)
+ ]
+ ))
- (#match? @comment "^\\/\\*\\s*xml\\s*\\*\\/") ; /* xml */ or /*xml*/
+ (composite_literal
+ body: (literal_value
+ (keyed_element
+ (comment) @_comment
+ value: (literal_element
+ [
+ (interpreted_string_literal (interpreted_string_literal_content) @injection.content)
+ (raw_string_literal (raw_string_literal_content) @injection.content)
+ ]
+ ))))
+
+ (expression_statement
+ (call_expression
+ (argument_list
+ (comment) @_comment
+ [
+ (interpreted_string_literal (interpreted_string_literal_content) @injection.content)
+ (raw_string_literal (raw_string_literal_content) @injection.content)
+ ]
+ )))
+ ]
+ (#match? @_comment "^\\/\\*\\s*xml\\s*\\*\\/") ; /* xml */ or /*xml*/
(#set! injection.language "xml")
)
; INJECT HTML
(
- [
- ; var, const or short declaration of raw or interpreted string literal
- ((comment) @comment
- .
- (expression_list
- [
- (interpreted_string_literal)
- (raw_string_literal)
- ] @injection.content
- ))
-
- ; when passing as a literal element (to struct field eg.)
- ((comment) @comment
- .
- (literal_element
- [
- (interpreted_string_literal)
- (raw_string_literal)
- ] @injection.content
- ))
-
- ; when passing as a function parameter
- ((comment) @comment
- .
- [
- (interpreted_string_literal)
- (raw_string_literal)
- ] @injection.content)
- ]
+ [
+ (const_spec
+ name: (identifier)
+ "="
+ (comment) @_comment
+ value: (expression_list
+ [
+ (interpreted_string_literal (interpreted_string_literal_content) @injection.content)
+ (raw_string_literal (raw_string_literal_content) @injection.content)
+ ]
+ ))
- (#match? @comment "^\\/\\*\\s*html\\s*\\*\\/") ; /* html */ or /*html*/
+ (var_spec
+ name: (identifier)
+ "="
+ (comment) @_comment
+ value: (expression_list
+ [
+ (interpreted_string_literal (interpreted_string_literal_content) @injection.content)
+ (raw_string_literal (raw_string_literal_content) @injection.content)
+ ]
+ ))
+
+ (assignment_statement
+ left: (expression_list)
+ "="
+ (comment) @_comment
+ right: (expression_list
+ [
+ (interpreted_string_literal (interpreted_string_literal_content) @injection.content)
+ (raw_string_literal (raw_string_literal_content) @injection.content)
+ ]
+ ))
+
+ (short_var_declaration
+ left: (expression_list)
+ ":="
+ (comment) @_comment
+ right: (expression_list
+ [
+ (interpreted_string_literal (interpreted_string_literal_content) @injection.content)
+ (raw_string_literal (raw_string_literal_content) @injection.content)
+ ]
+ ))
+
+ (composite_literal
+ body: (literal_value
+ (keyed_element
+ (comment) @_comment
+ value: (literal_element
+ [
+ (interpreted_string_literal (interpreted_string_literal_content) @injection.content)
+ (raw_string_literal (raw_string_literal_content) @injection.content)
+ ]
+ ))))
+
+ (expression_statement
+ (call_expression
+ (argument_list
+ (comment) @_comment
+ [
+ (interpreted_string_literal (interpreted_string_literal_content) @injection.content)
+ (raw_string_literal (raw_string_literal_content) @injection.content)
+ ]
+ )))
+ ]
+ (#match? @_comment "^\\/\\*\\s*html\\s*\\*\\/") ; /* html */ or /*html*/
(#set! injection.language "html")
)
; INJECT JS
(
- [
- ; var, const or short declaration of raw or interpreted string literal
- ((comment) @comment
- .
- (expression_list
- [
- (interpreted_string_literal)
- (raw_string_literal)
- ] @injection.content
- ))
-
- ; when passing as a literal element (to struct field eg.)
- ((comment) @comment
- .
- (literal_element
- [
- (interpreted_string_literal)
- (raw_string_literal)
- ] @injection.content
- ))
-
- ; when passing as a function parameter
- ((comment) @comment
- .
- [
- (interpreted_string_literal)
- (raw_string_literal)
- ] @injection.content)
- ]
+ [
+ (const_spec
+ name: (identifier)
+ "="
+ (comment) @_comment
+ value: (expression_list
+ [
+ (interpreted_string_literal (interpreted_string_literal_content) @injection.content)
+ (raw_string_literal (raw_string_literal_content) @injection.content)
+ ]
+ ))
+
+ (var_spec
+ name: (identifier)
+ "="
+ (comment) @_comment
+ value: (expression_list
+ [
+ (interpreted_string_literal (interpreted_string_literal_content) @injection.content)
+ (raw_string_literal (raw_string_literal_content) @injection.content)
+ ]
+ ))
+
+ (assignment_statement
+ left: (expression_list)
+ "="
+ (comment) @_comment
+ right: (expression_list
+ [
+ (interpreted_string_literal (interpreted_string_literal_content) @injection.content)
+ (raw_string_literal (raw_string_literal_content) @injection.content)
+ ]
+ ))
+
+ (short_var_declaration
+ left: (expression_list)
+ ":="
+ (comment) @_comment
+ right: (expression_list
+ [
+ (interpreted_string_literal (interpreted_string_literal_content) @injection.content)
+ (raw_string_literal (raw_string_literal_content) @injection.content)
+ ]
+ ))
- (#match? @comment "^\\/\\*\\s*js\\s*\\*\\/") ; /* js */ or /*js*/
+ (composite_literal
+ body: (literal_value
+ (keyed_element
+ (comment) @_comment
+ value: (literal_element
+ [
+ (interpreted_string_literal (interpreted_string_literal_content) @injection.content)
+ (raw_string_literal (raw_string_literal_content) @injection.content)
+ ]
+ ))))
+
+ (expression_statement
+ (call_expression
+ (argument_list
+ (comment) @_comment
+ [
+ (interpreted_string_literal (interpreted_string_literal_content) @injection.content)
+ (raw_string_literal (raw_string_literal_content) @injection.content)
+ ]
+ )))
+ ]
+ (#match? @_comment "^\\/\\*\\s*js\\s*\\*\\/") ; /* js */ or /*js*/
(#set! injection.language "javascript")
)
+
; INJECT CSS
(
- [
- ; var, const or short declaration of raw or interpreted string literal
- ((comment) @comment
- .
- (expression_list
- [
- (interpreted_string_literal)
- (raw_string_literal)
- ] @injection.content
- ))
-
- ; when passing as a literal element (to struct field eg.)
- ((comment) @comment
- .
- (literal_element
- [
- (interpreted_string_literal)
- (raw_string_literal)
- ] @injection.content
- ))
-
- ; when passing as a function parameter
- ((comment) @comment
- .
- [
- (interpreted_string_literal)
- (raw_string_literal)
- ] @injection.content)
- ]
+ [
+ (const_spec
+ name: (identifier)
+ "="
+ (comment) @_comment
+ value: (expression_list
+ [
+ (interpreted_string_literal (interpreted_string_literal_content) @injection.content)
+ (raw_string_literal (raw_string_literal_content) @injection.content)
+ ]
+ ))
+
+ (var_spec
+ name: (identifier)
+ "="
+ (comment) @_comment
+ value: (expression_list
+ [
+ (interpreted_string_literal (interpreted_string_literal_content) @injection.content)
+ (raw_string_literal (raw_string_literal_content) @injection.content)
+ ]
+ ))
+
+ (assignment_statement
+ left: (expression_list)
+ "="
+ (comment) @_comment
+ right: (expression_list
+ [
+ (interpreted_string_literal (interpreted_string_literal_content) @injection.content)
+ (raw_string_literal (raw_string_literal_content) @injection.content)
+ ]
+ ))
- (#match? @comment "^\\/\\*\\s*css\\s*\\*\\/") ; /* css */ or /*css*/
+ (short_var_declaration
+ left: (expression_list)
+ ":="
+ (comment) @_comment
+ right: (expression_list
+ [
+ (interpreted_string_literal (interpreted_string_literal_content) @injection.content)
+ (raw_string_literal (raw_string_literal_content) @injection.content)
+ ]
+ ))
+
+ (composite_literal
+ body: (literal_value
+ (keyed_element
+ (comment) @_comment
+ value: (literal_element
+ [
+ (interpreted_string_literal (interpreted_string_literal_content) @injection.content)
+ (raw_string_literal (raw_string_literal_content) @injection.content)
+ ]
+ ))))
+
+ (expression_statement
+ (call_expression
+ (argument_list
+ (comment) @_comment
+ [
+ (interpreted_string_literal (interpreted_string_literal_content) @injection.content)
+ (raw_string_literal (raw_string_literal_content) @injection.content)
+ ]
+ )))
+ ]
+ (#match? @_comment "^\\/\\*\\s*css\\s*\\*\\/") ; /* css */ or /*css*/
(#set! injection.language "css")
)
+
; INJECT LUA
(
- [
- ; var, const or short declaration of raw or interpreted string literal
- ((comment) @comment
- .
- (expression_list
- [
- (interpreted_string_literal)
- (raw_string_literal)
- ] @injection.content
- ))
-
- ; when passing as a literal element (to struct field eg.)
- ((comment) @comment
- .
- (literal_element
- [
- (interpreted_string_literal)
- (raw_string_literal)
- ] @injection.content
- ))
-
- ; when passing as a function parameter
- ((comment) @comment
- .
- [
- (interpreted_string_literal)
- (raw_string_literal)
- ] @injection.content)
- ]
+ [
+ (const_spec
+ name: (identifier)
+ "="
+ (comment) @_comment
+ value: (expression_list
+ [
+ (interpreted_string_literal (interpreted_string_literal_content) @injection.content)
+ (raw_string_literal (raw_string_literal_content) @injection.content)
+ ]
+ ))
+
+ (var_spec
+ name: (identifier)
+ "="
+ (comment) @_comment
+ value: (expression_list
+ [
+ (interpreted_string_literal (interpreted_string_literal_content) @injection.content)
+ (raw_string_literal (raw_string_literal_content) @injection.content)
+ ]
+ ))
+
+ (assignment_statement
+ left: (expression_list)
+ "="
+ (comment) @_comment
+ right: (expression_list
+ [
+ (interpreted_string_literal (interpreted_string_literal_content) @injection.content)
+ (raw_string_literal (raw_string_literal_content) @injection.content)
+ ]
+ ))
+
+ (short_var_declaration
+ left: (expression_list)
+ ":="
+ (comment) @_comment
+ right: (expression_list
+ [
+ (interpreted_string_literal (interpreted_string_literal_content) @injection.content)
+ (raw_string_literal (raw_string_literal_content) @injection.content)
+ ]
+ ))
- (#match? @comment "^\\/\\*\\s*lua\\s*\\*\\/") ; /* lua */ or /*lua*/
+ (composite_literal
+ body: (literal_value
+ (keyed_element
+ (comment) @_comment
+ value: (literal_element
+ [
+ (interpreted_string_literal (interpreted_string_literal_content) @injection.content)
+ (raw_string_literal (raw_string_literal_content) @injection.content)
+ ]
+ ))))
+
+ (expression_statement
+ (call_expression
+ (argument_list
+ (comment) @_comment
+ [
+ (interpreted_string_literal (interpreted_string_literal_content) @injection.content)
+ (raw_string_literal (raw_string_literal_content) @injection.content)
+ ]
+ )))
+ ]
+ (#match? @_comment "^\\/\\*\\s*lua\\s*\\*\\/") ; /* lua */ or /*lua*/
(#set! injection.language "lua")
)
; INJECT BASH
(
- [
- ; var, const or short declaration of raw or interpreted string literal
- ((comment) @comment
- .
- (expression_list
- [
- (interpreted_string_literal)
- (raw_string_literal)
- ] @injection.content
- ))
-
- ; when passing as a literal element (to struct field eg.)
- ((comment) @comment
- .
- (literal_element
- [
- (interpreted_string_literal)
- (raw_string_literal)
- ] @injection.content
- ))
-
- ; when passing as a function parameter
- ((comment) @comment
- .
- [
- (interpreted_string_literal)
- (raw_string_literal)
- ] @injection.content)
- ]
+ [
+ (const_spec
+ name: (identifier)
+ "="
+ (comment) @_comment
+ value: (expression_list
+ [
+ (interpreted_string_literal (interpreted_string_literal_content) @injection.content)
+ (raw_string_literal (raw_string_literal_content) @injection.content)
+ ]
+ ))
+
+ (var_spec
+ name: (identifier)
+ "="
+ (comment) @_comment
+ value: (expression_list
+ [
+ (interpreted_string_literal (interpreted_string_literal_content) @injection.content)
+ (raw_string_literal (raw_string_literal_content) @injection.content)
+ ]
+ ))
+
+ (assignment_statement
+ left: (expression_list)
+ "="
+ (comment) @_comment
+ right: (expression_list
+ [
+ (interpreted_string_literal (interpreted_string_literal_content) @injection.content)
+ (raw_string_literal (raw_string_literal_content) @injection.content)
+ ]
+ ))
+
+ (short_var_declaration
+ left: (expression_list)
+ ":="
+ (comment) @_comment
+ right: (expression_list
+ [
+ (interpreted_string_literal (interpreted_string_literal_content) @injection.content)
+ (raw_string_literal (raw_string_literal_content) @injection.content)
+ ]
+ ))
- (#match? @comment "^\\/\\*\\s*bash\\s*\\*\\/") ; /* bash */ or /*bash*/
+ (composite_literal
+ body: (literal_value
+ (keyed_element
+ (comment) @_comment
+ value: (literal_element
+ [
+ (interpreted_string_literal (interpreted_string_literal_content) @injection.content)
+ (raw_string_literal (raw_string_literal_content) @injection.content)
+ ]
+ ))))
+
+ (expression_statement
+ (call_expression
+ (argument_list
+ (comment) @_comment
+ [
+ (interpreted_string_literal (interpreted_string_literal_content) @injection.content)
+ (raw_string_literal (raw_string_literal_content) @injection.content)
+ ]
+ )))
+ ]
+ (#match? @_comment "^\\/\\*\\s*bash\\s*\\*\\/") ; /* bash */ or /*bash*/
(#set! injection.language "bash")
)
; INJECT CSV
(
- [
- ; var, const or short declaration of raw or interpreted string literal
- ((comment) @comment
- .
- (expression_list
- [
- (interpreted_string_literal)
- (raw_string_literal)
- ] @injection.content
- ))
-
- ; when passing as a literal element (to struct field eg.)
- ((comment) @comment
- .
- (literal_element
- [
- (interpreted_string_literal)
- (raw_string_literal)
- ] @injection.content
- ))
-
- ; when passing as a function parameter
- ((comment) @comment
- .
- [
- (interpreted_string_literal)
- (raw_string_literal)
- ] @injection.content)
- ]
+ [
+ (const_spec
+ name: (identifier)
+ "="
+ (comment) @_comment
+ value: (expression_list
+ [
+ (interpreted_string_literal (interpreted_string_literal_content) @injection.content)
+ (raw_string_literal (raw_string_literal_content) @injection.content)
+ ]
+ ))
- (#match? @comment "^\\/\\*\\s*csv\\s*\\*\\/") ; /* csv */ or /*csv*/
+ (var_spec
+ name: (identifier)
+ "="
+ (comment) @_comment
+ value: (expression_list
+ [
+ (interpreted_string_literal (interpreted_string_literal_content) @injection.content)
+ (raw_string_literal (raw_string_literal_content) @injection.content)
+ ]
+ ))
+
+ (assignment_statement
+ left: (expression_list)
+ "="
+ (comment) @_comment
+ right: (expression_list
+ [
+ (interpreted_string_literal (interpreted_string_literal_content) @injection.content)
+ (raw_string_literal (raw_string_literal_content) @injection.content)
+ ]
+ ))
+
+ (short_var_declaration
+ left: (expression_list)
+ ":="
+ (comment) @_comment
+ right: (expression_list
+ [
+ (interpreted_string_literal (interpreted_string_literal_content) @injection.content)
+ (raw_string_literal (raw_string_literal_content) @injection.content)
+ ]
+ ))
+
+ ((comment) @_comment
+ value: (literal_element
+ [
+ (interpreted_string_literal (interpreted_string_literal_content) @injection.content)
+ (raw_string_literal (raw_string_literal_content) @injection.content)
+ ]
+ ))
+
+ (argument_list
+ (comment) @_comment
+ [
+ (interpreted_string_literal (interpreted_string_literal_content) @injection.content)
+ (raw_string_literal (raw_string_literal_content) @injection.content)
+ ]
+ )
+ ]
+ (#match? @_comment "^\\/\\*\\s*csv\\s*\\*\\/") ; /* csv */ or /*csv */
(#set! injection.language "csv")
)
@@ -283,7 +283,6 @@ pub fn init(languages: Arc<LanguageRegistry>, fs: Arc<dyn Fs>, node: NodeRuntime
"CSS",
"ERB",
"HTML+ERB",
- "HTML/ERB",
"HEEX",
"HTML",
"JavaScript",
@@ -23,11 +23,12 @@ use serde::{Deserialize, Serialize};
use serde_json::{Value, json};
use settings::Settings;
use smol::lock::OnceCell;
-use std::cmp::Ordering;
+use std::cmp::{Ordering, Reverse};
use std::env::consts;
use terminal::terminal_settings::TerminalSettings;
use util::command::new_smol_command;
use util::fs::{make_file_executable, remove_matching};
+use util::paths::PathStyle;
use util::rel_path::RelPath;
use http_client::github_download::{GithubBinaryMetadata, download_server_binary};
@@ -100,9 +101,41 @@ impl FromStr for TestRunner {
/// The problem with it is that Pyright adjusts the sort text based on previous resolutions (items for which we've issued `completion/resolve` call have their sortText adjusted),
/// which - long story short - makes completion items list non-stable. Pyright probably relies on VSCode's implementation detail.
/// see https://github.com/microsoft/pyright/blob/95ef4e103b9b2f129c9320427e51b73ea7cf78bd/packages/pyright-internal/src/languageService/completionProvider.ts#LL2873
+///
+/// upd 02.12.25:
+/// Decided to ignore Pyright's sortText() completely and to manually sort all entries
fn process_pyright_completions(items: &mut [lsp::CompletionItem]) {
for item in items {
- item.sort_text.take();
+ let is_dunder = item.label.starts_with("__") && item.label.ends_with("__");
+
+ let visibility_priority = if is_dunder {
+ '3'
+ } else if item.label.starts_with("__") {
+ '2' // private non-dunder
+ } else if item.label.starts_with('_') {
+ '1' // protected
+ } else {
+ '0' // public
+ };
+
+ // Kind priority within same visibility level
+ let kind_priority = match item.kind {
+ Some(lsp::CompletionItemKind::ENUM_MEMBER) => '0',
+ Some(lsp::CompletionItemKind::FIELD) => '1',
+ Some(lsp::CompletionItemKind::PROPERTY) => '2',
+ Some(lsp::CompletionItemKind::VARIABLE) => '3',
+ Some(lsp::CompletionItemKind::CONSTANT) => '4',
+ Some(lsp::CompletionItemKind::METHOD) => '5',
+ Some(lsp::CompletionItemKind::FUNCTION) => '5',
+ Some(lsp::CompletionItemKind::CLASS) => '6',
+ Some(lsp::CompletionItemKind::MODULE) => '7',
+ _ => '8',
+ };
+
+ item.sort_text = Some(format!(
+ "{}{}{}",
+ visibility_priority, kind_priority, item.label
+ ));
}
}
@@ -884,7 +917,7 @@ impl PythonContextProvider {
variables: &task::TaskVariables,
) -> Option<(VariableName, String)> {
let python_module_name =
- python_module_name_from_relative_path(variables.get(&VariableName::RelativeFile)?);
+ python_module_name_from_relative_path(variables.get(&VariableName::RelativeFile)?)?;
let unittest_class_name =
variables.get(&VariableName::Custom(Cow::Borrowed("_unittest_class_name")));
@@ -941,9 +974,10 @@ impl PythonContextProvider {
&self,
variables: &task::TaskVariables,
) -> Result<(VariableName, String)> {
- let python_module_name = python_module_name_from_relative_path(
- variables.get(&VariableName::RelativeFile).unwrap_or(""),
- );
+ let python_module_name = variables
+ .get(&VariableName::RelativeFile)
+ .and_then(|module| python_module_name_from_relative_path(module))
+ .unwrap_or_default();
let module_target = (PYTHON_MODULE_NAME_TASK_VARIABLE.clone(), python_module_name);
@@ -951,12 +985,15 @@ impl PythonContextProvider {
}
}
-fn python_module_name_from_relative_path(relative_path: &str) -> String {
- let path_with_dots = relative_path.replace('/', ".");
- path_with_dots
- .strip_suffix(".py")
- .unwrap_or(&path_with_dots)
- .to_string()
+fn python_module_name_from_relative_path(relative_path: &str) -> Option<String> {
+ let rel_path = RelPath::new(relative_path.as_ref(), PathStyle::local()).ok()?;
+ let path_with_dots = rel_path.display(PathStyle::Posix).replace('/', ".");
+ Some(
+ path_with_dots
+ .strip_suffix(".py")
+ .map(ToOwned::to_owned)
+ .unwrap_or(path_with_dots),
+ )
}
fn is_python_env_global(k: &PythonEnvironmentKind) -> bool {
@@ -1064,13 +1101,33 @@ fn get_venv_parent_dir(env: &PythonEnvironment) -> Option<PathBuf> {
venv.parent().map(|parent| parent.to_path_buf())
}
-fn wr_distance(wr: &PathBuf, venv: Option<&PathBuf>) -> usize {
+// How far is this venv from the root of our current project?
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord)]
+enum SubprojectDistance {
+ WithinSubproject(Reverse<usize>),
+ WithinWorktree(Reverse<usize>),
+ NotInWorktree,
+}
+
+fn wr_distance(
+ wr: &PathBuf,
+ subroot_relative_path: &RelPath,
+ venv: Option<&PathBuf>,
+) -> SubprojectDistance {
if let Some(venv) = venv
&& let Ok(p) = venv.strip_prefix(wr)
{
- p.components().count()
+ if subroot_relative_path.components().next().is_some()
+ && let Ok(distance) = p
+ .strip_prefix(subroot_relative_path.as_std_path())
+ .map(|p| p.components().count())
+ {
+ SubprojectDistance::WithinSubproject(Reverse(distance))
+ } else {
+ SubprojectDistance::WithinWorktree(Reverse(p.components().count()))
+ }
} else {
- usize::MAX
+ SubprojectDistance::NotInWorktree
}
}
@@ -1133,11 +1190,14 @@ impl ToolchainLister for PythonToolchainProvider {
});
// Compare project paths against worktree root
- let proj_ordering = || {
- let lhs_project = lhs.project.clone().or_else(|| get_venv_parent_dir(lhs));
- let rhs_project = rhs.project.clone().or_else(|| get_venv_parent_dir(rhs));
- wr_distance(&wr, lhs_project.as_ref()).cmp(&wr_distance(&wr, rhs_project.as_ref()))
- };
+ let proj_ordering =
+ || {
+ let lhs_project = lhs.project.clone().or_else(|| get_venv_parent_dir(lhs));
+ let rhs_project = rhs.project.clone().or_else(|| get_venv_parent_dir(rhs));
+ wr_distance(&wr, &subroot_relative_path, lhs_project.as_ref()).cmp(
+ &wr_distance(&wr, &subroot_relative_path, rhs_project.as_ref()),
+ )
+ };
// Compare environment priorities
let priority_ordering = || env_priority(lhs.kind).cmp(&env_priority(rhs.kind));
@@ -2311,6 +2371,8 @@ mod tests {
use settings::SettingsStore;
use std::num::NonZeroU32;
+ use crate::python::python_module_name_from_relative_path;
+
#[gpui::test]
async fn test_python_autoindent(cx: &mut TestAppContext) {
cx.executor().set_block_on_ticks(usize::MAX..=usize::MAX);
@@ -2439,4 +2501,35 @@ mod tests {
buffer
});
}
+
+ #[test]
+ fn test_python_module_name_from_relative_path() {
+ assert_eq!(
+ python_module_name_from_relative_path("foo/bar.py"),
+ Some("foo.bar".to_string())
+ );
+ assert_eq!(
+ python_module_name_from_relative_path("foo/bar"),
+ Some("foo.bar".to_string())
+ );
+ if cfg!(windows) {
+ assert_eq!(
+ python_module_name_from_relative_path("foo\\bar.py"),
+ Some("foo.bar".to_string())
+ );
+ assert_eq!(
+ python_module_name_from_relative_path("foo\\bar"),
+ Some("foo.bar".to_string())
+ );
+ } else {
+ assert_eq!(
+ python_module_name_from_relative_path("foo\\bar.py"),
+ Some("foo\\bar".to_string())
+ );
+ assert_eq!(
+ python_module_name_from_relative_path("foo\\bar"),
+ Some("foo\\bar".to_string())
+ );
+ }
+ }
}
@@ -5,7 +5,7 @@ use futures::StreamExt;
use gpui::{App, AppContext, AsyncApp, SharedString, Task};
use http_client::github::AssetKind;
use http_client::github::{GitHubLspBinaryVersion, latest_github_release};
-use http_client::github_download::fetch_github_binary_with_digest_check;
+use http_client::github_download::{GithubBinaryMetadata, download_server_binary};
pub use language::*;
use lsp::{InitializeParams, LanguageServerBinary};
use project::lsp_store::rust_analyzer_ext::CARGO_DIAGNOSTICS_SOURCE_NAME;
@@ -574,34 +574,64 @@ impl LspInstaller for RustLspAdapter {
AssetKind::Zip => destination_path.clone().join("rust-analyzer.exe"), // zip contains a .exe
};
- let metadata_path = destination_path.with_extension("metadata");
+ let binary = LanguageServerBinary {
+ path: server_path.clone(),
+ env: None,
+ arguments: Default::default(),
+ };
- let server_path_for_check = server_path.clone();
- fetch_github_binary_with_digest_check(
- &server_path,
- &metadata_path,
- expected_digest,
- &url,
- Self::GITHUB_ASSET_KIND,
- &destination_path,
- &*delegate.http_client(),
- || async move {
+ let metadata_path = destination_path.with_extension("metadata");
+ let metadata = GithubBinaryMetadata::read_from_file(&metadata_path)
+ .await
+ .ok();
+ if let Some(metadata) = metadata {
+ let validity_check = async || {
delegate
.try_exec(LanguageServerBinary {
- path: server_path_for_check,
+ path: server_path.clone(),
arguments: vec!["--version".into()],
env: None,
})
.await
.inspect_err(|err| {
- log::warn!("Unable to run rust-analyzer asset, redownloading: {err:#}")
+ log::warn!("Unable to run {server_path:?} asset, redownloading: {err:#}",)
})
- },
+ };
+ if let (Some(actual_digest), Some(expected_digest)) =
+ (&metadata.digest, &expected_digest)
+ {
+ if actual_digest == expected_digest {
+ if validity_check().await.is_ok() {
+ return Ok(binary);
+ }
+ } else {
+ log::info!(
+ "SHA-256 mismatch for {destination_path:?} asset, downloading new asset. Expected: {expected_digest}, Got: {actual_digest}"
+ );
+ }
+ } else if validity_check().await.is_ok() {
+ return Ok(binary);
+ }
+ }
+
+ download_server_binary(
+ &*delegate.http_client(),
+ &url,
+ expected_digest.as_deref(),
+ &destination_path,
+ Self::GITHUB_ASSET_KIND,
)
.await?;
-
make_file_executable(&server_path).await?;
remove_matching(&container_dir, |path| path != destination_path).await;
+ GithubBinaryMetadata::write_to_file(
+ &GithubBinaryMetadata {
+ metadata_version: 1,
+ digest: expected_digest,
+ },
+ &metadata_path,
+ )
+ .await?;
Ok(LanguageServerBinary {
path: server_path,
@@ -186,7 +186,6 @@ impl LspAdapter for TailwindLspAdapter {
(LanguageName::new("HEEX"), "phoenix-heex".to_string()),
(LanguageName::new("ERB"), "erb".to_string()),
(LanguageName::new("HTML+ERB"), "erb".to_string()),
- (LanguageName::new("HTML/ERB"), "erb".to_string()),
(LanguageName::new("PHP"), "php".to_string()),
(LanguageName::new("Vue.js"), "vue".to_string()),
])
@@ -1,3 +1,25 @@
((comment) @injection.content
(#set! injection.language "comment")
)
+
+; GitHub actions: JavaScript for workflow scripting (inline and block)
+(block_mapping
+ (block_mapping_pair
+ key: (flow_node) @_uses (#eq? @_uses "uses")
+ value: (flow_node) @_actions_ghs (#match? @_actions_ghs "^actions/github-script"))
+ (block_mapping_pair
+ key: (flow_node) @_with (#eq? @_with "with")
+ value: (block_node
+ (block_mapping
+ (block_mapping_pair
+ key: (flow_node) @_run (#eq? @_run "script")
+ value: [
+ (flow_node (plain_scalar (string_scalar) @injection.content))
+ (block_node (block_scalar) @injection.content)
+ ]
+ (#set! injection.language "javascript")
+ )
+ )
+ )
+ )
+)
@@ -98,6 +98,14 @@ impl Room {
self.room.connection_state()
}
+ pub fn name(&self) -> String {
+ self.room.name()
+ }
+
+ pub async fn sid(&self) -> String {
+ self.room.sid().await.to_string()
+ }
+
pub async fn publish_local_microphone_track(
&self,
user_name: String,
@@ -714,6 +714,14 @@ impl Room {
self.0.lock().token.clone()
}
+ pub fn name(&self) -> String {
+ "test_room".to_string()
+ }
+
+ pub async fn sid(&self) -> String {
+ "RM_test_session".to_string()
+ }
+
pub fn play_remote_audio_track(
&self,
_track: &RemoteAudioTrack,
@@ -1202,6 +1202,15 @@ impl Element for MarkdownElement {
builder.push_text(html, range.clone());
}
MarkdownEvent::InlineHtml => {
+ let html = &parsed_markdown.source[range.clone()];
+ if html.starts_with("<code>") {
+ builder.push_text_style(self.style.inline_code.clone());
+ continue;
+ }
+ if html.trim_end().starts_with("</code>") {
+ builder.pop_text_style();
+ continue;
+ }
builder.push_text(&parsed_markdown.source[range.clone()], range.clone());
}
MarkdownEvent::Rule => {
@@ -520,7 +520,6 @@ fn render_markdown_table(parsed: &ParsedMarkdownTable, cx: &mut RenderContext) -
.px_2()
.py_1()
.border_1()
- .size_full()
.border_color(cx.border_color)
.when(cell.is_header, |this| {
this.bg(cx.title_bar_background_color)
@@ -551,7 +550,6 @@ fn render_markdown_table(parsed: &ParsedMarkdownTable, cx: &mut RenderContext) -
let empty_cell = div()
.border_1()
- .size_full()
.border_color(cx.border_color)
.when(row_idx % 2 == 1, |this| this.bg(cx.panel_background_color));
@@ -560,7 +558,7 @@ fn render_markdown_table(parsed: &ParsedMarkdownTable, cx: &mut RenderContext) -
}
}
- cx.with_common_p(div())
+ cx.with_common_p(v_flex().items_start())
.when_some(parsed.caption.as_ref(), |this, caption| {
this.children(render_markdown_text(caption, cx))
})
@@ -153,3 +153,9 @@ pub(crate) mod m_2025_11_25 {
pub(crate) use settings::remove_context_server_source;
}
+
+pub(crate) mod m_2025_12_01 {
+ mod settings;
+
+ pub(crate) use settings::SETTINGS_PATTERNS;
+}
@@ -0,0 +1,55 @@
+use std::ops::Range;
+use tree_sitter::{Query, QueryMatch};
+
+use crate::MigrationPatterns;
+use crate::patterns::SETTINGS_NESTED_KEY_VALUE_PATTERN;
+
+pub const SETTINGS_PATTERNS: MigrationPatterns = &[(
+ SETTINGS_NESTED_KEY_VALUE_PATTERN,
+ rename_enable_preview_from_code_navigation_setting,
+)];
+
+fn rename_enable_preview_from_code_navigation_setting(
+ contents: &str,
+ mat: &QueryMatch,
+ query: &Query,
+) -> Option<(Range<usize>, String)> {
+ if !is_enable_preview_from_code_navigation(contents, mat, query) {
+ return None;
+ }
+
+ let setting_name_ix = query.capture_index_for_name("setting_name")?;
+ let setting_name_range = mat
+ .nodes_for_capture_index(setting_name_ix)
+ .next()?
+ .byte_range();
+
+ Some((
+ setting_name_range,
+ "enable_keep_preview_on_code_navigation".to_string(),
+ ))
+}
+
+fn is_enable_preview_from_code_navigation(contents: &str, mat: &QueryMatch, query: &Query) -> bool {
+ let parent_key_ix = match query.capture_index_for_name("parent_key") {
+ Some(ix) => ix,
+ None => return false,
+ };
+ let parent_range = match mat.nodes_for_capture_index(parent_key_ix).next() {
+ Some(node) => node.byte_range(),
+ None => return false,
+ };
+ if contents.get(parent_range) != Some("preview_tabs") {
+ return false;
+ }
+
+ let setting_name_ix = match query.capture_index_for_name("setting_name") {
+ Some(ix) => ix,
+ None => return false,
+ };
+ let setting_name_range = match mat.nodes_for_capture_index(setting_name_ix).next() {
+ Some(node) => node.byte_range(),
+ None => return false,
+ };
+ contents.get(setting_name_range) == Some("enable_preview_from_code_navigation")
+}
@@ -219,6 +219,10 @@ pub fn migrate_settings(text: &str) -> Result<Option<String>> {
migrations::m_2025_11_12::SETTINGS_PATTERNS,
&SETTINGS_QUERY_2025_11_12,
),
+ MigrationType::TreeSitter(
+ migrations::m_2025_12_01::SETTINGS_PATTERNS,
+ &SETTINGS_QUERY_2025_12_01,
+ ),
MigrationType::TreeSitter(
migrations::m_2025_11_20::SETTINGS_PATTERNS,
&SETTINGS_QUERY_2025_11_20,
@@ -346,6 +350,10 @@ define_query!(
SETTINGS_QUERY_2025_11_12,
migrations::m_2025_11_12::SETTINGS_PATTERNS
);
+define_query!(
+ SETTINGS_QUERY_2025_12_01,
+ migrations::m_2025_12_01::SETTINGS_PATTERNS
+);
define_query!(
SETTINGS_QUERY_2025_11_20,
migrations::m_2025_11_20::SETTINGS_PATTERNS
@@ -2262,6 +2270,54 @@ mod tests {
);
}
+ #[test]
+ fn test_remove_context_server_source() {
+ assert_migrate_settings(
+ &r#"
+ {
+ "context_servers": {
+ "extension_server": {
+ "source": "extension",
+ "settings": {
+ "foo": "bar"
+ }
+ },
+ "custom_server": {
+ "source": "custom",
+ "command": "foo",
+ "args": ["bar"],
+ "env": {
+ "FOO": "BAR"
+ }
+ },
+ }
+ }
+ "#
+ .unindent(),
+ Some(
+ &r#"
+ {
+ "context_servers": {
+ "extension_server": {
+ "settings": {
+ "foo": "bar"
+ }
+ },
+ "custom_server": {
+ "command": "foo",
+ "args": ["bar"],
+ "env": {
+ "FOO": "BAR"
+ }
+ },
+ }
+ }
+ "#
+ .unindent(),
+ ),
+ );
+ }
+
#[test]
fn test_project_panel_open_file_on_paste_migration() {
assert_migrate_settings(
@@ -2308,25 +2364,14 @@ mod tests {
}
#[test]
- fn test_remove_context_server_source() {
+ fn test_enable_preview_from_code_navigation_migration() {
assert_migrate_settings(
&r#"
{
- "context_servers": {
- "extension_server": {
- "source": "extension",
- "settings": {
- "foo": "bar"
- }
- },
- "custom_server": {
- "source": "custom",
- "command": "foo",
- "args": ["bar"],
- "env": {
- "FOO": "BAR"
- }
- },
+ "other_setting_1": 1,
+ "preview_tabs": {
+ "other_setting_2": 2,
+ "enable_preview_from_code_navigation": false
}
}
"#
@@ -2334,19 +2379,35 @@ mod tests {
Some(
&r#"
{
- "context_servers": {
- "extension_server": {
- "settings": {
- "foo": "bar"
- }
- },
- "custom_server": {
- "command": "foo",
- "args": ["bar"],
- "env": {
- "FOO": "BAR"
- }
- },
+ "other_setting_1": 1,
+ "preview_tabs": {
+ "other_setting_2": 2,
+ "enable_keep_preview_on_code_navigation": false
+ }
+ }
+ "#
+ .unindent(),
+ ),
+ );
+
+ assert_migrate_settings(
+ &r#"
+ {
+ "other_setting_1": 1,
+ "preview_tabs": {
+ "other_setting_2": 2,
+ "enable_preview_from_code_navigation": true
+ }
+ }
+ "#
+ .unindent(),
+ Some(
+ &r#"
+ {
+ "other_setting_1": 1,
+ "preview_tabs": {
+ "other_setting_2": 2,
+ "enable_keep_preview_on_code_navigation": true
}
}
"#
@@ -43,7 +43,7 @@ use std::{
io,
iter::{self, FromIterator},
mem,
- ops::{self, AddAssign, Range, RangeBounds, Sub, SubAssign},
+ ops::{self, AddAssign, ControlFlow, Range, RangeBounds, Sub, SubAssign},
rc::Rc,
str,
sync::Arc,
@@ -2283,6 +2283,7 @@ impl MultiBuffer {
cx: &mut Context<Self>,
) {
use language::BufferEvent;
+ let buffer_id = buffer.read(cx).remote_id();
cx.emit(match event {
BufferEvent::Edited => Event::Edited {
edited_buffer: Some(buffer),
@@ -2291,8 +2292,8 @@ impl MultiBuffer {
BufferEvent::Saved => Event::Saved,
BufferEvent::FileHandleChanged => Event::FileHandleChanged,
BufferEvent::Reloaded => Event::Reloaded,
- BufferEvent::LanguageChanged => Event::LanguageChanged(buffer.read(cx).remote_id()),
- BufferEvent::Reparsed => Event::Reparsed(buffer.read(cx).remote_id()),
+ BufferEvent::LanguageChanged => Event::LanguageChanged(buffer_id),
+ BufferEvent::Reparsed => Event::Reparsed(buffer_id),
BufferEvent::DiagnosticsUpdated => Event::DiagnosticsUpdated,
BufferEvent::CapabilityChanged => {
self.capability = buffer.read(cx).capability();
@@ -4617,7 +4618,24 @@ impl MultiBufferSnapshot {
cx: &App,
) -> BTreeMap<MultiBufferRow, IndentSize> {
let mut result = BTreeMap::new();
+ self.suggested_indents_callback(
+ rows,
+ |row, indent| {
+ result.insert(row, indent);
+ ControlFlow::Continue(())
+ },
+ cx,
+ );
+ result
+ }
+ // move this to be a generator once those are a thing
+ pub fn suggested_indents_callback(
+ &self,
+ rows: impl IntoIterator<Item = u32>,
+ mut cb: impl FnMut(MultiBufferRow, IndentSize) -> ControlFlow<()>,
+ cx: &App,
+ ) {
let mut rows_for_excerpt = Vec::new();
let mut cursor = self.cursor::<Point, Point>();
let mut rows = rows.into_iter().peekable();
@@ -4661,16 +4679,17 @@ impl MultiBufferSnapshot {
let buffer_indents = region
.buffer
.suggested_indents(buffer_rows, single_indent_size);
- let multibuffer_indents = buffer_indents.into_iter().map(|(row, indent)| {
- (
+ for (row, indent) in buffer_indents {
+ if cb(
MultiBufferRow(start_multibuffer_row + row - start_buffer_row),
indent,
)
- });
- result.extend(multibuffer_indents);
+ .is_break()
+ {
+ return;
+ }
+ }
}
-
- result
}
pub fn indent_size_for_line(&self, row: MultiBufferRow) -> IndentSize {
@@ -2,7 +2,8 @@ use anyhow::Context as _;
use collections::{HashMap, HashSet};
use fs::Fs;
use gpui::{AsyncApp, Entity};
-use language::{Buffer, Diff, language_settings::language_settings};
+use language::language_settings::PrettierSettings;
+use language::{Buffer, Diff, Language, language_settings::language_settings};
use lsp::{LanguageServer, LanguageServerId};
use node_runtime::NodeRuntime;
use paths::default_prettier_dir;
@@ -349,7 +350,7 @@ impl Prettier {
Self::Real(local) => {
let params = buffer
.update(cx, |buffer, cx| {
- let buffer_language = buffer.language();
+ let buffer_language = buffer.language().map(|language| language.as_ref());
let language_settings = language_settings(buffer_language.map(|l| l.name()), buffer.file(), cx);
let prettier_settings = &language_settings.prettier;
anyhow::ensure!(
@@ -449,15 +450,7 @@ impl Prettier {
})
.collect();
- let mut prettier_parser = prettier_settings.parser.as_deref();
- if buffer_path.is_none() {
- prettier_parser = prettier_parser.or_else(|| buffer_language.and_then(|language| language.prettier_parser_name()));
- if prettier_parser.is_none() {
- log::error!("Formatting unsaved file with prettier failed. No prettier parser configured for language {buffer_language:?}");
- anyhow::bail!("Cannot determine prettier parser for unsaved file");
- }
-
- }
+ let parser = prettier_parser_name(buffer_path.as_deref(), buffer_language, prettier_settings).context("getting prettier parser")?;
let ignore_path = ignore_dir.and_then(|dir| {
let ignore_file = dir.join(".prettierignore");
@@ -475,15 +468,15 @@ impl Prettier {
anyhow::Ok(FormatParams {
text: buffer.text(),
options: FormatOptions {
- parser: prettier_parser.map(ToOwned::to_owned),
- plugins,
path: buffer_path,
+ parser,
+ plugins,
prettier_options,
ignore_path,
},
})
- })?
- .context("building prettier request")?;
+ })?
+ .context("building prettier request")?;
let response = local
.server
@@ -503,7 +496,26 @@ impl Prettier {
{
Some("rust") => anyhow::bail!("prettier does not support Rust"),
Some(_other) => {
- let formatted_text = buffer.text() + FORMAT_SUFFIX;
+ let mut formatted_text = buffer.text() + FORMAT_SUFFIX;
+
+ let buffer_language =
+ buffer.language().map(|language| language.as_ref());
+ let language_settings = language_settings(
+ buffer_language.map(|l| l.name()),
+ buffer.file(),
+ cx,
+ );
+ let prettier_settings = &language_settings.prettier;
+ let parser = prettier_parser_name(
+ buffer_path.as_deref(),
+ buffer_language,
+ prettier_settings,
+ )?;
+
+ if let Some(parser) = parser {
+ formatted_text = format!("{formatted_text}\n{parser}");
+ }
+
Ok(buffer.diff(formatted_text, cx))
}
None => panic!("Should not format buffer without a language with prettier"),
@@ -551,6 +563,40 @@ impl Prettier {
}
}
+fn prettier_parser_name(
+ buffer_path: Option<&Path>,
+ buffer_language: Option<&Language>,
+ prettier_settings: &PrettierSettings,
+) -> anyhow::Result<Option<String>> {
+ let parser = if buffer_path.is_none() {
+ let parser = prettier_settings
+ .parser
+ .as_deref()
+ .or_else(|| buffer_language.and_then(|language| language.prettier_parser_name()));
+ if parser.is_none() {
+ log::error!(
+ "Formatting unsaved file with prettier failed. No prettier parser configured for language {buffer_language:?}"
+ );
+ anyhow::bail!("Cannot determine prettier parser for unsaved file");
+ }
+ parser
+ } else if let (Some(buffer_language), Some(buffer_path)) = (buffer_language, buffer_path)
+ && buffer_path.extension().is_some_and(|extension| {
+ !buffer_language
+ .config()
+ .matcher
+ .path_suffixes
+ .contains(&extension.to_string_lossy().into_owned())
+ })
+ {
+ buffer_language.prettier_parser_name()
+ } else {
+ prettier_settings.parser.as_deref()
+ };
+
+ Ok(parser.map(ToOwned::to_owned))
+}
+
async fn has_prettier_in_node_modules(fs: &dyn Fs, path: &Path) -> anyhow::Result<bool> {
let possible_node_modules_location = path.join("node_modules").join(PRETTIER_PACKAGE_NAME);
if let Some(node_modules_location_metadata) = fs
@@ -453,7 +453,9 @@ impl AgentServerStore {
.clone()
.and_then(|settings| settings.custom_command()),
http_client: http_client.clone(),
- is_remote: downstream_client.is_some(),
+ no_browser: downstream_client
+ .as_ref()
+ .is_some_and(|(_, client)| !client.has_wsl_interop()),
}),
);
self.external_agents.insert(
@@ -1355,7 +1357,7 @@ struct LocalCodex {
project_environment: Entity<ProjectEnvironment>,
http_client: Arc<dyn HttpClient>,
custom_command: Option<AgentServerCommand>,
- is_remote: bool,
+ no_browser: bool,
}
impl ExternalAgentServer for LocalCodex {
@@ -1375,7 +1377,7 @@ impl ExternalAgentServer for LocalCodex {
.map(|root_dir| Path::new(root_dir))
.unwrap_or(paths::home_dir())
.into();
- let is_remote = self.is_remote;
+ let no_browser = self.no_browser;
cx.spawn(async move |cx| {
let mut env = project_environment
@@ -1388,7 +1390,7 @@ impl ExternalAgentServer for LocalCodex {
})?
.await
.unwrap_or_default();
- if is_remote {
+ if no_browser {
env.insert("NO_BROWSER".to_owned(), "1".to_owned());
}
@@ -472,6 +472,8 @@ impl GitStore {
client.add_entity_request_handler(Self::handle_change_branch);
client.add_entity_request_handler(Self::handle_create_branch);
client.add_entity_request_handler(Self::handle_rename_branch);
+ client.add_entity_request_handler(Self::handle_create_remote);
+ client.add_entity_request_handler(Self::handle_remove_remote);
client.add_entity_request_handler(Self::handle_delete_branch);
client.add_entity_request_handler(Self::handle_git_init);
client.add_entity_request_handler(Self::handle_push);
@@ -1130,6 +1132,7 @@ impl GitStore {
RepositoryState::Local(LocalRepositoryState { backend, .. }) => {
let origin_url = backend
.remote_url(&remote)
+ .await
.with_context(|| format!("remote \"{remote}\" not found"))?;
let sha = backend.head_sha().await.context("reading HEAD SHA")?;
@@ -2273,6 +2276,25 @@ impl GitStore {
Ok(proto::Ack {})
}
+ async fn handle_create_remote(
+ this: Entity<Self>,
+ envelope: TypedEnvelope<proto::GitCreateRemote>,
+ mut cx: AsyncApp,
+ ) -> Result<proto::Ack> {
+ let repository_id = RepositoryId::from_proto(envelope.payload.repository_id);
+ let repository_handle = Self::repository_for_request(&this, repository_id, &mut cx)?;
+ let remote_name = envelope.payload.remote_name;
+ let remote_url = envelope.payload.remote_url;
+
+ repository_handle
+ .update(&mut cx, |repository_handle, _| {
+ repository_handle.create_remote(remote_name, remote_url)
+ })?
+ .await??;
+
+ Ok(proto::Ack {})
+ }
+
async fn handle_delete_branch(
this: Entity<Self>,
envelope: TypedEnvelope<proto::GitDeleteBranch>,
@@ -2291,6 +2313,24 @@ impl GitStore {
Ok(proto::Ack {})
}
+ async fn handle_remove_remote(
+ this: Entity<Self>,
+ envelope: TypedEnvelope<proto::GitRemoveRemote>,
+ mut cx: AsyncApp,
+ ) -> Result<proto::Ack> {
+ let repository_id = RepositoryId::from_proto(envelope.payload.repository_id);
+ let repository_handle = Self::repository_for_request(&this, repository_id, &mut cx)?;
+ let remote_name = envelope.payload.remote_name;
+
+ repository_handle
+ .update(&mut cx, |repository_handle, _| {
+ repository_handle.remove_remote(remote_name)
+ })?
+ .await??;
+
+ Ok(proto::Ack {})
+ }
+
async fn handle_show(
this: Entity<Self>,
envelope: TypedEnvelope<proto::GitShow>,
@@ -4864,6 +4904,61 @@ impl Repository {
)
}
+ pub fn create_remote(
+ &mut self,
+ remote_name: String,
+ remote_url: String,
+ ) -> oneshot::Receiver<Result<()>> {
+ let id = self.id;
+ self.send_job(
+ Some(format!("git remote add {remote_name} {remote_url}").into()),
+ move |repo, _cx| async move {
+ match repo {
+ RepositoryState::Local(LocalRepositoryState { backend, .. }) => {
+ backend.create_remote(remote_name, remote_url).await
+ }
+ RepositoryState::Remote(RemoteRepositoryState { project_id, client }) => {
+ client
+ .request(proto::GitCreateRemote {
+ project_id: project_id.0,
+ repository_id: id.to_proto(),
+ remote_name,
+ remote_url,
+ })
+ .await?;
+
+ Ok(())
+ }
+ }
+ },
+ )
+ }
+
+ pub fn remove_remote(&mut self, remote_name: String) -> oneshot::Receiver<Result<()>> {
+ let id = self.id;
+ self.send_job(
+            Some(format!("git remote remove {remote_name}").into()),
+ move |repo, _cx| async move {
+ match repo {
+ RepositoryState::Local(LocalRepositoryState { backend, .. }) => {
+ backend.remove_remote(remote_name).await
+ }
+ RepositoryState::Remote(RemoteRepositoryState { project_id, client }) => {
+ client
+ .request(proto::GitRemoveRemote {
+ project_id: project_id.0,
+ repository_id: id.to_proto(),
+ remote_name,
+ })
+ .await?;
+
+ Ok(())
+ }
+ }
+ },
+ )
+ }
+
pub fn get_remotes(
&mut self,
branch_name: Option<String>,
@@ -4901,7 +4996,7 @@ impl Repository {
let remotes = response
.remotes
.into_iter()
- .map(|remotes| git::repository::Remote {
+ .map(|remotes| Remote {
name: remotes.name.into(),
})
.collect();
@@ -5447,7 +5542,8 @@ impl Repository {
git_hosting_providers::register_additional_providers(
git_hosting_provider_registry,
state.backend.clone(),
- );
+ )
+ .await;
}
let state = RepositoryState::Local(state);
let mut jobs = VecDeque::new();
@@ -6052,8 +6148,8 @@ async fn compute_snapshot(
}
// Used by edit prediction data collection
- let remote_origin_url = backend.remote_url("origin");
- let remote_upstream_url = backend.remote_url("upstream");
+ let remote_origin_url = backend.remote_url("origin").await;
+ let remote_upstream_url = backend.remote_url("upstream").await;
let snapshot = RepositorySnapshot {
id,
@@ -14,7 +14,7 @@ use client::proto::{self, PeerId};
use clock::Global;
use collections::{HashMap, HashSet};
use futures::future;
-use gpui::{App, AsyncApp, Entity, Task};
+use gpui::{App, AsyncApp, Entity, SharedString, Task};
use language::{
Anchor, Bias, Buffer, BufferSnapshot, CachedLspAdapter, CharKind, CharScopeContext,
OffsetRangeExt, PointUtf16, ToOffset, ToPointUtf16, Transaction, Unclipped,
@@ -26,8 +26,8 @@ use language::{
use lsp::{
AdapterServerCapabilities, CodeActionKind, CodeActionOptions, CodeDescription,
CompletionContext, CompletionListItemDefaultsEditRange, CompletionTriggerKind,
- DiagnosticServerCapabilities, DocumentHighlightKind, LanguageServer, LanguageServerId,
- LinkedEditingRangeServerCapabilities, OneOf, RenameOptions, ServerCapabilities,
+ DocumentHighlightKind, LanguageServer, LanguageServerId, LinkedEditingRangeServerCapabilities,
+ OneOf, RenameOptions, ServerCapabilities,
};
use serde_json::Value;
use signature_help::{lsp_to_proto_signature, proto_to_lsp_signature};
@@ -265,8 +265,9 @@ pub(crate) struct LinkedEditingRange {
pub(crate) struct GetDocumentDiagnostics {
/// We cannot blindly rely on server's capabilities.diagnostic_provider, as they're a singular field, whereas
/// a server can register multiple diagnostic providers post-mortem.
- pub dynamic_caps: DiagnosticServerCapabilities,
- pub previous_result_id: Option<String>,
+ pub registration_id: Option<SharedString>,
+ pub identifier: Option<String>,
+ pub previous_result_id: Option<SharedString>,
}
#[async_trait(?Send)]
@@ -3755,15 +3756,16 @@ impl GetDocumentDiagnostics {
.into_iter()
.filter_map(|diagnostics| {
Some(LspPullDiagnostics::Response {
+ registration_id: diagnostics.registration_id.map(SharedString::from),
server_id: LanguageServerId::from_proto(diagnostics.server_id),
uri: lsp::Uri::from_str(diagnostics.uri.as_str()).log_err()?,
diagnostics: if diagnostics.changed {
PulledDiagnostics::Unchanged {
- result_id: diagnostics.result_id?,
+ result_id: SharedString::new(diagnostics.result_id?),
}
} else {
PulledDiagnostics::Changed {
- result_id: diagnostics.result_id,
+ result_id: diagnostics.result_id.map(SharedString::new),
diagnostics: diagnostics
.diagnostics
.into_iter()
@@ -3927,6 +3929,7 @@ impl GetDocumentDiagnostics {
pub fn deserialize_workspace_diagnostics_report(
report: lsp::WorkspaceDiagnosticReportResult,
server_id: LanguageServerId,
+ registration_id: Option<SharedString>,
) -> Vec<WorkspaceLspPullDiagnostics> {
let mut pulled_diagnostics = HashMap::default();
match report {
@@ -3938,6 +3941,7 @@ impl GetDocumentDiagnostics {
&mut pulled_diagnostics,
server_id,
report,
+ registration_id.clone(),
)
}
lsp::WorkspaceDocumentDiagnosticReport::Unchanged(report) => {
@@ -3945,6 +3949,7 @@ impl GetDocumentDiagnostics {
&mut pulled_diagnostics,
server_id,
report,
+ registration_id.clone(),
)
}
}
@@ -3960,6 +3965,7 @@ impl GetDocumentDiagnostics {
&mut pulled_diagnostics,
server_id,
report,
+ registration_id.clone(),
)
}
lsp::WorkspaceDocumentDiagnosticReport::Unchanged(report) => {
@@ -3967,6 +3973,7 @@ impl GetDocumentDiagnostics {
&mut pulled_diagnostics,
server_id,
report,
+ registration_id.clone(),
)
}
}
@@ -3987,6 +3994,7 @@ fn process_full_workspace_diagnostics_report(
diagnostics: &mut HashMap<lsp::Uri, WorkspaceLspPullDiagnostics>,
server_id: LanguageServerId,
report: lsp::WorkspaceFullDocumentDiagnosticReport,
+ registration_id: Option<SharedString>,
) {
let mut new_diagnostics = HashMap::default();
process_full_diagnostics_report(
@@ -3994,6 +4002,7 @@ fn process_full_workspace_diagnostics_report(
server_id,
report.uri,
report.full_document_diagnostic_report,
+ registration_id,
);
diagnostics.extend(new_diagnostics.into_iter().map(|(uri, diagnostics)| {
(
@@ -4010,6 +4019,7 @@ fn process_unchanged_workspace_diagnostics_report(
diagnostics: &mut HashMap<lsp::Uri, WorkspaceLspPullDiagnostics>,
server_id: LanguageServerId,
report: lsp::WorkspaceUnchangedDocumentDiagnosticReport,
+ registration_id: Option<SharedString>,
) {
let mut new_diagnostics = HashMap::default();
process_unchanged_diagnostics_report(
@@ -4017,6 +4027,7 @@ fn process_unchanged_workspace_diagnostics_report(
server_id,
report.uri,
report.unchanged_document_diagnostic_report,
+ registration_id,
);
diagnostics.extend(new_diagnostics.into_iter().map(|(uri, diagnostics)| {
(
@@ -4050,19 +4061,12 @@ impl LspCommand for GetDocumentDiagnostics {
_: &Arc<LanguageServer>,
_: &App,
) -> Result<lsp::DocumentDiagnosticParams> {
- let identifier = match &self.dynamic_caps {
- lsp::DiagnosticServerCapabilities::Options(options) => options.identifier.clone(),
- lsp::DiagnosticServerCapabilities::RegistrationOptions(options) => {
- options.diagnostic_options.identifier.clone()
- }
- };
-
Ok(lsp::DocumentDiagnosticParams {
text_document: lsp::TextDocumentIdentifier {
uri: file_path_to_lsp_url(path)?,
},
- identifier,
- previous_result_id: self.previous_result_id.clone(),
+ identifier: self.identifier.clone(),
+ previous_result_id: self.previous_result_id.clone().map(|id| id.to_string()),
partial_result_params: Default::default(),
work_done_progress_params: Default::default(),
})
@@ -4097,6 +4101,7 @@ impl LspCommand for GetDocumentDiagnostics {
&mut pulled_diagnostics,
server_id,
related_documents,
+ self.registration_id.clone(),
);
}
process_full_diagnostics_report(
@@ -4104,6 +4109,7 @@ impl LspCommand for GetDocumentDiagnostics {
server_id,
url,
report.full_document_diagnostic_report,
+ self.registration_id,
);
}
lsp::DocumentDiagnosticReport::Unchanged(report) => {
@@ -4112,6 +4118,7 @@ impl LspCommand for GetDocumentDiagnostics {
&mut pulled_diagnostics,
server_id,
related_documents,
+ self.registration_id.clone(),
);
}
process_unchanged_diagnostics_report(
@@ -4119,6 +4126,7 @@ impl LspCommand for GetDocumentDiagnostics {
server_id,
url,
report.unchanged_document_diagnostic_report,
+ self.registration_id,
);
}
},
@@ -4128,6 +4136,7 @@ impl LspCommand for GetDocumentDiagnostics {
&mut pulled_diagnostics,
server_id,
related_documents,
+ self.registration_id,
);
}
}
@@ -4170,6 +4179,7 @@ impl LspCommand for GetDocumentDiagnostics {
server_id,
uri,
diagnostics,
+ registration_id,
} => {
let mut changed = false;
let (diagnostics, result_id) = match diagnostics {
@@ -4184,7 +4194,7 @@ impl LspCommand for GetDocumentDiagnostics {
};
Some(proto::PulledDiagnostics {
changed,
- result_id,
+ result_id: result_id.map(|id| id.to_string()),
uri: uri.to_string(),
server_id: server_id.to_proto(),
diagnostics: diagnostics
@@ -4195,6 +4205,7 @@ impl LspCommand for GetDocumentDiagnostics {
.log_err()
})
.collect(),
+ registration_id: registration_id.as_ref().map(ToString::to_string),
})
}
})
@@ -4365,14 +4376,25 @@ fn process_related_documents(
diagnostics: &mut HashMap<lsp::Uri, LspPullDiagnostics>,
server_id: LanguageServerId,
documents: impl IntoIterator<Item = (lsp::Uri, lsp::DocumentDiagnosticReportKind)>,
+ registration_id: Option<SharedString>,
) {
for (url, report_kind) in documents {
match report_kind {
- lsp::DocumentDiagnosticReportKind::Full(report) => {
- process_full_diagnostics_report(diagnostics, server_id, url, report)
- }
+ lsp::DocumentDiagnosticReportKind::Full(report) => process_full_diagnostics_report(
+ diagnostics,
+ server_id,
+ url,
+ report,
+ registration_id.clone(),
+ ),
lsp::DocumentDiagnosticReportKind::Unchanged(report) => {
- process_unchanged_diagnostics_report(diagnostics, server_id, url, report)
+ process_unchanged_diagnostics_report(
+ diagnostics,
+ server_id,
+ url,
+ report,
+ registration_id.clone(),
+ )
}
}
}
@@ -4383,8 +4405,9 @@ fn process_unchanged_diagnostics_report(
server_id: LanguageServerId,
uri: lsp::Uri,
report: lsp::UnchangedDocumentDiagnosticReport,
+ registration_id: Option<SharedString>,
) {
- let result_id = report.result_id;
+ let result_id = SharedString::new(report.result_id);
match diagnostics.entry(uri.clone()) {
hash_map::Entry::Occupied(mut o) => match o.get_mut() {
LspPullDiagnostics::Default => {
@@ -4392,12 +4415,14 @@ fn process_unchanged_diagnostics_report(
server_id,
uri,
diagnostics: PulledDiagnostics::Unchanged { result_id },
+ registration_id,
});
}
LspPullDiagnostics::Response {
server_id: existing_server_id,
uri: existing_uri,
diagnostics: existing_diagnostics,
+ ..
} => {
if server_id != *existing_server_id || &uri != existing_uri {
debug_panic!(
@@ -4417,6 +4442,7 @@ fn process_unchanged_diagnostics_report(
server_id,
uri,
diagnostics: PulledDiagnostics::Unchanged { result_id },
+ registration_id,
});
}
}
@@ -4427,8 +4453,9 @@ fn process_full_diagnostics_report(
server_id: LanguageServerId,
uri: lsp::Uri,
report: lsp::FullDocumentDiagnosticReport,
+ registration_id: Option<SharedString>,
) {
- let result_id = report.result_id;
+ let result_id = report.result_id.map(SharedString::new);
match diagnostics.entry(uri.clone()) {
hash_map::Entry::Occupied(mut o) => match o.get_mut() {
LspPullDiagnostics::Default => {
@@ -4439,12 +4466,14 @@ fn process_full_diagnostics_report(
result_id,
diagnostics: report.items,
},
+ registration_id,
});
}
LspPullDiagnostics::Response {
server_id: existing_server_id,
uri: existing_uri,
diagnostics: existing_diagnostics,
+ ..
} => {
if server_id != *existing_server_id || &uri != existing_uri {
debug_panic!(
@@ -4478,6 +4507,7 @@ fn process_full_diagnostics_report(
result_id,
diagnostics: report.items,
},
+ registration_id,
});
}
}
@@ -116,6 +116,7 @@ use std::{
atomic::{self, AtomicUsize},
},
time::{Duration, Instant},
+ vec,
};
use sum_tree::Dimensions;
use text::{Anchor, BufferId, LineEnding, OffsetRangeExt, ToPoint as _};
@@ -229,7 +230,8 @@ struct LanguageServerSeed {
#[derive(Debug)]
pub struct DocumentDiagnosticsUpdate<'a, D> {
pub diagnostics: D,
- pub result_id: Option<String>,
+ pub result_id: Option<SharedString>,
+ pub registration_id: Option<SharedString>,
pub server_id: LanguageServerId,
pub disk_based_sources: Cow<'a, [String]>,
}
@@ -283,7 +285,14 @@ pub struct LocalLspStore {
lsp_tree: LanguageServerTree,
registered_buffers: HashMap<BufferId, usize>,
buffers_opened_in_servers: HashMap<BufferId, HashSet<LanguageServerId>>,
- buffer_pull_diagnostics_result_ids: HashMap<LanguageServerId, HashMap<PathBuf, Option<String>>>,
+ buffer_pull_diagnostics_result_ids: HashMap<
+ LanguageServerId,
+ HashMap<Option<SharedString>, HashMap<PathBuf, Option<SharedString>>>,
+ >,
+ workspace_pull_diagnostics_result_ids: HashMap<
+ LanguageServerId,
+ HashMap<Option<SharedString>, HashMap<PathBuf, Option<SharedString>>>,
+ >,
}
impl LocalLspStore {
@@ -685,6 +694,7 @@ impl LocalLspStore {
disk_based_sources: Cow::Borrowed(
&adapter.disk_based_diagnostic_sources,
),
+ registration_id: None,
}],
|_, diagnostic, cx| match diagnostic.source_kind {
DiagnosticSourceKind::Other | DiagnosticSourceKind::Pushed => {
@@ -2256,8 +2266,9 @@ impl LocalLspStore {
server_id,
None,
None,
- diagnostics,
+ None,
Vec::new(),
+ diagnostics,
cx,
)
.log_err();
@@ -2335,7 +2346,8 @@ impl LocalLspStore {
&mut self,
buffer: &Entity<Buffer>,
server_id: LanguageServerId,
- result_id: Option<String>,
+ registration_id: Option<Option<SharedString>>,
+ result_id: Option<SharedString>,
version: Option<i32>,
new_diagnostics: Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
reused_diagnostics: Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
@@ -2408,11 +2420,15 @@ impl LocalLspStore {
let set = DiagnosticSet::new(sanitized_diagnostics, &snapshot);
buffer.update(cx, |buffer, cx| {
- if let Some(abs_path) = File::from_dyn(buffer.file()).map(|f| f.abs_path(cx)) {
- self.buffer_pull_diagnostics_result_ids
- .entry(server_id)
- .or_default()
- .insert(abs_path, result_id);
+ if let Some(registration_id) = registration_id {
+ if let Some(abs_path) = File::from_dyn(buffer.file()).map(|f| f.abs_path(cx)) {
+ self.buffer_pull_diagnostics_result_ids
+ .entry(server_id)
+ .or_default()
+ .entry(registration_id)
+ .or_default()
+ .insert(abs_path, result_id);
+ }
}
buffer.update_diagnostics(server_id, set, cx)
@@ -3266,6 +3282,8 @@ impl LocalLspStore {
self.language_servers.remove(server_id_to_remove);
self.buffer_pull_diagnostics_result_ids
.remove(server_id_to_remove);
+ self.workspace_pull_diagnostics_result_ids
+ .remove(server_id_to_remove);
for buffer_servers in self.buffers_opened_in_servers.values_mut() {
buffer_servers.remove(server_id_to_remove);
}
@@ -3952,6 +3970,7 @@ impl LspStore {
registered_buffers: HashMap::default(),
buffers_opened_in_servers: HashMap::default(),
buffer_pull_diagnostics_result_ids: HashMap::default(),
+ workspace_pull_diagnostics_result_ids: HashMap::default(),
watched_manifest_filenames: ManifestProvidersStore::global(cx)
.manifest_file_names(),
}),
@@ -4225,9 +4244,50 @@ impl LspStore {
lsp_store.lsp_data.remove(&buffer_id);
let local = lsp_store.as_local_mut().unwrap();
local.registered_buffers.remove(&buffer_id);
+
local.buffers_opened_in_servers.remove(&buffer_id);
if let Some(file) = File::from_dyn(buffer.read(cx).file()).cloned() {
local.unregister_old_buffer_from_language_servers(buffer, &file, cx);
+
+ let buffer_abs_path = file.abs_path(cx);
+ for (_, buffer_pull_diagnostics_result_ids) in
+ &mut local.buffer_pull_diagnostics_result_ids
+ {
+ buffer_pull_diagnostics_result_ids.retain(
+ |_, buffer_result_ids| {
+ buffer_result_ids.remove(&buffer_abs_path);
+ !buffer_result_ids.is_empty()
+ },
+ );
+ }
+
+ let diagnostic_updates = local
+ .language_servers
+ .keys()
+ .cloned()
+ .map(|server_id| DocumentDiagnosticsUpdate {
+ diagnostics: DocumentDiagnostics {
+ document_abs_path: buffer_abs_path.clone(),
+ version: None,
+ diagnostics: Vec::new(),
+ },
+ result_id: None,
+ registration_id: None,
+ server_id: server_id,
+ disk_based_sources: Cow::Borrowed(&[]),
+ })
+ .collect::<Vec<_>>();
+
+ lsp_store
+ .merge_diagnostic_entries(
+ diagnostic_updates,
+ |_, diagnostic, _| {
+ diagnostic.source_kind != DiagnosticSourceKind::Pulled
+ },
+ cx,
+ )
+ .context("Clearing diagnostics for the closed buffer")
+ .log_err();
}
}
})
@@ -6700,9 +6760,11 @@ impl LspStore {
};
assert!(any_server_has_diagnostics_provider);
+ let identifier = buffer_diagnostic_identifier(&dynamic_caps);
let request = GetDocumentDiagnostics {
previous_result_id: None,
- dynamic_caps,
+ identifier,
+ registration_id: None,
};
let request_task = client.request_lsp(
upstream_project_id,
@@ -6735,19 +6797,27 @@ impl LspStore {
.language_server_dynamic_registrations
.get(&server_id)
.into_iter()
- .flat_map(|registrations| registrations.diagnostics.values().cloned())
+ .flat_map(|registrations| registrations.diagnostics.clone())
.collect::<Vec<_>>();
Some(
providers_with_identifiers
.into_iter()
- .map(|dynamic_caps| {
- let result_id = self.result_id(server_id, buffer_id, cx);
+ .map(|(registration_id, dynamic_caps)| {
+ let identifier = buffer_diagnostic_identifier(&dynamic_caps);
+ let registration_id = registration_id.map(SharedString::from);
+ let result_id = self.result_id_for_buffer_pull(
+ server_id,
+ buffer_id,
+ ®istration_id,
+ cx,
+ );
self.request_lsp(
buffer.clone(),
LanguageServerToQuery::Other(server_id),
GetDocumentDiagnostics {
previous_result_id: result_id,
- dynamic_caps,
+ registration_id,
+ identifier,
},
cx,
)
@@ -7112,8 +7182,7 @@ impl LspStore {
return;
}
- let mut unchanged_buffers = HashSet::default();
- let mut changed_buffers = HashSet::default();
+ let mut unchanged_buffers = HashMap::default();
let server_diagnostics_updates = diagnostics
.into_iter()
.filter_map(|diagnostics_set| match diagnostics_set {
@@ -7121,24 +7190,25 @@ impl LspStore {
server_id,
uri,
diagnostics,
- } => Some((server_id, uri, diagnostics)),
+ registration_id,
+ } => Some((server_id, uri, diagnostics, registration_id)),
LspPullDiagnostics::Default => None,
})
.fold(
HashMap::default(),
- |mut acc, (server_id, uri, diagnostics)| {
+ |mut acc, (server_id, uri, diagnostics, new_registration_id)| {
let (result_id, diagnostics) = match diagnostics {
PulledDiagnostics::Unchanged { result_id } => {
- unchanged_buffers.insert(uri.clone());
+ unchanged_buffers
+ .entry(new_registration_id.clone())
+ .or_insert_with(HashSet::default)
+ .insert(uri.clone());
(Some(result_id), Vec::new())
}
PulledDiagnostics::Changed {
result_id,
diagnostics,
- } => {
- changed_buffers.insert(uri.clone());
- (result_id, diagnostics)
- }
+ } => (result_id, diagnostics),
};
let disk_based_sources = Cow::Owned(
lsp_store
@@ -7148,8 +7218,11 @@ impl LspStore {
.unwrap_or(&[])
.to_vec(),
);
- acc.entry(server_id).or_insert_with(Vec::new).push(
- DocumentDiagnosticsUpdate {
+ acc.entry(server_id)
+ .or_insert_with(HashMap::default)
+ .entry(new_registration_id.clone())
+ .or_insert_with(Vec::new)
+ .push(DocumentDiagnosticsUpdate {
server_id,
diagnostics: lsp::PublishDiagnosticsParams {
uri,
@@ -7158,37 +7231,35 @@ impl LspStore {
},
result_id,
disk_based_sources,
- },
- );
+ registration_id: new_registration_id,
+ });
acc
},
);
for diagnostic_updates in server_diagnostics_updates.into_values() {
- lsp_store
- .merge_lsp_diagnostics(
- DiagnosticSourceKind::Pulled,
- diagnostic_updates,
- |buffer, old_diagnostic, cx| {
- File::from_dyn(buffer.file())
- .and_then(|file| {
- let abs_path = file.as_local()?.abs_path(cx);
- lsp::Uri::from_file_path(abs_path).ok()
- })
- .is_none_or(|buffer_uri| {
- unchanged_buffers.contains(&buffer_uri)
- || match old_diagnostic.source_kind {
- DiagnosticSourceKind::Pulled => {
- !changed_buffers.contains(&buffer_uri)
- }
- DiagnosticSourceKind::Other
- | DiagnosticSourceKind::Pushed => true,
- }
- })
- },
- cx,
- )
- .log_err();
+ for (registration_id, diagnostic_updates) in diagnostic_updates {
+ lsp_store
+ .merge_lsp_diagnostics(
+ DiagnosticSourceKind::Pulled,
+ diagnostic_updates,
+ |document_uri, old_diagnostic, _| match old_diagnostic.source_kind {
+ DiagnosticSourceKind::Pulled => {
+ old_diagnostic.registration_id != registration_id
+ || unchanged_buffers
+ .get(&old_diagnostic.registration_id)
+ .is_some_and(|unchanged_buffers| {
+ unchanged_buffers.contains(&document_uri)
+ })
+ }
+ DiagnosticSourceKind::Other | DiagnosticSourceKind::Pushed => {
+ true
+ }
+ },
+ cx,
+ )
+ .log_err();
+ }
}
})
})
@@ -8195,7 +8266,7 @@ impl LspStore {
&mut self,
server_id: LanguageServerId,
abs_path: PathBuf,
- result_id: Option<String>,
+ result_id: Option<SharedString>,
version: Option<i32>,
diagnostics: Vec<DiagnosticEntry<Unclipped<PointUtf16>>>,
cx: &mut Context<Self>,
@@ -8210,6 +8281,7 @@ impl LspStore {
result_id,
server_id,
disk_based_sources: Cow::Borrowed(&[]),
+ registration_id: None,
}],
|_, _, _| false,
cx,
@@ -8220,7 +8292,7 @@ impl LspStore {
pub fn merge_diagnostic_entries<'a>(
&mut self,
diagnostic_updates: Vec<DocumentDiagnosticsUpdate<'a, DocumentDiagnostics>>,
- merge: impl Fn(&Buffer, &Diagnostic, &App) -> bool + Clone,
+ merge: impl Fn(&lsp::Uri, &Diagnostic, &App) -> bool + Clone,
cx: &mut Context<Self>,
) -> anyhow::Result<()> {
let mut diagnostics_summary = None::<proto::UpdateDiagnosticSummary>;
@@ -8241,13 +8313,15 @@ impl LspStore {
path: relative_path,
};
+ let document_uri = lsp::Uri::from_file_path(abs_path)
+ .map_err(|()| anyhow!("Failed to convert buffer path {abs_path:?} to lsp Uri"))?;
if let Some(buffer_handle) = self.buffer_store.read(cx).get_by_path(&project_path) {
let snapshot = buffer_handle.read(cx).snapshot();
let buffer = buffer_handle.read(cx);
let reused_diagnostics = buffer
.buffer_diagnostics(Some(server_id))
.iter()
- .filter(|v| merge(buffer, &v.diagnostic, cx))
+ .filter(|v| merge(&document_uri, &v.diagnostic, cx))
.map(|v| {
let start = Unclipped(v.range.start.to_point_utf16(&snapshot));
let end = Unclipped(v.range.end.to_point_utf16(&snapshot));
@@ -8263,6 +8337,7 @@ impl LspStore {
.update_buffer_diagnostics(
&buffer_handle,
server_id,
+ Some(update.registration_id),
update.result_id,
update.diagnostics.version,
update.diagnostics.diagnostics.clone(),
@@ -8271,6 +8346,25 @@ impl LspStore {
)?;
update.diagnostics.diagnostics.extend(reused_diagnostics);
+ } else if let Some(local) = self.as_local() {
+ let reused_diagnostics = local
+ .diagnostics
+ .get(&worktree_id)
+ .and_then(|diagnostics_for_tree| diagnostics_for_tree.get(&project_path.path))
+ .and_then(|diagnostics_by_server_id| {
+ diagnostics_by_server_id
+ .binary_search_by_key(&server_id, |e| e.0)
+ .ok()
+ .map(|ix| &diagnostics_by_server_id[ix].1)
+ })
+ .into_iter()
+ .flatten()
+ .filter(|v| merge(&document_uri, &v.diagnostic, cx));
+
+ update
+ .diagnostics
+ .diagnostics
+ .extend(reused_diagnostics.cloned());
}
let updated = worktree.update(cx, |worktree, cx| {
@@ -8355,7 +8449,7 @@ impl LspStore {
.unwrap_or_default();
let new_summary = DiagnosticSummary::new(&diagnostics);
- if new_summary.is_empty() {
+ if diagnostics.is_empty() {
if let Some(diagnostics_by_server_id) = diagnostics_for_tree.get_mut(&path_in_worktree)
{
if let Ok(ix) = diagnostics_by_server_id.binary_search_by_key(&server_id, |e| e.0) {
@@ -9665,7 +9759,7 @@ impl LspStore {
);
}
lsp::ProgressParamsValue::WorkspaceDiagnostic(report) => {
- let identifier = match progress_params.token {
+ let registration_id = match progress_params.token {
lsp::NumberOrString::Number(_) => None,
lsp::NumberOrString::String(token) => token
.split_once(WORKSPACE_DIAGNOSTICS_TOKEN_START)
@@ -9678,10 +9772,15 @@ impl LspStore {
.as_local_mut()
.and_then(|local| local.language_servers.get_mut(&language_server_id))
&& let Some(workspace_diagnostics) =
- workspace_diagnostics_refresh_tasks.get_mut(&identifier)
+ workspace_diagnostics_refresh_tasks.get_mut(®istration_id)
{
workspace_diagnostics.progress_tx.try_send(()).ok();
- self.apply_workspace_diagnostic_report(language_server_id, report, cx)
+ self.apply_workspace_diagnostic_report(
+ language_server_id,
+ report,
+ registration_id.map(SharedString::from),
+ cx,
+ )
}
}
}
@@ -10941,7 +11040,7 @@ impl LspStore {
&mut self,
server_id: LanguageServerId,
diagnostics: lsp::PublishDiagnosticsParams,
- result_id: Option<String>,
+ result_id: Option<SharedString>,
source_kind: DiagnosticSourceKind,
disk_based_sources: &[String],
cx: &mut Context<Self>,
@@ -10953,6 +11052,7 @@ impl LspStore {
result_id,
server_id,
disk_based_sources: Cow::Borrowed(disk_based_sources),
+ registration_id: None,
}],
|_, _, _| false,
cx,
@@ -10963,7 +11063,7 @@ impl LspStore {
&mut self,
source_kind: DiagnosticSourceKind,
lsp_diagnostics: Vec<DocumentDiagnosticsUpdate<lsp::PublishDiagnosticsParams>>,
- merge: impl Fn(&Buffer, &Diagnostic, &App) -> bool + Clone,
+ merge: impl Fn(&lsp::Uri, &Diagnostic, &App) -> bool + Clone,
cx: &mut Context<Self>,
) -> Result<()> {
anyhow::ensure!(self.mode.is_local(), "called update_diagnostics on remote");
@@ -10978,10 +11078,12 @@ impl LspStore {
update.server_id,
update.diagnostics,
&update.disk_based_sources,
+ update.registration_id.clone(),
),
result_id: update.result_id,
server_id: update.server_id,
disk_based_sources: update.disk_based_sources,
+ registration_id: update.registration_id,
})
})
.collect();
@@ -10996,6 +11098,7 @@ impl LspStore {
server_id: LanguageServerId,
mut lsp_diagnostics: lsp::PublishDiagnosticsParams,
disk_based_sources: &[String],
+ registration_id: Option<SharedString>,
) -> DocumentDiagnostics {
let mut diagnostics = Vec::default();
let mut primary_diagnostic_group_ids = HashMap::default();
@@ -11069,6 +11172,7 @@ impl LspStore {
is_unnecessary,
underline,
data: diagnostic.data.clone(),
+ registration_id: registration_id.clone(),
},
});
if let Some(infos) = &diagnostic.related_information {
@@ -11096,6 +11200,7 @@ impl LspStore {
is_unnecessary: false,
underline,
data: diagnostic.data.clone(),
+ registration_id: registration_id.clone(),
},
});
}
@@ -11845,18 +11950,22 @@ impl LspStore {
}
if let Some(local) = self.as_local_mut() {
local.buffer_pull_diagnostics_result_ids.remove(&for_server);
+ local
+ .workspace_pull_diagnostics_result_ids
+ .remove(&for_server);
for buffer_servers in local.buffers_opened_in_servers.values_mut() {
buffer_servers.remove(&for_server);
}
}
}
- pub fn result_id(
+ pub fn result_id_for_buffer_pull(
&self,
server_id: LanguageServerId,
buffer_id: BufferId,
+ registration_id: &Option<SharedString>,
cx: &App,
- ) -> Option<String> {
+ ) -> Option<SharedString> {
let abs_path = self
.buffer_store
.read(cx)
@@ -11866,20 +11975,40 @@ impl LspStore {
self.as_local()?
.buffer_pull_diagnostics_result_ids
.get(&server_id)?
+ .get(registration_id)?
.get(&abs_path)?
.clone()
}
- pub fn all_result_ids(&self, server_id: LanguageServerId) -> HashMap<PathBuf, String> {
+ /// Gets all result_ids for a workspace diagnostics pull request.
+    /// First, it tries to find buffer's result_id retrieved via the diagnostics pull; if it fails, it falls back to the workspace diagnostics pull result_id.
+ /// The latter is supposed to be of lower priority as we keep on pulling diagnostics for open buffers eagerly.
+ pub fn result_ids_for_workspace_refresh(
+ &self,
+ server_id: LanguageServerId,
+ registration_id: &Option<SharedString>,
+ ) -> HashMap<PathBuf, SharedString> {
let Some(local) = self.as_local() else {
return HashMap::default();
};
local
- .buffer_pull_diagnostics_result_ids
+ .workspace_pull_diagnostics_result_ids
.get(&server_id)
.into_iter()
+ .filter_map(|diagnostics| diagnostics.get(registration_id))
.flatten()
- .filter_map(|(abs_path, result_id)| Some((abs_path.clone(), result_id.clone()?)))
+ .filter_map(|(abs_path, result_id)| {
+ let result_id = local
+ .buffer_pull_diagnostics_result_ids
+ .get(&server_id)
+ .and_then(|buffer_ids_result_ids| {
+ buffer_ids_result_ids.get(registration_id)?.get(abs_path)
+ })
+ .cloned()
+ .flatten()
+ .or_else(|| result_id.clone())?;
+ Some((abs_path.clone(), result_id))
+ })
.collect()
}
@@ -11924,12 +12053,16 @@ impl LspStore {
&mut self,
server_id: LanguageServerId,
report: lsp::WorkspaceDiagnosticReportResult,
+ registration_id: Option<SharedString>,
cx: &mut Context<Self>,
) {
let workspace_diagnostics =
- GetDocumentDiagnostics::deserialize_workspace_diagnostics_report(report, server_id);
- let mut unchanged_buffers = HashSet::default();
- let mut changed_buffers = HashSet::default();
+ GetDocumentDiagnostics::deserialize_workspace_diagnostics_report(
+ report,
+ server_id,
+ registration_id,
+ );
+ let mut unchanged_buffers = HashMap::default();
let workspace_diagnostics_updates = workspace_diagnostics
.into_iter()
.filter_map(
@@ -11938,25 +12071,32 @@ impl LspStore {
server_id,
uri,
diagnostics,
- } => Some((server_id, uri, diagnostics, workspace_diagnostics.version)),
+ registration_id,
+ } => Some((
+ server_id,
+ uri,
+ diagnostics,
+ workspace_diagnostics.version,
+ registration_id,
+ )),
LspPullDiagnostics::Default => None,
},
)
.fold(
HashMap::default(),
- |mut acc, (server_id, uri, diagnostics, version)| {
+ |mut acc, (server_id, uri, diagnostics, version, new_registration_id)| {
let (result_id, diagnostics) = match diagnostics {
PulledDiagnostics::Unchanged { result_id } => {
- unchanged_buffers.insert(uri.clone());
+ unchanged_buffers
+ .entry(new_registration_id.clone())
+ .or_insert_with(HashSet::default)
+ .insert(uri.clone());
(Some(result_id), Vec::new())
}
PulledDiagnostics::Changed {
result_id,
diagnostics,
- } => {
- changed_buffers.insert(uri.clone());
- (result_id, diagnostics)
- }
+ } => (result_id, diagnostics),
};
let disk_based_sources = Cow::Owned(
self.language_server_adapter_for_id(server_id)
@@ -11965,47 +12105,68 @@ impl LspStore {
.unwrap_or(&[])
.to_vec(),
);
- acc.entry(server_id)
- .or_insert_with(Vec::new)
- .push(DocumentDiagnosticsUpdate {
- server_id,
- diagnostics: lsp::PublishDiagnosticsParams {
- uri,
- diagnostics,
- version,
- },
- result_id,
- disk_based_sources,
- });
+
+ let Some(abs_path) = uri.to_file_path().ok() else {
+ return acc;
+ };
+ let Some((worktree, relative_path)) =
+ self.worktree_store.read(cx).find_worktree(abs_path.clone(), cx)
+ else {
+ log::warn!("skipping workspace diagnostics update, no worktree found for path {abs_path:?}");
+ return acc;
+ };
+ let worktree_id = worktree.read(cx).id();
+ let project_path = ProjectPath {
+ worktree_id,
+ path: relative_path,
+ };
+ if let Some(local_lsp_store) = self.as_local_mut() {
+ local_lsp_store.workspace_pull_diagnostics_result_ids.entry(server_id)
+ .or_default().entry(new_registration_id.clone()).or_default().insert(abs_path, result_id.clone());
+ }
+ // The LSP spec recommends that "diagnostics from a document pull should win over diagnostics from a workspace pull."
+ // Since we actively pull diagnostics for documents with open buffers, we ignore contents of workspace pulls for these documents.
+ if self.buffer_store.read(cx).get_by_path(&project_path).is_none() {
+ acc.entry(server_id)
+ .or_insert_with(HashMap::default)
+ .entry(new_registration_id.clone())
+ .or_insert_with(Vec::new)
+ .push(DocumentDiagnosticsUpdate {
+ server_id,
+ diagnostics: lsp::PublishDiagnosticsParams {
+ uri,
+ diagnostics,
+ version,
+ },
+ result_id,
+ disk_based_sources,
+ registration_id: new_registration_id,
+ });
+ }
acc
},
);
for diagnostic_updates in workspace_diagnostics_updates.into_values() {
- self.merge_lsp_diagnostics(
- DiagnosticSourceKind::Pulled,
- diagnostic_updates,
- |buffer, old_diagnostic, cx| {
- File::from_dyn(buffer.file())
- .and_then(|file| {
- let abs_path = file.as_local()?.abs_path(cx);
- lsp::Uri::from_file_path(abs_path).ok()
- })
- .is_none_or(|buffer_uri| {
- unchanged_buffers.contains(&buffer_uri)
- || match old_diagnostic.source_kind {
- DiagnosticSourceKind::Pulled => {
- !changed_buffers.contains(&buffer_uri)
- }
- DiagnosticSourceKind::Other | DiagnosticSourceKind::Pushed => {
- true
- }
- }
- })
- },
- cx,
- )
- .log_err();
+ for (registration_id, diagnostic_updates) in diagnostic_updates {
+ self.merge_lsp_diagnostics(
+ DiagnosticSourceKind::Pulled,
+ diagnostic_updates,
+ |document_uri, old_diagnostic, _| match old_diagnostic.source_kind {
+ DiagnosticSourceKind::Pulled => {
+ old_diagnostic.registration_id != registration_id
+ || unchanged_buffers
+ .get(&old_diagnostic.registration_id)
+ .is_some_and(|unchanged_buffers| {
+ unchanged_buffers.contains(&document_uri)
+ })
+ }
+ DiagnosticSourceKind::Other | DiagnosticSourceKind::Pushed => true,
+ },
+ cx,
+ )
+ .log_err();
+ }
}
}
@@ -12284,54 +12445,41 @@ impl LspStore {
.diagnostics
.insert(Some(reg.id.clone()), caps.clone());
- if let LanguageServerState::Running {
- workspace_diagnostics_refresh_tasks,
- ..
- } = state
- && let Some(task) = lsp_workspace_diagnostics_refresh(
- Some(reg.id.clone()),
- caps.clone(),
- server.clone(),
- cx,
- )
- {
- workspace_diagnostics_refresh_tasks.insert(Some(reg.id), task);
+ let supports_workspace_diagnostics =
+ |capabilities: &DiagnosticServerCapabilities| match capabilities {
+ DiagnosticServerCapabilities::Options(diagnostic_options) => {
+ diagnostic_options.workspace_diagnostics
+ }
+ DiagnosticServerCapabilities::RegistrationOptions(
+ diagnostic_registration_options,
+ ) => {
+ diagnostic_registration_options
+ .diagnostic_options
+ .workspace_diagnostics
+ }
+ };
+
+ if supports_workspace_diagnostics(&caps) {
+ if let LanguageServerState::Running {
+ workspace_diagnostics_refresh_tasks,
+ ..
+ } = state
+ && let Some(task) = lsp_workspace_diagnostics_refresh(
+ Some(reg.id.clone()),
+ caps.clone(),
+ server.clone(),
+ cx,
+ )
+ {
+ workspace_diagnostics_refresh_tasks.insert(Some(reg.id), task);
+ }
}
- let mut did_update_caps = false;
server.update_capabilities(|capabilities| {
- if capabilities.diagnostic_provider.as_ref().is_none_or(
- |current_caps| {
- let supports_workspace_diagnostics =
- |capabilities: &DiagnosticServerCapabilities| {
- match capabilities {
- DiagnosticServerCapabilities::Options(
- diagnostic_options,
- ) => diagnostic_options.workspace_diagnostics,
- DiagnosticServerCapabilities::RegistrationOptions(
- diagnostic_registration_options,
- ) => {
- diagnostic_registration_options
- .diagnostic_options
- .workspace_diagnostics
- }
- }
- };
- // We don't actually care about capabilities.diagnostic_provider, but it IS relevant for the remote peer
- // to know that there's at least one provider. Otherwise, it will never ask us to issue documentdiagnostic calls on their behalf,
- // as it'll think that they're not supported.
- // If we did not support any workspace diagnostics up to this point but now do, let's update.
- !supports_workspace_diagnostics(current_caps)
- & supports_workspace_diagnostics(&caps)
- },
- ) {
- did_update_caps = true;
- capabilities.diagnostic_provider = Some(caps);
- }
+ capabilities.diagnostic_provider = Some(caps);
});
- if did_update_caps {
- notify_server_capabilities_updated(&server, cx);
- }
+
+ notify_server_capabilities_updated(&server, cx);
}
}
"textDocument/documentColor" => {
@@ -12499,7 +12647,7 @@ impl LspStore {
.language_servers
.get_mut(&server_id)
.context("Could not obtain Language Servers state")?;
- let options = local
+ local
.language_server_dynamic_registrations
.get_mut(&server_id)
.with_context(|| {
@@ -12512,13 +12660,12 @@ impl LspStore {
)?;
let mut has_any_diagnostic_providers_still = true;
- if let Some(identifier) = diagnostic_identifier(&options)
- && let LanguageServerState::Running {
- workspace_diagnostics_refresh_tasks,
- ..
- } = state
+ if let LanguageServerState::Running {
+ workspace_diagnostics_refresh_tasks,
+ ..
+ } = state
{
- workspace_diagnostics_refresh_tasks.remove(&identifier);
+ workspace_diagnostics_refresh_tasks.remove(&Some(unreg.id.clone()));
has_any_diagnostic_providers_still =
!workspace_diagnostics_refresh_tasks.is_empty();
}
@@ -12822,7 +12969,8 @@ fn lsp_workspace_diagnostics_refresh(
server: Arc<LanguageServer>,
cx: &mut Context<'_, LspStore>,
) -> Option<WorkspaceRefreshTask> {
- let identifier = diagnostic_identifier(&options)?;
+ let identifier = workspace_diagnostic_identifier(&options)?;
+ let registration_id_shared = registration_id.as_ref().map(SharedString::from);
let (progress_tx, mut progress_rx) = mpsc::channel(1);
let (mut refresh_tx, mut refresh_rx) = mpsc::channel(1);
@@ -12854,13 +13002,13 @@ fn lsp_workspace_diagnostics_refresh(
let Ok(previous_result_ids) = lsp_store.update(cx, |lsp_store, _| {
lsp_store
- .all_result_ids(server.server_id())
+ .result_ids_for_workspace_refresh(server.server_id(), ®istration_id_shared)
.into_iter()
.filter_map(|(abs_path, result_id)| {
let uri = file_path_to_lsp_url(&abs_path).ok()?;
Some(lsp::PreviousResultId {
uri,
- value: result_id,
+ value: result_id.to_string(),
})
})
.collect()
@@ -12868,9 +13016,9 @@ fn lsp_workspace_diagnostics_refresh(
return;
};
- let token = if let Some(identifier) = ®istration_id {
+ let token = if let Some(registration_id) = ®istration_id {
format!(
- "workspace/diagnostic/{}/{requests}/{WORKSPACE_DIAGNOSTICS_TOKEN_START}{identifier}",
+ "workspace/diagnostic/{}/{requests}/{WORKSPACE_DIAGNOSTICS_TOKEN_START}{registration_id}",
server.server_id(),
)
} else {
@@ -12920,6 +13068,7 @@ fn lsp_workspace_diagnostics_refresh(
lsp_store.apply_workspace_diagnostic_report(
server.server_id(),
pulled_diagnostics,
+ registration_id_shared.clone(),
cx,
)
})
@@ -12941,7 +13090,21 @@ fn lsp_workspace_diagnostics_refresh(
})
}
-fn diagnostic_identifier(options: &DiagnosticServerCapabilities) -> Option<Option<String>> {
+fn buffer_diagnostic_identifier(options: &DiagnosticServerCapabilities) -> Option<String> {
+ match &options {
+ lsp::DiagnosticServerCapabilities::Options(diagnostic_options) => {
+ diagnostic_options.identifier.clone()
+ }
+ lsp::DiagnosticServerCapabilities::RegistrationOptions(registration_options) => {
+ let diagnostic_options = ®istration_options.diagnostic_options;
+ diagnostic_options.identifier.clone()
+ }
+ }
+}
+
+fn workspace_diagnostic_identifier(
+ options: &DiagnosticServerCapabilities,
+) -> Option<Option<String>> {
match &options {
lsp::DiagnosticServerCapabilities::Options(diagnostic_options) => {
if !diagnostic_options.workspace_diagnostics {
@@ -90,6 +90,7 @@ pub fn register_notifications(
disk_based_sources: Cow::Borrowed(
&adapter.disk_based_diagnostic_sources,
),
+ registration_id: None,
}],
|_, diag, _| !is_inactive_region(diag),
cx,
@@ -984,6 +984,8 @@ pub enum LspPullDiagnostics {
server_id: LanguageServerId,
/// URI of the resource,
uri: lsp::Uri,
+ /// The ID provided by the dynamic registration that produced diagnostics.
+ registration_id: Option<SharedString>,
/// The diagnostics produced by this language server.
diagnostics: PulledDiagnostics,
},
@@ -994,10 +996,10 @@ pub enum PulledDiagnostics {
Unchanged {
/// An ID the current pulled batch for this file.
/// If given, can be used to query workspace diagnostics partially.
- result_id: String,
+ result_id: SharedString,
},
Changed {
- result_id: Option<String>,
+ result_id: Option<SharedString>,
diagnostics: Vec<lsp::Diagnostic>,
},
}
@@ -93,9 +93,6 @@ enum FindSearchCandidates {
/// based on disk contents of a buffer. This step is not performed for buffers we already have in memory.
confirm_contents_will_match_tx: Sender<MatchingEntry>,
confirm_contents_will_match_rx: Receiver<MatchingEntry>,
- /// Of those that contain at least one match (or are already in memory), look for rest of matches (and figure out their ranges).
- /// But wait - first, we need to go back to the main thread to open a buffer (& create an entity for it).
- get_buffer_for_full_scan_tx: Sender<ProjectPath>,
},
Remote,
OpenBuffersOnly,
@@ -226,7 +223,7 @@ impl Search {
.boxed_local(),
cx.background_spawn(Self::maintain_sorted_search_results(
sorted_search_results_rx,
- get_buffer_for_full_scan_tx.clone(),
+ get_buffer_for_full_scan_tx,
self.limit,
))
.boxed_local(),
@@ -234,7 +231,6 @@ impl Search {
(
FindSearchCandidates::Local {
fs,
- get_buffer_for_full_scan_tx,
confirm_contents_will_match_tx,
confirm_contents_will_match_rx,
input_paths_rx,
@@ -593,7 +589,6 @@ impl Worker<'_> {
input_paths_rx,
confirm_contents_will_match_rx,
mut confirm_contents_will_match_tx,
- mut get_buffer_for_full_scan_tx,
fs,
) = match self.candidates {
FindSearchCandidates::Local {
@@ -601,21 +596,15 @@ impl Worker<'_> {
input_paths_rx,
confirm_contents_will_match_rx,
confirm_contents_will_match_tx,
- get_buffer_for_full_scan_tx,
} => (
input_paths_rx,
confirm_contents_will_match_rx,
confirm_contents_will_match_tx,
- get_buffer_for_full_scan_tx,
Some(fs),
),
- FindSearchCandidates::Remote | FindSearchCandidates::OpenBuffersOnly => (
- unbounded().1,
- unbounded().1,
- unbounded().0,
- unbounded().0,
- None,
- ),
+ FindSearchCandidates::Remote | FindSearchCandidates::OpenBuffersOnly => {
+ (unbounded().1, unbounded().1, unbounded().0, None)
+ }
};
// WorkerA: grabs a request for "find all matches in file/a" <- takes 5 minutes
// right after: WorkerB: grabs a request for "find all matches in file/b" <- takes 5 seconds
@@ -629,7 +618,6 @@ impl Worker<'_> {
open_entries: &self.open_buffers,
fs: fs.as_deref(),
confirm_contents_will_match_tx: &confirm_contents_will_match_tx,
- get_buffer_for_full_scan_tx: &get_buffer_for_full_scan_tx,
};
// Whenever we notice that some step of a pipeline is closed, we don't want to close subsequent
// steps straight away. Another worker might be about to produce a value that will
@@ -645,10 +633,7 @@ impl Worker<'_> {
find_first_match = find_first_match.next() => {
if let Some(buffer_with_at_least_one_match) = find_first_match {
handler.handle_find_first_match(buffer_with_at_least_one_match).await;
- } else {
- get_buffer_for_full_scan_tx = bounded(1).0;
}
-
},
scan_path = scan_path.next() => {
if let Some(path_to_scan) = scan_path {
@@ -673,7 +658,6 @@ struct RequestHandler<'worker> {
fs: Option<&'worker dyn Fs>,
open_entries: &'worker HashSet<ProjectEntryId>,
confirm_contents_will_match_tx: &'worker Sender<MatchingEntry>,
- get_buffer_for_full_scan_tx: &'worker Sender<ProjectPath>,
}
impl RequestHandler<'_> {
@@ -729,9 +713,8 @@ impl RequestHandler<'_> {
_ = maybe!(async move {
let InputPath {
entry,
-
snapshot,
- should_scan_tx,
+ mut should_scan_tx,
} = req;
if entry.is_fifo || !entry.is_file() {
@@ -754,7 +737,7 @@ impl RequestHandler<'_> {
if self.open_entries.contains(&entry.id) {
// The buffer is already in memory and that's the version we want to scan;
// hence skip the dilly-dally and look for all matches straight away.
- self.get_buffer_for_full_scan_tx
+ should_scan_tx
.send(ProjectPath {
worktree_id: snapshot.id(),
path: entry.path.clone(),
@@ -2750,11 +2750,13 @@ async fn test_empty_diagnostic_ranges(cx: &mut gpui::TestAppContext) {
);
let fs = FakeFs::new(cx.executor());
- fs.insert_tree("/dir", json!({ "a.rs": text })).await;
+ fs.insert_tree(path!("/dir"), json!({ "a.rs": text })).await;
- let project = Project::test(fs, ["/dir".as_ref()], cx).await;
+ let project = Project::test(fs, [Path::new(path!("/dir"))], cx).await;
let buffer = project
- .update(cx, |project, cx| project.open_local_buffer("/dir/a.rs", cx))
+ .update(cx, |project, cx| {
+ project.open_local_buffer(path!("/dir/a.rs"), cx)
+ })
.await
.unwrap();
@@ -2763,7 +2765,7 @@ async fn test_empty_diagnostic_ranges(cx: &mut gpui::TestAppContext) {
lsp_store
.update_diagnostic_entries(
LanguageServerId(0),
- PathBuf::from("/dir/a.rs"),
+ PathBuf::from(path!("/dir/a.rs")),
None,
None,
vec![
@@ -2820,17 +2822,17 @@ async fn test_diagnostics_from_multiple_language_servers(cx: &mut gpui::TestAppC
init_test(cx);
let fs = FakeFs::new(cx.executor());
- fs.insert_tree("/dir", json!({ "a.rs": "one two three" }))
+ fs.insert_tree(path!("/dir"), json!({ "a.rs": "one two three" }))
.await;
- let project = Project::test(fs, ["/dir".as_ref()], cx).await;
+ let project = Project::test(fs, [Path::new(path!("/dir"))], cx).await;
let lsp_store = project.read_with(cx, |project, _| project.lsp_store.clone());
lsp_store.update(cx, |lsp_store, cx| {
lsp_store
.update_diagnostic_entries(
LanguageServerId(0),
- Path::new("/dir/a.rs").to_owned(),
+ Path::new(path!("/dir/a.rs")).to_owned(),
None,
None,
vec![DiagnosticEntry {
@@ -2849,7 +2851,7 @@ async fn test_diagnostics_from_multiple_language_servers(cx: &mut gpui::TestAppC
lsp_store
.update_diagnostic_entries(
LanguageServerId(1),
- Path::new("/dir/a.rs").to_owned(),
+ Path::new(path!("/dir/a.rs")).to_owned(),
None,
None,
vec![DiagnosticEntry {
@@ -9733,7 +9735,7 @@ async fn test_ignored_dirs_events(cx: &mut gpui::TestAppContext) {
("project/target/debug/deps".to_string(), PathChange::Added),
("project/target/debug/deps".to_string(), PathChange::Removed),
],
- "Due to `debug` directory being tracket, it should get updates for entries inside it.
+ "Due to `debug` directory being tracked, it should get updates for entries inside it.
No updates for more nested directories should happen as those are ignored",
);
}
@@ -96,7 +96,7 @@ impl TelemetryWorktreeSnapshot {
};
};
- let remote_url = backend.remote_url("origin");
+ let remote_url = backend.remote_url("origin").await;
let head_sha = backend.head_sha().await;
let diff = backend.diff(DiffType::HeadToWorktree).await.ok();
@@ -1529,7 +1529,8 @@ impl ProjectPanel {
}
fn open(&mut self, _: &Open, window: &mut Window, cx: &mut Context<Self>) {
- let preview_tabs_enabled = PreviewTabsSettings::get_global(cx).enabled;
+ let preview_tabs_enabled =
+ PreviewTabsSettings::get_global(cx).enable_preview_from_project_panel;
self.open_internal(true, !preview_tabs_enabled, None, window, cx);
}
@@ -4819,7 +4820,7 @@ impl ProjectPanel {
project_panel.toggle_expanded(entry_id, window, cx);
}
} else {
- let preview_tabs_enabled = PreviewTabsSettings::get_global(cx).enabled;
+ let preview_tabs_enabled = PreviewTabsSettings::get_global(cx).enable_preview_from_project_panel;
let click_count = event.click_count();
let focus_opened_item = click_count > 1;
let allow_preview = preview_tabs_enabled && click_count == 1;
@@ -133,8 +133,9 @@ impl PickerDelegate for ProjectSymbolsDelegate {
workspace.active_pane().clone()
};
- let editor =
- workspace.open_project_item::<Editor>(pane, buffer, true, true, window, cx);
+ let editor = workspace.open_project_item::<Editor>(
+ pane, buffer, true, true, true, true, window, cx,
+ );
editor.update(cx, |editor, cx| {
editor.change_selections(
@@ -258,6 +258,7 @@ message Diagnostic {
Anchor start = 1;
Anchor end = 2;
optional string source = 3;
+ optional string registration_id = 17;
enum SourceKind {
Pulled = 0;
@@ -190,6 +190,19 @@ message GitRenameBranch {
string new_name = 4;
}
+message GitCreateRemote {
+ uint64 project_id = 1;
+ uint64 repository_id = 2;
+ string remote_name = 3;
+ string remote_url = 4;
+}
+
+message GitRemoveRemote {
+ uint64 project_id = 1;
+ uint64 repository_id = 2;
+ string remote_name = 3;
+}
+
message GitDeleteBranch {
uint64 project_id = 1;
uint64 repository_id = 2;
@@ -949,6 +949,7 @@ message PulledDiagnostics {
optional string result_id = 3;
bool changed = 4;
repeated LspDiagnostic diagnostics = 5;
+ optional string registration_id = 6;
}
message PullWorkspaceDiagnostics {
@@ -437,13 +437,18 @@ message Envelope {
OpenImageResponse open_image_response = 392;
CreateImageForPeer create_image_for_peer = 393;
+
GitFileHistory git_file_history = 397;
GitFileHistoryResponse git_file_history_response = 398;
RunGitHook run_git_hook = 399;
GitDeleteBranch git_delete_branch = 400;
- ExternalExtensionAgentsUpdated external_extension_agents_updated = 401; // current max
+
+ ExternalExtensionAgentsUpdated external_extension_agents_updated = 401;
+
+ GitCreateRemote git_create_remote = 402;
+ GitRemoveRemote git_remove_remote = 403;// current max
}
reserved 87 to 88, 396;
@@ -305,6 +305,8 @@ messages!(
(RemoteMessageResponse, Background),
(AskPassRequest, Background),
(AskPassResponse, Background),
+ (GitCreateRemote, Background),
+ (GitRemoveRemote, Background),
(GitCreateBranch, Background),
(GitChangeBranch, Background),
(GitRenameBranch, Background),
@@ -504,6 +506,8 @@ request_messages!(
(GetRemotes, GetRemotesResponse),
(Pull, RemoteMessageResponse),
(AskPassRequest, AskPassResponse),
+ (GitCreateRemote, Ack),
+ (GitRemoveRemote, Ack),
(GitCreateBranch, Ack),
(GitChangeBranch, Ack),
(GitRenameBranch, Ack),
@@ -676,6 +680,8 @@ entity_messages!(
GitChangeBranch,
GitRenameBranch,
GitCreateBranch,
+ GitCreateRemote,
+ GitRemoveRemote,
CheckForPushedCommits,
GitDiff,
GitInit,
@@ -43,7 +43,6 @@ urlencoding.workspace = true
util.workspace = true
which.workspace = true
-
[dev-dependencies]
gpui = { workspace = true, features = ["test-support"] }
fs = { workspace = true, features = ["test-support"] }
@@ -328,8 +328,15 @@ impl RemoteClient {
let (incoming_tx, incoming_rx) = mpsc::unbounded::<Envelope>();
let (connection_activity_tx, connection_activity_rx) = mpsc::channel::<()>(1);
- let client =
- cx.update(|cx| ChannelClient::new(incoming_rx, outgoing_tx, cx, "client"))?;
+ let client = cx.update(|cx| {
+ ChannelClient::new(
+ incoming_rx,
+ outgoing_tx,
+ cx,
+ "client",
+ remote_connection.has_wsl_interop(),
+ )
+ })?;
let path_style = remote_connection.path_style();
let this = cx.new(|_| Self {
@@ -420,8 +427,9 @@ impl RemoteClient {
outgoing_tx: mpsc::UnboundedSender<Envelope>,
cx: &App,
name: &'static str,
+ has_wsl_interop: bool,
) -> AnyProtoClient {
- ChannelClient::new(incoming_rx, outgoing_tx, cx, name).into()
+ ChannelClient::new(incoming_rx, outgoing_tx, cx, name, has_wsl_interop).into()
}
pub fn shutdown_processes<T: RequestMessage>(
@@ -921,8 +929,8 @@ impl RemoteClient {
});
let (outgoing_tx, _) = mpsc::unbounded::<Envelope>();
let (_, incoming_rx) = mpsc::unbounded::<Envelope>();
- let server_client =
- server_cx.update(|cx| ChannelClient::new(incoming_rx, outgoing_tx, cx, "fake-server"));
+ let server_client = server_cx
+ .update(|cx| ChannelClient::new(incoming_rx, outgoing_tx, cx, "fake-server", false));
let connection: Arc<dyn RemoteConnection> = Arc::new(fake::FakeRemoteConnection {
connection_options: opts.clone(),
server_cx: fake::SendableCx::new(server_cx),
@@ -1140,6 +1148,7 @@ pub trait RemoteConnection: Send + Sync {
fn path_style(&self) -> PathStyle;
fn shell(&self) -> String;
fn default_system_shell(&self) -> String;
+ fn has_wsl_interop(&self) -> bool;
#[cfg(any(test, feature = "test-support"))]
fn simulate_disconnect(&self, _: &AsyncApp) {}
@@ -1188,6 +1197,7 @@ struct ChannelClient {
name: &'static str,
task: Mutex<Task<Result<()>>>,
remote_started: Signal<()>,
+ has_wsl_interop: bool,
}
impl ChannelClient {
@@ -1196,6 +1206,7 @@ impl ChannelClient {
outgoing_tx: mpsc::UnboundedSender<Envelope>,
cx: &App,
name: &'static str,
+ has_wsl_interop: bool,
) -> Arc<Self> {
Arc::new_cyclic(|this| Self {
outgoing_tx: Mutex::new(outgoing_tx),
@@ -1211,6 +1222,7 @@ impl ChannelClient {
&cx.to_async(),
)),
remote_started: Signal::new(cx),
+ has_wsl_interop,
})
}
@@ -1489,6 +1501,10 @@ impl ProtoClient for ChannelClient {
fn is_via_collab(&self) -> bool {
false
}
+
+ fn has_wsl_interop(&self) -> bool {
+ self.has_wsl_interop
+ }
}
#[cfg(any(test, feature = "test-support"))]
@@ -1652,6 +1668,10 @@ mod fake {
fn default_system_shell(&self) -> String {
"sh".to_owned()
}
+
+ fn has_wsl_interop(&self) -> bool {
+ false
+ }
}
pub(super) struct Delegate;
@@ -131,11 +131,7 @@ async fn build_remote_server_from_source(
let build_remote_server =
std::env::var("ZED_BUILD_REMOTE_SERVER").unwrap_or("nocompress".into());
- if build_remote_server == "false"
- || build_remote_server == "no"
- || build_remote_server == "off"
- || build_remote_server == "0"
- {
+ if let "false" | "no" | "off" | "0" = &*build_remote_server {
return Ok(None);
}
@@ -394,6 +394,10 @@ impl RemoteConnection for SshRemoteConnection {
fn path_style(&self) -> PathStyle {
self.ssh_path_style
}
+
+ fn has_wsl_interop(&self) -> bool {
+ false
+ }
}
impl SshRemoteConnection {
@@ -47,6 +47,7 @@ pub(crate) struct WslRemoteConnection {
shell: String,
shell_kind: ShellKind,
default_system_shell: String,
+ has_wsl_interop: bool,
connection_options: WslConnectionOptions,
}
@@ -71,6 +72,7 @@ impl WslRemoteConnection {
shell: String::new(),
shell_kind: ShellKind::Posix,
default_system_shell: String::from("/bin/sh"),
+ has_wsl_interop: false,
};
delegate.set_status(Some("Detecting WSL environment"), cx);
this.shell = this
@@ -79,6 +81,15 @@ impl WslRemoteConnection {
.context("failed detecting shell")?;
log::info!("Remote shell discovered: {}", this.shell);
this.shell_kind = ShellKind::new(&this.shell, false);
+ this.has_wsl_interop = this.detect_has_wsl_interop().await.unwrap_or_default();
+ log::info!(
+ "Remote has wsl interop {}",
+ if this.has_wsl_interop {
+ "enabled"
+ } else {
+ "disabled"
+ }
+ );
this.platform = this
.detect_platform()
.await
@@ -115,6 +126,14 @@ impl WslRemoteConnection {
.unwrap_or_else(|| "/bin/sh".to_string()))
}
+ async fn detect_has_wsl_interop(&self) -> Result<bool> {
+ Ok(self
+ .run_wsl_command_with_output("cat", &["/proc/sys/fs/binfmt_misc/WSLInterop"])
+ .await
+ .inspect_err(|err| log::error!("Failed to detect wsl interop: {err}"))?
+ .contains("enabled"))
+ }
+
async fn windows_path_to_wsl_path(&self, source: &Path) -> Result<String> {
windows_path_to_wsl_path_impl(&self.connection_options, source).await
}
@@ -317,6 +336,7 @@ impl RemoteConnection for WslRemoteConnection {
proxy_args.push(format!("{}={}", env_var, value));
}
}
+
proxy_args.push(remote_binary_path.display(PathStyle::Posix).into_owned());
proxy_args.push("proxy".to_owned());
proxy_args.push("--identifier".to_owned());
@@ -489,6 +509,10 @@ impl RemoteConnection for WslRemoteConnection {
fn default_system_shell(&self) -> String {
self.default_system_shell.clone()
}
+
+ fn has_wsl_interop(&self) -> bool {
+ self.has_wsl_interop
+ }
}
/// `wslpath` is a executable available in WSL, it's a linux binary.
@@ -199,6 +199,7 @@ fn start_server(
listeners: ServerListeners,
log_rx: Receiver<Vec<u8>>,
cx: &mut App,
+ is_wsl_interop: bool,
) -> AnyProtoClient {
// This is the server idle timeout. If no connection comes in this timeout, the server will shut down.
const IDLE_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(10 * 60);
@@ -318,7 +319,7 @@ fn start_server(
})
.detach();
- RemoteClient::proto_client_from_channels(incoming_rx, outgoing_tx, cx, "server")
+ RemoteClient::proto_client_from_channels(incoming_rx, outgoing_tx, cx, "server", is_wsl_interop)
}
fn init_paths() -> anyhow::Result<()> {
@@ -407,8 +408,15 @@ pub fn execute_run(
HeadlessProject::init(cx);
+ let is_wsl_interop = if cfg!(target_os = "linux") {
+ // See: https://learn.microsoft.com/en-us/windows/wsl/filesystems#disable-interoperability
+ matches!(std::fs::read_to_string("/proc/sys/fs/binfmt_misc/WSLInterop"), Ok(s) if s.contains("enabled"))
+ } else {
+ false
+ };
+
log::info!("gpui app started, initializing server");
- let session = start_server(listeners, log_rx, cx);
+ let session = start_server(listeners, log_rx, cx, is_wsl_interop);
GitHostingProviderRegistry::set_global(git_hosting_provider_registry, cx);
git_hosting_providers::init(cx);
@@ -59,6 +59,7 @@ pub trait ProtoClient: Send + Sync {
fn message_handler_set(&self) -> &parking_lot::Mutex<ProtoMessageHandlerSet>;
fn is_via_collab(&self) -> bool;
+ fn has_wsl_interop(&self) -> bool;
}
#[derive(Default)]
@@ -510,6 +511,10 @@ impl AnyProtoClient {
},
);
}
+
+ pub fn has_wsl_interop(&self) -> bool {
+ self.0.client.has_wsl_interop()
+ }
}
fn to_any_envelope<T: EnvelopedMessage>(
@@ -1031,7 +1031,7 @@ impl BufferSearchBar {
let new_match_index = searchable_item
.match_index_for_direction(matches, index, direction, count, window, cx);
- searchable_item.update_matches(matches, window, cx);
+ searchable_item.update_matches(matches, Some(new_match_index), window, cx);
searchable_item.activate_match(new_match_index, matches, window, cx);
}
}
@@ -1045,7 +1045,7 @@ impl BufferSearchBar {
if matches.is_empty() {
return;
}
- searchable_item.update_matches(matches, window, cx);
+ searchable_item.update_matches(matches, Some(0), window, cx);
searchable_item.activate_match(0, matches, window, cx);
}
}
@@ -1060,7 +1060,7 @@ impl BufferSearchBar {
return;
}
let new_match_index = matches.len() - 1;
- searchable_item.update_matches(matches, window, cx);
+ searchable_item.update_matches(matches, Some(new_match_index), window, cx);
searchable_item.activate_match(new_match_index, matches, window, cx);
}
}
@@ -1300,7 +1300,12 @@ impl BufferSearchBar {
if matches.is_empty() {
active_searchable_item.clear_matches(window, cx);
} else {
- active_searchable_item.update_matches(matches, window, cx);
+ active_searchable_item.update_matches(
+ matches,
+ this.active_match_index,
+ window,
+ cx,
+ );
}
let _ = done_tx.send(());
}
@@ -1335,6 +1340,18 @@ impl BufferSearchBar {
});
if new_index != self.active_match_index {
self.active_match_index = new_index;
+ if !self.dismissed {
+ if let Some(searchable_item) = self.active_searchable_item.as_ref() {
+ if let Some(matches) = self
+ .searchable_items_with_matches
+ .get(&searchable_item.downgrade())
+ {
+ if !matches.is_empty() {
+ searchable_item.update_matches(matches, new_index, window, cx);
+ }
+ }
+ }
+ }
cx.notify();
}
}
@@ -1444,6 +1444,7 @@ impl ProjectSearchView {
s.select_ranges([range_to_select])
});
});
+ self.highlight_matches(&match_ranges, Some(new_index), cx);
}
}
@@ -1518,11 +1519,6 @@ impl ProjectSearchView {
});
editor.scroll(Point::default(), Some(Axis::Vertical), window, cx);
}
- editor.highlight_background::<Self>(
- &match_ranges,
- |theme| theme.colors().search_match_background,
- cx,
- );
});
if is_new_search && self.query_editor.focus_handle(cx).is_focused(window) {
self.focus_results_editor(window, cx);
@@ -1535,18 +1531,41 @@ impl ProjectSearchView {
fn update_match_index(&mut self, cx: &mut Context<Self>) {
let results_editor = self.results_editor.read(cx);
+ let match_ranges = self.entity.read(cx).match_ranges.clone();
let new_index = active_match_index(
Direction::Next,
- &self.entity.read(cx).match_ranges,
+ &match_ranges,
&results_editor.selections.newest_anchor().head(),
&results_editor.buffer().read(cx).snapshot(cx),
);
+ self.highlight_matches(&match_ranges, new_index, cx);
if self.active_match_index != new_index {
self.active_match_index = new_index;
cx.notify();
}
}
+ fn highlight_matches(
+ &self,
+ match_ranges: &[Range<Anchor>],
+ active_index: Option<usize>,
+ cx: &mut Context<Self>,
+ ) {
+ self.results_editor.update(cx, |editor, cx| {
+ editor.highlight_background::<Self>(
+ match_ranges,
+ move |index, theme| {
+ if active_index == Some(*index) {
+ theme.colors().search_active_match_background
+ } else {
+ theme.colors().search_match_background
+ }
+ },
+ cx,
+ );
+ });
+ }
+
pub fn has_matches(&self) -> bool {
self.active_match_index.is_some()
}
@@ -2456,7 +2475,9 @@ pub mod tests {
use pretty_assertions::assert_eq;
use project::FakeFs;
use serde_json::json;
- use settings::{InlayHintSettingsContent, SettingsStore};
+ use settings::{
+ InlayHintSettingsContent, SettingsStore, ThemeColorsContent, ThemeStyleContent,
+ };
use util::{path, paths::PathStyle, rel_path::rel_path};
use util_macros::perf;
use workspace::DeploySearch;
@@ -2464,8 +2485,105 @@ pub mod tests {
#[perf]
#[gpui::test]
async fn test_project_search(cx: &mut TestAppContext) {
+ fn dp(row: u32, col: u32) -> DisplayPoint {
+ DisplayPoint::new(DisplayRow(row), col)
+ }
+
+ fn assert_active_match_index(
+ search_view: &WindowHandle<ProjectSearchView>,
+ cx: &mut TestAppContext,
+ expected_index: usize,
+ ) {
+ search_view
+ .update(cx, |search_view, _window, _cx| {
+ assert_eq!(search_view.active_match_index, Some(expected_index));
+ })
+ .unwrap();
+ }
+
+ fn assert_selection_range(
+ search_view: &WindowHandle<ProjectSearchView>,
+ cx: &mut TestAppContext,
+ expected_range: Range<DisplayPoint>,
+ ) {
+ search_view
+ .update(cx, |search_view, _window, cx| {
+ assert_eq!(
+ search_view.results_editor.update(cx, |editor, cx| editor
+ .selections
+ .display_ranges(&editor.display_snapshot(cx))),
+ [expected_range]
+ );
+ })
+ .unwrap();
+ }
+
+ fn assert_highlights(
+ search_view: &WindowHandle<ProjectSearchView>,
+ cx: &mut TestAppContext,
+ expected_highlights: Vec<(Range<DisplayPoint>, &str)>,
+ ) {
+ search_view
+ .update(cx, |search_view, window, cx| {
+ let match_bg = cx.theme().colors().search_match_background;
+ let active_match_bg = cx.theme().colors().search_active_match_background;
+ let selection_bg = cx
+ .theme()
+ .colors()
+ .editor_document_highlight_bracket_background;
+
+ let highlights: Vec<_> = expected_highlights
+ .into_iter()
+ .map(|(range, color_type)| {
+ let color = match color_type {
+ "active" => active_match_bg,
+ "match" => match_bg,
+ "selection" => selection_bg,
+ _ => panic!("Unknown color type"),
+ };
+ (range, color)
+ })
+ .collect();
+
+ assert_eq!(
+ search_view.results_editor.update(cx, |editor, cx| editor
+ .all_text_background_highlights(window, cx)),
+ highlights.as_slice()
+ );
+ })
+ .unwrap();
+ }
+
+ fn select_match(
+ search_view: &WindowHandle<ProjectSearchView>,
+ cx: &mut TestAppContext,
+ direction: Direction,
+ ) {
+ search_view
+ .update(cx, |search_view, window, cx| {
+ search_view.select_match(direction, window, cx);
+ })
+ .unwrap();
+ }
+
init_test(cx);
+ // Override active search match color since the fallback theme uses the same color
+ // for normal search match and active one, which can make this test less robust.
+ cx.update(|cx| {
+ SettingsStore::update_global(cx, |settings, cx| {
+ settings.update_user_settings(cx, |settings| {
+ settings.theme.experimental_theme_overrides = Some(ThemeStyleContent {
+ colors: ThemeColorsContent {
+ search_active_match_background: Some("#ff0000ff".to_string()),
+ ..Default::default()
+ },
+ ..Default::default()
+ });
+ });
+ });
+ });
+
let fs = FakeFs::new(cx.background_executor.clone());
fs.insert_tree(
path!("/dir"),
@@ -2486,113 +2604,113 @@ pub mod tests {
});
perform_search(search_view, "TWO", cx);
- search_view.update(cx, |search_view, window, cx| {
- assert_eq!(
- search_view
- .results_editor
- .update(cx, |editor, cx| editor.display_text(cx)),
- "\n\nconst THREE: usize = one::ONE + two::TWO;\n\n\nconst TWO: usize = one::ONE + one::ONE;"
- );
- let match_background_color = cx.theme().colors().search_match_background;
- let selection_background_color = cx.theme().colors().editor_document_highlight_bracket_background;
- assert_eq!(
- search_view
- .results_editor
- .update(cx, |editor, cx| editor.all_text_background_highlights(window, cx)),
- &[
- (
- DisplayPoint::new(DisplayRow(2), 32)..DisplayPoint::new(DisplayRow(2), 35),
- match_background_color
- ),
- (
- DisplayPoint::new(DisplayRow(2), 37)..DisplayPoint::new(DisplayRow(2), 40),
- selection_background_color
- ),
- (
- DisplayPoint::new(DisplayRow(2), 37)..DisplayPoint::new(DisplayRow(2), 40),
- match_background_color
- ),
- (
- DisplayPoint::new(DisplayRow(5), 6)..DisplayPoint::new(DisplayRow(5), 9),
- match_background_color
- ),
-
- ]
- );
- assert_eq!(search_view.active_match_index, Some(0));
- assert_eq!(
- search_view
- .results_editor
- .update(cx, |editor, cx| editor.selections.display_ranges(&editor.display_snapshot(cx))),
- [DisplayPoint::new(DisplayRow(2), 32)..DisplayPoint::new(DisplayRow(2), 35)]
- );
-
- search_view.select_match(Direction::Next, window, cx);
- }).unwrap();
+ cx.run_until_parked();
search_view
- .update(cx, |search_view, window, cx| {
- assert_eq!(search_view.active_match_index, Some(1));
+ .update(cx, |search_view, _window, cx| {
assert_eq!(
- search_view.results_editor.update(cx, |editor, cx| editor
- .selections
- .display_ranges(&editor.display_snapshot(cx))),
- [DisplayPoint::new(DisplayRow(2), 37)..DisplayPoint::new(DisplayRow(2), 40)]
+ search_view
+ .results_editor
+ .update(cx, |editor, cx| editor.display_text(cx)),
+ "\n\nconst THREE: usize = one::ONE + two::TWO;\n\n\nconst TWO: usize = one::ONE + one::ONE;"
);
- search_view.select_match(Direction::Next, window, cx);
})
.unwrap();
- search_view
- .update(cx, |search_view, window, cx| {
- assert_eq!(search_view.active_match_index, Some(2));
- assert_eq!(
- search_view.results_editor.update(cx, |editor, cx| editor
- .selections
- .display_ranges(&editor.display_snapshot(cx))),
- [DisplayPoint::new(DisplayRow(5), 6)..DisplayPoint::new(DisplayRow(5), 9)]
- );
- search_view.select_match(Direction::Next, window, cx);
- })
- .unwrap();
+ assert_active_match_index(&search_view, cx, 0);
+ assert_selection_range(&search_view, cx, dp(2, 32)..dp(2, 35));
+ assert_highlights(
+ &search_view,
+ cx,
+ vec![
+ (dp(2, 32)..dp(2, 35), "active"),
+ (dp(2, 37)..dp(2, 40), "selection"),
+ (dp(2, 37)..dp(2, 40), "match"),
+ (dp(5, 6)..dp(5, 9), "match"),
+ // TODO: we should be getting selection highlight here after project search
+ // but for some reason we are not getting it here
+ ],
+ );
+ select_match(&search_view, cx, Direction::Next);
+ cx.run_until_parked();
- search_view
- .update(cx, |search_view, window, cx| {
- assert_eq!(search_view.active_match_index, Some(0));
- assert_eq!(
- search_view.results_editor.update(cx, |editor, cx| editor
- .selections
- .display_ranges(&editor.display_snapshot(cx))),
- [DisplayPoint::new(DisplayRow(2), 32)..DisplayPoint::new(DisplayRow(2), 35)]
- );
- search_view.select_match(Direction::Prev, window, cx);
- })
- .unwrap();
+ assert_active_match_index(&search_view, cx, 1);
+ assert_selection_range(&search_view, cx, dp(2, 37)..dp(2, 40));
+ assert_highlights(
+ &search_view,
+ cx,
+ vec![
+ (dp(2, 32)..dp(2, 35), "selection"),
+ (dp(2, 32)..dp(2, 35), "match"),
+ (dp(2, 37)..dp(2, 40), "active"),
+ (dp(5, 6)..dp(5, 9), "selection"),
+ (dp(5, 6)..dp(5, 9), "match"),
+ ],
+ );
+ select_match(&search_view, cx, Direction::Next);
+ cx.run_until_parked();
- search_view
- .update(cx, |search_view, window, cx| {
- assert_eq!(search_view.active_match_index, Some(2));
- assert_eq!(
- search_view.results_editor.update(cx, |editor, cx| editor
- .selections
- .display_ranges(&editor.display_snapshot(cx))),
- [DisplayPoint::new(DisplayRow(5), 6)..DisplayPoint::new(DisplayRow(5), 9)]
- );
- search_view.select_match(Direction::Prev, window, cx);
- })
- .unwrap();
+ assert_active_match_index(&search_view, cx, 2);
+ assert_selection_range(&search_view, cx, dp(5, 6)..dp(5, 9));
+ assert_highlights(
+ &search_view,
+ cx,
+ vec![
+ (dp(2, 32)..dp(2, 35), "selection"),
+ (dp(2, 32)..dp(2, 35), "match"),
+ (dp(2, 37)..dp(2, 40), "selection"),
+ (dp(2, 37)..dp(2, 40), "match"),
+ (dp(5, 6)..dp(5, 9), "active"),
+ ],
+ );
+ select_match(&search_view, cx, Direction::Next);
+ cx.run_until_parked();
- search_view
- .update(cx, |search_view, _, cx| {
- assert_eq!(search_view.active_match_index, Some(1));
- assert_eq!(
- search_view.results_editor.update(cx, |editor, cx| editor
- .selections
- .display_ranges(&editor.display_snapshot(cx))),
- [DisplayPoint::new(DisplayRow(2), 37)..DisplayPoint::new(DisplayRow(2), 40)]
- );
- })
- .unwrap();
+ assert_active_match_index(&search_view, cx, 0);
+ assert_selection_range(&search_view, cx, dp(2, 32)..dp(2, 35));
+ assert_highlights(
+ &search_view,
+ cx,
+ vec![
+ (dp(2, 32)..dp(2, 35), "active"),
+ (dp(2, 37)..dp(2, 40), "selection"),
+ (dp(2, 37)..dp(2, 40), "match"),
+ (dp(5, 6)..dp(5, 9), "selection"),
+ (dp(5, 6)..dp(5, 9), "match"),
+ ],
+ );
+ select_match(&search_view, cx, Direction::Prev);
+ cx.run_until_parked();
+
+ assert_active_match_index(&search_view, cx, 2);
+ assert_selection_range(&search_view, cx, dp(5, 6)..dp(5, 9));
+ assert_highlights(
+ &search_view,
+ cx,
+ vec![
+ (dp(2, 32)..dp(2, 35), "selection"),
+ (dp(2, 32)..dp(2, 35), "match"),
+ (dp(2, 37)..dp(2, 40), "selection"),
+ (dp(2, 37)..dp(2, 40), "match"),
+ (dp(5, 6)..dp(5, 9), "active"),
+ ],
+ );
+ select_match(&search_view, cx, Direction::Prev);
+ cx.run_until_parked();
+
+ assert_active_match_index(&search_view, cx, 1);
+ assert_selection_range(&search_view, cx, dp(2, 37)..dp(2, 40));
+ assert_highlights(
+ &search_view,
+ cx,
+ vec![
+ (dp(2, 32)..dp(2, 35), "selection"),
+ (dp(2, 32)..dp(2, 35), "match"),
+ (dp(2, 37)..dp(2, 40), "active"),
+ (dp(5, 6)..dp(5, 9), "selection"),
+ (dp(5, 6)..dp(5, 9), "match"),
+ ],
+ );
}
#[perf]
@@ -1,6 +1,6 @@
/// Trait for recursively merging settings structures.
///
-/// When Zed starts it loads settinsg from `default.json` to initialize
+/// When Zed starts it loads settings from `default.json` to initialize
/// everything. These may be further refined by loading the user's settings,
/// and any settings profiles; and then further refined by loading any
/// local project settings.
@@ -61,6 +61,7 @@ pub struct AmazonBedrockSettingsContent {
pub region: Option<String>,
pub profile: Option<String>,
pub authentication_method: Option<BedrockAuthMethodContent>,
+ pub allow_global: Option<bool>,
}
#[with_fallible_options]
@@ -570,6 +570,9 @@ pub struct ThemeColorsContent {
#[serde(rename = "search.match_background")]
pub search_match_background: Option<String>,
+ #[serde(rename = "search.active_match_background")]
+ pub search_active_match_background: Option<String>,
+
#[serde(rename = "panel.background")]
pub panel_background: Option<String>,
@@ -152,14 +152,31 @@ pub struct PreviewTabsSettingsContent {
///
/// Default: true
pub enabled: Option<bool>,
+ /// Whether to open tabs in preview mode when opened from the project panel with a single click.
+ ///
+ /// Default: true
+ pub enable_preview_from_project_panel: Option<bool>,
/// Whether to open tabs in preview mode when selected from the file finder.
///
/// Default: false
pub enable_preview_from_file_finder: Option<bool>,
- /// Whether a preview tab gets replaced when code navigation is used to navigate away from the tab.
+ /// Whether to open tabs in preview mode when opened from a multibuffer.
+ ///
+ /// Default: true
+ pub enable_preview_from_multibuffer: Option<bool>,
+ /// Whether to open tabs in preview mode when code navigation is used to open a multibuffer.
+ ///
+ /// Default: false
+ pub enable_preview_multibuffer_from_code_navigation: Option<bool>,
+ /// Whether to open tabs in preview mode when code navigation is used to open a single file.
+ ///
+ /// Default: true
+ pub enable_preview_file_from_code_navigation: Option<bool>,
+ /// Whether to keep tabs in preview mode when code navigation is used to navigate away from them.
+ /// If `enable_preview_file_from_code_navigation` or `enable_preview_multibuffer_from_code_navigation` is also true, the new tab may replace the existing one.
///
/// Default: false
- pub enable_preview_from_code_navigation: Option<bool>,
+ pub enable_keep_preview_on_code_navigation: Option<bool>,
}
#[derive(
@@ -619,9 +619,13 @@ impl VsCodeSettings {
fn preview_tabs_settings_content(&self) -> Option<PreviewTabsSettingsContent> {
skip_default(PreviewTabsSettingsContent {
enabled: self.read_bool("workbench.editor.enablePreview"),
+ enable_preview_from_project_panel: None,
enable_preview_from_file_finder: self
.read_bool("workbench.editor.enablePreviewFromQuickOpen"),
- enable_preview_from_code_navigation: self
+ enable_preview_from_multibuffer: None,
+ enable_preview_multibuffer_from_code_navigation: None,
+ enable_preview_file_from_code_navigation: None,
+ enable_keep_preview_on_code_navigation: self
.read_bool("workbench.editor.enablePreviewFromCodeNavigation"),
})
}
@@ -3145,7 +3145,7 @@ pub(crate) fn settings_data(cx: &App) -> Vec<SettingsPage> {
SettingsPageItem::SectionHeader("Preview Tabs"),
SettingsPageItem::SettingItem(SettingItem {
title: "Preview Tabs Enabled",
- description: "Show opened editors as Preview tabs.",
+ description: "Show opened editors as preview tabs.",
field: Box::new(SettingField {
json_path: Some("preview_tabs.enabled"),
pick: |settings_content| {
@@ -3161,9 +3161,31 @@ pub(crate) fn settings_data(cx: &App) -> Vec<SettingsPage> {
metadata: None,
files: USER,
}),
+ SettingsPageItem::SettingItem(SettingItem {
+ title: "Enable Preview From Project Panel",
+ description: "Whether to open tabs in preview mode when opened from the project panel with a single click.",
+ field: Box::new(SettingField {
+ json_path: Some("preview_tabs.enable_preview_from_project_panel"),
+ pick: |settings_content| {
+ settings_content
+ .preview_tabs
+ .as_ref()?
+ .enable_preview_from_project_panel
+ .as_ref()
+ },
+ write: |settings_content, value| {
+ settings_content
+ .preview_tabs
+ .get_or_insert_default()
+ .enable_preview_from_project_panel = value;
+ },
+ }),
+ metadata: None,
+ files: USER,
+ }),
SettingsPageItem::SettingItem(SettingItem {
title: "Enable Preview From File Finder",
- description: "Whether to open tabs in Preview mode when selected from the file finder.",
+ description: "Whether to open tabs in preview mode when selected from the file finder.",
field: Box::new(SettingField {
json_path: Some("preview_tabs.enable_preview_from_file_finder"),
pick: |settings_content| {
@@ -3184,22 +3206,88 @@ pub(crate) fn settings_data(cx: &App) -> Vec<SettingsPage> {
files: USER,
}),
SettingsPageItem::SettingItem(SettingItem {
- title: "Enable Preview From Code Navigation",
- description: "Whether a preview tab gets replaced when code navigation is used to navigate away from the tab.",
+ title: "Enable Preview From Multibuffer",
+ description: "Whether to open tabs in preview mode when opened from a multibuffer.",
+ field: Box::new(SettingField {
+ json_path: Some("preview_tabs.enable_preview_from_multibuffer"),
+ pick: |settings_content| {
+ settings_content
+ .preview_tabs
+ .as_ref()?
+ .enable_preview_from_multibuffer
+ .as_ref()
+ },
+ write: |settings_content, value| {
+ settings_content
+ .preview_tabs
+ .get_or_insert_default()
+ .enable_preview_from_multibuffer = value;
+ },
+ }),
+ metadata: None,
+ files: USER,
+ }),
+ SettingsPageItem::SettingItem(SettingItem {
+ title: "Enable Preview Multibuffer From Code Navigation",
+ description: "Whether to open tabs in preview mode when code navigation is used to open a multibuffer.",
+ field: Box::new(SettingField {
+ json_path: Some("preview_tabs.enable_preview_multibuffer_from_code_navigation"),
+ pick: |settings_content| {
+ settings_content
+ .preview_tabs
+ .as_ref()?
+ .enable_preview_multibuffer_from_code_navigation
+ .as_ref()
+ },
+ write: |settings_content, value| {
+ settings_content
+ .preview_tabs
+ .get_or_insert_default()
+ .enable_preview_multibuffer_from_code_navigation = value;
+ },
+ }),
+ metadata: None,
+ files: USER,
+ }),
+ SettingsPageItem::SettingItem(SettingItem {
+ title: "Enable Preview File From Code Navigation",
+ description: "Whether to open tabs in preview mode when code navigation is used to open a single file.",
+ field: Box::new(SettingField {
+ json_path: Some("preview_tabs.enable_preview_file_from_code_navigation"),
+ pick: |settings_content| {
+ settings_content
+ .preview_tabs
+ .as_ref()?
+ .enable_preview_file_from_code_navigation
+ .as_ref()
+ },
+ write: |settings_content, value| {
+ settings_content
+ .preview_tabs
+ .get_or_insert_default()
+ .enable_preview_file_from_code_navigation = value;
+ },
+ }),
+ metadata: None,
+ files: USER,
+ }),
+ SettingsPageItem::SettingItem(SettingItem {
+ title: "Enable Keep Preview On Code Navigation",
+ description: "Whether to keep tabs in preview mode when code navigation is used to navigate away from them. If `enable_preview_file_from_code_navigation` or `enable_preview_multibuffer_from_code_navigation` is also true, the new tab may replace the existing one.",
field: Box::new(SettingField {
- json_path: Some("preview_tabs.enable_preview_from_code_navigation"),
+ json_path: Some("preview_tabs.enable_keep_preview_on_code_navigation"),
pick: |settings_content| {
settings_content
.preview_tabs
.as_ref()?
- .enable_preview_from_code_navigation
+ .enable_keep_preview_on_code_navigation
.as_ref()
},
write: |settings_content, value| {
settings_content
.preview_tabs
.get_or_insert_default()
- .enable_preview_from_code_navigation = value;
+ .enable_keep_preview_on_code_navigation = value;
},
}),
metadata: None,
@@ -6526,7 +6614,7 @@ fn language_settings_data() -> Vec<SettingsPageItem> {
files: USER | PROJECT,
}),
SettingsPageItem::SettingItem(SettingItem {
- title: "Jsx Tag Auto Close",
+ title: "JSX Tag Auto Close",
description: "Whether to automatically close JSX tags.",
field: Box::new(SettingField {
json_path: Some("languages.$(language).jsx_tag_auto_close"),
@@ -7053,7 +7141,7 @@ fn language_settings_data() -> Vec<SettingsPageItem> {
files: USER | PROJECT,
}),
SettingsPageItem::SettingItem(SettingItem {
- title: "Colorize brackets",
+ title: "Colorize Brackets",
description: "Whether to colorize brackets in the editor.",
field: Box::new(SettingField {
json_path: Some("languages.$(language).colorize_brackets"),
@@ -23,9 +23,9 @@ use ui::{
};
use util::ResultExt;
use workspace::{
- ModalView, Pane, SaveIntent, Workspace,
+ Event as WorkspaceEvent, ModalView, Pane, SaveIntent, Workspace,
item::{ItemHandle, ItemSettings, ShowDiagnostics, TabContentParams},
- pane::{Event as PaneEvent, render_item_indicator, tab_details},
+ pane::{render_item_indicator, tab_details},
};
const PANEL_WIDTH_REMS: f32 = 28.;
@@ -322,7 +322,7 @@ impl TabSwitcherDelegate {
cx: &mut Context<TabSwitcher>,
original_items: Vec<(Entity<Pane>, usize)>,
) -> Self {
- Self::subscribe_to_updates(&pane, window, cx);
+ Self::subscribe_to_updates(&workspace, window, cx);
Self {
select_last,
tab_switcher,
@@ -338,22 +338,36 @@ impl TabSwitcherDelegate {
}
fn subscribe_to_updates(
- pane: &WeakEntity<Pane>,
+ workspace: &WeakEntity<Workspace>,
window: &mut Window,
cx: &mut Context<TabSwitcher>,
) {
- let Some(pane) = pane.upgrade() else {
+ let Some(workspace) = workspace.upgrade() else {
return;
};
- cx.subscribe_in(&pane, window, |tab_switcher, _, event, window, cx| {
+ cx.subscribe_in(&workspace, window, |tab_switcher, _, event, window, cx| {
match event {
- PaneEvent::AddItem { .. }
- | PaneEvent::RemovedItem { .. }
- | PaneEvent::Remove { .. } => tab_switcher.picker.update(cx, |picker, cx| {
- let query = picker.query(cx);
- picker.delegate.update_matches(query, window, cx);
- cx.notify();
- }),
+ WorkspaceEvent::ItemAdded { .. } | WorkspaceEvent::PaneRemoved => {
+ tab_switcher.picker.update(cx, |picker, cx| {
+ let query = picker.query(cx);
+ picker.delegate.update_matches(query, window, cx);
+ cx.notify();
+ })
+ }
+ WorkspaceEvent::ItemRemoved { .. } => {
+ tab_switcher.picker.update(cx, |picker, cx| {
+ let query = picker.query(cx);
+ picker.delegate.update_matches(query, window, cx);
+
+ // When the Tab Switcher is being used and an item is
+ // removed, there's a chance that the new selected index
+ // will not match the actual tab that is now being displayed
+ // by the pane, as such, the selected index needs to be
+ // updated to match the pane's state.
+ picker.delegate.sync_selected_index(cx);
+ cx.notify();
+ })
+ }
_ => {}
};
})
@@ -540,11 +554,40 @@ impl TabSwitcherDelegate {
let Some(pane) = tab_match.pane.upgrade() else {
return;
};
+
pane.update(cx, |pane, cx| {
pane.close_item_by_id(tab_match.item.item_id(), SaveIntent::Close, window, cx)
.detach_and_log_err(cx);
});
}
+
+ /// Updates the selected index to ensure it matches the pane's active item,
+ /// as the pane's active item can be indirectly updated and this method
+ /// ensures that the picker can react to those changes.
+ fn sync_selected_index(&mut self, cx: &mut Context<Picker<TabSwitcherDelegate>>) {
+ let item = if self.is_all_panes {
+ self.workspace
+ .read_with(cx, |workspace, cx| workspace.active_item(cx))
+ } else {
+ self.pane.read_with(cx, |pane, _cx| pane.active_item())
+ };
+
+ let Ok(Some(item)) = item else {
+ return;
+ };
+
+ let item_id = item.item_id();
+ let Some((index, _tab_match)) = self
+ .matches
+ .iter()
+ .enumerate()
+ .find(|(_index, tab_match)| tab_match.item.item_id() == item_id)
+ else {
+ return;
+ };
+
+ self.selected_index = index;
+ }
}
impl PickerDelegate for TabSwitcherDelegate {
@@ -5,7 +5,7 @@ use menu::SelectPrevious;
use project::{Project, ProjectPath};
use serde_json::json;
use util::{path, rel_path::rel_path};
-use workspace::{AppState, Workspace};
+use workspace::{ActivatePreviousItem, AppState, Workspace};
#[ctor::ctor]
fn init_logger() {
@@ -197,6 +197,8 @@ async fn test_close_selected_item(cx: &mut gpui::TestAppContext) {
json!({
"1.txt": "First file",
"2.txt": "Second file",
+ "3.txt": "Third file",
+ "4.txt": "Fourth file",
}),
)
.await;
@@ -206,80 +208,47 @@ async fn test_close_selected_item(cx: &mut gpui::TestAppContext) {
cx.add_window_view(|window, cx| Workspace::test_new(project.clone(), window, cx));
let tab_1 = open_buffer("1.txt", &workspace, cx).await;
+ let tab_3 = open_buffer("3.txt", &workspace, cx).await;
let tab_2 = open_buffer("2.txt", &workspace, cx).await;
+ let tab_4 = open_buffer("4.txt", &workspace, cx).await;
+
+ // After opening all buffers, let's navigate to the previous item two times, finishing with:
+ //
+ // 1.txt | [3.txt] | 2.txt | 4.txt
+ //
+ // With 3.txt being the active item in the pane.
+ cx.dispatch_action(ActivatePreviousItem);
+ cx.dispatch_action(ActivatePreviousItem);
+ cx.run_until_parked();
cx.simulate_modifiers_change(Modifiers::control());
let tab_switcher = open_tab_switcher(false, &workspace, cx);
tab_switcher.update(cx, |tab_switcher, _| {
- assert_eq!(tab_switcher.delegate.matches.len(), 2);
- assert_match_at_position(tab_switcher, 0, tab_2.boxed_clone());
- assert_match_selection(tab_switcher, 1, tab_1.boxed_clone());
+ assert_eq!(tab_switcher.delegate.matches.len(), 4);
+ assert_match_at_position(tab_switcher, 0, tab_3.boxed_clone());
+ assert_match_selection(tab_switcher, 1, tab_2.boxed_clone());
+ assert_match_at_position(tab_switcher, 2, tab_4.boxed_clone());
+ assert_match_at_position(tab_switcher, 3, tab_1.boxed_clone());
});
cx.simulate_modifiers_change(Modifiers::control());
cx.dispatch_action(CloseSelectedItem);
tab_switcher.update(cx, |tab_switcher, _| {
- assert_eq!(tab_switcher.delegate.matches.len(), 1);
- assert_match_selection(tab_switcher, 0, tab_2);
+ assert_eq!(tab_switcher.delegate.matches.len(), 3);
+ assert_match_selection(tab_switcher, 0, tab_3);
+ assert_match_at_position(tab_switcher, 1, tab_4);
+ assert_match_at_position(tab_switcher, 2, tab_1);
});
// Still switches tab on modifiers release
cx.simulate_modifiers_change(Modifiers::none());
cx.read(|cx| {
let active_editor = workspace.read(cx).active_item_as::<Editor>(cx).unwrap();
- assert_eq!(active_editor.read(cx).title(cx), "2.txt");
+ assert_eq!(active_editor.read(cx).title(cx), "3.txt");
});
assert_tab_switcher_is_closed(workspace, cx);
}
-#[gpui::test]
-async fn test_close_preserves_selected_position(cx: &mut gpui::TestAppContext) {
- let app_state = init_test(cx);
- app_state
- .fs
- .as_fake()
- .insert_tree(
- path!("/root"),
- json!({
- "1.txt": "First file",
- "2.txt": "Second file",
- "3.txt": "Third file",
- }),
- )
- .await;
-
- let project = Project::test(app_state.fs.clone(), [path!("/root").as_ref()], cx).await;
- let (workspace, cx) =
- cx.add_window_view(|window, cx| Workspace::test_new(project.clone(), window, cx));
-
- let tab_1 = open_buffer("1.txt", &workspace, cx).await;
- let tab_2 = open_buffer("2.txt", &workspace, cx).await;
- let tab_3 = open_buffer("3.txt", &workspace, cx).await;
-
- let tab_switcher = open_tab_switcher(false, &workspace, cx);
- tab_switcher.update(cx, |tab_switcher, _| {
- assert_eq!(tab_switcher.delegate.matches.len(), 3);
- assert_match_at_position(tab_switcher, 0, tab_3.boxed_clone());
- assert_match_selection(tab_switcher, 1, tab_2.boxed_clone());
- assert_match_at_position(tab_switcher, 2, tab_1.boxed_clone());
- });
-
- // Verify that if the selected tab was closed, tab at the same position is selected.
- cx.dispatch_action(CloseSelectedItem);
- tab_switcher.update(cx, |tab_switcher, _| {
- assert_eq!(tab_switcher.delegate.matches.len(), 2);
- assert_match_at_position(tab_switcher, 0, tab_3.boxed_clone());
- assert_match_selection(tab_switcher, 1, tab_1.boxed_clone());
- });
-
- // But if the position is no longer valid, fall back to the position above.
- cx.dispatch_action(CloseSelectedItem);
- tab_switcher.update(cx, |tab_switcher, _| {
- assert_eq!(tab_switcher.delegate.matches.len(), 1);
- assert_match_selection(tab_switcher, 0, tab_3.boxed_clone());
- });
-}
-
fn init_test(cx: &mut TestAppContext) -> Arc<AppState> {
cx.update(|cx| {
let state = AppState::test(cx);
@@ -1434,6 +1434,7 @@ impl SearchableItem for TerminalView {
fn update_matches(
&mut self,
matches: &[Self::Match],
+ _active_match_index: Option<usize>,
_window: &mut Window,
cx: &mut Context<Self>,
) {
@@ -91,6 +91,7 @@ impl ThemeColors {
tab_inactive_background: neutral().light().step_2(),
tab_active_background: neutral().light().step_1(),
search_match_background: neutral().light().step_5(),
+ search_active_match_background: neutral().light().step_7(),
panel_background: neutral().light().step_2(),
panel_focused_border: blue().light().step_10(),
panel_indent_guide: neutral().light_alpha().step_5(),
@@ -228,6 +229,7 @@ impl ThemeColors {
tab_inactive_background: neutral().dark().step_2(),
tab_active_background: neutral().dark().step_1(),
search_match_background: neutral().dark().step_5(),
+ search_active_match_background: neutral().dark().step_3(),
panel_background: neutral().dark().step_2(),
panel_focused_border: blue().dark().step_8(),
panel_indent_guide: neutral().dark_alpha().step_4(),
@@ -152,6 +152,7 @@ pub(crate) fn zed_default_dark() -> Theme {
tab_inactive_background: bg,
tab_active_background: editor,
search_match_background: bg,
+ search_active_match_background: bg,
editor_background: editor,
editor_gutter_background: editor,
@@ -287,6 +287,15 @@ pub fn theme_colors_refinement(
.panel_background
.as_ref()
.and_then(|color| try_parse_color(color).ok());
+ let search_match_background = this
+ .search_match_background
+ .as_ref()
+ .and_then(|color| try_parse_color(color).ok());
+ let search_active_match_background = this
+ .search_active_match_background
+ .as_ref()
+ .and_then(|color| try_parse_color(color).ok())
+ .or(search_match_background);
ThemeColorsRefinement {
border,
border_variant: this
@@ -442,10 +451,8 @@ pub fn theme_colors_refinement(
.tab_active_background
.as_ref()
.and_then(|color| try_parse_color(color).ok()),
- search_match_background: this
- .search_match_background
- .as_ref()
- .and_then(|color| try_parse_color(color).ok()),
+ search_match_background: search_match_background,
+ search_active_match_background: search_active_match_background,
panel_background,
panel_focused_border: this
.panel_focused_border
@@ -128,6 +128,7 @@ pub struct ThemeColors {
pub tab_inactive_background: Hsla,
pub tab_active_background: Hsla,
pub search_match_background: Hsla,
+ pub search_active_match_background: Hsla,
pub panel_background: Hsla,
pub panel_focused_border: Hsla,
pub panel_indent_guide: Hsla,
@@ -352,6 +353,7 @@ pub enum ThemeColorField {
TabInactiveBackground,
TabActiveBackground,
SearchMatchBackground,
+ SearchActiveMatchBackground,
PanelBackground,
PanelFocusedBorder,
PanelIndentGuide,
@@ -467,6 +469,7 @@ impl ThemeColors {
ThemeColorField::TabInactiveBackground => self.tab_inactive_background,
ThemeColorField::TabActiveBackground => self.tab_active_background,
ThemeColorField::SearchMatchBackground => self.search_match_background,
+ ThemeColorField::SearchActiveMatchBackground => self.search_active_match_background,
ThemeColorField::PanelBackground => self.panel_background,
ThemeColorField::PanelFocusedBorder => self.panel_focused_border,
ThemeColorField::PanelIndentGuide => self.panel_indent_guide,
@@ -588,19 +588,20 @@ impl ToolchainSelector {
.worktree_for_id(worktree_id, cx)?
.read(cx)
.abs_path();
- let workspace_id = workspace.database_id()?;
let weak = workspace.weak_handle();
cx.spawn_in(window, async move |workspace, cx| {
- let active_toolchain = workspace::WORKSPACE_DB
- .toolchain(
- workspace_id,
- worktree_id,
- relative_path.clone(),
- language_name.clone(),
- )
- .await
- .ok()
- .flatten();
+ let active_toolchain = project
+ .read_with(cx, |this, cx| {
+ this.active_toolchain(
+ ProjectPath {
+ worktree_id,
+ path: relative_path.clone(),
+ },
+ language_name.clone(),
+ cx,
+ )
+ })?
+ .await;
workspace
.update_in(cx, |this, window, cx| {
this.toggle_modal(window, cx, move |window, cx| {
@@ -618,6 +619,7 @@ impl ToolchainSelector {
});
})
.ok();
+ anyhow::Ok(())
})
.detach();
@@ -2388,10 +2388,16 @@ fn matching(map: &DisplaySnapshot, display_point: DisplayPoint) -> DisplayPoint
.or_else(|| snapshot.innermost_enclosing_bracket_ranges(offset..offset, None));
if let Some((opening_range, closing_range)) = bracket_ranges {
- if opening_range.contains(&offset) {
- return closing_range.start.to_display_point(map);
- } else if closing_range.contains(&offset) {
- return opening_range.start.to_display_point(map);
+ let mut chars = map.buffer_snapshot().chars_at(offset);
+ match chars.next() {
+ Some('/') => {}
+ _ => {
+ if opening_range.contains(&offset) {
+ return closing_range.start.to_display_point(map);
+ } else if closing_range.contains(&offset) {
+ return opening_range.start.to_display_point(map);
+ }
+ }
}
}
@@ -3443,6 +3449,23 @@ mod test {
test = "test"
/>
</a>"#});
+
+ // test nested closing tag
+ cx.set_shared_state(indoc! {r#"<html>
+ <bˇody>
+ </body>
+ </html>"#})
+ .await;
+ cx.simulate_shared_keystrokes("%").await;
+ cx.shared_state().await.assert_eq(indoc! {r#"<html>
+ <body>
+ <ˇ/body>
+ </html>"#});
+ cx.simulate_shared_keystrokes("%").await;
+ cx.shared_state().await.assert_eq(indoc! {r#"<html>
+ <ˇbody>
+ </body>
+ </html>"#});
}
#[gpui::test]
@@ -121,7 +121,11 @@ impl Vim {
});
});
if objects_found {
- vim.copy_selections_content(editor, MotionKind::Exclusive, window, cx);
+ let kind = match object.target_visual_mode(vim.mode, around) {
+ Mode::VisualLine => MotionKind::Linewise,
+ _ => MotionKind::Exclusive,
+ };
+ vim.copy_selections_content(editor, kind, window, cx);
editor.insert("", window, cx);
editor.refresh_edit_prediction(true, false, window, cx);
}
@@ -81,7 +81,11 @@ impl Vim {
start_positions.insert(selection.id, start_position);
});
});
- vim.yank_selections_content(editor, MotionKind::Exclusive, window, cx);
+ let kind = match object.target_visual_mode(vim.mode, around) {
+ Mode::VisualLine => MotionKind::Linewise,
+ _ => MotionKind::Exclusive,
+ };
+ vim.yank_selections_content(editor, kind, window, cx);
editor.change_selections(SelectionEffects::no_scroll(), window, cx, |s| {
s.move_with(|_, selection| {
let (head, goal) = start_positions.remove(&selection.id).unwrap();
@@ -223,7 +227,7 @@ impl Vim {
editor.highlight_background::<HighlightOnYank>(
&ranges_to_highlight,
- |colors| colors.colors().editor_document_highlight_read_background,
+ |_, colors| colors.colors().editor_document_highlight_read_background,
cx,
);
cx.spawn(async move |this, cx| {
@@ -273,7 +273,7 @@ impl Vim {
let ranges = [new_range];
editor.highlight_background::<VimExchange>(
&ranges,
- |theme| theme.colors().editor_document_highlight_read_background,
+ |_, theme| theme.colors().editor_document_highlight_read_background,
cx,
);
}
@@ -2253,6 +2253,79 @@ async fn test_paragraph_multi_delete(cx: &mut gpui::TestAppContext) {
cx.shared_state().await.assert_eq(indoc! {"ˇ"});
}
+#[perf]
+#[gpui::test]
+async fn test_yank_paragraph_with_paste(cx: &mut gpui::TestAppContext) {
+ let mut cx = NeovimBackedTestContext::new(cx).await;
+ cx.set_shared_state(indoc! {
+ "
+ first paragraph
+ ˇstill first
+
+ second paragraph
+ still second
+
+ third paragraph
+ "
+ })
+ .await;
+
+ cx.simulate_shared_keystrokes("y a p").await;
+ cx.shared_clipboard()
+ .await
+ .assert_eq("first paragraph\nstill first\n\n");
+
+ cx.simulate_shared_keystrokes("j j p").await;
+ cx.shared_state().await.assert_eq(indoc! {
+ "
+ first paragraph
+ still first
+
+ ˇfirst paragraph
+ still first
+
+ second paragraph
+ still second
+
+ third paragraph
+ "
+ });
+}
+
+#[perf]
+#[gpui::test]
+async fn test_change_paragraph(cx: &mut gpui::TestAppContext) {
+ let mut cx = NeovimBackedTestContext::new(cx).await;
+ cx.set_shared_state(indoc! {
+ "
+ first paragraph
+ ˇstill first
+
+ second paragraph
+ still second
+
+ third paragraph
+ "
+ })
+ .await;
+
+ cx.simulate_shared_keystrokes("c a p").await;
+ cx.shared_clipboard()
+ .await
+ .assert_eq("first paragraph\nstill first\n\n");
+
+ cx.simulate_shared_keystrokes("escape").await;
+ cx.shared_state().await.assert_eq(indoc! {
+ "
+ ˇ
+ second paragraph
+ still second
+
+ third paragraph
+ "
+ });
+}
+
#[perf]
#[gpui::test]
async fn test_multi_cursor_replay(cx: &mut gpui::TestAppContext) {
@@ -0,0 +1,8 @@
+{"Put":{"state":"first paragraph\nˇstill first\n\nsecond paragraph\nstill second\n\nthird paragraph\n"}}
+{"Key":"c"}
+{"Key":"a"}
+{"Key":"p"}
+{"Get":{"state":"ˇ\nsecond paragraph\nstill second\n\nthird paragraph\n","mode":"Insert"}}
+{"ReadRegister":{"name":"\"","value":"first paragraph\nstill first\n\n"}}
+{"Key":"escape"}
+{"Get":{"state":"ˇ\nsecond paragraph\nstill second\n\nthird paragraph\n","mode":"Normal"}}
@@ -13,3 +13,8 @@
{"Put":{"state":"<a>\n <br\n test = \"test\"\n /ˇ>\n</a>"}}
{"Key":"%"}
{"Get":{"state":"<a>\n ˇ<br\n test = \"test\"\n />\n</a>","mode":"Normal"}}
+{"Put":{"state":"<html>\n <bˇody>\n </body>\n</html>"}}
+{"Key":"%"}
+{"Get":{"state":"<html>\n <body>\n <ˇ/body>\n</html>","mode":"Normal"}}
+{"Key":"%"}
+{"Get":{"state":"<html>\n <ˇbody>\n </body>\n</html>","mode":"Normal"}}
@@ -0,0 +1,10 @@
+{"Put":{"state":"first paragraph\nˇstill first\n\nsecond paragraph\nstill second\n\nthird paragraph\n"}}
+{"Key":"y"}
+{"Key":"a"}
+{"Key":"p"}
+{"Get":{"state":"ˇfirst paragraph\nstill first\n\nsecond paragraph\nstill second\n\nthird paragraph\n","mode":"Normal"}}
+{"ReadRegister":{"name":"\"","value":"first paragraph\nstill first\n\n"}}
+{"Key":"j"}
+{"Key":"j"}
+{"Key":"p"}
+{"Get":{"state":"first paragraph\nstill first\n\nˇfirst paragraph\nstill first\n\nsecond paragraph\nstill second\n\nthird paragraph\n","mode":"Normal"}}
@@ -64,8 +64,12 @@ pub struct ItemSettings {
#[derive(RegisterSetting)]
pub struct PreviewTabsSettings {
pub enabled: bool,
+ pub enable_preview_from_project_panel: bool,
pub enable_preview_from_file_finder: bool,
- pub enable_preview_from_code_navigation: bool,
+ pub enable_preview_from_multibuffer: bool,
+ pub enable_preview_multibuffer_from_code_navigation: bool,
+ pub enable_preview_file_from_code_navigation: bool,
+ pub enable_keep_preview_on_code_navigation: bool,
}
impl Settings for ItemSettings {
@@ -87,9 +91,19 @@ impl Settings for PreviewTabsSettings {
let preview_tabs = content.preview_tabs.as_ref().unwrap();
Self {
enabled: preview_tabs.enabled.unwrap(),
+ enable_preview_from_project_panel: preview_tabs
+ .enable_preview_from_project_panel
+ .unwrap(),
enable_preview_from_file_finder: preview_tabs.enable_preview_from_file_finder.unwrap(),
- enable_preview_from_code_navigation: preview_tabs
- .enable_preview_from_code_navigation
+ enable_preview_from_multibuffer: preview_tabs.enable_preview_from_multibuffer.unwrap(),
+ enable_preview_multibuffer_from_code_navigation: preview_tabs
+ .enable_preview_multibuffer_from_code_navigation
+ .unwrap(),
+ enable_preview_file_from_code_navigation: preview_tabs
+ .enable_preview_file_from_code_navigation
+ .unwrap(),
+ enable_keep_preview_on_code_navigation: preview_tabs
+ .enable_keep_preview_on_code_navigation
.unwrap(),
}
}
@@ -873,10 +873,35 @@ impl Pane {
self.preview_item_id == Some(item_id)
}
+ /// Promotes the item with the given ID to not be a preview item.
+ /// This does nothing if it wasn't already a preview item.
+ pub fn unpreview_item_if_preview(&mut self, item_id: EntityId) {
+ if self.is_active_preview_item(item_id) {
+ self.preview_item_id = None;
+ }
+ }
+
+ /// Marks the item with the given ID as the preview item.
+ /// This will be ignored if the global setting `preview_tabs` is disabled.
+ ///
+ /// The old preview item (if there was one) is closed and its index is returned.
+ pub fn replace_preview_item_id(
+ &mut self,
+ item_id: EntityId,
+ window: &mut Window,
+ cx: &mut Context<Self>,
+ ) -> Option<usize> {
+ let idx = self.close_current_preview_item(window, cx);
+ self.set_preview_item_id(Some(item_id), cx);
+ idx
+ }
+
/// Marks the item with the given ID as the preview item.
/// This will be ignored if the global setting `preview_tabs` is disabled.
- pub fn set_preview_item_id(&mut self, item_id: Option<EntityId>, cx: &App) {
- if PreviewTabsSettings::get_global(cx).enabled {
+ ///
+ /// This is a low-level method. Prefer `unpreview_item_if_preview()` or `set_new_preview_item()`.
+ pub(crate) fn set_preview_item_id(&mut self, item_id: Option<EntityId>, cx: &App) {
+ if item_id.is_none() || PreviewTabsSettings::get_global(cx).enabled {
self.preview_item_id = item_id;
}
}
@@ -895,7 +920,7 @@ impl Pane {
&& preview_item.item_id() == item_id
&& !preview_item.preserve_preview(cx)
{
- self.set_preview_item_id(None, cx);
+ self.unpreview_item_if_preview(item_id);
}
}
@@ -936,14 +961,8 @@ impl Pane {
let set_up_existing_item =
|index: usize, pane: &mut Self, window: &mut Window, cx: &mut Context<Self>| {
- // If the item is already open, and the item is a preview item
- // and we are not allowing items to open as preview, mark the item as persistent.
- if let Some(preview_item_id) = pane.preview_item_id
- && let Some(tab) = pane.items.get(index)
- && tab.item_id() == preview_item_id
- && !allow_preview
- {
- pane.set_preview_item_id(None, cx);
+ if !allow_preview && let Some(item) = pane.items.get(index) {
+ pane.unpreview_item_if_preview(item.item_id());
}
if activate {
pane.activate_item(index, focus_item, focus_item, window, cx);
@@ -955,7 +974,7 @@ impl Pane {
window: &mut Window,
cx: &mut Context<Self>| {
if allow_preview {
- pane.set_preview_item_id(Some(new_item.item_id()), cx);
+ pane.replace_preview_item_id(new_item.item_id(), window, cx);
}
if let Some(text) = new_item.telemetry_event_text(cx) {
@@ -1036,6 +1055,7 @@ impl Pane {
) -> Option<usize> {
let item_idx = self.preview_item_idx()?;
let id = self.preview_item_id()?;
+ self.set_preview_item_id(None, cx);
let prev_active_item_index = self.active_item_index;
self.remove_item(id, false, false, window, cx);
@@ -1981,9 +2001,7 @@ impl Pane {
item.on_removed(cx);
self.nav_history.set_mode(mode);
- if self.is_active_preview_item(item.item_id()) {
- self.set_preview_item_id(None, cx);
- }
+ self.unpreview_item_if_preview(item.item_id());
if let Some(path) = item.project_path(cx) {
let abs_path = self
@@ -2194,9 +2212,7 @@ impl Pane {
if can_save {
pane.update_in(cx, |pane, window, cx| {
- if pane.is_active_preview_item(item.item_id()) {
- pane.set_preview_item_id(None, cx);
- }
+ pane.unpreview_item_if_preview(item.item_id());
item.save(
SaveOptions {
format: should_format,
@@ -2450,8 +2466,8 @@ impl Pane {
let id = self.item_for_index(ix)?.item_id();
let should_activate = ix == self.active_item_index;
- if matches!(operation, PinOperation::Pin) && self.is_active_preview_item(id) {
- self.set_preview_item_id(None, cx);
+ if matches!(operation, PinOperation::Pin) {
+ self.unpreview_item_if_preview(id);
}
match operation {
@@ -2591,6 +2607,7 @@ impl Pane {
let close_side = &settings.close_position;
let show_close_button = &settings.show_close_button;
let indicator = render_item_indicator(item.boxed_clone(), cx);
+ let tab_tooltip_content = item.tab_tooltip_content(cx);
let item_id = item.item_id();
let is_first_item = ix == 0;
let is_last_item = ix == self.items.len() - 1;
@@ -2623,12 +2640,9 @@ impl Pane {
)
.on_mouse_down(
MouseButton::Left,
- cx.listener(move |pane, event: &MouseDownEvent, _, cx| {
- if let Some(id) = pane.preview_item_id
- && id == item_id
- && event.click_count > 1
- {
- pane.set_preview_item_id(None, cx);
+ cx.listener(move |pane, event: &MouseDownEvent, _, _| {
+ if event.click_count > 1 {
+ pane.unpreview_item_if_preview(item_id);
}
}),
)
@@ -2678,12 +2692,6 @@ impl Pane {
this.drag_split_direction = None;
this.handle_external_paths_drop(paths, window, cx)
}))
- .when_some(item.tab_tooltip_content(cx), |tab, content| match content {
- TabTooltipContent::Text(text) => tab.tooltip(Tooltip::text(text)),
- TabTooltipContent::Custom(element_fn) => {
- tab.tooltip(move |window, cx| element_fn(window, cx))
- }
- })
.start_slot::<Indicator>(indicator)
.map(|this| {
let end_slot_action: &'static dyn Action;
@@ -2750,7 +2758,15 @@ impl Pane {
})
.flatten(),
)
- .child(label),
+ .child(label)
+ .id(("pane-tab-content", ix))
+ .map(|this| match tab_tooltip_content {
+ Some(TabTooltipContent::Text(text)) => this.tooltip(Tooltip::text(text)),
+ Some(TabTooltipContent::Custom(element_fn)) => {
+ this.tooltip(move |window, cx| element_fn(window, cx))
+ }
+ None => this,
+ }),
);
let single_entry_to_resolve = (self.items[ix].buffer_kind(cx) == ItemBufferKind::Singleton)
@@ -3269,11 +3285,7 @@ impl Pane {
let mut to_pane = cx.entity();
let split_direction = self.drag_split_direction;
let item_id = dragged_tab.item.item_id();
- if let Some(preview_item_id) = self.preview_item_id
- && item_id == preview_item_id
- {
- self.set_preview_item_id(None, cx);
- }
+ self.unpreview_item_if_preview(item_id);
let is_clone = cfg!(target_os = "macos") && window.modifiers().alt
|| cfg!(not(target_os = "macos")) && window.modifiers().control;
@@ -3785,15 +3797,17 @@ impl Render for Pane {
.on_action(cx.listener(Self::toggle_pin_tab))
.on_action(cx.listener(Self::unpin_all_tabs))
.when(PreviewTabsSettings::get_global(cx).enabled, |this| {
- this.on_action(cx.listener(|pane: &mut Pane, _: &TogglePreviewTab, _, cx| {
- if let Some(active_item_id) = pane.active_item().map(|i| i.item_id()) {
- if pane.is_active_preview_item(active_item_id) {
- pane.set_preview_item_id(None, cx);
- } else {
- pane.set_preview_item_id(Some(active_item_id), cx);
+ this.on_action(
+ cx.listener(|pane: &mut Pane, _: &TogglePreviewTab, window, cx| {
+ if let Some(active_item_id) = pane.active_item().map(|i| i.item_id()) {
+ if pane.is_active_preview_item(active_item_id) {
+ pane.unpreview_item_if_preview(active_item_id);
+ } else {
+ pane.replace_preview_item_id(active_item_id, window, cx);
+ }
}
- }
- }))
+ }),
+ )
})
.on_action(
cx.listener(|pane: &mut Self, action: &CloseActiveItem, window, cx| {
@@ -1656,49 +1656,6 @@ impl WorkspaceDb {
}
}
- pub async fn toolchain(
- &self,
- workspace_id: WorkspaceId,
- worktree_id: WorktreeId,
- relative_worktree_path: Arc<RelPath>,
- language_name: LanguageName,
- ) -> Result<Option<Toolchain>> {
- self.write(move |this| {
- let mut select = this
- .select_bound(sql!(
- SELECT
- name, path, raw_json
- FROM toolchains
- WHERE
- workspace_id = ? AND
- language_name = ? AND
- worktree_id = ? AND
- relative_worktree_path = ?
- ))
- .context("select toolchain")?;
-
- let toolchain: Vec<(String, String, String)> = select((
- workspace_id,
- language_name.as_ref().to_string(),
- worktree_id.to_usize(),
- relative_worktree_path.as_unix_str().to_string(),
- ))?;
-
- Ok(toolchain
- .into_iter()
- .next()
- .and_then(|(name, path, raw_json)| {
- Some(Toolchain {
- name: name.into(),
- path: path.into(),
- language_name,
- as_json: serde_json::Value::from_str(&raw_json).ok()?,
- })
- }))
- })
- .await
- }
-
pub(crate) async fn toolchains(
&self,
workspace_id: WorkspaceId,
@@ -96,6 +96,7 @@ pub trait SearchableItem: Item + EventEmitter<SearchEvent> {
fn update_matches(
&mut self,
matches: &[Self::Match],
+ active_match_index: Option<usize>,
window: &mut Window,
cx: &mut Context<Self>,
);
@@ -179,7 +180,13 @@ pub trait SearchableItemHandle: ItemHandle {
handler: Box<dyn Fn(&SearchEvent, &mut Window, &mut App) + Send>,
) -> Subscription;
fn clear_matches(&self, window: &mut Window, cx: &mut App);
- fn update_matches(&self, matches: &AnyVec<dyn Send>, window: &mut Window, cx: &mut App);
+ fn update_matches(
+ &self,
+ matches: &AnyVec<dyn Send>,
+ active_match_index: Option<usize>,
+ window: &mut Window,
+ cx: &mut App,
+ );
fn query_suggestion(&self, window: &mut Window, cx: &mut App) -> String;
fn activate_match(
&self,
@@ -264,10 +271,16 @@ impl<T: SearchableItem> SearchableItemHandle for Entity<T> {
fn clear_matches(&self, window: &mut Window, cx: &mut App) {
self.update(cx, |this, cx| this.clear_matches(window, cx));
}
- fn update_matches(&self, matches: &AnyVec<dyn Send>, window: &mut Window, cx: &mut App) {
+ fn update_matches(
+ &self,
+ matches: &AnyVec<dyn Send>,
+ active_match_index: Option<usize>,
+ window: &mut Window,
+ cx: &mut App,
+ ) {
let matches = matches.downcast_ref().unwrap();
self.update(cx, |this, cx| {
- this.update_matches(matches.as_slice(), window, cx)
+ this.update_matches(matches.as_slice(), active_match_index, window, cx)
});
}
fn query_suggestion(&self, window: &mut Window, cx: &mut App) -> String {
@@ -3636,14 +3636,33 @@ impl Workspace {
project_item: Entity<T::Item>,
activate_pane: bool,
focus_item: bool,
+ keep_old_preview: bool,
+ allow_new_preview: bool,
window: &mut Window,
cx: &mut Context<Self>,
) -> Entity<T>
where
T: ProjectItem,
{
+ let old_item_id = pane.read(cx).active_item().map(|item| item.item_id());
+
if let Some(item) = self.find_project_item(&pane, &project_item, cx) {
+ if !keep_old_preview
+ && let Some(old_id) = old_item_id
+ && old_id != item.item_id()
+ {
+ // switching to a different item, so unpreview old active item
+ pane.update(cx, |pane, _| {
+ pane.unpreview_item_if_preview(old_id);
+ });
+ }
+
self.activate_item(&item, activate_pane, focus_item, window, cx);
+ if !allow_new_preview {
+ pane.update(cx, |pane, _| {
+ pane.unpreview_item_if_preview(item.item_id());
+ });
+ }
return item;
}
@@ -3652,16 +3671,14 @@ impl Workspace {
T::for_project_item(self.project().clone(), Some(pane), project_item, window, cx)
})
});
- let item_id = item.item_id();
let mut destination_index = None;
pane.update(cx, |pane, cx| {
- if PreviewTabsSettings::get_global(cx).enable_preview_from_code_navigation
- && let Some(preview_item_id) = pane.preview_item_id()
- && preview_item_id != item_id
- {
- destination_index = pane.close_current_preview_item(window, cx);
+ if !keep_old_preview && let Some(old_id) = old_item_id {
+ pane.unpreview_item_if_preview(old_id);
+ }
+ if allow_new_preview {
+ destination_index = pane.replace_preview_item_id(item.item_id(), window, cx);
}
- pane.set_preview_item_id(Some(item.item_id()), cx)
});
self.add_item(
@@ -7236,7 +7253,9 @@ actions!(
/// Shares the current project with collaborators.
ShareProject,
/// Shares your screen with collaborators.
- ScreenShare
+ ScreenShare,
+ /// Copies the current room name and session id for debugging purposes.
+ CopyRoomId,
]
);
actions!(
@@ -52,7 +52,7 @@ use std::{
fmt,
future::Future,
mem::{self},
- ops::{Deref, DerefMut},
+ ops::{Deref, DerefMut, Range},
path::{Path, PathBuf},
pin::Pin,
sync::{
@@ -3877,29 +3877,35 @@ impl BackgroundScanner {
abs_paths.dedup_by(|a, b| a.starts_with(b));
{
let snapshot = &self.state.lock().await.snapshot;
- abs_paths.retain(|abs_path| {
- let abs_path = &SanitizedPath::new(abs_path);
+ let mut ranges_to_drop = SmallVec::<[Range<usize>; 4]>::new();
- {
- let mut is_git_related = false;
+ fn skip_ix(ranges: &mut SmallVec<[Range<usize>; 4]>, ix: usize) {
+ if let Some(last_range) = ranges.last_mut()
+ && last_range.end == ix
+ {
+ last_range.end += 1;
+ } else {
+ ranges.push(ix..ix + 1);
+ }
+ }
- let dot_git_paths = self.executor.block(maybe!(async {
- let mut path = None;
- for ancestor in abs_path.as_path().ancestors() {
+ for (ix, abs_path) in abs_paths.iter().enumerate() {
+ let abs_path = &SanitizedPath::new(&abs_path);
+ let mut is_git_related = false;
+ let mut dot_git_paths = None;
+
+ for ancestor in abs_path.as_path().ancestors() {
if is_git_dir(ancestor, self.fs.as_ref()).await {
let path_in_git_dir = abs_path
.as_path()
.strip_prefix(ancestor)
.expect("stripping off the ancestor");
- path = Some((ancestor.to_owned(), path_in_git_dir.to_owned()));
- break;
- }
+ dot_git_paths = Some((ancestor.to_owned(), path_in_git_dir.to_owned()));
+ break;
}
- path
-
- }));
+ }
if let Some((dot_git_abs_path, path_in_git_dir)) = dot_git_paths {
if skipped_files_in_dot_git
@@ -3909,8 +3915,11 @@ impl BackgroundScanner {
path_in_git_dir.starts_with(skipped_git_subdir)
})
{
- log::debug!("ignoring event {abs_path:?} as it's in the .git directory among skipped files or directories");
- return false;
+ log::debug!(
+ "ignoring event {abs_path:?} as it's in the .git directory among skipped files or directories"
+ );
+ skip_ix(&mut ranges_to_drop, ix);
+ continue;
}
is_git_related = true;
@@ -3919,8 +3928,7 @@ impl BackgroundScanner {
}
}
- let relative_path = if let Ok(path) =
- abs_path.strip_prefix(&root_canonical_path)
+ let relative_path = if let Ok(path) = abs_path.strip_prefix(&root_canonical_path)
&& let Ok(path) = RelPath::new(path, PathStyle::local())
{
path
@@ -3931,10 +3939,11 @@ impl BackgroundScanner {
);
} else {
log::error!(
- "ignoring event {abs_path:?} outside of root path {root_canonical_path:?}",
+ "ignoring event {abs_path:?} outside of root path {root_canonical_path:?}",
);
}
- return false;
+ skip_ix(&mut ranges_to_drop, ix);
+ continue;
};
if abs_path.file_name() == Some(OsStr::new(GITIGNORE)) {
@@ -3958,21 +3967,26 @@ impl BackgroundScanner {
});
if !parent_dir_is_loaded {
log::debug!("ignoring event {relative_path:?} within unloaded directory");
- return false;
+ skip_ix(&mut ranges_to_drop, ix);
+ continue;
}
if self.settings.is_path_excluded(&relative_path) {
if !is_git_related {
log::debug!("ignoring FS event for excluded path {relative_path:?}");
}
- return false;
+ skip_ix(&mut ranges_to_drop, ix);
+ continue;
}
relative_paths.push(relative_path.into_arc());
- true
}
- });
+
+ for range_to_drop in ranges_to_drop.into_iter().rev() {
+ abs_paths.drain(range_to_drop);
+ }
}
+
if relative_paths.is_empty() && dot_git_abs_paths.is_empty() {
return;
}
@@ -2,7 +2,7 @@
description = "The fast, collaborative code editor."
edition.workspace = true
name = "zed"
-version = "0.216.0"
+version = "0.217.0"
publish.workspace = true
license = "GPL-3.0-or-later"
authors = ["Zed Team <hi@zed.dev>"]
@@ -22,5 +22,9 @@
<true/>
<key>com.apple.security.personal-information.photos-library</key>
<true/>
+ <key>com.apple.security.files.user-selected.read-write</key>
+ <true/>
+ <key>com.apple.security.files.downloads.read-write</key>
+ <true/>
</dict>
</plist>
@@ -54,7 +54,7 @@ pub enum OpenRequestKind {
schema_path: String,
},
Setting {
- // None just opens settings without navigating to a specific path
+ /// `None` opens settings without navigating to a specific path.
setting_path: Option<String>,
},
}
@@ -215,6 +215,10 @@ pub mod git {
Switch,
/// Selects a different repository.
SelectRepo,
+ /// Filter remotes.
+ FilterRemotes,
+ /// Create a git remote.
+ CreateRemote,
/// Opens the git branch selector.
#[action(deprecated_aliases = ["branches::OpenRecent"])]
Branch,
@@ -132,12 +132,8 @@ impl EditPredictionProvider for ZetaEditPredictionProvider {
}
fn discard(&mut self, cx: &mut Context<Self>) {
- self.zeta.update(cx, |zeta, cx| {
- zeta.reject_current_prediction(
- EditPredictionRejectReason::Discarded,
- &self.project,
- cx,
- );
+ self.zeta.update(cx, |zeta, _cx| {
+ zeta.reject_current_prediction(EditPredictionRejectReason::Discarded, &self.project);
});
}
@@ -173,11 +169,10 @@ impl EditPredictionProvider for ZetaEditPredictionProvider {
let snapshot = buffer.snapshot();
let Some(edits) = prediction.interpolate(&snapshot) else {
- self.zeta.update(cx, |zeta, cx| {
+ self.zeta.update(cx, |zeta, _cx| {
zeta.reject_current_prediction(
EditPredictionRejectReason::InterpolatedEmpty,
&self.project,
- cx,
);
});
return None;
@@ -5,7 +5,7 @@ use cloud_llm_client::predict_edits_v3::{self, Event, PromptFormat, Signature};
use cloud_llm_client::{
AcceptEditPredictionBody, EXPIRED_LLM_TOKEN_HEADER_NAME, EditPredictionRejectReason,
EditPredictionRejection, MAX_EDIT_PREDICTION_REJECTIONS_PER_REQUEST,
- MINIMUM_REQUIRED_VERSION_HEADER_NAME, PredictEditsRequestTrigger, RejectEditPredictionsBody,
+ MINIMUM_REQUIRED_VERSION_HEADER_NAME, PredictEditsRequestTrigger, RejectEditPredictionsBodyRef,
ZED_VERSION_HEADER_NAME,
};
use cloud_zeta2_prompt::retrieval_prompt::{SearchToolInput, SearchToolQuery};
@@ -19,8 +19,10 @@ use edit_prediction_context::{
SyntaxIndex, SyntaxIndexState,
};
use feature_flags::{FeatureFlag, FeatureFlagAppExt as _, PredictEditsRateCompletionsFeatureFlag};
+use futures::channel::mpsc::UnboundedReceiver;
use futures::channel::{mpsc, oneshot};
-use futures::{AsyncReadExt as _, FutureExt as _, StreamExt as _};
+use futures::{AsyncReadExt as _, FutureExt as _, StreamExt as _, select_biased};
+use gpui::BackgroundExecutor;
use gpui::{
App, AsyncApp, Entity, EntityId, Global, SharedString, Subscription, Task, WeakEntity, actions,
http_client::{self, AsyncBody, Method},
@@ -100,6 +102,7 @@ actions!(
const EVENT_COUNT_MAX: usize = 6;
const CHANGE_GROUPING_LINE_SPAN: u32 = 8;
const ZED_PREDICT_DATA_COLLECTION_CHOICE: &str = "zed_predict_data_collection_choice";
+const REJECT_REQUEST_DEBOUNCE: Duration = Duration::from_secs(15);
pub struct SweepFeatureFlag;
@@ -195,9 +198,7 @@ pub struct Zeta {
edit_prediction_model: ZetaEditPredictionModel,
pub sweep_ai: SweepAi,
data_collection_choice: DataCollectionChoice,
- rejected_predictions: Vec<EditPredictionRejection>,
- reject_predictions_tx: mpsc::UnboundedSender<()>,
- reject_predictions_debounce_task: Option<Task<()>>,
+ reject_predictions_tx: mpsc::UnboundedSender<EditPredictionRejection>,
shown_predictions: VecDeque<EditPrediction>,
rated_predictions: HashSet<EditPredictionId>,
}
@@ -325,13 +326,8 @@ impl ZetaProject {
return;
};
- this.update(cx, |this, cx| {
- this.reject_prediction(
- prediction_id,
- EditPredictionRejectReason::Canceled,
- false,
- cx,
- );
+ this.update(cx, |this, _cx| {
+ this.reject_prediction(prediction_id, EditPredictionRejectReason::Canceled, false);
})
.ok();
})
@@ -504,14 +500,24 @@ impl Zeta {
let refresh_llm_token_listener = RefreshLlmTokenListener::global(cx);
let data_collection_choice = Self::load_data_collection_choice();
- let (reject_tx, mut reject_rx) = mpsc::unbounded();
- cx.spawn(async move |this, cx| {
- while let Some(()) = reject_rx.next().await {
- this.update(cx, |this, cx| this.flush_rejected_predictions(cx))?
- .await
- .log_err();
+ let llm_token = LlmApiToken::default();
+
+ let (reject_tx, reject_rx) = mpsc::unbounded();
+ cx.background_spawn({
+ let client = client.clone();
+ let llm_token = llm_token.clone();
+ let app_version = AppVersion::global(cx);
+ let background_executor = cx.background_executor().clone();
+ async move {
+ Self::handle_rejected_predictions(
+ reject_rx,
+ client,
+ llm_token,
+ app_version,
+ background_executor,
+ )
+ .await
}
- anyhow::Ok(())
})
.detach();
@@ -520,7 +526,7 @@ impl Zeta {
client,
user_store,
options: DEFAULT_OPTIONS,
- llm_token: LlmApiToken::default(),
+ llm_token,
_llm_token_subscription: cx.subscribe(
&refresh_llm_token_listener,
|this, _listener, _event, cx| {
@@ -540,8 +546,6 @@ impl Zeta {
edit_prediction_model: ZetaEditPredictionModel::Zeta2,
sweep_ai: SweepAi::new(cx),
data_collection_choice,
- rejected_predictions: Vec::new(),
- reject_predictions_debounce_task: None,
reject_predictions_tx: reject_tx,
rated_predictions: Default::default(),
shown_predictions: Default::default(),
@@ -901,64 +905,73 @@ impl Zeta {
.detach_and_log_err(cx);
}
- fn flush_rejected_predictions(&mut self, cx: &mut Context<Self>) -> Task<Result<()>> {
- match self.edit_prediction_model {
- ZetaEditPredictionModel::Zeta1 | ZetaEditPredictionModel::Zeta2 => {}
- ZetaEditPredictionModel::Sweep => return Task::ready(anyhow::Ok(())),
- }
+ async fn handle_rejected_predictions(
+ rx: UnboundedReceiver<EditPredictionRejection>,
+ client: Arc<Client>,
+ llm_token: LlmApiToken,
+ app_version: Version,
+ background_executor: BackgroundExecutor,
+ ) {
+ let mut rx = std::pin::pin!(rx.peekable());
+ let mut batched = Vec::new();
- let client = self.client.clone();
- let llm_token = self.llm_token.clone();
- let app_version = AppVersion::global(cx);
- let last_rejection = self.rejected_predictions.last().cloned();
- let Some(last_rejection) = last_rejection else {
- return Task::ready(anyhow::Ok(()));
- };
+ while let Some(rejection) = rx.next().await {
+ batched.push(rejection);
- let body = serde_json::to_string(&RejectEditPredictionsBody {
- rejections: self.rejected_predictions.clone(),
- })
- .ok();
+ if batched.len() < MAX_EDIT_PREDICTION_REJECTIONS_PER_REQUEST / 2 {
+ select_biased! {
+ next = rx.as_mut().peek().fuse() => {
+ if next.is_some() {
+ continue;
+ }
+ }
+ () = background_executor.timer(REJECT_REQUEST_DEBOUNCE).fuse() => {},
+ }
+ }
- cx.spawn(async move |this, cx| {
let url = client
.http_client()
- .build_zed_llm_url("/predict_edits/reject", &[])?;
+ .build_zed_llm_url("/predict_edits/reject", &[])
+ .unwrap();
+
+ let flush_count = batched
+ .len()
+ // in case items have accumulated after failure
+ .min(MAX_EDIT_PREDICTION_REJECTIONS_PER_REQUEST);
+ let start = batched.len() - flush_count;
- cx.background_spawn(Self::send_api_request::<()>(
- move |builder| {
- let req = builder.uri(url.as_ref()).body(body.clone().into());
- Ok(req?)
+ let body = RejectEditPredictionsBodyRef {
+ rejections: &batched[start..],
+ };
+
+ let result = Self::send_api_request::<()>(
+ |builder| {
+ let req = builder
+ .uri(url.as_ref())
+ .body(serde_json::to_string(&body)?.into());
+ anyhow::Ok(req?)
},
- client,
- llm_token,
- app_version,
- ))
- .await
- .context("Failed to reject edit predictions")?;
+ client.clone(),
+ llm_token.clone(),
+ app_version.clone(),
+ )
+ .await;
- this.update(cx, |this, _| {
- if let Some(ix) = this
- .rejected_predictions
- .iter()
- .position(|rejection| rejection.request_id == last_rejection.request_id)
- {
- this.rejected_predictions.drain(..ix + 1);
- }
- })
- })
+ if result.log_err().is_some() {
+ batched.drain(start..);
+ }
+ }
}
fn reject_current_prediction(
&mut self,
reason: EditPredictionRejectReason,
project: &Entity<Project>,
- cx: &mut Context<Self>,
) {
if let Some(project_state) = self.projects.get_mut(&project.entity_id()) {
project_state.pending_predictions.clear();
if let Some(prediction) = project_state.current_prediction.take() {
- self.reject_prediction(prediction.prediction.id, reason, prediction.was_shown, cx);
+ self.reject_prediction(prediction.prediction.id, reason, prediction.was_shown);
}
};
}
@@ -984,26 +997,19 @@ impl Zeta {
prediction_id: EditPredictionId,
reason: EditPredictionRejectReason,
was_shown: bool,
- cx: &mut Context<Self>,
) {
- self.rejected_predictions.push(EditPredictionRejection {
- request_id: prediction_id.to_string(),
- reason,
- was_shown,
- });
+ match self.edit_prediction_model {
+ ZetaEditPredictionModel::Zeta1 | ZetaEditPredictionModel::Zeta2 => {}
+ ZetaEditPredictionModel::Sweep => return,
+ }
- let reached_request_limit =
- self.rejected_predictions.len() >= MAX_EDIT_PREDICTION_REJECTIONS_PER_REQUEST / 2;
- let reject_tx = self.reject_predictions_tx.clone();
- self.reject_predictions_debounce_task = Some(cx.spawn(async move |_this, cx| {
- const REJECT_REQUEST_DEBOUNCE: Duration = Duration::from_secs(15);
- if !reached_request_limit {
- cx.background_executor()
- .timer(REJECT_REQUEST_DEBOUNCE)
- .await;
- }
- reject_tx.unbounded_send(()).log_err();
- }));
+ self.reject_predictions_tx
+ .unbounded_send(EditPredictionRejection {
+ request_id: prediction_id.to_string(),
+ reason,
+ was_shown,
+ })
+ .log_err();
}
fn is_refreshing(&self, project: &Entity<Project>) -> bool {
@@ -1211,7 +1217,6 @@ impl Zeta {
this.reject_current_prediction(
EditPredictionRejectReason::Replaced,
&project,
- cx,
);
Some(new_prediction)
@@ -1220,7 +1225,6 @@ impl Zeta {
new_prediction.prediction.id,
EditPredictionRejectReason::CurrentPreferred,
false,
- cx,
);
None
}
@@ -1229,7 +1233,7 @@ impl Zeta {
}
}
Err(reject_reason) => {
- this.reject_prediction(prediction_result.id, reject_reason, false, cx);
+ this.reject_prediction(prediction_result.id, reject_reason, false);
None
}
}
@@ -2906,7 +2910,7 @@ fn feature_gate_predict_edits_actions(cx: &mut App) {
#[cfg(test)]
mod tests {
- use std::{path::Path, sync::Arc};
+ use std::{path::Path, sync::Arc, time::Duration};
use client::UserStore;
use clock::FakeSystemClock;
@@ -2933,7 +2937,7 @@ mod tests {
use util::path;
use uuid::Uuid;
- use crate::{BufferEditPrediction, Zeta};
+ use crate::{BufferEditPrediction, EditPredictionId, REJECT_REQUEST_DEBOUNCE, Zeta};
#[gpui::test]
async fn test_current_state(cx: &mut TestAppContext) {
@@ -3035,8 +3039,8 @@ mod tests {
.unwrap();
refresh_task.await.unwrap();
- zeta.update(cx, |zeta, cx| {
- zeta.reject_current_prediction(EditPredictionRejectReason::Discarded, &project, cx);
+ zeta.update(cx, |zeta, _cx| {
+ zeta.reject_current_prediction(EditPredictionRejectReason::Discarded, &project);
});
// Prediction for another file
@@ -3545,14 +3549,17 @@ mod tests {
let snapshot = buffer.read_with(cx, |buffer, _cx| buffer.snapshot());
let position = snapshot.anchor_before(language::Point::new(1, 3));
+ // start two refresh tasks
zeta.update(cx, |zeta, cx| {
- // start two refresh tasks
zeta.refresh_prediction_from_buffer(project.clone(), buffer.clone(), position, cx);
+ });
+
+ let (_, respond_first) = requests.predict.next().await.unwrap();
+ zeta.update(cx, |zeta, cx| {
zeta.refresh_prediction_from_buffer(project.clone(), buffer.clone(), position, cx);
});
- let (_, respond_first) = requests.predict.next().await.unwrap();
let (_, respond_second) = requests.predict.next().await.unwrap();
// wait for throttle
@@ -3631,18 +3638,22 @@ mod tests {
let snapshot = buffer.read_with(cx, |buffer, _cx| buffer.snapshot());
let position = snapshot.anchor_before(language::Point::new(1, 3));
+ // start two refresh tasks
zeta.update(cx, |zeta, cx| {
- // start two refresh tasks
zeta.refresh_prediction_from_buffer(project.clone(), buffer.clone(), position, cx);
+ });
+
+ let (_, respond_first) = requests.predict.next().await.unwrap();
+
+ zeta.update(cx, |zeta, cx| {
zeta.refresh_prediction_from_buffer(project.clone(), buffer.clone(), position, cx);
});
+ let (_, respond_second) = requests.predict.next().await.unwrap();
+
// wait for throttle, so requests are sent
cx.run_until_parked();
- let (_, respond_first) = requests.predict.next().await.unwrap();
- let (_, respond_second) = requests.predict.next().await.unwrap();
-
zeta.update(cx, |zeta, cx| {
// start a third request
zeta.refresh_prediction_from_buffer(project.clone(), buffer.clone(), position, cx);
@@ -3736,6 +3747,118 @@ mod tests {
);
}
+ #[gpui::test]
+ async fn test_rejections_flushing(cx: &mut TestAppContext) {
+ let (zeta, mut requests) = init_test(cx);
+
+ zeta.update(cx, |zeta, _cx| {
+ zeta.reject_prediction(
+ EditPredictionId("test-1".into()),
+ EditPredictionRejectReason::Discarded,
+ false,
+ );
+ zeta.reject_prediction(
+ EditPredictionId("test-2".into()),
+ EditPredictionRejectReason::Canceled,
+ true,
+ );
+ });
+
+ cx.executor().advance_clock(REJECT_REQUEST_DEBOUNCE);
+ cx.run_until_parked();
+
+ let (reject_request, respond_tx) = requests.reject.next().await.unwrap();
+ respond_tx.send(()).unwrap();
+
+ // batched
+ assert_eq!(reject_request.rejections.len(), 2);
+ assert_eq!(
+ reject_request.rejections[0],
+ EditPredictionRejection {
+ request_id: "test-1".to_string(),
+ reason: EditPredictionRejectReason::Discarded,
+ was_shown: false
+ }
+ );
+ assert_eq!(
+ reject_request.rejections[1],
+ EditPredictionRejection {
+ request_id: "test-2".to_string(),
+ reason: EditPredictionRejectReason::Canceled,
+ was_shown: true
+ }
+ );
+
+ // Reaching batch size limit sends without debounce
+ zeta.update(cx, |zeta, _cx| {
+ for i in 0..70 {
+ zeta.reject_prediction(
+ EditPredictionId(format!("batch-{}", i).into()),
+ EditPredictionRejectReason::Discarded,
+ false,
+ );
+ }
+ });
+
+ // First MAX/2 items are sent immediately
+ cx.run_until_parked();
+ let (reject_request, respond_tx) = requests.reject.next().await.unwrap();
+ respond_tx.send(()).unwrap();
+
+ assert_eq!(reject_request.rejections.len(), 50);
+ assert_eq!(reject_request.rejections[0].request_id, "batch-0");
+ assert_eq!(reject_request.rejections[49].request_id, "batch-49");
+
+ // Remaining items are debounced with the next batch
+ cx.executor().advance_clock(Duration::from_secs(15));
+ cx.run_until_parked();
+
+ let (reject_request, respond_tx) = requests.reject.next().await.unwrap();
+ respond_tx.send(()).unwrap();
+
+ assert_eq!(reject_request.rejections.len(), 20);
+ assert_eq!(reject_request.rejections[0].request_id, "batch-50");
+ assert_eq!(reject_request.rejections[19].request_id, "batch-69");
+
+ // Request failure
+ zeta.update(cx, |zeta, _cx| {
+ zeta.reject_prediction(
+ EditPredictionId("retry-1".into()),
+ EditPredictionRejectReason::Discarded,
+ false,
+ );
+ });
+
+ cx.executor().advance_clock(REJECT_REQUEST_DEBOUNCE);
+ cx.run_until_parked();
+
+ let (reject_request, _respond_tx) = requests.reject.next().await.unwrap();
+ assert_eq!(reject_request.rejections.len(), 1);
+ assert_eq!(reject_request.rejections[0].request_id, "retry-1");
+ // Simulate failure
+ drop(_respond_tx);
+
+ // Add another rejection
+ zeta.update(cx, |zeta, _cx| {
+ zeta.reject_prediction(
+ EditPredictionId("retry-2".into()),
+ EditPredictionRejectReason::Discarded,
+ false,
+ );
+ });
+
+ cx.executor().advance_clock(REJECT_REQUEST_DEBOUNCE);
+ cx.run_until_parked();
+
+ // Retry should include both the failed item and the new one
+ let (reject_request, respond_tx) = requests.reject.next().await.unwrap();
+ respond_tx.send(()).unwrap();
+
+ assert_eq!(reject_request.rejections.len(), 2);
+ assert_eq!(reject_request.rejections[0].request_id, "retry-1");
+ assert_eq!(reject_request.rejections[1].request_id, "retry-2");
+ }
+
// Skipped until we start including diagnostics in prompt
// #[gpui::test]
// async fn test_request_diagnostics(cx: &mut TestAppContext) {
@@ -1,689 +0,0 @@
-# Language Model Provider Extensions - Implementation Guide
-
-## Purpose
-
-This document provides a detailed guide for completing the implementation of Language Model Provider Extensions in Zed. It explains what has been done, what remains, and how to complete the work.
-
-For the full design and rationale, see [language_model_provider_extensions_plan.md](./language_model_provider_extensions_plan.md).
-
-## Core Design Principle
-
-**Extensions handle ALL provider-specific logic.** This means:
-- Thought signatures (Anthropic)
-- Reasoning effort parameters (OpenAI o-series)
-- Cache control markers
-- Parallel tool calls
-- SSE/streaming format parsing
-- Any other provider-specific features
-
-Zed's core should have **zero knowledge** of these details. The extension API must be generic enough that extensions can implement any provider without Zed changes.
-
----
-
-## Current Status: STREAMING API COMPLETE ✅
-
-The core plumbing and streaming API are now complete. Extensions can:
-1. Declare LLM providers in their manifest
-2. Be queried for providers and models at load time
-3. Have their providers registered with the `LanguageModelRegistry`
-4. Have their providers unregistered when the extension is unloaded
-5. Stream completions using the new polling-based API
-
-**What's NOT done yet:**
-- Credential UI prompt support (`llm_request_credential` returns false)
-- Model refresh mechanism
-- A working test extension that demonstrates the feature (requires WASM build)
-- End-to-end testing with a real extension
-
----
-
-## What Has Been Completed
-
-### 1. WIT Interface Definition ✅
-
-**Location:** `crates/extension_api/wit/since_v0.7.0/`
-
-Created all WIT files for v0.7.0:
-- `llm-provider.wit` - Core LLM types (ProviderInfo, ModelInfo, CompletionRequest, CompletionEvent, etc.)
-- `extension.wit` - Updated with LLM exports/imports
-
-Key types in `llm-provider.wit`:
-```wit
-record provider-info {
- id: string,
- name: string,
- icon: option<string>,
-}
-
-record model-info {
- id: string,
- name: string,
- max-token-count: u64,
- max-output-tokens: option<u64>,
- capabilities: model-capabilities,
- is-default: bool,
- is-default-fast: bool,
-}
-
-variant completion-event {
- started,
- text(string),
- thinking(thinking-content),
- redacted-thinking(string),
- tool-use(tool-use),
- tool-use-json-parse-error(tool-use-json-parse-error),
- stop(stop-reason),
- usage(token-usage),
- reasoning-details(string),
-}
-```
-
-Key exports in `extension.wit`:
-```wit
-export llm-providers: func() -> list<provider-info>;
-export llm-provider-models: func(provider-id: string) -> result<list<model-info>, string>;
-export llm-provider-is-authenticated: func(provider-id: string) -> bool;
-export llm-provider-authenticate: func(provider-id: string) -> result<_, string>;
-export llm-stream-completion-start: func(provider-id: string, model-id: string, request: completion-request) -> result<string, string>;
-export llm-stream-completion-next: func(stream-id: string) -> result<option<completion-event>, string>;
-export llm-stream-completion-close: func(stream-id: string);
-```
-
-Note: The streaming API uses a polling-based approach with explicit stream IDs instead of a resource handle.
-This avoids complexity with cross-boundary resource ownership in the WASM component model.
-
-Key imports in `extension.wit`:
-```wit
-import llm-get-credential: func(provider-id: string) -> option<string>;
-import llm-store-credential: func(provider-id: string, value: string) -> result<_, string>;
-import llm-delete-credential: func(provider-id: string) -> result<_, string>;
-import llm-get-env-var: func(name: string) -> option<string>;
-```
-
-### 2. Extension Manifest Changes ✅
-
-**Location:** `crates/extension/src/extension_manifest.rs`
-
-Added these types:
-```rust
-pub struct LanguageModelProviderManifestEntry {
- pub name: String,
- pub icon: Option<String>,
- pub models: Vec<LanguageModelManifestEntry>,
- pub auth: Option<LanguageModelAuthConfig>,
-}
-
-pub struct LanguageModelManifestEntry {
- pub id: String,
- pub name: String,
- pub max_token_count: u64,
- pub max_output_tokens: Option<u64>,
- pub supports_images: bool,
- pub supports_tools: bool,
- pub supports_thinking: bool,
-}
-
-pub struct LanguageModelAuthConfig {
- pub env_var: Option<String>,
- pub credential_label: Option<String>,
-}
-```
-
-Added to `ExtensionManifest`:
-```rust
-pub language_model_providers: BTreeMap<Arc<str>, LanguageModelProviderManifestEntry>,
-```
-
-### 3. Host-Side Provider/Model Structs ✅
-
-**Location:** `crates/extension_host/src/wasm_host/llm_provider.rs`
-
-Created `ExtensionLanguageModelProvider` implementing `LanguageModelProvider`:
-- Wraps a `WasmExtension` and `LlmProviderInfo`
-- Delegates to extension calls for authentication, model listing, etc.
-- Returns `ExtensionLanguageModel` instances
-- Implements `LanguageModelProviderState` for UI observation
-
-Created `ExtensionLanguageModel` implementing `LanguageModel`:
-- Wraps extension + model info
-- Implements `stream_completion` by calling extension's `llm-stream-completion`
-- Converts between Zed's `LanguageModelRequest` and WIT's `CompletionRequest`
-- Handles streaming via polling-based approach with explicit stream IDs
-
-**Key implementation details:**
-- The `stream_completion` method uses a polling loop that calls `llm_stream_completion_start`, then repeatedly calls `llm_stream_completion_next` until the stream is complete, and finally calls `llm_stream_completion_close` to clean up
-- Credential storage uses gpui's `cx.read_credentials()`, `cx.write_credentials()`, and `cx.delete_credentials()` APIs
-- The `new()` method now accepts a `models: Vec<LlmModelInfo>` parameter to populate available models at registration time
-
-### 4. Extension Host Proxy ✅
-
-**Location:** `crates/extension/src/extension_host_proxy.rs`
-
-Added `ExtensionLanguageModelProviderProxy` trait:
-```rust
-pub type LanguageModelProviderRegistration = Box<dyn FnOnce(&mut App) + Send + Sync + 'static>;
-
-pub trait ExtensionLanguageModelProviderProxy: Send + Sync + 'static {
- fn register_language_model_provider(
- &self,
- provider_id: Arc<str>,
- register_fn: LanguageModelProviderRegistration,
- cx: &mut App,
- );
-
- fn unregister_language_model_provider(&self, provider_id: Arc<str>, cx: &mut App);
-}
-```
-
-The proxy uses a boxed closure pattern. This allows `extension_host` to create the `ExtensionLanguageModelProvider` (which requires `WasmExtension`), while letting `language_models` handle the actual registry registration.
-
-### 5. Proxy Implementation ✅
-
-**Location:** `crates/language_models/src/extension.rs`
-
-```rust
-pub struct ExtensionLanguageModelProxy {
- registry: Entity<LanguageModelRegistry>,
-}
-
-impl ExtensionLanguageModelProviderProxy for ExtensionLanguageModelProxy {
- fn register_language_model_provider(
- &self,
- _provider_id: Arc<str>,
- register_fn: LanguageModelProviderRegistration,
- cx: &mut App,
- ) {
- register_fn(cx);
- }
-
- fn unregister_language_model_provider(&self, provider_id: Arc<str>, cx: &mut App) {
- self.registry.update(cx, |registry, cx| {
- registry.unregister_provider(LanguageModelProviderId::from(provider_id), cx);
- });
- }
-}
-```
-
-The proxy is registered during `language_models::init()`.
-
-### 6. Extension Loading Wiring ✅
-
-**Location:** `crates/extension_host/src/extension_host.rs`
-
-In `extensions_updated()`:
-
-**Unloading (around line 1217):**
-```rust
-for provider_id in extension.manifest.language_model_providers.keys() {
- let full_provider_id: Arc<str> = format!("{}:{}", extension_id, provider_id).into();
- self.proxy.unregister_language_model_provider(full_provider_id, cx);
-}
-```
-
-**Loading (around line 1383):**
-After loading a wasm extension, we query for LLM providers and models:
-```rust
-if !extension.manifest.language_model_providers.is_empty() {
- let providers_result = wasm_extension
- .call(|ext, store| {
- async move { ext.call_llm_providers(store).await }.boxed()
- })
- .await;
-
- if let Ok(Ok(providers)) = providers_result {
- for provider_info in providers {
- // Query for models...
- let models_result = wasm_extension.call(...).await;
- // Store provider_info and models for registration
- }
- }
-}
-```
-
-Then during registration (around line 1511):
-```rust
-for (provider_info, models) in llm_providers_with_models {
- let provider_id: Arc<str> = format!("{}:{}", manifest.id, provider_info.id).into();
- this.proxy.register_language_model_provider(
- provider_id,
- Box::new(move |cx: &mut App| {
- let provider = Arc::new(ExtensionLanguageModelProvider::new(
- wasm_ext, pinfo, mods, cx,
- ));
- language_model::LanguageModelRegistry::global(cx).update(
- cx,
- |registry, cx| {
- registry.register_provider(provider, cx);
- },
- );
- }),
- cx,
- );
-}
-```
-
-### 7. Extension API Updates ✅
-
-**Location:** `crates/extension_api/src/extension_api.rs`
-
-- Updated `wit_bindgen::generate!` to use `./wit/since_v0.7.0`
-- Added LLM type re-exports (prefixed with `Llm` for clarity)
-- Added LLM methods to `Extension` trait with default implementations
-- Added `wit::Guest` implementations for LLM functions
-
-The default implementations ensure backward compatibility:
-```rust
-fn llm_providers(&self) -> Vec<LlmProviderInfo> {
- Vec::new() // Extensions without LLM providers return empty
-}
-
-fn llm_provider_models(&self, _provider_id: &str) -> Result<Vec<LlmModelInfo>, String> {
- Ok(Vec::new())
-}
-
-fn llm_stream_completion_start(...) -> Result<String, String> {
- Err("`llm_stream_completion_start` not implemented".to_string())
-}
-fn llm_stream_completion_next(stream_id: &str) -> Result<Option<LlmCompletionEvent>, String> {
- Err("`llm_stream_completion_next` not implemented".to_string())
-}
-fn llm_stream_completion_close(stream_id: &str) { /* cleanup */ }
-```
-
-### 8. Test Files Updated ✅
-
-Added `language_model_providers: BTreeMap::default()` to all test manifests:
-- `crates/extension/src/extension_manifest.rs` (test module)
-- `crates/extension_host/src/extension_store_test.rs`
-- `crates/extension_host/src/capability_granter.rs` (test module)
-- `crates/extension_host/benches/extension_compilation_benchmark.rs`
-
----
-
-## What Remains To Be Done
-
-### Task 1: Test the Streaming Completion Flow (HIGH PRIORITY) - ARCHITECTURE UPDATED ✅
-
-The streaming API has been updated to use a polling-based approach instead of a resource handle pattern.
-This was necessary because the original design had a fundamental issue: the `completion-stream` resource
-was defined in an imported interface but returned from an exported function, creating ownership ambiguity.
-
-**New API:**
-- `llm-stream-completion-start` - Returns a stream ID (string)
-- `llm-stream-completion-next` - Poll for the next event using the stream ID
-- `llm-stream-completion-close` - Clean up the stream when done
-
-**Still needs testing:**
-1. Create a test extension that implements a simple LLM provider
-2. Verify the polling-based streaming works correctly through the WASM boundary
-3. Test error handling and edge cases
-
-**Location to test:** `crates/extension_host/src/wasm_host/llm_provider.rs` - the `stream_completion` method on `ExtensionLanguageModel`.
-
-### Task 2: Credential UI Prompt Support (MEDIUM PRIORITY)
-
-**Location:** `crates/extension_host/src/wasm_host/wit/since_v0_7_0.rs`
-
-The `llm_request_credential` host function currently returns `Ok(Ok(false))`:
-```rust
-async fn llm_request_credential(
- &mut self,
- _provider_id: String,
- _credential_type: llm_provider::CredentialType,
- _label: String,
- _placeholder: String,
-) -> wasmtime::Result<Result<bool, String>> {
- // TODO: Implement actual UI prompting
- Ok(Ok(false))
-}
-```
-
-**What needs to happen:**
-1. Show a dialog to the user asking for the credential
-2. Wait for user input
-3. Return `true` if provided, `false` if cancelled
-4. The extension can then use `llm_store_credential` to save it
-
-This requires UI work and async coordination with gpui windows.
-
-### Task 3: Handle Model Refresh (LOW PRIORITY - can be follow-up)
-
-Currently models are only queried once at registration time. Options for improvement:
-
-1. Add a refresh mechanism that re-queries `call_llm_provider_models`
-2. Add a notification mechanism where extensions can signal that models have changed
-3. Automatic refresh on authentication
-
-**Recommendation:** Start with refresh-on-authentication as a fast-follow.
-
-### Task 4: Create a Test Extension (LOW PRIORITY - but very useful)
-
-**Note:** Creating a working test extension requires building a WASM component, which needs:
-1. The `wasm32-wasip1` Rust target: `rustup target add wasm32-wasip1`
-2. Building with: `cargo build --target wasm32-wasip1 --release`
-3. The resulting `.wasm` file must be placed in the extension directory
-
-The existing `extensions/test-extension` has a pre-built WASM file checked in. To test LLM
-provider functionality, either:
-- Rebuild the test-extension WASM with LLM provider code
-- Create a new extension and build it locally
-
-Example test extension that demonstrates the LLM provider API:
-
-```
-extensions/test-llm-provider/
-├── extension.toml
-├── Cargo.toml
-└── src/
- └── lib.rs
-```
-
-**extension.toml:**
-```toml
-id = "test-llm-provider"
-name = "Test LLM Provider"
-version = "0.1.0"
-schema_version = 1
-
-[language_model_providers.test-provider]
-name = "Test Provider"
-```
-
-**src/lib.rs:**
-```rust
-use zed_extension_api::{self as zed, *};
-
-use std::collections::HashMap;
-use std::sync::Mutex;
-
-struct TestExtension {
- streams: Mutex<HashMap<String, Vec<LlmCompletionEvent>>>,
- next_stream_id: Mutex<u64>,
-}
-
-impl zed::Extension for TestExtension {
- fn new() -> Self {
- Self {
- streams: Mutex::new(HashMap::new()),
- next_stream_id: Mutex::new(0),
- }
- }
-
- fn llm_providers(&self) -> Vec<LlmProviderInfo> {
- vec![LlmProviderInfo {
- id: "test-provider".into(),
- name: "Test Provider".into(),
- icon: None,
- }]
- }
-
- fn llm_provider_models(&self, _provider_id: &str) -> Result<Vec<LlmModelInfo>, String> {
- Ok(vec![LlmModelInfo {
- id: "test-model".into(),
- name: "Test Model".into(),
- max_token_count: 4096,
- max_output_tokens: Some(1024),
- capabilities: LlmModelCapabilities {
- supports_images: false,
- supports_tools: false,
- supports_tool_choice_auto: false,
- supports_tool_choice_any: false,
- supports_tool_choice_none: false,
- supports_thinking: false,
- tool_input_format: LlmToolInputFormat::JsonSchema,
- },
- is_default: true,
- is_default_fast: true,
- }])
- }
-
- fn llm_stream_completion_start(
- &mut self,
- _provider_id: &str,
- _model_id: &str,
- _request: &LlmCompletionRequest,
- ) -> Result<String, String> {
- // Create a simple response with test events
- let events = vec![
- LlmCompletionEvent::Started,
- LlmCompletionEvent::Text("Hello, ".into()),
- LlmCompletionEvent::Text("world!".into()),
- LlmCompletionEvent::Stop(LlmStopReason::EndTurn),
- ];
-
- let mut id = self.next_stream_id.lock().unwrap();
- let stream_id = format!("stream-{}", *id);
- *id += 1;
-
- self.streams.lock().unwrap().insert(stream_id.clone(), events);
- Ok(stream_id)
- }
-
- fn llm_stream_completion_next(
- &mut self,
- stream_id: &str,
- ) -> Result<Option<LlmCompletionEvent>, String> {
- let mut streams = self.streams.lock().unwrap();
- if let Some(events) = streams.get_mut(stream_id) {
- if events.is_empty() {
- Ok(None)
- } else {
- Ok(Some(events.remove(0)))
- }
- } else {
- Err(format!("Unknown stream: {}", stream_id))
- }
- }
-
- fn llm_stream_completion_close(&mut self, stream_id: &str) {
- self.streams.lock().unwrap().remove(stream_id);
- }
-}
-
-zed::register_extension!(TestExtension);
-```
-
----
-
-## File-by-File Checklist
-
-### Completed ✅
-
-- [x] `crates/extension_api/wit/since_v0.7.0/llm-provider.wit` - LLM types defined
-- [x] `crates/extension_api/wit/since_v0.7.0/extension.wit` - LLM exports/imports added
-- [x] `crates/extension_api/src/extension_api.rs` - Extension trait + Guest impl updated for v0.7.0
-- [x] `crates/extension/src/extension_manifest.rs` - Manifest types added
-- [x] `crates/extension/src/extension_host_proxy.rs` - Proxy trait added
-- [x] `crates/extension_host/src/wasm_host/llm_provider.rs` - Provider/Model structs created
-- [x] `crates/extension_host/src/wasm_host/wit.rs` - LLM types exported, Extension enum updated
-- [x] `crates/extension_host/src/wasm_host/wit/since_v0_7_0.rs` - Host trait implementations
-- [x] `crates/extension_host/src/wasm_host/wit/since_v0_6_0.rs` - Rewritten to use latest types
-- [x] `crates/extension_host/src/extension_host.rs` - Wired up LLM provider registration/unregistration
-- [x] `crates/extension_host/Cargo.toml` - Dependencies added
-- [x] `crates/language_models/src/extension.rs` - Proxy implementation
-- [x] `crates/language_models/src/language_models.rs` - Proxy registration
-- [x] `crates/language_models/Cargo.toml` - Extension dependency added
-
-### Should Implement (Follow-up PRs)
-
-- [ ] `llm_request_credential` UI implementation
-- [ ] Model refresh mechanism
-- [ ] Test extension for validation
-- [ ] Documentation for extension authors
-
----
-
-## Architecture Overview
-
-```
-┌─────────────────────────────────────────────────────────────────────┐
-│ Extension Host │
-│ ┌─────────────────────────────────────────────────────────────┐ │
-│ │ extensions_updated() │ │
-│ │ │ │
-│ │ 1. Load WasmExtension │ │
-│ │ 2. Query llm_providers() and llm_provider_models() │ │
-│ │ 3. Call proxy.register_language_model_provider() │ │
-│ └───────────────────────────┬───────────────────────────────────┘ │
-│ │ │
-│ ┌───────────────────────────▼───────────────────────────────────┐ │
-│ │ ExtensionLanguageModelProvider │ │
-│ │ - Wraps WasmExtension │ │
-│ │ - Implements LanguageModelProvider │ │
-│ │ - Creates ExtensionLanguageModel instances │ │
-│ └───────────────────────────┬───────────────────────────────────┘ │
-│ │ │
-│ ┌───────────────────────────▼───────────────────────────────────┐ │
-│ │ ExtensionLanguageModel │ │
-│ │ - Implements LanguageModel │ │
-│ │ - stream_completion() calls extension via WASM │ │
-│ └───────────────────────────────────────────────────────────────┘ │
-└─────────────────────────────────────────────────────────────────────┘
- │
- │ Proxy (boxed closure)
- ▼
-┌─────────────────────────────────────────────────────────────────────┐
-│ Language Models Crate │
-│ ┌───────────────────────────────────────────────────────────────┐ │
-│ │ ExtensionLanguageModelProxy │ │
-│ │ - Implements ExtensionLanguageModelProviderProxy │ │
-│ │ - Calls register_fn closure │ │
-│ │ - Unregisters from LanguageModelRegistry │ │
-│ └───────────────────────────┬───────────────────────────────────┘ │
-│ │ │
-│ ┌───────────────────────────▼───────────────────────────────────┐ │
-│ │ LanguageModelRegistry │ │
-│ │ - Stores all providers (built-in + extension) │ │
-│ │ - Provides models to UI │ │
-│ └───────────────────────────────────────────────────────────────┘ │
-└─────────────────────────────────────────────────────────────────────┘
-```
-
----
-
-## Key Code Patterns
-
-### 1. Provider ID Format
-
-Provider IDs are formatted as `{extension_id}:{provider_id}` to ensure uniqueness:
-
-```rust
-let provider_id: Arc<str> = format!("{}:{}", manifest.id, provider_info.id).into();
-```
-
-### 2. Triple-Nested Result Handling
-
-When calling extension methods, results are nested:
-- Outer `Result`: from channel operations (anyhow error)
-- Middle `Result`: from WASM call (anyhow error)
-- Inner `Result<T, String>`: from extension logic
-
-```rust
-let models_result = wasm_extension.call(...).await;
-
-let models: Vec<LlmModelInfo> = match models_result {
- Ok(Ok(Ok(models))) => models,
- Ok(Ok(Err(e))) => { /* extension returned error */ }
- Ok(Err(e)) => { /* WASM call failed */ }
- Err(e) => { /* channel operation failed */ }
-};
-```
-
-### 3. Polling-Based Streaming Pattern
-
-The streaming API uses explicit stream IDs with polling instead of resource handles:
-
-```rust
-// Start the stream and get an ID
-let stream_id = ext.call_llm_stream_completion_start(store, provider_id, model_id, request).await?;
-
-// Poll for events in a loop
-loop {
- match ext.call_llm_stream_completion_next(store, &stream_id).await? {
- Ok(Some(event)) => { /* process event */ }
- Ok(None) => break, // Stream complete
- Err(e) => { /* handle error */ }
- }
-}
-
-// Clean up
-ext.call_llm_stream_completion_close(store, &stream_id).await;
-```
-
-This pattern avoids the complexity of cross-boundary resource ownership in the WASM component model.
-
-### 4. Default Trait Implementations
-
-All LLM methods in the `Extension` trait have defaults so existing extensions continue to work:
-
-```rust
-fn llm_providers(&self) -> Vec<LlmProviderInfo> {
- Vec::new() // No providers by default
-}
-```
-
----
-
-## Common Pitfalls
-
-1. **Type confusion:** WIT bindgen creates NEW types for each version. `Completion` from v0.6.0 bindgen is different from v0.7.0. This is why we map older interfaces to `latest::`.
-
-2. **Import paths:** After `pub use self::zed::extension::*;`, types are available without prefix. Types in sub-interfaces (like `lsp::CompletionKind`) need explicit imports.
-
-3. **Async closures:** Extension calls use `extension.call(|ext, store| async move { ... }.boxed())` pattern. The closure must be `'static + Send`.
-
-4. **Stream ID management:** Extensions must track their active streams using the stream IDs returned from `llm_stream_completion_start`. The host will call `llm_stream_completion_close` when done.
-
-5. **Result nesting:** `extension.call(...)` wraps the closure's return type in `Result<T>`, so if the closure returns `Result<Result<X, String>>`, you get `Result<Result<Result<X, String>>>`. Unwrap carefully!
-
-6. **Proxy type boundaries:** The `extension` crate shouldn't depend on `extension_host`. The proxy trait uses a boxed closure to pass the registration logic without needing to share types.
-
-7. **Resource ownership in WIT:** Be careful when defining resources in imported interfaces but returning them from exported functions. This creates ownership ambiguity. The streaming API was changed to use polling to avoid this issue.
-
----
-
-## Testing
-
-All existing tests pass:
-```bash
-cargo test -p extension_host --lib
-# 3 tests pass
-
-./script/clippy
-# No warnings
-```
-
-To test the full flow manually:
-1. Create a test extension with LLM provider
-2. Build and install it
-3. Check if it appears in the model selector
-4. Try making a completion request
-
----
-
-## Relevant Files for Reference
-
-### How providers are registered
-- `crates/language_model/src/registry.rs` - `LanguageModelRegistry::register_provider`
-
-### How other extension proxies work
-- `crates/extension/src/extension_host_proxy.rs` - the proxy pattern
-- `crates/project/src/context_server_store/extension.rs` - context server proxy implementation
-
-### How extensions are loaded
-- `crates/extension_host/src/extension_host.rs` - `extensions_updated` method
-
-### WasmExtension call pattern
-- `crates/extension_host/src/wasm_host.rs` - `WasmExtension::call` method
-
----
-
-## Questions for Follow-up
-
-1. **Where should configuration UI live?** The current implementation uses an empty config view. Should extension providers have configurable settings?
-
-2. **How to handle extension reload?** Currently, in-flight completions will fail if the extension is unloaded. Should we add graceful handling?
-
-3. **Should there be rate limiting?** If an extension's provider misbehaves, should Zed throttle or disable it?
-
-4. **Icon support:** The `provider_info.icon` field exists but `icon()` on the provider returns `ui::IconName::ZedAssistant`. Should we add custom icon support?
@@ -1,1368 +0,0 @@
-# Language Model Provider Extensions Plan
-
-## Executive Summary
-
-This document outlines a comprehensive plan to introduce **Language Model Provider Extensions** to Zed. This feature will allow third-party developers to create extensions that register new language model providers, enabling users to select and use custom language models in Zed's AI features (Agent, inline assist, commit message generation, etc.).
-
-## Table of Contents
-
-1. [Current Architecture Overview](#current-architecture-overview)
-2. [Goals and Requirements](#goals-and-requirements)
-3. [Proposed Architecture](#proposed-architecture)
-4. [Implementation Phases](#implementation-phases)
-5. [WIT Interface Design](#wit-interface-design)
-6. [Extension Manifest Changes](#extension-manifest-changes)
-7. [Migration Plan for Built-in Providers](#migration-plan-for-built-in-providers)
-8. [Testing Strategy](#testing-strategy)
-9. [Security Considerations](#security-considerations)
-10. [Appendix: Provider-Specific Requirements](#appendix-provider-specific-requirements)
-
----
-
-## Current Architecture Overview
-
-### Key Components
-
-#### `language_model` crate (`crates/language_model/`)
-- **`LanguageModel` trait** (`src/language_model.rs:580-718`): Core trait defining model capabilities
- - `id()`, `name()`, `provider_id()`, `provider_name()`
- - `supports_images()`, `supports_tools()`, `supports_tool_choice()`
- - `max_token_count()`, `max_output_tokens()`
- - `count_tokens()` - async token counting
- - `stream_completion()` - the main completion streaming method
- - `cache_configuration()` - optional prompt caching config
-
-- **`LanguageModelProvider` trait** (`src/language_model.rs:743-764`): Provider registration
- - `id()`, `name()`, `icon()`
- - `default_model()`, `default_fast_model()`
- - `provided_models()`, `recommended_models()`
- - `is_authenticated()`, `authenticate()`
- - `configuration_view()` - UI for provider configuration
- - `reset_credentials()`
-
-- **`LanguageModelRegistry`** (`src/registry.rs`): Global registry for providers
- - `register_provider()` / `unregister_provider()`
- - Model selection and configuration
- - Event emission for UI updates
-
-#### `language_models` crate (`crates/language_models/`)
-Contains all built-in provider implementations:
-- `provider/anthropic.rs` - Anthropic Claude models
-- `provider/cloud.rs` - Zed Cloud (proxied models)
-- `provider/google.rs` - Google Gemini models
-- `provider/open_ai.rs` - OpenAI GPT models
-- `provider/ollama.rs` - Local Ollama models
-- `provider/deepseek.rs` - DeepSeek models
-- `provider/open_router.rs` - OpenRouter aggregator
-- `provider/bedrock.rs` - AWS Bedrock
-- And more...
-
-#### Extension System (`crates/extension_host/`, `crates/extension_api/`)
-- **WIT interface** (`extension_api/wit/since_v0.6.0/`): WebAssembly Interface Types definitions
-- **WASM host** (`extension_host/src/wasm_host.rs`): Executes extension WASM modules
-- **Extension trait** (`extension/src/extension.rs`): Rust trait for extensions
-- **HTTP client** (`extension_api/src/http_client.rs`): Existing HTTP capability for extensions
-
-### Request/Response Flow
-
-```
-User Request
- ↓
-LanguageModelRequest (crates/language_model/src/request.rs)
- ↓
-Provider-specific conversion (e.g., into_anthropic(), into_open_ai())
- ↓
-HTTP API call (provider-specific crate)
- ↓
-Stream of provider-specific events
- ↓
-Event mapping to LanguageModelCompletionEvent
- ↓
-Consumer (Agent, Inline Assist, etc.)
-```
-
-### Key Data Structures
-
-```rust
-// Request
-pub struct LanguageModelRequest {
- pub thread_id: Option<String>,
- pub prompt_id: Option<String>,
- pub intent: Option<CompletionIntent>,
- pub mode: Option<CompletionMode>,
- pub messages: Vec<LanguageModelRequestMessage>,
- pub tools: Vec<LanguageModelRequestTool>,
- pub tool_choice: Option<LanguageModelToolChoice>,
- pub stop: Vec<String>,
- pub temperature: Option<f32>,
- pub thinking_allowed: bool,
-}
-
-// Completion Events
-pub enum LanguageModelCompletionEvent {
- Queued { position: usize },
- Started,
- UsageUpdated { amount: usize, limit: usize },
- ToolUseLimitReached,
- Stop(StopReason),
- Text(String),
- Thinking { text: String, signature: Option<String> },
- RedactedThinking { data: String },
- ToolUse(LanguageModelToolUse),
- ToolUseJsonParseError { ... },
- StartMessage { message_id: Option<String> },
- ReasoningDetails(serde_json::Value),
- UsageUpdate(TokenUsage),
-}
-```
-
----
-
-## Goals and Requirements
-
-### Primary Goals
-
-1. **Extensibility**: Allow any developer to add new LLM providers via extensions
-2. **Parity**: Extension-based providers should have feature parity with built-in providers
-3. **Performance**: Minimize overhead from WASM boundary crossings during streaming
-4. **Security**: Sandbox API key handling and network access appropriately
-5. **User Experience**: Seamless integration with existing model selectors and configuration UI
-
-### Functional Requirements
-
-1. Extensions can register one or more language model providers
-2. Extensions can define multiple models per provider
-3. Extensions handle authentication (API keys, OAuth, etc.)
-4. Extensions implement the streaming completion API
-5. Extensions can specify model capabilities (tools, images, thinking, etc.)
-6. Extensions can provide token counting logic
-7. Extensions can provide configuration UI components
-8. Extensions receive full request context for API customization
-
-### Non-Functional Requirements
-
-1. Streaming should feel as responsive as built-in providers
-2. Extension crashes should not crash Zed
-3. API keys should never be logged or exposed
-4. Extensions should be able to make arbitrary HTTP requests
-5. Settings should persist across sessions
-
----
-
-## Proposed Architecture
-
-### High-Level Design
-
-```
-┌─────────────────────────────────────────────────────────────────┐
-│ Zed Application │
-├─────────────────────────────────────────────────────────────────┤
-│ ┌─────────────────────────────────────────────────────────────┐│
-│ │ LanguageModelRegistry ││
-│ │ ┌──────────────┐ ┌──────────────┐ ┌──────────────────┐ ││
-│ │ │ Built-in │ │ Extension │ │ Extension │ ││
-│ │ │ Providers │ │ Provider A │ │ Provider B │ ││
-│ │ │ (Anthropic, │ │ (WASM) │ │ (WASM) │ ││
-│ │ │ OpenAI...) │ │ │ │ │ ││
-│ │ └──────────────┘ └──────────────┘ └──────────────────┘ ││
-│ └─────────────────────────────────────────────────────────────┘│
-│ ↑ │
-│ │ │
-│ ┌───────────────────────────┴─────────────────────────────────┐│
-│ │ ExtensionLanguageModelProvider ││
-│ │ ┌─────────────────────────────────────────────────────────┐││
-│ │ │ • Bridges WASM extension to LanguageModelProvider trait │││
-│ │ │ • Manages streaming across WASM boundary │││
-│ │ │ • Handles credential storage via credentials_provider │││
-│ │ │ • Provides configuration UI scaffolding │││
-│ │ └─────────────────────────────────────────────────────────┘││
-│ └─────────────────────────────────────────────────────────────┘│
-│ ↑ │
-│ ┌───────────────────────────┴─────────────────────────────────┐│
-│ │ WasmHost / WasmExtension ││
-│ │ • Executes WASM module ││
-│ │ • Provides WIT interface for LLM operations ││
-│ │ • HTTP client for API calls ││
-│ └─────────────────────────────────────────────────────────────┘│
-└─────────────────────────────────────────────────────────────────┘
-```
-
-### New Components
-
-#### 1. `ExtensionLanguageModelProvider`
-
-A new struct in `extension_host` that implements `LanguageModelProvider` and wraps a WASM extension:
-
-```rust
-pub struct ExtensionLanguageModelProvider {
- extension: WasmExtension,
- provider_info: ExtensionLlmProviderInfo,
- state: Entity<ExtensionLlmProviderState>,
-}
-
-struct ExtensionLlmProviderState {
- is_authenticated: bool,
- available_models: Vec<ExtensionLanguageModel>,
-}
-```
-
-#### 2. `ExtensionLanguageModel`
-
-Implements `LanguageModel` trait, delegating to WASM calls:
-
-```rust
-pub struct ExtensionLanguageModel {
- extension: WasmExtension,
- model_info: ExtensionLlmModelInfo,
- provider_id: LanguageModelProviderId,
-}
-```
-
-#### 3. WIT Interface Extensions
-
-New WIT definitions for LLM provider functionality (see [WIT Interface Design](#wit-interface-design)).
-
----
-
-## Implementation Phases
-
-### Phase 1: Foundation (2-3 weeks)
-
-**Goal**: Establish the core infrastructure for extension-based LLM providers.
-
-#### Tasks
-
-1. **Define WIT interface for LLM providers** (`extension_api/wit/since_v0.7.0/llm-provider.wit`)
- - Provider metadata (id, name, icon)
- - Model definitions (id, name, capabilities, limits)
- - Credential management hooks
- - Completion request/response types
-
-2. **Create `ExtensionLanguageModelProvider`** (`extension_host/src/wasm_host/llm_provider.rs`)
- - Implement `LanguageModelProvider` trait
- - Handle provider registration/unregistration
- - Basic authentication state management
-
-3. **Create `ExtensionLanguageModel`** (`extension_host/src/wasm_host/llm_model.rs`)
- - Implement `LanguageModel` trait
- - Simple synchronous completion (non-streaming initially)
-
-4. **Update `ExtensionManifest`** (`extension/src/extension_manifest.rs`)
- - Add `language_model_providers` field
- - Parse provider configuration from `extension.toml`
-
-5. **Update extension loading** (`extension_host/src/extension_host.rs`)
- - Detect LLM provider declarations in manifest
- - Register providers with `LanguageModelRegistry`
-
-#### Deliverables
-- Extensions can register a provider that appears in model selector
-- Basic (non-streaming) completions work
-- Manual testing with a test extension
-
-### Phase 2: Streaming Support (2-3 weeks)
-
-**Goal**: Enable efficient streaming completions across the WASM boundary.
-
-#### Tasks
-
-1. **Design streaming protocol**
- - Option A: Chunked responses via repeated WASM calls
- - Option B: Callback-based streaming (preferred)
- - Option C: Shared memory buffer with polling
-
-2. **Implement streaming in WIT**
- ```wit
- resource completion-stream {
- next-event: func() -> result<option<completion-event>, string>;
- }
-
- export stream-completion: func(
- provider-id: string,
- model-id: string,
- request: completion-request
- ) -> result<completion-stream, string>;
- ```
-
-3. **Implement `http-response-stream` integration**
- - Extensions already have access to `fetch-stream`
- - Need to parse SSE/chunked responses in WASM
- - Map to completion events
-
-4. **Update `ExtensionLanguageModel::stream_completion`**
- - Bridge WASM completion-stream to Rust BoxStream
- - Handle backpressure and cancellation
-
-5. **Performance optimization**
- - Batch small events to reduce WASM boundary crossings
- - Consider using shared memory for large payloads
-
-#### Deliverables
-- Streaming completions work with acceptable latency
-- Performance benchmarks vs built-in providers
-
-### Phase 3: Full Feature Parity (2-3 weeks)
-
-**Goal**: Support all advanced features that built-in providers have.
-
-#### Tasks
-
-1. **Tool/Function calling support**
- - Add tool definitions to request
- - Parse tool use events from response
- - Handle tool results in follow-up requests
-
-2. **Image support**
- - Pass image data in messages
- - Handle base64 encoding/size limits
-
-3. **Thinking/reasoning support** (for Claude-like models)
- - `Thinking` and `RedactedThinking` events
- - Thought signatures for tool calls
-
-4. **Token counting**
- - WIT interface for `count_tokens`
- - Allow extensions to provide custom tokenizers or call API
-
-5. **Prompt caching configuration**
- - Cache control markers in messages
- - Cache configuration reporting
-
-6. **Rate limiting and error handling**
- - Standard error types in WIT
- - Retry-after headers
- - Rate limit events
-
-#### Deliverables
-- Extension providers can use tools
-- Extension providers can process images
-- Full error handling parity
-
-### Phase 4: Credential Management & Configuration UI (1-2 weeks)
-
-**Goal**: Secure credential storage and user-friendly configuration.
-
-#### Tasks
-
-1. **Credential storage integration**
- - Use existing `credentials_provider` crate
- - Extensions request credentials via WIT
-   - Credentials never exposed to WASM directly (only "is_authenticated" status) — NOTE(review): this conflicts with the `llm-get-credential` import in the WIT draft above, which returns the stored value to the extension; reconcile before implementation
-
-2. **API key input flow**
- ```wit
- import request-credential: func(
- credential-type: credential-type,
- label: string,
- placeholder: string
- ) -> result<bool, string>;
- ```
-
-3. **Configuration view scaffolding**
- - Generic configuration view that works for most providers
- - Extensions can provide additional settings via JSON schema
- - Settings stored in extension-specific namespace
-
-4. **Environment variable support**
- - Allow specifying env var names for API keys
- - Read from environment on startup
-
-#### Deliverables
-- Secure API key storage
-- Configuration UI for extension providers
-- Environment variable fallback
-
-### Phase 5: Testing & Documentation (1-2 weeks)
-
-**Goal**: Comprehensive testing and developer documentation.
-
-#### Tasks
-
-1. **Integration tests**
- - Test extension loading and registration
- - Test streaming completions
- - Test error handling
- - Test credential management
-
-2. **Performance tests**
- - Latency benchmarks
- - Memory usage under load
- - Comparison with built-in providers
-
-3. **Example extensions**
- - Simple OpenAI-compatible provider
- - Provider with custom authentication
- - Provider with tool support
-
-4. **Documentation**
- - Extension developer guide
- - API reference
- - Migration guide for custom providers
-
-#### Deliverables
-- Full test coverage
-- Published documentation
-- Example extensions in `extensions/` directory
-
-### Phase 6: Migration of Built-in Providers (Optional, Long-term)
-
-**Goal**: Prove the extension system by migrating one or more built-in providers.
-
-#### Tasks
-
-1. **Select candidate provider** (suggest: Ollama or LM Studio - simplest API)
-2. **Create extension version**
-3. **Feature parity testing**
-4. **Performance comparison**
-5. **Gradual rollout** (feature flag)
-
----
-
-## WIT Interface Design
-
-### New File: `extension_api/wit/since_v0.7.0/llm-provider.wit`
-
-```wit
-interface llm-provider {
- /// Information about a language model provider
- record provider-info {
- /// Unique identifier for the provider (e.g., "my-extension.my-provider")
- id: string,
- /// Display name for the provider
- name: string,
- /// Icon name from Zed's icon set (optional)
- icon: option<string>,
- }
-
- /// Capabilities of a language model
- record model-capabilities {
- /// Whether the model supports image inputs
- supports-images: bool,
- /// Whether the model supports tool/function calling
- supports-tools: bool,
- /// Whether the model supports tool choice (auto/any/none)
- supports-tool-choice-auto: bool,
- supports-tool-choice-any: bool,
- supports-tool-choice-none: bool,
- /// Whether the model supports extended thinking
- supports-thinking: bool,
- /// The format for tool input schemas
- tool-input-format: tool-input-format,
- }
-
- /// Format for tool input schemas
- enum tool-input-format {
- json-schema,
- simplified,
- }
-
- /// Information about a specific model
- record model-info {
- /// Unique identifier for the model
- id: string,
- /// Display name for the model
- name: string,
- /// Maximum input token count
- max-token-count: u64,
- /// Maximum output tokens (optional)
- max-output-tokens: option<u64>,
- /// Model capabilities
- capabilities: model-capabilities,
- /// Whether this is the default model for the provider
- is-default: bool,
- /// Whether this is the default fast model
- is-default-fast: bool,
- }
-
- /// A message in a completion request
- record request-message {
- role: message-role,
- content: list<message-content>,
- cache: bool,
- }
-
- enum message-role {
- user,
- assistant,
- system,
- }
-
- /// Content within a message
- variant message-content {
- text(string),
- image(image-data),
- tool-use(tool-use),
- tool-result(tool-result),
- thinking(thinking-content),
- redacted-thinking(string),
- }
-
- record image-data {
- /// Base64-encoded image data
- source: string,
- /// Estimated dimensions
- width: option<u32>,
- height: option<u32>,
- }
-
- record tool-use {
- id: string,
- name: string,
- input: string, // JSON string
- thought-signature: option<string>,
- }
-
- record tool-result {
- tool-use-id: string,
- tool-name: string,
- is-error: bool,
- content: tool-result-content,
- }
-
- variant tool-result-content {
- text(string),
- image(image-data),
- }
-
- record thinking-content {
- text: string,
- signature: option<string>,
- }
-
- /// A tool definition
- record tool-definition {
- name: string,
- description: string,
- /// JSON Schema for input parameters
- input-schema: string,
- }
-
- /// Tool choice preference
- enum tool-choice {
- auto,
- any,
- none,
- }
-
- /// A completion request
- record completion-request {
- messages: list<request-message>,
- tools: list<tool-definition>,
- tool-choice: option<tool-choice>,
- stop-sequences: list<string>,
- temperature: option<f32>,
- thinking-allowed: bool,
- /// Maximum tokens to generate
- max-tokens: option<u64>,
- }
-
- /// Events emitted during completion streaming
- variant completion-event {
- /// Completion has started
- started,
- /// Text content
- text(string),
- /// Thinking/reasoning content
- thinking(thinking-content),
- /// Redacted thinking (encrypted)
- redacted-thinking(string),
- /// Tool use request
- tool-use(tool-use),
- /// Completion stopped
- stop(stop-reason),
- /// Token usage update
- usage(token-usage),
- }
-
- enum stop-reason {
- end-turn,
- max-tokens,
- tool-use,
- }
-
- record token-usage {
- input-tokens: u64,
- output-tokens: u64,
- cache-creation-input-tokens: option<u64>,
- cache-read-input-tokens: option<u64>,
- }
-
- /// A streaming completion response
- resource completion-stream {
- /// Get the next event from the stream.
- /// Returns None when the stream is complete.
- next-event: func() -> result<option<completion-event>, string>;
- }
-
- /// Credential types that can be requested
- enum credential-type {
- api-key,
- oauth-token,
- }
-}
-```
-
-### Updates to `extension_api/wit/since_v0.7.0/extension.wit`
-
-```wit
-world extension {
- // ... existing imports ...
- import llm-provider;
-
- use llm-provider.{
- provider-info, model-info, completion-request,
- completion-stream, credential-type
- };
-
- /// Returns information about language model providers offered by this extension
- export llm-providers: func() -> list<provider-info>;
-
- /// Returns the models available for a provider
- export llm-provider-models: func(provider-id: string) -> result<list<model-info>, string>;
-
- /// Check if the provider is authenticated
- export llm-provider-is-authenticated: func(provider-id: string) -> bool;
-
- /// Attempt to authenticate the provider
- export llm-provider-authenticate: func(provider-id: string) -> result<_, string>;
-
- /// Reset credentials for the provider
- export llm-provider-reset-credentials: func(provider-id: string) -> result<_, string>;
-
- /// Count tokens for a request
- export llm-count-tokens: func(
- provider-id: string,
- model-id: string,
- request: completion-request
- ) -> result<u64, string>;
-
- /// Stream a completion
- export llm-stream-completion: func(
- provider-id: string,
- model-id: string,
- request: completion-request
- ) -> result<completion-stream, string>;
-
- /// Request a credential from the user
- import llm-request-credential: func(
- provider-id: string,
- credential-type: credential-type,
- label: string,
- placeholder: string
- ) -> result<bool, string>;
-
- /// Get a stored credential
- import llm-get-credential: func(provider-id: string) -> option<string>;
-
- /// Store a credential
- import llm-store-credential: func(provider-id: string, value: string) -> result<_, string>;
-
- /// Delete a stored credential
- import llm-delete-credential: func(provider-id: string) -> result<_, string>;
-}
-```
-
----
-
-## Extension Manifest Changes
-
-### Updated `extension.toml` Schema
-
-```toml
-id = "my-llm-extension"
-name = "My LLM Provider"
-description = "Adds support for My LLM API"
-version = "1.0.0"
-schema_version = 1
-authors = ["Developer <dev@example.com>"]
-repository = "https://github.com/example/my-llm-extension"
-
-[lib]
-kind = "rust"
-version = "0.7.0"
-
-# New section for LLM providers
-[language_model_providers.my-provider]
-name = "My LLM"
-icon = "sparkle" # Optional, from Zed's icon set
-
-# Optional: Default models to show even before API connection
-[[language_model_providers.my-provider.models]]
-id = "my-model-large"
-name = "My Model Large"
-max_token_count = 200000
-max_output_tokens = 8192
-supports_images = true
-supports_tools = true
-
-[[language_model_providers.my-provider.models]]
-id = "my-model-small"
-name = "My Model Small"
-max_token_count = 100000
-max_output_tokens = 4096
-supports_images = false
-supports_tools = true
-
-# Optional: Environment variable for API key
-[language_model_providers.my-provider.auth]
-env_var = "MY_LLM_API_KEY"
-credential_label = "API Key"
-```
-
-### `ExtensionManifest` Changes
-
-```rust
-// In extension/src/extension_manifest.rs
-
-#[derive(Clone, Default, PartialEq, Eq, Debug, Deserialize, Serialize)]
-pub struct LanguageModelProviderManifestEntry {
- pub name: String,
- #[serde(default)]
- pub icon: Option<String>,
- #[serde(default)]
- pub models: Vec<LanguageModelManifestEntry>,
- #[serde(default)]
- pub auth: Option<LanguageModelAuthConfig>,
-}
-
-#[derive(Clone, Default, PartialEq, Eq, Debug, Deserialize, Serialize)]
-pub struct LanguageModelManifestEntry {
- pub id: String,
- pub name: String,
- #[serde(default)]
- pub max_token_count: u64,
- #[serde(default)]
- pub max_output_tokens: Option<u64>,
- #[serde(default)]
- pub supports_images: bool,
- #[serde(default)]
- pub supports_tools: bool,
- #[serde(default)]
- pub supports_thinking: bool,
-}
-
-#[derive(Clone, Default, PartialEq, Eq, Debug, Deserialize, Serialize)]
-pub struct LanguageModelAuthConfig {
- pub env_var: Option<String>,
- pub credential_label: Option<String>,
-}
-
-// Add to ExtensionManifest struct:
-pub struct ExtensionManifest {
- // ... existing fields ...
- #[serde(default)]
- pub language_model_providers: BTreeMap<Arc<str>, LanguageModelProviderManifestEntry>,
-}
-```
-
----
-
-## Migration Plan for Built-in Providers
-
-This section analyzes each built-in provider and what would be required to implement them as extensions.
-
-### Provider Comparison Matrix
-
-| Provider | API Style | Auth Method | Special Features | Migration Complexity |
-|----------|-----------|-------------|------------------|---------------------|
-| Anthropic | REST/SSE | API Key | Thinking, Caching, Tool signatures | High |
-| OpenAI | REST/SSE | API Key | Reasoning effort, Prompt caching | Medium |
-| Google | REST/SSE | API Key | Thinking, Tool signatures | High |
-| Ollama | REST/SSE | None (local) | Dynamic model discovery | Low |
-| DeepSeek | REST/SSE | API Key | Reasoning mode | Medium |
-| OpenRouter | REST/SSE | API Key | Reasoning details, Model routing | Medium |
-| LM Studio | REST/SSE | None (local) | OpenAI-compatible | Low |
-| Bedrock | AWS SDK | AWS Credentials | Multiple underlying providers | High |
-| Zed Cloud | Zed Auth | Zed Account | Proxied providers | N/A (keep built-in) |
-
-### Provider-by-Provider Analysis
-
-#### Anthropic (`provider/anthropic.rs`)
-
-**Current Implementation Highlights:**
-- Uses `anthropic` crate for API types and streaming
-- Custom event mapper (`AnthropicEventMapper`) for SSE → completion events
-- Supports thinking/reasoning with thought signatures
-- Prompt caching with cache control markers
-- Beta headers for experimental features
-
-**Extension Requirements:**
-- Full SSE parsing in WASM
-- Complex event mapping logic
-- Thinking content with signatures
-- Cache configuration reporting
-
-**Unique Challenges:**
-```rust
-// Thought signatures in tool use
-pub struct LanguageModelToolUse {
- pub thought_signature: Option<String>, // Anthropic-specific
-}
-
-// Thinking events with signatures
-Thinking { text: String, signature: Option<String> }
-```
-
-**Migration Approach:**
-1. Port `anthropic` crate types to extension-compatible structures
-2. Implement SSE parser in extension (can use existing `fetch-stream`)
-3. Map Anthropic events to generic completion events
-4. Handle beta headers via custom HTTP headers
-
-#### OpenAI (`provider/open_ai.rs`)
-
-**Current Implementation Highlights:**
-- Uses `open_ai` crate for API types
-- Tiktoken-based token counting
-- Parallel tool calls support
-- Reasoning effort parameter (o1/o3 models)
-
-**Extension Requirements:**
-- SSE parsing (standard format)
-- Token counting (could call API or use simplified estimate)
-- Tool call aggregation across chunks
-
-**Unique Challenges:**
-```rust
-// Reasoning effort for o-series models
-pub reasoning_effort: Option<String>, // "low", "medium", "high"
-
-// Prompt cache key (preview feature)
-pub prompt_cache_key: Option<String>,
-```
-
-**Migration Approach:**
-1. Standard SSE parsing
-2. Token counting via API or tiktoken WASM port
-3. Support reasoning_effort as model-specific config
-
-#### Google/Gemini (`provider/google.rs`)
-
-**Current Implementation Highlights:**
-- Uses `google_ai` crate
-- Different API structure from OpenAI/Anthropic
-- Thinking support similar to Anthropic
-- Tool signatures in function calls
-
-**Extension Requirements:**
-- Different request/response format
-- Thinking content handling
-- Tool signature preservation
-
-**Unique Challenges:**
-```rust
-// Google uses different content structure
-enum ContentPart {
- Text { text: String },
- InlineData { mime_type: String, data: String },
- FunctionCall { name: String, args: Value },
- FunctionResponse { name: String, response: Value },
-}
-```
-
-**Migration Approach:**
-1. Implement Google-specific request building
-2. Map Google events to generic completion events
-3. Handle thinking/function call signatures
-
-#### Ollama (`provider/ollama.rs`)
-
-**Current Implementation Highlights:**
-- Local-only, no authentication needed
-- Dynamic model discovery via API
-- OpenAI-compatible chat endpoint
-- Simple streaming format
-
-**Extension Requirements:**
-- API URL configuration
-- Model list fetching
-- Basic streaming
-
-**Why This is a Good First Migration Target:**
-- No authentication complexity
-- Simple API format
-- Dynamic model discovery is isolated
-- Good test case for local provider pattern
-
-**Migration Approach:**
-1. Configuration for API URL
-2. Model discovery endpoint call
-3. OpenAI-compatible streaming
-
-#### DeepSeek (`provider/deepseek.rs`)
-
-**Current Implementation Highlights:**
-- OpenAI-compatible API with extensions
-- Reasoner model support
-- Different handling for reasoning vs standard models
-
-**Extension Requirements:**
-- API key authentication
-- Model-specific request modifications
-- Reasoning content handling
-
-**Migration Approach:**
-1. Standard OpenAI-compatible base
-2. Special handling for reasoner model
-3. Temperature disabled for reasoning
-
-#### OpenRouter (`provider/open_router.rs`)
-
-**Current Implementation Highlights:**
-- Aggregates multiple providers
-- Dynamic model fetching
-- Reasoning details preservation
-- Tool call signatures
-
-**Extension Requirements:**
-- API key authentication
-- Model list from API
-- Reasoning details in responses
-
-**Migration Approach:**
-1. Model discovery from API
-2. Standard OpenAI-compatible streaming
-3. Preserve reasoning_details in events
-
-#### LM Studio (`provider/lmstudio.rs`)
-
-**Current Implementation Highlights:**
-- Local-only, OpenAI-compatible
-- Model discovery from API
-- Simple configuration
-
-**Why This is a Good First Migration Target:**
-- No authentication
-- OpenAI-compatible (reusable streaming code)
-- Similar to Ollama
-
-#### Bedrock (`provider/bedrock.rs`)
-
-**Current Implementation Highlights:**
-- AWS SDK-based authentication
-- Multiple authentication methods (IAM, Profile, etc.)
-- Proxies to Claude, Llama, etc.
-
-**Extension Requirements:**
-- AWS credential handling (complex)
-- AWS Signature V4 signing
-- Region configuration
-
-**Why This Should Stay Built-in (Initially):**
-- AWS credential management is complex
-- SDK dependency not easily portable to WASM
-- Security implications of AWS credentials in extensions
-
----
-
-## Testing Strategy
-
-### Unit Tests
-
-```rust
-// extension_host/src/wasm_host/llm_provider_tests.rs
-
-#[gpui::test]
-async fn test_extension_provider_registration(cx: &mut TestAppContext) {
- // Load test extension with LLM provider
- // Verify provider appears in registry
- // Verify models are listed correctly
-}
-
-#[gpui::test]
-async fn test_extension_streaming_completion(cx: &mut TestAppContext) {
- // Create mock HTTP server
- // Load extension
- // Send completion request
- // Verify streaming events received correctly
-}
-
-#[gpui::test]
-async fn test_extension_tool_calling(cx: &mut TestAppContext) {
- // Test tool definitions are passed correctly
- // Test tool use events are parsed
- // Test tool results can be sent back
-}
-
-#[gpui::test]
-async fn test_extension_credential_management(cx: &mut TestAppContext) {
- // Test credential storage
- // Test credential retrieval
- // Test authentication state
-}
-
-#[gpui::test]
-async fn test_extension_error_handling(cx: &mut TestAppContext) {
- // Test API errors are propagated correctly
- // Test rate limiting is handled
- // Test network errors are handled
-}
-```
-
-### Integration Tests
-
-```rust
-// crates/extension_host/src/extension_store_test.rs (additions)
-
-#[gpui::test]
-async fn test_llm_extension_lifecycle(cx: &mut TestAppContext) {
- // Install extension with LLM provider
- // Verify provider registered
- // Configure credentials
- // Make completion request
- // Uninstall extension
- // Verify provider unregistered
-}
-```
-
-### Manual Testing Checklist
-
-1. **Provider Discovery**
- - [ ] Extension provider appears in model selector
- - [ ] Provider icon displays correctly
- - [ ] Models list correctly
-
-2. **Authentication**
- - [ ] API key prompt appears when not authenticated
- - [ ] API key is stored securely
- - [ ] Environment variable fallback works
- - [ ] "Reset credentials" works
-
-3. **Completions**
- - [ ] Basic text completion works
- - [ ] Streaming is smooth (no jank)
- - [ ] Long responses complete successfully
- - [ ] Cancellation works
-
-4. **Advanced Features**
- - [ ] Tool calling works (Agent panel)
- - [ ] Image inputs work (if supported)
- - [ ] Thinking/reasoning displays correctly
-
-5. **Error Handling**
- - [ ] Invalid API key shows error
- - [ ] Rate limiting shows appropriate message
- - [ ] Network errors are handled gracefully
-
-6. **Performance**
- - [ ] First token latency acceptable (<500ms overhead)
- - [ ] Memory usage reasonable
- - [ ] No memory leaks on repeated requests
-
----
-
-## Security Considerations
-
-### Credential Handling
-
-1. **Never expose raw credentials to WASM**
- - Extensions request credentials via import function
- - Zed stores credentials in secure storage (keychain/credential manager)
- - Extensions receive only "authenticated: true/false" status
-
-2. **Credential scope isolation**
- - Each extension has its own credential namespace
- - Extensions cannot access other extensions' credentials
- - Provider ID is prefixed with extension ID
-
-3. **Audit logging**
- - Log when credentials are accessed (not the values)
- - Log when credentials are modified
-
-### Network Access
-
-1. **HTTP request validation**
- - Extensions already have HTTP access via `fetch` / `fetch-stream`
- - Consider domain allowlisting for LLM providers
- - Log outbound requests for debugging
-
-2. **Request/Response inspection**
- - API keys in headers should be redacted in logs
- - Response bodies may contain sensitive data
-
-### Extension Sandbox
-
-1. **WASM isolation**
- - Extensions run in WASM sandbox
- - Cannot access filesystem outside work directory
- - Cannot access other extensions' data
-
-2. **Resource limits**
- - Memory limits per extension
- - CPU time limits (epoch-based interruption already exists)
- - Concurrent request limits
-
-### Capability Requirements
-
-```toml
-# Extensions with LLM providers should declare:
-[[capabilities]]
-kind = "network:http"
-domains = ["api.example.com"] # Optional domain restriction
-
-[[capabilities]]
-kind = "credential:store"
-```
-
----
-
-## Appendix: Provider-Specific Requirements
-
-### A. Anthropic Implementation Details
-
-**Request Format:**
-```json
-{
- "model": "claude-sonnet-4-20250514",
- "max_tokens": 8192,
- "messages": [
- {"role": "user", "content": [{"type": "text", "text": "Hello"}]}
- ],
- "system": [{"type": "text", "text": "You are helpful"}],
- "tools": [...],
- "thinking": {"type": "enabled", "budget_tokens": 10000}
-}
-```
-
-**SSE Events:**
-- `message_start` - Contains message ID, model, usage
-- `content_block_start` - Starts text/tool_use/thinking block
-- `content_block_delta` - Incremental content (text_delta, input_json_delta, thinking_delta)
-- `content_block_stop` - Block complete
-- `message_delta` - Stop reason, final usage
-- `message_stop` - End of message
-
-**Special Considerations:**
-- Beta headers for thinking: `anthropic-beta: interleaved-thinking-2025-05-14`
-- Cache control markers in messages
-- Thought signatures on tool uses
-
-### B. OpenAI Implementation Details
-
-**Request Format:**
-```json
-{
- "model": "gpt-4o",
- "messages": [
- {"role": "system", "content": "You are helpful"},
- {"role": "user", "content": "Hello"}
- ],
- "stream": true,
- "tools": [...],
- "max_completion_tokens": 4096
-}
-```
-
-**SSE Events:**
-```
-data: {"choices":[{"delta":{"content":"Hello"}}]}
-data: {"choices":[{"delta":{"tool_calls":[...]}}]}
-data: [DONE]
-```
-
-**Special Considerations:**
-- `reasoning_effort` for o-series models
-- `parallel_tool_calls` option
-- Token counting via tiktoken
-
-### C. Google/Gemini Implementation Details
-
-**Request Format:**
-```json
-{
- "contents": [
- {"role": "user", "parts": [{"text": "Hello"}]}
- ],
- "generationConfig": {
- "maxOutputTokens": 8192,
- "temperature": 0.7
- },
- "tools": [...]
-}
-```
-
-**Response Format:**
-```json
-{
- "candidates": [{
- "content": {
- "parts": [
- {"text": "Response"},
- {"functionCall": {"name": "...", "args": {...}}}
- ]
- }
- }]
-}
-```
-
-**Special Considerations:**
-- Different streaming format (not SSE, line-delimited JSON)
-- Tool signatures in function calls
-- Thinking support similar to Anthropic
-
-### D. OpenAI-Compatible Providers (Ollama, LM Studio, DeepSeek)
-
-These providers can share common implementation:
-
-**Shared Code:**
-```rust
-// In extension
-fn stream_openai_compatible(
- api_url: &str,
- api_key: Option<&str>,
- request: CompletionRequest,
-) -> Result<CompletionStream, String> {
- let request_body = build_openai_request(request);
- let stream = http_client::fetch_stream(HttpRequest {
- method: HttpMethod::Post,
- url: format!("{}/v1/chat/completions", api_url),
- headers: build_headers(api_key),
- body: Some(serde_json::to_vec(&request_body)?),
- redirect_policy: RedirectPolicy::NoFollow,
- })?;
-
- Ok(OpenAiStreamParser::new(stream))
-}
-```
-
-### E. Example Extension: Simple OpenAI-Compatible Provider
-
-```rust
-// src/my_provider.rs
-use zed_extension_api::{self as zed, Result};
-use zed_extension_api::http_client::{HttpMethod, HttpRequest, RedirectPolicy};
-
-struct MyLlmExtension {
- api_key: Option<String>,
-}
-
-impl zed::Extension for MyLlmExtension {
- fn new() -> Self {
- Self { api_key: None }
- }
-
- fn llm_providers(&self) -> Vec<zed::LlmProviderInfo> {
- vec![zed::LlmProviderInfo {
- id: "my-provider".into(),
- name: "My LLM Provider".into(),
- icon: Some("sparkle".into()),
- }]
- }
-
- fn llm_provider_models(&self, provider_id: &str) -> Result<Vec<zed::LlmModelInfo>> {
- Ok(vec![
- zed::LlmModelInfo {
- id: "my-model".into(),
- name: "My Model".into(),
- max_token_count: 128000,
- max_output_tokens: Some(4096),
- capabilities: zed::LlmModelCapabilities {
- supports_images: true,
- supports_tools: true,
- ..Default::default()
- },
- is_default: true,
- is_default_fast: false,
- }
- ])
- }
-
- fn llm_provider_is_authenticated(&self, _provider_id: &str) -> bool {
- self.api_key.is_some() || std::env::var("MY_API_KEY").is_ok()
- }
-
- fn llm_provider_authenticate(&mut self, provider_id: &str) -> Result<()> {
- if let Some(key) = zed::llm_get_credential(provider_id)? {
- self.api_key = Some(key);
- return Ok(());
- }
-
- if zed::llm_request_credential(
- provider_id,
- zed::CredentialType::ApiKey,
- "API Key",
- "Enter your API key",
- )? {
- self.api_key = zed::llm_get_credential(provider_id)?;
- }
-
- Ok(())
- }
-
- fn llm_stream_completion(
- &self,
- provider_id: &str,
- model_id: &str,
- request: zed::LlmCompletionRequest,
- ) -> Result<zed::LlmCompletionStream> {
- let api_key = self.api_key.as_ref()
- .or_else(|| std::env::var("MY_API_KEY").ok().as_ref())
- .ok_or("Not authenticated")?;
-
- let body = serde_json::json!({
- "model": model_id,
- "messages": self.convert_messages(&request.messages),
- "stream": true,
- "max_tokens": request.max_tokens.unwrap_or(4096),
- });
-
- let stream = HttpRequest::builder()
- .method(HttpMethod::Post)
- .url("https://api.my-provider.com/v1/chat/completions")
- .header("Authorization", format!("Bearer {}", api_key))
- .header("Content-Type", "application/json")
- .body(serde_json::to_vec(&body)?)
- .build()?
- .fetch_stream()?;
-
- Ok(zed::LlmCompletionStream::new(OpenAiStreamParser::new(stream)))
- }
-}
-
-zed::register_extension!(MyLlmExtension);
-```
-
----
-
-## Timeline Summary
-
-| Phase | Duration | Key Deliverables |
-|-------|----------|------------------|
-| 1. Foundation | 2-3 weeks | WIT interface, basic provider registration |
-| 2. Streaming | 2-3 weeks | Efficient streaming across WASM boundary |
-| 3. Full Features | 2-3 weeks | Tools, images, thinking support |
-| 4. Credentials & UI | 1-2 weeks | Secure credentials, configuration UI |
-| 5. Testing & Docs | 1-2 weeks | Tests, documentation, examples |
-| 6. Migration (optional) | Ongoing | Migrate built-in providers |
-
-**Total estimated time: 8-13 weeks**
-
----
-
-## Open Questions
-
-1. **Streaming efficiency**: Is callback-based streaming feasible in WASM, or should we use polling?
-
-2. **Token counting**: Should we require extensions to implement token counting, or provide a fallback estimation?
-
-3. **Configuration UI**: Should extensions be able to provide custom UI components, or just JSON schema-driven forms?
-
-4. **Provider priorities**: Should extension providers appear before or after built-in providers in the selector?
-
-5. **Backward compatibility**: How do we handle extensions built against older WIT versions when adding new LLM features?
-
-6. **Rate limiting**: Should the host help with rate limiting, or leave it entirely to extensions?
-
----
-
-## Conclusion
-
-This plan provides a comprehensive roadmap for implementing Language Model Provider Extensions in Zed. The phased approach allows for incremental delivery of value while building toward full feature parity with built-in providers.
-
-The key architectural decisions are:
-1. **WIT-based interface** for WASM interop, consistent with existing extension patterns
-2. **Streaming via resources** to minimize WASM boundary crossing overhead
-3. **Host-managed credentials** for security
-4. **Manifest-based discovery** for static model information
-
-The migration analysis shows that simpler providers (Ollama, LM Studio) can be migrated first as proof of concept, while more complex providers (Anthropic, Bedrock) may remain built-in initially.
@@ -89,12 +89,32 @@ To do this:
#### Cross-Region Inference
-The Zed implementation of Amazon Bedrock uses [Cross-Region inference](https://docs.aws.amazon.com/bedrock/latest/userguide/cross-region-inference.html) for all the models and region combinations that support it.
+The Zed implementation of Amazon Bedrock uses [Cross-Region inference](https://docs.aws.amazon.com/bedrock/latest/userguide/cross-region-inference.html) to improve availability and throughput.
With Cross-Region inference, you can distribute traffic across multiple AWS Regions, enabling higher throughput.
-For example, if you use `Claude Sonnet 3.7 Thinking` from `us-east-1`, it may be processed across the US regions, namely: `us-east-1`, `us-east-2`, or `us-west-2`.
-Cross-Region inference requests are kept within the AWS Regions that are part of the geography where the data originally resides.
-For example, a request made within the US is kept within the AWS Regions in the US.
+##### Regional vs Global Inference Profiles
+
+Bedrock supports two types of cross-region inference profiles:
+
+- **Regional profiles** (default): Route requests within a specific geography (US, EU, APAC). For example, `us-east-1` uses the `us.*` profile, which routes across `us-east-1`, `us-east-2`, and `us-west-2`.
+- **Global profiles**: Route requests across all commercial AWS Regions for maximum availability and performance.
+
+By default, Zed uses **regional profiles**, which keep your data within the same geography. You can opt into global profiles by adding `"allow_global": true` to your Bedrock configuration:
+
+```json [settings]
+{
+ "language_models": {
+ "bedrock": {
+ "authentication_method": "named_profile",
+ "region": "your-aws-region",
+ "profile": "your-profile-name",
+ "allow_global": true
+ }
+ }
+}
+```
+
+**Note:** Only select newer models support global inference profiles. See the [AWS Bedrock supported models documentation](https://docs.aws.amazon.com/bedrock/latest/userguide/inference-profiles-support.html#inference-profiles-support-system) for the current list of models that support global inference. If you encounter availability issues with a model in your region, enabling `allow_global` may resolve them.
Although the data remains stored only in the source Region, your input prompts and output results might move outside of your source Region during cross-Region inference.
All data will be transmitted encrypted across Amazon's secure network.
@@ -2861,11 +2861,25 @@ Configuration object for defining settings profiles. Example:
```json [settings]
"preview_tabs": {
"enabled": true,
+ "enable_preview_from_project_panel": true,
"enable_preview_from_file_finder": false,
- "enable_preview_from_code_navigation": false,
+ "enable_preview_from_multibuffer": true,
+ "enable_preview_multibuffer_from_code_navigation": false,
+ "enable_preview_file_from_code_navigation": true,
+ "enable_keep_preview_on_code_navigation": false,
}
```
+### Enable preview from project panel
+
+- Description: Determines whether to open files in preview mode when opened from the project panel with a single click.
+- Setting: `enable_preview_from_project_panel`
+- Default: `true`
+
+**Options**
+
+`boolean` values
+
### Enable preview from file finder
- Description: Determines whether to open files in preview mode when selected from the file finder.
@@ -2876,10 +2890,40 @@ Configuration object for defining settings profiles. Example:
`boolean` values
-### Enable preview from code navigation
+### Enable preview from multibuffer
+
+- Description: Determines whether to open files in preview mode when opened from a multibuffer.
+- Setting: `enable_preview_from_multibuffer`
+- Default: `true`
+
+**Options**
+
+`boolean` values
+
+### Enable preview multibuffer from code navigation
+
+- Description: Determines whether to open tabs in preview mode when code navigation is used to open a multibuffer.
+- Setting: `enable_preview_multibuffer_from_code_navigation`
+- Default: `false`
+
+**Options**
+
+`boolean` values
+
+### Enable preview file from code navigation
+
+- Description: Determines whether to open tabs in preview mode when code navigation is used to open a single file.
+- Setting: `enable_preview_file_from_code_navigation`
+- Default: `true`
+
+**Options**
+
+`boolean` values
+
+### Enable keep preview on code navigation
-- Description: Determines whether a preview tab gets replaced when code navigation is used to navigate away from the tab.
-- Setting: `enable_preview_from_code_navigation`
+- Description: Determines whether to keep tabs in preview mode when code navigation is used to navigate away from them. If `enable_preview_file_from_code_navigation` or `enable_preview_multibuffer_from_code_navigation` is also true, the new tab may replace the existing one.
+- Setting: `enable_keep_preview_on_code_navigation`
- Default: `false`
**Options**
@@ -41,6 +41,7 @@ There are several third-party Zed packages for various Linux distributions and p
- Arch: [`zed`](https://archlinux.org/packages/extra/x86_64/zed/)
- Arch (AUR): [`zed-git`](https://aur.archlinux.org/packages/zed-git), [`zed-preview`](https://aur.archlinux.org/packages/zed-preview), [`zed-preview-bin`](https://aur.archlinux.org/packages/zed-preview-bin)
- Alpine: `zed` ([aarch64](https://pkgs.alpinelinux.org/package/edge/testing/aarch64/zed)) ([x86_64](https://pkgs.alpinelinux.org/package/edge/testing/x86_64/zed))
+- Conda: [`zed`](https://anaconda.org/conda-forge/zed)
- Nix: `zed-editor` ([unstable](https://search.nixos.org/packages?channel=unstable&show=zed-editor))
- Fedora/Ultramarine (Terra): [`zed`](https://github.com/terrapkg/packages/tree/frawhide/anda/devs/zed/stable), [`zed-preview`](https://github.com/terrapkg/packages/tree/frawhide/anda/devs/zed/preview), [`zed-nightly`](https://github.com/terrapkg/packages/tree/frawhide/anda/devs/zed/nightly)
- Solus: [`zed`](https://github.com/getsolus/packages/tree/main/packages/z/zed)
@@ -8,6 +8,9 @@ on:
push:
branches:
- main
+ paths-ignore:
+ - .github/**
+ workflow_dispatch: {}
jobs:
determine_bump_type:
runs-on: namespace-profile-16x32-ubuntu-2204
@@ -5,6 +5,9 @@ on:
pull_request:
branches:
- '**'
+ push:
+ branches:
+ - main
jobs:
call_extension_tests:
uses: zed-industries/zed/.github/workflows/extension_tests.yml@main
@@ -2,7 +2,7 @@
set -euo pipefail
-CARGO_ABOUT_VERSION="0.8"
+CARGO_ABOUT_VERSION="0.8.2"
OUTPUT_FILE="${1:-$(pwd)/assets/licenses.md}"
TEMPLATE_FILE="script/licenses/template.md.hbs"
@@ -28,10 +28,10 @@ echo -n "" >"$OUTPUT_FILE"
} >>"$OUTPUT_FILE"
if ! cargo about --version | grep "cargo-about $CARGO_ABOUT_VERSION" &>/dev/null; then
- echo "Installing cargo-about@^$CARGO_ABOUT_VERSION..."
- cargo install "cargo-about@^$CARGO_ABOUT_VERSION"
+ echo "Installing cargo-about@$CARGO_ABOUT_VERSION..."
+ cargo install "cargo-about@$CARGO_ABOUT_VERSION"
else
- echo "cargo-about@^$CARGO_ABOUT_VERSION is already installed."
+ echo "cargo-about@$CARGO_ABOUT_VERSION is already installed."
fi
echo "Generating cargo licenses"
@@ -2,15 +2,15 @@
set -euo pipefail
-CARGO_ABOUT_VERSION="0.8"
+CARGO_ABOUT_VERSION="0.8.2"
OUTPUT_FILE="${1:-$(pwd)/assets/licenses.csv}"
TEMPLATE_FILE="script/licenses/template.csv.hbs"
if ! cargo about --version | grep "cargo-about $CARGO_ABOUT_VERSION" 2>&1 > /dev/null; then
- echo "Installing cargo-about@^$CARGO_ABOUT_VERSION..."
- cargo install "cargo-about@^$CARGO_ABOUT_VERSION"
+ echo "Installing cargo-about@$CARGO_ABOUT_VERSION..."
+ cargo install "cargo-about@$CARGO_ABOUT_VERSION"
else
- echo "cargo-about@^$CARGO_ABOUT_VERSION is already installed."
+ echo "cargo-about@$CARGO_ABOUT_VERSION is already installed."
fi
echo "Generating cargo licenses"
@@ -1,4 +1,4 @@
-$CARGO_ABOUT_VERSION="0.8"
+$CARGO_ABOUT_VERSION="0.8.2"
$outputFile=$args[0] ? $args[0] : "$(Get-Location)/assets/licenses.md"
$templateFile="script/licenses/template.md.hbs"
@@ -14,10 +14,10 @@ New-Item -Path "$outputFile" -ItemType File -Value "" -Force
$versionOutput = cargo about --version
if (-not ($versionOutput -match "cargo-about $CARGO_ABOUT_VERSION")) {
- Write-Host "Installing cargo-about@^$CARGO_ABOUT_VERSION..."
- cargo install "cargo-about@^$CARGO_ABOUT_VERSION"
+ Write-Host "Installing cargo-about@$CARGO_ABOUT_VERSION..."
+ cargo install "cargo-about@$CARGO_ABOUT_VERSION"
} else {
- Write-Host "cargo-about@^$CARGO_ABOUT_VERSION" is already installed
+ Write-Host "cargo-about@$CARGO_ABOUT_VERSION" is already installed
}
Write-Host "Generating cargo licenses"
@@ -23,15 +23,12 @@ pub(crate) fn extension_bump() -> Workflow {
let force_bump = WorkflowInput::bool("force-bump", None);
let (app_id, app_secret) = extension_workflow_secrets();
-
- let test_extension = extension_tests::check_extension();
let (check_bump_needed, needs_bump, current_version) = check_bump_needed();
let needs_bump = needs_bump.as_job_output(&check_bump_needed);
let current_version = current_version.as_job_output(&check_bump_needed);
- let dependencies = [&test_extension, &check_bump_needed];
-
+ let dependencies = [&check_bump_needed];
let bump_version = bump_extension_version(
&dependencies,
¤t_version,
@@ -72,7 +69,6 @@ pub(crate) fn extension_bump() -> Workflow {
"ZED_EXTENSION_CLI_SHA",
extension_tests::ZED_EXTENSION_CLI_SHA,
))
- .add_job(test_extension.name, test_extension.job)
.add_job(check_bump_needed.name, check_bump_needed.job)
.add_job(bump_version.name, bump_version.job)
.add_job(create_label.name, create_label.job)
@@ -1,5 +1,6 @@
use gh_workflow::{
- Event, Expression, Input, Job, PullRequest, PullRequestType, Push, Run, Step, UsesJob, Workflow,
+ Event, Expression, Input, Job, PullRequest, PullRequestType, Push, Run, Step, UsesJob,
+ Workflow, WorkflowDispatch,
};
use indexmap::IndexMap;
use indoc::indoc;
@@ -18,8 +19,13 @@ pub(crate) fn bump_version() -> Workflow {
named::workflow()
.on(Event::default()
- .push(Push::default().add_branch("main"))
- .pull_request(PullRequest::default().add_type(PullRequestType::Labeled)))
+ .push(
+ Push::default()
+ .add_branch("main")
+ .add_ignored_path(".github/**"),
+ )
+ .pull_request(PullRequest::default().add_type(PullRequestType::Labeled))
+ .workflow_dispatch(WorkflowDispatch::default()))
.concurrency(one_workflow_per_non_main_branch_and_token("labels"))
.add_job(determine_bump_type.name, determine_bump_type.job)
.add_job(call_bump_version.name, call_bump_version.job)
@@ -1,4 +1,4 @@
-use gh_workflow::{Event, Job, PullRequest, UsesJob, Workflow};
+use gh_workflow::{Event, Job, PullRequest, Push, UsesJob, Workflow};
use crate::tasks::workflows::{
steps::{NamedJob, named},
@@ -8,7 +8,9 @@ use crate::tasks::workflows::{
pub(crate) fn run_tests() -> Workflow {
let call_extension_tests = call_extension_tests();
named::workflow()
- .on(Event::default().pull_request(PullRequest::default().add_branch("**")))
+ .on(Event::default()
+ .pull_request(PullRequest::default().add_branch("**"))
+ .push(Push::default().add_branch("main")))
.concurrency(one_workflow_per_non_main_branch_and_token("pr"))
.add_job(call_extension_tests.name, call_extension_tests.job)
}
@@ -226,8 +226,8 @@ fn check_style() -> NamedJob {
named::uses(
"crate-ci",
"typos",
- "80c8a4945eec0f6d464eaf9e65ed98ef085283d1",
- ) // v1.38.1
+ "2d0ce569feab1f8752f1dde43cc2f2aa53236e06",
+ ) // v1.40.0
.with(("config", "./typos.toml"))
}
named::job(