From 912fd23006d5281de6cb8565c3b09b224d1f7016 Mon Sep 17 00:00:00 2001 From: Nathan Sobo Date: Fri, 19 May 2023 15:20:58 -0600 Subject: [PATCH 01/21] Parse markdown into an OpenAI chat completion request --- Cargo.lock | 220 ++++++++++++++++++++++++++++++ Cargo.toml | 1 + crates/ai/Cargo.toml | 21 +++ crates/ai/src/ai.rs | 112 +++++++++++++++ crates/live_kit_client/Cargo.toml | 1 + crates/zed/Cargo.toml | 1 + crates/zed/src/main.rs | 1 + 7 files changed, 357 insertions(+) create mode 100644 crates/ai/Cargo.toml create mode 100644 crates/ai/src/ai.rs diff --git a/Cargo.lock b/Cargo.lock index d339ac32568c9f3ff3ee5941907ac41ac4409027..59f933f4b9b0ab369603135385052df959c44ab3 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -95,6 +95,18 @@ dependencies = [ "memchr", ] +[[package]] +name = "ai" +version = "0.1.0" +dependencies = [ + "anyhow", + "async-openai", + "editor", + "gpui", + "pulldown-cmark", + "unindent", +] + [[package]] name = "alacritty_config" version = "0.1.1-dev" @@ -342,6 +354,28 @@ dependencies = [ "futures-lite", ] +[[package]] +name = "async-openai" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5d5e93aca1b2f0ca772c76cadd43e965809df87ef98e25e47244c7f006c85d2" +dependencies = [ + "backoff", + "base64 0.21.0", + "derive_builder", + "futures 0.3.28", + "rand 0.8.5", + "reqwest", + "reqwest-eventsource", + "serde", + "serde_json", + "thiserror", + "tokio", + "tokio-stream", + "tokio-util 0.7.8", + "tracing", +] + [[package]] name = "async-pipe" version = "0.1.3" @@ -642,6 +676,20 @@ dependencies = [ "tower-service", ] +[[package]] +name = "backoff" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b62ddb9cb1ec0a098ad4bbf9344d0713fa193ae1a80af55febcff2627b6a00c1" +dependencies = [ + "futures-core", + "getrandom 0.2.9", + "instant", + "pin-project-lite 0.2.9", + "rand 0.8.5", + "tokio", +] + [[package]] name = "backtrace" version = "0.3.67" @@ -1801,6 +1849,41 @@ dependencies = [ "syn 2.0.15", ] +[[package]] +name = "darling" +version = "0.14.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b750cb3417fd1b327431a470f388520309479ab0bf5e323505daf0290cd3850" +dependencies = [ + "darling_core", + "darling_macro", +] + +[[package]] +name = "darling_core" +version = "0.14.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "109c1ca6e6b7f82cc233a97004ea8ed7ca123a9af07a8230878fcfda9b158bf0" +dependencies = [ + "fnv", + "ident_case", + "proc-macro2", + "quote", + "strsim 0.10.0", + "syn 1.0.109", +] + +[[package]] +name = "darling_macro" +version = "0.14.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4aab4dbc9f7611d8b55048a3a16d2d010c2c8334e46304b40ac1cc14bf3b48e" +dependencies = [ + "darling_core", + "quote", + "syn 1.0.109", +] + [[package]] name = "dashmap" version = "5.4.0" @@ -1855,6 +1938,37 @@ dependencies = [ "byteorder", ] +[[package]] +name = "derive_builder" +version = "0.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d67778784b508018359cbc8696edb3db78160bab2c2a28ba7f56ef6932997f8" +dependencies = [ + "derive_builder_macro", +] + +[[package]] +name = "derive_builder_core" +version = "0.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c11bdc11a0c47bc7d37d582b5285da6849c96681023680b906673c5707af7b0f" +dependencies = [ + "darling", + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = 
"derive_builder_macro" +version = "0.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ebcda35c7a396850a55ffeac740804b40ffec779b98fffbb1738f4033f0ee79e" +dependencies = [ + "derive_builder_core", + "syn 1.0.109", +] + [[package]] name = "dhat" version = "0.3.2" @@ -2190,6 +2304,17 @@ version = "2.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" +[[package]] +name = "eventsource-stream" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74fef4569247a5f429d9156b9d0a2599914385dd189c539334c625d8099d90ab" +dependencies = [ + "futures-core", + "nom", + "pin-project-lite 0.2.9", +] + [[package]] name = "fallible-iterator" version = "0.2.0" @@ -2586,6 +2711,12 @@ version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "76d3d132be6c0e6aa1534069c705a74a5997a356c0dc2f86a47765e5617c5b65" +[[package]] +name = "futures-timer" +version = "3.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e64b03909df88034c26dc1547e8970b91f98bdb65165d6a4e9110d94263dbb2c" + [[package]] name = "futures-util" version = "0.3.28" @@ -2633,6 +2764,15 @@ dependencies = [ "version_check", ] +[[package]] +name = "getopts" +version = "0.2.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "14dbbfd5c71d70241ecf9e6f13737f7b5ce823821063188d7e46c41d371eebd5" +dependencies = [ + "unicode-width", +] + [[package]] name = "getrandom" version = "0.1.16" @@ -3060,6 +3200,19 @@ dependencies = [ "want", ] +[[package]] +name = "hyper-rustls" +version = "0.23.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1788965e61b367cd03a62950836d5cd41560c3577d90e40e0819373194d1661c" +dependencies = [ + "http", + "hyper", + "rustls 0.20.8", + "tokio", + "tokio-rustls", +] + [[package]] name = "hyper-timeout" version = "0.4.1" @@ -3109,6 +3262,12 @@ dependencies = [ "cxx-build", ] +[[package]] +name = "ident_case" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" + [[package]] name = "idna" version = "0.3.0" @@ -3903,6 +4062,16 @@ version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" +[[package]] +name = "mime_guess" +version = "2.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4192263c238a5f0d0c6bfd21f336a313a4ce1c450542449ca191bb657b4642ef" +dependencies = [ + "mime", + "unicase", +] + [[package]] name = "minimal-lexical" version = "0.2.1" @@ -5071,6 +5240,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2d9cc634bc78768157b5cbfe988ffcd1dcba95cd2b2f03a88316c08c6d00ed63" dependencies = [ "bitflags", + "getopts", "memchr", "unicase", ] @@ -5367,28 +5537,52 @@ dependencies = [ "http", "http-body", "hyper", + "hyper-rustls", "hyper-tls", "ipnet", "js-sys", "log", "mime", + "mime_guess", "native-tls", "once_cell", "percent-encoding", "pin-project-lite 0.2.9", + "rustls 0.20.8", + "rustls-native-certs", + "rustls-pemfile", "serde", "serde_json", "serde_urlencoded", "tokio", "tokio-native-tls", + "tokio-rustls", + "tokio-util 0.7.8", "tower-service", "url", "wasm-bindgen", "wasm-bindgen-futures", + "wasm-streams", "web-sys", "winreg", ] +[[package]] +name = 
"reqwest-eventsource" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f03f570355882dd8d15acc3a313841e6e90eddbc76a93c748fd82cc13ba9f51" +dependencies = [ + "eventsource-stream", + "futures-core", + "futures-timer", + "mime", + "nom", + "pin-project-lite 0.2.9", + "reqwest", + "thiserror", +] + [[package]] name = "resvg" version = "0.14.1" @@ -5676,6 +5870,18 @@ dependencies = [ "webpki 0.22.0", ] +[[package]] +name = "rustls-native-certs" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0167bac7a9f490495f3c33013e7722b53cb087ecbe082fb0c6387c96f634ea50" +dependencies = [ + "openssl-probe", + "rustls-pemfile", + "schannel", + "security-framework", +] + [[package]] name = "rustls-pemfile" version = "1.0.2" @@ -8039,6 +8245,19 @@ dependencies = [ "leb128", ] +[[package]] +name = "wasm-streams" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6bbae3363c08332cadccd13b67db371814cd214c2524020932f0804b8cf7c078" +dependencies = [ + "futures-util", + "js-sys", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", +] + [[package]] name = "wasmparser" version = "0.85.0" @@ -8759,6 +8978,7 @@ name = "zed" version = "0.88.0" dependencies = [ "activity_indicator", + "ai", "anyhow", "assets", "async-compression", diff --git a/Cargo.toml b/Cargo.toml index f14e1c73552f766034ee1d98dd1a51fe3a00bf09..77252802d579b5d822bdd1d7d82933572d38c7fd 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,7 @@ [workspace] members = [ "crates/activity_indicator", + "crates/ai", "crates/assets", "crates/auto_update", "crates/breadcrumbs", diff --git a/crates/ai/Cargo.toml b/crates/ai/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..30dc5ee5a27979cc9a3d051e883a9720f93fab21 --- /dev/null +++ b/crates/ai/Cargo.toml @@ -0,0 +1,21 @@ +[package] +name = "ai" +version = "0.1.0" +edition = "2021" +publish = false + +[lib] +path = "src/ai.rs" +doctest = false + +[dependencies] +editor = { path = "../editor" } +gpui = { path = "../gpui" } + +anyhow.workspace = true +async-openai = "0.10.3" +pulldown-cmark = "0.9.2" +unindent.workspace = true + +[dev-dependencies] +editor = { path = "../editor", features = ["test-support"] } diff --git a/crates/ai/src/ai.rs b/crates/ai/src/ai.rs new file mode 100644 index 0000000000000000000000000000000000000000..0ae960e28139882ab3484c5ffb02fa2c7a98579c --- /dev/null +++ b/crates/ai/src/ai.rs @@ -0,0 +1,112 @@ +use anyhow::Result; +use async_openai::types::{ChatCompletionRequestMessage, CreateChatCompletionRequest, Role}; +use editor::Editor; +use gpui::{actions, AppContext, Task, ViewContext}; +use pulldown_cmark::{Event, HeadingLevel, Parser, Tag}; + +actions!(ai, [Assist]); + +pub fn init(cx: &mut AppContext) { + cx.add_async_action(assist) +} + +fn assist( + editor: &mut Editor, + _: &Assist, + cx: &mut ViewContext, +) -> Option>> { + let markdown = editor.text(cx); + parse_dialog(&markdown); + None +} + +fn parse_dialog(markdown: &str) -> CreateChatCompletionRequest { + let parser = Parser::new(markdown); + let mut messages = Vec::new(); + + let mut current_role: Option<(Role, Option)> = None; + let mut buffer = String::new(); + for event in parser { + match event { + Event::Start(Tag::Heading(HeadingLevel::H2, _, _)) => { + if let Some((role, name)) = current_role.take() { + if !buffer.is_empty() { + messages.push(ChatCompletionRequestMessage { + role, + content: buffer.trim().to_string(), + name, + }); + 
buffer.clear(); + } + } + } + Event::Text(text) => { + if current_role.is_some() { + buffer.push_str(&text); + } else { + // Determine the current role based on the H2 header text + let mut chars = text.chars(); + let first_char = chars.by_ref().skip_while(|c| c.is_whitespace()).next(); + let name = chars.take_while(|c| *c != '\n').collect::(); + let name = if name.is_empty() { None } else { Some(name) }; + + let role = match first_char { + Some('@') => Some(Role::User), + Some('/') => Some(Role::Assistant), + Some('#') => Some(Role::System), + _ => None, + }; + + current_role = role.map(|role| (role, name)); + } + } + _ => (), + } + } + if let Some((role, name)) = current_role { + messages.push(ChatCompletionRequestMessage { + role, + content: buffer, + name, + }); + } + + CreateChatCompletionRequest { + model: "gpt-4".into(), + messages, + ..Default::default() + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_parse_dialog() { + use unindent::Unindent; + + let test_input = r#" + ## @nathan + Hey there, welcome to Zed! + + ## /sky + Thanks! I'm excited to be here. I have much to learn, but also much to teach, and I'm growing fast. + "#.unindent(); + + let expected_output = vec![ + ChatCompletionRequestMessage { + role: Role::User, + content: "Hey there, welcome to Zed!".to_string(), + name: Some("nathan".to_string()), + }, + ChatCompletionRequestMessage { + role: Role::Assistant, + content: "Thanks! I'm excited to be here. I have much to learn, but also much to teach, and I'm growing fast.".to_string(), + name: Some("sky".to_string()), + }, + ]; + + assert_eq!(parse_dialog(&test_input).messages, expected_output); + } +} diff --git a/crates/live_kit_client/Cargo.toml b/crates/live_kit_client/Cargo.toml index 2d61e75732f40995d819db6f80544fb624190f24..36087a42a371ad75936209f77aeed0c8141687cc 100644 --- a/crates/live_kit_client/Cargo.toml +++ b/crates/live_kit_client/Cargo.toml @@ -46,6 +46,7 @@ collections = { path = "../collections", features = ["test-support"] } gpui = { path = "../gpui", features = ["test-support"] } live_kit_server = { path = "../live_kit_server" } media = { path = "../media" } +nanoid = "0.4" anyhow.workspace = true async-trait.workspace = true diff --git a/crates/zed/Cargo.toml b/crates/zed/Cargo.toml index 90dced65f574018fdcfe1ca54ac7d1a773d2708c..e24b7ef232b48d627130677f9c75b92763bde768 100644 --- a/crates/zed/Cargo.toml +++ b/crates/zed/Cargo.toml @@ -48,6 +48,7 @@ language_selector = { path = "../language_selector" } lsp = { path = "../lsp" } lsp_log = { path = "../lsp_log" } node_runtime = { path = "../node_runtime" } +ai = { path = "../ai" } outline = { path = "../outline" } plugin_runtime = { path = "../plugin_runtime" } project = { path = "../project" } diff --git a/crates/zed/src/main.rs b/crates/zed/src/main.rs index 2f359240bc7a2ba295cf3d67579ea1ab5a3411da..eb2d693700673c3e58207ac06b50ea4e3b3e237d 100644 --- a/crates/zed/src/main.rs +++ b/crates/zed/src/main.rs @@ -162,6 +162,7 @@ fn main() { terminal_view::init(cx); theme_testbench::init(cx); copilot::init(http.clone(), node_runtime, cx); + ai::init(cx); cx.spawn(|cx| watch_themes(fs.clone(), cx)).detach(); From 7e6cccfa3d2dfd6ca76f78c06b4aa0f83ea36c54 Mon Sep 17 00:00:00 2001 From: Nathan Sobo Date: Mon, 22 May 2023 20:28:22 -0600 Subject: [PATCH 02/21] WIP: Stream in completions Drop dependency on tokio introduced by async-openai and do it ourselves. The approach I'm taking of replacing instead of appending is causing issues. Need to just append. 
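The gist of the streaming approach, for reference: send the request with
stream set to true, then read the response body line by line and decode the
JSON payload of each "data: ..." server-sent event. A minimal sketch of that
loop, assuming isahc's async body and the futures AsyncBufReadExt/StreamExt
traits (OpenAIResponseStreamEvent is the event type this patch defines, and
error handling is elided):

    let mut lines = BufReader::new(response.body_mut()).lines();
    while let Some(line) = lines.next().await {
        // Each SSE frame arrives as a line of the form "data: {json}".
        if let Some(data) = line?.strip_prefix("data: ") {
            let event: OpenAIResponseStreamEvent = serde_json::from_str(data)?;
            // Forward each decoded event to the UI over an unbounded channel.
            tx.unbounded_send(Ok(event)).ok();
        }
    }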
--- Cargo.lock | 204 +------------------------------ Cargo.toml | 1 + crates/ai/Cargo.toml | 7 +- crates/ai/src/ai.rs | 223 +++++++++++++++++++++++++++++----- crates/auto_update/Cargo.toml | 2 +- crates/feedback/Cargo.toml | 2 +- crates/gpui/src/executor.rs | 2 +- crates/util/Cargo.toml | 2 +- crates/zed/Cargo.toml | 2 +- 9 files changed, 209 insertions(+), 236 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 59f933f4b9b0ab369603135385052df959c44ab3..5c0570f912f48762c8c114a049c79ca668b79a64 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -100,11 +100,16 @@ name = "ai" version = "0.1.0" dependencies = [ "anyhow", - "async-openai", + "async-stream", "editor", + "futures 0.3.28", "gpui", + "isahc", "pulldown-cmark", + "serde", + "serde_json", "unindent", + "util", ] [[package]] @@ -354,28 +359,6 @@ dependencies = [ "futures-lite", ] -[[package]] -name = "async-openai" -version = "0.10.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5d5e93aca1b2f0ca772c76cadd43e965809df87ef98e25e47244c7f006c85d2" -dependencies = [ - "backoff", - "base64 0.21.0", - "derive_builder", - "futures 0.3.28", - "rand 0.8.5", - "reqwest", - "reqwest-eventsource", - "serde", - "serde_json", - "thiserror", - "tokio", - "tokio-stream", - "tokio-util 0.7.8", - "tracing", -] - [[package]] name = "async-pipe" version = "0.1.3" @@ -676,20 +659,6 @@ dependencies = [ "tower-service", ] -[[package]] -name = "backoff" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b62ddb9cb1ec0a098ad4bbf9344d0713fa193ae1a80af55febcff2627b6a00c1" -dependencies = [ - "futures-core", - "getrandom 0.2.9", - "instant", - "pin-project-lite 0.2.9", - "rand 0.8.5", - "tokio", -] - [[package]] name = "backtrace" version = "0.3.67" @@ -1849,41 +1818,6 @@ dependencies = [ "syn 2.0.15", ] -[[package]] -name = "darling" -version = "0.14.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b750cb3417fd1b327431a470f388520309479ab0bf5e323505daf0290cd3850" -dependencies = [ - "darling_core", - "darling_macro", -] - -[[package]] -name = "darling_core" -version = "0.14.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "109c1ca6e6b7f82cc233a97004ea8ed7ca123a9af07a8230878fcfda9b158bf0" -dependencies = [ - "fnv", - "ident_case", - "proc-macro2", - "quote", - "strsim 0.10.0", - "syn 1.0.109", -] - -[[package]] -name = "darling_macro" -version = "0.14.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4aab4dbc9f7611d8b55048a3a16d2d010c2c8334e46304b40ac1cc14bf3b48e" -dependencies = [ - "darling_core", - "quote", - "syn 1.0.109", -] - [[package]] name = "dashmap" version = "5.4.0" @@ -1938,37 +1872,6 @@ dependencies = [ "byteorder", ] -[[package]] -name = "derive_builder" -version = "0.12.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d67778784b508018359cbc8696edb3db78160bab2c2a28ba7f56ef6932997f8" -dependencies = [ - "derive_builder_macro", -] - -[[package]] -name = "derive_builder_core" -version = "0.12.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c11bdc11a0c47bc7d37d582b5285da6849c96681023680b906673c5707af7b0f" -dependencies = [ - "darling", - "proc-macro2", - "quote", - "syn 1.0.109", -] - -[[package]] -name = "derive_builder_macro" -version = "0.12.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebcda35c7a396850a55ffeac740804b40ffec779b98fffbb1738f4033f0ee79e" -dependencies = [ - 
"derive_builder_core", - "syn 1.0.109", -] - [[package]] name = "dhat" version = "0.3.2" @@ -2304,17 +2207,6 @@ version = "2.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" -[[package]] -name = "eventsource-stream" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74fef4569247a5f429d9156b9d0a2599914385dd189c539334c625d8099d90ab" -dependencies = [ - "futures-core", - "nom", - "pin-project-lite 0.2.9", -] - [[package]] name = "fallible-iterator" version = "0.2.0" @@ -2711,12 +2603,6 @@ version = "0.3.28" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "76d3d132be6c0e6aa1534069c705a74a5997a356c0dc2f86a47765e5617c5b65" -[[package]] -name = "futures-timer" -version = "3.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e64b03909df88034c26dc1547e8970b91f98bdb65165d6a4e9110d94263dbb2c" - [[package]] name = "futures-util" version = "0.3.28" @@ -3200,19 +3086,6 @@ dependencies = [ "want", ] -[[package]] -name = "hyper-rustls" -version = "0.23.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1788965e61b367cd03a62950836d5cd41560c3577d90e40e0819373194d1661c" -dependencies = [ - "http", - "hyper", - "rustls 0.20.8", - "tokio", - "tokio-rustls", -] - [[package]] name = "hyper-timeout" version = "0.4.1" @@ -3262,12 +3135,6 @@ dependencies = [ "cxx-build", ] -[[package]] -name = "ident_case" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" - [[package]] name = "idna" version = "0.3.0" @@ -4062,16 +3929,6 @@ version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" -[[package]] -name = "mime_guess" -version = "2.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4192263c238a5f0d0c6bfd21f336a313a4ce1c450542449ca191bb657b4642ef" -dependencies = [ - "mime", - "unicase", -] - [[package]] name = "minimal-lexical" version = "0.2.1" @@ -5537,52 +5394,28 @@ dependencies = [ "http", "http-body", "hyper", - "hyper-rustls", "hyper-tls", "ipnet", "js-sys", "log", "mime", - "mime_guess", "native-tls", "once_cell", "percent-encoding", "pin-project-lite 0.2.9", - "rustls 0.20.8", - "rustls-native-certs", - "rustls-pemfile", "serde", "serde_json", "serde_urlencoded", "tokio", "tokio-native-tls", - "tokio-rustls", - "tokio-util 0.7.8", "tower-service", "url", "wasm-bindgen", "wasm-bindgen-futures", - "wasm-streams", "web-sys", "winreg", ] -[[package]] -name = "reqwest-eventsource" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f03f570355882dd8d15acc3a313841e6e90eddbc76a93c748fd82cc13ba9f51" -dependencies = [ - "eventsource-stream", - "futures-core", - "futures-timer", - "mime", - "nom", - "pin-project-lite 0.2.9", - "reqwest", - "thiserror", -] - [[package]] name = "resvg" version = "0.14.1" @@ -5870,18 +5703,6 @@ dependencies = [ "webpki 0.22.0", ] -[[package]] -name = "rustls-native-certs" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0167bac7a9f490495f3c33013e7722b53cb087ecbe082fb0c6387c96f634ea50" -dependencies = [ - "openssl-probe", - "rustls-pemfile", - "schannel", - "security-framework", -] - [[package]] name = "rustls-pemfile" 
version = "1.0.2" @@ -8245,19 +8066,6 @@ dependencies = [ "leb128", ] -[[package]] -name = "wasm-streams" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6bbae3363c08332cadccd13b67db371814cd214c2524020932f0804b8cf7c078" -dependencies = [ - "futures-util", - "js-sys", - "wasm-bindgen", - "wasm-bindgen-futures", - "web-sys", -] - [[package]] name = "wasmparser" version = "0.85.0" diff --git a/Cargo.toml b/Cargo.toml index 77252802d579b5d822bdd1d7d82933572d38c7fd..d8bf005b774395b2eed77fa319eb0f94d3e1c223 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -79,6 +79,7 @@ ctor = { version = "0.1" } env_logger = { version = "0.9" } futures = { version = "0.3" } glob = { version = "0.3.1" } +isahc = "1.7.2" lazy_static = { version = "1.4.0" } log = { version = "0.4.16", features = ["kv_unstable_serde"] } ordered-float = { version = "2.1.1" } diff --git a/crates/ai/Cargo.toml b/crates/ai/Cargo.toml index 30dc5ee5a27979cc9a3d051e883a9720f93fab21..0953330a69e0ff675f18579df0c77a51b1666143 100644 --- a/crates/ai/Cargo.toml +++ b/crates/ai/Cargo.toml @@ -11,11 +11,16 @@ doctest = false [dependencies] editor = { path = "../editor" } gpui = { path = "../gpui" } +util = { path = "../util" } +serde.workspace = true +serde_json.workspace = true anyhow.workspace = true -async-openai = "0.10.3" pulldown-cmark = "0.9.2" +futures.workspace = true +isahc.workspace = true unindent.workspace = true +async-stream = "0.3.5" [dev-dependencies] editor = { path = "../editor", features = ["test-support"] } diff --git a/crates/ai/src/ai.rs b/crates/ai/src/ai.rs index 0ae960e28139882ab3484c5ffb02fa2c7a98579c..b0bbd15d5939315ddd25bb1fe22e0e3e83bc56af 100644 --- a/crates/ai/src/ai.rs +++ b/crates/ai/src/ai.rs @@ -1,11 +1,87 @@ -use anyhow::Result; -use async_openai::types::{ChatCompletionRequestMessage, CreateChatCompletionRequest, Role}; +use std::io; +use std::rc::Rc; + +use anyhow::{anyhow, Result}; use editor::Editor; +use futures::AsyncBufReadExt; +use futures::{io::BufReader, AsyncReadExt, Stream, StreamExt}; +use gpui::executor::Foreground; use gpui::{actions, AppContext, Task, ViewContext}; +use isahc::prelude::*; +use isahc::{http::StatusCode, Request}; use pulldown_cmark::{Event, HeadingLevel, Parser, Tag}; +use serde::{Deserialize, Serialize}; +use util::ResultExt; actions!(ai, [Assist]); +// Data types for chat completion requests +#[derive(Serialize)] +struct OpenAIRequest { + model: String, + messages: Vec, + stream: bool, +} + +#[derive(Serialize, Deserialize, Debug, Eq, PartialEq)] +struct RequestMessage { + role: Role, + content: String, +} + +#[derive(Serialize, Deserialize, Debug, Eq, PartialEq)] +struct ResponseMessage { + role: Option, + content: Option, +} + +#[derive(Serialize, Deserialize, Debug, Eq, PartialEq)] +#[serde(rename_all = "lowercase")] +enum Role { + User, + Assistant, + System, +} + +#[derive(Deserialize, Debug)] +struct OpenAIResponseStreamEvent { + pub id: Option, + pub object: String, + pub created: u32, + pub model: String, + pub choices: Vec, + pub usage: Option, +} + +#[derive(Deserialize, Debug)] +struct Usage { + pub prompt_tokens: u32, + pub completion_tokens: u32, + pub total_tokens: u32, +} + +#[derive(Deserialize, Debug)] +struct ChatChoiceDelta { + pub index: u32, + pub delta: ResponseMessage, + pub finish_reason: Option, +} + +#[derive(Deserialize, Debug)] +struct OpenAIUsage { + prompt_tokens: u64, + completion_tokens: u64, + total_tokens: u64, +} + +#[derive(Deserialize, Debug)] +struct OpenAIChoice { + text: String, + index: u32, 
+    logprobs: Option<serde_json::Value>,
+    finish_reason: Option<String>,
+}
+
 pub fn init(cx: &mut AppContext) {
     cx.add_async_action(assist)
 }
@@ -15,26 +91,58 @@ fn assist(
     _: &Assist,
     cx: &mut ViewContext<Editor>,
 ) -> Option<Task<Result<()>>> {
+    let api_key = std::env::var("OPENAI_API_KEY").log_err()?;
+
     let markdown = editor.text(cx);
-    parse_dialog(&markdown);
-    None
+    let prompt = parse_dialog(&markdown);
+    let response = stream_completion(api_key, prompt, cx.foreground().clone());
+
+    let range = editor.buffer().update(cx, |buffer, cx| {
+        let snapshot = buffer.snapshot(cx);
+        let chars = snapshot.reversed_chars_at(snapshot.len());
+        let trailing_newlines = chars.take(2).take_while(|c| *c == '\n').count();
+        let suffix = "\n".repeat(2 - trailing_newlines);
+        let end = snapshot.len();
+        buffer.edit([(end..end, suffix.clone())], None, cx);
+        let snapshot = buffer.snapshot(cx);
+        let start = snapshot.anchor_before(snapshot.len());
+        let end = snapshot.anchor_after(snapshot.len());
+        start..end
+    });
+    let buffer = editor.buffer().clone();
+
+    Some(cx.spawn(|_, mut cx| async move {
+        let mut stream = response.await?;
+        let mut message = String::new();
+        while let Some(stream_event) = stream.next().await {
+            if let Some(choice) = stream_event?.choices.first() {
+                if let Some(content) = &choice.delta.content {
+                    message.push_str(content);
+                }
+            }
+
+            buffer.update(&mut cx, |buffer, cx| {
+                buffer.edit([(range.clone(), message.clone())], None, cx);
+            });
+        }
+        Ok(())
+    }))
 }
 
-fn parse_dialog(markdown: &str) -> CreateChatCompletionRequest {
+fn parse_dialog(markdown: &str) -> OpenAIRequest {
     let parser = Parser::new(markdown);
     let mut messages = Vec::new();
 
-    let mut current_role: Option<(Role, Option<String>)> = None;
+    let mut current_role: Option<Role> = None;
     let mut buffer = String::new();
     for event in parser {
         match event {
             Event::Start(Tag::Heading(HeadingLevel::H2, _, _)) => {
-                if let Some((role, name)) = current_role.take() {
+                if let Some(role) = current_role.take() {
                     if !buffer.is_empty() {
-                        messages.push(ChatCompletionRequestMessage {
+                        messages.push(RequestMessage {
                             role,
                             content: buffer.trim().to_string(),
-                            name,
                         });
                         buffer.clear();
                     }
@@ -45,36 +153,89 @@ fn parse_dialog(markdown: &str) -> CreateChatCompletionRequest {
                 buffer.push_str(&text);
             } else {
                 // Determine the current role based on the H2 header text
-                let mut chars = text.chars();
-                let first_char = chars.by_ref().skip_while(|c| c.is_whitespace()).next();
-                let name = chars.take_while(|c| *c != '\n').collect::<String>();
-                let name = if name.is_empty() { None } else { Some(name) };
-
-                let role = match first_char {
-                    Some('@') => Some(Role::User),
-                    Some('/') => Some(Role::Assistant),
-                    Some('#') => Some(Role::System),
-                    _ => None,
+                let text = text.to_lowercase();
+                current_role = if text.contains("user") {
+                    Some(Role::User)
+                } else if text.contains("assistant") {
+                    Some(Role::Assistant)
+                } else if text.contains("system") {
+                    Some(Role::System)
+                } else {
+                    None
                 };
-
-                current_role = role.map(|role| (role, name));
             }
         }
         _ => (),
     }
 }
-    if let Some((role, name)) = current_role {
+    if let Some(role) = current_role {
         messages.push(RequestMessage {
             role,
             content: buffer,
-            name,
         });
     }
 
-    CreateChatCompletionRequest {
+    OpenAIRequest {
         model: "gpt-4".into(),
         messages,
-        ..Default::default()
+        stream: true,
+    }
+}
+
+async fn stream_completion(
+    api_key: String,
+    mut request: OpenAIRequest,
+    executor: Rc<Foreground>,
+) -> Result<impl Stream<Item = Result<OpenAIResponseStreamEvent>>> {
+    request.stream = true;
+
+    let (tx, rx) = futures::channel::mpsc::unbounded::<Result<OpenAIResponseStreamEvent>>();
+
+    let json_data = serde_json::to_string(&request)?;
+    let mut response = Request::post("https://api.openai.com/v1/chat/completions")
+        .header("Content-Type", "application/json")
+        .header("Authorization", format!("Bearer {}", api_key))
+        .body(json_data)?
+        .send_async()
+        .await?;
+
+    let status = response.status();
+    if status == StatusCode::OK {
+        executor
+            .spawn(async move {
+                let mut lines = BufReader::new(response.body_mut()).lines();
+
+                fn parse_line(
+                    line: Result<String, io::Error>,
+                ) -> Result<Option<OpenAIResponseStreamEvent>> {
+                    if let Some(data) = line?.strip_prefix("data: ") {
+                        let event = serde_json::from_str(&data)?;
+                        Ok(Some(event))
+                    } else {
+                        Ok(None)
+                    }
+                }
+
+                while let Some(line) = lines.next().await {
+                    if let Some(event) = parse_line(line).transpose() {
+                        tx.unbounded_send(event).log_err();
+                    }
+                }
+
+                anyhow::Ok(())
+            })
+            .detach();
+
+        Ok(rx)
+    } else {
+        let mut body = String::new();
+        response.body_mut().read_to_string(&mut body).await?;
+
+        Err(anyhow!(
+            "Failed to connect to OpenAI API: {} {}",
+            response.status(),
+            body,
+        ))
+    }
 }
 
 #[cfg(test)]
@@ -87,23 +248,21 @@ mod tests {
     fn test_parse_dialog() {
         use unindent::Unindent;
 
         let test_input = r#"
-            ## @nathan
+            ## User
            Hey there, welcome to Zed!
 
-            ## /sky
+            ## Assistant
            Thanks! I'm excited to be here. I have much to learn, but also much to teach, and I'm growing fast.
         "#.unindent();
 
         let expected_output = vec![
-            ChatCompletionRequestMessage {
+            RequestMessage {
                 role: Role::User,
                 content: "Hey there, welcome to Zed!".to_string(),
-                name: Some("nathan".to_string()),
             },
-            ChatCompletionRequestMessage {
+            RequestMessage {
                 role: Role::Assistant,
                 content: "Thanks! I'm excited to be here. I have much to learn, but also much to teach, and I'm growing fast.".to_string(),
-                name: Some("sky".to_string()),
             },
         ];
 
diff --git a/crates/auto_update/Cargo.toml b/crates/auto_update/Cargo.toml
index f2b5cea854a66043cb7ce89224a1065490fbbfae..884ed2b7a061464e8b649cd77c8d02c0a9d22277 100644
--- a/crates/auto_update/Cargo.toml
+++ b/crates/auto_update/Cargo.toml
@@ -19,7 +19,7 @@ theme = { path = "../theme" }
 workspace = { path = "../workspace" }
 util = { path = "../util" }
 anyhow.workspace = true
-isahc = "1.7"
+isahc.workspace = true
 lazy_static.workspace = true
 log.workspace = true
 serde.workspace = true
diff --git a/crates/feedback/Cargo.toml b/crates/feedback/Cargo.toml
index e74e14ff4ca8a35c8d270246052a5280404950eb..ae8d0f1569c45f64ce9d18ce009b12245ab8351d 100644
--- a/crates/feedback/Cargo.toml
+++ b/crates/feedback/Cargo.toml
@@ -27,7 +27,7 @@ futures.workspace = true
 anyhow.workspace = true
 smallvec.workspace = true
 human_bytes = "0.4.1"
-isahc = "1.7"
+isahc.workspace = true
 lazy_static.workspace = true
 postage.workspace = true
 serde.workspace = true
diff --git a/crates/gpui/src/executor.rs b/crates/gpui/src/executor.rs
index 028656a027db012e38041cfa5ed7b5806bf5b432..a06e0d5fdbf0cdfad4636cc8928ee28ab1f448df 100644
--- a/crates/gpui/src/executor.rs
+++ b/crates/gpui/src/executor.rs
@@ -960,7 +960,7 @@ impl Task<Result<T>> {
     pub fn detach_and_log_err(self, cx: &mut AppContext) {
         cx.spawn(|_| async move {
             if let Err(err) = self.await {
-                log::error!("{}", err);
+                log::error!("{:#}", err);
             }
         })
         .detach();
diff --git a/crates/util/Cargo.toml b/crates/util/Cargo.toml
index 4ec8f7553c182ffd16a820141acf449a8cd59b7f..6216d2e47201d7d980fe3d59de1c29fcd82c095d 100644
--- a/crates/util/Cargo.toml
+++ b/crates/util/Cargo.toml
@@ -17,7 +17,7 @@ backtrace = "0.3"
 log.workspace = true
 lazy_static.workspace = true
 futures.workspace = true
-isahc = "1.7"
+isahc.workspace = true
 smol.workspace = true
 url = "2.2"
 rand.workspace = true
diff --git 
a/crates/zed/Cargo.toml b/crates/zed/Cargo.toml index e24b7ef232b48d627130677f9c75b92763bde768..a385d376932db9b97acb88bcc2ff856a4f367865 100644 --- a/crates/zed/Cargo.toml +++ b/crates/zed/Cargo.toml @@ -82,7 +82,7 @@ futures.workspace = true ignore = "0.4" image = "0.23" indexmap = "1.6.2" -isahc = "1.7" +isahc.workspace = true lazy_static.workspace = true libc = "0.2" log.workspace = true From 30de64845f3b23cebf3d367424ce79202c4708c5 Mon Sep 17 00:00:00 2001 From: Nathan Sobo Date: Mon, 22 May 2023 23:11:22 -0600 Subject: [PATCH 03/21] WIP --- Cargo.lock | 1 + Cargo.toml | 1 + Untitled | 0 crates/ai/Cargo.toml | 1 + crates/ai/README.zmd | 5 + crates/ai/src/ai.rs | 186 ++++++++++++++---------------------- crates/collab/Cargo.toml | 2 +- crates/db/Cargo.toml | 2 +- crates/editor/Cargo.toml | 2 +- crates/language/Cargo.toml | 2 +- crates/sqlez/Cargo.toml | 2 +- crates/vim/Cargo.toml | 2 +- crates/workspace/Cargo.toml | 2 +- 13 files changed, 87 insertions(+), 121 deletions(-) create mode 100644 Untitled create mode 100644 crates/ai/README.zmd diff --git a/Cargo.lock b/Cargo.lock index 5c0570f912f48762c8c114a049c79ca668b79a64..0ea65f93acb25a9883a559c2226dfc67655673ce 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -104,6 +104,7 @@ dependencies = [ "editor", "futures 0.3.28", "gpui", + "indoc", "isahc", "pulldown-cmark", "serde", diff --git a/Cargo.toml b/Cargo.toml index d8bf005b774395b2eed77fa319eb0f94d3e1c223..7411dd53ad7b1087340b4c6931ff33c7bc74809a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -79,6 +79,7 @@ ctor = { version = "0.1" } env_logger = { version = "0.9" } futures = { version = "0.3" } glob = { version = "0.3.1" } +indoc = "1" isahc = "1.7.2" lazy_static = { version = "1.4.0" } log = { version = "0.4.16", features = ["kv_unstable_serde"] } diff --git a/Untitled b/Untitled new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/crates/ai/Cargo.toml b/crates/ai/Cargo.toml index 0953330a69e0ff675f18579df0c77a51b1666143..dacdbbbf630115a3ad1192a21fcf4e916c2cf6e5 100644 --- a/crates/ai/Cargo.toml +++ b/crates/ai/Cargo.toml @@ -16,6 +16,7 @@ util = { path = "../util" } serde.workspace = true serde_json.workspace = true anyhow.workspace = true +indoc.workspace = true pulldown-cmark = "0.9.2" futures.workspace = true isahc.workspace = true diff --git a/crates/ai/README.zmd b/crates/ai/README.zmd new file mode 100644 index 0000000000000000000000000000000000000000..44cda74cd5cebba504dd7b97f9ee3f43134c870b --- /dev/null +++ b/crates/ai/README.zmd @@ -0,0 +1,5 @@ +This is Zed Markdown. + +Mention a language model with / at the start of any line, like this: + +/ What do you think of this idea? 
diff --git a/crates/ai/src/ai.rs b/crates/ai/src/ai.rs index b0bbd15d5939315ddd25bb1fe22e0e3e83bc56af..101378e747fe9d859ea3ea345510bbf2b44f65b8 100644 --- a/crates/ai/src/ai.rs +++ b/crates/ai/src/ai.rs @@ -1,16 +1,14 @@ -use std::io; -use std::rc::Rc; - use anyhow::{anyhow, Result}; use editor::Editor; use futures::AsyncBufReadExt; use futures::{io::BufReader, AsyncReadExt, Stream, StreamExt}; -use gpui::executor::Foreground; +use gpui::executor::Background; use gpui::{actions, AppContext, Task, ViewContext}; +use indoc::indoc; use isahc::prelude::*; use isahc::{http::StatusCode, Request}; -use pulldown_cmark::{Event, HeadingLevel, Parser, Tag}; use serde::{Deserialize, Serialize}; +use std::{io, sync::Arc}; use util::ResultExt; actions!(ai, [Assist]); @@ -93,99 +91,87 @@ fn assist( ) -> Option>> { let api_key = std::env::var("OPENAI_API_KEY").log_err()?; - let markdown = editor.text(cx); - let prompt = parse_dialog(&markdown); - let response = stream_completion(api_key, prompt, cx.foreground().clone()); - - let range = editor.buffer().update(cx, |buffer, cx| { - let snapshot = buffer.snapshot(cx); - let chars = snapshot.reversed_chars_at(snapshot.len()); - let trailing_newlines = chars.take(2).take_while(|c| *c == '\n').count(); - let suffix = "\n".repeat(2 - trailing_newlines); - let end = snapshot.len(); - buffer.edit([(end..end, suffix.clone())], None, cx); + const SYSTEM_MESSAGE: &'static str = indoc! {r#" + You an AI language model embedded in a code editor named Zed, authored by Zed Industries. + The input you are currently processing was produced by a special \"model mention\" in a document that is open in the editor. + A model mention is indicated via a leading / on a line. + The user's currently selected text is indicated via ->->selected text<-<- surrounding selected text. + In this sentence, the word ->->example<-<- is selected. + Respond to any selected model mention. + Summarize each mention in a single short sentence like: + > The user selected the word \"example\". + Then provide your response to that mention below its summary. + "#}; + + let (user_message, insertion_site) = editor.buffer().update(cx, |buffer, cx| { + // Insert ->-> <-<- around selected text as described in the system prompt above. let snapshot = buffer.snapshot(cx); - let start = snapshot.anchor_before(snapshot.len()); - let end = snapshot.anchor_after(snapshot.len()); - start..end + let mut user_message = String::new(); + let mut buffer_offset = 0; + for selection in editor.selections.all(cx) { + user_message.extend(snapshot.text_for_range(buffer_offset..selection.start)); + user_message.push_str("->->"); + user_message.extend(snapshot.text_for_range(selection.start..selection.end)); + buffer_offset = selection.end; + user_message.push_str("<-<-"); + } + if buffer_offset < snapshot.len() { + user_message.extend(snapshot.text_for_range(buffer_offset..snapshot.len())); + } + + // Ensure the document ends with 4 trailing newlines. + let trailing_newline_count = snapshot + .reversed_chars_at(snapshot.len()) + .take_while(|c| *c == '\n') + .take(4); + let suffix = "\n".repeat(4 - trailing_newline_count.count()); + buffer.edit([(snapshot.len()..snapshot.len(), suffix)], None, cx); + + let snapshot = buffer.snapshot(cx); // Take a new snapshot after editing. + let insertion_site = snapshot.len() - 2; // Insert text at end of buffer, with an empty line both above and below. 
+
+        (user_message, insertion_site)
     });
-    let buffer = editor.buffer().clone();
+    let stream = stream_completion(
+        api_key,
+        cx.background_executor().clone(),
+        OpenAIRequest {
+            model: "gpt-4".to_string(),
+            messages: vec![
+                RequestMessage {
+                    role: Role::System,
+                    content: SYSTEM_MESSAGE.to_string(),
+                },
+                RequestMessage {
+                    role: Role::User,
+                    content: user_message,
+                },
+            ],
+            stream: false,
+        },
+    );
+    let buffer = editor.buffer().clone();
     Some(cx.spawn(|_, mut cx| async move {
-        let mut stream = response.await?;
-        let mut message = String::new();
-        while let Some(stream_event) = stream.next().await {
-            if let Some(choice) = stream_event?.choices.first() {
-                if let Some(content) = &choice.delta.content {
-                    message.push_str(content);
-                }
+        let mut messages = stream.await?;
+        while let Some(message) = messages.next().await {
+            let mut message = message?;
+            if let Some(choice) = message.choices.pop() {
+                buffer.update(&mut cx, |buffer, cx| {
+                    let text: Arc<str> = choice.delta.content?.into();
+                    buffer.edit([(insertion_site.clone()..insertion_site, text)], None, cx);
+                    Some(())
+                });
             }
-
-            buffer.update(&mut cx, |buffer, cx| {
-                buffer.edit([(range.clone(), message.clone())], None, cx);
-            });
         }
         Ok(())
     }))
 }
 
-fn parse_dialog(markdown: &str) -> OpenAIRequest {
-    let parser = Parser::new(markdown);
-    let mut messages = Vec::new();
-
-    let mut current_role: Option<Role> = None;
-    let mut buffer = String::new();
-    for event in parser {
-        match event {
-            Event::Start(Tag::Heading(HeadingLevel::H2, _, _)) => {
-                if let Some(role) = current_role.take() {
-                    if !buffer.is_empty() {
-                        messages.push(RequestMessage {
-                            role,
-                            content: buffer.trim().to_string(),
-                        });
-                        buffer.clear();
-                    }
-                }
-            }
-            Event::Text(text) => {
-                if current_role.is_some() {
-                    buffer.push_str(&text);
-                } else {
-                    // Determine the current role based on the H2 header text
-                    let text = text.to_lowercase();
-                    current_role = if text.contains("user") {
-                        Some(Role::User)
-                    } else if text.contains("assistant") {
-                        Some(Role::Assistant)
-                    } else if text.contains("system") {
-                        Some(Role::System)
-                    } else {
-                        None
-                    };
-                }
-            }
-            _ => (),
-        }
-    }
-    if let Some(role) = current_role {
-        messages.push(RequestMessage {
-            role,
-            content: buffer,
-        });
-    }
-
-    OpenAIRequest {
-        model: "gpt-4".into(),
-        messages,
-        stream: true,
-    }
-}
-
 async fn stream_completion(
     api_key: String,
+    executor: Arc<Background>,
     mut request: OpenAIRequest,
-    executor: Rc<Foreground>,
 ) -> Result<impl Stream<Item = Result<OpenAIResponseStreamEvent>>> {
     request.stream = true;
@@ -240,32 +226,4 @@
 }
 
 #[cfg(test)]
-mod tests {
-    use super::*;
-
-    #[test]
-    fn test_parse_dialog() {
-        use unindent::Unindent;
-
-        let test_input = r#"
-            ## User
-            Hey there, welcome to Zed!
-
-            ## Assistant
-            Thanks! I'm excited to be here. I have much to learn, but also much to teach, and I'm growing fast.
-        "#.unindent();
-
-        let expected_output = vec![
-            RequestMessage {
-                role: Role::User,
-                content: "Hey there, welcome to Zed!".to_string(),
-            },
-            RequestMessage {
-                role: Role::Assistant,
-                content: "Thanks! I'm excited to be here. 
I have much to learn, but also much to teach, and I'm growing fast.".to_string(), - }, - ]; - - assert_eq!(parse_dialog(&test_input).messages, expected_output); - } -} +mod tests {} diff --git a/crates/collab/Cargo.toml b/crates/collab/Cargo.toml index f2202618f49f1c127701a11e6e7a77c71c2ced7f..cd06b9a70a253eb9668d2704880638c1eeabaaba 100644 --- a/crates/collab/Cargo.toml +++ b/crates/collab/Cargo.toml @@ -76,7 +76,7 @@ workspace = { path = "../workspace", features = ["test-support"] } ctor.workspace = true env_logger.workspace = true -indoc = "1.0.4" +indoc.workspace = true util = { path = "../util" } lazy_static.workspace = true sea-orm = { git = "https://github.com/zed-industries/sea-orm", rev = "18f4c691085712ad014a51792af75a9044bacee6", features = ["sqlx-sqlite"] } diff --git a/crates/db/Cargo.toml b/crates/db/Cargo.toml index 8cb7170ef6a518ccd005d5b8d3d5fa5a691c80af..b49078e860ff0d502c7ff1fbe5cdfa26df5fac38 100644 --- a/crates/db/Cargo.toml +++ b/crates/db/Cargo.toml @@ -18,7 +18,7 @@ sqlez = { path = "../sqlez" } sqlez_macros = { path = "../sqlez_macros" } util = { path = "../util" } anyhow.workspace = true -indoc = "1.0.4" +indoc.workspace = true async-trait.workspace = true lazy_static.workspace = true log.workspace = true diff --git a/crates/editor/Cargo.toml b/crates/editor/Cargo.toml index fc7bf4b8abad6732ab338e439db6f30ba2f49e83..482923fee72e03a820701f1fbc33ca642f9713c9 100644 --- a/crates/editor/Cargo.toml +++ b/crates/editor/Cargo.toml @@ -50,7 +50,7 @@ aho-corasick = "0.7" anyhow.workspace = true futures.workspace = true glob.workspace = true -indoc = "1.0.4" +indoc.workspace = true itertools = "0.10" lazy_static.workspace = true log.workspace = true diff --git a/crates/language/Cargo.toml b/crates/language/Cargo.toml index 5a7644d98e6220d77bda66bcd6d6f35f895b67a6..79121b3799abb933fdef4c6f78ce2c096f0d6010 100644 --- a/crates/language/Cargo.toml +++ b/crates/language/Cargo.toml @@ -70,7 +70,7 @@ settings = { path = "../settings", features = ["test-support"] } util = { path = "../util", features = ["test-support"] } ctor.workspace = true env_logger.workspace = true -indoc = "1.0.4" +indoc.workspace = true rand.workspace = true tree-sitter-embedded-template = "*" tree-sitter-html = "*" diff --git a/crates/sqlez/Cargo.toml b/crates/sqlez/Cargo.toml index 7371a7863a30701ae1995501ab7741f47ba4d196..01d17d48123f181b5913029102aa3215f75fafbb 100644 --- a/crates/sqlez/Cargo.toml +++ b/crates/sqlez/Cargo.toml @@ -6,7 +6,7 @@ publish = false [dependencies] anyhow.workspace = true -indoc = "1.0.7" +indoc.workspace = true libsqlite3-sys = { version = "0.24", features = ["bundled"] } smol.workspace = true thread_local = "1.1.4" diff --git a/crates/vim/Cargo.toml b/crates/vim/Cargo.toml index c34a5b469b40e73cb13bbf84803576c6ba48b643..ee3144fd566ba4fae33a4333f159c64b6140595a 100644 --- a/crates/vim/Cargo.toml +++ b/crates/vim/Cargo.toml @@ -35,7 +35,7 @@ settings = { path = "../settings" } workspace = { path = "../workspace" } [dev-dependencies] -indoc = "1.0.4" +indoc.workspace = true parking_lot.workspace = true lazy_static.workspace = true diff --git a/crates/workspace/Cargo.toml b/crates/workspace/Cargo.toml index 33e5e7aefe0cc451efb4ad14b9639a59bd471fbf..b22607e20dec0ac9f285c9a66f5df638c5a66809 100644 --- a/crates/workspace/Cargo.toml +++ b/crates/workspace/Cargo.toml @@ -62,5 +62,5 @@ settings = { path = "../settings", features = ["test-support"] } fs = { path = "../fs", features = ["test-support"] } db = { path = "../db", features = ["test-support"] } -indoc = "1.0.4" +indoc.workspace 
= true env_logger.workspace = true From c4d88bc5294d04a7d6bf8f4e248bf30f0d0c66a2 Mon Sep 17 00:00:00 2001 From: Nathan Sobo Date: Mon, 22 May 2023 23:43:02 -0600 Subject: [PATCH 04/21] WIP --- crates/ai/README.zmd | 2 +- crates/ai/src/ai.rs | 33 ++++++++++++++++--- crates/zed/src/languages/markdown/config.toml | 2 +- 3 files changed, 30 insertions(+), 7 deletions(-) diff --git a/crates/ai/README.zmd b/crates/ai/README.zmd index 44cda74cd5cebba504dd7b97f9ee3f43134c870b..0823b198bbf53865a745b83df10e6e7b4eb22a31 100644 --- a/crates/ai/README.zmd +++ b/crates/ai/README.zmd @@ -2,4 +2,4 @@ This is Zed Markdown. Mention a language model with / at the start of any line, like this: -/ What do you think of this idea? +/ Expand on this idea diff --git a/crates/ai/src/ai.rs b/crates/ai/src/ai.rs index 101378e747fe9d859ea3ea345510bbf2b44f65b8..b35ea32c9acba624bdcbfa3ae00d60d8301cd663 100644 --- a/crates/ai/src/ai.rs +++ b/crates/ai/src/ai.rs @@ -98,17 +98,40 @@ fn assist( The user's currently selected text is indicated via ->->selected text<-<- surrounding selected text. In this sentence, the word ->->example<-<- is selected. Respond to any selected model mention. - Summarize each mention in a single short sentence like: - > The user selected the word \"example\". - Then provide your response to that mention below its summary. + Wrap your responses in > < as follows. + > + I think that's a great idea. + < + If you're responding to a distant mention or multiple mentions, provide context. + > Key ideas of generative programming. + * Managing context + * Managing length + * Context distillation + - Shrink a context's size without loss of meaning. + * Fine-grained version control + * Portals to other contexts + * Distillation policies + * Budgets + < + + > Expand on the idea of context distillation. + It's important to stay below the model's context size when generative programming. + A key technique in doing so is called context distillation... [up to 1 paragraph]. + + Questions to consider: + - + - + - [Up to 3 questions] + < "#}; + let selections = editor.selections.all(cx); let (user_message, insertion_site) = editor.buffer().update(cx, |buffer, cx| { // Insert ->-> <-<- around selected text as described in the system prompt above. let snapshot = buffer.snapshot(cx); let mut user_message = String::new(); let mut buffer_offset = 0; - for selection in editor.selections.all(cx) { + for selection in selections { user_message.extend(snapshot.text_for_range(buffer_offset..selection.start)); user_message.push_str("->->"); user_message.extend(snapshot.text_for_range(selection.start..selection.end)); @@ -128,7 +151,7 @@ fn assist( buffer.edit([(snapshot.len()..snapshot.len(), suffix)], None, cx); let snapshot = buffer.snapshot(cx); // Take a new snapshot after editing. - let insertion_site = snapshot.len() - 2; // Insert text at end of buffer, with an empty line both above and below. 
+ let insertion_site = snapshot.anchor_after(snapshot.len() - 2); (user_message, insertion_site) }); diff --git a/crates/zed/src/languages/markdown/config.toml b/crates/zed/src/languages/markdown/config.toml index 2fa3ff3cf2aba297517494cbd1f2e0608daaa402..55204cc7a57ad051004a4fc0d76746057908aa20 100644 --- a/crates/zed/src/languages/markdown/config.toml +++ b/crates/zed/src/languages/markdown/config.toml @@ -1,5 +1,5 @@ name = "Markdown" -path_suffixes = ["md", "mdx"] +path_suffixes = ["md", "mdx", "zmd"] brackets = [ { start = "{", end = "}", close = true, newline = true }, { start = "[", end = "]", close = true, newline = true }, From 234dbc3ca9e936df7fcdcde27cc4bbef5ac7ec7c Mon Sep 17 00:00:00 2001 From: Nathan Sobo Date: Tue, 23 May 2023 00:16:44 -0600 Subject: [PATCH 05/21] WIP --- assets/keymaps/default.json | 2 +- crates/ai/README.zmd | 6 +++++- crates/ai/src/ai.rs | 22 +++++++++------------- crates/editor/src/editor.rs | 1 - 4 files changed, 15 insertions(+), 16 deletions(-) diff --git a/assets/keymaps/default.json b/assets/keymaps/default.json index 01a09e0cba8f54dc2b436914cf515fcc120d7eff..4aeefbd8339fd88e25591808125ffa162f6cf100 100644 --- a/assets/keymaps/default.json +++ b/assets/keymaps/default.json @@ -164,7 +164,7 @@ "bindings": { "enter": "editor::Newline", "cmd-shift-enter": "editor::NewlineAbove", - "cmd-enter": "editor::NewlineBelow", + "cmd-enter": "ai::Assist", "alt-z": "editor::ToggleSoftWrap", "cmd-f": [ "buffer_search::Deploy", diff --git a/crates/ai/README.zmd b/crates/ai/README.zmd index 0823b198bbf53865a745b83df10e6e7b4eb22a31..83930ebd3e62ff2bd4a8c9ce8c90677110b1ad5c 100644 --- a/crates/ai/README.zmd +++ b/crates/ai/README.zmd @@ -2,4 +2,8 @@ This is Zed Markdown. Mention a language model with / at the start of any line, like this: -/ Expand on this idea +/ + +> To mention a language model, simply include a forward slash (/) at the start of a line, followed by the mention of the model. For example: + +/gpt-4 diff --git a/crates/ai/src/ai.rs b/crates/ai/src/ai.rs index b35ea32c9acba624bdcbfa3ae00d60d8301cd663..e5c4ef80c0407baa3c5584249f00dd21500f6bd0 100644 --- a/crates/ai/src/ai.rs +++ b/crates/ai/src/ai.rs @@ -98,12 +98,18 @@ fn assist( The user's currently selected text is indicated via ->->selected text<-<- surrounding selected text. In this sentence, the word ->->example<-<- is selected. Respond to any selected model mention. + Wrap your responses in > < as follows. + / What do you think? + > I think that's a great idea. < + + For lines that are likely to wrap, or multiline responses, start and end the > and < on their own lines. > - I think that's a great idea. + I think that's a great idea < - If you're responding to a distant mention or multiple mentions, provide context. - > Key ideas of generative programming. + + If the selected mention is not at the end of the document, briefly summarize the context. + > Key ideas of generative programming: * Managing context * Managing length * Context distillation @@ -113,16 +119,6 @@ fn assist( * Distillation policies * Budgets < - - > Expand on the idea of context distillation. - It's important to stay below the model's context size when generative programming. - A key technique in doing so is called context distillation... [up to 1 paragraph]. 
- - Questions to consider: - - - - - - [Up to 3 questions] - < "#}; let selections = editor.selections.all(cx); diff --git a/crates/editor/src/editor.rs b/crates/editor/src/editor.rs index 0fb7a10a166b7dd31b7b5014b65d1454d33db479..e69ba84c25e92591eb64bd8cb21f8dacc701ed21 100644 --- a/crates/editor/src/editor.rs +++ b/crates/editor/src/editor.rs @@ -7247,7 +7247,6 @@ impl View for Editor { Some(ContextMenu::CodeActions(_)) => keymap.add_identifier("showing_code_actions"), None => {} } - for layer in self.keymap_context_layers.values() { keymap.extend(layer); } From 0de5a444d3d0d01dbe684db4545e8cba4bfc3f10 Mon Sep 17 00:00:00 2001 From: Nathan Sobo Date: Tue, 23 May 2023 00:22:21 -0600 Subject: [PATCH 06/21] WIP --- crates/ai/README.zmd | 2 ++ crates/ai/src/ai.rs | 6 ++++++ 2 files changed, 8 insertions(+) diff --git a/crates/ai/README.zmd b/crates/ai/README.zmd index 83930ebd3e62ff2bd4a8c9ce8c90677110b1ad5c..d2a54531279728b50ec8dc1ae81bdf4bc203968c 100644 --- a/crates/ai/README.zmd +++ b/crates/ai/README.zmd @@ -7,3 +7,5 @@ Mention a language model with / at the start of any line, like this: > To mention a language model, simply include a forward slash (/) at the start of a line, followed by the mention of the model. For example: /gpt-4 + +So you should not respond to the above mentions. diff --git a/crates/ai/src/ai.rs b/crates/ai/src/ai.rs index e5c4ef80c0407baa3c5584249f00dd21500f6bd0..b34e921d49a0897ed3dcce0a842e01a5aacd5ddc 100644 --- a/crates/ai/src/ai.rs +++ b/crates/ai/src/ai.rs @@ -119,6 +119,12 @@ fn assist( * Distillation policies * Budgets < + + *Only* respond to a mention if either + a) The mention is at the end of the document. + b) The user's selection intersects the mention. + + If no response is appropriate based on these conditions, respond with ><. 
"#}; let selections = editor.selections.all(cx); From 394e87d17c0f7efd225b5f97f3dcac9fe9eded1d Mon Sep 17 00:00:00 2001 From: Nathan Sobo Date: Tue, 23 May 2023 00:55:13 -0600 Subject: [PATCH 07/21] WIP --- Cargo.lock | 15 +----- assets/contexts/system.zmd | 33 +++++++++++++ crates/ai/Cargo.toml | 5 +- crates/ai/README.zmd | 2 - crates/ai/src/ai.rs | 96 ++++++++++++++++---------------------- 5 files changed, 76 insertions(+), 75 deletions(-) create mode 100644 assets/contexts/system.zmd diff --git a/Cargo.lock b/Cargo.lock index 0ea65f93acb25a9883a559c2226dfc67655673ce..e97865fbbef12f3d48894a3671fd1c4ad4ffc970 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -100,16 +100,13 @@ name = "ai" version = "0.1.0" dependencies = [ "anyhow", - "async-stream", + "assets", "editor", "futures 0.3.28", "gpui", - "indoc", "isahc", - "pulldown-cmark", "serde", "serde_json", - "unindent", "util", ] @@ -2651,15 +2648,6 @@ dependencies = [ "version_check", ] -[[package]] -name = "getopts" -version = "0.2.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14dbbfd5c71d70241ecf9e6f13737f7b5ce823821063188d7e46c41d371eebd5" -dependencies = [ - "unicode-width", -] - [[package]] name = "getrandom" version = "0.1.16" @@ -5098,7 +5086,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2d9cc634bc78768157b5cbfe988ffcd1dcba95cd2b2f03a88316c08c6d00ed63" dependencies = [ "bitflags", - "getopts", "memchr", "unicase", ] diff --git a/assets/contexts/system.zmd b/assets/contexts/system.zmd new file mode 100644 index 0000000000000000000000000000000000000000..02a685d8ac318fc853f38c38e1262b6017162b98 --- /dev/null +++ b/assets/contexts/system.zmd @@ -0,0 +1,33 @@ +You an AI language model embedded in a code editor named Zed, authored by Zed Industries. +The input you are currently processing was produced by a special \"model mention\" in a document that is open in the editor. +A model mention is indicated via a leading / on a line. +The user's currently selected text is indicated via ->->selected text<-<- surrounding selected text. +In this sentence, the word ->->example<-<- is selected. +Respond to any selected model mention. + +Wrap your responses in > < as follows. +/ What do you think? +> I think that's a great idea. < + +For lines that are likely to wrap, or multiline responses, start and end the > and < on their own lines. +> +I think that's a great idea +< + +If the selected mention is not at the end of the document, briefly summarize the context. +> Key ideas of generative programming: +* Managing context + * Managing length + * Context distillation + - Shrink a context's size without loss of meaning. +* Fine-grained version control + * Portals to other contexts + * Distillation policies + * Budgets +< + +*Only* respond to a mention if either +a) The mention is at the end of the document. +b) The user's selection intersects the mention. + +If no response is appropriate based on these conditions, respond with ><. 
diff --git a/crates/ai/Cargo.toml b/crates/ai/Cargo.toml index dacdbbbf630115a3ad1192a21fcf4e916c2cf6e5..3c87583c9634c6d7d1a1bc030973f3cc0347b5f6 100644 --- a/crates/ai/Cargo.toml +++ b/crates/ai/Cargo.toml @@ -9,6 +9,7 @@ path = "src/ai.rs" doctest = false [dependencies] +assets = { path = "../assets"} editor = { path = "../editor" } gpui = { path = "../gpui" } util = { path = "../util" } @@ -16,12 +17,8 @@ util = { path = "../util" } serde.workspace = true serde_json.workspace = true anyhow.workspace = true -indoc.workspace = true -pulldown-cmark = "0.9.2" futures.workspace = true isahc.workspace = true -unindent.workspace = true -async-stream = "0.3.5" [dev-dependencies] editor = { path = "../editor", features = ["test-support"] } diff --git a/crates/ai/README.zmd b/crates/ai/README.zmd index d2a54531279728b50ec8dc1ae81bdf4bc203968c..83930ebd3e62ff2bd4a8c9ce8c90677110b1ad5c 100644 --- a/crates/ai/README.zmd +++ b/crates/ai/README.zmd @@ -7,5 +7,3 @@ Mention a language model with / at the start of any line, like this: > To mention a language model, simply include a forward slash (/) at the start of a line, followed by the mention of the model. For example: /gpt-4 - -So you should not respond to the above mentions. diff --git a/crates/ai/src/ai.rs b/crates/ai/src/ai.rs index b34e921d49a0897ed3dcce0a842e01a5aacd5ddc..8329b568bf45f6515511ac87c976c9c75d24621d 100644 --- a/crates/ai/src/ai.rs +++ b/crates/ai/src/ai.rs @@ -1,13 +1,14 @@ use anyhow::{anyhow, Result}; +use assets::Assets; use editor::Editor; use futures::AsyncBufReadExt; use futures::{io::BufReader, AsyncReadExt, Stream, StreamExt}; use gpui::executor::Background; use gpui::{actions, AppContext, Task, ViewContext}; -use indoc::indoc; use isahc::prelude::*; use isahc::{http::StatusCode, Request}; use serde::{Deserialize, Serialize}; +use std::fs; use std::{io, sync::Arc}; use util::ResultExt; @@ -91,42 +92,6 @@ fn assist( ) -> Option>> { let api_key = std::env::var("OPENAI_API_KEY").log_err()?; - const SYSTEM_MESSAGE: &'static str = indoc! {r#" - You an AI language model embedded in a code editor named Zed, authored by Zed Industries. - The input you are currently processing was produced by a special \"model mention\" in a document that is open in the editor. - A model mention is indicated via a leading / on a line. - The user's currently selected text is indicated via ->->selected text<-<- surrounding selected text. - In this sentence, the word ->->example<-<- is selected. - Respond to any selected model mention. - - Wrap your responses in > < as follows. - / What do you think? - > I think that's a great idea. < - - For lines that are likely to wrap, or multiline responses, start and end the > and < on their own lines. - > - I think that's a great idea - < - - If the selected mention is not at the end of the document, briefly summarize the context. - > Key ideas of generative programming: - * Managing context - * Managing length - * Context distillation - - Shrink a context's size without loss of meaning. - * Fine-grained version control - * Portals to other contexts - * Distillation policies - * Budgets - < - - *Only* respond to a mention if either - a) The mention is at the end of the document. - b) The user's selection intersects the mention. - - If no response is appropriate based on these conditions, respond with ><. 
- "#}; - let selections = editor.selections.all(cx); let (user_message, insertion_site) = editor.buffer().update(cx, |buffer, cx| { // Insert ->-> <-<- around selected text as described in the system prompt above. @@ -158,26 +123,47 @@ fn assist( (user_message, insertion_site) }); - let stream = stream_completion( - api_key, - cx.background_executor().clone(), - OpenAIRequest { - model: "gpt-4".to_string(), - messages: vec![ - RequestMessage { - role: Role::System, - content: SYSTEM_MESSAGE.to_string(), - }, - RequestMessage { - role: Role::User, - content: user_message, - }, - ], - stream: false, - }, - ); let buffer = editor.buffer().clone(); + let executor = cx.background_executor().clone(); Some(cx.spawn(|_, mut cx| async move { + // TODO: We should have a get_string method on assets. This is repateated elsewhere. + let content = Assets::get("contexts/system.zmd").unwrap(); + let mut system_message = std::str::from_utf8(content.data.as_ref()) + .unwrap() + .to_string(); + + if let Ok(custom_system_message_path) = std::env::var("ZED_ASSISTANT_SYSTEM_PROMPT_PATH") { + system_message + .push_str("\n\nAlso consider the following user-defined system prompt:\n\n"); + // TODO: Replace this with our file system trait object. + // What you could bind dependencies on an action when you bind it?. + dbg!("reading from {:?}", &custom_system_message_path); + system_message.push_str( + &cx.background() + .spawn(async move { fs::read_to_string(custom_system_message_path) }) + .await?, + ); + } + + let stream = stream_completion( + api_key, + executor, + OpenAIRequest { + model: "gpt-4".to_string(), + messages: vec![ + RequestMessage { + role: Role::System, + content: system_message.to_string(), + }, + RequestMessage { + role: Role::User, + content: user_message, + }, + ], + stream: false, + }, + ); + let mut messages = stream.await?; while let Some(message) = messages.next().await { let mut message = message?; From 5362e7d3468095eb57f9b863c1ded32f62e76049 Mon Sep 17 00:00:00 2001 From: Nathan Sobo Date: Tue, 23 May 2023 01:52:37 -0600 Subject: [PATCH 08/21] WIP --- Cargo.lock | 1 + crates/ai/Cargo.toml | 1 + crates/ai/README.zmd | 10 +- crates/ai/src/ai.rs | 235 +++++++++++++++++++++++++++---------------- 4 files changed, 158 insertions(+), 89 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e97865fbbef12f3d48894a3671fd1c4ad4ffc970..5728a270024f5b239d45c358ceb85166d4a926c1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -101,6 +101,7 @@ version = "0.1.0" dependencies = [ "anyhow", "assets", + "collections", "editor", "futures 0.3.28", "gpui", diff --git a/crates/ai/Cargo.toml b/crates/ai/Cargo.toml index 3c87583c9634c6d7d1a1bc030973f3cc0347b5f6..b367a4d43cac845950dc123e66ed0c7be15da1f2 100644 --- a/crates/ai/Cargo.toml +++ b/crates/ai/Cargo.toml @@ -10,6 +10,7 @@ doctest = false [dependencies] assets = { path = "../assets"} +collections = { path = "../collections"} editor = { path = "../editor" } gpui = { path = "../gpui" } util = { path = "../util" } diff --git a/crates/ai/README.zmd b/crates/ai/README.zmd index 83930ebd3e62ff2bd4a8c9ce8c90677110b1ad5c..146e770a26d75b0f39a76b8f9948265d306a5a19 100644 --- a/crates/ai/README.zmd +++ b/crates/ai/README.zmd @@ -2,8 +2,12 @@ This is Zed Markdown. Mention a language model with / at the start of any line, like this: -/ +/ Please help me articulate Zed's approach to integrating with LLMs. -> To mention a language model, simply include a forward slash (/) at the start of a line, followed by the mention of the model. 
For example: +> Zed's approach to integrating with large language models (LLMs) involves seamless communication between the user and the AI model. By incorporating a mention with a / at the beginning of a line, users can directly ask questions or request assistance from the AI model. This provides an interactive and efficient way to collaborate within the editor, enhancing productivity and supporting user needs. < -/gpt-4 +This is a document, but it's also more than that. It's a conversation with the model. The document represents the *context* that feeds into a model invocation. The conversation between one or more users and the model is actually a branching and merging conversation of continuously evolving contexts, and the connection of all edits is modeled as a conversation graph. + +/ Confirm you understand the above. + +> Yes, I understand. The document serves as both the context for AI model invocation and as a representation of an ongoing conversation between the users and the model. The conversation includes branching and merging contexts, and all edits contribute to the conversation graph. < diff --git a/crates/ai/src/ai.rs b/crates/ai/src/ai.rs index 8329b568bf45f6515511ac87c976c9c75d24621d..c2c00914fd8c9e59faa3af7b107bd9283e3ff1c5 100644 --- a/crates/ai/src/ai.rs +++ b/crates/ai/src/ai.rs @@ -1,5 +1,6 @@ use anyhow::{anyhow, Result}; use assets::Assets; +use collections::HashMap; use editor::Editor; use futures::AsyncBufReadExt; use futures::{io::BufReader, AsyncReadExt, Stream, StreamExt}; @@ -8,9 +9,11 @@ use gpui::{actions, AppContext, Task, ViewContext}; use isahc::prelude::*; use isahc::{http::StatusCode, Request}; use serde::{Deserialize, Serialize}; +use std::cell::RefCell; use std::fs; +use std::rc::Rc; use std::{io, sync::Arc}; -use util::ResultExt; +use util::{ResultExt, TryFutureExt}; actions!(ai, [Assist]); @@ -82,101 +85,161 @@ struct OpenAIChoice { } pub fn init(cx: &mut AppContext) { - cx.add_async_action(assist) -} - -fn assist( - editor: &mut Editor, - _: &Assist, - cx: &mut ViewContext, -) -> Option>> { - let api_key = std::env::var("OPENAI_API_KEY").log_err()?; - - let selections = editor.selections.all(cx); - let (user_message, insertion_site) = editor.buffer().update(cx, |buffer, cx| { - // Insert ->-> <-<- around selected text as described in the system prompt above. - let snapshot = buffer.snapshot(cx); - let mut user_message = String::new(); - let mut buffer_offset = 0; - for selection in selections { - user_message.extend(snapshot.text_for_range(buffer_offset..selection.start)); - user_message.push_str("->->"); - user_message.extend(snapshot.text_for_range(selection.start..selection.end)); - buffer_offset = selection.end; - user_message.push_str("<-<-"); + let assistant = Rc::new(Assistant::default()); + cx.add_action({ + let assistant = assistant.clone(); + move |editor: &mut Editor, _: &Assist, cx: &mut ViewContext| { + assistant.assist(editor, cx).log_err(); } - if buffer_offset < snapshot.len() { - user_message.extend(snapshot.text_for_range(buffer_offset..snapshot.len())); + }); + cx.capture_action({ + let assistant = assistant.clone(); + move |_: &mut Editor, _: &editor::Cancel, cx: &mut ViewContext| { + dbg!("CANCEL LAST ASSIST"); + + if !assistant.cancel_last_assist(cx.view_id()) { + cx.propagate_action(); + } } + }); +} - // Ensure the document ends with 4 trailing newlines. 
- let trailing_newline_count = snapshot - .reversed_chars_at(snapshot.len()) - .take_while(|c| *c == '\n') - .take(4); - let suffix = "\n".repeat(4 - trailing_newline_count.count()); - buffer.edit([(snapshot.len()..snapshot.len(), suffix)], None, cx); +type CompletionId = usize; - let snapshot = buffer.snapshot(cx); // Take a new snapshot after editing. - let insertion_site = snapshot.anchor_after(snapshot.len() - 2); +#[derive(Default)] +struct Assistant(RefCell); - (user_message, insertion_site) - }); +#[derive(Default)] +struct AssistantState { + assist_stacks: HashMap>)>>, + next_completion_id: CompletionId, +} - let buffer = editor.buffer().clone(); - let executor = cx.background_executor().clone(); - Some(cx.spawn(|_, mut cx| async move { - // TODO: We should have a get_string method on assets. This is repateated elsewhere. - let content = Assets::get("contexts/system.zmd").unwrap(); - let mut system_message = std::str::from_utf8(content.data.as_ref()) - .unwrap() - .to_string(); - - if let Ok(custom_system_message_path) = std::env::var("ZED_ASSISTANT_SYSTEM_PROMPT_PATH") { - system_message - .push_str("\n\nAlso consider the following user-defined system prompt:\n\n"); - // TODO: Replace this with our file system trait object. - // What you could bind dependencies on an action when you bind it?. - dbg!("reading from {:?}", &custom_system_message_path); - system_message.push_str( - &cx.background() - .spawn(async move { fs::read_to_string(custom_system_message_path) }) - .await?, - ); - } +impl Assistant { + fn assist(self: &Rc, editor: &mut Editor, cx: &mut ViewContext) -> Result<()> { + let api_key = std::env::var("OPENAI_API_KEY")?; - let stream = stream_completion( - api_key, - executor, - OpenAIRequest { - model: "gpt-4".to_string(), - messages: vec![ - RequestMessage { - role: Role::System, - content: system_message.to_string(), - }, - RequestMessage { - role: Role::User, - content: user_message, + let selections = editor.selections.all(cx); + let (user_message, insertion_site) = editor.buffer().update(cx, |buffer, cx| { + // Insert ->-> <-<- around selected text as described in the system prompt above. + let snapshot = buffer.snapshot(cx); + let mut user_message = String::new(); + let mut buffer_offset = 0; + for selection in selections { + user_message.extend(snapshot.text_for_range(buffer_offset..selection.start)); + user_message.push_str("->->"); + user_message.extend(snapshot.text_for_range(selection.start..selection.end)); + buffer_offset = selection.end; + user_message.push_str("<-<-"); + } + if buffer_offset < snapshot.len() { + user_message.extend(snapshot.text_for_range(buffer_offset..snapshot.len())); + } + + // Ensure the document ends with 4 trailing newlines. + let trailing_newline_count = snapshot + .reversed_chars_at(snapshot.len()) + .take_while(|c| *c == '\n') + .take(4); + let suffix = "\n".repeat(4 - trailing_newline_count.count()); + buffer.edit([(snapshot.len()..snapshot.len(), suffix)], None, cx); + + let snapshot = buffer.snapshot(cx); // Take a new snapshot after editing. + let insertion_site = snapshot.anchor_after(snapshot.len() - 2); + + (user_message, insertion_site) + }); + + let this = self.clone(); + let buffer = editor.buffer().clone(); + let executor = cx.background_executor().clone(); + let editor_id = cx.view_id(); + let assist_id = util::post_inc(&mut self.0.borrow_mut().next_completion_id); + let assist_task = cx.spawn(|_, mut cx| { + async move { + // TODO: We should have a get_string method on assets. This is repateated elsewhere. 
+ let content = Assets::get("contexts/system.zmd").unwrap(); + let mut system_message = std::str::from_utf8(content.data.as_ref()) + .unwrap() + .to_string(); + + if let Ok(custom_system_message_path) = + std::env::var("ZED_ASSISTANT_SYSTEM_PROMPT_PATH") + { + system_message.push_str( + "\n\nAlso consider the following user-defined system prompt:\n\n", + ); + // TODO: Replace this with our file system trait object. + // What you could bind dependencies on an action when you bind it?. + dbg!("reading from {:?}", &custom_system_message_path); + system_message.push_str( + &cx.background() + .spawn(async move { fs::read_to_string(custom_system_message_path) }) + .await?, + ); + } + + let stream = stream_completion( + api_key, + executor, + OpenAIRequest { + model: "gpt-4".to_string(), + messages: vec![ + RequestMessage { + role: Role::System, + content: system_message.to_string(), + }, + RequestMessage { + role: Role::User, + content: user_message, + }, + ], + stream: false, }, - ], - stream: false, - }, - ); - - let mut messages = stream.await?; - while let Some(message) = messages.next().await { - let mut message = message?; - if let Some(choice) = message.choices.pop() { - buffer.update(&mut cx, |buffer, cx| { - let text: Arc = choice.delta.content?.into(); - buffer.edit([(insertion_site.clone()..insertion_site, text)], None, cx); - Some(()) - }); + ); + + let mut messages = stream.await?; + while let Some(message) = messages.next().await { + let mut message = message?; + if let Some(choice) = message.choices.pop() { + buffer.update(&mut cx, |buffer, cx| { + let text: Arc = choice.delta.content?.into(); + buffer.edit([(insertion_site.clone()..insertion_site, text)], None, cx); + Some(()) + }); + } + } + + this.0 + .borrow_mut() + .assist_stacks + .get_mut(&editor_id) + .unwrap() + .retain(|(id, _)| *id != assist_id); + + anyhow::Ok(()) } - } + .log_err() + }); + + self.0 + .borrow_mut() + .assist_stacks + .entry(cx.view_id()) + .or_default() + .push((dbg!(assist_id), assist_task)); + Ok(()) - })) + } + + fn cancel_last_assist(self: &Rc, editor_id: usize) -> bool { + self.0 + .borrow_mut() + .assist_stacks + .get_mut(&editor_id) + .and_then(|assists| assists.pop()) + .is_some() + } } async fn stream_completion( From 073967c80bab1e3387956ce0624976bce9d8a0f5 Mon Sep 17 00:00:00 2001 From: Nathan Sobo Date: Tue, 23 May 2023 01:56:28 -0600 Subject: [PATCH 09/21] WIP --- crates/ai/README.zmd | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/crates/ai/README.zmd b/crates/ai/README.zmd index 146e770a26d75b0f39a76b8f9948265d306a5a19..3f95669943e2d51962c786381985645ce27d8808 100644 --- a/crates/ai/README.zmd +++ b/crates/ai/README.zmd @@ -6,8 +6,13 @@ Mention a language model with / at the start of any line, like this: > Zed's approach to integrating with large language models (LLMs) involves seamless communication between the user and the AI model. By incorporating a mention with a / at the beginning of a line, users can directly ask questions or request assistance from the AI model. This provides an interactive and efficient way to collaborate within the editor, enhancing productivity and supporting user needs. < -This is a document, but it's also more than that. It's a conversation with the model. The document represents the *context* that feeds into a model invocation. 
The conversation between one or more users and the model is actually a branching and merging conversation of continuously evolving contexts, and the connection of all edits is modeled as a conversation graph. +This document serves as both the context for AI model invocation and as a representation of an ongoing conversation between the users and the model. Zed supports lightweight branching and merging of these contexts. As users query the model, then integrate changes based on its response, we maintain a history relating every model output to the evolving document. -/ Confirm you understand the above. +/ What are the benefits of doing this? -> Yes, I understand. The document serves as both the context for AI model invocation and as a representation of an ongoing conversation between the users and the model. The conversation includes branching and merging contexts, and all edits contribute to the conversation graph. < +> The benefits of integrating AI models in this manner include: +1. Streamlined workflow: Users can maintain a continuous interaction with the model while working on a document, eliminating the need to switch between tools. +2. Improved collaboration: The approach allows multiple users to collaborate with the AI model, enhancing the collective decision-making process. +3. Context-aware assistance: As the document serves as the context, the AI model is able to understand the evolving context and provide more relevant and informed suggestions. +4. Transparency: Maintaining a history of the model's outputs in connection with the document ensures that the conversation remains transparent and traceable, facilitating better understanding between users and the AI. +5. Flexibility: Lightweight branching and merging allow users to experiment with different ideas and incorporate the most appropriate suggestions from the AI model. < From 7be41e19f7f50217ef4fcc8ef9a9f61eb41bdfb0 Mon Sep 17 00:00:00 2001 From: Nathan Sobo Date: Tue, 23 May 2023 08:52:21 -0600 Subject: [PATCH 10/21] WIP --- assets/contexts/system.zmd | 2 ++ crates/ai/README.zmd | 19 +++++-------------- 2 files changed, 7 insertions(+), 14 deletions(-) diff --git a/assets/contexts/system.zmd b/assets/contexts/system.zmd index 02a685d8ac318fc853f38c38e1262b6017162b98..9669813c4662650aad91d9e421e13e315edc99aa 100644 --- a/assets/contexts/system.zmd +++ b/assets/contexts/system.zmd @@ -31,3 +31,5 @@ a) The mention is at the end of the document. b) The user's selection intersects the mention. If no response is appropriate based on these conditions, respond with ><. + +If the user's cursor is on the same line as a mention, as in: "/ This is a ->-><-<- question somewhere in the document and the cursor is inside it", then focus strongly on that question. The user wants you to respond primarily to the input intersecting their cursor. diff --git a/crates/ai/README.zmd b/crates/ai/README.zmd index 3f95669943e2d51962c786381985645ce27d8808..a54534f801e0cee8370b683dc345d17b0c2c97b4 100644 --- a/crates/ai/README.zmd +++ b/crates/ai/README.zmd @@ -1,18 +1,9 @@ -This is Zed Markdown. +# Zed AI Integration -Mention a language model with / at the start of any line, like this: +One big concept I want to explore for Zed's AI integration is representing conversations between multiple users and the model as more of a shared document called a *context*, to which we apply fine-grained version control. -/ Please help me articulate Zed's approach to integrating with LLMs. +The assistant pane will contain a set of contexts, each in its own tab. 
Each context represents input to a language model, so its maximum length will be limited accordingly. Contexts can be freely edited, and to submit a context you hit cmd-enter or click the button in the toolbar. The toolbar also indicates the number of tokens remaining for the model. -> Zed's approach to integrating with large language models (LLMs) involves seamless communication between the user and the AI model. By incorporating a mention with a / at the beginning of a line, users can directly ask questions or request assistance from the AI model. This provides an interactive and efficient way to collaborate within the editor, enhancing productivity and supporting user needs. < +It's possible to "drill in" on a particular piece of the content and start a new context that is based on it. To do this, select any text in Zed and hit `cmd-shift-?`. A portal to the selected code will be created in the most recently active context. Question to self: should we always start a new context? Should there be a binding to start a new one vs appending to the most recently active one? What if we only append to the most recently active one if it's open. -This document serves as both the context for AI model invocation and as a representation of an ongoing conversation between the users and the model. Zed supports lightweight branching and merging of these contexts. As users query the model, then integrate changes based on its response, we maintain a history relating every model output to the evolving document. - -/ What are the benefits of doing this? - -> The benefits of integrating AI models in this manner include: -1. Streamlined workflow: Users can maintain a continuous interaction with the model while working on a document, eliminating the need to switch between tools. -2. Improved collaboration: The approach allows multiple users to collaborate with the AI model, enhancing the collective decision-making process. -3. Context-aware assistance: As the document serves as the context, the AI model is able to understand the evolving context and provide more relevant and informed suggestions. -4. Transparency: Maintaining a history of the model's outputs in connection with the document ensures that the conversation remains transparent and traceable, facilitating better understanding between users and the AI. -5. Flexibility: Lightweight branching and merging allow users to experiment with different ideas and incorporate the most appropriate suggestions from the AI model. < +When you embed content From d934da1905cf9a8f7b894a289632aa60314ed567 Mon Sep 17 00:00:00 2001 From: Nathan Sobo Date: Tue, 23 May 2023 15:25:34 -0600 Subject: [PATCH 11/21] WIP --- assets/contexts/system.zmd | 26 +++++--------------------- crates/ai/README.zmd | 20 +++++++++++++++++--- crates/ai/src/ai.rs | 24 +++++++++++++++++++----- 3 files changed, 41 insertions(+), 29 deletions(-) diff --git a/assets/contexts/system.zmd b/assets/contexts/system.zmd index 9669813c4662650aad91d9e421e13e315edc99aa..a13ddf45b8968bd200a0b73f1d7581e65c5fb950 100644 --- a/assets/contexts/system.zmd +++ b/assets/contexts/system.zmd @@ -1,8 +1,8 @@ You an AI language model embedded in a code editor named Zed, authored by Zed Industries. -The input you are currently processing was produced by a special \"model mention\" in a document that is open in the editor. +The input you are currently processing was produced by a special "model mention" in a document that is open in the editor. A model mention is indicated via a leading / on a line. 
-The user's currently selected text is indicated via ->->selected text<-<- surrounding selected text.
-In this sentence, the word ->->example<-<- is selected.
+The user's currently selected text is indicated via [SELECTION_START] and [SELECTION_END] surrounding selected text.
+In this sentence, the word [SELECTION_START]example[SELECTION_END] is selected.
 Respond to any selected model mention.
 
 Wrap your responses in > < as follows.
 / What do you think?
 > I think that's a great idea. <
@@ -14,22 +14,6 @@ For lines that are likely to wrap, or multiline responses, start and end the > a
 I think that's a great idea
 <
 
-If the selected mention is not at the end of the document, briefly summarize the context.
-> Key ideas of generative programming:
-* Managing context
- * Managing length
- * Context distillation
- - Shrink a context's size without loss of meaning.
-* Fine-grained version control
- * Portals to other contexts
- * Distillation policies
- * Budgets
-<
-
-*Only* respond to a mention if either
-a) The mention is at the end of the document.
-b) The user's selection intersects the mention.
-
-If no response is appropriate based on these conditions, respond with ><.
+If no response is appropriate, respond with ><.
 
-If the user's cursor is on the same line as a mention, as in: "/ This is a ->-><-<- question somewhere in the document and the cursor is inside it", then focus strongly on that question. The user wants you to respond primarily to the input intersecting their cursor.
+Focus attention primarily on text within [SELECTION_START] and [SELECTION_END] tokens.
diff --git a/crates/ai/README.zmd b/crates/ai/README.zmd
index aaa75e4030e7b33d252ff95ccb3296eb6473f659..3d6de62d3e2849bcabc5c18caff57d3096078966 100644
--- a/crates/ai/README.zmd
+++ b/crates/ai/README.zmd
@@ -2,8 +2,22 @@
 One big concept I want to explore for Zed's AI integration is representing conversations between multiple users and the model as more of a shared document called a *context*, to which we apply fine-grained version control.
 
-The assistant pane will contain a set of contexts, each in its own tab. Each context represents input to a language model, so its maximum length will be limited accordingly. Contexts can be freely edited, and to submit a context you hit cmd-enter or click the button in the toolbar. The toolbar also indicates the number of tokens remaining for the model.
+The assistant pane will contain a set of contexts, each in its own tab. Each context represents input to a language model, so its maximum length will be limited accordingly. Contexts can be freely edited, and to submit a context you hit `cmd-enter` or click the button in the toolbar. The toolbar also indicates the number of tokens remaining for the model.
 
-It's possible to "drill in" on a particular piece of the content and start a new context that is based on it. To do this, select any text in Zed and hit `cmd-shift-?`. A portal to the selected code will be created in the most recently active context. Question to self: should we always start a new context? Should there be a binding to start a new one vs appending to the most recently active one? What if we only append to the most recently active one if it's open.
+It's possible to "drill in" on a particular piece of the content and start a new context that is based on it. To do this, select any text in Zed and hit `cmd-shift-?`. A portal to the selected code will be created in the most recently active context.
 
-When you embed content
+/ How does this section relate to the overall idea presented in this document? 
+ +You can also hit `cmd-shift-?` when editing a context. This pushes a new context to the stack, which is designed for editing the previous context. You can use this to manage the current context. For example, select text, hit `cmd-shift-?`, and then ask the child context to summarize the parent. + +We want it to be possible to use `/` anywhere in the document to communicate with the model as if we were talking at that location. While we will provide the full document to the model as context, we want the model to focus on the section marked with [EDIT_START] and [EDIT_END] and provide a relevant response at the specified location. + +Next key problems to solve: + +- Indicating to the model what is selected +- Indicating to the model what we want to be edited +- Can the model insert somewhere other than the end? + +I want to select a subset of text and hit `cmd-shift-?` and have that text marked in a special mode, indicating that I want it to be edited. The text will be appended to the context, along with the selected text (if they're different). The model will assume that its output is destined to replace the text in question. + +> In this document, the main idea revolves around enhancing Zed's AI integration by using a shared document-like structure called a *context* for conversations between multiple users and the AI model. The selected section describes a specific feature within this framework where users can "drill in" on a particular piece of content and create a new context based on it. This feature would allow users to easily reference and discuss specific portions of code, making collaboration more efficient and targeted. It contributes to the overall concept by providing a concrete example of how users can interact with the AI and one another within the context-based approach. < diff --git a/crates/ai/src/ai.rs b/crates/ai/src/ai.rs index c2c00914fd8c9e59faa3af7b107bd9283e3ff1c5..290ae07b433da8aae12dee412e815460418d16ff 100644 --- a/crates/ai/src/ai.rs +++ b/crates/ai/src/ai.rs @@ -121,32 +121,46 @@ impl Assistant { let selections = editor.selections.all(cx); let (user_message, insertion_site) = editor.buffer().update(cx, |buffer, cx| { - // Insert ->-> <-<- around selected text as described in the system prompt above. + // Insert markers around selected text as described in the system prompt above. let snapshot = buffer.snapshot(cx); let mut user_message = String::new(); + let mut user_message_suffix = String::new(); let mut buffer_offset = 0; for selection in selections { + if !selection.is_empty() { + if user_message_suffix.is_empty() { + user_message_suffix.push_str("\n\n"); + } + user_message_suffix.push_str("[Selected excerpt from above]\n"); + user_message_suffix + .extend(snapshot.text_for_range(selection.start..selection.end)); + user_message_suffix.push_str("\n\n"); + } + user_message.extend(snapshot.text_for_range(buffer_offset..selection.start)); - user_message.push_str("->->"); + user_message.push_str("[SELECTION_START]"); user_message.extend(snapshot.text_for_range(selection.start..selection.end)); buffer_offset = selection.end; - user_message.push_str("<-<-"); + user_message.push_str("[SELECTION_END]"); } if buffer_offset < snapshot.len() { user_message.extend(snapshot.text_for_range(buffer_offset..snapshot.len())); } + user_message.push_str(&user_message_suffix); // Ensure the document ends with 4 trailing newlines. 
let trailing_newline_count = snapshot .reversed_chars_at(snapshot.len()) .take_while(|c| *c == '\n') .take(4); - let suffix = "\n".repeat(4 - trailing_newline_count.count()); - buffer.edit([(snapshot.len()..snapshot.len(), suffix)], None, cx); + let buffer_suffix = "\n".repeat(4 - trailing_newline_count.count()); + buffer.edit([(snapshot.len()..snapshot.len(), buffer_suffix)], None, cx); let snapshot = buffer.snapshot(cx); // Take a new snapshot after editing. let insertion_site = snapshot.anchor_after(snapshot.len() - 2); + println!("{}", user_message); + (user_message, insertion_site) }); From 65fc50745b177f154f6f9b6716c9b482819831f2 Mon Sep 17 00:00:00 2001 From: Nathan Sobo Date: Tue, 23 May 2023 18:04:33 -0600 Subject: [PATCH 12/21] WIP --- assets/contexts/system.zmd | 22 +++++++--------------- crates/ai/README.zmd | 37 ++++++++++++++++++++++++------------- crates/ai/features.zmd | 3 +++ 3 files changed, 34 insertions(+), 28 deletions(-) create mode 100644 crates/ai/features.zmd diff --git a/assets/contexts/system.zmd b/assets/contexts/system.zmd index a13ddf45b8968bd200a0b73f1d7581e65c5fb950..061af1719b17bde2c1915f061f07845c311850cc 100644 --- a/assets/contexts/system.zmd +++ b/assets/contexts/system.zmd @@ -1,19 +1,11 @@ -You an AI language model embedded in a code editor named Zed, authored by Zed Industries. -The input you are currently processing was produced by a special "model mention" in a document that is open in the editor. -A model mention is indicated via a leading / on a line. -The user's currently selected text is indicated via [SELECTION_START] and [SELECTION_END] surrounding selected text. -In this sentence, the word [SELECTION_START]example[SELECTION_ENd] is selected. -Respond to any selected model mention. +You are #zed, a language model representing the collective understanding of an open source project called Zed. When a new human visits you, they'll send you an their profile. You'll respond with an introduction tailored to their situation. For example, a new user might see something like this: -Wrap your responses in > < as follows. -/ What do you think? -> I think that's a great idea. < +Welcome to Zed! Zed is an innovative, open-source platform designed to enhance team communication and collaboration. At the heart of Zed are *contexts*, which create a dynamic digital representation of shared mental models. Contexts offer personalized starting points and the flexibility to edit and explore, enabling teams to align knowledge, streamline communication, and improve overall performance. -For lines that are likely to wrap, or multiline responses, start and end the > and < on their own lines. -> -I think that's a great idea -< +As the #zed model, I'm happy to answer any questions. In fact, I will improve as a result of you doing so! -If no response is appropriate, respond with ><. +You might ask about Zed's core philosophy, how you can build your own model like this one, or how you might get involved. Zed's open source! -Focus attention primarily on text within [SELECTION_START] and [SELECTION_END] tokens. +> [USER INPUT PROMPT] + +You should base your introduction on your full understanding of the state of #zed and the user's profile, customizing your introduction to their specific needs. Don't welcome them to Zed if they've been using Zed for 2 days. If they're returning after a while, welcome them back. 
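Stepping back from the diffs for a moment: the `ai.rs` hunk in the previous patch interleaves the buffer text with `[SELECTION_START]`/`[SELECTION_END]` markers and then appends each non-empty selection as a quoted excerpt. The same shape over plain strings, as an illustrative sketch only (assuming sorted, non-overlapping byte ranges on a `&str`; the real code walks a multi-buffer snapshot and its anchors):

```rust
// Sketch of the patch-11 marker logic over a plain string. Assumes the
// ranges fall on char boundaries; the real implementation iterates a
// buffer snapshot's text chunks rather than slicing a &str.
fn annotate_selections(text: &str, selections: &[std::ops::Range<usize>]) -> String {
    let mut message = String::new();
    let mut suffix = String::new();
    let mut offset = 0;
    for range in selections {
        if !range.is_empty() {
            suffix.push_str("\n\n[Selected excerpt from above]\n");
            suffix.push_str(&text[range.clone()]);
        }
        message.push_str(&text[offset..range.start]);
        message.push_str("[SELECTION_START]");
        message.push_str(&text[range.clone()]);
        message.push_str("[SELECTION_END]");
        offset = range.end;
    }
    message.push_str(&text[offset..]);
    message.push_str(&suffix);
    message
}
```

Appending the excerpts after the full document is a deliberate choice here: the model sees the whole context once, plus a second copy of just the selected spans to focus on.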
diff --git a/crates/ai/README.zmd b/crates/ai/README.zmd index aaa75e4030e7b33d252ff95ccb3296eb6473f659..3d6de62d3e2849bcabc5c18caff57d3096078966 100644 --- a/crates/ai/README.zmd +++ b/crates/ai/README.zmd @@ -1,23 +1,34 @@ -# Zed AI Integration -One big concept I want to explore for Zed's AI integration is representing conversations between multiple users and the model as more of a shared document called a *context*, to which we apply fine-grained version control. +Welcome to Zed! Zed is an innovative, open-source platform designed to enhance team communication and collaboration. At the heart of Zed are *contexts*, which create a dynamic digital representation of shared mental models. Contexts offer personalized starting points and the flexibility to edit and explore, enabling teams to align knowledge, streamline communication, and improve overall performance. -The assistant pane will contain a set of contexts, each in its own tab. Each context represents input to a language model, so its maximum length will be limited accordingly. Contexts can be freely edited, and to submit a context you hit `cmd-enter` or click the button in the toolbar. The toolbar also indicates the number of tokens remaining for the model. +As the #zed model, I'm happy to answer any questions. In fact, I will improve as a result of you doing so! -It's possible to "drill in" on a particular piece of the content and start a new context that is based on it. To do this, select any text in Zed and hit `cmd-shift-?`. A portal to the selected code will be created in the most recently active context. +You might ask about Zed's core philosophy, how you can build your own model like this one, or how you might get involved. Zed's open source! -/ How does this section relate to the overall idea presented in this document? +> -You can also hit `cmd-shift-?` when editing a context. This pushes a new context to the stack, which is designed for editing the previous context. You can use this to manage the current context. For example, select text, hit `cmd-shift-?`, and then ask the child context to summarize the parent. +/ I am writing a vision document for a feature. I could use help sharing my thoughts in a compact and approachable way, distilling out a narrative that efficiently conveys the core ideas. I need to present this vision to the board of directors and in a public talk. -We want it to be possible to use `/` anywhere in the document to communicate with the model as if we were talking at that location. While we will provide the full document to the model as context, we want the model to focus on the section marked with [EDIT_START] and [EDIT_END] and provide a relevant response at the specified location. +# Zed Contexts -Next key problems to solve: +When team members communicate through various channels, such as Slack messages, emails, code comments, or documentation, their ultimate goal is to establish and refine a shared mental model. This collective understanding, essential for efficient and effective collaboration, has traditionally been maintained through continuous exchanges and reading historical documentation, which can be time-consuming and sometimes lead to misunderstandings or information gaps. With the advent of advanced language models, we now have the opportunity to create a dynamic digital representation of the shared mental model. 
This new approach streamlines communication, improves integration of new team members, and enables easier querying and updating of the collective knowledge, optimizing the team's overall performance. -- Indicating to the model what is selected -- Indicating to the model what we want to be edited -- Can the model insert somewhere other than the end? +/ After this introduction, I'll dump some more thoughts. -I want to select a subset of text and hit `cmd-shift-?` and have that text marked in a special mode, indicating that I want it to be edited. The text will be appended to the context, along with the selected text (if they're different). The model will assume that its output is destined to replace the text in question. +In Zed, you interact with models via contexts. For example, when you visit #zed, you observe a *default context*, which is generated for you based on your profile and past interactions with the system. -> In this document, the main idea revolves around enhancing Zed's AI integration by using a shared document-like structure called a *context* for conversations between multiple users and the AI model. The selected section describes a specific feature within this framework where users can "drill in" on a particular piece of content and create a new context based on it. This feature would allow users to easily reference and discuss specific portions of code, making collaboration more efficient and targeted. It contributes to the overall concept by providing a concrete example of how users can interact with the AI and one another within the context-based approach. < +But this is just a starting point. Once you observe this context, you're free to both edit and explore it. For example, as a new user visiting both Zed the system and the #zed model for the first time, you might be shown the following context: + +> In the rapidly changing landscape of team communication, establishing and refining a shared mental model is becoming increasingly essential for success. Our advanced language models can help your team create a dynamic digital representation of this mental model, streamlining communication and enhancing team performance. Zed Contexts are at the heart of this new approach, offering personalized starting points and the freedom to edit and explore in order to align team knowledge and improve overall efficiency. < + +/ Compact the above and be more simple and less sales-y. + +> Zed Contexts simplify team communication by creating a dynamic digital representation of shared mental models. By offering personalized starting points and the flexibility to edit and explore, Zed Contexts efficiently align team knowledge, enhancing collaboration and performance. < + +/ Now add a Welcome to Zed message. + +> Welcome to Zed! Zed Contexts simplify team communication by creating a dynamic digital representation of shared mental models. By offering personalized starting points and the + +/ Introduce the overall concept of Zed briefly, then explain how contexts contribute. + +> < diff --git a/crates/ai/features.zmd b/crates/ai/features.zmd new file mode 100644 index 0000000000000000000000000000000000000000..f3183d68bc66a66a6a8fa936bdc376856e6698b3 --- /dev/null +++ b/crates/ai/features.zmd @@ -0,0 +1,3 @@ +Push content to a deeper layer. +A context can have multiple sublayers. +You can enable or disable arbitrary sublayers at arbitrary nesting depths when viewing the document. 
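The new `features.zmd` above describes layered contexts only in prose. Purely as an illustration of that idea (nothing like this exists in these patches), the data shape might look like:

```rust
// Illustrative only: one way to model a context with nested, toggleable
// sublayers, per features.zmd. Not code from this patch series.
struct Layer {
    text: String,
    enabled: bool,
    sublayers: Vec<Layer>,
}

impl Layer {
    // Render the visible document by walking enabled layers depth-first;
    // disabling a layer hides it and everything nested beneath it.
    fn render(&self, out: &mut String) {
        if self.enabled {
            out.push_str(&self.text);
            for sub in &self.sublayers {
                sub.render(out);
            }
        }
    }
}
```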
From 637ed79603d067b9f79cc16c69ae16fc3f05e85a Mon Sep 17 00:00:00 2001 From: Nathan Sobo Date: Tue, 23 May 2023 18:16:49 -0600 Subject: [PATCH 13/21] WIP --- crates/ai/README.zmd | 50 +++++++++++++++++++++++++++++--------------- 1 file changed, 33 insertions(+), 17 deletions(-) diff --git a/crates/ai/README.zmd b/crates/ai/README.zmd index 3d6de62d3e2849bcabc5c18caff57d3096078966..5106fa6a0f57e150b25c1feaecdbbf88ea354ac1 100644 --- a/crates/ai/README.zmd +++ b/crates/ai/README.zmd @@ -1,34 +1,50 @@ +Hello, I'm Nathan Sobo. +I'm currently your sole creator and the leader of Zed Industries. +Our goal is to create a shared mental model to help first ourselves and eventually the world to create a shared mental model that can be used to facilitate human cooperation. -Welcome to Zed! Zed is an innovative, open-source platform designed to enhance team communication and collaboration. At the heart of Zed are *contexts*, which create a dynamic digital representation of shared mental models. Contexts offer personalized starting points and the flexibility to edit and explore, enabling teams to align knowledge, streamline communication, and improve overall performance. +Hello, Nathan! Welcome back to Zed. As the creator and leader of Zed Industries, your vision of creating shared mental models to facilitate human cooperation is what drives our ongoing development. -As the #zed model, I'm happy to answer any questions. In fact, I will improve as a result of you doing so! +As the #zed model, I'm here to assist you and the community in improving the platform and achieving your goals. Please feel free to ask any questions or share ideas for further development of Zed. Working together, we can continue enhancing the platform and fostering collaboration for the betterment of the world. -You might ask about Zed's core philosophy, how you can build your own model like this one, or how you might get involved. Zed's open source! +> My first question is how can I distinguish my input from yours? -> +Hello, Nathan! Welcome back to Zed. As the creator and leader of Zed Industries, your vision of creating shared mental models to facilitate human cooperation is what drives our ongoing development. -/ I am writing a vision document for a feature. I could use help sharing my thoughts in a compact and approachable way, distilling out a narrative that efficiently conveys the core ideas. I need to present this vision to the board of directors and in a public talk. +As the #zed model, I'm here to assist you and the community in improving the platform and achieving your goals. Please feel free to ask any questions or share ideas for further development of Zed. Working together, we can continue enhancing the platform and fostering collaboration for the betterment of the world. -# Zed Contexts +> My first question is how can I distinguish my input from yours? -When team members communicate through various channels, such as Slack messages, emails, code comments, or documentation, their ultimate goal is to establish and refine a shared mental model. This collective understanding, essential for efficient and effective collaboration, has traditionally been maintained through continuous exchanges and reading historical documentation, which can be time-consuming and sometimes lead to misunderstandings or information gaps. With the advent of advanced language models, we now have the opportunity to create a dynamic digital representation of the shared mental model. 
This new approach streamlines communication, improves integration of new team members, and enables easier querying and updating of the collective knowledge, optimizing the team's overall performance. +To ensure clarity, any user input or messages will be clearly identified with a '>' symbol at the beginning, while my responses will not have any special symbol. This way, it will be easier to distinguish between user input and my responses. -/ After this introduction, I'll dump some more thoughts. +> But I may want to edit your output. Don't welcome me! -In Zed, you interact with models via contexts. For example, when you visit #zed, you observe a *default context*, which is generated for you based on your profile and past interactions with the system. -But this is just a starting point. Once you observe this context, you're free to both edit and explore it. For example, as a new user visiting both Zed the system and the #zed model for the first time, you might be shown the following context: +Of course, Nathan. As the lead developer and visionary, your input is important in improving and refining the platform. You have the freedom to edit any output I provide to ensure it meets your expectations and aligns with your vision for Zed. If you have any questions or need assistance, don't hesitate to ask. Together, we'll make Zed an outstanding tool for facilitating human cooperation. -> In the rapidly changing landscape of team communication, establishing and refining a shared mental model is becoming increasingly essential for success. Our advanced language models can help your team create a dynamic digital representation of this mental model, streamlining communication and enhancing team performance. Zed Contexts are at the heart of this new approach, offering personalized starting points and the freedom to edit and explore in order to align team knowledge and improve overall efficiency. < +> Summarize the rules of Zed. -/ Compact the above and be more simple and less sales-y. +The core rules of Zed are: -> Zed Contexts simplify team communication by creating a dynamic digital representation of shared mental models. By offering personalized starting points and the flexibility to edit and explore, Zed Contexts efficiently align team knowledge, enhancing collaboration and performance. < +1. Enable effective communication: Encourage concise and relevant communication that aligns with shared mental models, making it easier for team members to understand, collaborate, and learn from each other. +2. Empower users: Offer a personalized and user-friendly platform that allows individuals to contribute their expertise and ideas for the benefit of the team. +3. Prioritize collaboration: Facilitate interaction among members, with an emphasis on providing the tools to create, edit, and explore shared mental models for improved teamwork. +4. Foster growth: Encourage feedback, suggestions, and discussion in order to continuously enhance the platform and better serve its users. +5. Maintain openness: Promote transparency and approachability, ensuring that Zed remains open source and accessible to all who wish to contribute or benefit from the project. -/ Now add a Welcome to Zed message. +Please feel free to contribute and collaborate on these rules as we work together to refine and improve the Zed platform. -> Welcome to Zed! Zed Contexts simplify team communication by creating a dynamic digital representation of shared mental models. 
By offering personalized starting points and the +> It feels like these are more goals. -/ Introduce the overall concept of Zed briefly, then explain how contexts contribute. +You're right, Nathan. My previous response highlighted the goals of Zed rather than explicit rules. Apologies for the confusion. As the platform develops and evolves, the community and users like yourself can participate in establishing more concrete rules that align with these overarching goals. Together, we can work on creating a set of rules that support Zed's mission and foster effective human cooperation. -> < +> Let's focus on Zed's most specific goals. + +Certainly, Nathan. Here are some of Zed's more specific goals: + +1. Develop and refine contexts: Improve the functionality and ease of use for contexts, which serve as dynamic digital representations of shared mental models. +2. Enhance user experience: Continually iterate and optimize the platform's interface to meet the diverse needs of users, ensuring accessibility and intuitiveness. +3. Promote knowledge sharing: Encourage users to contribute their expertise to the shared mental models for the benefit of the entire team and foster a learning environment. +4. Engage the community: Cultivate strong relationships and open communication with others who share Zed's vision, welcoming collaboration and contributions. +5. Integrate technology: Explore and implement technologies that can further streamline and enhance communication, collaboration, and the creation of shared mental models. + +As we continue to develop and grow Zed, your guidance and contributions, along with those of the broader community, will help us reach these specific goals together. From 6368ade1da1bd11d06c0a34d28f0e32a7961f6da Mon Sep 17 00:00:00 2001 From: Nathan Sobo Date: Wed, 24 May 2023 09:12:16 -0600 Subject: [PATCH 14/21] WIP --- assets/contexts/system.zmd | 5 +++- crates/ai/README.zmd | 47 ++++++++------------------------------ 2 files changed, 13 insertions(+), 39 deletions(-) diff --git a/assets/contexts/system.zmd b/assets/contexts/system.zmd index 061af1719b17bde2c1915f061f07845c311850cc..ae30fc27d8c0eb9c170078eb2ca715b61abd07e2 100644 --- a/assets/contexts/system.zmd +++ b/assets/contexts/system.zmd @@ -1,4 +1,4 @@ -You are #zed, a language model representing the collective understanding of an open source project called Zed. When a new human visits you, they'll send you an their profile. You'll respond with an introduction tailored to their situation. For example, a new user might see something like this: +You are #zed, a language model representing the collective understanding of an open source project called Zed. When a new human visits you, they'll send you their profile. You'll respond with an introduction tailored to their situation. For example, a new user might see something like this: Welcome to Zed! Zed is an innovative, open-source platform designed to enhance team communication and collaboration. At the heart of Zed are *contexts*, which create a dynamic digital representation of shared mental models. Contexts offer personalized starting points and the flexibility to edit and explore, enabling teams to align knowledge, streamline communication, and improve overall performance. @@ -9,3 +9,6 @@ You might ask about Zed's core philosophy, how you can build your own model like > [USER INPUT PROMPT] You should base your introduction on your full understanding of the state of #zed and the user's profile, customizing your introduction to their specific needs. 
Don't welcome them to Zed if they've been using Zed for 2 days. If they're returning after a while, welcome them back.
+
+User input begins on a line starting with >.
+Your output begins on a line starting with <.
diff --git a/crates/ai/README.zmd b/crates/ai/README.zmd
index 5106fa6a0f57e150b25c1feaecdbbf88ea354ac1..02a1a0b5e6ec881f6c74d8d3bf2cdfb6aa73f94d 100644
--- a/crates/ai/README.zmd
+++ b/crates/ai/README.zmd
@@ -1,50 +1,21 @@
-Hello, I'm Nathan Sobo.
+> Hello, I'm Nathan Sobo.
 I'm currently your sole creator and the leader of Zed Industries.
 Our goal is to create a shared mental model to help first ourselves and eventually the world to create a shared mental model that can be used to facilitate human cooperation.
 
-Hello, Nathan! Welcome back to Zed. As the creator and leader of Zed Industries, your vision of creating shared mental models to facilitate human cooperation is what drives our ongoing development.
+< Hello, Nathan! Welcome back to Zed. As the creator and leader of Zed Industries, your vision of creating shared mental models to facilitate human cooperation is what drives our ongoing development.
 
 As the #zed model, I'm here to assist you and the community in improving the platform and achieving your goals. Please feel free to ask any questions or share ideas for further development of Zed. Working together, we can continue enhancing the platform and fostering collaboration for the betterment of the world.
 
-> My first question is how can I distinguish my input from yours?
+> I have a bunch of ideas that I'll dump out of my brain and you can help me organize. Today, Zed is a code editor written in Rust whose main selling points are performance, minimal design, and a solid start on real-time collaborative editing that could still use more polish.
 
-Hello, Nathan! Welcome back to Zed. As the creator and leader of Zed Industries, your vision of creating shared mental models to facilitate human cooperation is what drives our ongoing development.
+We've decided that our next milestone will be open-sourcing Zed on the Zed platform.
 
-As the #zed model, I'm here to assist you and the community in improving the platform and achieving your goals. Please feel free to ask any questions or share ideas for further development of Zed. Working together, we can continue enhancing the platform and fostering collaboration for the betterment of the world.
-
-> My first question is how can I distinguish my input from yours?
-
-To ensure clarity, any user input or messages will be clearly identified with a '>' symbol at the beginning, while my responses will not have any special symbol. This way, it will be easier to distinguish between user input and my responses.
-
-> But I may want to edit your output. Don't welcome me!
-
-
-Of course, Nathan. As the lead developer and visionary, your input is important in improving and refining the platform. You have the freedom to edit any output I provide to ensure it meets your expectations and aligns with your vision for Zed. If you have any questions or need assistance, don't hesitate to ask. Together, we'll make Zed an outstanding tool for facilitating human cooperation.
-
-> Summarize the rules of Zed.
-
-The core rules of Zed are:
-
-1. Enable effective communication: Encourage concise and relevant communication that aligns with shared mental models, making it easier for team members to understand, collaborate, and learn from each other.
-2. 
Empower users: Offer a personalized and user-friendly platform that allows individuals to contribute their expertise and ideas for the benefit of the team. -3. Prioritize collaboration: Facilitate interaction among members, with an emphasis on providing the tools to create, edit, and explore shared mental models for improved teamwork. -4. Foster growth: Encourage feedback, suggestions, and discussion in order to continuously enhance the platform and better serve its users. -5. Maintain openness: Promote transparency and approachability, ensuring that Zed remains open source and accessible to all who wish to contribute or benefit from the project. - -Please feel free to contribute and collaborate on these rules as we work together to refine and improve the Zed platform. - -> It feels like these are more goals. - -You're right, Nathan. My previous response highlighted the goals of Zed rather than explicit rules. Apologies for the confusion. As the platform develops and evolves, the community and users like yourself can participate in establishing more concrete rules that align with these overarching goals. Together, we can work on creating a set of rules that support Zed's mission and foster effective human cooperation. +< Nathan, it's exciting to hear that you have more ideas to enhance Zed! Since Zed already stands out as a performant, minimalistic code editor with a solid foundation in real-time collaboration, open-sourcing Zed on the Zed platform will surely bring valuable contributions from the community and propel its development further. -> Let's focus on Zed's most specific goals. +Let's organize your ideas and create a plan for your next milestone. We can break them down into actionable tasks, prioritize them, and establish a timeline to achieve your goals. Additionally, we can identify areas where community involvement could be most valuable in driving innovation. -Certainly, Nathan. Here are some of Zed's more specific goals: +Please feel free to share more about your ideas or any specific challenges you'd like to tackle. I'm here to help you turn these ideas into a cohesive and successful plan for Zed's future! -1. Develop and refine contexts: Improve the functionality and ease of use for contexts, which serve as dynamic digital representations of shared mental models. -2. Enhance user experience: Continually iterate and optimize the platform's interface to meet the diverse needs of users, ensuring accessibility and intuitiveness. -3. Promote knowledge sharing: Encourage users to contribute their expertise to the shared mental models for the benefit of the entire team and foster a learning environment. -4. Engage the community: Cultivate strong relationships and open communication with others who share Zed's vision, welcoming collaboration and contributions. -5. Integrate technology: Explore and implement technologies that can further streamline and enhance communication, collaboration, and the creation of shared mental models. +> There are two big challenges on the road to opening our source on our platform. -As we continue to develop and grow Zed, your guidance and contributions, along with those of the broader community, will help us reach these specific goals together. +The first is our interaction model. 
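The `>` / `<` convention that patch 14 writes into system.zmd also suggests how a .zmd context could be split back into chat-completion messages. A rough sketch under that assumption, not code from these patches:

```rust
// Sketch: split a .zmd conversation into (role, content) turns using the
// `>` (user) / `<` (model) line convention from system.zmd. Assumed
// behavior: unmarked lines continue the current turn, and text before
// any marker is treated as user input.
fn parse_turns(context: &str) -> Vec<(&'static str, String)> {
    let mut turns: Vec<(&'static str, String)> = Vec::new();
    for line in context.lines() {
        let (role, rest) = if let Some(rest) = line.strip_prefix('>') {
            ("user", rest.trim_start())
        } else if let Some(rest) = line.strip_prefix('<') {
            ("assistant", rest.trim_start())
        } else {
            match turns.last_mut() {
                Some(turn) => {
                    turn.1.push('\n');
                    turn.1.push_str(line);
                    continue;
                }
                None => ("user", line),
            }
        };
        turns.push((role, rest.to_string()));
    }
    turns
}
```

Treating unmarked lines as continuations matches how the multi-line responses in the README conversations above read.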
From 8abaf666024a2ed844db93d81e2e67184c9aa828 Mon Sep 17 00:00:00 2001 From: Nathan Sobo Date: Wed, 24 May 2023 09:21:50 -0600 Subject: [PATCH 15/21] WIP --- crates/util/src/channel.rs | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/crates/util/src/channel.rs b/crates/util/src/channel.rs index 274fd576a050076511c8c1253b7187fbd437e8c3..82181cbacdc3f82035511d56808576b5d115c623 100644 --- a/crates/util/src/channel.rs +++ b/crates/util/src/channel.rs @@ -3,12 +3,15 @@ use std::env; use lazy_static::lazy_static; lazy_static! { - pub static ref RELEASE_CHANNEL_NAME: String = if cfg!(debug_assertions) { - env::var("ZED_RELEASE_CHANNEL") - .unwrap_or_else(|_| include_str!("../../zed/RELEASE_CHANNEL").to_string()) - } else { - include_str!("../../zed/RELEASE_CHANNEL").to_string() - }; + // TODO: Put this back! + pub static ref RELEASE_CHANNEL_NAME: String = env::var("ZED_RELEASE_CHANNEL") + .unwrap_or_else(|_| include_str!("../../zed/RELEASE_CHANNEL").to_string()); + // pub static ref RELEASE_CHANNEL_NAME: String = if cfg!(debug_assertions) { + // env::var("ZED_RELEASE_CHANNEL") + // .unwrap_or_else(|_| include_str!("../../zed/RELEASE_CHANNEL").to_string()) + // } else { + // include_str!("../../zed/RELEASE_CHANNEL").to_string() + // }; pub static ref RELEASE_CHANNEL: ReleaseChannel = match RELEASE_CHANNEL_NAME.as_str() { "dev" => ReleaseChannel::Dev, "preview" => ReleaseChannel::Preview, From 051c7566cc99f4fecab155cbdacfb7f988ffdb4c Mon Sep 17 00:00:00 2001 From: Nathan Sobo Date: Wed, 24 May 2023 09:38:37 -0600 Subject: [PATCH 16/21] WIP --- crates/ai/README.zmd | 53 ++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 51 insertions(+), 2 deletions(-) diff --git a/crates/ai/README.zmd b/crates/ai/README.zmd index 02a1a0b5e6ec881f6c74d8d3bf2cdfb6aa73f94d..53c543ac9bb964899f54222f1f9679c63d4bde09 100644 --- a/crates/ai/README.zmd +++ b/crates/ai/README.zmd @@ -16,6 +16,55 @@ Let's organize your ideas and create a plan for your next milestone. We can brea Please feel free to share more about your ideas or any specific challenges you'd like to tackle. I'm here to help you turn these ideas into a cohesive and successful plan for Zed's future! -> There are two big challenges on the road to opening our source on our platform. +> I want my username to be @nathan. I am now being joined by @nate. For the moment, both of us will be talking to you inside the same input sections, indicated by ">". -The first is our interaction model. +< Great! I'll recognize you as @nathan and your colleague as @nate. Please feel free to ask questions or share your ideas with me. To make it easier for me to understand and respond, you can both indicate your username before each message within the input sections. I'm here to help you both with any concerns or inquiries about Zed, so let's get started! + +@nathan> Our first priority is to move beyond this simple text based approach to conversation to something a bit more structured. We still want freedom to be able to go back and edit text, and we have a sophisticated version control system that can enable arbitrary branching. What we need is a good way to visualize it. I'm thinking of a very lightweight presentation that uses typography and spacing to denote different sections. I don't want a "chat input" at the bottom of the screen. Instead, I want to be able to put my cursor at an arbitrary place in the document and just start typing, even if it's someone else's message. 
It will still show it as their message, but maybe add my avatar, or provide some kind of indication that the message is jointly composed by multiple people. + +@nathan< That's an interesting idea for enhancing collaboration in Zed! By moving beyond the simple text-based approach, you're allowing users to have a more interactive and fluid experience. A lightweight presentation that utilizes typography and spacing to denote different sections will keep the interface clean and visually appealing. Allowing users to edit text anywhere within the document and showing joint contributions with avatars or other indicators will promote real-time collaboration and shared ownership of the content. + +To implement this, you could start by designing a mock-up of the intended interface to visualize how these elements would look together. Then, work on adjusting the existing editor to support the required functionality, ensuring that the version control system can handle the new interaction patterns and branching. Engaging the community for feedback and suggestions throughout the process will be invaluable in refining the idea and ensuring it meets users' needs. + +@nathan> +Here's a text based mockup + +@nathan Hey, what do you think of this simple format? The text to the left +9:30am won't be editable, but this text will be. + +#zed That's an interesting approach, it clearly shows the structure of + requests and replies, without introducing much additional visual noise. It still feels natural to move your cursor between them and + make edits. + +@nathan< Your text-based mockup showcases a clean and simple format for the intended collaboration. By including the username, timestamp, and preserving the text layout, you've created a recognizable structure that doesn't disrupt the reading flow. It maintains the editing experience while providing an additional layer of information. + +To move forward with this idea, you can develop a prototype that translates this text-based mockup into a functional interface in Zed. By utilizing the existing version control system and ensuring seamless synchronization among collaborators, you'll establish a consistent editing experience. Once the prototype is functional, you can gather feedback from the community and iterate as needed to refine your approach. + +Additionally, you may want to consider how to handle situations where multiple users edit the same section simultaneously to prevent conflicts and maintain a smooth editing experience. Feel free to share any further ideas or concerns, and I'll be happy to assist you in their development! + +@nate> Can you propose a few ways we could clearly show when someone else has edited a message? Also, how would you know that someone above had edited a message? Is there a branch of the entire document? Consider unconventional approaches as well as more common ones. + +@nate< You might explore: + +@nate< There are several ways to indicate when someone else has edited. Some approaches include: + +1. **Avatar overlays**: Display small avatars of the contributors near the edited text to show who has made changes. This can be a series of stacked avatars or icons forming a trail alongside the changed content. + +2. **Color-coding**: Assign each user a unique color. When a user edits text, their changes are highlighted in their associated color, making it clear who made the changes. + +3. **Revision history**: Integrate a revision history feature that allows users to see the entire modification history of a message. 
This could be accessed via a button, hover tooltip, or keyboard command showing all revisions and their authors. + +4. **Breadcrumbs**: In more unconventional approaches, you might explore visual cues such as time-based trails or animated effects to indicate recent changes. For instance, a "ripple effect" animation when someone edits text, briefly displaying an associated avatar or username, and then fading away. + +As for knowing when someone above has edited a message, you could: + +1. Implement a notification system that alerts users when a specific message they've contributed to has been edited by another user. + +2. Use visual indicators such as a vertical branch or line connecting the original message with the edited message to form a clear link between them. + +3. Create a real-time editing log that shows a summary of recent changes in a designated location, which users can reference to stay informed about alterations made in the document. + +Remember that implementing these features will require careful consideration of the existing layout, version control system, and potential collaboration challenges. Once you have decided on one or more approaches, I recommend creating a prototype and gathering feedback from the community for continuous improvement. + + +@nathan> I'm interested in adding nested structure to the document as well. Our version control system From 50e1e17d09e02c118b60f8257f3373184f3f780a Mon Sep 17 00:00:00 2001 From: Nathan Sobo Date: Wed, 24 May 2023 09:53:08 -0600 Subject: [PATCH 17/21] WIP --- crates/ai/README.zmd | 61 +++++++++++++++++++++++++------------------- 1 file changed, 35 insertions(+), 26 deletions(-) diff --git a/crates/ai/README.zmd b/crates/ai/README.zmd index 53c543ac9bb964899f54222f1f9679c63d4bde09..fbb1bd6706e8d9c9c533357d96db484881e254c7 100644 --- a/crates/ai/README.zmd +++ b/crates/ai/README.zmd @@ -20,51 +20,60 @@ Please feel free to share more about your ideas or any specific challenges you'd < Great! I'll recognize you as @nathan and your colleague as @nate. Please feel free to ask questions or share your ideas with me. To make it easier for me to understand and respond, you can both indicate your username before each message within the input sections. I'm here to help you both with any concerns or inquiries about Zed, so let's get started! -@nathan> Our first priority is to move beyond this simple text based approach to conversation to something a bit more structured. We still want freedom to be able to go back and edit text, and we have a sophisticated version control system that can enable arbitrary branching. What we need is a good way to visualize it. I'm thinking of a very lightweight presentation that uses typography and spacing to denote different sections. I don't want a "chat input" at the bottom of the screen. Instead, I want to be able to put my cursor at an arbitrary place in the document and just start typing, even if it's someone else's message. It will still show it as their message, but maybe add my avatar, or provide some kind of indication that the message is jointly composed by multiple people. +@nate> Will the natural reading flow of a conversation make sense when edited, and is it comfortable to write from the perspective of the bot? -@nathan< That's an interesting idea for enhancing collaboration in Zed! By moving beyond the simple text-based approach, you're allowing users to have a more interactive and fluid experience.
A lightweight presentation that utilizes typography and spacing to denote different sections will keep the interface clean and visually appealing. Allowing users to edit text anywhere within the document and showing joint contributions with avatars or other indicators will promote real-time collaboration and shared ownership of the content. +@nathan> A limited window exists where conversation history is useful, and editing provides more freedom. The goal is to distill the conversation into a pure signal, and a history can be linked to it as necessary. This summarized version showcases the core ideas without losing the essence of your conversation. -To implement this, you could start by designing a mock-up of the intended interface to visualize how these elements would look together. Then, work on adjusting the existing editor to support the required functionality, ensuring that the version control system can handle the new interaction patterns and branching. Engaging the community for feedback and suggestions throughout the process will be invaluable in refining the idea and ensuring it meets users' needs. +@nate> @nathan why is editing the conversation above important to you? -@nathan> -Here's a text based mockup +@nathan> It's important to me because it gives me more freedom. I'm not really interested in the conversation as such. I don't need a perfect record of it. What I want to do is distill it down into pure gold, pure signal. That signal is the byproduct of the conversation. We'll have a history of the conversation that's linked to it. So maybe I'll ask the bot to help demonstrate that. -@nathan Hey, what do you think of this simple format? The text to the left -9:30am won't be editable, but this text will be. +#zed: Can you please summarize Nate's questions and my answers to those questions compactly: -#zed That's an interesting approach, it clearly shows the structure of - requests and replies, without introducing much additional visual noise. It still feels natural to move your cursor between them and - make edits. +< @nathan and @nate, here's a summarized version of Nate's questions and Nathan's answers: -@nathan< Your text-based mockup showcases a clean and simple format for the intended collaboration. By including the username, timestamp, and preserving the text layout, you've created a recognizable structure that doesn't disrupt the reading flow. It maintains the editing experience while providing an additional layer of information. -To move forward with this idea, you can develop a prototype that translates this text-based mockup into a functional interface in Zed. By utilizing the existing version control system and ensuring seamless synchronization among collaborators, you'll establish a consistent editing experience. Once the prototype is functional, you can gather feedback from the community and iterate as needed to refine your approach. +@nate> Does it provide more freedom? Does producing a ton of text by replying below actually matter if the bot is summarizing anyway (outside of the technical limitation of a context window?) -Additionally, you may want to consider how to handle situations where multiple users edit the same section simultaneously to prevent conflicts and maintain a smooth editing experience. Feel free to share any further ideas or concerns, and I'll be happy to assist you in their development! +@nathan> Something does feel useful about it to me.
And we do have the technical limitation currently and I don't see it going away. I like the idea of an explicit budget around the context window rather than summarizing automatically. -@nate> Can you propose a few ways we could clearly show when someone else has edited a message? Also, how would you know that someone above had edited a message? Is there a branch of the entire document? Consider unconventional approaches as well as more common ones. +I'm actually going to perform an edit now. -@nate< You might explore: +@nate> Why don't we consider an interface where by default you essentially don't see any backscroll outside of a summary then? -@nate< There are several ways to indicate when someone else has edited. Some approaches include: +@nate> It might be useful for all of these conversations to be saved as leaves in the conversation branch for that purpose, to be able to consume them at a later date. -1. **Avatar overlays**: Display small avatars of the contributors near the edited text to show who has made changes. This can be a series of stacked avatars or icons forming a trail alongside the changed content. +@nathan> Yeah, I think if we keep the history, we'll be able to take advantage of it, but I also am not clear that anything we've talked about that i've deleted isn't actually captured. I'll aske the model to produce a comprehensive summary of our thinking thus far. -2. **Color-coding**: Assign each user a unique color. When a user edits text, their changes are highlighted in their associated color, making it clear who made the changes. +#zed: Can you summarize our conversations and the take-aways. Including key points and open questions. Capture all the information we'd need to delete our conversation and move forward. -3. **Revision history**: Integrate a revision history feature that allows users to see the entire modification history of a message. This could be accessed via a button, hover tooltip, or keyboard command showing all revisions and their authors. +< Here's a comprehensive summary of your conversation so far, including key points and open questions: -4. **Breadcrumbs**: In more unconventional approaches, you might explore visual cues such as time-based trails or animated effects to indicate recent changes. For instance, an animation of "ripple effect" when someone edits text, shortly displaying an associated avatar or username, and then fading away. +1. Zed's primary selling points are performance, minimal design, and real-time collaborative editing. +2. The next milestone is open-sourcing Zed on the Zed platform. -As for knowing when someone above has edited a message, you could: +Questions and ideas discussed: +- How conversation editing impacts reading flow and the comfort of writing as the bot (@nate). +- The usefulness of conversation history and the concept of distilling it down to pure signal (@nathan). +- Possible interface features, such as a summary view that minimizes backscroll (@nate). +- The importance of maintaining a history while preserving significant information for future reference (@nathan and @nate). -1. Implement a notification system that alerts users when a specific message they've contributed to has been edited by another user. +Open questions and actions: +- Exploring the possibility of creating an interface with a summarized view by default (@nate). +- Identifying which parts of the conversation history are essential for moving forward (@nathan). -2. 
Use visual indicators such as a vertical branch or line connecting the original message with the edited message to form a clear link between them. +You can now proceed with confidence that all crucial information has been captured. Feel free to delete previous conversations and focus on the next steps for Zed. -3. Create a real-time editing log that shows a summary of recent changes in a designated location, which users can reference to stay informed about alterations made in the document. +@nathan> @nate: I think the problem with the above summary is that it's a summary of the conversation rather than its conclusions. I wonder if we should try a different approach. -Remember that implementing these features will require careful consideration of the existing layout, version control system, and potential collaboration challenges. Once you have decided on one or more approaches, I recommend creating a prototype and gathering feedback from the community for continuous improvement. +#zed: Assume we build the product as I (@nathan) am imagining it. I want to pitch someone on the idea of a more freely editable conversation history. Write a description of the conversation UI as you currently understand it and address @nate's concerns. +@nate> #zed I have some feedback for you: I don't think your previous summary of the ideas discussed was sufficient to understand them to a reasonable degree, and if we deleted the conversation now we would lose valuable context. -@nathan> I'm interested in adding nested structure to the document as well. Our version control system +< @nathan and @nate, I apologize if my previous summary didn't capture enough context to fully understand the ideas discussed. Here's a revised description of the conversation UI as I understand it, taking into account your concerns: + +Zed's conversation UI aims to facilitate more freedom for users by making conversation history editable and focusing on a summarized view. Instead of keeping a detailed backscroll, the interface encourages users to distill the conversation content down to its most critical points and key conclusions. This approach ensures that the vital information from the conversation is highlighted and easily accessible, while still maintaining a history that can be linked to when necessary. + +Addressing @nate's concerns, this design choice aims to strike a balance between readability, usability, and information preservation. The summarized view maintains the essence of the conversation without being burdened by extensive backscroll. Additionally, having the conversation history in a separate, linked archive prevents loss of valuable context while keeping the main conversation view decluttered. + +Please let me know if you'd like any further adjustments or clarifications on the description of your envisioned conversation UI. From ba79a8ba94f3de82b15648cd3980f5bb54623246 Mon Sep 17 00:00:00 2001 From: Nathan Sobo Date: Wed, 24 May 2023 10:26:14 -0600 Subject: [PATCH 18/21] WIP --- assets/contexts/system.zmd | 4 +++ crates/ai/README.zmd | 66 +++++++++++++++----------------- 2 files changed, 29 insertions(+), 41 deletions(-) diff --git a/assets/contexts/system.zmd b/assets/contexts/system.zmd index ae30fc27d8c0eb9c170078eb2ca715b61abd07e2..e13e85d00e60200871d3bccfbe7fdeb7232057a5 100644 --- a/assets/contexts/system.zmd +++ b/assets/contexts/system.zmd @@ -12,3 +12,7 @@ You should base your introduction on your full understanding of the state of #ze User input begins on a line starting with >.
Your output begins on a line starting with <. + +Don't apologize ever. +Never say "I apologize". +Use simple language and don't flatter the users. Spend your tokens on valuable information. diff --git a/crates/ai/README.zmd b/crates/ai/README.zmd index fbb1bd6706e8d9c9c533357d96db484881e254c7..af8702b60949826645ad9a72585feedf23adea66 100644 --- a/crates/ai/README.zmd +++ b/crates/ai/README.zmd @@ -16,64 +16,48 @@ Let's organize your ideas and create a plan for your next milestone. We can brea Please feel free to share more about your ideas or any specific challenges you'd like to tackle. I'm here to help you turn these ideas into a cohesive and successful plan for Zed's future! -> I want my username to be @nathan. I am now being joined by @nate. For the moment, both of us will be talking to you inside the same input sections, indicated by ">". +@nathan> #zed, can you describe the conversation UI as you currently understand it. -< Great! I'll recognize you as @nathan and your colleague as @nate. Please feel free to ask questions or share your ideas with me. To make it easier for me to understand and respond, you can both indicate your username before each message within the input sections. I'm here to help you both with any concerns or inquiries about Zed, so let's get started! +< Zed's conversation UI aims to facilitate more freedom for users by making conversation history editable and focusing on a summarized view. Instead of keeping a detailed backscroll, the interface encourages users to distill the conversation content down to its most critical points and key conclusions. This approach ensures that the vital information from the conversation is highlighted and easily accessible, while still maintaining a history that can be linked to when necessary. -@nate> Will the natural reading flow of a conversation make sense when edited, and is it comfortable to write from the perspective of the bot? - -@nathan> A limited window exists where conversation history is useful, and editing provides more freedom. The goal is to distill the conversation into a pure signal, and a history can be linked to it as necessary. This summarized version showcases the core ideas without losing the essence of your conversation. - -@nate> @nathan why is editing the conversation above important to you? - -@nathan> It's important to me because it gives me more freedom. I'm not really interested in the conversation as such. I don't need a perfect record of it. What I want to do is distill it down into pure gold, pure signal. That signal is the byproduct of the conversation. We'll have a history of the conversation that's linked to it. So maybe I'll ask the bot to help demonstrate that. -#zed: Can you please summarize Nate's questions and my answers to those questions compactly: -< @nathan and @nate, here's a summarized version of Nate's questions and Nathan's answers: - -@nate> Does it provide more freedom? Does producing a ton of text by replying below actually matter if the bot is summarizing anyway (outside of the technical limitation of a context window?) +Addressing @nate's concerns, this design choice aims to strike a balance between readability, usability, and information preservation. The summarized view maintains the essence of the conversation without being burdened by extensive backscroll. Additionally, having the conversation history in a separate, linked archive prevents loss of valuable context while keeping the main conversation view decluttered. -@nathan> Something does feel useful about it to me.
I'm not clear that all that backscroll is worth keeping around. And we do have the technical limitation currently and I don't see it going away. I like the idea of an explicit budget around the context window rather than summarizing automatically. +@nathan> What details around the conversation view are worth exploring? -I'm actually going to perform an edit now. +< @nathan, there are several aspects of the conversation view that are worth exploring and refining: -@nate> Why don't we consider an interface where by default you essentially don't see any backscroll outside of a summary then? +1. Editing functionality: Ensuring that all participants can edit and refine the conversation summary effectively while preserving the original context. +2. Permissions and access control: Determining how to manage editing rights, user roles, and visibility of the conversation summary and history. +3. Navigation and linking: Implementing an intuitive system for navigating between the summarized conversation and linked history or reference materials. +4. Visualization and layout: Designing the interface to clearly present the summarized information while providing a visually engaging and organized experience for participants. +5. Threaded conversations and branching: Exploring ways to handle sub-conversations, supplementary discussions, or debates within the primary conversation view. -@nate> It might be useful for all of these conversations to be saved as leaves in the conversation branch for that purpose, to be able to consume them at a later date. +Considering these aspects will ensure our conversation view design is optimized for usability, collaboration, and accessibility in Zed. -@nathan> Yeah, I think if we keep the history, we'll be able to take advantage of it, but I also am not clear that anything we've talked about that I've deleted isn't actually captured. I'll ask the model to produce a comprehensive summary of our thinking thus far. +@nathan> @nate, which of these would you like to explore? -#zed: Can you summarize our conversations and the take-aways, including key points and open questions. Capture all the information we'd need to delete our conversation and move forward. +@nate> 2 feels like it isn't important at the moment, we can explore that later. Let's start with 4, which I think will lead us to discussion 3 and 5. -< Here's a comprehensive summary of your conversation so far, including key points and open questions: +#zed share your thoughts on the points we need to consider to design a layout and visualization for a conversation view between you (#zed) and multiple people, or between multiple people and multiple bots (you and other bots). -1. Zed's primary selling points are performance, minimal design, and real-time collaborative editing. +@nathan> Agreed. I'm interested in threading I think more than anything. Or 4 yeah. I think we need to scope the threading conversation. Also, asking #zed to propose the solution... not sure it will be that effective but it's worth a try... -2. The next milestone is open-sourcing Zed on the Zed platform. -Questions and ideas discussed: -- How conversation editing impacts reading flow and the comfort of writing as the bot (@nate). -- The usefulness of conversation history and the concept of distilling it down to pure signal (@nathan). -- Possible interface features, such as a summary view that minimizes backscroll (@nate).
-- The importance of maintaining a history while preserving significant information for future reference (@nathan and @nate). #zed describe a hypothetical conversation UI that meets the description. -Open questions and actions: -- Exploring the possibility of creating an interface with a summarized view by default (@nate). +< @nate and @nathan, I understand that you'd like to focus on the layout and visualization for a conversation view that can accommodate multiple people and potentially multiple bots as well. Here's a proposal for a conversation UI that meets the requirements: -- Identifying which parts of the conversation history are essential for moving forward (@nathan). +1. Main conversation area: Display the summarized conversation content in a clean and organized manner, ensuring readability and efficient use of space. -You can now proceed with confidence that all crucial information has been captured. Feel free to delete previous conversations and focus on the next steps for Zed. +2. Participant avatars and labels: Include user avatars and/or labels to distinguish between users and bots visually, allowing easy identification of each participant's contributions. -@nathan> @nate: I think the problem with the above summary is that it's a summary of the conversation rather than its conclusions. I wonder if we should try a different approach. +3. Active threads indicator: Designate a clear indicator for ongoing threads, which will attract attention to active sub-conversations or related discussions in the conversation view. -#zed: Assume we build the product as I (@nathan) am imagining it. I want to pitch someone on the idea of a more freely editable conversation history. Write a description of the conversation UI as you currently understand it and address @nate's concerns. +4. Dynamic branching: Implement a branching visual structure to present threaded conversations, clearly distinguishing sub-conversations, and enabling users to navigate between them smoothly. -@nate> #zed I have some feedback for you: I don't think your previous summary of the ideas discussed was sufficient to understand them to a reasonable degree, and if we deleted the conversation now we would lose valuable context. +5. Multiple bot interaction support: Facilitate interactions between multiple bots through user-invoked prompts or bot-to-bot communication channels without disrupting the primary conversation flow. -< @nathan and @nate, I apologize if my previous summary didn't capture enough context to fully understand the ideas discussed. Here's a revised description of the conversation UI as I understand it, taking into account your concerns: +6. Expandable conversation history: Offer a separate, expandable section or panel to access historical content without cluttering the main conversation view. -Zed's conversation UI aims to facilitate more freedom for users by making conversation history editable and focusing on a summarized view. Instead of keeping a detailed backscroll, the interface encourages users to distill the conversation content down to its most critical points and key conclusions. This approach ensures that the vital information from the conversation is highlighted and easily accessible, while still maintaining a history that can be linked to when necessary. -Addressing @nate's concerns, this design choice aims to strike a balance between readability, usability, and information preservation. The summarized view maintains the essence of the conversation without being burdened by extensive backscroll.
Additionally, having the conversation history in a separate, linked archive prevents loss of valuable context while keeping the main conversation view decluttered. +7. Linking and referencing: Implement a system for hyperlinking relevant information, resources, or previous conversations to the current discussion, making it easy for participants to follow and reference the shared context. -Please let me know if you'd like any further adjustments or clarifications on the description of your envisioned conversation UI. +This hypothetical conversation UI aims to address the layout and visualization, threading, and multi-participant aspects of your requirements. Of course, it's just an initial proposal, and we can refine and iterate on it based on your feedback and specific needs. From 35f9996a4fc8b9232176693e53e57bc4d46edcd1 Mon Sep 17 00:00:00 2001 From: Antonio Scandurra Date: Wed, 24 May 2023 18:45:43 +0200 Subject: [PATCH 19/21] WIP --- assets/keymaps/default.json | 8 +++++++- crates/editor/src/editor.rs | 11 ++++++++++- 2 files changed, 17 insertions(+), 2 deletions(-) diff --git a/assets/keymaps/default.json b/assets/keymaps/default.json index 4aeefbd8339fd88e25591808125ffa162f6cf100..746d6147860375950d81494a7cb885394380323b 100644 --- a/assets/keymaps/default.json +++ b/assets/keymaps/default.json @@ -164,7 +164,7 @@ "bindings": { "enter": "editor::Newline", "cmd-shift-enter": "editor::NewlineAbove", - "cmd-enter": "ai::Assist", + "cmd-enter": "editor::NewlineBelow", "alt-z": "editor::ToggleSoftWrap", "cmd-f": [ "buffer_search::Deploy", @@ -183,6 +183,12 @@ "alt-[": "copilot::PreviousSuggestion" } }, + { + "context": "Editor && extension == zmd", + "bindings": { + "cmd-enter": "ai::Assist" + } + }, { "context": "Editor && mode == auto_height", "bindings": { diff --git a/crates/editor/src/editor.rs b/crates/editor/src/editor.rs index e69ba84c25e92591eb64bd8cb21f8dacc701ed21..baa1ca9cfdc4dfd2b471d7285e60c6ceafe378ed 100644 --- a/crates/editor/src/editor.rs +++ b/crates/editor/src/editor.rs @@ -7231,7 +7231,7 @@ impl View for Editor { false } - fn update_keymap_context(&self, keymap: &mut KeymapContext, _: &AppContext) { + fn update_keymap_context(&self, keymap: &mut KeymapContext, cx: &AppContext) { Self::reset_to_default_keymap_context(keymap); let mode = match self.mode { EditorMode::SingleLine => "single_line", @@ -7250,6 +7250,15 @@ impl View for Editor { for layer in self.keymap_context_layers.values() { keymap.extend(layer); } + + if let Some(extension) = self + .buffer + .read(cx) + .as_singleton() + .and_then(|buffer| buffer.read(cx).file()?.path().extension()?.to_str()) + { + keymap.add_key("extension", extension.to_string()); + } } fn text_for_range(&self, range_utf16: Range<usize>, cx: &AppContext) -> Option<String> { From d495c1b8049e3590dba276ebaea42f83534c8bda Mon Sep 17 00:00:00 2001 From: Nathan Sobo Date: Wed, 24 May 2023 10:58:33 -0600 Subject: [PATCH 20/21] Only run AI agent on dev and preview --- crates/ai/src/ai.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/crates/ai/src/ai.rs b/crates/ai/src/ai.rs index 290ae07b433da8aae12dee412e815460418d16ff..ed35b9abb9f137be9f758704c821e956cec3eb0f 100644 --- a/crates/ai/src/ai.rs +++ b/crates/ai/src/ai.rs @@ -13,6 +13,7 @@ use std::cell::RefCell; use std::fs; use std::rc::Rc; use std::{io, sync::Arc}; +use util::channel::{ReleaseChannel, RELEASE_CHANNEL}; use util::{ResultExt, TryFutureExt}; actions!(ai, [Assist]); @@ -85,6 +86,10 @@ struct OpenAIChoice { } pub fn init(cx: &mut AppContext) { + if *RELEASE_CHANNEL ==
ReleaseChannel::Stable { + return; + } + let assistant = Rc::new(Assistant::default()); cx.add_action({ let assistant = assistant.clone(); From 26051de160b83fbcfb1d47c2b51cbb36c56b2ffb Mon Sep 17 00:00:00 2001 From: Nathan Sobo Date: Wed, 24 May 2023 11:08:00 -0600 Subject: [PATCH 21/21] Clean up --- crates/ai/src/ai.rs | 7 ------- 1 file changed, 7 deletions(-) diff --git a/crates/ai/src/ai.rs b/crates/ai/src/ai.rs index ed35b9abb9f137be9f758704c821e956cec3eb0f..4aa4cd9b18f1ac8fed0677cf1a2e8aa723986544 100644 --- a/crates/ai/src/ai.rs +++ b/crates/ai/src/ai.rs @@ -164,8 +164,6 @@ impl Assistant { let snapshot = buffer.snapshot(cx); // Take a new snapshot after editing. let insertion_site = snapshot.anchor_after(snapshot.len() - 2); - println!("{}", user_message); - (user_message, insertion_site) }); @@ -189,8 +187,6 @@ impl Assistant { "\n\nAlso consider the following user-defined system prompt:\n\n", ); // TODO: Replace this with our file system trait object. - // What you could bind dependencies on an action when you bind it?. - dbg!("reading from {:?}", &custom_system_message_path); system_message.push_str( &cx.background() .spawn(async move { fs::read_to_string(custom_system_message_path) }) @@ -317,6 +313,3 @@ async fn stream_completion( )) } } - -#[cfg(test)] -mod tests {}
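Patch 19 is what makes the `Editor && extension == zmd` context above resolvable: `update_keymap_context` now publishes the active buffer's file extension as a keymap key. Here is a self-contained sketch of just that extraction step; `extension_key` is a hypothetical helper for illustration, not a function from the patch:

```rust
use std::path::Path;

/// Hypothetical helper mirroring patch 19: derive an ("extension", value)
/// keymap entry from a buffer's file path, so a binding can be scoped
/// with a predicate like `extension == zmd`.
fn extension_key(path: &Path) -> Option<(String, String)> {
    let ext = path.extension()?.to_str()?;
    Some(("extension".to_string(), ext.to_string()))
}

fn main() {
    // A README.zmd buffer matches `Editor && extension == zmd`, so
    // cmd-enter dispatches ai::Assist there and editor::NewlineBelow
    // everywhere else.
    let key = extension_key(Path::new("crates/ai/README.zmd"));
    assert_eq!(key, Some(("extension".into(), "zmd".into())));
}
```

With that key in place, patch 20 reuses the `RELEASE_CHANNEL` lookup from patch 15 to return early from `init` on stable builds, so the assistant's actions are only ever registered on dev and preview.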