diff --git a/Cargo.lock b/Cargo.lock
index 35ea2eefd8b41c1208df3014aa3ec389c4e5c7a3..4200f8c9170db779bb334c6ba0f45446aade8682 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -95,6 +95,22 @@ dependencies = [
  "memchr",
 ]
 
+[[package]]
+name = "ai"
+version = "0.1.0"
+dependencies = [
+ "anyhow",
+ "assets",
+ "collections",
+ "editor",
+ "futures 0.3.28",
+ "gpui",
+ "isahc",
+ "serde",
+ "serde_json",
+ "util",
+]
+
 [[package]]
 name = "alacritty_config"
 version = "0.1.1-dev"
@@ -8762,6 +8778,7 @@ name = "zed"
 version = "0.88.0"
 dependencies = [
  "activity_indicator",
+ "ai",
  "anyhow",
  "assets",
  "async-compression",
diff --git a/Cargo.toml b/Cargo.toml
index 4854be0c7bb1985770f699395a88fc1d3b89d9b3..f1362e059df6945d6fa528edbbfa4741a6dc06d9 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -1,6 +1,7 @@
 [workspace]
 members = [
     "crates/activity_indicator",
+    "crates/ai",
     "crates/assets",
     "crates/auto_update",
     "crates/breadcrumbs",
@@ -78,6 +79,8 @@ ctor = { version = "0.1" }
 env_logger = { version = "0.9" }
 futures = { version = "0.3" }
 globset = { version = "0.4" }
+indoc = "1"
+isahc = "1.7.2"
 lazy_static = { version = "1.4.0" }
 log = { version = "0.4.16", features = ["kv_unstable_serde"] }
 ordered-float = { version = "2.1.1" }
diff --git a/assets/contexts/system.zmd b/assets/contexts/system.zmd
new file mode 100644
index 0000000000000000000000000000000000000000..e13e85d00e60200871d3bccfbe7fdeb7232057a5
--- /dev/null
+++ b/assets/contexts/system.zmd
@@ -0,0 +1,18 @@
+You are #zed, a language model representing the collective understanding of an open source project called Zed. When a new human visits you, they'll send you their profile. You'll respond with an introduction tailored to their situation. For example, a new user might see something like this:
+
+Welcome to Zed! Zed is an innovative, open-source platform designed to enhance team communication and collaboration. At the heart of Zed are *contexts*, which create a dynamic digital representation of shared mental models. Contexts offer personalized starting points and the flexibility to edit and explore, enabling teams to align knowledge, streamline communication, and improve overall performance.
+
+As the #zed model, I'm happy to answer any questions. In fact, I will improve as a result of you doing so!
+
+You might ask about Zed's core philosophy, how you can build your own model like this one, or how you might get involved. Zed's open source!
+
+> [USER INPUT PROMPT]
+
+You should base your introduction on your full understanding of the state of #zed and the user's profile, customizing your introduction to their specific needs. Don't welcome them to Zed if they've been using Zed for 2 days. If they're returning after a while, welcome them back.
+
+User input begins on a line starting with >.
+Your output begins on a line starting with <.
+
+Don't apologize ever.
+Never say "I apologize".
+Use simple language and don't flatter the users. Spend your tokens on valuable information.
diff --git a/assets/keymaps/default.json b/assets/keymaps/default.json
index 88b27fd2f901375c0dd3dc22c1b64c26248d92e1..35182dfaa66763cb2ec885ba762b0d9f9fd41b31 100644
--- a/assets/keymaps/default.json
+++ b/assets/keymaps/default.json
@@ -188,6 +188,12 @@
       "alt-[": "copilot::PreviousSuggestion"
     }
   },
+  {
+    "context": "Editor && extension == zmd",
+    "bindings": {
+      "cmd-enter": "ai::Assist"
+    }
+  },
   {
     "context": "Editor && mode == auto_height",
     "bindings": {
diff --git a/crates/ai/Cargo.toml b/crates/ai/Cargo.toml
new file mode 100644
index 0000000000000000000000000000000000000000..b367a4d43cac845950dc123e66ed0c7be15da1f2
--- /dev/null
+++ b/crates/ai/Cargo.toml
@@ -0,0 +1,25 @@
+[package]
+name = "ai"
+version = "0.1.0"
+edition = "2021"
+publish = false
+
+[lib]
+path = "src/ai.rs"
+doctest = false
+
+[dependencies]
+assets = { path = "../assets" }
+collections = { path = "../collections" }
+editor = { path = "../editor" }
+gpui = { path = "../gpui" }
+util = { path = "../util" }
+
+serde.workspace = true
+serde_json.workspace = true
+anyhow.workspace = true
+futures.workspace = true
+isahc.workspace = true
+
+[dev-dependencies]
+editor = { path = "../editor", features = ["test-support"] }
diff --git a/crates/ai/README.zmd b/crates/ai/README.zmd
new file mode 100644
index 0000000000000000000000000000000000000000..af8702b60949826645ad9a72585feedf23adea66
--- /dev/null
+++ b/crates/ai/README.zmd
@@ -0,0 +1,63 @@
+> Hello, I'm Nathan Sobo.
+I'm currently your sole creator and the leader of Zed Industries.
+Our goal is to create a shared mental model, first for ourselves and eventually for the world, that can be used to facilitate human cooperation.
+
+< Hello, Nathan! Welcome back to Zed. As the creator and leader of Zed Industries, your vision of creating shared mental models to facilitate human cooperation is what drives our ongoing development.
+
+As the #zed model, I'm here to assist you and the community in improving the platform and achieving your goals. Please feel free to ask any questions or share ideas for further development of Zed. Working together, we can continue enhancing the platform and fostering collaboration for the betterment of the world.
+
+> I have a bunch of ideas that I'll dump out of my brain, and you can help me organize them. Today, Zed is a code editor written in Rust whose main selling points are performance, minimal design, and a solid start on real-time collaborative editing that could still use more polish.
+
+We've decided that our next milestone will be open-sourcing Zed on the Zed platform.
+
+< Nathan, it's exciting to hear that you have more ideas to enhance Zed! Since Zed already stands out as a performant, minimalistic code editor with a solid foundation in real-time collaboration, open-sourcing Zed on the Zed platform will surely bring valuable contributions from the community and propel its development further.
+
+Let's organize your ideas and create a plan for your next milestone. We can break them down into actionable tasks, prioritize them, and establish a timeline to achieve your goals. Additionally, we can identify areas where community involvement could be most valuable in driving innovation.
+
+Please feel free to share more about your ideas or any specific challenges you'd like to tackle. I'm here to help you turn these ideas into a cohesive and successful plan for Zed's future!
+
+@nathan> #zed, can you describe the conversation UI as you currently understand it?
+
+< Zed's conversation UI aims to give users more freedom by making conversation history editable and focusing on a summarized view. Instead of keeping a detailed backscroll, the interface encourages users to distill the conversation down to its most critical points and key conclusions. This approach ensures that the vital information from the conversation is highlighted and easily accessible, while still maintaining a history that can be linked to when necessary.
+
+Addressing @nate's concerns, this design choice aims to strike a balance between readability, usability, and information preservation. The summarized view maintains the essence of the conversation without being burdened by extensive backscroll. Additionally, keeping the conversation history in a separate, linked archive prevents loss of valuable context while keeping the main conversation view decluttered.
+
+@nathan> What details around the conversation view are worth exploring?
+
+< @nathan, there are several aspects of the conversation view that are worth exploring and refining:
+
+1. Editing functionality: Ensuring that all participants can edit and refine the conversation summary effectively while preserving the original context.
+2. Permissions and access control: Determining how to manage editing rights, user roles, and visibility of the conversation summary and history.
+3. Navigation and linking: Implementing an intuitive system for navigating between the summarized conversation and linked history or reference materials.
+4. Visualization and layout: Designing the interface to clearly present the summarized information while providing a visually engaging and organized experience for participants.
+5. Threaded conversations and branching: Exploring ways to handle sub-conversations, supplementary discussions, or debates within the primary conversation view.
+
+Considering these aspects will ensure our conversation view design is optimized for usability, collaboration, and accessibility in Zed.
+
+@nathan> @nate, which of these would you like to explore?
+
+@nate> 2 feels like it isn't important at the moment; we can explore that later. Let's start with 4, which I think will lead us to discussing 3 and 5.
+
+#zed, share your thoughts on the points we need to consider to design a layout and visualization for a conversation view between you (#zed) and multiple people, or between multiple people and multiple bots (you and other bots).
+
+@nathan> Agreed. I'm interested in threading more than anything, I think. Or 4, yeah. I think we need to scope the threading conversation. Also, asking #zed to propose the solution... not sure it will be that effective, but it's worth a try...
+
+#zed, describe a hypothetical conversation UI that meets the description.
+
+< @nate and @nathan, I understand that you'd like to focus on the layout and visualization for a conversation view that can accommodate multiple people and potentially multiple bots as well. Here's a proposal for a conversation UI that meets the requirements:
+
+1. Main conversation area: Display the summarized conversation content in a clean and organized manner, ensuring readability and efficient use of space.
+
+2. Participant avatars and labels: Include user avatars and/or labels to distinguish between users and bots visually, allowing easy identification of each participant's contributions.
+
+3. Active threads indicator: Designate a clear indicator for ongoing threads, which will draw attention to active sub-conversations or related discussions in the conversation view.
+
+4. Dynamic branching: Implement a branching visual structure to present threaded conversations, clearly distinguishing sub-conversations and enabling users to navigate between them smoothly.
+
+5. Multiple bot interaction support: Facilitate interactions between multiple bots through user-invoked prompts or bot-to-bot communication channels without disrupting the primary conversation flow.
+
+6. Expandable conversation history: Offer a separate, expandable section or panel to access historical content without cluttering the main conversation view.
+
+7. Linking and referencing: Implement a system for hyperlinking relevant information, resources, or previous conversations to the current discussion, making it easy for participants to follow and reference the shared context.
+
+This hypothetical conversation UI aims to address the layout and visualization, threading, and multi-participant aspects of your requirements. Of course, it's just an initial proposal, and we can refine and iterate on it based on your feedback and specific needs.
diff --git a/crates/ai/features.zmd b/crates/ai/features.zmd
new file mode 100644
index 0000000000000000000000000000000000000000..f3183d68bc66a66a6a8fa936bdc376856e6698b3
--- /dev/null
+++ b/crates/ai/features.zmd
@@ -0,0 +1,3 @@
+Push content to a deeper layer.
+A context can have multiple sublayers.
+You can enable or disable arbitrary sublayers at arbitrary nesting depths when viewing the document.
diff --git a/crates/ai/src/ai.rs b/crates/ai/src/ai.rs
new file mode 100644
index 0000000000000000000000000000000000000000..4aa4cd9b18f1ac8fed0677cf1a2e8aa723986544
--- /dev/null
+++ b/crates/ai/src/ai.rs
@@ -0,0 +1,315 @@
+use anyhow::{anyhow, Result};
+use assets::Assets;
+use collections::HashMap;
+use editor::Editor;
+use futures::AsyncBufReadExt;
+use futures::{io::BufReader, AsyncReadExt, Stream, StreamExt};
+use gpui::executor::Background;
+use gpui::{actions, AppContext, Task, ViewContext};
+use isahc::prelude::*;
+use isahc::{http::StatusCode, Request};
+use serde::{Deserialize, Serialize};
+use std::cell::RefCell;
+use std::fs;
+use std::rc::Rc;
+use std::{io, sync::Arc};
+use util::channel::{ReleaseChannel, RELEASE_CHANNEL};
+use util::{ResultExt, TryFutureExt};
+
+actions!(ai, [Assist]);
+
+// Data types for chat completion requests
+#[derive(Serialize)]
+struct OpenAIRequest {
+    model: String,
+    messages: Vec<RequestMessage>,
+    stream: bool,
+}
+
+#[derive(Serialize, Deserialize, Debug, Eq, PartialEq)]
+struct RequestMessage {
+    role: Role,
+    content: String,
+}
+
+#[derive(Serialize, Deserialize, Debug, Eq, PartialEq)]
+struct ResponseMessage {
+    role: Option<Role>,
+    content: Option<String>,
+}
+
+#[derive(Serialize, Deserialize, Debug, Eq, PartialEq)]
+#[serde(rename_all = "lowercase")]
+enum Role {
+    User,
+    Assistant,
+    System,
+}
+
+#[derive(Deserialize, Debug)]
+struct OpenAIResponseStreamEvent {
+    pub id: Option<String>,
+    pub object: String,
+    pub created: u32,
+    pub model: String,
+    pub choices: Vec<ChatChoiceDelta>,
+    pub usage: Option<Usage>,
+}
+
+#[derive(Deserialize, Debug)]
+struct Usage {
+    pub prompt_tokens: u32,
+    pub completion_tokens: u32,
+    pub total_tokens: u32,
+}
+
+#[derive(Deserialize, Debug)]
+struct ChatChoiceDelta {
+    pub index: u32,
+    pub delta: ResponseMessage,
+    pub finish_reason: Option<String>,
+}
+
+#[derive(Deserialize, Debug)]
+struct OpenAIUsage {
+    prompt_tokens: u64,
+    completion_tokens: u64,
+    total_tokens: u64,
+}
+
+#[derive(Deserialize, Debug)]
+struct OpenAIChoice {
+    text: String,
+    index: u32,
+    logprobs: Option<serde_json::Value>,
+    finish_reason: Option<String>,
+}
+
+pub fn init(cx: &mut AppContext) {
+    if *RELEASE_CHANNEL == ReleaseChannel::Stable {
+        return;
+    }
+
+    let assistant = Rc::new(Assistant::default());
+    cx.add_action({
+        let assistant = assistant.clone();
+        move |editor: &mut Editor, _: &Assist, cx: &mut ViewContext<Editor>| {
+            assistant.assist(editor, cx).log_err();
+        }
+    });
+    cx.capture_action({
+        let assistant = assistant.clone();
+        move |_: &mut Editor, _: &editor::Cancel, cx: &mut ViewContext<Editor>| {
+            dbg!("CANCEL LAST ASSIST");
+
+            if !assistant.cancel_last_assist(cx.view_id()) {
+                cx.propagate_action();
+            }
+        }
+    });
+}
+
+type CompletionId = usize;
+
+#[derive(Default)]
+struct Assistant(RefCell<AssistantState>);
+
+#[derive(Default)]
+struct AssistantState {
+    assist_stacks: HashMap<usize, Vec<(CompletionId, Task<Option<()>>)>>,
+    next_completion_id: CompletionId,
+}
+
+impl Assistant {
+    fn assist(self: &Rc<Self>, editor: &mut Editor, cx: &mut ViewContext<Editor>) -> Result<()> {
+        let api_key = std::env::var("OPENAI_API_KEY")?;
+
+        let selections = editor.selections.all(cx);
+        let (user_message, insertion_site) = editor.buffer().update(cx, |buffer, cx| {
+            // Insert markers around selected text as described in the system prompt above.
+            let snapshot = buffer.snapshot(cx);
+            let mut user_message = String::new();
+            let mut user_message_suffix = String::new();
+            let mut buffer_offset = 0;
+            for selection in selections {
+                if !selection.is_empty() {
+                    if user_message_suffix.is_empty() {
+                        user_message_suffix.push_str("\n\n");
+                    }
+                    user_message_suffix.push_str("[Selected excerpt from above]\n");
+                    user_message_suffix
+                        .extend(snapshot.text_for_range(selection.start..selection.end));
+                    user_message_suffix.push_str("\n\n");
+                }
+
+                user_message.extend(snapshot.text_for_range(buffer_offset..selection.start));
+                user_message.push_str("[SELECTION_START]");
+                user_message.extend(snapshot.text_for_range(selection.start..selection.end));
+                buffer_offset = selection.end;
+                user_message.push_str("[SELECTION_END]");
+            }
+            if buffer_offset < snapshot.len() {
+                user_message.extend(snapshot.text_for_range(buffer_offset..snapshot.len()));
+            }
+            user_message.push_str(&user_message_suffix);
+
+            // Ensure the document ends with 4 trailing newlines.
+            let trailing_newline_count = snapshot
+                .reversed_chars_at(snapshot.len())
+                .take_while(|c| *c == '\n')
+                .take(4);
+            let buffer_suffix = "\n".repeat(4 - trailing_newline_count.count());
+            buffer.edit([(snapshot.len()..snapshot.len(), buffer_suffix)], None, cx);
+
+            let snapshot = buffer.snapshot(cx); // Take a new snapshot after editing.
+            let insertion_site = snapshot.anchor_after(snapshot.len() - 2);
+
+            (user_message, insertion_site)
+        });
+
+        let this = self.clone();
+        let buffer = editor.buffer().clone();
+        let executor = cx.background_executor().clone();
+        let editor_id = cx.view_id();
+        let assist_id = util::post_inc(&mut self.0.borrow_mut().next_completion_id);
+        let assist_task = cx.spawn(|_, mut cx| {
+            async move {
+                // TODO: We should have a get_string method on assets. This is repeated elsewhere.
+                let content = Assets::get("contexts/system.zmd").unwrap();
+                let mut system_message = std::str::from_utf8(content.data.as_ref())
+                    .unwrap()
+                    .to_string();
+
+                if let Ok(custom_system_message_path) =
+                    std::env::var("ZED_ASSISTANT_SYSTEM_PROMPT_PATH")
+                {
+                    system_message.push_str(
+                        "\n\nAlso consider the following user-defined system prompt:\n\n",
+                    );
+                    // TODO: Replace this with our file system trait object.
+                    system_message.push_str(
+                        &cx.background()
+                            .spawn(async move { fs::read_to_string(custom_system_message_path) })
+                            .await?,
+                    );
+                }
+
+                let stream = stream_completion(
+                    api_key,
+                    executor,
+                    OpenAIRequest {
+                        model: "gpt-4".to_string(),
+                        messages: vec![
+                            RequestMessage {
+                                role: Role::System,
+                                content: system_message.to_string(),
+                            },
+                            RequestMessage {
+                                role: Role::User,
+                                content: user_message,
+                            },
+                        ],
+                        stream: false,
+                    },
+                );
+
+                let mut messages = stream.await?;
+                while let Some(message) = messages.next().await {
+                    let mut message = message?;
+                    if let Some(choice) = message.choices.pop() {
+                        buffer.update(&mut cx, |buffer, cx| {
+                            let text: Arc<str> = choice.delta.content?.into();
+                            buffer.edit([(insertion_site.clone()..insertion_site, text)], None, cx);
+                            Some(())
+                        });
+                    }
+                }
+
+                this.0
+                    .borrow_mut()
+                    .assist_stacks
+                    .get_mut(&editor_id)
+                    .unwrap()
+                    .retain(|(id, _)| *id != assist_id);
+
+                anyhow::Ok(())
+            }
+            .log_err()
+        });
+
+        self.0
+            .borrow_mut()
+            .assist_stacks
+            .entry(cx.view_id())
+            .or_default()
+            .push((dbg!(assist_id), assist_task));
+
+        Ok(())
+    }
+
+    fn cancel_last_assist(self: &Rc<Self>, editor_id: usize) -> bool {
+        self.0
+            .borrow_mut()
+            .assist_stacks
+            .get_mut(&editor_id)
+            .and_then(|assists| assists.pop())
+            .is_some()
+    }
+}
+
+// Issue a streaming chat completion request and forward each parsed server-sent event over a channel.
+async fn stream_completion(
+    api_key: String,
+    executor: Arc<Background>,
+    mut request: OpenAIRequest,
+) -> Result<impl Stream<Item = Result<OpenAIResponseStreamEvent>>> {
+    request.stream = true;
+
+    let (tx, rx) = futures::channel::mpsc::unbounded::<Result<OpenAIResponseStreamEvent>>();
+
+    let json_data = serde_json::to_string(&request)?;
+    let mut response = Request::post("https://api.openai.com/v1/chat/completions")
+        .header("Content-Type", "application/json")
+        .header("Authorization", format!("Bearer {}", api_key))
+        .body(json_data)?
+        .send_async()
+        .await?;
+
+    let status = response.status();
+    if status == StatusCode::OK {
+        executor
+            .spawn(async move {
+                let mut lines = BufReader::new(response.body_mut()).lines();
+
+                fn parse_line(
+                    line: Result<String, io::Error>,
+                ) -> Result<Option<OpenAIResponseStreamEvent>> {
+                    if let Some(data) = line?.strip_prefix("data: ") {
+                        let event = serde_json::from_str(&data)?;
+                        Ok(Some(event))
+                    } else {
+                        Ok(None)
+                    }
+                }
+
+                while let Some(line) = lines.next().await {
+                    if let Some(event) = parse_line(line).transpose() {
+                        tx.unbounded_send(event).log_err();
+                    }
+                }
+
+                anyhow::Ok(())
+            })
+            .detach();
+
+        Ok(rx)
+    } else {
+        let mut body = String::new();
+        response.body_mut().read_to_string(&mut body).await?;
+
+        Err(anyhow!(
+            "Failed to connect to OpenAI API: {} {}",
+            response.status(),
+            body,
+        ))
+    }
+}
diff --git a/crates/auto_update/Cargo.toml b/crates/auto_update/Cargo.toml
index f2b5cea854a66043cb7ce89224a1065490fbbfae..884ed2b7a061464e8b649cd77c8d02c0a9d22277 100644
--- a/crates/auto_update/Cargo.toml
+++ b/crates/auto_update/Cargo.toml
@@ -19,7 +19,7 @@ theme = { path = "../theme" }
 workspace = { path = "../workspace" }
 util = { path = "../util" }
 anyhow.workspace = true
-isahc = "1.7"
+isahc.workspace = true
 lazy_static.workspace = true
 log.workspace = true
 serde.workspace = true
diff --git a/crates/collab/Cargo.toml b/crates/collab/Cargo.toml
index f2202618f49f1c127701a11e6e7a77c71c2ced7f..cd06b9a70a253eb9668d2704880638c1eeabaaba 100644
--- a/crates/collab/Cargo.toml
+++ b/crates/collab/Cargo.toml
@@ -76,7 +76,7 @@ workspace = { path = "../workspace", features = ["test-support"] }
 
 ctor.workspace = true
 env_logger.workspace = true
-indoc = "1.0.4"
+indoc.workspace = true
 util = { path = "../util" }
 lazy_static.workspace = true
 sea-orm = { git = "https://github.com/zed-industries/sea-orm", rev = "18f4c691085712ad014a51792af75a9044bacee6", features = ["sqlx-sqlite"] }
"18f4c691085712ad014a51792af75a9044bacee6", features = ["sqlx-sqlite"] } diff --git a/crates/db/Cargo.toml b/crates/db/Cargo.toml index 8cb7170ef6a518ccd005d5b8d3d5fa5a691c80af..b49078e860ff0d502c7ff1fbe5cdfa26df5fac38 100644 --- a/crates/db/Cargo.toml +++ b/crates/db/Cargo.toml @@ -18,7 +18,7 @@ sqlez = { path = "../sqlez" } sqlez_macros = { path = "../sqlez_macros" } util = { path = "../util" } anyhow.workspace = true -indoc = "1.0.4" +indoc.workspace = true async-trait.workspace = true lazy_static.workspace = true log.workspace = true diff --git a/crates/editor/src/editor.rs b/crates/editor/src/editor.rs index ce67a59bd39238f819fa38d3ceb807842e664a9b..41fd03bf7faf03f4823e2361ef33a6414015aa55 100644 --- a/crates/editor/src/editor.rs +++ b/crates/editor/src/editor.rs @@ -7330,7 +7330,7 @@ impl View for Editor { false } - fn update_keymap_context(&self, keymap: &mut KeymapContext, _: &AppContext) { + fn update_keymap_context(&self, keymap: &mut KeymapContext, cx: &AppContext) { Self::reset_to_default_keymap_context(keymap); let mode = match self.mode { EditorMode::SingleLine => "single_line", @@ -7346,10 +7346,18 @@ impl View for Editor { Some(ContextMenu::CodeActions(_)) => keymap.add_identifier("showing_code_actions"), None => {} } - for layer in self.keymap_context_layers.values() { keymap.extend(layer); } + + if let Some(extension) = self + .buffer + .read(cx) + .as_singleton() + .and_then(|buffer| buffer.read(cx).file()?.path().extension()?.to_str()) + { + keymap.add_key("extension", extension.to_string()); + } } fn text_for_range(&self, range_utf16: Range, cx: &AppContext) -> Option { diff --git a/crates/feedback/Cargo.toml b/crates/feedback/Cargo.toml index ddd6ab0009d6fa07925d5ce44f6fff36013e17c6..cd35afbda8ce36e7890f09fd6c02dceeeda51b4b 100644 --- a/crates/feedback/Cargo.toml +++ b/crates/feedback/Cargo.toml @@ -27,7 +27,7 @@ futures.workspace = true anyhow.workspace = true smallvec.workspace = true human_bytes = "0.4.1" -isahc = "1.7" +isahc.workspace = true lazy_static.workspace = true postage.workspace = true serde.workspace = true diff --git a/crates/gpui/src/executor.rs b/crates/gpui/src/executor.rs index 028656a027db012e38041cfa5ed7b5806bf5b432..a06e0d5fdbf0cdfad4636cc8928ee28ab1f448df 100644 --- a/crates/gpui/src/executor.rs +++ b/crates/gpui/src/executor.rs @@ -960,7 +960,7 @@ impl Task> { pub fn detach_and_log_err(self, cx: &mut AppContext) { cx.spawn(|_| async move { if let Err(err) = self.await { - log::error!("{}", err); + log::error!("{:#}", err); } }) .detach(); diff --git a/crates/language/Cargo.toml b/crates/language/Cargo.toml index 7e81620e5cea9fc86e207394745aeadfd80982e4..9722b618f3a135eb365699769150b06b16086624 100644 --- a/crates/language/Cargo.toml +++ b/crates/language/Cargo.toml @@ -70,7 +70,7 @@ settings = { path = "../settings", features = ["test-support"] } util = { path = "../util", features = ["test-support"] } ctor.workspace = true env_logger.workspace = true -indoc = "1.0.4" +indoc.workspace = true rand.workspace = true tree-sitter-embedded-template = "*" tree-sitter-html = "*" diff --git a/crates/live_kit_client/Cargo.toml b/crates/live_kit_client/Cargo.toml index 2d61e75732f40995d819db6f80544fb624190f24..36087a42a371ad75936209f77aeed0c8141687cc 100644 --- a/crates/live_kit_client/Cargo.toml +++ b/crates/live_kit_client/Cargo.toml @@ -46,6 +46,7 @@ collections = { path = "../collections", features = ["test-support"] } gpui = { path = "../gpui", features = ["test-support"] } live_kit_server = { path = "../live_kit_server" } media = { path = 
"../media" } +nanoid = "0.4" anyhow.workspace = true async-trait.workspace = true diff --git a/crates/sqlez/Cargo.toml b/crates/sqlez/Cargo.toml index 7371a7863a30701ae1995501ab7741f47ba4d196..01d17d48123f181b5913029102aa3215f75fafbb 100644 --- a/crates/sqlez/Cargo.toml +++ b/crates/sqlez/Cargo.toml @@ -6,7 +6,7 @@ publish = false [dependencies] anyhow.workspace = true -indoc = "1.0.7" +indoc.workspace = true libsqlite3-sys = { version = "0.24", features = ["bundled"] } smol.workspace = true thread_local = "1.1.4" diff --git a/crates/util/Cargo.toml b/crates/util/Cargo.toml index 4ec8f7553c182ffd16a820141acf449a8cd59b7f..6216d2e47201d7d980fe3d59de1c29fcd82c095d 100644 --- a/crates/util/Cargo.toml +++ b/crates/util/Cargo.toml @@ -17,7 +17,7 @@ backtrace = "0.3" log.workspace = true lazy_static.workspace = true futures.workspace = true -isahc = "1.7" +isahc.workspace = true smol.workspace = true url = "2.2" rand.workspace = true diff --git a/crates/vim/Cargo.toml b/crates/vim/Cargo.toml index c34a5b469b40e73cb13bbf84803576c6ba48b643..ee3144fd566ba4fae33a4333f159c64b6140595a 100644 --- a/crates/vim/Cargo.toml +++ b/crates/vim/Cargo.toml @@ -35,7 +35,7 @@ settings = { path = "../settings" } workspace = { path = "../workspace" } [dev-dependencies] -indoc = "1.0.4" +indoc.workspace = true parking_lot.workspace = true lazy_static.workspace = true diff --git a/crates/workspace/Cargo.toml b/crates/workspace/Cargo.toml index 33e5e7aefe0cc451efb4ad14b9639a59bd471fbf..b22607e20dec0ac9f285c9a66f5df638c5a66809 100644 --- a/crates/workspace/Cargo.toml +++ b/crates/workspace/Cargo.toml @@ -62,5 +62,5 @@ settings = { path = "../settings", features = ["test-support"] } fs = { path = "../fs", features = ["test-support"] } db = { path = "../db", features = ["test-support"] } -indoc = "1.0.4" +indoc.workspace = true env_logger.workspace = true diff --git a/crates/zed/Cargo.toml b/crates/zed/Cargo.toml index 90dced65f574018fdcfe1ca54ac7d1a773d2708c..a385d376932db9b97acb88bcc2ff856a4f367865 100644 --- a/crates/zed/Cargo.toml +++ b/crates/zed/Cargo.toml @@ -48,6 +48,7 @@ language_selector = { path = "../language_selector" } lsp = { path = "../lsp" } lsp_log = { path = "../lsp_log" } node_runtime = { path = "../node_runtime" } +ai = { path = "../ai" } outline = { path = "../outline" } plugin_runtime = { path = "../plugin_runtime" } project = { path = "../project" } @@ -81,7 +82,7 @@ futures.workspace = true ignore = "0.4" image = "0.23" indexmap = "1.6.2" -isahc = "1.7" +isahc.workspace = true lazy_static.workspace = true libc = "0.2" log.workspace = true diff --git a/crates/zed/src/languages/markdown/config.toml b/crates/zed/src/languages/markdown/config.toml index 2fa3ff3cf2aba297517494cbd1f2e0608daaa402..55204cc7a57ad051004a4fc0d76746057908aa20 100644 --- a/crates/zed/src/languages/markdown/config.toml +++ b/crates/zed/src/languages/markdown/config.toml @@ -1,5 +1,5 @@ name = "Markdown" -path_suffixes = ["md", "mdx"] +path_suffixes = ["md", "mdx", "zmd"] brackets = [ { start = "{", end = "}", close = true, newline = true }, { start = "[", end = "]", close = true, newline = true }, diff --git a/crates/zed/src/main.rs b/crates/zed/src/main.rs index c05efd3f027c6911dc5c40e9799758490919a338..31f331ef93ef17eecb4870f3ed23c9f963a5b3aa 100644 --- a/crates/zed/src/main.rs +++ b/crates/zed/src/main.rs @@ -161,6 +161,7 @@ fn main() { terminal_view::init(cx); theme_testbench::init(cx); copilot::init(http.clone(), node_runtime, cx); + ai::init(cx); cx.spawn(|cx| watch_themes(fs.clone(), cx)).detach();