WIP

Nathan Sobo created

Change summary

Cargo.lock                 | 15 +------
assets/contexts/system.zmd | 33 +++++++++++++
crates/ai/Cargo.toml       |  5 +-
crates/ai/README.zmd       |  2 -
crates/ai/src/ai.rs        | 96 +++++++++++++++++----------------------
5 files changed, 76 insertions(+), 75 deletions(-)

Detailed changes

Cargo.lock

@@ -100,16 +100,13 @@ name = "ai"
 version = "0.1.0"
 dependencies = [
  "anyhow",
- "async-stream",
+ "assets",
  "editor",
  "futures 0.3.28",
  "gpui",
- "indoc",
  "isahc",
- "pulldown-cmark",
  "serde",
  "serde_json",
- "unindent",
  "util",
 ]
 
@@ -2651,15 +2648,6 @@ dependencies = [
  "version_check",
 ]
 
-[[package]]
-name = "getopts"
-version = "0.2.21"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "14dbbfd5c71d70241ecf9e6f13737f7b5ce823821063188d7e46c41d371eebd5"
-dependencies = [
- "unicode-width",
-]
-
 [[package]]
 name = "getrandom"
 version = "0.1.16"
@@ -5098,7 +5086,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "2d9cc634bc78768157b5cbfe988ffcd1dcba95cd2b2f03a88316c08c6d00ed63"
 dependencies = [
  "bitflags",
- "getopts",
  "memchr",
  "unicase",
 ]

assets/contexts/system.zmd

@@ -0,0 +1,33 @@
+You are an AI language model embedded in a code editor named Zed, authored by Zed Industries.
+The input you are currently processing was produced by a special "model mention" in a document that is open in the editor.
+A model mention is indicated via a leading / on a line.
+The user's currently selected text is indicated via ->->selected text<-<- surrounding selected text.
+In this sentence, the word ->->example<-<- is selected.
+Respond to any selected model mention.
+
+Wrap your responses in > < as follows.
+/ What do you think?
+> I think that's a great idea. <
+
+For lines that are likely to wrap, or multiline responses, start and end the > and < on their own lines.
+>
+I think that's a great idea
+<
+
+If the selected mention is not at the end of the document, briefly summarize the context.
+> Key ideas of generative programming:
+* Managing context
+    * Managing length
+    * Context distillation
+        - Shrink a context's size without loss of meaning.
+* Fine-grained version control
+    * Portals to other contexts
+        * Distillation policies
+        * Budgets
+<
+
+*Only* respond to a mention if either
+a) The mention is at the end of the document.
+b) The user's selection intersects the mention.
+
+If no response is appropriate based on these conditions, respond with ><.
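The `->-> <-<-` markers this prompt describes are inserted by the editor around the user's selection before the request is sent (see the `assist` change below). As a rough illustration only, here is a minimal sketch of that wrapping on a plain string; the actual code edits the buffer in place rather than rebuilding a string:

```rust
/// Illustrative sketch, not part of this change: wrap the byte range
/// `start..end` of `text` in the ->-> <-<- selection markers the system
/// prompt defines.
fn mark_selection(text: &str, start: usize, end: usize) -> String {
    let mut marked = String::with_capacity(text.len() + 8);
    marked.push_str(&text[..start]);
    marked.push_str("->->");
    marked.push_str(&text[start..end]);
    marked.push_str("<-<-");
    marked.push_str(&text[end..]);
    marked
}
```

For example, `mark_selection("In this sentence, the word example is selected.", 27, 34)` yields `In this sentence, the word ->->example<-<- is selected.`, matching the form the prompt teaches the model to recognize.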

crates/ai/Cargo.toml

@@ -9,6 +9,7 @@ path = "src/ai.rs"
 doctest = false
 
 [dependencies]
+assets = { path = "../assets"}
 editor = { path = "../editor" }
 gpui = { path = "../gpui" }
 util = { path = "../util" }
@@ -16,12 +17,8 @@ util = { path = "../util" }
 serde.workspace = true
 serde_json.workspace = true
 anyhow.workspace = true
-indoc.workspace = true
-pulldown-cmark = "0.9.2"
 futures.workspace = true
 isahc.workspace = true
-unindent.workspace = true
-async-stream = "0.3.5"
 
 [dev-dependencies]
 editor = { path = "../editor", features = ["test-support"] }

crates/ai/README.zmd

@@ -7,5 +7,3 @@ Mention a language model with / at the start of any line, like this:
 > To mention a language model, simply include a forward slash (/) at the start of a line, followed by the mention of the model. For example:
 
 /gpt-4
-
-So you should not respond to the above mentions.

crates/ai/src/ai.rs

@@ -1,13 +1,14 @@
 use anyhow::{anyhow, Result};
+use assets::Assets;
 use editor::Editor;
 use futures::AsyncBufReadExt;
 use futures::{io::BufReader, AsyncReadExt, Stream, StreamExt};
 use gpui::executor::Background;
 use gpui::{actions, AppContext, Task, ViewContext};
-use indoc::indoc;
 use isahc::prelude::*;
 use isahc::{http::StatusCode, Request};
 use serde::{Deserialize, Serialize};
+use std::fs;
 use std::{io, sync::Arc};
 use util::ResultExt;
 
@@ -91,42 +92,6 @@ fn assist(
 ) -> Option<Task<Result<()>>> {
     let api_key = std::env::var("OPENAI_API_KEY").log_err()?;
 
-    const SYSTEM_MESSAGE: &'static str = indoc! {r#"
-        You an AI language model embedded in a code editor named Zed, authored by Zed Industries.
-        The input you are currently processing was produced by a special \"model mention\" in a document that is open in the editor.
-        A model mention is indicated via a leading / on a line.
-        The user's currently selected text is indicated via ->->selected text<-<- surrounding selected text.
-        In this sentence, the word ->->example<-<- is selected.
-        Respond to any selected model mention.
-
-        Wrap your responses in > < as follows.
-        / What do you think?
-        > I think that's a great idea. <
-
-        For lines that are likely to wrap, or multiline responses, start and end the > and < on their own lines.
-        >
-        I think that's a great idea
-        <
-
-        If the selected mention is not at the end of the document, briefly summarize the context.
-        > Key ideas of generative programming:
-        * Managing context
-            * Managing length
-            * Context distillation
-                - Shrink a context's size without loss of meaning.
-        * Fine-grained version control
-            * Portals to other contexts
-                * Distillation policies
-                * Budgets
-        <
-
-        *Only* respond to a mention if either
-        a) The mention is at the end of the document.
-        b) The user's selection intersects the mention.
-
-        If no response is appropriate based on these conditions, respond with ><.
-    "#};
-
     let selections = editor.selections.all(cx);
     let (user_message, insertion_site) = editor.buffer().update(cx, |buffer, cx| {
         // Insert ->-> <-<- around selected text as described in the system prompt above.
@@ -158,26 +123,47 @@ fn assist(
         (user_message, insertion_site)
     });
 
-    let stream = stream_completion(
-        api_key,
-        cx.background_executor().clone(),
-        OpenAIRequest {
-            model: "gpt-4".to_string(),
-            messages: vec![
-                RequestMessage {
-                    role: Role::System,
-                    content: SYSTEM_MESSAGE.to_string(),
-                },
-                RequestMessage {
-                    role: Role::User,
-                    content: user_message,
-                },
-            ],
-            stream: false,
-        },
-    );
     let buffer = editor.buffer().clone();
+    let executor = cx.background_executor().clone();
     Some(cx.spawn(|_, mut cx| async move {
+        // TODO: We should have a get_string method on assets. This is repeated elsewhere.
+        let content = Assets::get("contexts/system.zmd").unwrap();
+        let mut system_message = std::str::from_utf8(content.data.as_ref())
+            .unwrap()
+            .to_string();
+
+        if let Ok(custom_system_message_path) = std::env::var("ZED_ASSISTANT_SYSTEM_PROMPT_PATH") {
+            system_message
+                .push_str("\n\nAlso consider the following user-defined system prompt:\n\n");
+            // TODO: Replace this with our file system trait object.
+            // What if you could bind dependencies to an action when you bind it?
+            dbg!(&custom_system_message_path);
+            system_message.push_str(
+                &cx.background()
+                    .spawn(async move { fs::read_to_string(custom_system_message_path) })
+                    .await?,
+            );
+        }
+
+        let stream = stream_completion(
+            api_key,
+            executor,
+            OpenAIRequest {
+                model: "gpt-4".to_string(),
+                messages: vec![
+                    RequestMessage {
+                        role: Role::System,
+                        content: system_message.to_string(),
+                    },
+                    RequestMessage {
+                        role: Role::User,
+                        content: user_message,
+                    },
+                ],
+                stream: false,
+            },
+        );
+
         let mut messages = stream.await?;
         while let Some(message) = messages.next().await {
             let mut message = message?;
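The `get_string` helper the TODO above asks for could look something like the following sketch (hypothetical, not part of this change). It assumes `Assets::get` keeps the shape used above, returning an optional embedded file whose `data` derefs to bytes; the TODO envisions it as a method on `Assets`, shown here as a free function for brevity:

```rust
use anyhow::{anyhow, Result};

/// Hypothetical convenience proposed by the TODO above: load an embedded
/// asset and decode it as UTF-8 in one call, so callers don't repeat the
/// bytes -> &str -> String dance.
fn asset_string(path: &str) -> Result<String> {
    let content = Assets::get(path).ok_or_else(|| anyhow!("missing asset: {}", path))?;
    Ok(std::str::from_utf8(content.data.as_ref())?.to_string())
}
```

With that in place, loading the prompt would reduce to `let mut system_message = asset_string("contexts/system.zmd")?;`, also replacing the two `unwrap` calls with propagated errors; when `ZED_ASSISTANT_SYSTEM_PROMPT_PATH` is set, its file contents are still appended to this base prompt as above.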