WIP

Created by Nathan Sobo

Change summary

assets/keymaps/default.json |  2 +-
crates/ai/README.zmd        |  6 +++++-
crates/ai/src/ai.rs         | 22 +++++++++-------------
crates/editor/src/editor.rs |  1 -
4 files changed, 15 insertions(+), 16 deletions(-)

Detailed changes

assets/keymaps/default.json 🔗

@@ -164,7 +164,7 @@
     "bindings": {
       "enter": "editor::Newline",
       "cmd-shift-enter": "editor::NewlineAbove",
-      "cmd-enter": "editor::NewlineBelow",
+      "cmd-enter": "ai::Assist",
       "alt-z": "editor::ToggleSoftWrap",
       "cmd-f": [
         "buffer_search::Deploy",

crates/ai/README.zmd 🔗

@@ -2,4 +2,8 @@ This is Zed Markdown.
 
 Mention a language model with / at the start of any line, like this:
 
-/ Expand on this idea
+/
+
+> To mention a language model, simply include a forward slash (/) at the start of a line, followed by the mention of the model. For example:
+
+/gpt-4

crates/ai/src/ai.rs 🔗

@@ -98,12 +98,18 @@ fn assist(
         The user's currently selected text is indicated via ->->selected text<-<- surrounding selected text.
         In this sentence, the word ->->example<-<- is selected.
         Respond to any selected model mention.
+
         Wrap your responses in > < as follows.
+        / What do you think?
+        > I think that's a great idea. <
+
+        For lines that are likely to wrap, or multiline responses, start and end the > and < on their own lines.
         >
-        I think that's a great idea.
+        I think that's a great idea
         <
-        If you're responding to a distant mention or multiple mentions, provide context.
-        > Key ideas of generative programming.
+
+        If the selected mention is not at the end of the document, briefly summarize the context.
+        > Key ideas of generative programming:
         * Managing context
             * Managing length
             * Context distillation
@@ -113,16 +119,6 @@ fn assist(
                 * Distillation policies
                 * Budgets
         <
-
-        > Expand on the idea of context distillation.
-        It's important to stay below the model's context size when generative programming.
-        A key technique in doing so is called context distillation... [up to 1 paragraph].
-
-        Questions to consider:
-        -
-        -
-        - [Up to 3 questions]
-        <
     "#};
 
     let selections = editor.selections.all(cx);

crates/editor/src/editor.rs 🔗

@@ -7247,7 +7247,6 @@ impl View for Editor {
             Some(ContextMenu::CodeActions(_)) => keymap.add_identifier("showing_code_actions"),
             None => {}
         }
-
         for layer in self.keymap_context_layers.values() {
             keymap.extend(layer);
         }