@@ -34,6 +34,7 @@ use editor::{
 };
 use editor::{display_map::CreaseId, FoldPlaceholder};
 use fs::Fs;
+use futures::FutureExt;
 use gpui::{
     canvas, div, img, percentage, point, pulsating_between, size, Action, Animation, AnimationExt,
     AnyElement, AnyView, AppContext, AsyncWindowContext, ClipboardEntry, ClipboardItem,
@@ -46,11 +47,11 @@ use indexed_docs::IndexedDocsStore;
 use language::{
     language_settings::SoftWrap, Capability, LanguageRegistry, LspAdapterDelegate, Point, ToOffset,
 };
-use language_model::LanguageModelToolUse;
 use language_model::{
     provider::cloud::PROVIDER_ID, LanguageModelProvider, LanguageModelProviderId,
     LanguageModelRegistry, Role,
 };
+use language_model::{LanguageModelImage, LanguageModelToolUse};
 use multi_buffer::MultiBufferRow;
 use picker::{Picker, PickerDelegate};
 use project::lsp_store::ProjectLspAdapterDelegate;
@@ -3551,10 +3552,22 @@ impl ContextEditor {
             self.context.update(cx, |context, cx| {
                 for image in images {
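+                    // Skip images that can't be decoded for display.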
+                    let Some(render_image) = image.to_image_data(cx).log_err() else {
+                        continue;
+                    };
                     let image_id = image.id();
-                    context.insert_image(image, cx);
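+                    // Convert once; each insertion below clones the shared task.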
+                    let image_task = LanguageModelImage::from_image(image, cx).shared();
+
                     for image_position in image_positions.iter() {
-                        context.insert_image_content(image_id, image_position.text_anchor, cx);
+                        context.insert_content(
+                            Content::Image {
+                                anchor: image_position.text_anchor,
+                                image_id,
+                                image: image_task.clone(),
+                                render_image: render_image.clone(),
+                            },
+                            cx,
+                        );
                     }
                 }
             });
@@ -20,8 +20,8 @@ use futures::{
     FutureExt, StreamExt,
 };
 use gpui::{
-    AppContext, AsyncAppContext, Context as _, EventEmitter, Image, Model, ModelContext,
-    RenderImage, SharedString, Subscription, Task,
+    AppContext, AsyncAppContext, Context as _, EventEmitter, Model, ModelContext, RenderImage,
+    SharedString, Subscription, Task,
 };
 use language::{AnchorRangeExt, Bias, Buffer, LanguageRegistry, OffsetRangeExt, Point, ToOffset};
@@ -38,7 +38,6 @@ use serde::{Deserialize, Serialize};
 use smallvec::SmallVec;
 use std::{
     cmp::{self, max, Ordering},
-    collections::hash_map,
     fmt::Debug,
     iter, mem,
     ops::Range,
@@ -49,7 +48,7 @@ use std::{
 };
 use telemetry_events::AssistantKind;
 use text::BufferSnapshot;
-use util::{post_inc, ResultExt, TryFutureExt};
+use util::{post_inc, TryFutureExt};
 use uuid::Uuid;
 
 #[derive(Clone, Eq, PartialEq, Hash, PartialOrd, Ord, Serialize, Deserialize)]
@@ -468,7 +467,6 @@ pub struct Context {
     slash_command_output_sections: Vec<SlashCommandOutputSection<language::Anchor>>,
     pending_tool_uses_by_id: HashMap<Arc<str>, PendingToolUse>,
     message_anchors: Vec<MessageAnchor>,
-    images: HashMap<u64, (Arc<RenderImage>, Shared<Task<Option<LanguageModelImage>>>)>,
     contents: Vec<Content>,
     messages_metadata: HashMap<MessageId, MessageMetadata>,
     summary: Option<ContextSummary>,
@@ -564,7 +562,6 @@ impl Context {
             operations: Vec::new(),
             message_anchors: Default::default(),
             contents: Default::default(),
-            images: Default::default(),
             messages_metadata: Default::default(),
             pending_slash_commands: Vec::new(),
             finished_slash_commands: HashSet::default(),
@@ -2374,36 +2371,6 @@ impl Context {
         }
     }
 
-    pub fn insert_image(&mut self, image: Image, cx: &mut ModelContext<Self>) -> Option<()> {
-        if let hash_map::Entry::Vacant(entry) = self.images.entry(image.id()) {
-            entry.insert((
-                image.to_image_data(cx).log_err()?,
-                LanguageModelImage::from_image(image, cx).shared(),
-            ));
-        }
-
-        Some(())
-    }
-
-    pub fn insert_image_content(
-        &mut self,
-        image_id: u64,
-        anchor: language::Anchor,
-        cx: &mut ModelContext<Self>,
-    ) {
-        if let Some((render_image, image)) = self.images.get(&image_id) {
-            self.insert_content(
-                Content::Image {
-                    anchor,
-                    image_id,
-                    image: image.clone(),
-                    render_image: render_image.clone(),
-                },
-                cx,
-            );
-        }
-    }
-
     pub fn insert_content(&mut self, content: Content, cx: &mut ModelContext<Self>) {
         let buffer = self.buffer.read(cx);
         let insertion_ix = match self