1use std::{collections::hash_map, sync::Arc, time::Duration};
2
3use collections::{HashMap, HashSet};
4use futures::future::join_all;
5use gpui::{
6 App, Context, FontStyle, FontWeight, HighlightStyle, StrikethroughStyle, Task, UnderlineStyle,
7};
8use language::language_settings::language_settings;
9use project::{
10 lsp_store::{
11 BufferSemanticToken, BufferSemanticTokens, RefreshForServer, SemanticTokenStylizer,
12 TokenType,
13 },
14 project_settings::ProjectSettings,
15};
16use settings::{
17 SemanticTokenColorOverride, SemanticTokenFontStyle, SemanticTokenFontWeight,
18 SemanticTokenRules, Settings as _,
19};
20use text::BufferId;
21use theme::SyntaxTheme;
22use ui::ActiveTheme as _;
23
24use crate::{
25 Editor,
26 actions::ToggleSemanticHighlights,
27 display_map::{HighlightStyleInterner, SemanticTokenHighlight},
28};
29
/// Per-editor state backing LSP semantic-token highlighting.
pub(super) struct SemanticTokenState {
    // Styling rules cached from the global LSP project settings; refreshed
    // through `update_rules` when settings change.
    rules: SemanticTokenRules,
    // Whether semantic highlighting is currently turned on for this editor.
    enabled: bool,
    // The in-flight debounce/fetch task; replaced wholesale on every refresh,
    // superseding any previous pending update.
    update_task: Task<()>,
    // The buffer version each buffer's tokens were last fetched at, used to
    // skip refetching buffers whose contents haven't changed since.
    fetched_for_buffers: HashMap<BufferId, clock::Global>,
}
36
37impl SemanticTokenState {
38 pub(super) fn new(cx: &App, enabled: bool) -> Self {
39 Self {
40 rules: ProjectSettings::get_global(cx)
41 .global_lsp_settings
42 .semantic_token_rules
43 .clone(),
44 enabled,
45 update_task: Task::ready(()),
46 fetched_for_buffers: HashMap::default(),
47 }
48 }
49
50 pub(super) fn enabled(&self) -> bool {
51 self.enabled
52 }
53
54 pub(super) fn toggle_enabled(&mut self) {
55 self.enabled = !self.enabled;
56 }
57
58 #[cfg(test)]
59 pub(super) fn take_update_task(&mut self) -> Task<()> {
60 std::mem::replace(&mut self.update_task, Task::ready(()))
61 }
62
63 pub(super) fn invalidate_buffer(&mut self, buffer_id: &BufferId) {
64 self.fetched_for_buffers.remove(buffer_id);
65 }
66
67 pub(super) fn update_rules(&mut self, new_rules: SemanticTokenRules) -> bool {
68 if new_rules != self.rules {
69 self.rules = new_rules;
70 true
71 } else {
72 false
73 }
74 }
75}
76
impl Editor {
    /// Whether at least one buffer shown by this editor has a semantics
    /// provider capable of producing semantic tokens.
    pub fn supports_semantic_tokens(&self, cx: &mut App) -> bool {
        let Some(provider) = self.semantics_provider.as_ref() else {
            return false;
        };

        let mut supports = false;
        self.buffer().update(cx, |this, cx| {
            this.for_each_buffer(|buffer| {
                supports |= provider.supports_semantic_tokens(buffer, cx);
            });
        });

        supports
    }

    /// Whether semantic highlighting is currently toggled on for this editor.
    pub fn semantic_highlights_enabled(&self) -> bool {
        self.semantic_token_state.enabled()
    }

    /// Action handler for `ToggleSemanticHighlights`: flips the enabled flag,
    /// drops all cached token versions, and refreshes — which either
    /// refetches tokens or tears down the highlights, depending on the new
    /// flag value.
    pub fn toggle_semantic_highlights(
        &mut self,
        _: &ToggleSemanticHighlights,
        _window: &mut gpui::Window,
        cx: &mut Context<Self>,
    ) {
        self.semantic_token_state.toggle_enabled();
        self.invalidate_semantic_tokens(None);
        self.refresh_semantic_tokens(None, None, cx);
    }

    /// Forgets which buffer version(s) tokens were last fetched for, so the
    /// next refresh refetches them: one buffer when `for_buffer` is `Some`,
    /// otherwise every buffer.
    pub(super) fn invalidate_semantic_tokens(&mut self, for_buffer: Option<BufferId>) {
        match for_buffer {
            Some(for_buffer) => self.semantic_token_state.invalidate_buffer(&for_buffer),
            None => self.semantic_token_state.fetched_for_buffers.clear(),
        }
    }

    /// Refetches semantic tokens for the visible buffers (plus `buffer_id`,
    /// when given) and converts the results into display-map highlights.
    ///
    /// When `for_server` is set the refresh is server-initiated: everything
    /// previously fetched is treated as stale and re-applied from scratch.
    /// The actual fetch runs in a spawned task after a short debounce delay.
    pub(super) fn refresh_semantic_tokens(
        &mut self,
        buffer_id: Option<BufferId>,
        for_server: Option<RefreshForServer>,
        cx: &mut Context<Self>,
    ) {
        // Highlighting disabled (or not a full-mode editor): tear down all
        // existing semantic highlights and drop any pending update task.
        if !self.mode().is_full() || !self.semantic_token_state.enabled() {
            self.invalidate_semantic_tokens(None);
            self.display_map.update(cx, |display_map, _| {
                display_map.semantic_token_highlights.clear();
            });
            self.semantic_token_state.update_task = Task::ready(());
            cx.notify();
            return;
        }

        // For a server-initiated refresh, remember every buffer we had
        // fetched tokens for so its highlights can be invalidated once the
        // new results arrive; draining also clears the cached versions.
        let mut invalidate_semantic_highlights_for_buffers = HashSet::default();
        if for_server.is_some() {
            invalidate_semantic_highlights_for_buffers.extend(
                self.semantic_token_state
                    .fetched_for_buffers
                    .drain()
                    .map(|(buffer_id, _)| buffer_id),
            );
        }

        let Some((sema, project)) = self.semantics_provider.clone().zip(self.project.clone())
        else {
            return;
        };

        // Candidate buffers: currently visible ones (plus the explicitly
        // requested buffer) that are registered with a language server and
        // have semantic tokens enabled in their language settings.
        let buffers_to_query = self
            .visible_excerpts(true, cx)
            .into_values()
            .map(|(buffer, ..)| buffer)
            .chain(buffer_id.and_then(|buffer_id| self.buffer.read(cx).buffer(buffer_id)))
            .filter_map(|editor_buffer| {
                let editor_buffer_id = editor_buffer.read(cx).remote_id();
                if self.registered_buffers.contains_key(&editor_buffer_id)
                    && language_settings(
                        editor_buffer.read(cx).language().map(|l| l.name()),
                        editor_buffer.read(cx).file(),
                        cx,
                    )
                    .semantic_tokens
                    .enabled()
                {
                    Some((editor_buffer_id, editor_buffer))
                } else {
                    None
                }
            })
            .collect::<HashMap<_, _>>();

        // Buffers that still carry highlights but won't be re-queried and now
        // have semantic tokens disabled in settings: drop their highlights
        // and cached versions immediately.
        for buffer_with_disabled_tokens in self
            .display_map
            .read(cx)
            .semantic_token_highlights
            .iter()
            .map(|(buffer_id, _)| *buffer_id)
            .filter(|buffer_id| !buffers_to_query.contains_key(buffer_id))
            .filter(|buffer_id| {
                !self
                    .buffer
                    .read(cx)
                    .buffer(*buffer_id)
                    .is_some_and(|buffer| {
                        let buffer = buffer.read(cx);
                        language_settings(buffer.language().map(|l| l.name()), buffer.file(), cx)
                            .semantic_tokens
                            .enabled()
                    })
            })
            .collect::<Vec<_>>()
        {
            self.semantic_token_state
                .invalidate_buffer(&buffer_with_disabled_tokens);
            self.display_map.update(cx, |display_map, _| {
                display_map.invalidate_semantic_highlights(buffer_with_disabled_tokens);
            });
        }

        // Replacing `update_task` supersedes any previous pending refresh, so
        // bursts of refresh requests coalesce into one fetch after the timer.
        self.semantic_token_state.update_task = cx.spawn(async move |editor, cx| {
            cx.background_executor()
                .timer(Duration::from_millis(50))
                .await;
            // Kick off one fetch per candidate buffer whose contents changed
            // since the last successful fetch (or that was never fetched).
            let Some(all_semantic_tokens_task) = editor
                .update(cx, |editor, cx| {
                    buffers_to_query
                        .into_iter()
                        .filter_map(|(buffer_id, buffer)| {
                            let known_version =
                                editor.semantic_token_state.fetched_for_buffers.get(&buffer_id);
                            let query_version = buffer.read(cx).version();
                            if known_version.is_some_and(|known_version| {
                                !query_version.changed_since(known_version)
                            }) {
                                None
                            } else {
                                let task = sema.semantic_tokens(buffer, for_server, cx);
                                Some(async move { (buffer_id, query_version, task.await) })
                            }
                        })
                        .collect::<Vec<_>>()
                })
                .ok()
            else {
                return;
            };

            let all_semantic_tokens = join_all(all_semantic_tokens_task).await;
            editor.update(cx, |editor, cx| {
                // Apply the invalidations recorded for a server-initiated
                // refresh before installing the fresh results.
                editor.display_map.update(cx, |display_map, _| {
                    for buffer_id in invalidate_semantic_highlights_for_buffers {
                        display_map.invalidate_semantic_highlights(buffer_id);
                        editor.semantic_token_state.invalidate_buffer(&buffer_id);
                    }
                });

                if all_semantic_tokens.is_empty() {
                    return;
                }
                let multi_buffer_snapshot = editor.buffer().read(cx).snapshot(cx);
                let all_excerpts = editor.buffer().read(cx).excerpt_ids();

                for (buffer_id, query_version, tokens) in all_semantic_tokens {
                    let tokens = match tokens {
                        Ok(BufferSemanticTokens { tokens: Some(tokens) }) => {
                            tokens
                        },
                        // The server(s) reported no tokens for this buffer:
                        // clear whatever highlights it had.
                        Ok(BufferSemanticTokens { tokens: None }) => {
                            editor.display_map.update(cx, |display_map, _| {
                                display_map.invalidate_semantic_highlights(buffer_id);
                            });
                            continue;
                        },
                        Err(e) => {
                            log::error!("Failed to fetch semantic tokens for buffer {buffer_id:?}: {e:#}");
                            continue;
                        },
                    };

                    // Record the version this result was fetched at; skip
                    // results that are not newer than what's already applied
                    // (e.g. when racing with a concurrent fetch).
                    match editor.semantic_token_state.fetched_for_buffers.entry(buffer_id) {
                        hash_map::Entry::Occupied(mut o) => {
                            if query_version.changed_since(o.get()) {
                                o.insert(query_version);
                            } else {
                                continue;
                            }
                        },
                        hash_map::Entry::Vacant(v) => {
                            v.insert(query_version);
                        },
                    }

                    let language_name = editor
                        .buffer()
                        .read(cx)
                        .buffer(buffer_id)
                        .and_then(|buf| buf.read(cx).language().map(|l| l.name()));

                    // Convert each server's raw tokens into interned
                    // multi-buffer highlights, then install them sorted by
                    // start position.
                    editor.display_map.update(cx, |display_map, cx| {
                        project.read(cx).lsp_store().update(cx, |lsp_store, cx| {
                            let mut token_highlights = Vec::new();
                            let mut interner = HighlightStyleInterner::default();
                            for (server_id, server_tokens) in tokens {
                                let Some(stylizer) = lsp_store.get_or_create_token_stylizer(
                                    server_id,
                                    language_name.as_ref(),
                                    cx,
                                )
                                else {
                                    continue;
                                };
                                token_highlights.extend(buffer_into_editor_highlights(
                                    &server_tokens,
                                    stylizer,
                                    &all_excerpts,
                                    &multi_buffer_snapshot,
                                    &mut interner,
                                    cx,
                                ));
                            }

                            token_highlights.sort_by(|a, b| {
                                a.range.start.cmp(&b.range.start, &multi_buffer_snapshot)
                            });
                            display_map
                                .semantic_token_highlights
                                .insert(buffer_id, (Arc::from(token_highlights), Arc::new(interner)));
                        });
                    });
                }

                cx.notify();
            }).ok();
        });
    }
}
315
316fn buffer_into_editor_highlights<'a, 'b>(
317 buffer_tokens: &'a [BufferSemanticToken],
318 stylizer: &'a SemanticTokenStylizer,
319 all_excerpts: &'a [multi_buffer::ExcerptId],
320 multi_buffer_snapshot: &'a multi_buffer::MultiBufferSnapshot,
321 interner: &'b mut HighlightStyleInterner,
322 cx: &'a App,
323) -> impl Iterator<Item = SemanticTokenHighlight> + use<'a, 'b> {
324 buffer_tokens.iter().filter_map(|token| {
325 let multi_buffer_start = all_excerpts.iter().find_map(|&excerpt_id| {
326 multi_buffer_snapshot.anchor_in_excerpt(excerpt_id, token.range.start)
327 })?;
328 let multi_buffer_end = all_excerpts.iter().find_map(|&excerpt_id| {
329 multi_buffer_snapshot.anchor_in_excerpt(excerpt_id, token.range.end)
330 })?;
331
332 let style = convert_token(
333 stylizer,
334 cx.theme().syntax(),
335 token.token_type,
336 token.token_modifiers,
337 )?;
338 let style = interner.intern(style);
339 Some(SemanticTokenHighlight {
340 range: multi_buffer_start..multi_buffer_end,
341 style,
342 token_type: token.token_type,
343 token_modifiers: token.token_modifiers,
344 server_id: stylizer.server_id(),
345 })
346 })
347}
348
/// Computes the combined `HighlightStyle` for a token of `token_type` with
/// the given modifier bitset, or `None` when no configured rule matches.
fn convert_token(
    stylizer: &SemanticTokenStylizer,
    theme: &SyntaxTheme,
    token_type: TokenType,
    modifiers: u32,
) -> Option<HighlightStyle> {
    // All rules registered for this token type; bail out if there are none.
    let rules = stylizer.rules_for_token(token_type)?;
    // Keep only rules whose required modifiers are all present in `modifiers`.
    let matching = rules.iter().filter(|rule| {
        rule.token_modifiers
            .iter()
            .all(|m| stylizer.has_modifier(modifiers, m))
    });

    let mut highlight = HighlightStyle::default();
    let mut empty = true;

    // Later matching rules override earlier ones, field by field.
    for rule in matching {
        empty = false;

        // First theme style the rule references that actually resolves.
        let style = rule.style.iter().find_map(|style| theme.get_opt(style));

        // Sets one highlight field from, in priority order: the rule's
        // explicit override, the resolved theme style, then whatever earlier
        // rules accumulated.
        macro_rules! overwrite {
            (
                highlight.$highlight_field:ident,
                SemanticTokenRule::$rule_field:ident,
                $transform:expr $(,)?
            ) => {
                highlight.$highlight_field = rule
                    .$rule_field
                    .map($transform)
                    .or_else(|| style.and_then(|s| s.$highlight_field))
                    .or(highlight.$highlight_field)
            };
        }

        overwrite!(
            highlight.color,
            SemanticTokenRule::foreground_color,
            Into::into,
        );

        overwrite!(
            highlight.background_color,
            SemanticTokenRule::background_color,
            Into::into,
        );

        overwrite!(
            highlight.font_weight,
            SemanticTokenRule::font_weight,
            |w| match w {
                SemanticTokenFontWeight::Normal => FontWeight::NORMAL,
                SemanticTokenFontWeight::Bold => FontWeight::BOLD,
            },
        );

        overwrite!(
            highlight.font_style,
            SemanticTokenRule::font_style,
            |s| match s {
                SemanticTokenFontStyle::Normal => FontStyle::Normal,
                SemanticTokenFontStyle::Italic => FontStyle::Italic,
            },
        );

        // NOTE: `InheritForeground` reads the foreground accumulated so far
        // (including this rule's own, applied above); a later rule's
        // foreground does not retroactively recolor this underline.
        overwrite!(highlight.underline, SemanticTokenRule::underline, |u| {
            UnderlineStyle {
                thickness: 1.0.into(),
                color: match u {
                    SemanticTokenColorOverride::InheritForeground(true) => highlight.color,
                    SemanticTokenColorOverride::InheritForeground(false) => None,
                    SemanticTokenColorOverride::Replace(c) => Some(c.into()),
                },
                ..UnderlineStyle::default()
            }
        });

        // Same ordering caveat as for `underline` above.
        overwrite!(
            highlight.strikethrough,
            SemanticTokenRule::strikethrough,
            |s| StrikethroughStyle {
                thickness: 1.0.into(),
                color: match s {
                    SemanticTokenColorOverride::InheritForeground(true) => highlight.color,
                    SemanticTokenColorOverride::InheritForeground(false) => None,
                    SemanticTokenColorOverride::Replace(c) => Some(c.into()),
                },
            },
        );
    }

    // No rule matched at all → this token gets no semantic highlight.
    if empty { None } else { Some(highlight) }
}
442
443#[cfg(test)]
444mod tests {
445 use std::{
446 ops::Range,
447 sync::atomic::{self, AtomicUsize},
448 };
449
450 use futures::StreamExt as _;
451 use gpui::{AppContext as _, Entity, Focusable as _, HighlightStyle, TestAppContext};
452 use language::{Language, LanguageConfig, LanguageMatcher};
453 use languages::FakeLspAdapter;
454 use multi_buffer::{
455 AnchorRangeExt, ExcerptRange, ExpandExcerptDirection, MultiBuffer, MultiBufferOffset,
456 };
457 use project::Project;
458 use rope::Point;
459 use serde_json::json;
460 use settings::{LanguageSettingsContent, SemanticTokenRules, SemanticTokens, SettingsStore};
461 use workspace::{MultiWorkspace, WorkspaceHandle as _};
462
463 use crate::{
464 Capability,
465 editor_tests::{init_test, update_test_language_settings},
466 test::{build_editor_with_project, editor_lsp_test_context::EditorLspTestContext},
467 };
468
469 use super::*;
470
    // A server that only supports full (non-delta) tokens must receive a full
    // request for every change, even when it returns a `result_id`.
    #[gpui::test]
    async fn lsp_semantic_tokens_full_capability(cx: &mut TestAppContext) {
        init_test(cx, |_| {});

        // Enable full semantic tokens for Rust buffers.
        update_test_language_settings(cx, |language_settings| {
            language_settings.languages.0.insert(
                "Rust".into(),
                LanguageSettingsContent {
                    semantic_tokens: Some(SemanticTokens::Full),
                    ..LanguageSettingsContent::default()
                },
            );
        });

        // Server advertises full tokens with no delta support.
        let mut cx = EditorLspTestContext::new_rust(
            lsp::ServerCapabilities {
                semantic_tokens_provider: Some(
                    lsp::SemanticTokensServerCapabilities::SemanticTokensOptions(
                        lsp::SemanticTokensOptions {
                            legend: lsp::SemanticTokensLegend {
                                token_types: vec!["function".into()],
                                token_modifiers: Vec::new(),
                            },
                            full: Some(lsp::SemanticTokensFullOptions::Delta { delta: None }),
                            ..lsp::SemanticTokensOptions::default()
                        },
                    ),
                ),
                ..lsp::ServerCapabilities::default()
            },
            cx,
        )
        .await;

        let full_counter = Arc::new(AtomicUsize::new(0));
        let full_counter_clone = full_counter.clone();

        let mut full_request = cx
            .set_request_handler::<lsp::request::SemanticTokensFullRequest, _, _>(
                move |_, _, _| {
                    full_counter_clone.fetch_add(1, atomic::Ordering::Release);
                    async move {
                        Ok(Some(lsp::SemanticTokensResult::Tokens(
                            lsp::SemanticTokens {
                                data: vec![
                                    0, // delta_line
                                    3, // delta_start
                                    4, // length
                                    0, // token_type
                                    0, // token_modifiers_bitset
                                ],
                                // The server isn't capable of deltas, so even though we sent back
                                // a result ID, the client shouldn't request a delta.
                                result_id: Some("a".into()),
                            },
                        )))
                    }
                },
            );

        cx.set_state("ˇfn main() {}");
        assert!(full_request.next().await.is_some());

        cx.run_until_parked();

        // A second edit should trigger a second *full* request, not a delta.
        cx.set_state("ˇfn main() { a }");
        assert!(full_request.next().await.is_some());

        cx.run_until_parked();

        // "main" (offsets 3..7) is highlighted as a function.
        assert_eq!(
            extract_semantic_highlights(&cx.editor, &cx),
            vec![MultiBufferOffset(3)..MultiBufferOffset(7)]
        );

        assert_eq!(full_counter.load(atomic::Ordering::Acquire), 2);
    }
548
    // A delta-capable server that returns `result_id: None` forces the client
    // to fall back to full requests on subsequent changes.
    #[gpui::test]
    async fn lsp_semantic_tokens_full_none_result_id(cx: &mut TestAppContext) {
        init_test(cx, |_| {});

        // Enable full semantic tokens for Rust buffers.
        update_test_language_settings(cx, |language_settings| {
            language_settings.languages.0.insert(
                "Rust".into(),
                LanguageSettingsContent {
                    semantic_tokens: Some(SemanticTokens::Full),
                    ..LanguageSettingsContent::default()
                },
            );
        });

        // Server advertises delta support this time.
        let mut cx = EditorLspTestContext::new_rust(
            lsp::ServerCapabilities {
                semantic_tokens_provider: Some(
                    lsp::SemanticTokensServerCapabilities::SemanticTokensOptions(
                        lsp::SemanticTokensOptions {
                            legend: lsp::SemanticTokensLegend {
                                token_types: vec!["function".into()],
                                token_modifiers: Vec::new(),
                            },
                            full: Some(lsp::SemanticTokensFullOptions::Delta { delta: Some(true) }),
                            ..lsp::SemanticTokensOptions::default()
                        },
                    ),
                ),
                ..lsp::ServerCapabilities::default()
            },
            cx,
        )
        .await;

        let full_counter = Arc::new(AtomicUsize::new(0));
        let full_counter_clone = full_counter.clone();

        let mut full_request = cx
            .set_request_handler::<lsp::request::SemanticTokensFullRequest, _, _>(
                move |_, _, _| {
                    full_counter_clone.fetch_add(1, atomic::Ordering::Release);
                    async move {
                        Ok(Some(lsp::SemanticTokensResult::Tokens(
                            lsp::SemanticTokens {
                                data: vec![
                                    0, // delta_line
                                    3, // delta_start
                                    4, // length
                                    0, // token_type
                                    0, // token_modifiers_bitset
                                ],
                                result_id: None, // Sending back `None` forces the client to not use deltas.
                            },
                        )))
                    }
                },
            );

        cx.set_state("ˇfn main() {}");
        assert!(full_request.next().await.is_some());

        let task = cx.update_editor(|e, _, _| e.semantic_token_state.take_update_task());
        task.await;

        // Without a result ID to base a delta on, the second change must also
        // go through the full request.
        cx.set_state("ˇfn main() { a }");
        assert!(full_request.next().await.is_some());

        let task = cx.update_editor(|e, _, _| e.semantic_token_state.take_update_task());
        task.await;
        // "main" (offsets 3..7) is highlighted as a function.
        assert_eq!(
            extract_semantic_highlights(&cx.editor, &cx),
            vec![MultiBufferOffset(3)..MultiBufferOffset(7)]
        );
        assert_eq!(full_counter.load(atomic::Ordering::Acquire), 2);
    }
624
    // A delta-capable server that returns a `result_id` should get a delta
    // request (carrying that ID) for the next change instead of a full one.
    #[gpui::test]
    async fn lsp_semantic_tokens_delta(cx: &mut TestAppContext) {
        init_test(cx, |_| {});

        // Enable full semantic tokens for Rust buffers.
        update_test_language_settings(cx, |language_settings| {
            language_settings.languages.0.insert(
                "Rust".into(),
                LanguageSettingsContent {
                    semantic_tokens: Some(SemanticTokens::Full),
                    ..LanguageSettingsContent::default()
                },
            );
        });

        // Server advertises delta support.
        let mut cx = EditorLspTestContext::new_rust(
            lsp::ServerCapabilities {
                semantic_tokens_provider: Some(
                    lsp::SemanticTokensServerCapabilities::SemanticTokensOptions(
                        lsp::SemanticTokensOptions {
                            legend: lsp::SemanticTokensLegend {
                                token_types: vec!["function".into()],
                                token_modifiers: Vec::new(),
                            },
                            full: Some(lsp::SemanticTokensFullOptions::Delta { delta: Some(true) }),
                            ..lsp::SemanticTokensOptions::default()
                        },
                    ),
                ),
                ..lsp::ServerCapabilities::default()
            },
            cx,
        )
        .await;

        let full_counter = Arc::new(AtomicUsize::new(0));
        let full_counter_clone = full_counter.clone();
        let delta_counter = Arc::new(AtomicUsize::new(0));
        let delta_counter_clone = delta_counter.clone();

        let mut full_request = cx
            .set_request_handler::<lsp::request::SemanticTokensFullRequest, _, _>(
                move |_, _, _| {
                    full_counter_clone.fetch_add(1, atomic::Ordering::Release);
                    async move {
                        Ok(Some(lsp::SemanticTokensResult::Tokens(
                            lsp::SemanticTokens {
                                data: vec![
                                    0, // delta_line
                                    3, // delta_start
                                    4, // length
                                    0, // token_type
                                    0, // token_modifiers_bitset
                                ],
                                result_id: Some("a".into()),
                            },
                        )))
                    }
                },
            );

        // Delta handler asserts the client carries over the previous result
        // ID and answers with an empty edit list (tokens unchanged).
        let mut delta_request = cx
            .set_request_handler::<lsp::request::SemanticTokensFullDeltaRequest, _, _>(
                move |_, params, _| {
                    delta_counter_clone.fetch_add(1, atomic::Ordering::Release);
                    assert_eq!(params.previous_result_id, "a");
                    async move {
                        Ok(Some(lsp::SemanticTokensFullDeltaResult::TokensDelta(
                            lsp::SemanticTokensDelta {
                                edits: Vec::new(),
                                result_id: Some("b".into()),
                            },
                        )))
                    }
                },
            );

        // Initial request, for the empty buffer.
        cx.set_state("ˇfn main() {}");
        assert!(full_request.next().await.is_some());
        let task = cx.update_editor(|e, _, _| e.semantic_token_state.take_update_task());
        task.await;

        // The next change goes through the delta endpoint.
        cx.set_state("ˇfn main() { a }");
        assert!(delta_request.next().await.is_some());
        let task = cx.update_editor(|e, _, _| e.semantic_token_state.take_update_task());
        task.await;

        // "main" (offsets 3..7) is still highlighted after the empty delta.
        assert_eq!(
            extract_semantic_highlights(&cx.editor, &cx),
            vec![MultiBufferOffset(3)..MultiBufferOffset(7)]
        );

        assert_eq!(full_counter.load(atomic::Ordering::Acquire), 1);
        assert_eq!(delta_counter.load(atomic::Ordering::Acquire), 1);
    }
720
    // Two language servers for the same (TOML) buffer: highlights from both
    // should be merged, and each server should be queried exactly once.
    #[gpui::test]
    async fn lsp_semantic_tokens_multiserver_full(cx: &mut TestAppContext) {
        init_test(cx, |_| {});

        // Enable full semantic tokens for TOML buffers.
        update_test_language_settings(cx, |language_settings| {
            language_settings.languages.0.insert(
                "TOML".into(),
                LanguageSettingsContent {
                    semantic_tokens: Some(SemanticTokens::Full),
                    ..LanguageSettingsContent::default()
                },
            );
        });

        let toml_language = Arc::new(Language::new(
            LanguageConfig {
                name: "TOML".into(),
                matcher: LanguageMatcher {
                    path_suffixes: vec!["toml".into()],
                    ..LanguageMatcher::default()
                },
                ..LanguageConfig::default()
            },
            None,
        ));

        // We have 2 language servers for TOML in this test, each with its own
        // single-entry legend.
        let toml_legend_1 = lsp::SemanticTokensLegend {
            token_types: vec!["property".into()],
            token_modifiers: Vec::new(),
        };
        let toml_legend_2 = lsp::SemanticTokensLegend {
            token_types: vec!["number".into()],
            token_modifiers: Vec::new(),
        };

        let app_state = cx.update(workspace::AppState::test);

        cx.update(|cx| {
            assets::Assets.load_test_fonts(cx);
            crate::init(cx);
            workspace::init(app_state.clone(), cx);
        });

        let project = Project::test(app_state.fs.clone(), [], cx).await;
        let language_registry = project.read_with(cx, |project, _| project.languages().clone());

        // Per-server request counters to verify each is hit exactly once.
        let full_counter_toml_1 = Arc::new(AtomicUsize::new(0));
        let full_counter_toml_1_clone = full_counter_toml_1.clone();
        let full_counter_toml_2 = Arc::new(AtomicUsize::new(0));
        let full_counter_toml_2_clone = full_counter_toml_2.clone();

        let mut toml_server_1 = language_registry.register_fake_lsp(
            toml_language.name(),
            FakeLspAdapter {
                name: "toml1",
                capabilities: lsp::ServerCapabilities {
                    semantic_tokens_provider: Some(
                        lsp::SemanticTokensServerCapabilities::SemanticTokensOptions(
                            lsp::SemanticTokensOptions {
                                legend: toml_legend_1,
                                full: Some(lsp::SemanticTokensFullOptions::Delta { delta: None }),
                                ..lsp::SemanticTokensOptions::default()
                            },
                        ),
                    ),
                    ..lsp::ServerCapabilities::default()
                },
                initializer: Some(Box::new({
                    let full_counter_toml_1_clone = full_counter_toml_1_clone.clone();
                    move |fake_server| {
                        let full_counter = full_counter_toml_1_clone.clone();
                        fake_server
                            .set_request_handler::<lsp::request::SemanticTokensFullRequest, _, _>(
                                move |_, _| {
                                    full_counter.fetch_add(1, atomic::Ordering::Release);
                                    async move {
                                        Ok(Some(lsp::SemanticTokensResult::Tokens(
                                            lsp::SemanticTokens {
                                                // highlight 'a' as a property
                                                data: vec![
                                                    0, // delta_line
                                                    0, // delta_start
                                                    1, // length
                                                    0, // token_type
                                                    0, // token_modifiers_bitset
                                                ],
                                                result_id: Some("a".into()),
                                            },
                                        )))
                                    }
                                },
                            );
                    }
                })),
                ..FakeLspAdapter::default()
            },
        );
        let mut toml_server_2 = language_registry.register_fake_lsp(
            toml_language.name(),
            FakeLspAdapter {
                name: "toml2",
                capabilities: lsp::ServerCapabilities {
                    semantic_tokens_provider: Some(
                        lsp::SemanticTokensServerCapabilities::SemanticTokensOptions(
                            lsp::SemanticTokensOptions {
                                legend: toml_legend_2,
                                full: Some(lsp::SemanticTokensFullOptions::Delta { delta: None }),
                                ..lsp::SemanticTokensOptions::default()
                            },
                        ),
                    ),
                    ..lsp::ServerCapabilities::default()
                },
                initializer: Some(Box::new({
                    let full_counter_toml_2_clone = full_counter_toml_2_clone.clone();
                    move |fake_server| {
                        let full_counter = full_counter_toml_2_clone.clone();
                        fake_server
                            .set_request_handler::<lsp::request::SemanticTokensFullRequest, _, _>(
                                move |_, _| {
                                    full_counter.fetch_add(1, atomic::Ordering::Release);
                                    async move {
                                        Ok(Some(lsp::SemanticTokensResult::Tokens(
                                            lsp::SemanticTokens {
                                                // highlight '3' as a literal
                                                data: vec![
                                                    0, // delta_line
                                                    4, // delta_start
                                                    1, // length
                                                    0, // token_type
                                                    0, // token_modifiers_bitset
                                                ],
                                                result_id: Some("a".into()),
                                            },
                                        )))
                                    }
                                },
                            );
                    }
                })),
                ..FakeLspAdapter::default()
            },
        );
        language_registry.add(toml_language.clone());

        app_state
            .fs
            .as_fake()
            .insert_tree(
                EditorLspTestContext::root_path(),
                json!({
                    ".git": {},
                    "dir": {
                        "foo.toml": "a = 1\nb = 2\n",
                    }
                }),
            )
            .await;

        let (multi_workspace, cx) =
            cx.add_window_view(|window, cx| MultiWorkspace::test_new(project.clone(), window, cx));
        let workspace = multi_workspace.read_with(cx, |mw, _| mw.workspace().clone());
        project
            .update(cx, |project, cx| {
                project.find_or_create_worktree(EditorLspTestContext::root_path(), true, cx)
            })
            .await
            .unwrap();
        cx.read(|cx| workspace.read(cx).worktree_scans_complete(cx))
            .await;

        let toml_file = cx.read(|cx| workspace.file_project_paths(cx)[0].clone());
        let toml_item = workspace
            .update_in(cx, |workspace, window, cx| {
                workspace.open_path(toml_file, None, true, window, cx)
            })
            .await
            .expect("Could not open test file");

        let editor = cx.update(|_, cx| {
            toml_item
                .act_as::<Editor>(cx)
                .expect("Opened test file wasn't an editor")
        });

        editor.update_in(cx, |editor, window, cx| {
            let nav_history = workspace
                .read(cx)
                .active_pane()
                .read(cx)
                .nav_history_for_item(&cx.entity());
            editor.set_nav_history(Some(nav_history));
            window.focus(&editor.focus_handle(cx), cx)
        });

        // Wait for both fake servers to start.
        let _toml_server_1 = toml_server_1.next().await.unwrap();
        let _toml_server_2 = toml_server_2.next().await.unwrap();

        // Trigger semantic tokens.
        editor.update_in(cx, |editor, _, cx| {
            editor.edit([(MultiBufferOffset(0)..MultiBufferOffset(1), "b")], cx);
        });
        // Advance past the refresh debounce, then drive the update task.
        cx.executor().advance_clock(Duration::from_millis(200));
        let task = editor.update_in(cx, |e, _, _| e.semantic_token_state.take_update_task());
        cx.run_until_parked();
        task.await;

        // One highlight per server, merged and sorted: 'b' from server 1 and
        // '1' from server 2.
        assert_eq!(
            extract_semantic_highlights(&editor, &cx),
            vec![
                MultiBufferOffset(0)..MultiBufferOffset(1),
                MultiBufferOffset(4)..MultiBufferOffset(5),
            ]
        );

        assert_eq!(full_counter_toml_1.load(atomic::Ordering::Acquire), 1);
        assert_eq!(full_counter_toml_2.load(atomic::Ordering::Acquire), 1);
    }
940
941 #[gpui::test]
942 async fn lsp_semantic_tokens_multibuffer_part(cx: &mut TestAppContext) {
943 init_test(cx, |_| {});
944
945 update_test_language_settings(cx, |language_settings| {
946 language_settings.languages.0.insert(
947 "TOML".into(),
948 LanguageSettingsContent {
949 semantic_tokens: Some(SemanticTokens::Full),
950 ..LanguageSettingsContent::default()
951 },
952 );
953 language_settings.languages.0.insert(
954 "Rust".into(),
955 LanguageSettingsContent {
956 semantic_tokens: Some(SemanticTokens::Full),
957 ..LanguageSettingsContent::default()
958 },
959 );
960 });
961
962 let toml_language = Arc::new(Language::new(
963 LanguageConfig {
964 name: "TOML".into(),
965 matcher: LanguageMatcher {
966 path_suffixes: vec!["toml".into()],
967 ..LanguageMatcher::default()
968 },
969 ..LanguageConfig::default()
970 },
971 None,
972 ));
973 let rust_language = Arc::new(Language::new(
974 LanguageConfig {
975 name: "Rust".into(),
976 matcher: LanguageMatcher {
977 path_suffixes: vec!["rs".into()],
978 ..LanguageMatcher::default()
979 },
980 ..LanguageConfig::default()
981 },
982 None,
983 ));
984
985 let toml_legend = lsp::SemanticTokensLegend {
986 token_types: vec!["property".into()],
987 token_modifiers: Vec::new(),
988 };
989 let rust_legend = lsp::SemanticTokensLegend {
990 token_types: vec!["constant".into()],
991 token_modifiers: Vec::new(),
992 };
993
994 let app_state = cx.update(workspace::AppState::test);
995
996 cx.update(|cx| {
997 assets::Assets.load_test_fonts(cx);
998 crate::init(cx);
999 workspace::init(app_state.clone(), cx);
1000 });
1001
1002 let project = Project::test(app_state.fs.clone(), [], cx).await;
1003 let language_registry = project.read_with(cx, |project, _| project.languages().clone());
1004 let full_counter_toml = Arc::new(AtomicUsize::new(0));
1005 let full_counter_toml_clone = full_counter_toml.clone();
1006
1007 let mut toml_server = language_registry.register_fake_lsp(
1008 toml_language.name(),
1009 FakeLspAdapter {
1010 name: "toml",
1011 capabilities: lsp::ServerCapabilities {
1012 semantic_tokens_provider: Some(
1013 lsp::SemanticTokensServerCapabilities::SemanticTokensOptions(
1014 lsp::SemanticTokensOptions {
1015 legend: toml_legend,
1016 full: Some(lsp::SemanticTokensFullOptions::Delta { delta: None }),
1017 ..lsp::SemanticTokensOptions::default()
1018 },
1019 ),
1020 ),
1021 ..lsp::ServerCapabilities::default()
1022 },
1023 initializer: Some(Box::new({
1024 let full_counter_toml_clone = full_counter_toml_clone.clone();
1025 move |fake_server| {
1026 let full_counter = full_counter_toml_clone.clone();
1027 fake_server
1028 .set_request_handler::<lsp::request::SemanticTokensFullRequest, _, _>(
1029 move |_, _| {
1030 full_counter.fetch_add(1, atomic::Ordering::Release);
1031 async move {
1032 Ok(Some(lsp::SemanticTokensResult::Tokens(
1033 lsp::SemanticTokens {
1034 // highlight 'a', 'b', 'c' as properties on lines 0, 1, 2
1035 data: vec![
1036 0, // delta_line (line 0)
1037 0, // delta_start
1038 1, // length
1039 0, // token_type
1040 0, // token_modifiers_bitset
1041 1, // delta_line (line 1)
1042 0, // delta_start
1043 1, // length
1044 0, // token_type
1045 0, // token_modifiers_bitset
1046 1, // delta_line (line 2)
1047 0, // delta_start
1048 1, // length
1049 0, // token_type
1050 0, // token_modifiers_bitset
1051 ],
1052 result_id: Some("a".into()),
1053 },
1054 )))
1055 }
1056 },
1057 );
1058 }
1059 })),
1060 ..FakeLspAdapter::default()
1061 },
1062 );
1063 language_registry.add(toml_language.clone());
1064 let mut rust_server = language_registry.register_fake_lsp(
1065 rust_language.name(),
1066 FakeLspAdapter {
1067 name: "rust",
1068 capabilities: lsp::ServerCapabilities {
1069 semantic_tokens_provider: Some(
1070 lsp::SemanticTokensServerCapabilities::SemanticTokensOptions(
1071 lsp::SemanticTokensOptions {
1072 legend: rust_legend,
1073 full: Some(lsp::SemanticTokensFullOptions::Delta { delta: None }),
1074 ..lsp::SemanticTokensOptions::default()
1075 },
1076 ),
1077 ),
1078 ..lsp::ServerCapabilities::default()
1079 },
1080 ..FakeLspAdapter::default()
1081 },
1082 );
1083 language_registry.add(rust_language.clone());
1084
1085 app_state
1086 .fs
1087 .as_fake()
1088 .insert_tree(
1089 EditorLspTestContext::root_path(),
1090 json!({
1091 ".git": {},
1092 "dir": {
1093 "foo.toml": "a = 1\nb = 2\nc = 3\n",
1094 "bar.rs": "const c: usize = 3;\n",
1095 }
1096 }),
1097 )
1098 .await;
1099
1100 let (multi_workspace, cx) =
1101 cx.add_window_view(|window, cx| MultiWorkspace::test_new(project.clone(), window, cx));
1102 let workspace = multi_workspace.read_with(cx, |mw, _| mw.workspace().clone());
1103 project
1104 .update(cx, |project, cx| {
1105 project.find_or_create_worktree(EditorLspTestContext::root_path(), true, cx)
1106 })
1107 .await
1108 .unwrap();
1109 cx.read(|cx| workspace.read(cx).worktree_scans_complete(cx))
1110 .await;
1111
1112 let toml_file = cx.read(|cx| workspace.file_project_paths(cx)[1].clone());
1113 let rust_file = cx.read(|cx| workspace.file_project_paths(cx)[0].clone());
1114 let (toml_item, rust_item) = workspace.update_in(cx, |workspace, window, cx| {
1115 (
1116 workspace.open_path(toml_file, None, true, window, cx),
1117 workspace.open_path(rust_file, None, true, window, cx),
1118 )
1119 });
1120 let toml_item = toml_item.await.expect("Could not open test file");
1121 let rust_item = rust_item.await.expect("Could not open test file");
1122
1123 let (toml_editor, rust_editor) = cx.update(|_, cx| {
1124 (
1125 toml_item
1126 .act_as::<Editor>(cx)
1127 .expect("Opened test file wasn't an editor"),
1128 rust_item
1129 .act_as::<Editor>(cx)
1130 .expect("Opened test file wasn't an editor"),
1131 )
1132 });
1133 let toml_buffer = cx.read(|cx| {
1134 toml_editor
1135 .read(cx)
1136 .buffer()
1137 .read(cx)
1138 .as_singleton()
1139 .unwrap()
1140 });
1141 let rust_buffer = cx.read(|cx| {
1142 rust_editor
1143 .read(cx)
1144 .buffer()
1145 .read(cx)
1146 .as_singleton()
1147 .unwrap()
1148 });
1149 let multibuffer = cx.new(|cx| {
1150 let mut multibuffer = MultiBuffer::new(Capability::ReadWrite);
1151 multibuffer.push_excerpts(
1152 toml_buffer.clone(),
1153 [ExcerptRange::new(Point::new(0, 0)..Point::new(1, 0))],
1154 cx,
1155 );
1156 multibuffer.push_excerpts(
1157 rust_buffer.clone(),
1158 [ExcerptRange::new(Point::new(0, 0)..Point::new(1, 0))],
1159 cx,
1160 );
1161 multibuffer
1162 });
1163
1164 let editor = workspace.update_in(cx, |workspace, window, cx| {
1165 let editor = cx.new(|cx| build_editor_with_project(project, multibuffer, window, cx));
1166 workspace.add_item_to_active_pane(Box::new(editor.clone()), None, true, window, cx);
1167 editor
1168 });
1169 editor.update_in(cx, |editor, window, cx| {
1170 let nav_history = workspace
1171 .read(cx)
1172 .active_pane()
1173 .read(cx)
1174 .nav_history_for_item(&cx.entity());
1175 editor.set_nav_history(Some(nav_history));
1176 window.focus(&editor.focus_handle(cx), cx)
1177 });
1178
1179 let _toml_server = toml_server.next().await.unwrap();
1180 let _rust_server = rust_server.next().await.unwrap();
1181
1182 // Initial request.
1183 cx.executor().advance_clock(Duration::from_millis(200));
1184 let task = editor.update_in(cx, |e, _, _| e.semantic_token_state.take_update_task());
1185 cx.run_until_parked();
1186 task.await;
1187 assert_eq!(full_counter_toml.load(atomic::Ordering::Acquire), 1);
1188 cx.run_until_parked();
1189
1190 // Initially, excerpt only covers line 0, so only the 'a' token should be highlighted.
1191 // The excerpt content is "a = 1\n" (6 chars), so 'a' is at offset 0.
1192 assert_eq!(
1193 extract_semantic_highlights(&editor, &cx),
1194 vec![MultiBufferOffset(0)..MultiBufferOffset(1)]
1195 );
1196
1197 // Get the excerpt id for the TOML excerpt and expand it down by 2 lines.
1198 let toml_excerpt_id =
1199 editor.read_with(cx, |editor, cx| editor.buffer().read(cx).excerpt_ids()[0]);
1200 editor.update_in(cx, |editor, _, cx| {
1201 editor.buffer().update(cx, |buffer, cx| {
1202 buffer.expand_excerpts([toml_excerpt_id], 2, ExpandExcerptDirection::Down, cx);
1203 });
1204 });
1205
1206 // Wait for semantic tokens to be re-fetched after expansion.
1207 cx.executor().advance_clock(Duration::from_millis(200));
1208 let task = editor.update_in(cx, |e, _, _| e.semantic_token_state.take_update_task());
1209 cx.run_until_parked();
1210 task.await;
1211
1212 // After expansion, the excerpt covers lines 0-2, so 'a', 'b', 'c' should all be highlighted.
1213 // Content is now "a = 1\nb = 2\nc = 3\n" (18 chars).
1214 // 'a' at offset 0, 'b' at offset 6, 'c' at offset 12.
1215 assert_eq!(
1216 extract_semantic_highlights(&editor, &cx),
1217 vec![
1218 MultiBufferOffset(0)..MultiBufferOffset(1),
1219 MultiBufferOffset(6)..MultiBufferOffset(7),
1220 MultiBufferOffset(12)..MultiBufferOffset(13),
1221 ]
1222 );
1223 }
1224
    // Regression test: two excerpts of the *same* underlying buffer in one
    // multibuffer must not trigger duplicate semantic-token fetches. After
    // editing both excerpts, the debounced update issues only one additional
    // SemanticTokensFull request (counter goes 1 -> 2, not 1 -> 5).
    #[gpui::test]
    async fn lsp_semantic_tokens_multibuffer_shared(cx: &mut TestAppContext) {
        init_test(cx, |_| {});

        // Enable full semantic tokens for the TOML language only.
        update_test_language_settings(cx, |language_settings| {
            language_settings.languages.0.insert(
                "TOML".into(),
                LanguageSettingsContent {
                    semantic_tokens: Some(SemanticTokens::Full),
                    ..LanguageSettingsContent::default()
                },
            );
        });

        let toml_language = Arc::new(Language::new(
            LanguageConfig {
                name: "TOML".into(),
                matcher: LanguageMatcher {
                    path_suffixes: vec!["toml".into()],
                    ..LanguageMatcher::default()
                },
                ..LanguageConfig::default()
            },
            None,
        ));

        // Single token type; the fake server's responses always use index 0.
        let toml_legend = lsp::SemanticTokensLegend {
            token_types: vec!["property".into()],
            token_modifiers: Vec::new(),
        };

        let app_state = cx.update(workspace::AppState::test);

        cx.update(|cx| {
            assets::Assets.load_test_fonts(cx);
            crate::init(cx);
            workspace::init(app_state.clone(), cx);
        });

        let project = Project::test(app_state.fs.clone(), [], cx).await;
        let language_registry = project.read_with(cx, |project, _| project.languages().clone());
        // Counts SemanticTokensFull requests received by the fake TOML server,
        // so the test can assert how many fetches actually happened.
        let full_counter_toml = Arc::new(AtomicUsize::new(0));
        let full_counter_toml_clone = full_counter_toml.clone();

        let mut toml_server = language_registry.register_fake_lsp(
            toml_language.name(),
            FakeLspAdapter {
                name: "toml",
                capabilities: lsp::ServerCapabilities {
                    semantic_tokens_provider: Some(
                        lsp::SemanticTokensServerCapabilities::SemanticTokensOptions(
                            lsp::SemanticTokensOptions {
                                legend: toml_legend,
                                full: Some(lsp::SemanticTokensFullOptions::Delta { delta: None }),
                                ..lsp::SemanticTokensOptions::default()
                            },
                        ),
                    ),
                    ..lsp::ServerCapabilities::default()
                },
                initializer: Some(Box::new({
                    let full_counter_toml_clone = full_counter_toml_clone.clone();
                    move |fake_server| {
                        let full_counter = full_counter_toml_clone.clone();
                        fake_server
                            .set_request_handler::<lsp::request::SemanticTokensFullRequest, _, _>(
                                move |_, _| {
                                    // Bump the counter on every full request.
                                    full_counter.fetch_add(1, atomic::Ordering::Release);
                                    async move {
                                        Ok(Some(lsp::SemanticTokensResult::Tokens(
                                            lsp::SemanticTokens {
                                                // highlight 'a' as a property
                                                data: vec![
                                                    0, // delta_line
                                                    0, // delta_start
                                                    1, // length
                                                    0, // token_type
                                                    0, // token_modifiers_bitset
                                                ],
                                                result_id: Some("a".into()),
                                            },
                                        )))
                                    }
                                },
                            );
                    }
                })),
                ..FakeLspAdapter::default()
            },
        );
        language_registry.add(toml_language.clone());

        app_state
            .fs
            .as_fake()
            .insert_tree(
                EditorLspTestContext::root_path(),
                json!({
                    ".git": {},
                    "dir": {
                        "foo.toml": "a = 1\nb = 2\n",
                    }
                }),
            )
            .await;

        let (multi_workspace, cx) =
            cx.add_window_view(|window, cx| MultiWorkspace::test_new(project.clone(), window, cx));
        let workspace = multi_workspace.read_with(cx, |mw, _| mw.workspace().clone());
        project
            .update(cx, |project, cx| {
                project.find_or_create_worktree(EditorLspTestContext::root_path(), true, cx)
            })
            .await
            .unwrap();
        cx.read(|cx| workspace.read(cx).worktree_scans_complete(cx))
            .await;

        // Open foo.toml in a regular (singleton) editor; its buffer is then
        // shared with the multibuffer built below.
        let toml_file = cx.read(|cx| workspace.file_project_paths(cx)[0].clone());
        let toml_item = workspace
            .update_in(cx, |workspace, window, cx| {
                workspace.open_path(toml_file, None, true, window, cx)
            })
            .await
            .expect("Could not open test file");

        let toml_editor = cx.update(|_, cx| {
            toml_item
                .act_as::<Editor>(cx)
                .expect("Opened test file wasn't an editor")
        });
        let toml_buffer = cx.read(|cx| {
            toml_editor
                .read(cx)
                .buffer()
                .read(cx)
                .as_singleton()
                .unwrap()
        });
        // Two excerpts of the SAME buffer — the scenario under test.
        let multibuffer = cx.new(|cx| {
            let mut multibuffer = MultiBuffer::new(Capability::ReadWrite);
            multibuffer.push_excerpts(
                toml_buffer.clone(),
                [ExcerptRange::new(Point::new(0, 0)..Point::new(2, 0))],
                cx,
            );
            multibuffer.push_excerpts(
                toml_buffer.clone(),
                [ExcerptRange::new(Point::new(0, 0)..Point::new(2, 0))],
                cx,
            );
            multibuffer
        });

        let editor = workspace.update_in(cx, |_, window, cx| {
            cx.new(|cx| build_editor_with_project(project, multibuffer, window, cx))
        });
        editor.update_in(cx, |editor, window, cx| {
            let nav_history = workspace
                .read(cx)
                .active_pane()
                .read(cx)
                .nav_history_for_item(&cx.entity());
            editor.set_nav_history(Some(nav_history));
            window.focus(&editor.focus_handle(cx), cx)
        });

        let _toml_server = toml_server.next().await.unwrap();

        // Initial request.
        // NOTE(review): 200ms presumably covers the semantic-token debounce
        // interval — confirm against the debounce constant if it changes.
        cx.executor().advance_clock(Duration::from_millis(200));
        let task = editor.update_in(cx, |e, _, _| e.semantic_token_state.take_update_task());
        cx.run_until_parked();
        task.await;
        assert_eq!(full_counter_toml.load(atomic::Ordering::Acquire), 1);

        // Edit two parts of the multibuffer, which both map to the same buffer.
        //
        // Without debouncing, this grabs semantic tokens 4 times (twice for the
        // toml editor, and twice for the multibuffer).
        editor.update_in(cx, |editor, _, cx| {
            editor.edit([(MultiBufferOffset(0)..MultiBufferOffset(1), "b")], cx);
            editor.edit([(MultiBufferOffset(12)..MultiBufferOffset(13), "c")], cx);
        });
        cx.executor().advance_clock(Duration::from_millis(200));
        let task = editor.update_in(cx, |e, _, _| e.semantic_token_state.take_update_task());
        cx.run_until_parked();
        task.await;
        assert_eq!(
            extract_semantic_highlights(&editor, &cx),
            vec![MultiBufferOffset(0)..MultiBufferOffset(1)]
        );

        // Exactly one more fetch for both edits combined.
        assert_eq!(full_counter_toml.load(atomic::Ordering::Acquire), 2);
    }
1420
1421 fn extract_semantic_highlights(
1422 editor: &Entity<Editor>,
1423 cx: &TestAppContext,
1424 ) -> Vec<Range<MultiBufferOffset>> {
1425 editor.read_with(cx, |editor, cx| {
1426 let multi_buffer_snapshot = editor.buffer().read(cx).snapshot(cx);
1427 editor
1428 .display_map
1429 .read(cx)
1430 .semantic_token_highlights
1431 .iter()
1432 .flat_map(|(_, (v, _))| v.iter())
1433 .map(|highlights| highlights.range.to_offset(&multi_buffer_snapshot))
1434 .collect()
1435 })
1436 }
1437
    // Verifies that changing `semantic_token_rules` in the global LSP settings
    // restyles semantic-token highlights: after setting a custom red foreground
    // for "function" tokens and refetching, the highlight carries that color.
    #[gpui::test]
    async fn test_semantic_tokens_rules_changes_restyle_tokens(cx: &mut TestAppContext) {
        use gpui::{Hsla, Rgba, UpdateGlobal as _};
        use settings::{GlobalLspSettingsContent, SemanticTokenRule};

        init_test(cx, |_| {});

        update_test_language_settings(cx, |language_settings| {
            language_settings.languages.0.insert(
                "Rust".into(),
                LanguageSettingsContent {
                    semantic_tokens: Some(SemanticTokens::Full),
                    ..LanguageSettingsContent::default()
                },
            );
        });

        let mut cx = EditorLspTestContext::new_rust(
            lsp::ServerCapabilities {
                semantic_tokens_provider: Some(
                    lsp::SemanticTokensServerCapabilities::SemanticTokensOptions(
                        lsp::SemanticTokensOptions {
                            legend: lsp::SemanticTokensLegend {
                                token_types: Vec::from(["function".into()]),
                                token_modifiers: Vec::new(),
                            },
                            full: Some(lsp::SemanticTokensFullOptions::Delta { delta: None }),
                            ..lsp::SemanticTokensOptions::default()
                        },
                    ),
                ),
                ..lsp::ServerCapabilities::default()
            },
            cx,
        )
        .await;

        // Fake server always reports one 4-char "function" token at line 0,
        // column 3 — i.e. `main` in "fn main() {}".
        let mut full_request = cx
            .set_request_handler::<lsp::request::SemanticTokensFullRequest, _, _>(
                move |_, _, _| {
                    async move {
                        Ok(Some(lsp::SemanticTokensResult::Tokens(
                            lsp::SemanticTokens {
                                data: vec![
                                    0, // delta_line
                                    3, // delta_start
                                    4, // length
                                    0, // token_type (function)
                                    0, // token_modifiers_bitset
                                ],
                                result_id: None,
                            },
                        )))
                    }
                },
            );

        // Trigger initial semantic tokens fetch
        cx.set_state("ˇfn main() {}");
        full_request.next().await;
        cx.run_until_parked();

        // Verify initial highlights exist (with no custom color yet)
        let initial_ranges = extract_semantic_highlights(&cx.editor, &cx);
        assert_eq!(
            initial_ranges,
            vec![MultiBufferOffset(3)..MultiBufferOffset(7)],
            "Should have initial semantic token highlights"
        );
        let initial_styles = extract_semantic_highlight_styles(&cx.editor, &cx);
        assert_eq!(initial_styles.len(), 1, "Should have one highlight style");
        // Initial color should be None or theme default (not red or blue)
        let initial_color = initial_styles[0].color;

        // Set a custom foreground color for function tokens via settings.json
        let red_color = Rgba {
            r: 1.0,
            g: 0.0,
            b: 0.0,
            a: 1.0,
        };
        cx.update(|_, cx| {
            SettingsStore::update_global(cx, |store, cx| {
                store.update_user_settings(cx, |settings| {
                    settings.global_lsp_settings = Some(GlobalLspSettingsContent {
                        semantic_token_rules: Some(SemanticTokenRules {
                            rules: Vec::from([SemanticTokenRule {
                                token_type: Some("function".to_string()),
                                foreground_color: Some(red_color),
                                ..SemanticTokenRule::default()
                            }]),
                        }),
                        ..GlobalLspSettingsContent::default()
                    });
                });
            });
        });

        // Trigger a refetch by making an edit (which forces semantic tokens update)
        cx.set_state("ˇfn main() { }");
        full_request.next().await;
        cx.run_until_parked();

        // Verify the highlights now have the custom red color
        let styles_after_settings_change = extract_semantic_highlight_styles(&cx.editor, &cx);
        assert_eq!(
            styles_after_settings_change.len(),
            1,
            "Should still have one highlight"
        );
        assert_eq!(
            styles_after_settings_change[0].color,
            Some(Hsla::from(red_color)),
            "Highlight should have the custom red color from settings.json"
        );
        assert_ne!(
            styles_after_settings_change[0].color, initial_color,
            "Color should have changed from initial"
        );
    }
1558
    // Verifies that `experimental_theme_overrides` restyle semantic-token
    // highlights without any refetch: setting an override recolors the token,
    // changing it recolors again, and clearing it reverts to the theme color.
    #[gpui::test]
    async fn test_theme_override_changes_restyle_semantic_tokens(cx: &mut TestAppContext) {
        use collections::IndexMap;
        use gpui::{Hsla, Rgba, UpdateGlobal as _};
        use theme::{HighlightStyleContent, ThemeStyleContent};

        init_test(cx, |_| {});

        update_test_language_settings(cx, |language_settings| {
            language_settings.languages.0.insert(
                "Rust".into(),
                LanguageSettingsContent {
                    semantic_tokens: Some(SemanticTokens::Full),
                    ..LanguageSettingsContent::default()
                },
            );
        });

        let mut cx = EditorLspTestContext::new_rust(
            lsp::ServerCapabilities {
                semantic_tokens_provider: Some(
                    lsp::SemanticTokensServerCapabilities::SemanticTokensOptions(
                        lsp::SemanticTokensOptions {
                            legend: lsp::SemanticTokensLegend {
                                token_types: Vec::from(["function".into()]),
                                token_modifiers: Vec::new(),
                            },
                            full: Some(lsp::SemanticTokensFullOptions::Delta { delta: None }),
                            ..lsp::SemanticTokensOptions::default()
                        },
                    ),
                ),
                ..lsp::ServerCapabilities::default()
            },
            cx,
        )
        .await;

        // Fake server always reports one "function" token covering `main`.
        let mut full_request = cx
            .set_request_handler::<lsp::request::SemanticTokensFullRequest, _, _>(
                move |_, _, _| async move {
                    Ok(Some(lsp::SemanticTokensResult::Tokens(
                        lsp::SemanticTokens {
                            data: vec![
                                0, // delta_line
                                3, // delta_start
                                4, // length
                                0, // token_type (function)
                                0, // token_modifiers_bitset
                            ],
                            result_id: None,
                        },
                    )))
                },
            );

        cx.set_state("ˇfn main() {}");
        full_request.next().await;
        cx.run_until_parked();

        let initial_styles = extract_semantic_highlight_styles(&cx.editor, &cx);
        assert_eq!(initial_styles.len(), 1, "Should have one highlight style");
        let initial_color = initial_styles[0].color;

        // Changing experimental_theme_overrides triggers GlobalTheme reload,
        // which fires theme_changed → refresh_semantic_token_highlights.
        let red_color: Hsla = Rgba {
            r: 1.0,
            g: 0.0,
            b: 0.0,
            a: 1.0,
        }
        .into();
        cx.update(|_, cx| {
            SettingsStore::update_global(cx, |store, cx| {
                store.update_user_settings(cx, |settings| {
                    settings.theme.experimental_theme_overrides = Some(ThemeStyleContent {
                        // Hex string must match `red_color` above.
                        syntax: IndexMap::from_iter([(
                            "function".to_string(),
                            HighlightStyleContent {
                                color: Some("#ff0000".to_string()),
                                background_color: None,
                                font_style: None,
                                font_weight: None,
                            },
                        )]),
                        ..ThemeStyleContent::default()
                    });
                });
            });
        });

        cx.executor().advance_clock(Duration::from_millis(200));
        cx.run_until_parked();

        let styles_after_override = extract_semantic_highlight_styles(&cx.editor, &cx);
        assert_eq!(styles_after_override.len(), 1);
        assert_eq!(
            styles_after_override[0].color,
            Some(red_color),
            "Highlight should have red color from theme override"
        );
        assert_ne!(
            styles_after_override[0].color, initial_color,
            "Color should have changed from initial"
        );

        // Changing the override to a different color also restyles.
        let blue_color: Hsla = Rgba {
            r: 0.0,
            g: 0.0,
            b: 1.0,
            a: 1.0,
        }
        .into();
        cx.update(|_, cx| {
            SettingsStore::update_global(cx, |store, cx| {
                store.update_user_settings(cx, |settings| {
                    settings.theme.experimental_theme_overrides = Some(ThemeStyleContent {
                        syntax: IndexMap::from_iter([(
                            "function".to_string(),
                            HighlightStyleContent {
                                color: Some("#0000ff".to_string()),
                                background_color: None,
                                font_style: None,
                                font_weight: None,
                            },
                        )]),
                        ..ThemeStyleContent::default()
                    });
                });
            });
        });

        cx.executor().advance_clock(Duration::from_millis(200));
        cx.run_until_parked();

        let styles_after_second_override = extract_semantic_highlight_styles(&cx.editor, &cx);
        assert_eq!(styles_after_second_override.len(), 1);
        assert_eq!(
            styles_after_second_override[0].color,
            Some(blue_color),
            "Highlight should have blue color from updated theme override"
        );

        // Removing overrides reverts to the original theme color.
        cx.update(|_, cx| {
            SettingsStore::update_global(cx, |store, cx| {
                store.update_user_settings(cx, |settings| {
                    settings.theme.experimental_theme_overrides = None;
                });
            });
        });

        cx.executor().advance_clock(Duration::from_millis(200));
        cx.run_until_parked();

        let styles_after_clear = extract_semantic_highlight_styles(&cx.editor, &cx);
        assert_eq!(styles_after_clear.len(), 1);
        assert_eq!(
            styles_after_clear[0].color, initial_color,
            "Highlight should revert to initial color after clearing overrides"
        );
    }
1723
    // Like the experimental-overrides test above, but for *per-theme*
    // overrides: `theme_overrides` keyed by the active theme's name must also
    // restyle existing semantic-token highlights.
    #[gpui::test]
    async fn test_per_theme_overrides_restyle_semantic_tokens(cx: &mut TestAppContext) {
        use collections::IndexMap;
        use gpui::{Hsla, Rgba, UpdateGlobal as _};
        use theme::{HighlightStyleContent, ThemeStyleContent};
        use ui::ActiveTheme as _;

        init_test(cx, |_| {});

        update_test_language_settings(cx, |language_settings| {
            language_settings.languages.0.insert(
                "Rust".into(),
                LanguageSettingsContent {
                    semantic_tokens: Some(SemanticTokens::Full),
                    ..LanguageSettingsContent::default()
                },
            );
        });

        let mut cx = EditorLspTestContext::new_rust(
            lsp::ServerCapabilities {
                semantic_tokens_provider: Some(
                    lsp::SemanticTokensServerCapabilities::SemanticTokensOptions(
                        lsp::SemanticTokensOptions {
                            legend: lsp::SemanticTokensLegend {
                                token_types: Vec::from(["function".into()]),
                                token_modifiers: Vec::new(),
                            },
                            full: Some(lsp::SemanticTokensFullOptions::Delta { delta: None }),
                            ..lsp::SemanticTokensOptions::default()
                        },
                    ),
                ),
                ..lsp::ServerCapabilities::default()
            },
            cx,
        )
        .await;

        // Fake server always reports one "function" token covering `main`.
        let mut full_request = cx
            .set_request_handler::<lsp::request::SemanticTokensFullRequest, _, _>(
                move |_, _, _| async move {
                    Ok(Some(lsp::SemanticTokensResult::Tokens(
                        lsp::SemanticTokens {
                            data: vec![
                                0, // delta_line
                                3, // delta_start
                                4, // length
                                0, // token_type (function)
                                0, // token_modifiers_bitset
                            ],
                            result_id: None,
                        },
                    )))
                },
            );

        cx.set_state("ˇfn main() {}");
        full_request.next().await;
        cx.run_until_parked();

        let initial_styles = extract_semantic_highlight_styles(&cx.editor, &cx);
        assert_eq!(initial_styles.len(), 1, "Should have one highlight style");
        let initial_color = initial_styles[0].color;

        // Per-theme overrides (theme_overrides keyed by theme name) also go through
        // GlobalTheme reload → theme_changed → refresh_semantic_token_highlights.
        let theme_name = cx.update(|_, cx| cx.theme().name.to_string());
        let green_color: Hsla = Rgba {
            r: 0.0,
            g: 1.0,
            b: 0.0,
            a: 1.0,
        }
        .into();
        cx.update(|_, cx| {
            SettingsStore::update_global(cx, |store, cx| {
                store.update_user_settings(cx, |settings| {
                    // Key the override by the currently active theme so it applies.
                    settings.theme.theme_overrides = collections::HashMap::from_iter([(
                        theme_name.clone(),
                        ThemeStyleContent {
                            // Hex string must match `green_color` above.
                            syntax: IndexMap::from_iter([(
                                "function".to_string(),
                                HighlightStyleContent {
                                    color: Some("#00ff00".to_string()),
                                    background_color: None,
                                    font_style: None,
                                    font_weight: None,
                                },
                            )]),
                            ..ThemeStyleContent::default()
                        },
                    )]);
                });
            });
        });

        cx.executor().advance_clock(Duration::from_millis(200));
        cx.run_until_parked();

        let styles_after_override = extract_semantic_highlight_styles(&cx.editor, &cx);
        assert_eq!(styles_after_override.len(), 1);
        assert_eq!(
            styles_after_override[0].color,
            Some(green_color),
            "Highlight should have green color from per-theme override"
        );
        assert_ne!(
            styles_after_override[0].color, initial_color,
            "Color should have changed from initial"
        );
    }
1836
    // Verifies that stopping the language server(s) backing a buffer clears
    // that buffer's semantic-token highlights instead of leaving stale ones.
    #[gpui::test]
    async fn test_stopping_language_server_clears_semantic_tokens(cx: &mut TestAppContext) {
        init_test(cx, |_| {});

        update_test_language_settings(cx, |language_settings| {
            language_settings.languages.0.insert(
                "Rust".into(),
                LanguageSettingsContent {
                    semantic_tokens: Some(SemanticTokens::Full),
                    ..LanguageSettingsContent::default()
                },
            );
        });

        let mut cx = EditorLspTestContext::new_rust(
            lsp::ServerCapabilities {
                semantic_tokens_provider: Some(
                    lsp::SemanticTokensServerCapabilities::SemanticTokensOptions(
                        lsp::SemanticTokensOptions {
                            legend: lsp::SemanticTokensLegend {
                                token_types: vec!["function".into()],
                                token_modifiers: Vec::new(),
                            },
                            full: Some(lsp::SemanticTokensFullOptions::Delta { delta: None }),
                            ..lsp::SemanticTokensOptions::default()
                        },
                    ),
                ),
                ..lsp::ServerCapabilities::default()
            },
            cx,
        )
        .await;

        // Fake server always reports one "function" token covering `main`.
        let mut full_request = cx
            .set_request_handler::<lsp::request::SemanticTokensFullRequest, _, _>(
                move |_, _, _| async move {
                    Ok(Some(lsp::SemanticTokensResult::Tokens(
                        lsp::SemanticTokens {
                            data: vec![
                                0, // delta_line
                                3, // delta_start
                                4, // length
                                0, // token_type
                                0, // token_modifiers_bitset
                            ],
                            result_id: None,
                        },
                    )))
                },
            );

        cx.set_state("ˇfn main() {}");
        assert!(full_request.next().await.is_some());
        cx.run_until_parked();

        assert_eq!(
            extract_semantic_highlights(&cx.editor, &cx),
            vec![MultiBufferOffset(3)..MultiBufferOffset(7)],
            "Semantic tokens should be present before stopping the server"
        );

        // Stop all language servers for the editor's buffers.
        cx.update_editor(|editor, _, cx| {
            let buffers = editor.buffer.read(cx).all_buffers().into_iter().collect();
            editor.project.as_ref().unwrap().update(cx, |project, cx| {
                project.stop_language_servers_for_buffers(buffers, HashSet::default(), cx);
            })
        });
        cx.executor().advance_clock(Duration::from_millis(200));
        cx.run_until_parked();

        assert_eq!(
            extract_semantic_highlights(&cx.editor, &cx),
            Vec::new(),
            "Semantic tokens should be cleared after stopping the server"
        );
    }
1914
    // Verifies that flipping the language setting from `SemanticTokens::Full`
    // to `SemanticTokens::Off` clears existing semantic-token highlights.
    #[gpui::test]
    async fn test_disabling_semantic_tokens_setting_clears_highlights(cx: &mut TestAppContext) {
        init_test(cx, |_| {});

        update_test_language_settings(cx, |language_settings| {
            language_settings.languages.0.insert(
                "Rust".into(),
                LanguageSettingsContent {
                    semantic_tokens: Some(SemanticTokens::Full),
                    ..LanguageSettingsContent::default()
                },
            );
        });

        let mut cx = EditorLspTestContext::new_rust(
            lsp::ServerCapabilities {
                semantic_tokens_provider: Some(
                    lsp::SemanticTokensServerCapabilities::SemanticTokensOptions(
                        lsp::SemanticTokensOptions {
                            legend: lsp::SemanticTokensLegend {
                                token_types: vec!["function".into()],
                                token_modifiers: Vec::new(),
                            },
                            full: Some(lsp::SemanticTokensFullOptions::Delta { delta: None }),
                            ..lsp::SemanticTokensOptions::default()
                        },
                    ),
                ),
                ..lsp::ServerCapabilities::default()
            },
            cx,
        )
        .await;

        // Fake server always reports one "function" token covering `main`.
        let mut full_request = cx
            .set_request_handler::<lsp::request::SemanticTokensFullRequest, _, _>(
                move |_, _, _| async move {
                    Ok(Some(lsp::SemanticTokensResult::Tokens(
                        lsp::SemanticTokens {
                            data: vec![
                                0, // delta_line
                                3, // delta_start
                                4, // length
                                0, // token_type
                                0, // token_modifiers_bitset
                            ],
                            result_id: None,
                        },
                    )))
                },
            );

        cx.set_state("ˇfn main() {}");
        assert!(full_request.next().await.is_some());
        cx.run_until_parked();

        assert_eq!(
            extract_semantic_highlights(&cx.editor, &cx),
            vec![MultiBufferOffset(3)..MultiBufferOffset(7)],
            "Semantic tokens should be present before disabling the setting"
        );

        // Turn the feature off for Rust; no server restart is involved.
        update_test_language_settings(&mut cx, |language_settings| {
            language_settings.languages.0.insert(
                "Rust".into(),
                LanguageSettingsContent {
                    semantic_tokens: Some(SemanticTokens::Off),
                    ..LanguageSettingsContent::default()
                },
            );
        });
        cx.executor().advance_clock(Duration::from_millis(200));
        cx.run_until_parked();

        assert_eq!(
            extract_semantic_highlights(&cx.editor, &cx),
            Vec::new(),
            "Semantic tokens should be cleared after disabling the setting"
        );
    }
1995
1996 fn extract_semantic_highlight_styles(
1997 editor: &Entity<Editor>,
1998 cx: &TestAppContext,
1999 ) -> Vec<HighlightStyle> {
2000 editor.read_with(cx, |editor, cx| {
2001 editor
2002 .display_map
2003 .read(cx)
2004 .semantic_token_highlights
2005 .iter()
2006 .flat_map(|(_, (v, interner))| {
2007 v.iter().map(|highlights| interner[highlights.style])
2008 })
2009 .collect()
2010 })
2011 }
2012}