1use std::{collections::hash_map, sync::Arc, time::Duration};
2
3use collections::{HashMap, HashSet};
4use futures::future::join_all;
5use gpui::{
6 App, Context, FontStyle, FontWeight, HighlightStyle, StrikethroughStyle, Task, UnderlineStyle,
7};
8use itertools::Itertools;
9use language::language_settings::language_settings;
10use project::{
11 lsp_store::{
12 BufferSemanticToken, BufferSemanticTokens, RefreshForServer, SemanticTokenStylizer,
13 TokenType,
14 },
15 project_settings::ProjectSettings,
16};
17use settings::{
18 SemanticTokenColorOverride, SemanticTokenFontStyle, SemanticTokenFontWeight,
19 SemanticTokenRules, Settings as _,
20};
21use text::BufferId;
22use theme::SyntaxTheme;
23use ui::ActiveTheme as _;
24
25use crate::{
26 Editor,
27 actions::ToggleSemanticHighlights,
28 display_map::{HighlightStyleInterner, SemanticTokenHighlight},
29};
30
/// Per-editor state for LSP semantic-token highlighting.
pub(super) struct SemanticTokenState {
    // Styling rules sourced from the global LSP settings.
    rules: SemanticTokenRules,
    // Whether semantic highlighting is currently turned on for this editor.
    enabled: bool,
    // Debounced background task that fetches tokens and applies highlights.
    update_task: Task<()>,
    // For each buffer, the version whose tokens were last fetched; used to
    // skip re-querying buffers that have not changed since.
    fetched_for_buffers: HashMap<BufferId, clock::Global>,
}
37
38impl SemanticTokenState {
39 pub(super) fn new(cx: &App, enabled: bool) -> Self {
40 Self {
41 rules: ProjectSettings::get_global(cx)
42 .global_lsp_settings
43 .semantic_token_rules
44 .clone(),
45 enabled,
46 update_task: Task::ready(()),
47 fetched_for_buffers: HashMap::default(),
48 }
49 }
50
51 pub(super) fn enabled(&self) -> bool {
52 self.enabled
53 }
54
55 pub(super) fn toggle_enabled(&mut self) {
56 self.enabled = !self.enabled;
57 }
58
59 #[cfg(test)]
60 pub(super) fn take_update_task(&mut self) -> Task<()> {
61 std::mem::replace(&mut self.update_task, Task::ready(()))
62 }
63
64 pub(super) fn invalidate_buffer(&mut self, buffer_id: &BufferId) {
65 self.fetched_for_buffers.remove(buffer_id);
66 }
67
68 pub(super) fn update_rules(&mut self, new_rules: SemanticTokenRules) -> bool {
69 if new_rules != self.rules {
70 self.rules = new_rules;
71 true
72 } else {
73 false
74 }
75 }
76}
77
impl Editor {
    /// Returns `true` if any buffer in this editor's multibuffer has a
    /// semantics provider that reports semantic-token support.
    pub fn supports_semantic_tokens(&self, cx: &mut App) -> bool {
        let Some(provider) = self.semantics_provider.as_ref() else {
            return false;
        };

        let mut supports = false;
        self.buffer().update(cx, |this, cx| {
            this.for_each_buffer(|buffer| {
                // A single supporting buffer is enough.
                supports |= provider.supports_semantic_tokens(buffer, cx);
            });
        });

        supports
    }

    /// Whether semantic highlighting is currently enabled for this editor.
    pub fn semantic_highlights_enabled(&self) -> bool {
        self.semantic_token_state.enabled()
    }

    /// Action handler: flips semantic highlighting on/off and refreshes
    /// (or clears) the highlights accordingly.
    pub fn toggle_semantic_highlights(
        &mut self,
        _: &ToggleSemanticHighlights,
        _window: &mut gpui::Window,
        cx: &mut Context<Self>,
    ) {
        self.semantic_token_state.toggle_enabled();
        self.update_semantic_tokens(None, None, cx);
    }

    /// Schedules a debounced refresh of semantic-token highlights.
    ///
    /// * `buffer_id` — an extra buffer to query in addition to the currently
    ///   visible excerpts (e.g. a buffer that just changed).
    /// * `for_server` — when set, all previously fetched versions are
    ///   dropped so tokens are re-fetched and stale highlights invalidated.
    pub(crate) fn update_semantic_tokens(
        &mut self,
        buffer_id: Option<BufferId>,
        for_server: Option<RefreshForServer>,
        cx: &mut Context<Self>,
    ) {
        // Only full editors with the feature enabled get highlights; in any
        // other state, tear down all cached state and existing highlights.
        if !self.mode().is_full() || !self.semantic_token_state.enabled() {
            self.semantic_token_state.fetched_for_buffers.clear();
            self.display_map.update(cx, |display_map, _| {
                display_map.semantic_token_highlights.clear();
            });
            // Also cancel any in-flight fetch by replacing the task.
            self.semantic_token_state.update_task = Task::ready(());
            cx.notify();
            return;
        }

        // A server-initiated refresh invalidates everything previously
        // fetched; remember which buffers to clear after new data arrives.
        let mut invalidate_semantic_highlights_for_buffers = HashSet::default();
        if for_server.is_some() {
            invalidate_semantic_highlights_for_buffers.extend(
                self.semantic_token_state
                    .fetched_for_buffers
                    .drain()
                    .map(|(buffer_id, _)| buffer_id),
            );
        }

        let Some((sema, project)) = self.semantics_provider.clone().zip(self.project.clone())
        else {
            return;
        };

        // Candidate buffers: visible excerpts plus the explicitly requested
        // buffer, filtered to registered buffers whose language settings have
        // semantic tokens enabled.
        let buffers_to_query = self
            .visible_excerpts(true, cx)
            .into_values()
            .map(|(buffer, ..)| buffer)
            .chain(buffer_id.and_then(|buffer_id| self.buffer.read(cx).buffer(buffer_id)))
            .filter_map(|editor_buffer| {
                let editor_buffer_id = editor_buffer.read(cx).remote_id();
                if self.registered_buffers.contains_key(&editor_buffer_id)
                    && language_settings(
                        editor_buffer.read(cx).language().map(|l| l.name()),
                        editor_buffer.read(cx).file(),
                        cx,
                    )
                    .semantic_tokens
                    .enabled()
                {
                    Some((editor_buffer_id, editor_buffer))
                } else {
                    None
                }
            })
            .collect::<HashMap<_, _>>();

        // Buffers that currently carry highlights but will not be queried and
        // have tokens disabled (or are gone): drop their cached state and
        // highlights now.
        for buffer_with_disabled_tokens in self
            .display_map
            .read(cx)
            .semantic_token_highlights
            .iter()
            .map(|(buffer_id, _)| *buffer_id)
            .filter(|buffer_id| !buffers_to_query.contains_key(buffer_id))
            .filter(|buffer_id| {
                !self
                    .buffer
                    .read(cx)
                    .buffer(*buffer_id)
                    .is_some_and(|buffer| {
                        let buffer = buffer.read(cx);
                        language_settings(buffer.language().map(|l| l.name()), buffer.file(), cx)
                            .semantic_tokens
                            .enabled()
                    })
            })
            .collect::<Vec<_>>()
        {
            self.semantic_token_state
                .invalidate_buffer(&buffer_with_disabled_tokens);
            self.display_map.update(cx, |display_map, _| {
                display_map.invalidate_semantic_highlights(buffer_with_disabled_tokens);
            });
        }

        self.semantic_token_state.update_task = cx.spawn(async move |editor, cx| {
            // Debounce: coalesce rapid successive calls into one fetch.
            cx.background_executor()
                .timer(Duration::from_millis(50))
                .await;
            let Some(all_semantic_tokens_task) = editor
                .update(cx, |editor, cx| {
                    buffers_to_query
                        .into_iter()
                        .filter_map(|(buffer_id, buffer)| {
                            let known_version =
                                editor.semantic_token_state.fetched_for_buffers.get(&buffer_id);
                            let query_version = buffer.read(cx).version();
                            // Skip buffers unchanged since the last fetch.
                            if known_version.is_some_and(|known_version| {
                                !query_version.changed_since(known_version)
                            }) {
                                None
                            } else {
                                let task = sema.semantic_tokens(buffer, for_server, cx);
                                Some(async move { (buffer_id, query_version, task.await) })
                            }
                        })
                        .collect::<Vec<_>>()
                })
                .ok()
            else {
                return;
            };

            let all_semantic_tokens = join_all(all_semantic_tokens_task).await;
            editor.update(cx, |editor, cx| {
                // Apply deferred invalidations from a server-initiated
                // refresh only after the replacement data has been fetched.
                editor.display_map.update(cx, |display_map, _| {
                    for buffer_id in invalidate_semantic_highlights_for_buffers {
                        display_map.invalidate_semantic_highlights(buffer_id);
                    }
                });

                if all_semantic_tokens.is_empty() {
                    return;
                }
                let multi_buffer_snapshot = editor.buffer().read(cx).snapshot(cx);

                for (buffer_id, query_version, tokens) in all_semantic_tokens {
                    let tokens = match tokens {
                        Ok(BufferSemanticTokens { tokens: Some(tokens) }) => {
                            tokens
                        },
                        // The query succeeded but produced no tokens: drop
                        // whatever highlights this buffer had.
                        Ok(BufferSemanticTokens { tokens: None }) => {
                            editor.display_map.update(cx, |display_map, _| {
                                display_map.invalidate_semantic_highlights(buffer_id);
                            });
                            continue;
                        },
                        // On error, keep the existing highlights and log.
                        Err(e) => {
                            log::error!("Failed to fetch semantic tokens for buffer {buffer_id:?}: {e:#}");
                            continue;
                        },
                    };

                    // Record the fetched version; skip applying results that
                    // are not newer than what we already recorded (guards
                    // against out-of-order completions).
                    match editor.semantic_token_state.fetched_for_buffers.entry(buffer_id) {
                        hash_map::Entry::Occupied(mut o) => {
                            if query_version.changed_since(o.get()) {
                                o.insert(query_version);
                            } else {
                                continue;
                            }
                        },
                        hash_map::Entry::Vacant(v) => {
                            v.insert(query_version);
                        },
                    }

                    let language_name = editor
                        .buffer()
                        .read(cx)
                        .buffer(buffer_id)
                        .and_then(|buf| buf.read(cx).language().map(|l| l.name()));

                    editor.display_map.update(cx, |display_map, cx| {
                        project.read(cx).lsp_store().update(cx, |lsp_store, cx| {
                            let mut token_highlights = Vec::new();
                            let mut interner = HighlightStyleInterner::default();
                            // Merge highlights from every server that
                            // returned tokens for this buffer.
                            for (server_id, server_tokens) in tokens {
                                let Some(stylizer) = lsp_store.get_or_create_token_stylizer(
                                    server_id,
                                    language_name.as_ref(),
                                    cx,
                                )
                                else {
                                    continue;
                                };
                                token_highlights.extend(buffer_into_editor_highlights(
                                    &server_tokens,
                                    stylizer,
                                    &multi_buffer_snapshot,
                                    &mut interner,
                                    cx,
                                ));
                            }

                            // Keep highlights ordered by start position.
                            token_highlights.sort_by(|a, b| {
                                a.range.start.cmp(&b.range.start, &multi_buffer_snapshot)
                            });
                            display_map
                                .semantic_token_highlights
                                .insert(buffer_id, (Arc::from(token_highlights), Arc::new(interner)));
                        });
                    });
                }

                cx.notify();
            }).ok();
        });
    }

    /// Drops all version bookkeeping and re-fetches tokens for every
    /// eligible buffer.
    pub(super) fn refresh_semantic_token_highlights(&mut self, cx: &mut Context<Self>) {
        self.semantic_token_state.fetched_for_buffers.clear();
        self.update_semantic_tokens(None, None, cx);
    }
}
310
/// Converts per-buffer semantic tokens into multibuffer-anchored
/// `SemanticTokenHighlight`s, interning each computed style.
///
/// Tokens whose start or end anchor does not resolve to a visible multibuffer
/// anchor, or whose type/modifiers produce no style under `stylizer`, are
/// dropped.
fn buffer_into_editor_highlights<'a, 'b>(
    buffer_tokens: &'a [BufferSemanticToken],
    stylizer: &'a SemanticTokenStylizer,
    multi_buffer_snapshot: &'a multi_buffer::MultiBufferSnapshot,
    interner: &'b mut HighlightStyleInterner,
    cx: &'a App,
) -> impl Iterator<Item = SemanticTokenHighlight> + use<'a, 'b> {
    multi_buffer_snapshot
        // Flatten each token into [start, end, start, end, ...] so all
        // anchors are resolved in one call, then re-pair with `tuples` below.
        .text_anchors_to_visible_anchors(
            buffer_tokens
                .iter()
                .flat_map(|token| [token.range.start, token.range.end]),
        )
        .into_iter()
        .tuples::<(_, _)>()
        // Pair each resolved (start, end) back with its source token; the
        // ordering matches because both streams are in token order.
        .zip(buffer_tokens)
        .filter_map(|((multi_buffer_start, multi_buffer_end), token)| {
            // Both endpoints must be visible for the highlight to apply.
            let range = multi_buffer_start?..multi_buffer_end?;
            let style = convert_token(
                stylizer,
                cx.theme().syntax(),
                token.token_type,
                token.token_modifiers,
            )?;
            // Intern the computed style before storing it on the highlight.
            let style = interner.intern(style);
            Some(SemanticTokenHighlight {
                range,
                style,
                token_type: token.token_type,
                token_modifiers: token.token_modifiers,
                server_id: stylizer.server_id(),
            })
        })
}
345
/// Resolves the final `HighlightStyle` for a token given its type and
/// modifier bitset, or `None` when no rule matched at all.
///
/// All rules for the token type whose required modifiers are present are
/// applied in order. For each style field, precedence within a rule is:
/// the rule's explicit value, then the rule's resolved theme style, then
/// whatever an earlier rule already accumulated.
fn convert_token(
    stylizer: &SemanticTokenStylizer,
    theme: &SyntaxTheme,
    token_type: TokenType,
    modifiers: u32,
) -> Option<HighlightStyle> {
    // Candidate rules for this token type...
    let rules = stylizer.rules_for_token(token_type)?;
    // ...narrowed to rules whose required modifiers are all set.
    let matching = rules.iter().filter(|rule| {
        rule.token_modifiers
            .iter()
            .all(|m| stylizer.has_modifier(modifiers, m))
    });

    let mut highlight = HighlightStyle::default();
    let mut empty = true;

    for rule in matching {
        empty = false;

        // First named theme style that resolves; used as a fallback source
        // for fields the rule does not set directly.
        let style = rule.style.iter().find_map(|style| theme.get_opt(style));

        // Sets `highlight.$field` from the rule's own value, falling back to
        // the resolved theme style, then to the previously accumulated value.
        macro_rules! overwrite {
            (
                highlight.$highlight_field:ident,
                SemanticTokenRule::$rule_field:ident,
                $transform:expr $(,)?
            ) => {
                highlight.$highlight_field = rule
                    .$rule_field
                    .map($transform)
                    .or_else(|| style.and_then(|s| s.$highlight_field))
                    .or(highlight.$highlight_field)
            };
        }

        overwrite!(
            highlight.color,
            SemanticTokenRule::foreground_color,
            Into::into,
        );

        overwrite!(
            highlight.background_color,
            SemanticTokenRule::background_color,
            Into::into,
        );

        overwrite!(
            highlight.font_weight,
            SemanticTokenRule::font_weight,
            |w| match w {
                SemanticTokenFontWeight::Normal => FontWeight::NORMAL,
                SemanticTokenFontWeight::Bold => FontWeight::BOLD,
            },
        );

        overwrite!(
            highlight.font_style,
            SemanticTokenRule::font_style,
            |s| match s {
                SemanticTokenFontStyle::Normal => FontStyle::Normal,
                SemanticTokenFontStyle::Italic => FontStyle::Italic,
            },
        );

        overwrite!(highlight.underline, SemanticTokenRule::underline, |u| {
            UnderlineStyle {
                thickness: 1.0.into(),
                color: match u {
                    // `InheritForeground(true)` uses whatever foreground
                    // color has accumulated so far, not this rule's own.
                    SemanticTokenColorOverride::InheritForeground(true) => highlight.color,
                    SemanticTokenColorOverride::InheritForeground(false) => None,
                    SemanticTokenColorOverride::Replace(c) => Some(c.into()),
                },
                ..UnderlineStyle::default()
            }
        });

        overwrite!(
            highlight.strikethrough,
            SemanticTokenRule::strikethrough,
            |s| StrikethroughStyle {
                thickness: 1.0.into(),
                color: match s {
                    SemanticTokenColorOverride::InheritForeground(true) => highlight.color,
                    SemanticTokenColorOverride::InheritForeground(false) => None,
                    SemanticTokenColorOverride::Replace(c) => Some(c.into()),
                },
            },
        );
    }

    // No matching rule at all -> this token gets no highlight.
    if empty { None } else { Some(highlight) }
}
439
440#[cfg(test)]
441mod tests {
442 use std::{
443 ops::{Deref as _, Range},
444 sync::atomic::{self, AtomicUsize},
445 };
446
447 use futures::StreamExt as _;
448 use gpui::{
449 AppContext as _, Entity, Focusable as _, HighlightStyle, TestAppContext, VisualTestContext,
450 };
451 use language::{Language, LanguageConfig, LanguageMatcher};
452 use languages::FakeLspAdapter;
453 use multi_buffer::{
454 AnchorRangeExt, ExcerptRange, ExpandExcerptDirection, MultiBuffer, MultiBufferOffset,
455 };
456 use project::Project;
457 use rope::Point;
458 use serde_json::json;
459 use settings::{LanguageSettingsContent, SemanticTokenRules, SemanticTokens, SettingsStore};
460 use workspace::{Workspace, WorkspaceHandle as _};
461
462 use crate::{
463 Capability,
464 editor_tests::{init_test, update_test_language_settings},
465 test::{build_editor_with_project, editor_lsp_test_context::EditorLspTestContext},
466 };
467
468 use super::*;
469
    #[gpui::test]
    async fn lsp_semantic_tokens_full_capability(cx: &mut TestAppContext) {
        // The server advertises full token support without deltas
        // (`delta: None`), so every edit must trigger a fresh
        // `textDocument/semanticTokens/full` request — never a delta request.
        init_test(cx, |_| {});

        // Enable full semantic tokens for Rust.
        update_test_language_settings(cx, |language_settings| {
            language_settings.languages.0.insert(
                "Rust".into(),
                LanguageSettingsContent {
                    semantic_tokens: Some(SemanticTokens::Full),
                    ..LanguageSettingsContent::default()
                },
            );
        });

        let mut cx = EditorLspTestContext::new_rust(
            lsp::ServerCapabilities {
                semantic_tokens_provider: Some(
                    lsp::SemanticTokensServerCapabilities::SemanticTokensOptions(
                        lsp::SemanticTokensOptions {
                            legend: lsp::SemanticTokensLegend {
                                token_types: vec!["function".into()],
                                token_modifiers: Vec::new(),
                            },
                            full: Some(lsp::SemanticTokensFullOptions::Delta { delta: None }),
                            ..lsp::SemanticTokensOptions::default()
                        },
                    ),
                ),
                ..lsp::ServerCapabilities::default()
            },
            cx,
        )
        .await;

        // Counts how many full requests the server receives.
        let full_counter = Arc::new(AtomicUsize::new(0));
        let full_counter_clone = full_counter.clone();

        let mut full_request = cx
            .set_request_handler::<lsp::request::SemanticTokensFullRequest, _, _>(
                move |_, _, _| {
                    full_counter_clone.fetch_add(1, atomic::Ordering::Release);
                    async move {
                        Ok(Some(lsp::SemanticTokensResult::Tokens(
                            lsp::SemanticTokens {
                                // One token: same line, char 3, length 4.
                                data: vec![
                                    0, // delta_line
                                    3, // delta_start
                                    4, // length
                                    0, // token_type
                                    0, // token_modifiers_bitset
                                ],
                                // The server isn't capable of deltas, so even though we sent back
                                // a result ID, the client shouldn't request a delta.
                                result_id: Some("a".into()),
                            },
                        )))
                    }
                },
            );

        cx.set_state("ˇfn main() {}");
        assert!(full_request.next().await.is_some());

        cx.run_until_parked();

        cx.set_state("ˇfn main() { a }");
        assert!(full_request.next().await.is_some());

        cx.run_until_parked();

        // The token covers `main` at offsets 3..7.
        assert_eq!(
            extract_semantic_highlights(&cx.editor, &cx),
            vec![MultiBufferOffset(3)..MultiBufferOffset(7)]
        );

        // Two edits -> two full requests.
        assert_eq!(full_counter.load(atomic::Ordering::Acquire), 2);
    }
547
    #[gpui::test]
    async fn lsp_semantic_tokens_full_none_result_id(cx: &mut TestAppContext) {
        // The server advertises delta support, but replies with
        // `result_id: None` — without a previous result ID the client has
        // nothing to delta against and must keep issuing full requests.
        init_test(cx, |_| {});

        // Enable full semantic tokens for Rust.
        update_test_language_settings(cx, |language_settings| {
            language_settings.languages.0.insert(
                "Rust".into(),
                LanguageSettingsContent {
                    semantic_tokens: Some(SemanticTokens::Full),
                    ..LanguageSettingsContent::default()
                },
            );
        });

        let mut cx = EditorLspTestContext::new_rust(
            lsp::ServerCapabilities {
                semantic_tokens_provider: Some(
                    lsp::SemanticTokensServerCapabilities::SemanticTokensOptions(
                        lsp::SemanticTokensOptions {
                            legend: lsp::SemanticTokensLegend {
                                token_types: vec!["function".into()],
                                token_modifiers: Vec::new(),
                            },
                            full: Some(lsp::SemanticTokensFullOptions::Delta { delta: Some(true) }),
                            ..lsp::SemanticTokensOptions::default()
                        },
                    ),
                ),
                ..lsp::ServerCapabilities::default()
            },
            cx,
        )
        .await;

        let full_counter = Arc::new(AtomicUsize::new(0));
        let full_counter_clone = full_counter.clone();

        let mut full_request = cx
            .set_request_handler::<lsp::request::SemanticTokensFullRequest, _, _>(
                move |_, _, _| {
                    full_counter_clone.fetch_add(1, atomic::Ordering::Release);
                    async move {
                        Ok(Some(lsp::SemanticTokensResult::Tokens(
                            lsp::SemanticTokens {
                                // One token: same line, char 3, length 4.
                                data: vec![
                                    0, // delta_line
                                    3, // delta_start
                                    4, // length
                                    0, // token_type
                                    0, // token_modifiers_bitset
                                ],
                                result_id: None, // Sending back `None` forces the client to not use deltas.
                            },
                        )))
                    }
                },
            );

        cx.set_state("ˇfn main() {}");
        assert!(full_request.next().await.is_some());

        // Await the debounced update task so highlights are applied.
        let task = cx.update_editor(|e, _, _| e.semantic_token_state.take_update_task());
        task.await;

        cx.set_state("ˇfn main() { a }");
        assert!(full_request.next().await.is_some());

        let task = cx.update_editor(|e, _, _| e.semantic_token_state.take_update_task());
        task.await;
        // The token covers `main` at offsets 3..7.
        assert_eq!(
            extract_semantic_highlights(&cx.editor, &cx),
            vec![MultiBufferOffset(3)..MultiBufferOffset(7)]
        );
        // Both requests were full requests.
        assert_eq!(full_counter.load(atomic::Ordering::Acquire), 2);
    }
623
    #[gpui::test]
    async fn lsp_semantic_tokens_delta(cx: &mut TestAppContext) {
        // The server supports deltas and returns a result ID, so after the
        // initial full request the client should switch to delta requests.
        init_test(cx, |_| {});

        // Enable full semantic tokens for Rust.
        update_test_language_settings(cx, |language_settings| {
            language_settings.languages.0.insert(
                "Rust".into(),
                LanguageSettingsContent {
                    semantic_tokens: Some(SemanticTokens::Full),
                    ..LanguageSettingsContent::default()
                },
            );
        });

        let mut cx = EditorLspTestContext::new_rust(
            lsp::ServerCapabilities {
                semantic_tokens_provider: Some(
                    lsp::SemanticTokensServerCapabilities::SemanticTokensOptions(
                        lsp::SemanticTokensOptions {
                            legend: lsp::SemanticTokensLegend {
                                token_types: vec!["function".into()],
                                token_modifiers: Vec::new(),
                            },
                            full: Some(lsp::SemanticTokensFullOptions::Delta { delta: Some(true) }),
                            ..lsp::SemanticTokensOptions::default()
                        },
                    ),
                ),
                ..lsp::ServerCapabilities::default()
            },
            cx,
        )
        .await;

        // Track full and delta request counts separately.
        let full_counter = Arc::new(AtomicUsize::new(0));
        let full_counter_clone = full_counter.clone();
        let delta_counter = Arc::new(AtomicUsize::new(0));
        let delta_counter_clone = delta_counter.clone();

        let mut full_request = cx
            .set_request_handler::<lsp::request::SemanticTokensFullRequest, _, _>(
                move |_, _, _| {
                    full_counter_clone.fetch_add(1, atomic::Ordering::Release);
                    async move {
                        Ok(Some(lsp::SemanticTokensResult::Tokens(
                            lsp::SemanticTokens {
                                // One token: same line, char 3, length 4.
                                data: vec![
                                    0, // delta_line
                                    3, // delta_start
                                    4, // length
                                    0, // token_type
                                    0, // token_modifiers_bitset
                                ],
                                result_id: Some("a".into()),
                            },
                        )))
                    }
                },
            );

        let mut delta_request = cx
            .set_request_handler::<lsp::request::SemanticTokensFullDeltaRequest, _, _>(
                move |_, params, _| {
                    delta_counter_clone.fetch_add(1, atomic::Ordering::Release);
                    // The delta must reference the full response's result ID.
                    assert_eq!(params.previous_result_id, "a");
                    async move {
                        Ok(Some(lsp::SemanticTokensFullDeltaResult::TokensDelta(
                            lsp::SemanticTokensDelta {
                                // Empty edit list: the tokens are unchanged.
                                edits: Vec::new(),
                                result_id: Some("b".into()),
                            },
                        )))
                    }
                },
            );

        // Initial request, for the empty buffer.
        cx.set_state("ˇfn main() {}");
        assert!(full_request.next().await.is_some());
        let task = cx.update_editor(|e, _, _| e.semantic_token_state.take_update_task());
        task.await;

        cx.set_state("ˇfn main() { a }");
        assert!(delta_request.next().await.is_some());
        let task = cx.update_editor(|e, _, _| e.semantic_token_state.take_update_task());
        task.await;

        // The token covers `main` at offsets 3..7.
        assert_eq!(
            extract_semantic_highlights(&cx.editor, &cx),
            vec![MultiBufferOffset(3)..MultiBufferOffset(7)]
        );

        // Exactly one full request followed by exactly one delta request.
        assert_eq!(full_counter.load(atomic::Ordering::Acquire), 1);
        assert_eq!(delta_counter.load(atomic::Ordering::Acquire), 1);
    }
719
    #[gpui::test]
    async fn lsp_semantic_tokens_multiserver_full(cx: &mut TestAppContext) {
        // Two language servers for the same (TOML) buffer, each with its own
        // legend; highlights from both servers should be merged.
        init_test(cx, |_| {});

        // Enable full semantic tokens for TOML.
        update_test_language_settings(cx, |language_settings| {
            language_settings.languages.0.insert(
                "TOML".into(),
                LanguageSettingsContent {
                    semantic_tokens: Some(SemanticTokens::Full),
                    ..LanguageSettingsContent::default()
                },
            );
        });

        let toml_language = Arc::new(Language::new(
            LanguageConfig {
                name: "TOML".into(),
                matcher: LanguageMatcher {
                    path_suffixes: vec!["toml".into()],
                    ..LanguageMatcher::default()
                },
                ..LanguageConfig::default()
            },
            None,
        ));

        // We have 2 language servers for TOML in this test.
        let toml_legend_1 = lsp::SemanticTokensLegend {
            token_types: vec!["property".into()],
            token_modifiers: Vec::new(),
        };
        let toml_legend_2 = lsp::SemanticTokensLegend {
            token_types: vec!["number".into()],
            token_modifiers: Vec::new(),
        };

        let app_state = cx.update(workspace::AppState::test);

        cx.update(|cx| {
            assets::Assets.load_test_fonts(cx);
            crate::init(cx);
            workspace::init(app_state.clone(), cx);
        });

        let project = Project::test(app_state.fs.clone(), [], cx).await;
        let language_registry = project.read_with(cx, |project, _| project.languages().clone());

        // Per-server full-request counters.
        let full_counter_toml_1 = Arc::new(AtomicUsize::new(0));
        let full_counter_toml_1_clone = full_counter_toml_1.clone();
        let full_counter_toml_2 = Arc::new(AtomicUsize::new(0));
        let full_counter_toml_2_clone = full_counter_toml_2.clone();

        let mut toml_server_1 = language_registry.register_fake_lsp(
            toml_language.name(),
            FakeLspAdapter {
                name: "toml1",
                capabilities: lsp::ServerCapabilities {
                    semantic_tokens_provider: Some(
                        lsp::SemanticTokensServerCapabilities::SemanticTokensOptions(
                            lsp::SemanticTokensOptions {
                                legend: toml_legend_1,
                                full: Some(lsp::SemanticTokensFullOptions::Delta { delta: None }),
                                ..lsp::SemanticTokensOptions::default()
                            },
                        ),
                    ),
                    ..lsp::ServerCapabilities::default()
                },
                initializer: Some(Box::new({
                    let full_counter_toml_1_clone = full_counter_toml_1_clone.clone();
                    move |fake_server| {
                        let full_counter = full_counter_toml_1_clone.clone();
                        fake_server
                            .set_request_handler::<lsp::request::SemanticTokensFullRequest, _, _>(
                                move |_, _| {
                                    full_counter.fetch_add(1, atomic::Ordering::Release);
                                    async move {
                                        Ok(Some(lsp::SemanticTokensResult::Tokens(
                                            lsp::SemanticTokens {
                                                // highlight 'a' as a property
                                                data: vec![
                                                    0, // delta_line
                                                    0, // delta_start
                                                    1, // length
                                                    0, // token_type
                                                    0, // token_modifiers_bitset
                                                ],
                                                result_id: Some("a".into()),
                                            },
                                        )))
                                    }
                                },
                            );
                    }
                })),
                ..FakeLspAdapter::default()
            },
        );
        let mut toml_server_2 = language_registry.register_fake_lsp(
            toml_language.name(),
            FakeLspAdapter {
                name: "toml2",
                capabilities: lsp::ServerCapabilities {
                    semantic_tokens_provider: Some(
                        lsp::SemanticTokensServerCapabilities::SemanticTokensOptions(
                            lsp::SemanticTokensOptions {
                                legend: toml_legend_2,
                                full: Some(lsp::SemanticTokensFullOptions::Delta { delta: None }),
                                ..lsp::SemanticTokensOptions::default()
                            },
                        ),
                    ),
                    ..lsp::ServerCapabilities::default()
                },
                initializer: Some(Box::new({
                    let full_counter_toml_2_clone = full_counter_toml_2_clone.clone();
                    move |fake_server| {
                        let full_counter = full_counter_toml_2_clone.clone();
                        fake_server
                            .set_request_handler::<lsp::request::SemanticTokensFullRequest, _, _>(
                                move |_, _| {
                                    full_counter.fetch_add(1, atomic::Ordering::Release);
                                    async move {
                                        Ok(Some(lsp::SemanticTokensResult::Tokens(
                                            lsp::SemanticTokens {
                                                // highlight '3' as a literal
                                                data: vec![
                                                    0, // delta_line
                                                    4, // delta_start
                                                    1, // length
                                                    0, // token_type
                                                    0, // token_modifiers_bitset
                                                ],
                                                result_id: Some("a".into()),
                                            },
                                        )))
                                    }
                                },
                            );
                    }
                })),
                ..FakeLspAdapter::default()
            },
        );
        language_registry.add(toml_language.clone());

        app_state
            .fs
            .as_fake()
            .insert_tree(
                EditorLspTestContext::root_path(),
                json!({
                    ".git": {},
                    "dir": {
                        "foo.toml": "a = 1\nb = 2\n",
                    }
                }),
            )
            .await;

        let window = cx.add_window(|window, cx| Workspace::test_new(project.clone(), window, cx));
        let workspace = window.root(cx).unwrap();

        let mut cx = VisualTestContext::from_window(*window.deref(), cx);
        project
            .update(&mut cx, |project, cx| {
                project.find_or_create_worktree(EditorLspTestContext::root_path(), true, cx)
            })
            .await
            .unwrap();
        cx.read(|cx| workspace.read(cx).worktree_scans_complete(cx))
            .await;

        let toml_file = cx.read(|cx| workspace.file_project_paths(cx)[0].clone());
        let toml_item = workspace
            .update_in(&mut cx, |workspace, window, cx| {
                workspace.open_path(toml_file, None, true, window, cx)
            })
            .await
            .expect("Could not open test file");

        let editor = cx.update(|_, cx| {
            toml_item
                .act_as::<Editor>(cx)
                .expect("Opened test file wasn't an editor")
        });

        editor.update_in(&mut cx, |editor, window, cx| {
            let nav_history = workspace
                .read(cx)
                .active_pane()
                .read(cx)
                .nav_history_for_item(&cx.entity());
            editor.set_nav_history(Some(nav_history));
            window.focus(&editor.focus_handle(cx), cx)
        });

        // Wait for both fake servers to start.
        let _toml_server_1 = toml_server_1.next().await.unwrap();
        let _toml_server_2 = toml_server_2.next().await.unwrap();

        // Trigger semantic tokens.
        editor.update_in(&mut cx, |editor, _, cx| {
            editor.edit([(MultiBufferOffset(0)..MultiBufferOffset(1), "b")], cx);
        });
        // Advance past the 50ms debounce, then await the update task.
        cx.executor().advance_clock(Duration::from_millis(200));
        let task = editor.update_in(&mut cx, |e, _, _| e.semantic_token_state.take_update_task());
        cx.run_until_parked();
        task.await;

        // One highlight from each server, merged and ordered by position.
        assert_eq!(
            extract_semantic_highlights(&editor, &cx),
            vec![
                MultiBufferOffset(0)..MultiBufferOffset(1),
                MultiBufferOffset(4)..MultiBufferOffset(5),
            ]
        );

        // Each server received exactly one full request.
        assert_eq!(full_counter_toml_1.load(atomic::Ordering::Acquire), 1);
        assert_eq!(full_counter_toml_2.load(atomic::Ordering::Acquire), 1);
    }
940
941 #[gpui::test]
942 async fn lsp_semantic_tokens_multibuffer_part(cx: &mut TestAppContext) {
943 init_test(cx, |_| {});
944
945 update_test_language_settings(cx, |language_settings| {
946 language_settings.languages.0.insert(
947 "TOML".into(),
948 LanguageSettingsContent {
949 semantic_tokens: Some(SemanticTokens::Full),
950 ..LanguageSettingsContent::default()
951 },
952 );
953 language_settings.languages.0.insert(
954 "Rust".into(),
955 LanguageSettingsContent {
956 semantic_tokens: Some(SemanticTokens::Full),
957 ..LanguageSettingsContent::default()
958 },
959 );
960 });
961
962 let toml_language = Arc::new(Language::new(
963 LanguageConfig {
964 name: "TOML".into(),
965 matcher: LanguageMatcher {
966 path_suffixes: vec!["toml".into()],
967 ..LanguageMatcher::default()
968 },
969 ..LanguageConfig::default()
970 },
971 None,
972 ));
973 let rust_language = Arc::new(Language::new(
974 LanguageConfig {
975 name: "Rust".into(),
976 matcher: LanguageMatcher {
977 path_suffixes: vec!["rs".into()],
978 ..LanguageMatcher::default()
979 },
980 ..LanguageConfig::default()
981 },
982 None,
983 ));
984
985 let toml_legend = lsp::SemanticTokensLegend {
986 token_types: vec!["property".into()],
987 token_modifiers: Vec::new(),
988 };
989 let rust_legend = lsp::SemanticTokensLegend {
990 token_types: vec!["constant".into()],
991 token_modifiers: Vec::new(),
992 };
993
994 let app_state = cx.update(workspace::AppState::test);
995
996 cx.update(|cx| {
997 assets::Assets.load_test_fonts(cx);
998 crate::init(cx);
999 workspace::init(app_state.clone(), cx);
1000 });
1001
1002 let project = Project::test(app_state.fs.clone(), [], cx).await;
1003 let language_registry = project.read_with(cx, |project, _| project.languages().clone());
1004 let full_counter_toml = Arc::new(AtomicUsize::new(0));
1005 let full_counter_toml_clone = full_counter_toml.clone();
1006
1007 let mut toml_server = language_registry.register_fake_lsp(
1008 toml_language.name(),
1009 FakeLspAdapter {
1010 name: "toml",
1011 capabilities: lsp::ServerCapabilities {
1012 semantic_tokens_provider: Some(
1013 lsp::SemanticTokensServerCapabilities::SemanticTokensOptions(
1014 lsp::SemanticTokensOptions {
1015 legend: toml_legend,
1016 full: Some(lsp::SemanticTokensFullOptions::Delta { delta: None }),
1017 ..lsp::SemanticTokensOptions::default()
1018 },
1019 ),
1020 ),
1021 ..lsp::ServerCapabilities::default()
1022 },
1023 initializer: Some(Box::new({
1024 let full_counter_toml_clone = full_counter_toml_clone.clone();
1025 move |fake_server| {
1026 let full_counter = full_counter_toml_clone.clone();
1027 fake_server
1028 .set_request_handler::<lsp::request::SemanticTokensFullRequest, _, _>(
1029 move |_, _| {
1030 full_counter.fetch_add(1, atomic::Ordering::Release);
1031 async move {
1032 Ok(Some(lsp::SemanticTokensResult::Tokens(
1033 lsp::SemanticTokens {
1034 // highlight 'a', 'b', 'c' as properties on lines 0, 1, 2
1035 data: vec![
1036 0, // delta_line (line 0)
1037 0, // delta_start
1038 1, // length
1039 0, // token_type
1040 0, // token_modifiers_bitset
1041 1, // delta_line (line 1)
1042 0, // delta_start
1043 1, // length
1044 0, // token_type
1045 0, // token_modifiers_bitset
1046 1, // delta_line (line 2)
1047 0, // delta_start
1048 1, // length
1049 0, // token_type
1050 0, // token_modifiers_bitset
1051 ],
1052 result_id: Some("a".into()),
1053 },
1054 )))
1055 }
1056 },
1057 );
1058 }
1059 })),
1060 ..FakeLspAdapter::default()
1061 },
1062 );
1063 language_registry.add(toml_language.clone());
1064 let mut rust_server = language_registry.register_fake_lsp(
1065 rust_language.name(),
1066 FakeLspAdapter {
1067 name: "rust",
1068 capabilities: lsp::ServerCapabilities {
1069 semantic_tokens_provider: Some(
1070 lsp::SemanticTokensServerCapabilities::SemanticTokensOptions(
1071 lsp::SemanticTokensOptions {
1072 legend: rust_legend,
1073 full: Some(lsp::SemanticTokensFullOptions::Delta { delta: None }),
1074 ..lsp::SemanticTokensOptions::default()
1075 },
1076 ),
1077 ),
1078 ..lsp::ServerCapabilities::default()
1079 },
1080 ..FakeLspAdapter::default()
1081 },
1082 );
1083 language_registry.add(rust_language.clone());
1084
1085 app_state
1086 .fs
1087 .as_fake()
1088 .insert_tree(
1089 EditorLspTestContext::root_path(),
1090 json!({
1091 ".git": {},
1092 "dir": {
1093 "foo.toml": "a = 1\nb = 2\nc = 3\n",
1094 "bar.rs": "const c: usize = 3;\n",
1095 }
1096 }),
1097 )
1098 .await;
1099
1100 let window = cx.add_window(|window, cx| Workspace::test_new(project.clone(), window, cx));
1101 let workspace = window.root(cx).unwrap();
1102
1103 let mut cx = VisualTestContext::from_window(*window.deref(), cx);
1104 project
1105 .update(&mut cx, |project, cx| {
1106 project.find_or_create_worktree(EditorLspTestContext::root_path(), true, cx)
1107 })
1108 .await
1109 .unwrap();
1110 cx.read(|cx| workspace.read(cx).worktree_scans_complete(cx))
1111 .await;
1112
1113 let toml_file = cx.read(|cx| workspace.file_project_paths(cx)[1].clone());
1114 let rust_file = cx.read(|cx| workspace.file_project_paths(cx)[0].clone());
1115 let (toml_item, rust_item) = workspace.update_in(&mut cx, |workspace, window, cx| {
1116 (
1117 workspace.open_path(toml_file, None, true, window, cx),
1118 workspace.open_path(rust_file, None, true, window, cx),
1119 )
1120 });
1121 let toml_item = toml_item.await.expect("Could not open test file");
1122 let rust_item = rust_item.await.expect("Could not open test file");
1123
1124 let (toml_editor, rust_editor) = cx.update(|_, cx| {
1125 (
1126 toml_item
1127 .act_as::<Editor>(cx)
1128 .expect("Opened test file wasn't an editor"),
1129 rust_item
1130 .act_as::<Editor>(cx)
1131 .expect("Opened test file wasn't an editor"),
1132 )
1133 });
1134 let toml_buffer = cx.read(|cx| {
1135 toml_editor
1136 .read(cx)
1137 .buffer()
1138 .read(cx)
1139 .as_singleton()
1140 .unwrap()
1141 });
1142 let rust_buffer = cx.read(|cx| {
1143 rust_editor
1144 .read(cx)
1145 .buffer()
1146 .read(cx)
1147 .as_singleton()
1148 .unwrap()
1149 });
1150 let multibuffer = cx.new(|cx| {
1151 let mut multibuffer = MultiBuffer::new(Capability::ReadWrite);
1152 multibuffer.push_excerpts(
1153 toml_buffer.clone(),
1154 [ExcerptRange::new(Point::new(0, 0)..Point::new(1, 0))],
1155 cx,
1156 );
1157 multibuffer.push_excerpts(
1158 rust_buffer.clone(),
1159 [ExcerptRange::new(Point::new(0, 0)..Point::new(1, 0))],
1160 cx,
1161 );
1162 multibuffer
1163 });
1164
1165 let editor = workspace.update_in(&mut cx, |workspace, window, cx| {
1166 let editor = cx.new(|cx| build_editor_with_project(project, multibuffer, window, cx));
1167 workspace.add_item_to_active_pane(Box::new(editor.clone()), None, true, window, cx);
1168 editor
1169 });
1170 editor.update_in(&mut cx, |editor, window, cx| {
1171 let nav_history = workspace
1172 .read(cx)
1173 .active_pane()
1174 .read(cx)
1175 .nav_history_for_item(&cx.entity());
1176 editor.set_nav_history(Some(nav_history));
1177 window.focus(&editor.focus_handle(cx), cx)
1178 });
1179
1180 let _toml_server = toml_server.next().await.unwrap();
1181 let _rust_server = rust_server.next().await.unwrap();
1182
1183 // Initial request.
1184 cx.executor().advance_clock(Duration::from_millis(200));
1185 let task = editor.update_in(&mut cx, |e, _, _| e.semantic_token_state.take_update_task());
1186 cx.run_until_parked();
1187 task.await;
1188 assert_eq!(full_counter_toml.load(atomic::Ordering::Acquire), 1);
1189 cx.run_until_parked();
1190
1191 // Initially, excerpt only covers line 0, so only the 'a' token should be highlighted.
1192 // The excerpt content is "a = 1\n" (6 chars), so 'a' is at offset 0.
1193 assert_eq!(
1194 extract_semantic_highlights(&editor, &cx),
1195 vec![MultiBufferOffset(0)..MultiBufferOffset(1)]
1196 );
1197
1198 // Get the excerpt id for the TOML excerpt and expand it down by 2 lines.
1199 let toml_excerpt_id =
1200 editor.read_with(&cx, |editor, cx| editor.buffer().read(cx).excerpt_ids()[0]);
1201 editor.update_in(&mut cx, |editor, _, cx| {
1202 editor.buffer().update(cx, |buffer, cx| {
1203 buffer.expand_excerpts([toml_excerpt_id], 2, ExpandExcerptDirection::Down, cx);
1204 });
1205 });
1206
1207 // Wait for semantic tokens to be re-fetched after expansion.
1208 cx.executor().advance_clock(Duration::from_millis(200));
1209 let task = editor.update_in(&mut cx, |e, _, _| e.semantic_token_state.take_update_task());
1210 cx.run_until_parked();
1211 task.await;
1212
1213 // After expansion, the excerpt covers lines 0-2, so 'a', 'b', 'c' should all be highlighted.
1214 // Content is now "a = 1\nb = 2\nc = 3\n" (18 chars).
1215 // 'a' at offset 0, 'b' at offset 6, 'c' at offset 12.
1216 assert_eq!(
1217 extract_semantic_highlights(&editor, &cx),
1218 vec![
1219 MultiBufferOffset(0)..MultiBufferOffset(1),
1220 MultiBufferOffset(6)..MultiBufferOffset(7),
1221 MultiBufferOffset(12)..MultiBufferOffset(13),
1222 ]
1223 );
1224 }
1225
    // Verifies that when a multibuffer contains two excerpts backed by the SAME
    // underlying buffer, a pair of edits (one per excerpt) results in only one
    // additional debounced SemanticTokensFull request, not one per excerpt/editor.
    #[gpui::test]
    async fn lsp_semantic_tokens_multibuffer_shared(cx: &mut TestAppContext) {
        init_test(cx, |_| {});

        // Enable full semantic tokens for TOML so editors request them.
        update_test_language_settings(cx, |language_settings| {
            language_settings.languages.0.insert(
                "TOML".into(),
                LanguageSettingsContent {
                    semantic_tokens: Some(SemanticTokens::Full),
                    ..LanguageSettingsContent::default()
                },
            );
        });

        let toml_language = Arc::new(Language::new(
            LanguageConfig {
                name: "TOML".into(),
                matcher: LanguageMatcher {
                    path_suffixes: vec!["toml".into()],
                    ..LanguageMatcher::default()
                },
                ..LanguageConfig::default()
            },
            None,
        ));

        // Single-entry legend: token_type index 0 in the data below is "property".
        let toml_legend = lsp::SemanticTokensLegend {
            token_types: vec!["property".into()],
            token_modifiers: Vec::new(),
        };

        let app_state = cx.update(workspace::AppState::test);

        cx.update(|cx| {
            assets::Assets.load_test_fonts(cx);
            crate::init(cx);
            workspace::init(app_state.clone(), cx);
        });

        let project = Project::test(app_state.fs.clone(), [], cx).await;
        let language_registry = project.read_with(cx, |project, _| project.languages().clone());
        // Counts SemanticTokensFull requests received by the fake server; the
        // test's core assertions are on this counter.
        let full_counter_toml = Arc::new(AtomicUsize::new(0));
        let full_counter_toml_clone = full_counter_toml.clone();

        let mut toml_server = language_registry.register_fake_lsp(
            toml_language.name(),
            FakeLspAdapter {
                name: "toml",
                capabilities: lsp::ServerCapabilities {
                    semantic_tokens_provider: Some(
                        lsp::SemanticTokensServerCapabilities::SemanticTokensOptions(
                            lsp::SemanticTokensOptions {
                                legend: toml_legend,
                                full: Some(lsp::SemanticTokensFullOptions::Delta { delta: None }),
                                ..lsp::SemanticTokensOptions::default()
                            },
                        ),
                    ),
                    ..lsp::ServerCapabilities::default()
                },
                initializer: Some(Box::new({
                    let full_counter_toml_clone = full_counter_toml_clone.clone();
                    move |fake_server| {
                        let full_counter = full_counter_toml_clone.clone();
                        fake_server
                            .set_request_handler::<lsp::request::SemanticTokensFullRequest, _, _>(
                                move |_, _| {
                                    // Record each full-token round trip before replying.
                                    full_counter.fetch_add(1, atomic::Ordering::Release);
                                    async move {
                                        Ok(Some(lsp::SemanticTokensResult::Tokens(
                                            lsp::SemanticTokens {
                                                // highlight 'a' as a property
                                                data: vec![
                                                    0, // delta_line
                                                    0, // delta_start
                                                    1, // length
                                                    0, // token_type
                                                    0, // token_modifiers_bitset
                                                ],
                                                result_id: Some("a".into()),
                                            },
                                        )))
                                    }
                                },
                            );
                    }
                })),
                ..FakeLspAdapter::default()
            },
        );
        language_registry.add(toml_language.clone());

        app_state
            .fs
            .as_fake()
            .insert_tree(
                EditorLspTestContext::root_path(),
                json!({
                    ".git": {},
                    "dir": {
                        "foo.toml": "a = 1\nb = 2\n",
                    }
                }),
            )
            .await;

        let window = cx.add_window(|window, cx| Workspace::test_new(project.clone(), window, cx));
        let workspace = window.root(cx).unwrap();

        let mut cx = VisualTestContext::from_window(*window.deref(), cx);
        project
            .update(&mut cx, |project, cx| {
                project.find_or_create_worktree(EditorLspTestContext::root_path(), true, cx)
            })
            .await
            .unwrap();
        cx.read(|cx| workspace.read(cx).worktree_scans_complete(cx))
            .await;

        let toml_file = cx.read(|cx| workspace.file_project_paths(cx)[0].clone());
        let toml_item = workspace
            .update_in(&mut cx, |workspace, window, cx| {
                workspace.open_path(toml_file, None, true, window, cx)
            })
            .await
            .expect("Could not open test file");

        let toml_editor = cx.update(|_, cx| {
            toml_item
                .act_as::<Editor>(cx)
                .expect("Opened test file wasn't an editor")
        });
        let toml_buffer = cx.read(|cx| {
            toml_editor
                .read(cx)
                .buffer()
                .read(cx)
                .as_singleton()
                .unwrap()
        });
        // Two excerpts over the same buffer, so edits in either excerpt dirty
        // the same underlying buffer.
        let multibuffer = cx.new(|cx| {
            let mut multibuffer = MultiBuffer::new(Capability::ReadWrite);
            multibuffer.push_excerpts(
                toml_buffer.clone(),
                [ExcerptRange::new(Point::new(0, 0)..Point::new(2, 0))],
                cx,
            );
            multibuffer.push_excerpts(
                toml_buffer.clone(),
                [ExcerptRange::new(Point::new(0, 0)..Point::new(2, 0))],
                cx,
            );
            multibuffer
        });

        let editor = workspace.update_in(&mut cx, |_, window, cx| {
            cx.new(|cx| build_editor_with_project(project, multibuffer, window, cx))
        });
        editor.update_in(&mut cx, |editor, window, cx| {
            let nav_history = workspace
                .read(cx)
                .active_pane()
                .read(cx)
                .nav_history_for_item(&cx.entity());
            editor.set_nav_history(Some(nav_history));
            window.focus(&editor.focus_handle(cx), cx)
        });

        let _toml_server = toml_server.next().await.unwrap();

        // Initial request.
        cx.executor().advance_clock(Duration::from_millis(200));
        let task = editor.update_in(&mut cx, |e, _, _| e.semantic_token_state.take_update_task());
        cx.run_until_parked();
        task.await;
        assert_eq!(full_counter_toml.load(atomic::Ordering::Acquire), 1);

        // Edit two parts of the multibuffer, which both map to the same buffer.
        //
        // Without debouncing, this grabs semantic tokens 4 times (twice for the
        // toml editor, and twice for the multibuffer).
        editor.update_in(&mut cx, |editor, _, cx| {
            editor.edit([(MultiBufferOffset(0)..MultiBufferOffset(1), "b")], cx);
            editor.edit([(MultiBufferOffset(12)..MultiBufferOffset(13), "c")], cx);
        });
        // Let the debounce window elapse, then drain the pending update task.
        cx.executor().advance_clock(Duration::from_millis(200));
        let task = editor.update_in(&mut cx, |e, _, _| e.semantic_token_state.take_update_task());
        cx.run_until_parked();
        task.await;
        assert_eq!(
            extract_semantic_highlights(&editor, &cx),
            vec![MultiBufferOffset(0)..MultiBufferOffset(1)]
        );

        // Exactly one additional request after the two edits: 1 initial + 1
        // debounced refetch shared across both excerpts.
        assert_eq!(full_counter_toml.load(atomic::Ordering::Acquire), 2);
    }
1422
1423 fn extract_semantic_highlights(
1424 editor: &Entity<Editor>,
1425 cx: &TestAppContext,
1426 ) -> Vec<Range<MultiBufferOffset>> {
1427 editor.read_with(cx, |editor, cx| {
1428 let multi_buffer_snapshot = editor.buffer().read(cx).snapshot(cx);
1429 editor
1430 .display_map
1431 .read(cx)
1432 .semantic_token_highlights
1433 .iter()
1434 .flat_map(|(_, (v, _))| v.iter())
1435 .map(|highlights| highlights.range.to_offset(&multi_buffer_snapshot))
1436 .collect()
1437 })
1438 }
1439
    // Verifies that changing `semantic_token_rules` in the global LSP settings
    // restyles semantic token highlights: a custom foreground color configured
    // for "function" tokens shows up after the next token refresh.
    #[gpui::test]
    async fn test_semantic_tokens_rules_changes_restyle_tokens(cx: &mut TestAppContext) {
        use gpui::{Hsla, Rgba, UpdateGlobal as _};
        use settings::{GlobalLspSettingsContent, SemanticTokenRule};

        init_test(cx, |_| {});

        // Enable full semantic tokens for Rust.
        update_test_language_settings(cx, |language_settings| {
            language_settings.languages.0.insert(
                "Rust".into(),
                LanguageSettingsContent {
                    semantic_tokens: Some(SemanticTokens::Full),
                    ..LanguageSettingsContent::default()
                },
            );
        });

        let mut cx = EditorLspTestContext::new_rust(
            lsp::ServerCapabilities {
                semantic_tokens_provider: Some(
                    lsp::SemanticTokensServerCapabilities::SemanticTokensOptions(
                        lsp::SemanticTokensOptions {
                            legend: lsp::SemanticTokensLegend {
                                token_types: Vec::from(["function".into()]),
                                token_modifiers: Vec::new(),
                            },
                            full: Some(lsp::SemanticTokensFullOptions::Delta { delta: None }),
                            ..lsp::SemanticTokensOptions::default()
                        },
                    ),
                ),
                ..lsp::ServerCapabilities::default()
            },
            cx,
        )
        .await;

        // The server always reports one 4-char "function" token starting at
        // column 3 (i.e. "main" in "fn main() {}").
        let mut full_request = cx
            .set_request_handler::<lsp::request::SemanticTokensFullRequest, _, _>(
                move |_, _, _| {
                    async move {
                        Ok(Some(lsp::SemanticTokensResult::Tokens(
                            lsp::SemanticTokens {
                                data: vec![
                                    0, // delta_line
                                    3, // delta_start
                                    4, // length
                                    0, // token_type (function)
                                    0, // token_modifiers_bitset
                                ],
                                result_id: None,
                            },
                        )))
                    }
                },
            );

        // Trigger initial semantic tokens fetch
        cx.set_state("ˇfn main() {}");
        full_request.next().await;
        cx.run_until_parked();

        // Verify initial highlights exist (with no custom color yet)
        let initial_ranges = extract_semantic_highlights(&cx.editor, &cx);
        assert_eq!(
            initial_ranges,
            vec![MultiBufferOffset(3)..MultiBufferOffset(7)],
            "Should have initial semantic token highlights"
        );
        let initial_styles = extract_semantic_highlight_styles(&cx.editor, &cx);
        assert_eq!(initial_styles.len(), 1, "Should have one highlight style");
        // Initial color should be None or theme default (not red or blue)
        let initial_color = initial_styles[0].color;

        // Set a custom foreground color for function tokens via settings.json
        let red_color = Rgba {
            r: 1.0,
            g: 0.0,
            b: 0.0,
            a: 1.0,
        };
        cx.update(|_, cx| {
            SettingsStore::update_global(cx, |store, cx| {
                store.update_user_settings(cx, |settings| {
                    settings.global_lsp_settings = Some(GlobalLspSettingsContent {
                        semantic_token_rules: Some(SemanticTokenRules {
                            rules: Vec::from([SemanticTokenRule {
                                token_type: Some("function".to_string()),
                                foreground_color: Some(red_color),
                                ..SemanticTokenRule::default()
                            }]),
                        }),
                        ..GlobalLspSettingsContent::default()
                    });
                });
            });
        });

        // Trigger a refetch by making an edit (which forces semantic tokens update)
        cx.set_state("ˇfn main() { }");
        full_request.next().await;
        cx.run_until_parked();

        // Verify the highlights now have the custom red color
        let styles_after_settings_change = extract_semantic_highlight_styles(&cx.editor, &cx);
        assert_eq!(
            styles_after_settings_change.len(),
            1,
            "Should still have one highlight"
        );
        assert_eq!(
            styles_after_settings_change[0].color,
            Some(Hsla::from(red_color)),
            "Highlight should have the custom red color from settings.json"
        );
        assert_ne!(
            styles_after_settings_change[0].color, initial_color,
            "Color should have changed from initial"
        );
    }
1560
    // Verifies that `experimental_theme_overrides` changes restyle existing
    // semantic token highlights in three phases: apply an override (red),
    // change it (blue), then remove it (reverts to the initial color).
    #[gpui::test]
    async fn test_theme_override_changes_restyle_semantic_tokens(cx: &mut TestAppContext) {
        use collections::IndexMap;
        use gpui::{Hsla, Rgba, UpdateGlobal as _};
        use theme::{HighlightStyleContent, ThemeStyleContent};

        init_test(cx, |_| {});

        // Enable full semantic tokens for Rust.
        update_test_language_settings(cx, |language_settings| {
            language_settings.languages.0.insert(
                "Rust".into(),
                LanguageSettingsContent {
                    semantic_tokens: Some(SemanticTokens::Full),
                    ..LanguageSettingsContent::default()
                },
            );
        });

        let mut cx = EditorLspTestContext::new_rust(
            lsp::ServerCapabilities {
                semantic_tokens_provider: Some(
                    lsp::SemanticTokensServerCapabilities::SemanticTokensOptions(
                        lsp::SemanticTokensOptions {
                            legend: lsp::SemanticTokensLegend {
                                token_types: Vec::from(["function".into()]),
                                token_modifiers: Vec::new(),
                            },
                            full: Some(lsp::SemanticTokensFullOptions::Delta { delta: None }),
                            ..lsp::SemanticTokensOptions::default()
                        },
                    ),
                ),
                ..lsp::ServerCapabilities::default()
            },
            cx,
        )
        .await;

        // The server always reports one 4-char "function" token at column 3
        // ("main" in "fn main() {}").
        let mut full_request = cx
            .set_request_handler::<lsp::request::SemanticTokensFullRequest, _, _>(
                move |_, _, _| async move {
                    Ok(Some(lsp::SemanticTokensResult::Tokens(
                        lsp::SemanticTokens {
                            data: vec![
                                0, // delta_line
                                3, // delta_start
                                4, // length
                                0, // token_type (function)
                                0, // token_modifiers_bitset
                            ],
                            result_id: None,
                        },
                    )))
                },
            );

        cx.set_state("ˇfn main() {}");
        full_request.next().await;
        cx.run_until_parked();

        // Capture the pre-override color to compare against later.
        let initial_styles = extract_semantic_highlight_styles(&cx.editor, &cx);
        assert_eq!(initial_styles.len(), 1, "Should have one highlight style");
        let initial_color = initial_styles[0].color;

        // Changing experimental_theme_overrides triggers GlobalTheme reload,
        // which fires theme_changed → refresh_semantic_token_highlights.
        let red_color: Hsla = Rgba {
            r: 1.0,
            g: 0.0,
            b: 0.0,
            a: 1.0,
        }
        .into();
        cx.update(|_, cx| {
            SettingsStore::update_global(cx, |store, cx| {
                store.update_user_settings(cx, |settings| {
                    settings.theme.experimental_theme_overrides = Some(ThemeStyleContent {
                        syntax: IndexMap::from_iter([(
                            "function".to_string(),
                            HighlightStyleContent {
                                color: Some("#ff0000".to_string()),
                                background_color: None,
                                font_style: None,
                                font_weight: None,
                            },
                        )]),
                        ..ThemeStyleContent::default()
                    });
                });
            });
        });

        cx.executor().advance_clock(Duration::from_millis(200));
        cx.run_until_parked();

        let styles_after_override = extract_semantic_highlight_styles(&cx.editor, &cx);
        assert_eq!(styles_after_override.len(), 1);
        assert_eq!(
            styles_after_override[0].color,
            Some(red_color),
            "Highlight should have red color from theme override"
        );
        assert_ne!(
            styles_after_override[0].color, initial_color,
            "Color should have changed from initial"
        );

        // Changing the override to a different color also restyles.
        let blue_color: Hsla = Rgba {
            r: 0.0,
            g: 0.0,
            b: 1.0,
            a: 1.0,
        }
        .into();
        cx.update(|_, cx| {
            SettingsStore::update_global(cx, |store, cx| {
                store.update_user_settings(cx, |settings| {
                    settings.theme.experimental_theme_overrides = Some(ThemeStyleContent {
                        syntax: IndexMap::from_iter([(
                            "function".to_string(),
                            HighlightStyleContent {
                                color: Some("#0000ff".to_string()),
                                background_color: None,
                                font_style: None,
                                font_weight: None,
                            },
                        )]),
                        ..ThemeStyleContent::default()
                    });
                });
            });
        });

        cx.executor().advance_clock(Duration::from_millis(200));
        cx.run_until_parked();

        let styles_after_second_override = extract_semantic_highlight_styles(&cx.editor, &cx);
        assert_eq!(styles_after_second_override.len(), 1);
        assert_eq!(
            styles_after_second_override[0].color,
            Some(blue_color),
            "Highlight should have blue color from updated theme override"
        );

        // Removing overrides reverts to the original theme color.
        cx.update(|_, cx| {
            SettingsStore::update_global(cx, |store, cx| {
                store.update_user_settings(cx, |settings| {
                    settings.theme.experimental_theme_overrides = None;
                });
            });
        });

        cx.executor().advance_clock(Duration::from_millis(200));
        cx.run_until_parked();

        let styles_after_clear = extract_semantic_highlight_styles(&cx.editor, &cx);
        assert_eq!(styles_after_clear.len(), 1);
        assert_eq!(
            styles_after_clear[0].color, initial_color,
            "Highlight should revert to initial color after clearing overrides"
        );
    }
1725
    // Verifies that per-theme overrides (`theme_overrides` keyed by the active
    // theme's name) restyle semantic token highlights, same as the global
    // experimental overrides tested above.
    #[gpui::test]
    async fn test_per_theme_overrides_restyle_semantic_tokens(cx: &mut TestAppContext) {
        use collections::IndexMap;
        use gpui::{Hsla, Rgba, UpdateGlobal as _};
        use theme::{HighlightStyleContent, ThemeStyleContent};
        use ui::ActiveTheme as _;

        init_test(cx, |_| {});

        // Enable full semantic tokens for Rust.
        update_test_language_settings(cx, |language_settings| {
            language_settings.languages.0.insert(
                "Rust".into(),
                LanguageSettingsContent {
                    semantic_tokens: Some(SemanticTokens::Full),
                    ..LanguageSettingsContent::default()
                },
            );
        });

        let mut cx = EditorLspTestContext::new_rust(
            lsp::ServerCapabilities {
                semantic_tokens_provider: Some(
                    lsp::SemanticTokensServerCapabilities::SemanticTokensOptions(
                        lsp::SemanticTokensOptions {
                            legend: lsp::SemanticTokensLegend {
                                token_types: Vec::from(["function".into()]),
                                token_modifiers: Vec::new(),
                            },
                            full: Some(lsp::SemanticTokensFullOptions::Delta { delta: None }),
                            ..lsp::SemanticTokensOptions::default()
                        },
                    ),
                ),
                ..lsp::ServerCapabilities::default()
            },
            cx,
        )
        .await;

        // The server always reports one 4-char "function" token at column 3
        // ("main" in "fn main() {}").
        let mut full_request = cx
            .set_request_handler::<lsp::request::SemanticTokensFullRequest, _, _>(
                move |_, _, _| async move {
                    Ok(Some(lsp::SemanticTokensResult::Tokens(
                        lsp::SemanticTokens {
                            data: vec![
                                0, // delta_line
                                3, // delta_start
                                4, // length
                                0, // token_type (function)
                                0, // token_modifiers_bitset
                            ],
                            result_id: None,
                        },
                    )))
                },
            );

        cx.set_state("ˇfn main() {}");
        full_request.next().await;
        cx.run_until_parked();

        // Capture the pre-override color to compare against later.
        let initial_styles = extract_semantic_highlight_styles(&cx.editor, &cx);
        assert_eq!(initial_styles.len(), 1, "Should have one highlight style");
        let initial_color = initial_styles[0].color;

        // Per-theme overrides (theme_overrides keyed by theme name) also go through
        // GlobalTheme reload → theme_changed → refresh_semantic_token_highlights.
        let theme_name = cx.update(|_, cx| cx.theme().name.to_string());
        let green_color: Hsla = Rgba {
            r: 0.0,
            g: 1.0,
            b: 0.0,
            a: 1.0,
        }
        .into();
        cx.update(|_, cx| {
            SettingsStore::update_global(cx, |store, cx| {
                store.update_user_settings(cx, |settings| {
                    // Key the override by the currently-active theme's name so
                    // it actually applies to this test's theme.
                    settings.theme.theme_overrides = collections::HashMap::from_iter([(
                        theme_name.clone(),
                        ThemeStyleContent {
                            syntax: IndexMap::from_iter([(
                                "function".to_string(),
                                HighlightStyleContent {
                                    color: Some("#00ff00".to_string()),
                                    background_color: None,
                                    font_style: None,
                                    font_weight: None,
                                },
                            )]),
                            ..ThemeStyleContent::default()
                        },
                    )]);
                });
            });
        });

        cx.executor().advance_clock(Duration::from_millis(200));
        cx.run_until_parked();

        let styles_after_override = extract_semantic_highlight_styles(&cx.editor, &cx);
        assert_eq!(styles_after_override.len(), 1);
        assert_eq!(
            styles_after_override[0].color,
            Some(green_color),
            "Highlight should have green color from per-theme override"
        );
        assert_ne!(
            styles_after_override[0].color, initial_color,
            "Color should have changed from initial"
        );
    }
1838
    // Verifies that stopping the language server clears all semantic token
    // highlights from the editor.
    #[gpui::test]
    async fn test_stopping_language_server_clears_semantic_tokens(cx: &mut TestAppContext) {
        init_test(cx, |_| {});

        // Enable full semantic tokens for Rust.
        update_test_language_settings(cx, |language_settings| {
            language_settings.languages.0.insert(
                "Rust".into(),
                LanguageSettingsContent {
                    semantic_tokens: Some(SemanticTokens::Full),
                    ..LanguageSettingsContent::default()
                },
            );
        });

        let mut cx = EditorLspTestContext::new_rust(
            lsp::ServerCapabilities {
                semantic_tokens_provider: Some(
                    lsp::SemanticTokensServerCapabilities::SemanticTokensOptions(
                        lsp::SemanticTokensOptions {
                            legend: lsp::SemanticTokensLegend {
                                token_types: vec!["function".into()],
                                token_modifiers: Vec::new(),
                            },
                            full: Some(lsp::SemanticTokensFullOptions::Delta { delta: None }),
                            ..lsp::SemanticTokensOptions::default()
                        },
                    ),
                ),
                ..lsp::ServerCapabilities::default()
            },
            cx,
        )
        .await;

        // The server always reports one 4-char "function" token at column 3
        // ("main" in "fn main() {}").
        let mut full_request = cx
            .set_request_handler::<lsp::request::SemanticTokensFullRequest, _, _>(
                move |_, _, _| async move {
                    Ok(Some(lsp::SemanticTokensResult::Tokens(
                        lsp::SemanticTokens {
                            data: vec![
                                0, // delta_line
                                3, // delta_start
                                4, // length
                                0, // token_type
                                0, // token_modifiers_bitset
                            ],
                            result_id: None,
                        },
                    )))
                },
            );

        cx.set_state("ˇfn main() {}");
        assert!(full_request.next().await.is_some());
        cx.run_until_parked();

        assert_eq!(
            extract_semantic_highlights(&cx.editor, &cx),
            vec![MultiBufferOffset(3)..MultiBufferOffset(7)],
            "Semantic tokens should be present before stopping the server"
        );

        // Stop every language server attached to the editor's buffers.
        cx.update_editor(|editor, _, cx| {
            let buffers = editor.buffer.read(cx).all_buffers().into_iter().collect();
            editor.project.as_ref().unwrap().update(cx, |project, cx| {
                project.stop_language_servers_for_buffers(buffers, HashSet::default(), cx);
            })
        });
        // Give the refresh machinery time to observe the server shutdown.
        cx.executor().advance_clock(Duration::from_millis(200));
        cx.run_until_parked();

        assert_eq!(
            extract_semantic_highlights(&cx.editor, &cx),
            Vec::new(),
            "Semantic tokens should be cleared after stopping the server"
        );
    }
1916
    // Verifies that flipping the `semantic_tokens` language setting from Full
    // to Off clears existing semantic token highlights.
    #[gpui::test]
    async fn test_disabling_semantic_tokens_setting_clears_highlights(cx: &mut TestAppContext) {
        init_test(cx, |_| {});

        // Start with semantic tokens enabled for Rust.
        update_test_language_settings(cx, |language_settings| {
            language_settings.languages.0.insert(
                "Rust".into(),
                LanguageSettingsContent {
                    semantic_tokens: Some(SemanticTokens::Full),
                    ..LanguageSettingsContent::default()
                },
            );
        });

        let mut cx = EditorLspTestContext::new_rust(
            lsp::ServerCapabilities {
                semantic_tokens_provider: Some(
                    lsp::SemanticTokensServerCapabilities::SemanticTokensOptions(
                        lsp::SemanticTokensOptions {
                            legend: lsp::SemanticTokensLegend {
                                token_types: vec!["function".into()],
                                token_modifiers: Vec::new(),
                            },
                            full: Some(lsp::SemanticTokensFullOptions::Delta { delta: None }),
                            ..lsp::SemanticTokensOptions::default()
                        },
                    ),
                ),
                ..lsp::ServerCapabilities::default()
            },
            cx,
        )
        .await;

        // The server always reports one 4-char "function" token at column 3
        // ("main" in "fn main() {}").
        let mut full_request = cx
            .set_request_handler::<lsp::request::SemanticTokensFullRequest, _, _>(
                move |_, _, _| async move {
                    Ok(Some(lsp::SemanticTokensResult::Tokens(
                        lsp::SemanticTokens {
                            data: vec![
                                0, // delta_line
                                3, // delta_start
                                4, // length
                                0, // token_type
                                0, // token_modifiers_bitset
                            ],
                            result_id: None,
                        },
                    )))
                },
            );

        cx.set_state("ˇfn main() {}");
        assert!(full_request.next().await.is_some());
        cx.run_until_parked();

        assert_eq!(
            extract_semantic_highlights(&cx.editor, &cx),
            vec![MultiBufferOffset(3)..MultiBufferOffset(7)],
            "Semantic tokens should be present before disabling the setting"
        );

        // Now turn semantic tokens off for Rust.
        update_test_language_settings(&mut cx, |language_settings| {
            language_settings.languages.0.insert(
                "Rust".into(),
                LanguageSettingsContent {
                    semantic_tokens: Some(SemanticTokens::Off),
                    ..LanguageSettingsContent::default()
                },
            );
        });
        // Give the settings-change handler time to run and clear highlights.
        cx.executor().advance_clock(Duration::from_millis(200));
        cx.run_until_parked();

        assert_eq!(
            extract_semantic_highlights(&cx.editor, &cx),
            Vec::new(),
            "Semantic tokens should be cleared after disabling the setting"
        );
    }
1997
1998 fn extract_semantic_highlight_styles(
1999 editor: &Entity<Editor>,
2000 cx: &TestAppContext,
2001 ) -> Vec<HighlightStyle> {
2002 editor.read_with(cx, |editor, cx| {
2003 editor
2004 .display_map
2005 .read(cx)
2006 .semantic_token_highlights
2007 .iter()
2008 .flat_map(|(_, (v, interner))| {
2009 v.iter().map(|highlights| interner[highlights.style])
2010 })
2011 .collect()
2012 })
2013 }
2014}