1use std::{collections::hash_map, sync::Arc, time::Duration};
2
3use collections::{HashMap, HashSet};
4use futures::future::join_all;
5use gpui::{
6 App, Context, FontStyle, FontWeight, HighlightStyle, StrikethroughStyle, Task, UnderlineStyle,
7};
8use itertools::Itertools as _;
9use language::language_settings::language_settings;
10use project::{
11 lsp_store::{
12 BufferSemanticToken, BufferSemanticTokens, RefreshForServer, SemanticTokenStylizer,
13 TokenType,
14 },
15 project_settings::ProjectSettings,
16};
17use settings::{
18 SemanticTokenColorOverride, SemanticTokenFontStyle, SemanticTokenFontWeight,
19 SemanticTokenRules, Settings as _,
20};
21use text::BufferId;
22use theme::SyntaxTheme;
23use ui::ActiveTheme as _;
24
25use crate::{
26 Editor,
27 actions::ToggleSemanticHighlights,
28 display_map::{HighlightStyleInterner, SemanticTokenHighlight},
29};
30
/// Per-editor state for LSP semantic-token highlighting.
pub(super) struct SemanticTokenState {
    /// User-configured styling rules, sourced from the global LSP project settings.
    rules: SemanticTokenRules,
    /// Whether semantic highlighting is currently active for this editor.
    enabled: bool,
    /// The in-flight (debounced) token-fetch task; replaced wholesale on every refresh.
    update_task: Task<()>,
    /// Buffer version at which tokens were last fetched, per buffer — used to
    /// skip redundant requests when a buffer hasn't changed since.
    fetched_for_buffers: HashMap<BufferId, clock::Global>,
}
37
38impl SemanticTokenState {
39 pub(super) fn new(cx: &App, enabled: bool) -> Self {
40 Self {
41 rules: ProjectSettings::get_global(cx)
42 .global_lsp_settings
43 .semantic_token_rules
44 .clone(),
45 enabled,
46 update_task: Task::ready(()),
47 fetched_for_buffers: HashMap::default(),
48 }
49 }
50
51 pub(super) fn enabled(&self) -> bool {
52 self.enabled
53 }
54
55 pub(super) fn toggle_enabled(&mut self) {
56 self.enabled = !self.enabled;
57 }
58
59 #[cfg(test)]
60 pub(super) fn take_update_task(&mut self) -> Task<()> {
61 std::mem::replace(&mut self.update_task, Task::ready(()))
62 }
63
64 pub(super) fn invalidate_buffer(&mut self, buffer_id: &BufferId) {
65 self.fetched_for_buffers.remove(buffer_id);
66 }
67
68 pub(super) fn update_rules(&mut self, new_rules: SemanticTokenRules) -> bool {
69 if new_rules != self.rules {
70 self.rules = new_rules;
71 true
72 } else {
73 false
74 }
75 }
76}
77
impl Editor {
    /// Returns true if any buffer in this editor's multi-buffer has a provider
    /// capable of serving semantic tokens.
    pub fn supports_semantic_tokens(&self, cx: &mut App) -> bool {
        let Some(provider) = self.semantics_provider.as_ref() else {
            return false;
        };

        // OR support together across every buffer in the multi-buffer.
        let mut supports = false;
        self.buffer().update(cx, |this, cx| {
            this.for_each_buffer(|buffer| {
                supports |= provider.supports_semantic_tokens(buffer, cx);
            });
        });

        supports
    }

    /// Whether semantic highlighting is currently enabled for this editor.
    pub fn semantic_highlights_enabled(&self) -> bool {
        self.semantic_token_state.enabled()
    }

    /// Action handler: flips semantic highlighting and immediately refreshes
    /// (or clears) the highlights accordingly.
    pub fn toggle_semantic_highlights(
        &mut self,
        _: &ToggleSemanticHighlights,
        _window: &mut gpui::Window,
        cx: &mut Context<Self>,
    ) {
        self.semantic_token_state.toggle_enabled();
        self.update_semantic_tokens(None, None, cx);
    }

    /// Fetches semantic tokens for the currently visible buffers (plus
    /// `buffer_id`, if given) and installs the resulting highlights into the
    /// display map.
    ///
    /// When `for_server` is set, the cached per-buffer versions are drained so
    /// all buffers are re-queried and stale highlights are invalidated. The
    /// fetch is debounced by 50ms via `update_task`, so rapid successive calls
    /// coalesce into a single request round.
    pub(crate) fn update_semantic_tokens(
        &mut self,
        buffer_id: Option<BufferId>,
        for_server: Option<RefreshForServer>,
        cx: &mut Context<Self>,
    ) {
        // Outside full-editor mode, or when the feature is toggled off, drop
        // all cached state and highlights instead of fetching.
        if !self.mode().is_full() || !self.semantic_token_state.enabled() {
            self.semantic_token_state.fetched_for_buffers.clear();
            self.display_map.update(cx, |display_map, _| {
                display_map.semantic_token_highlights.clear();
            });
            // Cancel any in-flight fetch by replacing its task.
            self.semantic_token_state.update_task = Task::ready(());
            cx.notify();
            return;
        }

        // A server-driven refresh forgets everything previously fetched; the
        // affected buffers get their highlights invalidated once the new
        // results start arriving below.
        let mut invalidate_semantic_highlights_for_buffers = HashSet::default();
        if for_server.is_some() {
            invalidate_semantic_highlights_for_buffers.extend(
                self.semantic_token_state
                    .fetched_for_buffers
                    .drain()
                    .map(|(buffer_id, _)| buffer_id),
            );
        }

        let Some((sema, project)) = self.semantics_provider.clone().zip(self.project.clone())
        else {
            return;
        };

        // Candidate buffers: everything visible, plus the explicitly requested
        // buffer; keep only registered buffers whose language settings enable
        // semantic tokens, deduplicated by buffer id.
        let buffers_to_query = self
            .visible_excerpts(true, cx)
            .into_values()
            .map(|(buffer, ..)| buffer)
            .chain(buffer_id.and_then(|buffer_id| self.buffer.read(cx).buffer(buffer_id)))
            .filter_map(|editor_buffer| {
                let editor_buffer_id = editor_buffer.read(cx).remote_id();
                if self.registered_buffers.contains_key(&editor_buffer_id)
                    && language_settings(
                        editor_buffer.read(cx).language().map(|l| l.name()),
                        editor_buffer.read(cx).file(),
                        cx,
                    )
                    .semantic_tokens
                    .enabled()
                {
                    Some((editor_buffer_id, editor_buffer))
                } else {
                    None
                }
            })
            .unique_by(|(buffer_id, _)| *buffer_id)
            .collect::<Vec<_>>();

        self.semantic_token_state.update_task = cx.spawn(async move |editor, cx| {
            // Debounce: coalesce bursts of edits into a single request round.
            cx.background_executor()
                .timer(Duration::from_millis(50))
                .await;
            // Kick off one fetch per buffer whose version advanced since the
            // last successful fetch; unchanged buffers are skipped entirely.
            let Some(all_semantic_tokens_task) = editor
                .update(cx, |editor, cx| {
                    buffers_to_query
                        .into_iter()
                        .filter_map(|(buffer_id, buffer)| {
                            let known_version =
                                editor.semantic_token_state.fetched_for_buffers.get(&buffer_id);
                            let query_version = buffer.read(cx).version();
                            if known_version.is_some_and(|known_version| {
                                !query_version.changed_since(known_version)
                            }) {
                                None
                            } else {
                                let task = sema.semantic_tokens(buffer, for_server, cx);
                                Some(async move { (buffer_id, query_version, task.await) })
                            }
                        })
                        .collect::<Vec<_>>()
                })
                .ok()
            else {
                return;
            };

            let all_semantic_tokens = join_all(all_semantic_tokens_task).await;
            editor.update(cx, |editor, cx| {
                // Drop stale highlights for buffers invalidated by a
                // server-driven refresh, before installing fresh results.
                editor.display_map.update(cx, |display_map, _| {
                    for buffer_id in invalidate_semantic_highlights_for_buffers {
                        display_map.invalidate_semantic_highlights(buffer_id);
                    }
                });

                if all_semantic_tokens.is_empty() {
                    return;
                }
                let multi_buffer_snapshot = editor.buffer().read(cx).snapshot(cx);
                let all_excerpts = editor.buffer().read(cx).excerpt_ids();

                for (buffer_id, query_version, tokens) in all_semantic_tokens {
                    let tokens = match tokens {
                        Ok(BufferSemanticTokens { tokens: Some(tokens) }) => {
                            tokens
                        },
                        // `tokens: None` means the server cleared its tokens
                        // for this buffer: drop our highlights too.
                        Ok(BufferSemanticTokens { tokens: None }) => {
                            editor.display_map.update(cx, |display_map, _| {
                                display_map.invalidate_semantic_highlights(buffer_id);
                            });
                            continue;
                        },
                        Err(e) => {
                            log::error!("Failed to fetch semantic tokens for buffer {buffer_id:?}: {e:#}");
                            continue;
                        },
                    };

                    // Record the version we fetched at, but never move the
                    // recorded version backwards if a newer fetch already
                    // landed for this buffer.
                    match editor.semantic_token_state.fetched_for_buffers.entry(buffer_id) {
                        hash_map::Entry::Occupied(mut o) => {
                            if query_version.changed_since(o.get()) {
                                o.insert(query_version);
                            } else {
                                continue;
                            }
                        },
                        hash_map::Entry::Vacant(v) => {
                            v.insert(query_version);
                        },
                    }

                    let language_name = editor
                        .buffer()
                        .read(cx)
                        .buffer(buffer_id)
                        .and_then(|buf| buf.read(cx).language().map(|l| l.name()));

                    editor.display_map.update(cx, |display_map, cx| {
                        project.read(cx).lsp_store().update(cx, |lsp_store, cx| {
                            // Combine highlights from every server that
                            // produced tokens for this buffer, interning the
                            // styles so duplicates share storage.
                            let mut token_highlights = Vec::new();
                            let mut interner = HighlightStyleInterner::default();
                            for (server_id, server_tokens) in tokens {
                                let Some(stylizer) = lsp_store.get_or_create_token_stylizer(
                                    server_id,
                                    language_name.as_ref(),
                                    cx,
                                )
                                else {
                                    continue;
                                };
                                token_highlights.extend(buffer_into_editor_highlights(
                                    &server_tokens,
                                    stylizer,
                                    &all_excerpts,
                                    &multi_buffer_snapshot,
                                    &mut interner,
                                    cx,
                                ));
                            }

                            // Keep highlights sorted by start position for the
                            // display map's consumption.
                            token_highlights.sort_by(|a, b| {
                                a.range.start.cmp(&b.range.start, &multi_buffer_snapshot)
                            });
                            display_map
                                .semantic_token_highlights
                                .insert(buffer_id, (Arc::from(token_highlights), Arc::new(interner)));
                        });
                    });
                }

                cx.notify();
            }).ok();
        });
    }

    /// Forces a full refetch of semantic tokens for all buffers by clearing the
    /// cached per-buffer versions first.
    pub(super) fn refresh_semantic_token_highlights(&mut self, cx: &mut Context<Self>) {
        self.semantic_token_state.fetched_for_buffers.clear();
        self.update_semantic_tokens(None, None, cx);
    }
}
285
286fn buffer_into_editor_highlights<'a, 'b>(
287 buffer_tokens: &'a [BufferSemanticToken],
288 stylizer: &'a SemanticTokenStylizer,
289 all_excerpts: &'a [multi_buffer::ExcerptId],
290 multi_buffer_snapshot: &'a multi_buffer::MultiBufferSnapshot,
291 interner: &'b mut HighlightStyleInterner,
292 cx: &'a App,
293) -> impl Iterator<Item = SemanticTokenHighlight> + use<'a, 'b> {
294 buffer_tokens.iter().filter_map(|token| {
295 let multi_buffer_start = all_excerpts.iter().find_map(|&excerpt_id| {
296 multi_buffer_snapshot.anchor_in_excerpt(excerpt_id, token.range.start)
297 })?;
298 let multi_buffer_end = all_excerpts.iter().find_map(|&excerpt_id| {
299 multi_buffer_snapshot.anchor_in_excerpt(excerpt_id, token.range.end)
300 })?;
301
302 let style = convert_token(
303 stylizer,
304 cx.theme().syntax(),
305 token.token_type,
306 token.token_modifiers,
307 )?;
308 let style = interner.intern(style);
309 Some(SemanticTokenHighlight {
310 range: multi_buffer_start..multi_buffer_end,
311 style,
312 token_type: token.token_type,
313 token_modifiers: token.token_modifiers,
314 server_id: stylizer.server_id(),
315 })
316 })
317}
318
/// Resolves a semantic token's type and modifier bitset into a single
/// `HighlightStyle` by folding every matching user rule, in order, on top of
/// the theme style each rule references.
///
/// Returns `None` when no rule matches the token at all, letting callers skip
/// the token entirely.
fn convert_token(
    stylizer: &SemanticTokenStylizer,
    theme: &SyntaxTheme,
    token_type: TokenType,
    modifiers: u32,
) -> Option<HighlightStyle> {
    let rules = stylizer.rules_for_token(token_type)?;
    // A rule applies only when every modifier it requires is set on the token.
    let matching = rules.iter().filter(|rule| {
        rule.token_modifiers
            .iter()
            .all(|m| stylizer.has_modifier(modifiers, m))
    });

    let mut highlight = HighlightStyle::default();
    let mut empty = true;

    for rule in matching {
        empty = false;

        // The first theme style name that resolves is used as this rule's
        // fallback style.
        let style = rule.style.iter().find_map(|style| theme.get_opt(style));

        // Per-field precedence: the rule's explicit override, then the
        // resolved theme style, then whatever an earlier rule already set.
        macro_rules! overwrite {
            (
                highlight.$highlight_field:ident,
                SemanticTokenRule::$rule_field:ident,
                $transform:expr $(,)?
            ) => {
                highlight.$highlight_field = rule
                    .$rule_field
                    .map($transform)
                    .or_else(|| style.and_then(|s| s.$highlight_field))
                    .or(highlight.$highlight_field)
            };
        }

        overwrite!(
            highlight.color,
            SemanticTokenRule::foreground_color,
            Into::into,
        );

        overwrite!(
            highlight.background_color,
            SemanticTokenRule::background_color,
            Into::into,
        );

        overwrite!(
            highlight.font_weight,
            SemanticTokenRule::font_weight,
            |w| match w {
                SemanticTokenFontWeight::Normal => FontWeight::NORMAL,
                SemanticTokenFontWeight::Bold => FontWeight::BOLD,
            },
        );

        overwrite!(
            highlight.font_style,
            SemanticTokenRule::font_style,
            |s| match s {
                SemanticTokenFontStyle::Normal => FontStyle::Normal,
                SemanticTokenFontStyle::Italic => FontStyle::Italic,
            },
        );

        // NOTE: `highlight.color` read here is the color accumulated so far,
        // including this rule's own foreground override applied above — this
        // is what makes `InheritForeground(true)` track the final text color.
        overwrite!(highlight.underline, SemanticTokenRule::underline, |u| {
            UnderlineStyle {
                thickness: 1.0.into(),
                color: match u {
                    SemanticTokenColorOverride::InheritForeground(true) => highlight.color,
                    SemanticTokenColorOverride::InheritForeground(false) => None,
                    SemanticTokenColorOverride::Replace(c) => Some(c.into()),
                },
                ..Default::default()
            }
        });

        overwrite!(
            highlight.strikethrough,
            SemanticTokenRule::strikethrough,
            |s| StrikethroughStyle {
                thickness: 1.0.into(),
                color: match s {
                    SemanticTokenColorOverride::InheritForeground(true) => highlight.color,
                    SemanticTokenColorOverride::InheritForeground(false) => None,
                    SemanticTokenColorOverride::Replace(c) => Some(c.into()),
                },
            },
        );
    }

    // `empty` distinguishes "no matching rules" from "rules matched but set
    // nothing" — the latter still yields a default (no-op) style.
    if empty { None } else { Some(highlight) }
}
412
413#[cfg(test)]
414mod tests {
415 use std::{
416 ops::{Deref as _, Range},
417 sync::atomic::{self, AtomicUsize},
418 };
419
420 use futures::StreamExt as _;
421 use gpui::{
422 AppContext as _, Entity, Focusable as _, HighlightStyle, TestAppContext, VisualTestContext,
423 };
424 use language::{Language, LanguageConfig, LanguageMatcher};
425 use languages::FakeLspAdapter;
426 use multi_buffer::{
427 AnchorRangeExt, ExcerptRange, ExpandExcerptDirection, MultiBuffer, MultiBufferOffset,
428 };
429 use project::Project;
430 use rope::Point;
431 use serde_json::json;
432 use settings::{LanguageSettingsContent, SemanticTokenRules, SemanticTokens, SettingsStore};
433 use workspace::{Workspace, WorkspaceHandle as _};
434
435 use crate::{
436 Capability,
437 editor_tests::{init_test, update_test_language_settings},
438 test::{build_editor_with_project, editor_lsp_test_context::EditorLspTestContext},
439 };
440
441 use super::*;
442
    // A server advertising full-only token support (`delta: None`) must be
    // re-queried with a *full* request after every edit, even when it returns
    // a result ID.
    #[gpui::test]
    async fn lsp_semantic_tokens_full_capability(cx: &mut TestAppContext) {
        init_test(cx, |_| {});

        // Enable semantic tokens for Rust in the language settings.
        update_test_language_settings(cx, |language_settings| {
            language_settings.languages.0.insert(
                "Rust".into(),
                LanguageSettingsContent {
                    semantic_tokens: Some(SemanticTokens::Full),
                    ..Default::default()
                },
            );
        });

        let mut cx = EditorLspTestContext::new_rust(
            lsp::ServerCapabilities {
                semantic_tokens_provider: Some(
                    lsp::SemanticTokensServerCapabilities::SemanticTokensOptions(
                        lsp::SemanticTokensOptions {
                            legend: lsp::SemanticTokensLegend {
                                token_types: vec!["function".into()],
                                token_modifiers: Vec::new(),
                            },
                            // `delta: None` — the server cannot serve deltas.
                            full: Some(lsp::SemanticTokensFullOptions::Delta { delta: None }),
                            ..lsp::SemanticTokensOptions::default()
                        },
                    ),
                ),
                ..lsp::ServerCapabilities::default()
            },
            cx,
        )
        .await;

        // Counts how many full requests the fake server receives.
        let full_counter = Arc::new(AtomicUsize::new(0));
        let full_counter_clone = full_counter.clone();

        let mut full_request = cx
            .set_request_handler::<lsp::request::SemanticTokensFullRequest, _, _>(
                move |_, _, _| {
                    full_counter_clone.fetch_add(1, atomic::Ordering::Release);
                    async move {
                        Ok(Some(lsp::SemanticTokensResult::Tokens(
                            lsp::SemanticTokens {
                                data: vec![
                                    0, // delta_line
                                    3, // delta_start
                                    4, // length
                                    0, // token_type
                                    0, // token_modifiers_bitset
                                ],
                                // The server isn't capable of deltas, so even though we sent back
                                // a result ID, the client shouldn't request a delta.
                                result_id: Some("a".into()),
                            },
                        )))
                    }
                },
            );

        cx.set_state("ˇfn main() {}");
        assert!(full_request.next().await.is_some());

        cx.run_until_parked();

        cx.set_state("ˇfn main() { a }");
        assert!(full_request.next().await.is_some());

        cx.run_until_parked();

        // The single token covers "main": offsets 3..7.
        assert_eq!(
            extract_semantic_highlights(&cx.editor, &cx),
            vec![MultiBufferOffset(3)..MultiBufferOffset(7)]
        );

        // Two edits → two full requests, zero delta requests.
        assert_eq!(full_counter.load(atomic::Ordering::Acquire), 2);
    }
520
    // A delta-capable server that replies with `result_id: None` gives the
    // client nothing to delta against, so every refresh must remain a full
    // request.
    #[gpui::test]
    async fn lsp_semantic_tokens_full_none_result_id(cx: &mut TestAppContext) {
        init_test(cx, |_| {});

        // Enable semantic tokens for Rust in the language settings.
        update_test_language_settings(cx, |language_settings| {
            language_settings.languages.0.insert(
                "Rust".into(),
                LanguageSettingsContent {
                    semantic_tokens: Some(SemanticTokens::Full),
                    ..Default::default()
                },
            );
        });

        let mut cx = EditorLspTestContext::new_rust(
            lsp::ServerCapabilities {
                semantic_tokens_provider: Some(
                    lsp::SemanticTokensServerCapabilities::SemanticTokensOptions(
                        lsp::SemanticTokensOptions {
                            legend: lsp::SemanticTokensLegend {
                                token_types: vec!["function".into()],
                                token_modifiers: Vec::new(),
                            },
                            // The server *does* advertise delta support here.
                            full: Some(lsp::SemanticTokensFullOptions::Delta { delta: Some(true) }),
                            ..lsp::SemanticTokensOptions::default()
                        },
                    ),
                ),
                ..lsp::ServerCapabilities::default()
            },
            cx,
        )
        .await;

        let full_counter = Arc::new(AtomicUsize::new(0));
        let full_counter_clone = full_counter.clone();

        let mut full_request = cx
            .set_request_handler::<lsp::request::SemanticTokensFullRequest, _, _>(
                move |_, _, _| {
                    full_counter_clone.fetch_add(1, atomic::Ordering::Release);
                    async move {
                        Ok(Some(lsp::SemanticTokensResult::Tokens(
                            lsp::SemanticTokens {
                                data: vec![
                                    0, // delta_line
                                    3, // delta_start
                                    4, // length
                                    0, // token_type
                                    0, // token_modifiers_bitset
                                ],
                                result_id: None, // Sending back `None` forces the client to not use deltas.
                            },
                        )))
                    }
                },
            );

        cx.set_state("ˇfn main() {}");
        assert!(full_request.next().await.is_some());

        // Drain the debounced update task before the next edit.
        let task = cx.update_editor(|e, _, _| e.semantic_token_state.take_update_task());
        task.await;

        cx.set_state("ˇfn main() { a }");
        assert!(full_request.next().await.is_some());

        let task = cx.update_editor(|e, _, _| e.semantic_token_state.take_update_task());
        task.await;
        assert_eq!(
            extract_semantic_highlights(&cx.editor, &cx),
            vec![MultiBufferOffset(3)..MultiBufferOffset(7)]
        );
        // Both rounds were full requests; no delta was attempted.
        assert_eq!(full_counter.load(atomic::Ordering::Acquire), 2);
    }
596
    // When the server supports deltas and returns a result ID, the second
    // refresh must use `semanticTokens/full/delta` with that previous ID
    // instead of issuing another full request.
    #[gpui::test]
    async fn lsp_semantic_tokens_delta(cx: &mut TestAppContext) {
        init_test(cx, |_| {});

        // Enable semantic tokens for Rust in the language settings.
        update_test_language_settings(cx, |language_settings| {
            language_settings.languages.0.insert(
                "Rust".into(),
                LanguageSettingsContent {
                    semantic_tokens: Some(SemanticTokens::Full),
                    ..Default::default()
                },
            );
        });

        let mut cx = EditorLspTestContext::new_rust(
            lsp::ServerCapabilities {
                semantic_tokens_provider: Some(
                    lsp::SemanticTokensServerCapabilities::SemanticTokensOptions(
                        lsp::SemanticTokensOptions {
                            legend: lsp::SemanticTokensLegend {
                                token_types: vec!["function".into()],
                                token_modifiers: Vec::new(),
                            },
                            full: Some(lsp::SemanticTokensFullOptions::Delta { delta: Some(true) }),
                            ..lsp::SemanticTokensOptions::default()
                        },
                    ),
                ),
                ..lsp::ServerCapabilities::default()
            },
            cx,
        )
        .await;

        // Separate counters distinguish full vs delta request traffic.
        let full_counter = Arc::new(AtomicUsize::new(0));
        let full_counter_clone = full_counter.clone();
        let delta_counter = Arc::new(AtomicUsize::new(0));
        let delta_counter_clone = delta_counter.clone();

        let mut full_request = cx
            .set_request_handler::<lsp::request::SemanticTokensFullRequest, _, _>(
                move |_, _, _| {
                    full_counter_clone.fetch_add(1, atomic::Ordering::Release);
                    async move {
                        Ok(Some(lsp::SemanticTokensResult::Tokens(
                            lsp::SemanticTokens {
                                data: vec![
                                    0, // delta_line
                                    3, // delta_start
                                    4, // length
                                    0, // token_type
                                    0, // token_modifiers_bitset
                                ],
                                result_id: Some("a".into()),
                            },
                        )))
                    }
                },
            );

        let mut delta_request = cx
            .set_request_handler::<lsp::request::SemanticTokensFullDeltaRequest, _, _>(
                move |_, params, _| {
                    delta_counter_clone.fetch_add(1, atomic::Ordering::Release);
                    // The delta must reference the full request's result ID.
                    assert_eq!(params.previous_result_id, "a");
                    async move {
                        Ok(Some(lsp::SemanticTokensFullDeltaResult::TokensDelta(
                            lsp::SemanticTokensDelta {
                                edits: Vec::new(),
                                result_id: Some("b".into()),
                            },
                        )))
                    }
                },
            );

        // Initial request, for the empty buffer.
        cx.set_state("ˇfn main() {}");
        assert!(full_request.next().await.is_some());
        let task = cx.update_editor(|e, _, _| e.semantic_token_state.take_update_task());
        task.await;

        cx.set_state("ˇfn main() { a }");
        assert!(delta_request.next().await.is_some());
        let task = cx.update_editor(|e, _, _| e.semantic_token_state.take_update_task());
        task.await;

        assert_eq!(
            extract_semantic_highlights(&cx.editor, &cx),
            vec![MultiBufferOffset(3)..MultiBufferOffset(7)]
        );

        // Exactly one full request followed by exactly one delta request.
        assert_eq!(full_counter.load(atomic::Ordering::Acquire), 1);
        assert_eq!(delta_counter.load(atomic::Ordering::Acquire), 1);
    }
692
    // Two language servers for the same language each contribute tokens; the
    // editor must merge highlights from both servers and query each exactly
    // once per refresh.
    #[gpui::test]
    async fn lsp_semantic_tokens_multiserver_full(cx: &mut TestAppContext) {
        init_test(cx, |_| {});

        // Enable semantic tokens for TOML in the language settings.
        update_test_language_settings(cx, |language_settings| {
            language_settings.languages.0.insert(
                "TOML".into(),
                LanguageSettingsContent {
                    semantic_tokens: Some(SemanticTokens::Full),
                    ..Default::default()
                },
            );
        });

        let toml_language = Arc::new(Language::new(
            LanguageConfig {
                name: "TOML".into(),
                matcher: LanguageMatcher {
                    path_suffixes: vec!["toml".into()],
                    ..Default::default()
                },
                ..Default::default()
            },
            None,
        ));

        // We have 2 language servers for TOML in this test.
        let toml_legend_1 = lsp::SemanticTokensLegend {
            token_types: vec!["property".into()],
            token_modifiers: Vec::new(),
        };
        let toml_legend_2 = lsp::SemanticTokensLegend {
            token_types: vec!["number".into()],
            token_modifiers: Vec::new(),
        };

        let app_state = cx.update(workspace::AppState::test);

        cx.update(|cx| {
            assets::Assets.load_test_fonts(cx);
            crate::init(cx);
            workspace::init(app_state.clone(), cx);
        });

        let project = Project::test(app_state.fs.clone(), [], cx).await;
        let language_registry = project.read_with(cx, |project, _| project.languages().clone());

        // Per-server full-request counters.
        let full_counter_toml_1 = Arc::new(AtomicUsize::new(0));
        let full_counter_toml_1_clone = full_counter_toml_1.clone();
        let full_counter_toml_2 = Arc::new(AtomicUsize::new(0));
        let full_counter_toml_2_clone = full_counter_toml_2.clone();

        let mut toml_server_1 = language_registry.register_fake_lsp(
            toml_language.name(),
            FakeLspAdapter {
                name: "toml1",
                capabilities: lsp::ServerCapabilities {
                    semantic_tokens_provider: Some(
                        lsp::SemanticTokensServerCapabilities::SemanticTokensOptions(
                            lsp::SemanticTokensOptions {
                                legend: toml_legend_1,
                                full: Some(lsp::SemanticTokensFullOptions::Delta { delta: None }),
                                ..lsp::SemanticTokensOptions::default()
                            },
                        ),
                    ),
                    ..lsp::ServerCapabilities::default()
                },
                initializer: Some(Box::new({
                    let full_counter_toml_1_clone = full_counter_toml_1_clone.clone();
                    move |fake_server| {
                        let full_counter = full_counter_toml_1_clone.clone();
                        fake_server
                            .set_request_handler::<lsp::request::SemanticTokensFullRequest, _, _>(
                                move |_, _| {
                                    full_counter.fetch_add(1, atomic::Ordering::Release);
                                    async move {
                                        Ok(Some(lsp::SemanticTokensResult::Tokens(
                                            lsp::SemanticTokens {
                                                // highlight 'a' as a property
                                                data: vec![
                                                    0, // delta_line
                                                    0, // delta_start
                                                    1, // length
                                                    0, // token_type
                                                    0, // token_modifiers_bitset
                                                ],
                                                result_id: Some("a".into()),
                                            },
                                        )))
                                    }
                                },
                            );
                    }
                })),
                ..FakeLspAdapter::default()
            },
        );
        let mut toml_server_2 = language_registry.register_fake_lsp(
            toml_language.name(),
            FakeLspAdapter {
                name: "toml2",
                capabilities: lsp::ServerCapabilities {
                    semantic_tokens_provider: Some(
                        lsp::SemanticTokensServerCapabilities::SemanticTokensOptions(
                            lsp::SemanticTokensOptions {
                                legend: toml_legend_2,
                                full: Some(lsp::SemanticTokensFullOptions::Delta { delta: None }),
                                ..lsp::SemanticTokensOptions::default()
                            },
                        ),
                    ),
                    ..lsp::ServerCapabilities::default()
                },
                initializer: Some(Box::new({
                    let full_counter_toml_2_clone = full_counter_toml_2_clone.clone();
                    move |fake_server| {
                        let full_counter = full_counter_toml_2_clone.clone();
                        fake_server
                            .set_request_handler::<lsp::request::SemanticTokensFullRequest, _, _>(
                                move |_, _| {
                                    full_counter.fetch_add(1, atomic::Ordering::Release);
                                    async move {
                                        Ok(Some(lsp::SemanticTokensResult::Tokens(
                                            lsp::SemanticTokens {
                                                // highlight '3' as a literal
                                                data: vec![
                                                    0, // delta_line
                                                    4, // delta_start
                                                    1, // length
                                                    0, // token_type
                                                    0, // token_modifiers_bitset
                                                ],
                                                result_id: Some("a".into()),
                                            },
                                        )))
                                    }
                                },
                            );
                    }
                })),
                ..FakeLspAdapter::default()
            },
        );
        language_registry.add(toml_language.clone());

        app_state
            .fs
            .as_fake()
            .insert_tree(
                EditorLspTestContext::root_path(),
                json!({
                    ".git": {},
                    "dir": {
                        "foo.toml": "a = 1\nb = 2\n",
                    }
                }),
            )
            .await;

        let window = cx.add_window(|window, cx| Workspace::test_new(project.clone(), window, cx));
        let workspace = window.root(cx).unwrap();

        let mut cx = VisualTestContext::from_window(*window.deref(), cx);
        project
            .update(&mut cx, |project, cx| {
                project.find_or_create_worktree(EditorLspTestContext::root_path(), true, cx)
            })
            .await
            .unwrap();
        cx.read(|cx| workspace.read(cx).worktree_scans_complete(cx))
            .await;

        let toml_file = cx.read(|cx| workspace.file_project_paths(cx)[0].clone());
        let toml_item = workspace
            .update_in(&mut cx, |workspace, window, cx| {
                workspace.open_path(toml_file, None, true, window, cx)
            })
            .await
            .expect("Could not open test file");

        let editor = cx.update(|_, cx| {
            toml_item
                .act_as::<Editor>(cx)
                .expect("Opened test file wasn't an editor")
        });

        editor.update_in(&mut cx, |editor, window, cx| {
            let nav_history = workspace
                .read(cx)
                .active_pane()
                .read(cx)
                .nav_history_for_item(&cx.entity());
            editor.set_nav_history(Some(nav_history));
            window.focus(&editor.focus_handle(cx), cx)
        });

        // Wait for both fake servers to start.
        let _toml_server_1 = toml_server_1.next().await.unwrap();
        let _toml_server_2 = toml_server_2.next().await.unwrap();

        // Trigger semantic tokens.
        editor.update_in(&mut cx, |editor, _, cx| {
            editor.edit([(MultiBufferOffset(0)..MultiBufferOffset(1), "b")], cx);
        });
        // Advance past the 50ms debounce, then drain the update task.
        cx.executor().advance_clock(Duration::from_millis(200));
        let task = editor.update_in(&mut cx, |e, _, _| e.semantic_token_state.take_update_task());
        cx.run_until_parked();
        task.await;

        // One highlight from each server, merged and sorted by position.
        assert_eq!(
            extract_semantic_highlights(&editor, &cx),
            vec![
                MultiBufferOffset(0)..MultiBufferOffset(1),
                MultiBufferOffset(4)..MultiBufferOffset(5),
            ]
        );

        // Each server was queried exactly once.
        assert_eq!(full_counter_toml_1.load(atomic::Ordering::Acquire), 1);
        assert_eq!(full_counter_toml_2.load(atomic::Ordering::Acquire), 1);
    }
913
914 #[gpui::test]
915 async fn lsp_semantic_tokens_multibuffer_part(cx: &mut TestAppContext) {
916 init_test(cx, |_| {});
917
918 update_test_language_settings(cx, |language_settings| {
919 language_settings.languages.0.insert(
920 "TOML".into(),
921 LanguageSettingsContent {
922 semantic_tokens: Some(SemanticTokens::Full),
923 ..Default::default()
924 },
925 );
926 language_settings.languages.0.insert(
927 "Rust".into(),
928 LanguageSettingsContent {
929 semantic_tokens: Some(SemanticTokens::Full),
930 ..Default::default()
931 },
932 );
933 });
934
935 let toml_language = Arc::new(Language::new(
936 LanguageConfig {
937 name: "TOML".into(),
938 matcher: LanguageMatcher {
939 path_suffixes: vec!["toml".into()],
940 ..Default::default()
941 },
942 ..Default::default()
943 },
944 None,
945 ));
946 let rust_language = Arc::new(Language::new(
947 LanguageConfig {
948 name: "Rust".into(),
949 matcher: LanguageMatcher {
950 path_suffixes: vec!["rs".into()],
951 ..Default::default()
952 },
953 ..Default::default()
954 },
955 None,
956 ));
957
958 let toml_legend = lsp::SemanticTokensLegend {
959 token_types: vec!["property".into()],
960 token_modifiers: Vec::new(),
961 };
962 let rust_legend = lsp::SemanticTokensLegend {
963 token_types: vec!["constant".into()],
964 token_modifiers: Vec::new(),
965 };
966
967 let app_state = cx.update(workspace::AppState::test);
968
969 cx.update(|cx| {
970 assets::Assets.load_test_fonts(cx);
971 crate::init(cx);
972 workspace::init(app_state.clone(), cx);
973 });
974
975 let project = Project::test(app_state.fs.clone(), [], cx).await;
976 let language_registry = project.read_with(cx, |project, _| project.languages().clone());
977 let full_counter_toml = Arc::new(AtomicUsize::new(0));
978 let full_counter_toml_clone = full_counter_toml.clone();
979
980 let mut toml_server = language_registry.register_fake_lsp(
981 toml_language.name(),
982 FakeLspAdapter {
983 name: "toml",
984 capabilities: lsp::ServerCapabilities {
985 semantic_tokens_provider: Some(
986 lsp::SemanticTokensServerCapabilities::SemanticTokensOptions(
987 lsp::SemanticTokensOptions {
988 legend: toml_legend,
989 full: Some(lsp::SemanticTokensFullOptions::Delta { delta: None }),
990 ..lsp::SemanticTokensOptions::default()
991 },
992 ),
993 ),
994 ..lsp::ServerCapabilities::default()
995 },
996 initializer: Some(Box::new({
997 let full_counter_toml_clone = full_counter_toml_clone.clone();
998 move |fake_server| {
999 let full_counter = full_counter_toml_clone.clone();
1000 fake_server
1001 .set_request_handler::<lsp::request::SemanticTokensFullRequest, _, _>(
1002 move |_, _| {
1003 full_counter.fetch_add(1, atomic::Ordering::Release);
1004 async move {
1005 Ok(Some(lsp::SemanticTokensResult::Tokens(
1006 lsp::SemanticTokens {
1007 // highlight 'a', 'b', 'c' as properties on lines 0, 1, 2
1008 data: vec![
1009 0, // delta_line (line 0)
1010 0, // delta_start
1011 1, // length
1012 0, // token_type
1013 0, // token_modifiers_bitset
1014 1, // delta_line (line 1)
1015 0, // delta_start
1016 1, // length
1017 0, // token_type
1018 0, // token_modifiers_bitset
1019 1, // delta_line (line 2)
1020 0, // delta_start
1021 1, // length
1022 0, // token_type
1023 0, // token_modifiers_bitset
1024 ],
1025 result_id: Some("a".into()),
1026 },
1027 )))
1028 }
1029 },
1030 );
1031 }
1032 })),
1033 ..FakeLspAdapter::default()
1034 },
1035 );
1036 language_registry.add(toml_language.clone());
1037 let mut rust_server = language_registry.register_fake_lsp(
1038 rust_language.name(),
1039 FakeLspAdapter {
1040 name: "rust",
1041 capabilities: lsp::ServerCapabilities {
1042 semantic_tokens_provider: Some(
1043 lsp::SemanticTokensServerCapabilities::SemanticTokensOptions(
1044 lsp::SemanticTokensOptions {
1045 legend: rust_legend,
1046 full: Some(lsp::SemanticTokensFullOptions::Delta { delta: None }),
1047 ..lsp::SemanticTokensOptions::default()
1048 },
1049 ),
1050 ),
1051 ..lsp::ServerCapabilities::default()
1052 },
1053 ..FakeLspAdapter::default()
1054 },
1055 );
1056 language_registry.add(rust_language.clone());
1057
1058 app_state
1059 .fs
1060 .as_fake()
1061 .insert_tree(
1062 EditorLspTestContext::root_path(),
1063 json!({
1064 ".git": {},
1065 "dir": {
1066 "foo.toml": "a = 1\nb = 2\nc = 3\n",
1067 "bar.rs": "const c: usize = 3;\n",
1068 }
1069 }),
1070 )
1071 .await;
1072
1073 let window = cx.add_window(|window, cx| Workspace::test_new(project.clone(), window, cx));
1074 let workspace = window.root(cx).unwrap();
1075
1076 let mut cx = VisualTestContext::from_window(*window.deref(), cx);
1077 project
1078 .update(&mut cx, |project, cx| {
1079 project.find_or_create_worktree(EditorLspTestContext::root_path(), true, cx)
1080 })
1081 .await
1082 .unwrap();
1083 cx.read(|cx| workspace.read(cx).worktree_scans_complete(cx))
1084 .await;
1085
1086 let toml_file = cx.read(|cx| workspace.file_project_paths(cx)[1].clone());
1087 let rust_file = cx.read(|cx| workspace.file_project_paths(cx)[0].clone());
1088 let (toml_item, rust_item) = workspace.update_in(&mut cx, |workspace, window, cx| {
1089 (
1090 workspace.open_path(toml_file, None, true, window, cx),
1091 workspace.open_path(rust_file, None, true, window, cx),
1092 )
1093 });
1094 let toml_item = toml_item.await.expect("Could not open test file");
1095 let rust_item = rust_item.await.expect("Could not open test file");
1096
1097 let (toml_editor, rust_editor) = cx.update(|_, cx| {
1098 (
1099 toml_item
1100 .act_as::<Editor>(cx)
1101 .expect("Opened test file wasn't an editor"),
1102 rust_item
1103 .act_as::<Editor>(cx)
1104 .expect("Opened test file wasn't an editor"),
1105 )
1106 });
1107 let toml_buffer = cx.read(|cx| {
1108 toml_editor
1109 .read(cx)
1110 .buffer()
1111 .read(cx)
1112 .as_singleton()
1113 .unwrap()
1114 });
1115 let rust_buffer = cx.read(|cx| {
1116 rust_editor
1117 .read(cx)
1118 .buffer()
1119 .read(cx)
1120 .as_singleton()
1121 .unwrap()
1122 });
1123 let multibuffer = cx.new(|cx| {
1124 let mut multibuffer = MultiBuffer::new(Capability::ReadWrite);
1125 multibuffer.push_excerpts(
1126 toml_buffer.clone(),
1127 [ExcerptRange::new(Point::new(0, 0)..Point::new(1, 0))],
1128 cx,
1129 );
1130 multibuffer.push_excerpts(
1131 rust_buffer.clone(),
1132 [ExcerptRange::new(Point::new(0, 0)..Point::new(1, 0))],
1133 cx,
1134 );
1135 multibuffer
1136 });
1137
1138 let editor = workspace.update_in(&mut cx, |workspace, window, cx| {
1139 let editor = cx.new(|cx| build_editor_with_project(project, multibuffer, window, cx));
1140 workspace.add_item_to_active_pane(Box::new(editor.clone()), None, true, window, cx);
1141 editor
1142 });
1143 editor.update_in(&mut cx, |editor, window, cx| {
1144 let nav_history = workspace
1145 .read(cx)
1146 .active_pane()
1147 .read(cx)
1148 .nav_history_for_item(&cx.entity());
1149 editor.set_nav_history(Some(nav_history));
1150 window.focus(&editor.focus_handle(cx), cx)
1151 });
1152
1153 let _toml_server = toml_server.next().await.unwrap();
1154 let _rust_server = rust_server.next().await.unwrap();
1155
1156 // Initial request.
1157 cx.executor().advance_clock(Duration::from_millis(200));
1158 let task = editor.update_in(&mut cx, |e, _, _| e.semantic_token_state.take_update_task());
1159 cx.run_until_parked();
1160 task.await;
1161 assert_eq!(full_counter_toml.load(atomic::Ordering::Acquire), 1);
1162 cx.run_until_parked();
1163
1164 // Initially, excerpt only covers line 0, so only the 'a' token should be highlighted.
1165 // The excerpt content is "a = 1\n" (6 chars), so 'a' is at offset 0.
1166 assert_eq!(
1167 extract_semantic_highlights(&editor, &cx),
1168 vec![MultiBufferOffset(0)..MultiBufferOffset(1)]
1169 );
1170
1171 // Get the excerpt id for the TOML excerpt and expand it down by 2 lines.
1172 let toml_excerpt_id =
1173 editor.read_with(&cx, |editor, cx| editor.buffer().read(cx).excerpt_ids()[0]);
1174 editor.update_in(&mut cx, |editor, _, cx| {
1175 editor.buffer().update(cx, |buffer, cx| {
1176 buffer.expand_excerpts([toml_excerpt_id], 2, ExpandExcerptDirection::Down, cx);
1177 });
1178 });
1179
1180 // Wait for semantic tokens to be re-fetched after expansion.
1181 cx.executor().advance_clock(Duration::from_millis(200));
1182 let task = editor.update_in(&mut cx, |e, _, _| e.semantic_token_state.take_update_task());
1183 cx.run_until_parked();
1184 task.await;
1185
1186 // After expansion, the excerpt covers lines 0-2, so 'a', 'b', 'c' should all be highlighted.
1187 // Content is now "a = 1\nb = 2\nc = 3\n" (18 chars).
1188 // 'a' at offset 0, 'b' at offset 6, 'c' at offset 12.
1189 assert_eq!(
1190 extract_semantic_highlights(&editor, &cx),
1191 vec![
1192 MultiBufferOffset(0)..MultiBufferOffset(1),
1193 MultiBufferOffset(6)..MultiBufferOffset(7),
1194 MultiBufferOffset(12)..MultiBufferOffset(13),
1195 ]
1196 );
1197 }
1198
    /// Regression test: a multibuffer holding two excerpts of the SAME
    /// underlying buffer must debounce edits into a single semantic-tokens
    /// request per buffer, not one request per excerpt or per open editor.
    #[gpui::test]
    async fn lsp_semantic_tokens_multibuffer_shared(cx: &mut TestAppContext) {
        init_test(cx, |_| {});

        // Enable full semantic tokens for TOML buffers.
        update_test_language_settings(cx, |language_settings| {
            language_settings.languages.0.insert(
                "TOML".into(),
                LanguageSettingsContent {
                    semantic_tokens: Some(SemanticTokens::Full),
                    ..Default::default()
                },
            );
        });

        let toml_language = Arc::new(Language::new(
            LanguageConfig {
                name: "TOML".into(),
                matcher: LanguageMatcher {
                    path_suffixes: vec!["toml".into()],
                    ..Default::default()
                },
                ..Default::default()
            },
            None,
        ));

        // Single-entry legend: token type index 0 == "property".
        let toml_legend = lsp::SemanticTokensLegend {
            token_types: vec!["property".into()],
            token_modifiers: Vec::new(),
        };

        let app_state = cx.update(workspace::AppState::test);

        cx.update(|cx| {
            assets::Assets.load_test_fonts(cx);
            crate::init(cx);
            workspace::init(app_state.clone(), cx);
        });

        let project = Project::test(app_state.fs.clone(), [], cx).await;
        let language_registry = project.read_with(cx, |project, _| project.languages().clone());
        // Counts how many SemanticTokensFull requests reach the fake server.
        let full_counter_toml = Arc::new(AtomicUsize::new(0));
        let full_counter_toml_clone = full_counter_toml.clone();

        let mut toml_server = language_registry.register_fake_lsp(
            toml_language.name(),
            FakeLspAdapter {
                name: "toml",
                capabilities: lsp::ServerCapabilities {
                    semantic_tokens_provider: Some(
                        lsp::SemanticTokensServerCapabilities::SemanticTokensOptions(
                            lsp::SemanticTokensOptions {
                                legend: toml_legend,
                                full: Some(lsp::SemanticTokensFullOptions::Delta { delta: None }),
                                ..lsp::SemanticTokensOptions::default()
                            },
                        ),
                    ),
                    ..lsp::ServerCapabilities::default()
                },
                initializer: Some(Box::new({
                    let full_counter_toml_clone = full_counter_toml_clone.clone();
                    move |fake_server| {
                        let full_counter = full_counter_toml_clone.clone();
                        fake_server
                            .set_request_handler::<lsp::request::SemanticTokensFullRequest, _, _>(
                                move |_, _| {
                                    // Bump the counter on every request so the
                                    // test can assert the total fetch count.
                                    full_counter.fetch_add(1, atomic::Ordering::Release);
                                    async move {
                                        Ok(Some(lsp::SemanticTokensResult::Tokens(
                                            lsp::SemanticTokens {
                                                // highlight 'a' as a property
                                                data: vec![
                                                    0, // delta_line
                                                    0, // delta_start
                                                    1, // length
                                                    0, // token_type
                                                    0, // token_modifiers_bitset
                                                ],
                                                result_id: Some("a".into()),
                                            },
                                        )))
                                    }
                                },
                            );
                    }
                })),
                ..FakeLspAdapter::default()
            },
        );
        language_registry.add(toml_language.clone());

        app_state
            .fs
            .as_fake()
            .insert_tree(
                EditorLspTestContext::root_path(),
                json!({
                    ".git": {},
                    "dir": {
                        "foo.toml": "a = 1\nb = 2\n",
                    }
                }),
            )
            .await;

        let window = cx.add_window(|window, cx| Workspace::test_new(project.clone(), window, cx));
        let workspace = window.root(cx).unwrap();

        let mut cx = VisualTestContext::from_window(*window.deref(), cx);
        project
            .update(&mut cx, |project, cx| {
                project.find_or_create_worktree(EditorLspTestContext::root_path(), true, cx)
            })
            .await
            .unwrap();
        cx.read(|cx| workspace.read(cx).worktree_scans_complete(cx))
            .await;

        // Open the TOML file in a singleton editor; the same buffer entity is
        // then excerpted twice into the multibuffer below.
        let toml_file = cx.read(|cx| workspace.file_project_paths(cx)[0].clone());
        let toml_item = workspace
            .update_in(&mut cx, |workspace, window, cx| {
                workspace.open_path(toml_file, None, true, window, cx)
            })
            .await
            .expect("Could not open test file");

        let toml_editor = cx.update(|_, cx| {
            toml_item
                .act_as::<Editor>(cx)
                .expect("Opened test file wasn't an editor")
        });
        let toml_buffer = cx.read(|cx| {
            toml_editor
                .read(cx)
                .buffer()
                .read(cx)
                .as_singleton()
                .unwrap()
        });
        // Two excerpts over the same underlying buffer.
        let multibuffer = cx.new(|cx| {
            let mut multibuffer = MultiBuffer::new(Capability::ReadWrite);
            multibuffer.push_excerpts(
                toml_buffer.clone(),
                [ExcerptRange::new(Point::new(0, 0)..Point::new(2, 0))],
                cx,
            );
            multibuffer.push_excerpts(
                toml_buffer.clone(),
                [ExcerptRange::new(Point::new(0, 0)..Point::new(2, 0))],
                cx,
            );
            multibuffer
        });

        let editor = workspace.update_in(&mut cx, |_, window, cx| {
            cx.new(|cx| build_editor_with_project(project, multibuffer, window, cx))
        });
        editor.update_in(&mut cx, |editor, window, cx| {
            let nav_history = workspace
                .read(cx)
                .active_pane()
                .read(cx)
                .nav_history_for_item(&cx.entity());
            editor.set_nav_history(Some(nav_history));
            window.focus(&editor.focus_handle(cx), cx)
        });

        let _toml_server = toml_server.next().await.unwrap();

        // Initial request.
        cx.executor().advance_clock(Duration::from_millis(200));
        let task = editor.update_in(&mut cx, |e, _, _| e.semantic_token_state.take_update_task());
        cx.run_until_parked();
        task.await;
        assert_eq!(full_counter_toml.load(atomic::Ordering::Acquire), 1);

        // Edit two parts of the multibuffer, which both map to the same buffer.
        //
        // Without debouncing, this grabs semantic tokens 4 times (twice for the
        // toml editor, and twice for the multibuffer).
        editor.update_in(&mut cx, |editor, _, cx| {
            editor.edit([(MultiBufferOffset(0)..MultiBufferOffset(1), "b")], cx);
            editor.edit([(MultiBufferOffset(12)..MultiBufferOffset(13), "c")], cx);
        });
        cx.executor().advance_clock(Duration::from_millis(200));
        let task = editor.update_in(&mut cx, |e, _, _| e.semantic_token_state.take_update_task());
        cx.run_until_parked();
        task.await;
        assert_eq!(
            extract_semantic_highlights(&editor, &cx),
            vec![MultiBufferOffset(0)..MultiBufferOffset(1)]
        );

        // Only one additional fetch despite edits touching two excerpts.
        assert_eq!(full_counter_toml.load(atomic::Ordering::Acquire), 2);
    }
1395
1396 fn extract_semantic_highlights(
1397 editor: &Entity<Editor>,
1398 cx: &TestAppContext,
1399 ) -> Vec<Range<MultiBufferOffset>> {
1400 editor.read_with(cx, |editor, cx| {
1401 let multi_buffer_snapshot = editor.buffer().read(cx).snapshot(cx);
1402 editor
1403 .display_map
1404 .read(cx)
1405 .semantic_token_highlights
1406 .iter()
1407 .flat_map(|(_, (v, _))| v.iter())
1408 .map(|highlights| highlights.range.to_offset(&multi_buffer_snapshot))
1409 .collect()
1410 })
1411 }
1412
1413 #[gpui::test]
1414 async fn test_semantic_tokens_rules_changes_restyle_tokens(cx: &mut TestAppContext) {
1415 use gpui::{Hsla, Rgba, UpdateGlobal as _};
1416 use settings::{GlobalLspSettingsContent, SemanticTokenRule};
1417
1418 init_test(cx, |_| {});
1419
1420 update_test_language_settings(cx, |language_settings| {
1421 language_settings.languages.0.insert(
1422 "Rust".into(),
1423 LanguageSettingsContent {
1424 semantic_tokens: Some(SemanticTokens::Full),
1425 ..LanguageSettingsContent::default()
1426 },
1427 );
1428 });
1429
1430 let mut cx = EditorLspTestContext::new_rust(
1431 lsp::ServerCapabilities {
1432 semantic_tokens_provider: Some(
1433 lsp::SemanticTokensServerCapabilities::SemanticTokensOptions(
1434 lsp::SemanticTokensOptions {
1435 legend: lsp::SemanticTokensLegend {
1436 token_types: Vec::from(["function".into()]),
1437 token_modifiers: Vec::new(),
1438 },
1439 full: Some(lsp::SemanticTokensFullOptions::Delta { delta: None }),
1440 ..lsp::SemanticTokensOptions::default()
1441 },
1442 ),
1443 ),
1444 ..lsp::ServerCapabilities::default()
1445 },
1446 cx,
1447 )
1448 .await;
1449
1450 let mut full_request = cx
1451 .set_request_handler::<lsp::request::SemanticTokensFullRequest, _, _>(
1452 move |_, _, _| {
1453 async move {
1454 Ok(Some(lsp::SemanticTokensResult::Tokens(
1455 lsp::SemanticTokens {
1456 data: vec![
1457 0, // delta_line
1458 3, // delta_start
1459 4, // length
1460 0, // token_type (function)
1461 0, // token_modifiers_bitset
1462 ],
1463 result_id: None,
1464 },
1465 )))
1466 }
1467 },
1468 );
1469
1470 // Trigger initial semantic tokens fetch
1471 cx.set_state("ˇfn main() {}");
1472 full_request.next().await;
1473 cx.run_until_parked();
1474
1475 // Verify initial highlights exist (with no custom color yet)
1476 let initial_ranges = extract_semantic_highlights(&cx.editor, &cx);
1477 assert_eq!(
1478 initial_ranges,
1479 vec![MultiBufferOffset(3)..MultiBufferOffset(7)],
1480 "Should have initial semantic token highlights"
1481 );
1482 let initial_styles = extract_semantic_highlight_styles(&cx.editor, &cx);
1483 assert_eq!(initial_styles.len(), 1, "Should have one highlight style");
1484 // Initial color should be None or theme default (not red or blue)
1485 let initial_color = initial_styles[0].color;
1486
1487 // Set a custom foreground color for function tokens via settings.json
1488 let red_color = Rgba {
1489 r: 1.0,
1490 g: 0.0,
1491 b: 0.0,
1492 a: 1.0,
1493 };
1494 cx.update(|_, cx| {
1495 SettingsStore::update_global(cx, |store, cx| {
1496 store.update_user_settings(cx, |settings| {
1497 settings.global_lsp_settings = Some(GlobalLspSettingsContent {
1498 semantic_token_rules: Some(SemanticTokenRules {
1499 rules: Vec::from([SemanticTokenRule {
1500 token_type: Some("function".to_string()),
1501 foreground_color: Some(red_color),
1502 ..SemanticTokenRule::default()
1503 }]),
1504 }),
1505 ..GlobalLspSettingsContent::default()
1506 });
1507 });
1508 });
1509 });
1510
1511 // Trigger a refetch by making an edit (which forces semantic tokens update)
1512 cx.set_state("ˇfn main() { }");
1513 full_request.next().await;
1514 cx.run_until_parked();
1515
1516 // Verify the highlights now have the custom red color
1517 let styles_after_settings_change = extract_semantic_highlight_styles(&cx.editor, &cx);
1518 assert_eq!(
1519 styles_after_settings_change.len(),
1520 1,
1521 "Should still have one highlight"
1522 );
1523 assert_eq!(
1524 styles_after_settings_change[0].color,
1525 Some(Hsla::from(red_color)),
1526 "Highlight should have the custom red color from settings.json"
1527 );
1528 assert_ne!(
1529 styles_after_settings_change[0].color, initial_color,
1530 "Color should have changed from initial"
1531 );
1532 }
1533
    /// Verifies that `experimental_theme_overrides` restyle already-fetched
    /// semantic tokens: setting, changing, and clearing an override for the
    /// `function` syntax style must be reflected in the highlight colors
    /// without a new server round-trip.
    #[gpui::test]
    async fn test_theme_override_changes_restyle_semantic_tokens(cx: &mut TestAppContext) {
        use collections::IndexMap;
        use gpui::{Hsla, Rgba, UpdateGlobal as _};
        use theme::{HighlightStyleContent, ThemeStyleContent};

        init_test(cx, |_| {});

        // Enable full semantic tokens for Rust.
        update_test_language_settings(cx, |language_settings| {
            language_settings.languages.0.insert(
                "Rust".into(),
                LanguageSettingsContent {
                    semantic_tokens: Some(SemanticTokens::Full),
                    ..LanguageSettingsContent::default()
                },
            );
        });

        // Fake server advertising a single "function" token type.
        let mut cx = EditorLspTestContext::new_rust(
            lsp::ServerCapabilities {
                semantic_tokens_provider: Some(
                    lsp::SemanticTokensServerCapabilities::SemanticTokensOptions(
                        lsp::SemanticTokensOptions {
                            legend: lsp::SemanticTokensLegend {
                                token_types: Vec::from(["function".into()]),
                                token_modifiers: Vec::new(),
                            },
                            full: Some(lsp::SemanticTokensFullOptions::Delta { delta: None }),
                            ..lsp::SemanticTokensOptions::default()
                        },
                    ),
                ),
                ..lsp::ServerCapabilities::default()
            },
            cx,
        )
        .await;

        // Always respond with one 4-char "function" token at line 0, col 3.
        let mut full_request = cx
            .set_request_handler::<lsp::request::SemanticTokensFullRequest, _, _>(
                move |_, _, _| async move {
                    Ok(Some(lsp::SemanticTokensResult::Tokens(
                        lsp::SemanticTokens {
                            data: vec![
                                0, // delta_line
                                3, // delta_start
                                4, // length
                                0, // token_type (function)
                                0, // token_modifiers_bitset
                            ],
                            result_id: None,
                        },
                    )))
                },
            );

        cx.set_state("ˇfn main() {}");
        full_request.next().await;
        cx.run_until_parked();

        // Capture the pre-override color to compare against later.
        let initial_styles = extract_semantic_highlight_styles(&cx.editor, &cx);
        assert_eq!(initial_styles.len(), 1, "Should have one highlight style");
        let initial_color = initial_styles[0].color;

        // Changing experimental_theme_overrides triggers GlobalTheme reload,
        // which fires theme_changed → refresh_semantic_token_highlights.
        let red_color: Hsla = Rgba {
            r: 1.0,
            g: 0.0,
            b: 0.0,
            a: 1.0,
        }
        .into();
        cx.update(|_, cx| {
            SettingsStore::update_global(cx, |store, cx| {
                store.update_user_settings(cx, |settings| {
                    settings.theme.experimental_theme_overrides = Some(ThemeStyleContent {
                        syntax: IndexMap::from_iter([(
                            "function".to_string(),
                            HighlightStyleContent {
                                color: Some("#ff0000".to_string()),
                                background_color: None,
                                font_style: None,
                                font_weight: None,
                            },
                        )]),
                        ..ThemeStyleContent::default()
                    });
                });
            });
        });

        cx.executor().advance_clock(Duration::from_millis(200));
        cx.run_until_parked();

        let styles_after_override = extract_semantic_highlight_styles(&cx.editor, &cx);
        assert_eq!(styles_after_override.len(), 1);
        assert_eq!(
            styles_after_override[0].color,
            Some(red_color),
            "Highlight should have red color from theme override"
        );
        assert_ne!(
            styles_after_override[0].color, initial_color,
            "Color should have changed from initial"
        );

        // Changing the override to a different color also restyles.
        let blue_color: Hsla = Rgba {
            r: 0.0,
            g: 0.0,
            b: 1.0,
            a: 1.0,
        }
        .into();
        cx.update(|_, cx| {
            SettingsStore::update_global(cx, |store, cx| {
                store.update_user_settings(cx, |settings| {
                    settings.theme.experimental_theme_overrides = Some(ThemeStyleContent {
                        syntax: IndexMap::from_iter([(
                            "function".to_string(),
                            HighlightStyleContent {
                                color: Some("#0000ff".to_string()),
                                background_color: None,
                                font_style: None,
                                font_weight: None,
                            },
                        )]),
                        ..ThemeStyleContent::default()
                    });
                });
            });
        });

        cx.executor().advance_clock(Duration::from_millis(200));
        cx.run_until_parked();

        let styles_after_second_override = extract_semantic_highlight_styles(&cx.editor, &cx);
        assert_eq!(styles_after_second_override.len(), 1);
        assert_eq!(
            styles_after_second_override[0].color,
            Some(blue_color),
            "Highlight should have blue color from updated theme override"
        );

        // Removing overrides reverts to the original theme color.
        cx.update(|_, cx| {
            SettingsStore::update_global(cx, |store, cx| {
                store.update_user_settings(cx, |settings| {
                    settings.theme.experimental_theme_overrides = None;
                });
            });
        });

        cx.executor().advance_clock(Duration::from_millis(200));
        cx.run_until_parked();

        let styles_after_clear = extract_semantic_highlight_styles(&cx.editor, &cx);
        assert_eq!(styles_after_clear.len(), 1);
        assert_eq!(
            styles_after_clear[0].color, initial_color,
            "Highlight should revert to initial color after clearing overrides"
        );
    }
1698
    /// Like `test_theme_override_changes_restyle_semantic_tokens`, but for
    /// per-theme overrides (`theme_overrides` keyed by the active theme's
    /// name) rather than the global experimental overrides.
    #[gpui::test]
    async fn test_per_theme_overrides_restyle_semantic_tokens(cx: &mut TestAppContext) {
        use collections::IndexMap;
        use gpui::{Hsla, Rgba, UpdateGlobal as _};
        use theme::{HighlightStyleContent, ThemeStyleContent};
        use ui::ActiveTheme as _;

        init_test(cx, |_| {});

        // Enable full semantic tokens for Rust.
        update_test_language_settings(cx, |language_settings| {
            language_settings.languages.0.insert(
                "Rust".into(),
                LanguageSettingsContent {
                    semantic_tokens: Some(SemanticTokens::Full),
                    ..LanguageSettingsContent::default()
                },
            );
        });

        // Fake server advertising a single "function" token type.
        let mut cx = EditorLspTestContext::new_rust(
            lsp::ServerCapabilities {
                semantic_tokens_provider: Some(
                    lsp::SemanticTokensServerCapabilities::SemanticTokensOptions(
                        lsp::SemanticTokensOptions {
                            legend: lsp::SemanticTokensLegend {
                                token_types: Vec::from(["function".into()]),
                                token_modifiers: Vec::new(),
                            },
                            full: Some(lsp::SemanticTokensFullOptions::Delta { delta: None }),
                            ..lsp::SemanticTokensOptions::default()
                        },
                    ),
                ),
                ..lsp::ServerCapabilities::default()
            },
            cx,
        )
        .await;

        // Always respond with one 4-char "function" token at line 0, col 3.
        let mut full_request = cx
            .set_request_handler::<lsp::request::SemanticTokensFullRequest, _, _>(
                move |_, _, _| async move {
                    Ok(Some(lsp::SemanticTokensResult::Tokens(
                        lsp::SemanticTokens {
                            data: vec![
                                0, // delta_line
                                3, // delta_start
                                4, // length
                                0, // token_type (function)
                                0, // token_modifiers_bitset
                            ],
                            result_id: None,
                        },
                    )))
                },
            );

        cx.set_state("ˇfn main() {}");
        full_request.next().await;
        cx.run_until_parked();

        // Capture the pre-override color to compare against later.
        let initial_styles = extract_semantic_highlight_styles(&cx.editor, &cx);
        assert_eq!(initial_styles.len(), 1, "Should have one highlight style");
        let initial_color = initial_styles[0].color;

        // Per-theme overrides (theme_overrides keyed by theme name) also go through
        // GlobalTheme reload → theme_changed → refresh_semantic_token_highlights.
        let theme_name = cx.update(|_, cx| cx.theme().name.to_string());
        let green_color: Hsla = Rgba {
            r: 0.0,
            g: 1.0,
            b: 0.0,
            a: 1.0,
        }
        .into();
        cx.update(|_, cx| {
            SettingsStore::update_global(cx, |store, cx| {
                store.update_user_settings(cx, |settings| {
                    settings.theme.theme_overrides = collections::HashMap::from_iter([(
                        theme_name.clone(),
                        ThemeStyleContent {
                            syntax: IndexMap::from_iter([(
                                "function".to_string(),
                                HighlightStyleContent {
                                    color: Some("#00ff00".to_string()),
                                    background_color: None,
                                    font_style: None,
                                    font_weight: None,
                                },
                            )]),
                            ..ThemeStyleContent::default()
                        },
                    )]);
                });
            });
        });

        cx.executor().advance_clock(Duration::from_millis(200));
        cx.run_until_parked();

        let styles_after_override = extract_semantic_highlight_styles(&cx.editor, &cx);
        assert_eq!(styles_after_override.len(), 1);
        assert_eq!(
            styles_after_override[0].color,
            Some(green_color),
            "Highlight should have green color from per-theme override"
        );
        assert_ne!(
            styles_after_override[0].color, initial_color,
            "Color should have changed from initial"
        );
    }
1811
    /// Verifies that stopping the language server removes all semantic-token
    /// highlights previously applied to the editor.
    #[gpui::test]
    async fn test_stopping_language_server_clears_semantic_tokens(cx: &mut TestAppContext) {
        init_test(cx, |_| {});

        // Enable full semantic tokens for Rust.
        update_test_language_settings(cx, |language_settings| {
            language_settings.languages.0.insert(
                "Rust".into(),
                LanguageSettingsContent {
                    semantic_tokens: Some(SemanticTokens::Full),
                    ..LanguageSettingsContent::default()
                },
            );
        });

        // Fake server advertising a single "function" token type.
        let mut cx = EditorLspTestContext::new_rust(
            lsp::ServerCapabilities {
                semantic_tokens_provider: Some(
                    lsp::SemanticTokensServerCapabilities::SemanticTokensOptions(
                        lsp::SemanticTokensOptions {
                            legend: lsp::SemanticTokensLegend {
                                token_types: vec!["function".into()],
                                token_modifiers: Vec::new(),
                            },
                            full: Some(lsp::SemanticTokensFullOptions::Delta { delta: None }),
                            ..lsp::SemanticTokensOptions::default()
                        },
                    ),
                ),
                ..lsp::ServerCapabilities::default()
            },
            cx,
        )
        .await;

        // Always respond with one 4-char token at line 0, col 3.
        let mut full_request = cx
            .set_request_handler::<lsp::request::SemanticTokensFullRequest, _, _>(
                move |_, _, _| async move {
                    Ok(Some(lsp::SemanticTokensResult::Tokens(
                        lsp::SemanticTokens {
                            data: vec![
                                0, // delta_line
                                3, // delta_start
                                4, // length
                                0, // token_type
                                0, // token_modifiers_bitset
                            ],
                            result_id: None,
                        },
                    )))
                },
            );

        cx.set_state("ˇfn main() {}");
        assert!(full_request.next().await.is_some());
        cx.run_until_parked();

        assert_eq!(
            extract_semantic_highlights(&cx.editor, &cx),
            vec![MultiBufferOffset(3)..MultiBufferOffset(7)],
            "Semantic tokens should be present before stopping the server"
        );

        // Stop the server for every buffer in the editor.
        cx.update_editor(|editor, _, cx| {
            let buffers = editor.buffer.read(cx).all_buffers().into_iter().collect();
            editor.project.as_ref().unwrap().update(cx, |project, cx| {
                project.stop_language_servers_for_buffers(buffers, HashSet::default(), cx);
            })
        });
        cx.executor().advance_clock(Duration::from_millis(200));
        cx.run_until_parked();

        assert_eq!(
            extract_semantic_highlights(&cx.editor, &cx),
            Vec::new(),
            "Semantic tokens should be cleared after stopping the server"
        );
    }
1889
1890 fn extract_semantic_highlight_styles(
1891 editor: &Entity<Editor>,
1892 cx: &TestAppContext,
1893 ) -> Vec<HighlightStyle> {
1894 editor.read_with(cx, |editor, cx| {
1895 editor
1896 .display_map
1897 .read(cx)
1898 .semantic_token_highlights
1899 .iter()
1900 .flat_map(|(_, (v, interner))| {
1901 v.iter().map(|highlights| interner[highlights.style])
1902 })
1903 .collect()
1904 })
1905 }
1906}