1use std::collections::HashMap;
2use std::sync::Mutex;
3use zed_extension_api::{self as zed, *};
4
/// Demonstration LLM provider that echoes the user's last message back,
/// used for testing the extension LLM-provider plumbing end to end.
struct ExampleProvider {
    /// Buffered completion events for each in-flight stream, keyed by the
    /// stream ID returned from `llm_stream_completion_start`. Events are
    /// drained one at a time by `llm_stream_completion_next`.
    streams: Mutex<HashMap<String, Vec<LlmCompletionEvent>>>,
    /// Monotonically increasing counter used to mint unique stream IDs.
    next_stream_id: Mutex<u64>,
}
11
12impl zed::Extension for ExampleProvider {
13 fn new() -> Self {
14 Self {
15 streams: Mutex::new(HashMap::new()),
16 next_stream_id: Mutex::new(0),
17 }
18 }
19
20 fn llm_providers(&self) -> Vec<LlmProviderInfo> {
21 vec![LlmProviderInfo {
22 id: "example".into(),
23 name: "Example Provider".into(),
24 icon: None,
25 }]
26 }
27
28 fn llm_provider_models(&self, _provider_id: &str) -> Result<Vec<LlmModelInfo>, String> {
29 Ok(vec![
30 LlmModelInfo {
31 id: "example-fast".into(),
32 name: "Example Fast".into(),
33 max_token_count: 8192,
34 max_output_tokens: Some(4096),
35 capabilities: LlmModelCapabilities {
36 supports_images: false,
37 supports_tools: true,
38 supports_tool_choice_auto: true,
39 supports_tool_choice_any: true,
40 supports_tool_choice_none: true,
41 supports_thinking: false,
42 tool_input_format: LlmToolInputFormat::JsonSchema,
43 },
44 is_default: false,
45 is_default_fast: true,
46 },
47 LlmModelInfo {
48 id: "example-smart".into(),
49 name: "Example Smart".into(),
50 max_token_count: 32768,
51 max_output_tokens: Some(8192),
52 capabilities: LlmModelCapabilities {
53 supports_images: true,
54 supports_tools: true,
55 supports_tool_choice_auto: true,
56 supports_tool_choice_any: true,
57 supports_tool_choice_none: true,
58 supports_thinking: true,
59 tool_input_format: LlmToolInputFormat::JsonSchema,
60 },
61 is_default: true,
62 is_default_fast: false,
63 },
64 ])
65 }
66
67 fn llm_provider_is_authenticated(&self, _provider_id: &str) -> bool {
68 // Example provider is always authenticated for testing
69 true
70 }
71
72 fn llm_provider_settings_markdown(&self, _provider_id: &str) -> Option<String> {
73 Some(r#"# Example Provider Setup
74
75Welcome to the **Example Provider**! This is a demonstration LLM provider for testing purposes.
76
77## Features
78
79- 🚀 **Fast responses** - Instant echo responses for testing
80- 🛠️ **Tool support** - Full function calling capabilities
81- 🖼️ **Image support** - Vision model available (Example Smart)
82
83## Configuration
84
85No API key is required for this example provider. It echoes back your messages for testing purposes.
86
87## Models
88
89- **Example Fast** - Quick responses, 8K context
90- **Example Smart** - Extended features, 32K context, supports images and thinking
91
92## Usage
93
94Simply select this provider and start chatting! Your messages will be echoed back with the model name.
95"#.to_string())
96 }
97
98 fn llm_provider_authenticate(&mut self, _provider_id: &str) -> Result<(), String> {
99 // Example provider doesn't need authentication
100 Ok(())
101 }
102
103 fn llm_stream_completion_start(
104 &mut self,
105 _provider_id: &str,
106 model_id: &str,
107 request: &LlmCompletionRequest,
108 ) -> Result<String, String> {
109 // Get the last user message to echo back
110 let user_message = request
111 .messages
112 .iter()
113 .filter(|m| matches!(m.role, LlmMessageRole::User))
114 .last()
115 .and_then(|m| {
116 m.content.iter().find_map(|c| {
117 if let LlmMessageContent::Text(text) = c {
118 Some(text.clone())
119 } else {
120 None
121 }
122 })
123 })
124 .unwrap_or_else(|| "Hello!".to_string());
125
126 // Create a response based on the model
127 let response_text = format!("Hello from {}! You said: \"{}\"", model_id, user_message);
128
129 // Create events for the stream - simulate streaming by breaking into chunks
130 let mut events = vec![LlmCompletionEvent::Started];
131
132 // Stream the response in chunks
133 for chunk in response_text.chars().collect::<Vec<_>>().chunks(10) {
134 let text: String = chunk.iter().collect();
135 events.push(LlmCompletionEvent::Text(text));
136 }
137
138 events.push(LlmCompletionEvent::Stop(LlmStopReason::EndTurn));
139 events.push(LlmCompletionEvent::Usage(LlmTokenUsage {
140 input_tokens: 10,
141 output_tokens: response_text.len() as u64 / 4,
142 cache_creation_input_tokens: None,
143 cache_read_input_tokens: None,
144 }));
145
146 // Generate a unique stream ID
147 let mut id_counter = self.next_stream_id.lock().unwrap();
148 let stream_id = format!("example-stream-{}", *id_counter);
149 *id_counter += 1;
150
151 // Store the events
152 self.streams
153 .lock()
154 .unwrap()
155 .insert(stream_id.clone(), events);
156
157 Ok(stream_id)
158 }
159
160 fn llm_stream_completion_next(
161 &mut self,
162 stream_id: &str,
163 ) -> Result<Option<LlmCompletionEvent>, String> {
164 let mut streams = self.streams.lock().unwrap();
165 if let Some(events) = streams.get_mut(stream_id) {
166 if events.is_empty() {
167 Ok(None)
168 } else {
169 Ok(Some(events.remove(0)))
170 }
171 } else {
172 Err(format!("Unknown stream: {}", stream_id))
173 }
174 }
175
176 fn llm_stream_completion_close(&mut self, stream_id: &str) {
177 self.streams.lock().unwrap().remove(stream_id);
178 }
179}
180
// Entry point: hands `ExampleProvider` to the Zed extension API's
// registration macro so the host can instantiate it via `Extension::new`.
zed::register_extension!(ExampleProvider);