use anyhow::anyhow;
use axum::headers::HeaderMapExt;
use axum::{
    Extension, Router,
    extract::MatchedPath,
    http::{Request, Response},
    routing::get,
};

use collab::api::CloudflareIpCountryHeader;
use collab::api::billing::sync_llm_request_usage_with_stripe_periodically;
use collab::llm::db::LlmDatabase;
use collab::migrations::run_database_migrations;
use collab::user_backfiller::spawn_user_backfiller;
use collab::{
    AppState, Config, Result, api::fetch_extensions_from_blob_store_periodically, db, env,
    executor::Executor, rpc::ResultExt,
};
use collab::{ServiceMode, api::billing::poll_stripe_events_periodically};
use db::Database;
use std::{
    env::args,
    net::{SocketAddr, TcpListener},
    path::Path,
    sync::Arc,
    time::Duration,
};
#[cfg(unix)]
use tokio::signal::unix::SignalKind;
use tower_http::trace::TraceLayer;
use tracing_subscriber::{
    Layer, filter::EnvFilter, fmt::format::JsonFields, util::SubscriberInitExt,
};
use util::{ResultExt as _, maybe};

const VERSION: &str = env!("CARGO_PKG_VERSION");
const REVISION: Option<&'static str> = option_env!("GITHUB_SHA");

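/// Entrypoint for the collab server. Dispatches on the first CLI argument:
/// `version`, `migrate`, `seed`, or `serve <api|collab|all>`.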
#[tokio::main]
async fn main() -> Result<()> {
    if let Err(error) = env::load_dotenv() {
        eprintln!(
            "error loading .env.toml (this is expected in production): {}",
            error
        );
    }

    let mut args = args().skip(1);
    match args.next().as_deref() {
        Some("version") => {
            println!("collab v{} ({})", VERSION, REVISION.unwrap_or("unknown"));
        }
        Some("migrate") => {
            let config = envy::from_env::<Config>().expect("error loading config");
            setup_app_database(&config).await?;
        }
        Some("seed") => {
            let config = envy::from_env::<Config>().expect("error loading config");
            let db_options = db::ConnectOptions::new(config.database_url.clone());

            let mut db = Database::new(db_options, Executor::Production).await?;
            db.initialize_notification_kinds().await?;

            collab::seed::seed(&config, &db, false).await?;

            if let Some(llm_database_url) = config.llm_database_url.clone() {
                let db_options = db::ConnectOptions::new(llm_database_url);
                let mut db = LlmDatabase::new(db_options.clone(), Executor::Production).await?;
                db.initialize().await?;
                collab::llm::db::seed_database(&config, &mut db, true).await?;
            }
        }
        Some("serve") => {
            let mode = match args.next().as_deref() {
                Some("collab") => ServiceMode::Collab,
                Some("api") => ServiceMode::Api,
                Some("all") => ServiceMode::All,
                _ => {
                    return Err(anyhow!(
                        "usage: collab <version | migrate | seed | serve <api|collab|all>>"
                    ))?;
                }
            };

            let config = envy::from_env::<Config>().expect("error loading config");
            init_tracing(&config);
            init_panic_hook();

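            // Base router exposing the root and liveness-probe endpoints;
            // service-specific routes are merged in below depending on `mode`.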
            let mut app = Router::new()
                .route("/", get(handle_root))
                .route("/healthz", get(handle_liveness_probe))
                .layer(Extension(mode));

            let listener = TcpListener::bind(format!("0.0.0.0:{}", config.http_port))
                .expect("failed to bind TCP listener");

            let mut on_shutdown = None;

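            // Both services need the databases migrated and the shared
            // application state constructed.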
            if mode.is_collab() || mode.is_api() {
                setup_app_database(&config).await?;
                setup_llm_database(&config).await?;

                let state = AppState::new(config, Executor::Production).await?;

                if let Some(stripe_billing) = state.stripe_billing.clone() {
                    let executor = state.executor.clone();
                    executor.spawn_detached(async move {
                        stripe_billing.initialize().await.trace_err();
                    });
                }

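                // Collab mode: register this server instance, start the RPC
                // server, and begin polling Stripe events.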
                if mode.is_collab() {
                    state.db.purge_old_embeddings().await.trace_err();

                    let epoch = state
                        .db
                        .create_server(&state.config.zed_environment)
                        .await?;
                    let rpc_server = collab::rpc::Server::new(epoch, state.clone());
                    rpc_server.start().await?;

                    poll_stripe_events_periodically(state.clone(), rpc_server.clone());

                    app = app
                        .merge(collab::api::routes(rpc_server.clone()))
                        .merge(collab::rpc::routes(rpc_server.clone()));

                    on_shutdown = Some(Box::new(move || rpc_server.teardown()));
                }

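                // API mode: spawn the periodic background tasks and mount the
                // events and extensions routers.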
                if mode.is_api() {
                    fetch_extensions_from_blob_store_periodically(state.clone());
                    spawn_user_backfiller(state.clone());

                    let llm_db = maybe!(async {
                        let database_url = state
                            .config
                            .llm_database_url
                            .as_ref()
                            .ok_or_else(|| anyhow!("missing LLM_DATABASE_URL"))?;
                        let max_connections = state
                            .config
                            .llm_database_max_connections
                            .ok_or_else(|| anyhow!("missing LLM_DATABASE_MAX_CONNECTIONS"))?;

                        let mut db_options = db::ConnectOptions::new(database_url);
                        db_options.max_connections(max_connections);
                        LlmDatabase::new(db_options, state.executor.clone()).await
                    })
                    .await
                    .trace_err();

                    if let Some(mut llm_db) = llm_db {
                        llm_db.initialize().await?;
                        sync_llm_request_usage_with_stripe_periodically(state.clone());
                    }

                    app = app
                        .merge(collab::api::events::router())
                        .merge(collab::api::extensions::router());
                }

                app = app.layer(Extension(state.clone()));
            }

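            // Record a tracing span per HTTP request and log each response's
            // status and latency.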
            app = app.layer(
                TraceLayer::new_for_http()
                    .make_span_with(|request: &Request<_>| {
                        let matched_path = request
                            .extensions()
                            .get::<MatchedPath>()
                            .map(MatchedPath::as_str);

                        let geoip_country_code = request
                            .headers()
                            .typed_get::<CloudflareIpCountryHeader>()
                            .map(|header| header.to_string());

                        tracing::info_span!(
                            "http_request",
                            method = ?request.method(),
                            matched_path,
                            geoip_country_code,
                            user_id = tracing::field::Empty,
                            login = tracing::field::Empty,
                            authn.jti = tracing::field::Empty,
                            is_staff = tracing::field::Empty
                        )
                    })
                    .on_response(
                        |response: &Response<_>, latency: Duration, _: &tracing::Span| {
                            let duration_ms = latency.as_micros() as f64 / 1000.;
                            tracing::info!(
                                duration_ms,
                                status = response.status().as_u16(),
                                "finished processing request"
                            );
                        },
                    ),
            );

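            // Resolve once either of the platform's shutdown signals arrives so
            // the server can shut down gracefully.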
            #[cfg(unix)]
            let signal = async move {
                let mut sigterm = tokio::signal::unix::signal(SignalKind::terminate())
                    .expect("failed to listen for terminate signal");
                let mut sigint = tokio::signal::unix::signal(SignalKind::interrupt())
                    .expect("failed to listen for interrupt signal");
                let sigterm = sigterm.recv();
                let sigint = sigint.recv();
                futures::pin_mut!(sigterm, sigint);
                futures::future::select(sigterm, sigint).await;
            };

            #[cfg(windows)]
            let signal = async move {
                // todo(windows):
                // `ctrl_close` does not work well here: tokio's signal handler returns
                // immediately, and the system terminates the application shortly after the
                // CTRL+CLOSE handler returns. We should install a blocking handler to deal
                // with the CTRL+CLOSE signal properly.
                let mut ctrl_break = tokio::signal::windows::ctrl_break()
                    .expect("failed to listen for break signal");
                let mut ctrl_c = tokio::signal::windows::ctrl_c()
                    .expect("failed to listen for interrupt signal");
                let ctrl_break = ctrl_break.recv();
                let ctrl_c = ctrl_c.recv();
                futures::pin_mut!(ctrl_break, ctrl_c);
                futures::future::select(ctrl_break, ctrl_c).await;
            };

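            // Serve the app on the bound listener, tearing down the RPC server
            // (if one was started) when a shutdown signal is received.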
            axum::Server::from_tcp(listener)
                .map_err(|e| anyhow!(e))?
                .serve(app.into_make_service_with_connect_info::<SocketAddr>())
                .with_graceful_shutdown(async move {
                    signal.await;
                    tracing::info!("Received interrupt signal");

                    if let Some(on_shutdown) = on_shutdown {
                        on_shutdown();
                    }
                })
                .await
                .map_err(|e| anyhow!(e))?;
        }
        _ => {
            Err(anyhow!(
                "usage: collab <version | migrate | seed | serve <api|collab|all>>"
            ))?;
        }
    }
    Ok(())
}

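/// Connects to the application database, runs any pending migrations, and seeds
/// the database when a seed path is configured.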
async fn setup_app_database(config: &Config) -> Result<()> {
    let db_options = db::ConnectOptions::new(config.database_url.clone());
    let mut db = Database::new(db_options, Executor::Production).await?;

    let migrations_path = config.migrations_path.as_deref().unwrap_or_else(|| {
        #[cfg(feature = "sqlite")]
        let default_migrations = concat!(env!("CARGO_MANIFEST_DIR"), "/migrations.sqlite");
        #[cfg(not(feature = "sqlite"))]
        let default_migrations = concat!(env!("CARGO_MANIFEST_DIR"), "/migrations");

        Path::new(default_migrations)
    });

    let migrations = run_database_migrations(db.options(), migrations_path).await?;
    for (migration, duration) in migrations {
        log::info!(
            "Migrated {} {} {:?}",
            migration.version,
            migration.description,
            duration
        );
    }

    db.initialize_notification_kinds().await?;

    if config.seed_path.is_some() {
        collab::seed::seed(config, &db, false).await?;
    }

    Ok(())
}

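/// Connects to the LLM database and runs any pending migrations; fails if
/// `LLM_DATABASE_URL` is not configured.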
async fn setup_llm_database(config: &Config) -> Result<()> {
    let database_url = config
        .llm_database_url
        .as_ref()
        .ok_or_else(|| anyhow!("missing LLM_DATABASE_URL"))?;

    let db_options = db::ConnectOptions::new(database_url.clone());
    let db = LlmDatabase::new(db_options, Executor::Production).await?;

    let migrations_path = config
        .llm_database_migrations_path
        .as_deref()
        .unwrap_or_else(|| {
            #[cfg(feature = "sqlite")]
            let default_migrations = concat!(env!("CARGO_MANIFEST_DIR"), "/migrations_llm.sqlite");
            #[cfg(not(feature = "sqlite"))]
            let default_migrations = concat!(env!("CARGO_MANIFEST_DIR"), "/migrations_llm");

            Path::new(default_migrations)
        });

    let migrations = run_database_migrations(db.options(), migrations_path).await?;
    for (migration, duration) in migrations {
        log::info!(
            "Migrated {} {} {:?}",
            migration.version,
            migration.description,
            duration
        );
    }

    Ok(())
}

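/// Reports the running service mode, version, and revision at `/`.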
async fn handle_root(Extension(mode): Extension<ServiceMode>) -> String {
    format!("zed:{mode} v{VERSION} ({})", REVISION.unwrap_or("unknown"))
}

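/// Liveness probe for `/healthz`: when application state is attached, verifies
/// the database can answer a trivial query before reporting "ok".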
async fn handle_liveness_probe(app_state: Option<Extension<Arc<AppState>>>) -> Result<String> {
    if let Some(state) = app_state {
        state.db.get_all_users(0, 1).await?;
    }

    Ok("ok".to_string())
}

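/// Initializes the tracing subscriber from the configured log filter, emitting
/// JSON output when `log_json` is enabled and pretty output otherwise.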
pub fn init_tracing(config: &Config) -> Option<()> {
    use std::str::FromStr;
    use tracing_subscriber::layer::SubscriberExt;

    let filter = EnvFilter::from_str(config.rust_log.as_deref()?).log_err()?;

    tracing_subscriber::registry()
        .with(if config.log_json.unwrap_or(false) {
            Box::new(
                tracing_subscriber::fmt::layer()
                    .fmt_fields(JsonFields::default())
                    .event_format(
                        tracing_subscriber::fmt::format()
                            .json()
                            .flatten_event(true)
                            .with_span_list(false),
                    )
                    .with_filter(filter),
            ) as Box<dyn Layer<_> + Send + Sync>
        } else {
            Box::new(
                tracing_subscriber::fmt::layer()
                    .event_format(tracing_subscriber::fmt::format().pretty())
                    .with_filter(filter),
            )
        })
        .init();

    None
}

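/// Installs a panic hook that reports the panic message, location, and a
/// captured backtrace through `tracing`.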
fn init_panic_hook() {
    std::panic::set_hook(Box::new(move |panic_info| {
        let panic_message = match panic_info.payload().downcast_ref::<&'static str>() {
            Some(message) => *message,
            None => match panic_info.payload().downcast_ref::<String>() {
                Some(message) => message.as_str(),
                None => "Box<Any>",
            },
        };
        let backtrace = std::backtrace::Backtrace::force_capture();
        let location = panic_info
            .location()
            .map(|loc| format!("{}:{}", loc.file(), loc.line()));
        tracing::error!(panic = true, ?location, %panic_message, %backtrace, "Server Panic");
    }));
}