1pub mod kvp;
2pub mod query;
3
4// Re-export
5pub use anyhow;
6use anyhow::Context;
7pub use indoc::indoc;
8pub use lazy_static;
9use parking_lot::{Mutex, RwLock};
10pub use smol;
11pub use sqlez;
12pub use sqlez_macros;
13pub use util::channel::{RELEASE_CHANNEL, RELEASE_CHANNEL_NAME};
14pub use util::paths::DB_DIR;
15
16use sqlez::domain::Migrator;
17use sqlez::thread_safe_connection::ThreadSafeConnection;
18use sqlez_macros::sql;
19use std::fs::{create_dir_all, remove_dir_all};
20use std::path::{Path, PathBuf};
21use std::sync::atomic::{AtomicBool, Ordering};
22use std::time::{SystemTime, UNIX_EPOCH};
23use util::{async_iife, ResultExt};
24use util::channel::ReleaseChannel;
25
26const CONNECTION_INITIALIZE_QUERY: &'static str = sql!(
27 PRAGMA synchronous=NORMAL;
28 PRAGMA busy_timeout=1;
29 PRAGMA foreign_keys=TRUE;
30 PRAGMA case_sensitive_like=TRUE;
31);
32
33const DB_INITIALIZE_QUERY: &'static str = sql!(
34 PRAGMA journal_mode=WAL;
35);
36
37const FALLBACK_DB_NAME: &'static str = "FALLBACK_MEMORY_DB";
38
39lazy_static::lazy_static! {
40 static ref DB_FILE_OPERATIONS: Mutex<()> = Mutex::new(());
41 static ref DB_WIPED: RwLock<bool> = RwLock::new(false);
42 pub static ref BACKUP_DB_PATH: RwLock<Option<PathBuf>> = RwLock::new(None);
43 pub static ref ALL_FILE_DB_FAILED: AtomicBool = AtomicBool::new(false);
44}
45
46/// Open or create a database at the given directory path.
47/// This will retry a couple times if there are failures. If opening fails once, the db directory
48/// is moved to a backup folder and a new one is created. If that fails, a shared in memory db is created.
49/// In either case, static variables are set so that the user can be notified.
50pub async fn open_db<M: Migrator + 'static>(wipe_db: bool, db_dir: &Path, release_channel: &ReleaseChannel) -> ThreadSafeConnection<M> {
51 let main_db_dir = db_dir.join(Path::new(&format!("0-{}", release_channel.name())));
52
53 // If WIPE_DB, delete 0-{channel}
54 if release_channel == &ReleaseChannel::Dev
55 && wipe_db
56 && !*DB_WIPED.read()
57 {
58 let mut db_wiped = DB_WIPED.write();
59 if !*db_wiped {
60 remove_dir_all(&main_db_dir).ok();
61
62 *db_wiped = true;
63 }
64 }
65
66 let connection = async_iife!({
67 // Note: This still has a race condition where 1 set of migrations succeeds
68 // (e.g. (Workspace, Editor)) and another fails (e.g. (Workspace, Terminal))
69 // This will cause the first connection to have the database taken out
70 // from under it. This *should* be fine though. The second dabatase failure will
71 // cause errors in the log and so should be observed by developers while writing
72 // soon-to-be good migrations. If user databases are corrupted, we toss them out
73 // and try again from a blank. As long as running all migrations from start to end
74 // is ok, this race condition will never be triggered.
75 //
76 // Basically: Don't ever push invalid migrations to stable or everyone will have
77 // a bad time.
78
79 // If no db folder, create one at 0-{channel}
80 create_dir_all(&main_db_dir).context("Could not create db directory")?;
81 let db_path = main_db_dir.join(Path::new("db.sqlite"));
82
83 // Optimistically open databases in parallel
84 if !DB_FILE_OPERATIONS.is_locked() {
85 // Try building a connection
86 if let Some(connection) = open_main_db(&db_path).await {
87 return Ok(connection)
88 };
89 }
90
91 // Take a lock in the failure case so that we move the db once per process instead
92 // of potentially multiple times from different threads. This shouldn't happen in the
93 // normal path
94 let _lock = DB_FILE_OPERATIONS.lock();
95 if let Some(connection) = open_main_db(&db_path).await {
96 return Ok(connection)
97 };
98
99 let backup_timestamp = SystemTime::now()
100 .duration_since(UNIX_EPOCH)
101 .expect("System clock is set before the unix timestamp, Zed does not support this region of spacetime")
102 .as_millis();
103
104 // If failed, move 0-{channel} to {current unix timestamp}-{channel}
105 let backup_db_dir = db_dir.join(Path::new(&format!(
106 "{}-{}",
107 backup_timestamp,
108 release_channel.name(),
109 )));
110
111 std::fs::rename(&main_db_dir, &backup_db_dir)
112 .context("Failed clean up corrupted database, panicking.")?;
113
114 // Set a static ref with the failed timestamp and error so we can notify the user
115 {
116 let mut guard = BACKUP_DB_PATH.write();
117 *guard = Some(backup_db_dir);
118 }
119
120 // Create a new 0-{channel}
121 create_dir_all(&main_db_dir).context("Should be able to create the database directory")?;
122 let db_path = main_db_dir.join(Path::new("db.sqlite"));
123
124 // Try again
125 open_main_db(&db_path).await.context("Could not newly created db")
126 }).await.log_err();
127
128 if let Some(connection) = connection {
129 return connection;
130 }
131
132 // Set another static ref so that we can escalate the notification
133 ALL_FILE_DB_FAILED.store(true, Ordering::Release);
134
135 // If still failed, create an in memory db with a known name
136 open_fallback_db().await
137}
138
139async fn open_main_db<M: Migrator>(db_path: &PathBuf) -> Option<ThreadSafeConnection<M>> {
140 println!("Opening main db");
141 ThreadSafeConnection::<M>::builder(db_path.to_string_lossy().as_ref(), true)
142 .with_db_initialization_query(DB_INITIALIZE_QUERY)
143 .with_connection_initialize_query(CONNECTION_INITIALIZE_QUERY)
144 .build()
145 .await
146 .log_err()
147}
148
149async fn open_fallback_db<M: Migrator>() -> ThreadSafeConnection<M> {
150 println!("Opening fallback db");
151 ThreadSafeConnection::<M>::builder(FALLBACK_DB_NAME, false)
152 .with_db_initialization_query(DB_INITIALIZE_QUERY)
153 .with_connection_initialize_query(CONNECTION_INITIALIZE_QUERY)
154 .build()
155 .await
156 .expect(
157 "Fallback in memory database failed. Likely initialization queries or migrations have fundamental errors",
158 )
159}
160
#[cfg(any(test, feature = "test-support"))]
/// Open a named in-memory database for tests. Writes are serialized through a
/// mutex-backed queue and executed synchronously so tests behave
/// deterministically; a failure to open is a test-setup bug, hence `unwrap`.
pub async fn open_test_db<M: Migrator>(db_name: &str) -> ThreadSafeConnection<M> {
    use sqlez::thread_safe_connection::locking_queue;

    let builder = ThreadSafeConnection::<M>::builder(db_name, false)
        .with_db_initialization_query(DB_INITIALIZE_QUERY)
        .with_connection_initialize_query(CONNECTION_INITIALIZE_QUERY)
        // Serialize queued writes via a mutex and run them synchronously
        .with_write_queue_constructor(locking_queue());

    builder.build().await.unwrap()
}
174
175/// Implements a basic DB wrapper for a given domain
/// Implements a basic DB wrapper for a given domain.
///
/// Generates, for `pub static ref $id: $t<...> = $migrations;`:
/// - a newtype struct `$t` wrapping a `ThreadSafeConnection` (with `Deref` so
///   the wrapper can be used like the connection itself),
/// - a `Domain` impl whose name is the stringified type and whose migrations
///   are the given expression,
/// - a lazy static `$id` that opens a test db under test/test-support builds,
///   or the real on-disk db otherwise (wiping it when WIPE_DB is set in env).
#[macro_export]
macro_rules! define_connection {
    // Arm 1: domain with no dependencies (`<()>`). The connection is typed
    // over `$t` alone.
    (pub static ref $id:ident: $t:ident<()> = $migrations:expr;) => {
        pub struct $t($crate::sqlez::thread_safe_connection::ThreadSafeConnection<$t>);

        impl ::std::ops::Deref for $t {
            type Target = $crate::sqlez::thread_safe_connection::ThreadSafeConnection<$t>;

            fn deref(&self) -> &Self::Target {
                &self.0
            }
        }

        impl $crate::sqlez::domain::Domain for $t {
            fn name() -> &'static str {
                stringify!($t)
            }

            fn migrations() -> &'static [&'static str] {
                $migrations
            }
        }

        // In tests, use a deterministic in-memory db named after the static.
        #[cfg(any(test, feature = "test-support"))]
        $crate::lazy_static::lazy_static! {
            pub static ref $id: $t = $t($crate::smol::block_on($crate::open_test_db(stringify!($id))));
        }

        // In production builds, open the real on-disk db for the current
        // release channel, wiping first when WIPE_DB is set in the environment.
        #[cfg(not(any(test, feature = "test-support")))]
        $crate::lazy_static::lazy_static! {
            pub static ref $id: $t = $t($crate::smol::block_on($crate::open_db(std::env::var("WIPE_DB").is_ok(), &$crate::DB_DIR, &$crate::RELEASE_CHANNEL)));
        }
    };
    // Arm 2: domain depending on other domains. The connection is typed over
    // the tuple of dependency domains plus `$t`, so all their migrations run.
    (pub static ref $id:ident: $t:ident<$($d:ty),+> = $migrations:expr;) => {
        pub struct $t($crate::sqlez::thread_safe_connection::ThreadSafeConnection<( $($d),+, $t )>);

        impl ::std::ops::Deref for $t {
            type Target = $crate::sqlez::thread_safe_connection::ThreadSafeConnection<($($d),+, $t)>;

            fn deref(&self) -> &Self::Target {
                &self.0
            }
        }

        impl $crate::sqlez::domain::Domain for $t {
            fn name() -> &'static str {
                stringify!($t)
            }

            fn migrations() -> &'static [&'static str] {
                $migrations
            }
        }

        // In tests, use a deterministic in-memory db named after the static.
        #[cfg(any(test, feature = "test-support"))]
        $crate::lazy_static::lazy_static! {
            pub static ref $id: $t = $t($crate::smol::block_on($crate::open_test_db(stringify!($id))));
        }

        // In production builds, open the real on-disk db for the current
        // release channel, wiping first when WIPE_DB is set in the environment.
        #[cfg(not(any(test, feature = "test-support")))]
        $crate::lazy_static::lazy_static! {
            pub static ref $id: $t = $t($crate::smol::block_on($crate::open_db(std::env::var("WIPE_DB").is_ok(), &$crate::DB_DIR, &$crate::RELEASE_CHANNEL)));
        }
    };
}
241
#[cfg(test)]
mod tests {
    use std::thread;

    use sqlez::domain::Domain;
    use sqlez_macros::sql;
    use tempdir::TempDir;
    use util::channel::ReleaseChannel;

    use crate::open_db;

    // Minimal test domain: a single table with one untyped column.
    enum TestDB {}

    impl Domain for TestDB {
        fn name() -> &'static str {
            "db_tests"
        }

        fn migrations() -> &'static [&'static str] {
            &[sql!(
                CREATE TABLE test(value);
            )]
        }
    }

    // Test that wipe_db exists and works and gives a new db
    #[test]
    fn test_wipe_db() {
        env_logger::try_init().ok();

        smol::block_on(async {
            let tempdir = TempDir::new("DbTests").unwrap();

            // Seed the on-disk db with a row, then drop the connection.
            let test_db = open_db::<TestDB>(false, tempdir.path(), &util::channel::ReleaseChannel::Dev).await;
            test_db.write(|connection|
                connection.exec(sql!(
                    INSERT INTO test(value) VALUES (10)
                )).unwrap()().unwrap()
            ).await;
            drop(test_db);

            // Re-open with wipe_db=true from several threads at once; the
            // DB_WIPED guard in open_db should wipe exactly once, and every
            // thread should then see an empty (freshly migrated) table.
            let mut guards = vec![];
            for _ in 0..5 {
                let path = tempdir.path().to_path_buf();
                let guard = thread::spawn(move || smol::block_on(async {
                    let test_db = open_db::<TestDB>(true, &path, &ReleaseChannel::Dev).await;

                    assert!(test_db.select_row::<()>(sql!(SELECT value FROM test)).unwrap()().unwrap().is_none())
                }));

                guards.push(guard);
            }

            for guard in guards {
                guard.join().unwrap();
            }
        })
    }

    // Test a file system failure (like in create_dir_all())
    // NOTE(review): unimplemented stub — fill in or remove.
    #[test]
    fn test_file_system_failure() {

    }

    // Test happy path where everything exists and opens
    // NOTE(review): unimplemented stub — fill in or remove.
    #[test]
    fn test_open_db() {

    }

    // Test bad migration panics
    // NOTE(review): unimplemented stub — fill in or remove.
    #[test]
    fn test_bad_migration_panics() {

    }

    /// Test that DB exists but corrupted (causing recreate)
    /// NOTE(review): unimplemented stub — fill in or remove.
    #[test]
    fn test_db_corruption() {


        // open_db(db_dir, release_channel)
    }
}