use anyhow::Context;
use async_std::task::{block_on, yield_now};
use serde::Serialize;
use sqlx::{types::Uuid, FromRow, Result};
use time::OffsetDateTime;

pub use async_sqlx_session::PostgresSessionStore as SessionStore;
pub use sqlx::postgres::PgPoolOptions as DbOptions;

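// Wraps the body of each query method. In test mode the wrapped future is
// yielded once to the calling executor and then driven to completion with
// `block_on`; otherwise it is simply awaited.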
macro_rules! test_support {
    ($self:ident, { $($token:tt)* }) => {{
        let body = async {
            $($token)*
        };
        if $self.test_mode {
            yield_now().await;
            block_on(body)
        } else {
            body.await
        }
    }};
}

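/// A cloneable handle to the Postgres database, backed by a connection pool.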
#[derive(Clone)]
pub struct Db {
    pool: sqlx::PgPool,
    test_mode: bool,
}

impl Db {
    pub async fn new(url: &str, max_connections: u32) -> tide::Result<Self> {
        let pool = DbOptions::new()
            .max_connections(max_connections)
            .connect(url)
            .await
            .context("failed to connect to postgres database")?;
        Ok(Self {
            pool,
            test_mode: false,
        })
    }

    // signups

    pub async fn create_signup(
        &self,
        github_login: &str,
        email_address: &str,
        about: &str,
        wants_releases: bool,
        wants_updates: bool,
        wants_community: bool,
    ) -> Result<SignupId> {
        test_support!(self, {
            let query = "
                INSERT INTO signups (
                    github_login,
                    email_address,
                    about,
                    wants_releases,
                    wants_updates,
                    wants_community
                )
                VALUES ($1, $2, $3, $4, $5, $6)
                RETURNING id
            ";
            sqlx::query_scalar(query)
                .bind(github_login)
                .bind(email_address)
                .bind(about)
                .bind(wants_releases)
                .bind(wants_updates)
                .bind(wants_community)
                .fetch_one(&self.pool)
                .await
                .map(SignupId)
        })
    }

    pub async fn get_all_signups(&self) -> Result<Vec<Signup>> {
        test_support!(self, {
            let query = "SELECT * FROM signups ORDER BY github_login ASC";
            sqlx::query_as(query).fetch_all(&self.pool).await
        })
    }

    pub async fn destroy_signup(&self, id: SignupId) -> Result<()> {
        test_support!(self, {
            let query = "DELETE FROM signups WHERE id = $1";
            sqlx::query(query)
                .bind(id.0)
                .execute(&self.pool)
                .await
                .map(drop)
        })
    }

    // users

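    /// Inserts a user, or returns the id of the existing row when the
    /// `github_login` is already taken (the no-op upsert lets `RETURNING id`
    /// fire on conflict).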
    pub async fn create_user(&self, github_login: &str, admin: bool) -> Result<UserId> {
        test_support!(self, {
            let query = "
                INSERT INTO users (github_login, admin)
                VALUES ($1, $2)
                ON CONFLICT (github_login) DO UPDATE SET github_login = excluded.github_login
                RETURNING id
            ";
            sqlx::query_scalar(query)
                .bind(github_login)
                .bind(admin)
                .fetch_one(&self.pool)
                .await
                .map(UserId)
        })
    }

    pub async fn get_all_users(&self) -> Result<Vec<User>> {
        test_support!(self, {
            let query = "SELECT * FROM users ORDER BY github_login ASC";
            sqlx::query_as(query).fetch_all(&self.pool).await
        })
    }

    pub async fn get_user_by_id(&self, id: UserId) -> Result<Option<User>> {
        let users = self.get_users_by_ids([id]).await?;
        Ok(users.into_iter().next())
    }

    pub async fn get_users_by_ids(
        &self,
        ids: impl IntoIterator<Item = UserId>,
    ) -> Result<Vec<User>> {
        let ids = ids.into_iter().map(|id| id.0).collect::<Vec<_>>();
        test_support!(self, {
            let query = "
                SELECT users.*
                FROM users
                WHERE users.id = ANY ($1)
            ";

            sqlx::query_as(query).bind(&ids).fetch_all(&self.pool).await
        })
    }

    pub async fn get_user_by_github_login(&self, github_login: &str) -> Result<Option<User>> {
        test_support!(self, {
            let query = "SELECT * FROM users WHERE github_login = $1 LIMIT 1";
            sqlx::query_as(query)
                .bind(github_login)
                .fetch_optional(&self.pool)
                .await
        })
    }

    pub async fn set_user_is_admin(&self, id: UserId, is_admin: bool) -> Result<()> {
        test_support!(self, {
            let query = "UPDATE users SET admin = $1 WHERE id = $2";
            sqlx::query(query)
                .bind(is_admin)
                .bind(id.0)
                .execute(&self.pool)
                .await
                .map(drop)
        })
    }

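    /// Deletes a user along with any access tokens that reference them.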
    pub async fn destroy_user(&self, id: UserId) -> Result<()> {
        test_support!(self, {
            let query = "DELETE FROM access_tokens WHERE user_id = $1;";
            sqlx::query(query)
                .bind(id.0)
                .execute(&self.pool)
                .await
                .map(drop)?;
            let query = "DELETE FROM users WHERE id = $1;";
            sqlx::query(query)
                .bind(id.0)
                .execute(&self.pool)
                .await
                .map(drop)
        })
    }

    // access tokens

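    /// Stores a new access token hash for the user and, in the same
    /// transaction, deletes all but the `max_access_token_count` most recent
    /// hashes.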
    pub async fn create_access_token_hash(
        &self,
        user_id: UserId,
        access_token_hash: &str,
        max_access_token_count: usize,
    ) -> Result<()> {
        test_support!(self, {
            let insert_query = "
                INSERT INTO access_tokens (user_id, hash)
                VALUES ($1, $2);
            ";
            let cleanup_query = "
                DELETE FROM access_tokens
                WHERE id IN (
                    SELECT id FROM access_tokens
                    WHERE user_id = $1
                    ORDER BY id DESC
                    OFFSET $3
                )
            ";

            let mut tx = self.pool.begin().await?;
            sqlx::query(insert_query)
                .bind(user_id.0)
                .bind(access_token_hash)
                .execute(&mut tx)
                .await?;
            sqlx::query(cleanup_query)
                .bind(user_id.0)
                .bind(access_token_hash)
                .bind(max_access_token_count as u32)
                .execute(&mut tx)
                .await?;
            tx.commit().await
        })
    }

    pub async fn get_access_token_hashes(&self, user_id: UserId) -> Result<Vec<String>> {
        test_support!(self, {
            let query = "
                SELECT hash
                FROM access_tokens
                WHERE user_id = $1
                ORDER BY id DESC
            ";
            sqlx::query_scalar(query)
                .bind(user_id.0)
                .fetch_all(&self.pool)
                .await
        })
    }

    // orgs

    #[allow(unused)] // Help rust-analyzer
    #[cfg(any(test, feature = "seed-support"))]
    pub async fn find_org_by_slug(&self, slug: &str) -> Result<Option<Org>> {
        test_support!(self, {
            let query = "
                SELECT *
                FROM orgs
                WHERE slug = $1
            ";
            sqlx::query_as(query)
                .bind(slug)
                .fetch_optional(&self.pool)
                .await
        })
    }

    #[cfg(any(test, feature = "seed-support"))]
    pub async fn create_org(&self, name: &str, slug: &str) -> Result<OrgId> {
        test_support!(self, {
            let query = "
                INSERT INTO orgs (name, slug)
                VALUES ($1, $2)
                RETURNING id
            ";
            sqlx::query_scalar(query)
                .bind(name)
                .bind(slug)
                .fetch_one(&self.pool)
                .await
                .map(OrgId)
        })
    }

    #[cfg(any(test, feature = "seed-support"))]
    pub async fn add_org_member(
        &self,
        org_id: OrgId,
        user_id: UserId,
        is_admin: bool,
    ) -> Result<()> {
        test_support!(self, {
            let query = "
                INSERT INTO org_memberships (org_id, user_id, admin)
                VALUES ($1, $2, $3)
                ON CONFLICT DO NOTHING
            ";
            sqlx::query(query)
                .bind(org_id.0)
                .bind(user_id.0)
                .bind(is_admin)
                .execute(&self.pool)
                .await
                .map(drop)
        })
    }

    // channels

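    /// Creates a channel owned by an org (`owner_is_user = false`).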
    #[cfg(any(test, feature = "seed-support"))]
    pub async fn create_org_channel(&self, org_id: OrgId, name: &str) -> Result<ChannelId> {
        test_support!(self, {
            let query = "
                INSERT INTO channels (owner_id, owner_is_user, name)
                VALUES ($1, false, $2)
                RETURNING id
            ";
            sqlx::query_scalar(query)
                .bind(org_id.0)
                .bind(name)
                .fetch_one(&self.pool)
                .await
                .map(ChannelId)
        })
    }

    #[allow(unused)] // Help rust-analyzer
    #[cfg(any(test, feature = "seed-support"))]
    pub async fn get_org_channels(&self, org_id: OrgId) -> Result<Vec<Channel>> {
        test_support!(self, {
            let query = "
                SELECT *
                FROM channels
                WHERE
                    channels.owner_is_user = false AND
                    channels.owner_id = $1
            ";
            sqlx::query_as(query)
                .bind(org_id.0)
                .fetch_all(&self.pool)
                .await
        })
    }

    pub async fn get_accessible_channels(&self, user_id: UserId) -> Result<Vec<Channel>> {
        test_support!(self, {
            let query = "
                SELECT
                    channels.id, channels.name
                FROM
                    channel_memberships, channels
                WHERE
                    channel_memberships.user_id = $1 AND
                    channel_memberships.channel_id = channels.id
            ";
            sqlx::query_as(query)
                .bind(user_id.0)
                .fetch_all(&self.pool)
                .await
        })
    }

    pub async fn can_user_access_channel(
        &self,
        user_id: UserId,
        channel_id: ChannelId,
    ) -> Result<bool> {
        test_support!(self, {
            let query = "
                SELECT id
                FROM channel_memberships
                WHERE user_id = $1 AND channel_id = $2
                LIMIT 1
            ";
            sqlx::query_scalar::<_, i32>(query)
                .bind(user_id.0)
                .bind(channel_id.0)
                .fetch_optional(&self.pool)
                .await
                .map(|e| e.is_some())
        })
    }

    #[cfg(any(test, feature = "seed-support"))]
    pub async fn add_channel_member(
        &self,
        channel_id: ChannelId,
        user_id: UserId,
        is_admin: bool,
    ) -> Result<()> {
        test_support!(self, {
            let query = "
                INSERT INTO channel_memberships (channel_id, user_id, admin)
                VALUES ($1, $2, $3)
                ON CONFLICT DO NOTHING
            ";
            sqlx::query(query)
                .bind(channel_id.0)
                .bind(user_id.0)
                .bind(is_admin)
                .execute(&self.pool)
                .await
                .map(drop)
        })
    }

    // messages

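    /// Inserts a channel message. The insert is idempotent on `nonce`:
    /// sending the same nonce again returns the id of the original message.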
    pub async fn create_channel_message(
        &self,
        channel_id: ChannelId,
        sender_id: UserId,
        body: &str,
        timestamp: OffsetDateTime,
        nonce: u128,
    ) -> Result<MessageId> {
        test_support!(self, {
            let query = "
                INSERT INTO channel_messages (channel_id, sender_id, body, sent_at, nonce)
                VALUES ($1, $2, $3, $4, $5)
                ON CONFLICT (nonce) DO UPDATE SET nonce = excluded.nonce
                RETURNING id
            ";
            sqlx::query_scalar(query)
                .bind(channel_id.0)
                .bind(sender_id.0)
                .bind(body)
                .bind(timestamp)
                .bind(Uuid::from_u128(nonce))
                .fetch_one(&self.pool)
                .await
                .map(MessageId)
        })
    }

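    /// Returns up to `count` messages from the channel with ids below
    /// `before_id` (or the most recent messages when `before_id` is `None`),
    /// ordered by ascending id.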
    pub async fn get_channel_messages(
        &self,
        channel_id: ChannelId,
        count: usize,
        before_id: Option<MessageId>,
    ) -> Result<Vec<ChannelMessage>> {
        test_support!(self, {
            let query = r#"
                SELECT * FROM (
                    SELECT
                        id, sender_id, body, sent_at AT TIME ZONE 'UTC' as sent_at, nonce
                    FROM
                        channel_messages
                    WHERE
                        channel_id = $1 AND
                        id < $2
                    ORDER BY id DESC
                    LIMIT $3
                ) as recent_messages
                ORDER BY id ASC
            "#;
            sqlx::query_as(query)
                .bind(channel_id.0)
                .bind(before_id.unwrap_or(MessageId::MAX))
                .bind(count as i64)
                .fetch_all(&self.pool)
                .await
        })
    }
}

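/// Generates a transparent newtype around an `i32` row id, usable both as a
/// sqlx column value and as a serde value.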
macro_rules! id_type {
    ($name:ident) => {
        #[derive(
            Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, sqlx::Type, Serialize,
        )]
        #[sqlx(transparent)]
        #[serde(transparent)]
        pub struct $name(pub i32);

        impl $name {
            #[allow(unused)]
            pub const MAX: Self = Self(i32::MAX);

            #[allow(unused)]
            pub fn from_proto(value: u64) -> Self {
                Self(value as i32)
            }

            #[allow(unused)]
            pub fn to_proto(&self) -> u64 {
                self.0 as u64
            }
        }
    };
}

id_type!(UserId);
#[derive(Debug, FromRow, Serialize, PartialEq)]
pub struct User {
    pub id: UserId,
    pub github_login: String,
    pub admin: bool,
}

id_type!(OrgId);
#[derive(FromRow)]
pub struct Org {
    pub id: OrgId,
    pub name: String,
    pub slug: String,
}

id_type!(SignupId);
#[derive(Debug, FromRow, Serialize)]
pub struct Signup {
    pub id: SignupId,
    pub github_login: String,
    pub email_address: String,
    pub about: String,
    pub wants_releases: Option<bool>,
    pub wants_updates: Option<bool>,
    pub wants_community: Option<bool>,
}

id_type!(ChannelId);
#[derive(Debug, FromRow, Serialize)]
pub struct Channel {
    pub id: ChannelId,
    pub name: String,
}

id_type!(MessageId);
#[derive(Debug, FromRow)]
pub struct ChannelMessage {
    pub id: MessageId,
    pub sender_id: UserId,
    pub body: String,
    pub sent_at: OffsetDateTime,
    pub nonce: Uuid,
}

#[cfg(test)]
pub mod tests {
    use super::*;
    use lazy_static::lazy_static;
    use parking_lot::Mutex;
    use rand::prelude::*;
    use sqlx::{
        migrate::{MigrateDatabase, Migrator},
        Postgres,
    };
    use std::{
        mem,
        path::Path,
        sync::atomic::{AtomicUsize, Ordering::SeqCst},
        thread,
    };
    use util::ResultExt as _;

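    /// A Postgres database created on demand for a single test. Dropped
    /// instances are pushed back into a shared pool and truncated for reuse;
    /// the pooled databases are torn down once the last `TestDb` is dropped
    /// with `clean_pool_on_drop` set or while a thread is panicking.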
    pub struct TestDb {
        pub db: Option<Db>,
        pub name: String,
        pub url: String,
        clean_pool_on_drop: bool,
    }

    lazy_static! {
        static ref DB_POOL: Mutex<Vec<TestDb>> = Default::default();
        static ref DB_COUNT: AtomicUsize = Default::default();
    }

    impl TestDb {
        pub fn new() -> Self {
            DB_COUNT.fetch_add(1, SeqCst);
            let mut pool = DB_POOL.lock();
            if let Some(db) = pool.pop() {
                db.truncate();
                db
            } else {
                let mut rng = StdRng::from_entropy();
                let name = format!("zed-test-{}", rng.gen::<u128>());
                let url = format!("postgres://postgres@localhost/{}", name);
                let migrations_path = Path::new(concat!(env!("CARGO_MANIFEST_DIR"), "/migrations"));
                let db = block_on(async {
                    Postgres::create_database(&url)
                        .await
                        .expect("failed to create test db");
                    let mut db = Db::new(&url, 5).await.unwrap();
                    db.test_mode = true;
                    let migrator = Migrator::new(migrations_path).await.unwrap();
                    migrator.run(&db.pool).await.unwrap();
                    db
                });

                Self {
                    db: Some(db),
                    name,
                    url,
                    clean_pool_on_drop: false,
                }
            }
        }

        pub fn set_clean_pool_on_drop(&mut self, delete_on_drop: bool) {
            self.clean_pool_on_drop = delete_on_drop;
        }

        pub fn db(&self) -> &Db {
            self.db.as_ref().unwrap()
        }

        fn truncate(&self) {
            block_on(async {
                let query = "
                    SELECT tablename FROM pg_tables
                    WHERE schemaname = 'public';
                ";
                let table_names = sqlx::query_scalar::<_, String>(query)
                    .fetch_all(&self.db().pool)
                    .await
                    .unwrap();
                sqlx::query(&format!(
                    "TRUNCATE TABLE {} RESTART IDENTITY",
                    table_names.join(", ")
                ))
                .execute(&self.db().pool)
                .await
                .unwrap();
            })
        }

        async fn teardown(mut self) -> Result<()> {
            let db = self.db.take().unwrap();
            let query = "
                SELECT pg_terminate_backend(pg_stat_activity.pid)
                FROM pg_stat_activity
                WHERE pg_stat_activity.datname = $1 AND pid <> pg_backend_pid();
            ";
            sqlx::query(query)
                .bind(&self.name)
                .execute(&db.pool)
                .await?;
            db.pool.close().await;
            Postgres::drop_database(&self.url).await?;
            Ok(())
        }
    }

    impl Drop for TestDb {
        fn drop(&mut self) {
            if let Some(db) = self.db.take() {
                DB_POOL.lock().push(TestDb {
                    db: Some(db),
                    name: mem::take(&mut self.name),
                    url: mem::take(&mut self.url),
                    clean_pool_on_drop: true,
                });
                if DB_COUNT.fetch_sub(1, SeqCst) == 1
                    && (self.clean_pool_on_drop || thread::panicking())
                {
                    block_on(async move {
                        let mut pool = DB_POOL.lock();
                        for db in pool.drain(..) {
                            db.teardown().await.log_err();
                        }
                    });
                }
            }
        }
    }

    #[gpui::test]
    async fn test_get_users_by_ids() {
        let test_db = TestDb::new();
        let db = test_db.db();

        let user = db.create_user("user", false).await.unwrap();
        let friend1 = db.create_user("friend-1", false).await.unwrap();
        let friend2 = db.create_user("friend-2", false).await.unwrap();
        let friend3 = db.create_user("friend-3", false).await.unwrap();

        assert_eq!(
            db.get_users_by_ids([user, friend1, friend2, friend3])
                .await
                .unwrap(),
            vec![
                User {
                    id: user,
                    github_login: "user".to_string(),
                    admin: false,
                },
                User {
                    id: friend1,
                    github_login: "friend-1".to_string(),
                    admin: false,
                },
                User {
                    id: friend2,
                    github_login: "friend-2".to_string(),
                    admin: false,
                },
                User {
                    id: friend3,
                    github_login: "friend-3".to_string(),
                    admin: false,
                }
            ]
        );
    }

    #[gpui::test]
    async fn test_recent_channel_messages() {
        let test_db = TestDb::new();
        let db = test_db.db();
        let user = db.create_user("user", false).await.unwrap();
        let org = db.create_org("org", "org").await.unwrap();
        let channel = db.create_org_channel(org, "channel").await.unwrap();
        for i in 0..10 {
            db.create_channel_message(channel, user, &i.to_string(), OffsetDateTime::now_utc(), i)
                .await
                .unwrap();
        }

        let messages = db.get_channel_messages(channel, 5, None).await.unwrap();
        assert_eq!(
            messages.iter().map(|m| &m.body).collect::<Vec<_>>(),
            ["5", "6", "7", "8", "9"]
        );

        let prev_messages = db
            .get_channel_messages(channel, 4, Some(messages[0].id))
            .await
            .unwrap();
        assert_eq!(
            prev_messages.iter().map(|m| &m.body).collect::<Vec<_>>(),
            ["1", "2", "3", "4"]
        );
    }

    #[gpui::test]
    async fn test_channel_message_nonces() {
        let test_db = TestDb::new();
        let db = test_db.db();
        let user = db.create_user("user", false).await.unwrap();
        let org = db.create_org("org", "org").await.unwrap();
        let channel = db.create_org_channel(org, "channel").await.unwrap();

        let msg1_id = db
            .create_channel_message(channel, user, "1", OffsetDateTime::now_utc(), 1)
            .await
            .unwrap();
        let msg2_id = db
            .create_channel_message(channel, user, "2", OffsetDateTime::now_utc(), 2)
            .await
            .unwrap();
        let msg3_id = db
            .create_channel_message(channel, user, "3", OffsetDateTime::now_utc(), 1)
            .await
            .unwrap();
        let msg4_id = db
            .create_channel_message(channel, user, "4", OffsetDateTime::now_utc(), 2)
            .await
            .unwrap();

        assert_ne!(msg1_id, msg2_id);
        assert_eq!(msg1_id, msg3_id);
        assert_eq!(msg2_id, msg4_id);
    }

    #[gpui::test]
    async fn test_create_access_tokens() {
        let test_db = TestDb::new();
        let db = test_db.db();
        let user = db.create_user("the-user", false).await.unwrap();

        db.create_access_token_hash(user, "h1", 3).await.unwrap();
        db.create_access_token_hash(user, "h2", 3).await.unwrap();
        assert_eq!(
            db.get_access_token_hashes(user).await.unwrap(),
            &["h2".to_string(), "h1".to_string()]
        );

        db.create_access_token_hash(user, "h3", 3).await.unwrap();
        assert_eq!(
            db.get_access_token_hashes(user).await.unwrap(),
            &["h3".to_string(), "h2".to_string(), "h1".to_string()]
        );

        db.create_access_token_hash(user, "h4", 3).await.unwrap();
        assert_eq!(
            db.get_access_token_hashes(user).await.unwrap(),
            &["h4".to_string(), "h3".to_string(), "h2".to_string()]
        );

        db.create_access_token_hash(user, "h5", 3).await.unwrap();
        assert_eq!(
            db.get_access_token_hashes(user).await.unwrap(),
            &["h5".to_string(), "h4".to_string(), "h3".to_string()]
        );
    }
}