use anyhow::Context;
use async_std::task::{block_on, yield_now};
use serde::Serialize;
use sqlx::{types::Uuid, FromRow, Result};
use time::OffsetDateTime;

pub use async_sqlx_session::PostgresSessionStore as SessionStore;
pub use sqlx::postgres::PgPoolOptions as DbOptions;

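/// Wraps the body of a query method so it works both in production and under
/// `#[gpui::test]`: when `test_mode` is set the future is driven to completion
/// synchronously with `block_on` (after yielding once to the calling executor);
/// otherwise the body is simply awaited.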
macro_rules! test_support {
    ($self:ident, { $($token:tt)* }) => {{
        let body = async {
            $($token)*
        };
        if $self.test_mode {
            yield_now().await;
            block_on(body)
        } else {
            body.await
        }
    }};
}

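/// A handle to the application database, backed by a Postgres connection pool.
/// Cloning is cheap and shares the same pool.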
#[derive(Clone)]
pub struct Db {
    pool: sqlx::PgPool,
    test_mode: bool,
}

impl Db {
    pub async fn new(url: &str, max_connections: u32) -> tide::Result<Self> {
        let pool = DbOptions::new()
            .max_connections(max_connections)
            .connect(url)
            .await
            .context("failed to connect to postgres database")?;
        Ok(Self {
            pool,
            test_mode: false,
        })
    }

    // signups

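    /// Records a signup with the given contact preferences and returns its id.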
    pub async fn create_signup(
        &self,
        github_login: &str,
        email_address: &str,
        about: &str,
        wants_releases: bool,
        wants_updates: bool,
        wants_community: bool,
    ) -> Result<SignupId> {
        test_support!(self, {
            let query = "
                INSERT INTO signups (
                    github_login,
                    email_address,
                    about,
                    wants_releases,
                    wants_updates,
                    wants_community
                )
                VALUES ($1, $2, $3, $4, $5, $6)
                RETURNING id
            ";
            sqlx::query_scalar(query)
                .bind(github_login)
                .bind(email_address)
                .bind(about)
                .bind(wants_releases)
                .bind(wants_updates)
                .bind(wants_community)
                .fetch_one(&self.pool)
                .await
                .map(SignupId)
        })
    }

    pub async fn get_all_signups(&self) -> Result<Vec<Signup>> {
        test_support!(self, {
            let query = "SELECT * FROM signups ORDER BY github_login ASC";
            sqlx::query_as(query).fetch_all(&self.pool).await
        })
    }

    pub async fn delete_signup(&self, id: SignupId) -> Result<()> {
        test_support!(self, {
            let query = "DELETE FROM signups WHERE id = $1";
            sqlx::query(query)
                .bind(id.0)
                .execute(&self.pool)
                .await
                .map(drop)
        })
    }

    // users

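    /// Creates a user for the given GitHub login, or returns the existing
    /// user's id. The `ON CONFLICT ... DO UPDATE` no-op ensures `RETURNING id`
    /// also yields a row for users that already exist; `admin` is not updated
    /// in that case.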
    pub async fn create_user(&self, github_login: &str, admin: bool) -> Result<UserId> {
        test_support!(self, {
            let query = "
                INSERT INTO users (github_login, admin)
                VALUES ($1, $2)
                ON CONFLICT (github_login) DO UPDATE SET github_login = excluded.github_login
                RETURNING id
            ";
            sqlx::query_scalar(query)
                .bind(github_login)
                .bind(admin)
                .fetch_one(&self.pool)
                .await
                .map(UserId)
        })
    }

    pub async fn get_all_users(&self) -> Result<Vec<User>> {
        test_support!(self, {
            let query = "SELECT * FROM users ORDER BY github_login ASC";
            sqlx::query_as(query).fetch_all(&self.pool).await
        })
    }

    pub async fn get_user_by_id(&self, id: UserId) -> Result<Option<User>> {
        let users = self.get_users_by_ids([id]).await?;
        Ok(users.into_iter().next())
    }

    pub async fn get_users_by_ids(
        &self,
        ids: impl IntoIterator<Item = UserId>,
    ) -> Result<Vec<User>> {
        let ids = ids.into_iter().map(|id| id.0).collect::<Vec<_>>();
        test_support!(self, {
            let query = "
                SELECT users.*
                FROM users
                WHERE users.id = ANY ($1)
            ";

            sqlx::query_as(query).bind(&ids).fetch_all(&self.pool).await
        })
    }

    pub async fn get_user_by_github_login(&self, github_login: &str) -> Result<Option<User>> {
        test_support!(self, {
            let query = "SELECT * FROM users WHERE github_login = $1 LIMIT 1";
            sqlx::query_as(query)
                .bind(github_login)
                .fetch_optional(&self.pool)
                .await
        })
    }

    pub async fn set_user_is_admin(&self, id: UserId, is_admin: bool) -> Result<()> {
        test_support!(self, {
            let query = "UPDATE users SET admin = $1 WHERE id = $2";
            sqlx::query(query)
                .bind(is_admin)
                .bind(id.0)
                .execute(&self.pool)
                .await
                .map(drop)
        })
    }

    pub async fn delete_user(&self, id: UserId) -> Result<()> {
        test_support!(self, {
            let query = "DELETE FROM users WHERE id = $1;";
            sqlx::query(query)
                .bind(id.0)
                .execute(&self.pool)
                .await
                .map(drop)
        })
    }

    // access tokens

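    /// Stores a new access token hash for the user and, in the same
    /// transaction, prunes all but the `max_access_token_count` most recent
    /// hashes for that user.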
    pub async fn create_access_token_hash(
        &self,
        user_id: UserId,
        access_token_hash: &str,
        max_access_token_count: usize,
    ) -> Result<()> {
        test_support!(self, {
            let insert_query = "
                INSERT INTO access_tokens (user_id, hash)
                VALUES ($1, $2);
            ";
            let cleanup_query = "
                DELETE FROM access_tokens
                WHERE id IN (
                    SELECT id FROM access_tokens
                    WHERE user_id = $1
                    ORDER BY id DESC
                    OFFSET $2
                )
            ";

            let mut tx = self.pool.begin().await?;
            sqlx::query(insert_query)
                .bind(user_id.0)
                .bind(access_token_hash)
                .execute(&mut tx)
                .await?;
            sqlx::query(cleanup_query)
                .bind(user_id.0)
                .bind(max_access_token_count as i64)
                .execute(&mut tx)
                .await?;
            tx.commit().await
        })
    }

    pub async fn get_access_token_hashes(&self, user_id: UserId) -> Result<Vec<String>> {
        test_support!(self, {
            let query = "
                SELECT hash
                FROM access_tokens
                WHERE user_id = $1
                ORDER BY id DESC
            ";
            sqlx::query_scalar(query)
                .bind(user_id.0)
                .fetch_all(&self.pool)
                .await
        })
    }

    // orgs

    #[allow(unused)] // Help rust-analyzer
    #[cfg(any(test, feature = "seed-support"))]
    pub async fn find_org_by_slug(&self, slug: &str) -> Result<Option<Org>> {
        test_support!(self, {
            let query = "
                SELECT *
                FROM orgs
                WHERE slug = $1
            ";
            sqlx::query_as(query)
                .bind(slug)
                .fetch_optional(&self.pool)
                .await
        })
    }

    #[cfg(any(test, feature = "seed-support"))]
    pub async fn create_org(&self, name: &str, slug: &str) -> Result<OrgId> {
        test_support!(self, {
            let query = "
                INSERT INTO orgs (name, slug)
                VALUES ($1, $2)
                RETURNING id
            ";
            sqlx::query_scalar(query)
                .bind(name)
                .bind(slug)
                .fetch_one(&self.pool)
                .await
                .map(OrgId)
        })
    }

    #[cfg(any(test, feature = "seed-support"))]
    pub async fn add_org_member(
        &self,
        org_id: OrgId,
        user_id: UserId,
        is_admin: bool,
    ) -> Result<()> {
        test_support!(self, {
            let query = "
                INSERT INTO org_memberships (org_id, user_id, admin)
                VALUES ($1, $2, $3)
                ON CONFLICT DO NOTHING
            ";
            sqlx::query(query)
                .bind(org_id.0)
                .bind(user_id.0)
                .bind(is_admin)
                .execute(&self.pool)
                .await
                .map(drop)
        })
    }

    // channels

    #[cfg(any(test, feature = "seed-support"))]
    pub async fn create_org_channel(&self, org_id: OrgId, name: &str) -> Result<ChannelId> {
        test_support!(self, {
            let query = "
                INSERT INTO channels (owner_id, owner_is_user, name)
                VALUES ($1, false, $2)
                RETURNING id
            ";
            sqlx::query_scalar(query)
                .bind(org_id.0)
                .bind(name)
                .fetch_one(&self.pool)
                .await
                .map(ChannelId)
        })
    }

    #[allow(unused)] // Help rust-analyzer
    #[cfg(any(test, feature = "seed-support"))]
    pub async fn get_org_channels(&self, org_id: OrgId) -> Result<Vec<Channel>> {
        test_support!(self, {
            let query = "
                SELECT *
                FROM channels
                WHERE
                    channels.owner_is_user = false AND
                    channels.owner_id = $1
            ";
            sqlx::query_as(query)
                .bind(org_id.0)
                .fetch_all(&self.pool)
                .await
        })
    }

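    /// Returns every channel the given user is a member of.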
    pub async fn get_accessible_channels(&self, user_id: UserId) -> Result<Vec<Channel>> {
        test_support!(self, {
            let query = "
                SELECT
                    channels.id, channels.name
                FROM
                    channel_memberships, channels
                WHERE
                    channel_memberships.user_id = $1 AND
                    channel_memberships.channel_id = channels.id
            ";
            sqlx::query_as(query)
                .bind(user_id.0)
                .fetch_all(&self.pool)
                .await
        })
    }

    pub async fn can_user_access_channel(
        &self,
        user_id: UserId,
        channel_id: ChannelId,
    ) -> Result<bool> {
        test_support!(self, {
            let query = "
                SELECT id
                FROM channel_memberships
                WHERE user_id = $1 AND channel_id = $2
                LIMIT 1
            ";
            sqlx::query_scalar::<_, i32>(query)
                .bind(user_id.0)
                .bind(channel_id.0)
                .fetch_optional(&self.pool)
                .await
                .map(|e| e.is_some())
        })
    }

    #[cfg(any(test, feature = "seed-support"))]
    pub async fn add_channel_member(
        &self,
        channel_id: ChannelId,
        user_id: UserId,
        is_admin: bool,
    ) -> Result<()> {
        test_support!(self, {
            let query = "
                INSERT INTO channel_memberships (channel_id, user_id, admin)
                VALUES ($1, $2, $3)
                ON CONFLICT DO NOTHING
            ";
            sqlx::query(query)
                .bind(channel_id.0)
                .bind(user_id.0)
                .bind(is_admin)
                .execute(&self.pool)
                .await
                .map(drop)
        })
    }

    // messages

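    /// Inserts a channel message. The nonce makes the insert idempotent: a
    /// retried message with the same nonce returns the id of the existing row
    /// instead of creating a duplicate.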
    pub async fn create_channel_message(
        &self,
        channel_id: ChannelId,
        sender_id: UserId,
        body: &str,
        timestamp: OffsetDateTime,
        nonce: u128,
    ) -> Result<MessageId> {
        test_support!(self, {
            let query = "
                INSERT INTO channel_messages (channel_id, sender_id, body, sent_at, nonce)
                VALUES ($1, $2, $3, $4, $5)
                ON CONFLICT (nonce) DO UPDATE SET nonce = excluded.nonce
                RETURNING id
            ";
            sqlx::query_scalar(query)
                .bind(channel_id.0)
                .bind(sender_id.0)
                .bind(body)
                .bind(timestamp)
                .bind(Uuid::from_u128(nonce))
                .fetch_one(&self.pool)
                .await
                .map(MessageId)
        })
    }

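    /// Returns up to `count` messages sent before `before_id` (or the most
    /// recent messages when `before_id` is `None`), ordered by ascending id.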
    pub async fn get_channel_messages(
        &self,
        channel_id: ChannelId,
        count: usize,
        before_id: Option<MessageId>,
    ) -> Result<Vec<ChannelMessage>> {
        test_support!(self, {
            let query = r#"
                SELECT * FROM (
                    SELECT
                        id, sender_id, body, sent_at AT TIME ZONE 'UTC' as sent_at, nonce
                    FROM
                        channel_messages
                    WHERE
                        channel_id = $1 AND
                        id < $2
                    ORDER BY id DESC
                    LIMIT $3
                ) as recent_messages
                ORDER BY id ASC
            "#;
            sqlx::query_as(query)
                .bind(channel_id.0)
                .bind(before_id.unwrap_or(MessageId::MAX))
                .bind(count as i64)
                .fetch_all(&self.pool)
                .await
        })
    }
}

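/// Defines a transparent newtype around an `i32` database id, with helpers for
/// converting to and from a `u64` representation (`from_proto`/`to_proto`).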
macro_rules! id_type {
    ($name:ident) => {
        #[derive(
            Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, sqlx::Type, Serialize,
        )]
        #[sqlx(transparent)]
        #[serde(transparent)]
        pub struct $name(pub i32);

        impl $name {
            #[allow(unused)]
            pub const MAX: Self = Self(i32::MAX);

            #[allow(unused)]
            pub fn from_proto(value: u64) -> Self {
                Self(value as i32)
            }

            #[allow(unused)]
            pub fn to_proto(&self) -> u64 {
                self.0 as u64
            }
        }
    };
}

id_type!(UserId);
#[derive(Debug, FromRow, Serialize, PartialEq)]
pub struct User {
    pub id: UserId,
    pub github_login: String,
    pub admin: bool,
}

id_type!(OrgId);
#[derive(FromRow)]
pub struct Org {
    pub id: OrgId,
    pub name: String,
    pub slug: String,
}

id_type!(SignupId);
#[derive(Debug, FromRow, Serialize)]
pub struct Signup {
    pub id: SignupId,
    pub github_login: String,
    pub email_address: String,
    pub about: String,
    pub wants_releases: Option<bool>,
    pub wants_updates: Option<bool>,
    pub wants_community: Option<bool>,
}

id_type!(ChannelId);
#[derive(Debug, FromRow, Serialize)]
pub struct Channel {
    pub id: ChannelId,
    pub name: String,
}

id_type!(MessageId);
#[derive(Debug, FromRow)]
pub struct ChannelMessage {
    pub id: MessageId,
    pub sender_id: UserId,
    pub body: String,
    pub sent_at: OffsetDateTime,
    pub nonce: Uuid,
}

#[cfg(test)]
pub mod tests {
    use super::*;
    use rand::prelude::*;
    use sqlx::{
        migrate::{MigrateDatabase, Migrator},
        Postgres,
    };
    use std::path::Path;

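    /// A Postgres database with a random name, created for a single test and
    /// dropped (along with any open connections) when the test finishes.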
    pub struct TestDb {
        pub db: Db,
        pub name: String,
        pub url: String,
    }

    impl TestDb {
        pub fn new() -> Self {
            // Enable tests to run in parallel by serializing the creation of each test database.
            lazy_static::lazy_static! {
                static ref DB_CREATION: std::sync::Mutex<()> = std::sync::Mutex::new(());
            }

            let mut rng = StdRng::from_entropy();
            let name = format!("zed-test-{}", rng.gen::<u128>());
            let url = format!("postgres://postgres@localhost/{}", name);
            let migrations_path = Path::new(concat!(env!("CARGO_MANIFEST_DIR"), "/migrations"));
            let db = block_on(async {
                {
                    let _lock = DB_CREATION.lock();
                    Postgres::create_database(&url)
                        .await
                        .expect("failed to create test db");
                }
                let mut db = Db::new(&url, 5).await.unwrap();
                db.test_mode = true;
                let migrator = Migrator::new(migrations_path).await.unwrap();
                migrator.run(&db.pool).await.unwrap();
                db
            });

            Self { db, name, url }
        }

        pub fn db(&self) -> &Db {
            &self.db
        }
    }

    impl Drop for TestDb {
        fn drop(&mut self) {
            block_on(async {
                // Terminate any other connections to the test database before dropping it.
                let query = "
                    SELECT pg_terminate_backend(pg_stat_activity.pid)
                    FROM pg_stat_activity
                    WHERE pg_stat_activity.datname = $1 AND pid <> pg_backend_pid();
                ";
                sqlx::query(query)
                    .bind(&self.name)
                    .execute(&self.db.pool)
                    .await
                    .unwrap();
                self.db.pool.close().await;
                Postgres::drop_database(&self.url).await.unwrap();
            });
        }
    }

    #[gpui::test]
    async fn test_get_users_by_ids() {
        let test_db = TestDb::new();
        let db = test_db.db();

        let user = db.create_user("user", false).await.unwrap();
        let friend1 = db.create_user("friend-1", false).await.unwrap();
        let friend2 = db.create_user("friend-2", false).await.unwrap();
        let friend3 = db.create_user("friend-3", false).await.unwrap();

        assert_eq!(
            db.get_users_by_ids([user, friend1, friend2, friend3])
                .await
                .unwrap(),
            vec![
                User {
                    id: user,
                    github_login: "user".to_string(),
                    admin: false,
                },
                User {
                    id: friend1,
                    github_login: "friend-1".to_string(),
                    admin: false,
                },
                User {
                    id: friend2,
                    github_login: "friend-2".to_string(),
                    admin: false,
                },
                User {
                    id: friend3,
                    github_login: "friend-3".to_string(),
                    admin: false,
                }
            ]
        );
    }

    #[gpui::test]
    async fn test_recent_channel_messages() {
        let test_db = TestDb::new();
        let db = test_db.db();
        let user = db.create_user("user", false).await.unwrap();
        let org = db.create_org("org", "org").await.unwrap();
        let channel = db.create_org_channel(org, "channel").await.unwrap();
        for i in 0..10 {
            db.create_channel_message(channel, user, &i.to_string(), OffsetDateTime::now_utc(), i)
                .await
                .unwrap();
        }

        let messages = db.get_channel_messages(channel, 5, None).await.unwrap();
        assert_eq!(
            messages.iter().map(|m| &m.body).collect::<Vec<_>>(),
            ["5", "6", "7", "8", "9"]
        );

        let prev_messages = db
            .get_channel_messages(channel, 4, Some(messages[0].id))
            .await
            .unwrap();
        assert_eq!(
            prev_messages.iter().map(|m| &m.body).collect::<Vec<_>>(),
            ["1", "2", "3", "4"]
        );
    }

    #[gpui::test]
    async fn test_channel_message_nonces() {
        let test_db = TestDb::new();
        let db = test_db.db();
        let user = db.create_user("user", false).await.unwrap();
        let org = db.create_org("org", "org").await.unwrap();
        let channel = db.create_org_channel(org, "channel").await.unwrap();

        let msg1_id = db
            .create_channel_message(channel, user, "1", OffsetDateTime::now_utc(), 1)
            .await
            .unwrap();
        let msg2_id = db
            .create_channel_message(channel, user, "2", OffsetDateTime::now_utc(), 2)
            .await
            .unwrap();
        let msg3_id = db
            .create_channel_message(channel, user, "3", OffsetDateTime::now_utc(), 1)
            .await
            .unwrap();
        let msg4_id = db
            .create_channel_message(channel, user, "4", OffsetDateTime::now_utc(), 2)
            .await
            .unwrap();

        assert_ne!(msg1_id, msg2_id);
        assert_eq!(msg1_id, msg3_id);
        assert_eq!(msg2_id, msg4_id);
    }

    #[gpui::test]
    async fn test_create_access_tokens() {
        let test_db = TestDb::new();
        let db = test_db.db();
        let user = db.create_user("the-user", false).await.unwrap();

        db.create_access_token_hash(user, "h1", 3).await.unwrap();
        db.create_access_token_hash(user, "h2", 3).await.unwrap();
        assert_eq!(
            db.get_access_token_hashes(user).await.unwrap(),
            &["h2".to_string(), "h1".to_string()]
        );

        db.create_access_token_hash(user, "h3", 3).await.unwrap();
        assert_eq!(
            db.get_access_token_hashes(user).await.unwrap(),
            &["h3".to_string(), "h2".to_string(), "h1".to_string()]
        );

        db.create_access_token_hash(user, "h4", 3).await.unwrap();
        assert_eq!(
            db.get_access_token_hashes(user).await.unwrap(),
            &["h4".to_string(), "h3".to_string(), "h2".to_string()]
        );

        db.create_access_token_hash(user, "h5", 3).await.unwrap();
        assert_eq!(
            db.get_access_token_hashes(user).await.unwrap(),
            &["h5".to_string(), "h4".to_string(), "h3".to_string()]
        );
    }
}