diff --git a/crates/livekit_client/src/livekit_client/playback.rs b/crates/livekit_client/src/livekit_client/playback.rs
index e825df63a6d114d7ce7821b96bf32831f9a23ab9..6e39c2abfb4162ceaa43373f4170a41ffdb36351 100644
--- a/crates/livekit_client/src/livekit_client/playback.rs
+++ b/crates/livekit_client/src/livekit_client/playback.rs
@@ -5,8 +5,8 @@ use cpal::traits::{DeviceTrait, StreamTrait as _};
 use futures::channel::mpsc::UnboundedSender;
 use futures::{Stream, StreamExt as _};
 use gpui::{
-    AsyncApp, BackgroundExecutor, ScreenCaptureFrame, ScreenCaptureSource, ScreenCaptureStream,
-    Task,
+    AsyncApp, BackgroundExecutor, Priority, ScreenCaptureFrame, ScreenCaptureSource,
+    ScreenCaptureStream, Task,
 };
 use libwebrtc::native::{apm, audio_mixer, audio_resampler};
 use livekit::track;
@@ -211,21 +211,13 @@ impl AudioStack {
             let voip_parts = audio::VoipParts::new(cx)?;
-            // Audio needs to run real-time and should never be paused. That is
-            // why we are using a normal std::thread and not a background task
-            thread::Builder::new()
-                .name("MicrophoneToLivekit".to_string())
-                .spawn(move || {
+            // Audio needs to run in real time and should never be paused,
+            // which is why this task is spawned with realtime priority.
+            self.executor
+                .spawn_with_priority(Priority::RealtimeAudio, async move {
                     // microphone is non send on mac
-                    let microphone = match audio::Audio::open_microphone(voip_parts) {
-                        Ok(m) => m,
-                        Err(e) => {
-                            log::error!("Could not open microphone: {e}");
-                            return;
-                        }
-                    };
+                    let microphone = audio::Audio::open_microphone(voip_parts)?;
                     send_to_livekit(frame_tx, microphone);
+                    Ok(())
                 })
-                .expect("should be able to spawn threads");
-            Task::ready(Ok(()))
         } else {
             self.executor.spawn(async move {
                 Self::capture_input(