diff --git a/crates/agent2/src/agent.rs b/crates/agent2/src/agent.rs
index d72216208769de98c6ad408fec0c17133090b79b..44d4075b89306b6c6dd81d6de503888c036e6fbf 100644
--- a/crates/agent2/src/agent.rs
+++ b/crates/agent2/src/agent.rs
@@ -166,33 +166,41 @@ impl LanguageModels {
         cx.background_spawn(async move {
             for (provider_id, provider_name, authenticate_task) in authenticate_all_providers {
                 if let Err(err) = authenticate_task.await {
-                    if matches!(err, language_model::AuthenticateError::CredentialsNotFound) {
-                        // Since we're authenticating these providers in the
-                        // background for the purposes of populating the
-                        // language selector, we don't care about providers
-                        // where the credentials are not found.
-                    } else {
-                        // Some providers have noisy failure states that we
-                        // don't want to spam the logs with every time the
-                        // language model selector is initialized.
-                        //
-                        // Ideally these should have more clear failure modes
-                        // that we know are safe to ignore here, like what we do
-                        // with `CredentialsNotFound` above.
-                        match provider_id.0.as_ref() {
-                            "lmstudio" | "ollama" => {
-                                // LM Studio and Ollama both make fetch requests to the local APIs to determine if they are "authenticated".
-                                //
-                                // These fail noisily, so we don't log them.
-                            }
-                            "copilot_chat" => {
-                                // Copilot Chat returns an error if Copilot is not enabled, so we don't log those errors.
-                            }
-                            _ => {
-                                log::error!(
-                                    "Failed to authenticate provider: {}: {err}",
-                                    provider_name.0
-                                );
+                    match err {
+                        language_model::AuthenticateError::CredentialsNotFound => {
+                            // Since we're authenticating these providers in the
+                            // background for the purposes of populating the
+                            // language selector, we don't care about providers
+                            // where the credentials are not found.
+                        }
+                        language_model::AuthenticateError::ConnectionRefused => {
+                            // Not logging connection refused errors as they are mostly from LM Studio's noisy auth failures.
+                            // LM Studio only has one auth method (endpoint call) which fails for users who haven't enabled it.
+                            // TODO: Better manage LM Studio auth logic to avoid these noisy failures.
+                        }
+                        _ => {
+                            // Some providers have noisy failure states that we
+                            // don't want to spam the logs with every time the
+                            // language model selector is initialized.
+                            //
+                            // Ideally these should have more clear failure modes
+                            // that we know are safe to ignore here, like what we do
+                            // with `CredentialsNotFound` above.
+                            match provider_id.0.as_ref() {
+                                "lmstudio" | "ollama" => {
+                                    // LM Studio and Ollama both make fetch requests to the local APIs to determine if they are "authenticated".
+                                    //
+                                    // These fail noisily, so we don't log them.
+                                }
+                                "copilot_chat" => {
+                                    // Copilot Chat returns an error if Copilot is not enabled, so we don't log those errors.
+                                }
+                                _ => {
+                                    log::error!(
+                                        "Failed to authenticate provider: {}: {err}",
+                                        provider_name.0
+                                    );
+                                }
                             }
                         }
                     }
diff --git a/crates/language_model/src/language_model.rs b/crates/language_model/src/language_model.rs
index fac302104fd9a4da82f5a383d5cd86b64fde4731..fb35a1cae109490620268b11382c3070a6ef6da2 100644
--- a/crates/language_model/src/language_model.rs
+++ b/crates/language_model/src/language_model.rs
@@ -681,6 +681,8 @@ pub trait LanguageModelTool: 'static + DeserializeOwned + JsonSchema {
 /// An error that occurred when trying to authenticate the language model provider.
 #[derive(Debug, Error)]
 pub enum AuthenticateError {
+    #[error("connection refused")]
+    ConnectionRefused,
     #[error("credentials not found")]
     CredentialsNotFound,
     #[error(transparent)]
diff --git a/crates/language_models/src/provider/lmstudio.rs b/crates/language_models/src/provider/lmstudio.rs
index 80b28a396b958ab20de3faa0a0f6919c57011e5c..e6e1726ff5b1f0a2edd51d5fde32da9734e56f9d 100644
--- a/crates/language_models/src/provider/lmstudio.rs
+++ b/crates/language_models/src/provider/lmstudio.rs
@@ -111,7 +111,30 @@ impl State {
         }

         let fetch_models_task = self.fetch_models(cx);
-        cx.spawn(async move |_this, _cx| Ok(fetch_models_task.await?))
+        cx.spawn(async move |_this, _cx| {
+            match fetch_models_task.await {
+                Ok(()) => Ok(()),
+                Err(err) => {
+                    // If any cause in the error chain is a std::io::Error with
+                    // ErrorKind::ConnectionRefused, LM Studio is not running, so
+                    // surface it as `AuthenticateError::ConnectionRefused`.
+                    let mut connection_refused = false;
+                    for cause in err.chain() {
+                        if let Some(io_err) = cause.downcast_ref::<std::io::Error>() {
+                            if io_err.kind() == std::io::ErrorKind::ConnectionRefused {
+                                connection_refused = true;
+                                break;
+                            }
+                        }
+                    }
+                    if connection_refused {
+                        Err(AuthenticateError::ConnectionRefused)
+                    } else {
+                        Err(AuthenticateError::Other(err))
+                    }
+                }
+            }
+        })
     }
 }
