Detailed changes
@@ -38,6 +38,8 @@ pub enum Model {
     Custom {
         name: String,
         max_tokens: usize,
+        /// The name displayed in the UI, such as in the assistant panel model dropdown menu.
+        display_name: Option<String>,
         /// Override this model with a different Anthropic model for tool calls.
         tool_override: Option<String>,
         /// Indicates whether this custom model supports caching.
@@ -77,7 +79,9 @@ impl Model {
             Self::Claude3Opus => "Claude 3 Opus",
             Self::Claude3Sonnet => "Claude 3 Sonnet",
             Self::Claude3Haiku => "Claude 3 Haiku",
-            Self::Custom { name, .. } => name,
+            Self::Custom {
+                name, display_name, ..
+            } => display_name.as_ref().unwrap_or(name),
         }
     }
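With this change, the UI label prefers the custom `display_name` and falls back to the API `name` when none is configured. Below is a minimal, self-contained sketch of that fallback, using a hypothetical stand-in type rather than the actual `anthropic::Model` enum:

```rust
/// Hypothetical stand-in for the `Custom` variant above; only here to
/// illustrate the `display_name` fallback, not part of the actual change.
struct CustomModel {
    /// Name sent to the Anthropic API.
    name: String,
    /// Optional label shown in Zed's UI.
    display_name: Option<String>,
}

impl CustomModel {
    /// Mirrors `display_name.as_ref().unwrap_or(name)` from the diff:
    /// return the UI label if one was configured, otherwise the API name.
    fn display_name(&self) -> &str {
        self.display_name.as_deref().unwrap_or(self.name.as_str())
    }
}

fn main() {
    let labeled = CustomModel {
        name: "claude-3-5-sonnet-20240620".into(),
        display_name: Some("Sonnet 3.5".into()),
    };
    let unlabeled = CustomModel {
        name: "claude-3-5-sonnet-20240620".into(),
        display_name: None,
    };
    assert_eq!(labeled.display_name(), "Sonnet 3.5");
    assert_eq!(unlabeled.display_name(), "claude-3-5-sonnet-20240620");
}
```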
@@ -29,15 +29,22 @@ const PROVIDER_NAME: &str = "Anthropic";
 pub struct AnthropicSettings {
     pub api_url: String,
     pub low_speed_timeout: Option<Duration>,
+    /// Extend Zed's list of Anthropic models.
     pub available_models: Vec<AvailableModel>,
     pub needs_setting_migration: bool,
 }

 #[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
 pub struct AvailableModel {
+    /// The model's name in the Anthropic API. e.g. claude-3-5-sonnet-20240620
     pub name: String,
+    /// The model's name in Zed's UI, such as in the model selector dropdown menu in the assistant panel.
+    pub display_name: Option<String>,
+    /// The model's context window size.
     pub max_tokens: usize,
+    /// A model `name` to substitute when calling tools, in case the primary model doesn't support tool calling.
     pub tool_override: Option<String>,
+    /// Configuration of Anthropic's caching API.
     pub cache_configuration: Option<LanguageModelCacheConfiguration>,
     pub max_output_tokens: Option<u32>,
 }
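Because `AvailableModel` derives `Deserialize`, a settings entry only has to provide `name` and `max_tokens`; the other fields may be omitted. Here is a rough sketch of how such an entry might deserialize, assuming `serde`/`serde_json` and a simplified, hypothetical stand-in struct (the real one also carries `tool_override` and `cache_configuration`); the values are purely illustrative:

```rust
use serde::Deserialize;

/// Simplified, hypothetical stand-in for the `AvailableModel` settings struct above.
#[derive(Debug, Deserialize)]
struct AvailableModel {
    name: String,
    display_name: Option<String>,
    max_tokens: usize,
    max_output_tokens: Option<u32>,
}

fn main() {
    // Example entry a user might add to the Anthropic provider's
    // `available_models` setting; `display_name` controls the label in the UI.
    let entry = r#"{
        "name": "claude-3-5-sonnet-20240620",
        "display_name": "Sonnet 3.5",
        "max_tokens": 200000
    }"#;

    let model: AvailableModel = serde_json::from_str(entry).expect("valid settings entry");
    assert_eq!(model.display_name.as_deref(), Some("Sonnet 3.5"));
    // Optional fields that are omitted simply deserialize to `None`.
    assert_eq!(model.max_output_tokens, None);
}
```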
@@ -171,6 +178,7 @@ impl LanguageModelProvider for AnthropicLanguageModelProvider {
                 model.name.clone(),
                 anthropic::Model::Custom {
                     name: model.name.clone(),
+                    display_name: model.display_name.clone(),
                     max_tokens: model.max_tokens,
                     tool_override: model.tool_override.clone(),
                     cache_configuration: model.cache_configuration.as_ref().map(|config| {
@@ -52,12 +52,20 @@ pub enum AvailableProvider {
 #[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
 pub struct AvailableModel {
-    provider: AvailableProvider,
-    name: String,
-    max_tokens: usize,
-    tool_override: Option<String>,
-    cache_configuration: Option<LanguageModelCacheConfiguration>,
-    max_output_tokens: Option<u32>,
+    /// The provider of the language model.
+    pub provider: AvailableProvider,
+    /// The model's name in the provider's API. e.g. claude-3-5-sonnet-20240620
+    pub name: String,
+    /// The name displayed in the UI, such as in the assistant panel model dropdown menu.
+    pub display_name: Option<String>,
+    /// The size of the context window, indicating the maximum number of tokens the model can process.
+    pub max_tokens: usize,
+    /// The maximum number of output tokens allowed by the model.
+    pub max_output_tokens: Option<u32>,
+    /// Override this model with a different Anthropic model for tool calls.
+    pub tool_override: Option<String>,
+    /// Indicates whether this custom model supports caching.
+    pub cache_configuration: Option<LanguageModelCacheConfiguration>,
 }

 pub struct CloudLanguageModelProvider {
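For the Zed-hosted provider, an entry additionally names which upstream provider the custom model targets. A similar hedged sketch, again with hypothetical stand-in types rather than the real `AvailableProvider` and `AvailableModel` (the tag spelling `"anthropic"` is chosen by the stand-in's own serde attribute):

```rust
use serde::Deserialize;

/// Hypothetical stand-in for `AvailableProvider`; only the Anthropic case is sketched.
#[derive(Debug, Deserialize, PartialEq)]
#[serde(rename_all = "lowercase")]
enum Provider {
    Anthropic,
}

/// Hypothetical stand-in for the cloud `AvailableModel` above
/// (`tool_override` and `cache_configuration` omitted for brevity).
#[derive(Debug, Deserialize)]
struct AvailableModel {
    provider: Provider,
    name: String,
    display_name: Option<String>,
    max_tokens: usize,
}

fn main() {
    let entry = r#"{
        "provider": "anthropic",
        "name": "claude-3-5-sonnet-20240620",
        "display_name": "Sonnet 3.5 (hosted)",
        "max_tokens": 200000
    }"#;

    let model: AvailableModel = serde_json::from_str(entry).expect("valid entry");
    // The provider tag decides which `CloudModel` variant gets constructed downstream.
    assert_eq!(model.provider, Provider::Anthropic);
    assert_eq!(model.display_name.as_deref(), Some("Sonnet 3.5 (hosted)"));
}
```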
@@ -202,6 +210,7 @@ impl LanguageModelProvider for CloudLanguageModelProvider {
             AvailableProvider::Anthropic => {
                 CloudModel::Anthropic(anthropic::Model::Custom {
                     name: model.name.clone(),
+                    display_name: model.display_name.clone(),
                     max_tokens: model.max_tokens,
                     tool_override: model.tool_override.clone(),
                     cache_configuration: model.cache_configuration.as_ref().map(|config| {
@@ -94,12 +94,14 @@ impl AnthropicSettingsContent {
                 .filter_map(|model| match model {
                     anthropic::Model::Custom {
                         name,
+                        display_name,
                         max_tokens,
                         tool_override,
                         cache_configuration,
                         max_output_tokens,
                     } => Some(provider::anthropic::AvailableModel {
                         name,
+                        display_name,
                         max_tokens,
                         tool_override,
                         cache_configuration: cache_configuration.as_ref().map(