@@ -958,6 +958,22 @@ pub enum ToolChoice {
#[serde(tag = "type", rename_all = "lowercase")]
pub enum Thinking {
Enabled { budget_tokens: Option<u32> },
+ Adaptive,
+}
+
+#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
+#[serde(rename_all = "snake_case")]
+pub enum Effort {
+ Low,
+ Medium,
+ High,
+ Max,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct OutputConfig {
+ #[serde(default, skip_serializing_if = "Option::is_none")]
+ pub effort: Option<Effort>,
}
#[derive(Debug, Serialize, Deserialize)]
@@ -982,6 +997,8 @@ pub struct Request {
pub system: Option<StringOrContents>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub metadata: Option<Metadata>,
+ #[serde(default, skip_serializing_if = "Option::is_none")]
+ pub output_config: Option<OutputConfig>,
#[serde(default, skip_serializing_if = "Vec::is_empty")]
pub stop_sequences: Vec<String>,
#[serde(default, skip_serializing_if = "Option::is_none")]
@@ -47,6 +47,7 @@ impl PlainLlmClient {
tool_choice: None,
system: None,
metadata: None,
+ output_config: None,
stop_sequences: Vec::new(),
temperature: None,
top_k: None,
@@ -85,6 +86,7 @@ impl PlainLlmClient {
tool_choice: None,
system: None,
metadata: None,
+ output_config: None,
stop_sequences: Vec::new(),
temperature: None,
top_k: None,
@@ -560,6 +562,7 @@ impl BatchingLlmClient {
tool_choice: None,
system: None,
metadata: None,
+ output_config: None,
stop_sequences: Vec::new(),
temperature: None,
top_k: None,
@@ -779,6 +779,7 @@ pub fn into_anthropic(
LanguageModelToolChoice::None => anthropic::ToolChoice::None,
}),
metadata: None,
+ output_config: None,
stop_sequences: Vec::new(),
temperature: request.temperature.or(Some(default_temperature)),
top_k: None,