chore: add reasoning effort

Created by Kujtim Hoxha

Change summary

cmd/openrouter/main.go                 |  2 +-
internal/providers/configs/azure.json  | 28 +++++++++++++++++++---------
internal/providers/configs/openai.json | 22 ++++++++++++++++------
pkg/provider/provider.go               | 22 ++++++++++++----------
4 files changed, 48 insertions(+), 26 deletions(-)

Detailed changes

cmd/openrouter/main.go 🔗

@@ -167,7 +167,7 @@ func main() {
 			SupportsImages:     supportsImages,
 		}
 		if model.TopProvider.MaxCompletionTokens != nil {
-			m.DefaultMaxTokens = int64(*model.TopProvider.MaxCompletionTokens / 2)
+			m.DefaultMaxTokens = *model.TopProvider.MaxCompletionTokens / 2
 		} else {
 			m.DefaultMaxTokens = model.ContextLength / 10
 		}

internal/providers/configs/azure.json 🔗

@@ -16,7 +16,9 @@
       "cost_per_1m_out_cached": 0.375,
       "context_window": 200000,
       "default_max_tokens": 50000,
-      "can_reason": false,
+      "can_reason": true,
+      "has_reasoning_effort": true,
+      "default_reasoning_effort": "medium",
       "supports_attachments": true
     },
     {
@@ -28,7 +30,9 @@
       "cost_per_1m_out_cached": 0.275,
       "context_window": 200000,
       "default_max_tokens": 50000,
-      "can_reason": false,
+      "can_reason": true,
+      "has_reasoning_effort": true,
+      "default_reasoning_effort": "medium",
       "supports_attachments": true
     },
     {
@@ -40,7 +44,9 @@
       "cost_per_1m_out_cached": 0.5,
       "context_window": 200000,
       "default_max_tokens": 50000,
-      "can_reason": false,
+      "can_reason": true,
+      "has_reasoning_effort": true,
+      "default_reasoning_effort": "medium",
       "supports_attachments": true
     },
     {
@@ -52,7 +58,9 @@
       "cost_per_1m_out_cached": 0,
       "context_window": 200000,
       "default_max_tokens": 50000,
-      "can_reason": false,
+      "can_reason": true,
+      "has_reasoning_effort": true,
+      "default_reasoning_effort": "medium",
       "supports_attachments": true
     },
     {
@@ -63,7 +71,7 @@
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0.5,
       "context_window": 1047576,
-      "default_max_tokens": 20000,
+      "default_max_tokens": 50000,
       "can_reason": false,
       "supports_attachments": true
     },
@@ -75,7 +83,7 @@
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0.09999999999999999,
       "context_window": 1047576,
-      "default_max_tokens": 20000,
+      "default_max_tokens": 50000,
       "can_reason": false,
       "supports_attachments": true
     },
@@ -87,7 +95,7 @@
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 0.024999999999999998,
       "context_window": 1047576,
-      "default_max_tokens": 20000,
+      "default_max_tokens": 50000,
       "can_reason": false,
       "supports_attachments": true
     },
@@ -99,7 +107,7 @@
       "cost_per_1m_in_cached": 0,
       "cost_per_1m_out_cached": 37.5,
       "context_window": 128000,
-      "default_max_tokens": 20000,
+      "default_max_tokens": 50000,
       "can_reason": false,
       "supports_attachments": true
     },
@@ -112,7 +120,9 @@
       "cost_per_1m_out_cached": 0.55,
       "context_window": 200000,
       "default_max_tokens": 50000,
-      "can_reason": false,
+      "can_reason": true,
+      "has_reasoning_effort": true,
+      "default_reasoning_effort": "medium",
       "supports_attachments": false
     },
     {

internal/providers/configs/openai.json 🔗

@@ -4,7 +4,7 @@
   "type": "openai",
   "api_key": "$OPENAI_API_KEY",
   "api_endpoint": "$OPENAI_API_ENDPOINT",
-  "default_large_model_id": "o4-mini",
+  "default_large_model_id": "codex-mini-latest",
   "default_small_model_id": "gpt-4o",
   "models": [
     {
@@ -16,7 +16,9 @@
       "cost_per_1m_out_cached": 0.375,
       "context_window": 200000,
       "default_max_tokens": 50000,
-      "can_reason": false,
+      "can_reason": true,
+      "has_reasoning_effort": true,
+      "default_reasoning_effort": "medium",
       "supports_attachments": true
     },
     {
@@ -28,7 +30,9 @@
       "cost_per_1m_out_cached": 0.275,
       "context_window": 200000,
       "default_max_tokens": 50000,
-      "can_reason": false,
+      "can_reason": true,
+      "has_reasoning_effort": true,
+      "default_reasoning_effort": "medium",
       "supports_attachments": true
     },
     {
@@ -40,7 +44,9 @@
       "cost_per_1m_out_cached": 0.5,
       "context_window": 200000,
       "default_max_tokens": 50000,
-      "can_reason": false,
+      "can_reason": true,
+      "has_reasoning_effort": true,
+      "default_reasoning_effort": "medium",
       "supports_attachments": true
     },
     {
@@ -52,7 +58,9 @@
       "cost_per_1m_out_cached": 0,
       "context_window": 200000,
       "default_max_tokens": 50000,
-      "can_reason": false,
+      "can_reason": true,
+      "has_reasoning_effort": true,
+      "default_reasoning_effort": "medium",
       "supports_attachments": true
     },
     {
@@ -112,7 +120,9 @@
       "cost_per_1m_out_cached": 0.55,
       "context_window": 200000,
       "default_max_tokens": 50000,
-      "can_reason": false,
+      "can_reason": true,
+      "has_reasoning_effort": true,
+      "default_reasoning_effort": "medium",
       "supports_attachments": false
     },
     {

pkg/provider/provider.go 🔗

@@ -45,16 +45,18 @@ type Provider struct {
 
 // Model represents an AI model configuration.
 type Model struct {
-	ID                 string  `json:"id"`
-	Name               string  `json:"model"`
-	CostPer1MIn        float64 `json:"cost_per_1m_in"`
-	CostPer1MOut       float64 `json:"cost_per_1m_out"`
-	CostPer1MInCached  float64 `json:"cost_per_1m_in_cached"`
-	CostPer1MOutCached float64 `json:"cost_per_1m_out_cached"`
-	ContextWindow      int64   `json:"context_window"`
-	DefaultMaxTokens   int64   `json:"default_max_tokens"`
-	CanReason          bool    `json:"can_reason"`
-	SupportsImages     bool    `json:"supports_attachments"`
+	ID                     string  `json:"id"`
+	Name                   string  `json:"model"`
+	CostPer1MIn            float64 `json:"cost_per_1m_in"`
+	CostPer1MOut           float64 `json:"cost_per_1m_out"`
+	CostPer1MInCached      float64 `json:"cost_per_1m_in_cached"`
+	CostPer1MOutCached     float64 `json:"cost_per_1m_out_cached"`
+	ContextWindow          int64   `json:"context_window"`
+	DefaultMaxTokens       int64   `json:"default_max_tokens"`
+	CanReason              bool    `json:"can_reason"`
+	HasReasoningEffort     bool    `json:"has_reasoning_effort"`
+	DefaultReasoningEffort string  `json:"default_reasoning_effort,omitempty"`
+	SupportsImages         bool    `json:"supports_attachments"`
 }
 
 // KnownProviders returns all the known inference providers.