Change summary 
  internal/config/config.go          |  7 +++++--
internal/llm/provider/anthropic.go | 30 +++++++-----------------------
2 files changed, 12 insertions(+), 25 deletions(-)
 
 
  Detailed changes 
  
  
    
    @@ -159,10 +159,13 @@ type Options struct {
 type PreferredModel struct {
 	ModelID  string                     `json:"model_id"`
 	Provider provider.InferenceProvider `json:"provider"`
-	// Overrides the default reasoning effort for this model
+	// ReasoningEffort overrides the default reasoning effort for this model
 	ReasoningEffort string `json:"reasoning_effort,omitempty"`
-	// Overrides the default max tokens for this model
+	// MaxTokens overrides the default max tokens for this model
 	MaxTokens int64 `json:"max_tokens,omitempty"`
+
+	// Think indicates whether the model should think; only applicable for Anthropic reasoning models
+	Think bool `json:"think,omitempty"`
 }
 
 type PreferredModels struct {
 
   
  
  
    
    @@ -6,7 +6,6 @@ import (
 	"errors"
 	"fmt"
 	"io"
-	"strings"
 	"time"
 
 	"github.com/anthropics/anthropic-sdk-go"
@@ -150,28 +149,18 @@ func (a *anthropicClient) finishReason(reason string) message.FinishReason {
 func (a *anthropicClient) preparedMessages(messages []anthropic.MessageParam, tools []anthropic.ToolUnionParam) anthropic.MessageNewParams {
 	model := a.providerOptions.model(a.providerOptions.modelType)
 	var thinkingParam anthropic.ThinkingConfigParamUnion
-	// TODO: Implement a proper thinking function
-	// lastMessage := messages[len(messages)-1]
-	// isUser := lastMessage.Role == anthropic.MessageParamRoleUser
-	// messageContent := ""
-	temperature := anthropic.Float(0)
-	// if isUser {
-	// 	for _, m := range lastMessage.Content {
-	// 		if m.OfText != nil && m.OfText.Text != "" {
-	// 			messageContent = m.OfText.Text
-	// 		}
-	// 	}
-	// 	if messageContent != "" && a.shouldThink != nil && a.options.shouldThink(messageContent) {
-	// 		thinkingParam = anthropic.ThinkingConfigParamOfEnabled(int64(float64(a.providerOptions.maxTokens) * 0.8))
-	// 		temperature = anthropic.Float(1)
-	// 	}
-	// }
-
 	cfg := config.Get()
 	modelConfig := cfg.Models.Large
 	if a.providerOptions.modelType == config.SmallModel {
 		modelConfig = cfg.Models.Small
 	}
+	temperature := anthropic.Float(0)
+
+	if a.Model().CanReason && modelConfig.Think {
+		thinkingParam = anthropic.ThinkingConfigParamOfEnabled(int64(float64(a.providerOptions.maxTokens) * 0.8))
+		temperature = anthropic.Float(1)
+	}
+
 	maxTokens := model.DefaultMaxTokens
 	if modelConfig.MaxTokens > 0 {
 		maxTokens = modelConfig.MaxTokens
@@ -456,8 +445,3 @@ func (a *anthropicClient) usage(msg anthropic.Message) TokenUsage {
 func (a *anthropicClient) Model() config.Model {
 	return a.providerOptions.model(a.providerOptions.modelType)
 }
-
-// TODO: check if we need
-func DefaultShouldThinkFn(s string) bool {
-	return strings.Contains(strings.ToLower(s), "think")
-}