diff --git a/providers/anthropic/anthropic.go b/providers/anthropic/anthropic.go
index fbc1381f20b6d8a192bea1d1e2411234e9fe4439..7e17013b6557e4224a0d43b5151224f6e829424a 100644
--- a/providers/anthropic/anthropic.go
+++ b/providers/anthropic/anthropic.go
@@ -16,30 +16,6 @@ import (
 	"github.com/charmbracelet/ai"
 )
 
-type ProviderOptions struct {
-	SendReasoning          *bool                   `json:"send_reasoning,omitempty"`
-	Thinking               *ThinkingProviderOption `json:"thinking,omitempty"`
-	DisableParallelToolUse *bool                   `json:"disable_parallel_tool_use,omitempty"`
-}
-
-type ThinkingProviderOption struct {
-	BudgetTokens int64 `json:"budget_tokens"`
-}
-
-type ReasoningMetadata struct {
-	Signature    string `json:"signature"`
-	RedactedData string `json:"redacted_data"`
-}
-
-type CacheControlProviderOptions struct {
-	Type string `json:"type"`
-}
-type FilePartProviderOptions struct {
-	EnableCitations bool   `json:"enable_citations"`
-	Title           string `json:"title"`
-	Context         string `json:"context"`
-}
-
 type options struct {
 	baseURL string
 	apiKey  string
@@ -147,7 +123,7 @@ func (a languageModel) Provider() string {
 
 func (a languageModel) prepareParams(call ai.Call) (*anthropic.MessageNewParams, []ai.CallWarning, error) {
 	params := &anthropic.MessageNewParams{}
-	providerOptions := &ProviderOptions{}
+	providerOptions := &providerOptions{}
 	if v, ok := call.ProviderOptions["anthropic"]; ok {
 		err := ai.ParseOptions(v, providerOptions)
 		if err != nil {
@@ -246,11 +222,11 @@ func (a languageModel) prepareParams(call ai.Call) (*anthropic.MessageNewParams,
 	return params, warnings, nil
 }
 
-func getCacheControl(providerOptions ai.ProviderOptions) *CacheControlProviderOptions {
+func getCacheControl(providerOptions ai.ProviderOptions) *cacheControlProviderOptions {
 	if anthropicOptions, ok := providerOptions["anthropic"]; ok {
 		if cacheControl, ok := anthropicOptions["cache_control"]; ok {
 			if cc, ok := cacheControl.(map[string]any); ok {
-				cacheControlOption := &CacheControlProviderOptions{}
+				cacheControlOption := &cacheControlProviderOptions{}
 				err := ai.ParseOptions(cc, cacheControlOption)
 				if err != nil {
 					return cacheControlOption
@@ -258,7 +234,7 @@ func getCacheControl(providerOptions ai.ProviderOptions) *CacheControlProviderOp
 			}
 		} else if cacheControl, ok := anthropicOptions["cacheControl"]; ok {
 			if cc, ok := cacheControl.(map[string]any); ok {
-				cacheControlOption := &CacheControlProviderOptions{}
+				cacheControlOption := &cacheControlProviderOptions{}
 				err := ai.ParseOptions(cc, cacheControlOption)
 				if err != nil {
 					return cacheControlOption
@@ -269,9 +245,9 @@ func getCacheControl(providerOptions ai.ProviderOptions) *CacheControlProviderOp
 	return nil
 }
 
-func getReasoningMetadata(providerOptions ai.ProviderOptions) *ReasoningMetadata {
+func getReasoningMetadata(providerOptions ai.ProviderOptions) *reasoningMetadata {
 	if anthropicOptions, ok := providerOptions["anthropic"]; ok {
-		reasoningMetadata := &ReasoningMetadata{}
+		reasoningMetadata := &reasoningMetadata{}
 		err := ai.ParseOptions(anthropicOptions, reasoningMetadata)
 		if err != nil {
 			return reasoningMetadata
diff --git a/providers/anthropic/provider_options.go b/providers/anthropic/provider_options.go
new file mode 100644
index 0000000000000000000000000000000000000000..b98c31c36721b6331e3ad5d5d231461443d0dd34
--- /dev/null
+++ b/providers/anthropic/provider_options.go
@@ -0,0 +1,20 @@
+package anthropic
+
+type providerOptions struct {
+	SendReasoning          *bool                   `json:"send_reasoning,omitempty"`
+	Thinking               *thinkingProviderOption `json:"thinking,omitempty"`
+	DisableParallelToolUse *bool                   `json:"disable_parallel_tool_use,omitempty"`
+}
+
+type thinkingProviderOption struct {
+	BudgetTokens int64 `json:"budget_tokens"`
+}
+
+type reasoningMetadata struct {
+	Signature    string `json:"signature"`
+	RedactedData string `json:"redacted_data"`
+}
+
+type cacheControlProviderOptions struct {
+	Type string `json:"type"`
+}
diff --git a/providers/openai/openai.go b/providers/openai/openai.go
index f7f152eb423c39ea43ea4d276c68c259dbdd8aff..d8e30cedd7607bd0cf8335425e2f67da6193207f 100644
--- a/providers/openai/openai.go
+++ b/providers/openai/openai.go
@@ -19,33 +19,6 @@ import (
 	"github.com/openai/openai-go/v2/shared"
 )
 
-type ReasoningEffort string
-
-const (
-	ReasoningEffortMinimal ReasoningEffort = "minimal"
-	ReasoningEffortLow     ReasoningEffort = "low"
-	ReasoningEffortMedium  ReasoningEffort = "medium"
-	ReasoningEffortHigh    ReasoningEffort = "high"
-)
-
-type ProviderOptions struct {
-	LogitBias           map[string]int64 `json:"logit_bias"`
-	LogProbs            *bool            `json:"log_probes"`
-	TopLogProbs         *int64           `json:"top_log_probs"`
-	ParallelToolCalls   *bool            `json:"parallel_tool_calls"`
-	User                *string          `json:"user"`
-	ReasoningEffort     *ReasoningEffort `json:"reasoning_effort"`
-	MaxCompletionTokens *int64           `json:"max_completion_tokens"`
-	TextVerbosity       *string          `json:"text_verbosity"`
-	Prediction          map[string]any   `json:"prediction"`
-	Store               *bool            `json:"store"`
-	Metadata            map[string]any   `json:"metadata"`
-	PromptCacheKey      *string          `json:"prompt_cache_key"`
-	SafetyIdentifier    *string          `json:"safety_identifier"`
-	ServiceTier         *string          `json:"service_tier"`
-	StructuredOutputs   *bool            `json:"structured_outputs"`
-}
-
 type provider struct {
 	options options
 }
@@ -179,7 +152,7 @@ func (o languageModel) Provider() string {
 func (o languageModel) prepareParams(call ai.Call) (*openai.ChatCompletionNewParams, []ai.CallWarning, error) {
 	params := &openai.ChatCompletionNewParams{}
 	messages, warnings := toPrompt(call.Prompt)
-	providerOptions := &ProviderOptions{}
+	providerOptions := &providerOptions{}
 	if v, ok := call.ProviderOptions["openai"]; ok {
 		err := ai.ParseOptions(v, providerOptions)
 		if err != nil {
@@ -273,13 +246,13 @@ func (o languageModel) prepareParams(call ai.Call) (*openai.ChatCompletionNewPar
 
 	if providerOptions.ReasoningEffort != nil {
 		switch *providerOptions.ReasoningEffort {
-		case ReasoningEffortMinimal:
+		case reasoningEffortMinimal:
 			params.ReasoningEffort = shared.ReasoningEffortMinimal
-		case ReasoningEffortLow:
+		case reasoningEffortLow:
 			params.ReasoningEffort = shared.ReasoningEffortLow
-		case ReasoningEffortMedium:
+		case reasoningEffortMedium:
 			params.ReasoningEffort = shared.ReasoningEffortMedium
-		case ReasoningEffortHigh:
+		case reasoningEffortHigh:
 			params.ReasoningEffort = shared.ReasoningEffortHigh
 		default:
 			return nil, nil, fmt.Errorf("reasoning model `%s` not supported", *providerOptions.ReasoningEffort)
diff --git a/providers/openai/provider_options.go b/providers/openai/provider_options.go
new file mode 100644
index 0000000000000000000000000000000000000000..5496563e1802c681cc1e37a9c52ea43024ae0647
--- /dev/null
+++ b/providers/openai/provider_options.go
@@ -0,0 +1,28 @@
+package openai
+
+type reasoningEffort string
+
+const (
+	reasoningEffortMinimal reasoningEffort = "minimal"
+	reasoningEffortLow     reasoningEffort = "low"
+	reasoningEffortMedium  reasoningEffort = "medium"
+	reasoningEffortHigh    reasoningEffort = "high"
+)
+
+type providerOptions struct {
+	LogitBias           map[string]int64 `json:"logit_bias"`
+	LogProbs            *bool            `json:"log_probes"`
+	TopLogProbs         *int64           `json:"top_log_probs"`
+	ParallelToolCalls   *bool            `json:"parallel_tool_calls"`
+	User                *string          `json:"user"`
+	ReasoningEffort     *reasoningEffort `json:"reasoning_effort"`
+	MaxCompletionTokens *int64           `json:"max_completion_tokens"`
+	TextVerbosity       *string          `json:"text_verbosity"`
+	Prediction          map[string]any   `json:"prediction"`
+	Store               *bool            `json:"store"`
+	Metadata            map[string]any   `json:"metadata"`
+	PromptCacheKey      *string          `json:"prompt_cache_key"`
+	SafetyIdentifier    *string          `json:"safety_identifier"`
+	ServiceTier         *string          `json:"service_tier"`
+	StructuredOutputs   *bool            `json:"structured_outputs"`
+}
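
With both option structs unexported, callers outside these packages can now only reach them through call.ProviderOptions, which prepareParams decodes via ai.ParseOptions. A minimal usage sketch, assuming ai.ProviderOptions maps a provider key to a map[string]any (which is what the type assertions in getCacheControl suggest); the exact value types ai.ParseOptions accepts are an assumption:

	// Sketch only: keys mirror the JSON tags declared in the new
	// provider_options.go files; value types are assumed.
	call := ai.Call{
		ProviderOptions: ai.ProviderOptions{
			"anthropic": map[string]any{
				"send_reasoning": true,
				"thinking":       map[string]any{"budget_tokens": int64(2048)},
			},
			"openai": map[string]any{
				"reasoning_effort":      "medium", // minimal, low, medium, or high
				"max_completion_tokens": int64(1024),
			},
		},
	}

Since the structs are populated from JSON tags alone, adding a future option should only require a new tagged field in provider_options.go, with no change to the package's exported surface.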