@@ -16,30 +16,6 @@ import (
"github.com/charmbracelet/ai"
)
-type ProviderOptions struct {
- SendReasoning *bool `json:"send_reasoning,omitempty"`
- Thinking *ThinkingProviderOption `json:"thinking,omitempty"`
- DisableParallelToolUse *bool `json:"disable_parallel_tool_use,omitempty"`
-}
-
-type ThinkingProviderOption struct {
- BudgetTokens int64 `json:"budget_tokens"`
-}
-
-type ReasoningMetadata struct {
- Signature string `json:"signature"`
- RedactedData string `json:"redacted_data"`
-}
-
-type CacheControlProviderOptions struct {
- Type string `json:"type"`
-}
-type FilePartProviderOptions struct {
- EnableCitations bool `json:"enable_citations"`
- Title string `json:"title"`
- Context string `json:"context"`
-}
-
type options struct {
baseURL string
apiKey string
@@ -147,7 +123,7 @@ func (a languageModel) Provider() string {
func (a languageModel) prepareParams(call ai.Call) (*anthropic.MessageNewParams, []ai.CallWarning, error) {
params := &anthropic.MessageNewParams{}
- providerOptions := &ProviderOptions{}
+ providerOptions := &providerOptions{}
if v, ok := call.ProviderOptions["anthropic"]; ok {
err := ai.ParseOptions(v, providerOptions)
if err != nil {
@@ -246,11 +222,11 @@ func (a languageModel) prepareParams(call ai.Call) (*anthropic.MessageNewParams,
return params, warnings, nil
}
-func getCacheControl(providerOptions ai.ProviderOptions) *CacheControlProviderOptions {
+func getCacheControl(providerOptions ai.ProviderOptions) *cacheControlProviderOptions {
if anthropicOptions, ok := providerOptions["anthropic"]; ok {
if cacheControl, ok := anthropicOptions["cache_control"]; ok {
if cc, ok := cacheControl.(map[string]any); ok {
- cacheControlOption := &CacheControlProviderOptions{}
+ cacheControlOption := &cacheControlProviderOptions{}
err := ai.ParseOptions(cc, cacheControlOption)
- if err != nil {
+ if err == nil {
return cacheControlOption
@@ -258,7 +234,7 @@ func getCacheControl(providerOptions ai.ProviderOptions) *CacheControlProviderOp
}
} else if cacheControl, ok := anthropicOptions["cacheControl"]; ok {
if cc, ok := cacheControl.(map[string]any); ok {
- cacheControlOption := &CacheControlProviderOptions{}
+ cacheControlOption := &cacheControlProviderOptions{}
err := ai.ParseOptions(cc, cacheControlOption)
- if err != nil {
+ if err == nil {
return cacheControlOption
@@ -269,9 +245,9 @@ func getCacheControl(providerOptions ai.ProviderOptions) *CacheControlProviderOp
return nil
}
-func getReasoningMetadata(providerOptions ai.ProviderOptions) *ReasoningMetadata {
+func getReasoningMetadata(providerOptions ai.ProviderOptions) *reasoningMetadata {
if anthropicOptions, ok := providerOptions["anthropic"]; ok {
- reasoningMetadata := &ReasoningMetadata{}
+ reasoningMetadata := &reasoningMetadata{}
err := ai.ParseOptions(anthropicOptions, reasoningMetadata)
- if err != nil {
+ if err == nil {
return reasoningMetadata
@@ -0,0 +1,37 @@
+package anthropic
+
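+// providerOptions holds the Anthropic-specific options parsed from
+// call.ProviderOptions["anthropic"], e.g.
+//
+//	map[string]any{"thinking": map[string]any{"budget_tokens": 1024}}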
+type providerOptions struct {
+ SendReasoning *bool `json:"send_reasoning,omitempty"`
+ Thinking *thinkingProviderOption `json:"thinking,omitempty"`
+ DisableParallelToolUse *bool `json:"disable_parallel_tool_use,omitempty"`
+}
+
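+// thinkingProviderOption configures Anthropic extended thinking and its
+// token budget.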
+type thinkingProviderOption struct {
+ BudgetTokens int64 `json:"budget_tokens"`
+}
+
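+// reasoningMetadata carries the signature and redacted data that Anthropic
+// attaches to reasoning (thinking) blocks.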
+type reasoningMetadata struct {
+ Signature string `json:"signature"`
+ RedactedData string `json:"redacted_data"`
+}
+
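+// cacheControlProviderOptions mirrors Anthropic's cache_control setting,
+// e.g. {"type": "ephemeral"}.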
+type cacheControlProviderOptions struct {
+ Type string `json:"type"`
+}
+
+// filePartProviderOptions controls citations and metadata for file parts.
+type filePartProviderOptions struct {
+ EnableCitations bool `json:"enable_citations"`
+ Title string `json:"title"`
+ Context string `json:"context"`
+}
@@ -19,33 +19,6 @@ import (
"github.com/openai/openai-go/v2/shared"
)
-type ReasoningEffort string
-
-const (
- ReasoningEffortMinimal ReasoningEffort = "minimal"
- ReasoningEffortLow ReasoningEffort = "low"
- ReasoningEffortMedium ReasoningEffort = "medium"
- ReasoningEffortHigh ReasoningEffort = "high"
-)
-
-type ProviderOptions struct {
- LogitBias map[string]int64 `json:"logit_bias"`
- LogProbs *bool `json:"log_probes"`
- TopLogProbs *int64 `json:"top_log_probs"`
- ParallelToolCalls *bool `json:"parallel_tool_calls"`
- User *string `json:"user"`
- ReasoningEffort *ReasoningEffort `json:"reasoning_effort"`
- MaxCompletionTokens *int64 `json:"max_completion_tokens"`
- TextVerbosity *string `json:"text_verbosity"`
- Prediction map[string]any `json:"prediction"`
- Store *bool `json:"store"`
- Metadata map[string]any `json:"metadata"`
- PromptCacheKey *string `json:"prompt_cache_key"`
- SafetyIdentifier *string `json:"safety_identifier"`
- ServiceTier *string `json:"service_tier"`
- StructuredOutputs *bool `json:"structured_outputs"`
-}
-
type provider struct {
options options
}
@@ -179,7 +152,7 @@ func (o languageModel) Provider() string {
func (o languageModel) prepareParams(call ai.Call) (*openai.ChatCompletionNewParams, []ai.CallWarning, error) {
params := &openai.ChatCompletionNewParams{}
messages, warnings := toPrompt(call.Prompt)
- providerOptions := &ProviderOptions{}
+ providerOptions := &providerOptions{}
if v, ok := call.ProviderOptions["openai"]; ok {
err := ai.ParseOptions(v, providerOptions)
if err != nil {
@@ -273,13 +246,13 @@ func (o languageModel) prepareParams(call ai.Call) (*openai.ChatCompletionNewPar
if providerOptions.ReasoningEffort != nil {
switch *providerOptions.ReasoningEffort {
- case ReasoningEffortMinimal:
+ case reasoningEffortMinimal:
params.ReasoningEffort = shared.ReasoningEffortMinimal
- case ReasoningEffortLow:
+ case reasoningEffortLow:
params.ReasoningEffort = shared.ReasoningEffortLow
- case ReasoningEffortMedium:
+ case reasoningEffortMedium:
params.ReasoningEffort = shared.ReasoningEffortMedium
- case ReasoningEffortHigh:
+ case reasoningEffortHigh:
params.ReasoningEffort = shared.ReasoningEffortHigh
default:
- return nil, nil, fmt.Errorf("reasoning model `%s` not supported", *providerOptions.ReasoningEffort)
+ return nil, nil, fmt.Errorf("reasoning effort `%s` not supported", *providerOptions.ReasoningEffort)
@@ -0,0 +1,34 @@
+package openai
+
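+// reasoningEffort is the requested effort level for reasoning models; it is
+// mapped onto the shared.ReasoningEffort values in prepareParams.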
+type reasoningEffort string
+
+const (
+ reasoningEffortMinimal reasoningEffort = "minimal"
+ reasoningEffortLow reasoningEffort = "low"
+ reasoningEffortMedium reasoningEffort = "medium"
+ reasoningEffortHigh reasoningEffort = "high"
+)
+
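+// providerOptions holds the OpenAI-specific options parsed from
+// call.ProviderOptions["openai"], e.g.
+//
+//	map[string]any{"reasoning_effort": "high"}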
+type providerOptions struct {
+ LogitBias map[string]int64 `json:"logit_bias"`
+ LogProbs *bool `json:"log_probes"`
+ LogProbs *bool `json:"log_probs"`
+ ParallelToolCalls *bool `json:"parallel_tool_calls"`
+ User *string `json:"user"`
+ ReasoningEffort *reasoningEffort `json:"reasoning_effort"`
+ MaxCompletionTokens *int64 `json:"max_completion_tokens"`
+ TextVerbosity *string `json:"text_verbosity"`
+ Prediction map[string]any `json:"prediction"`
+ Store *bool `json:"store"`
+ Metadata map[string]any `json:"metadata"`
+ PromptCacheKey *string `json:"prompt_cache_key"`
+ SafetyIdentifier *string `json:"safety_identifier"`
+ ServiceTier *string `json:"service_tier"`
+ StructuredOutputs *bool `json:"structured_outputs"`
+}