Detailed changes
@@ -18,26 +18,66 @@ import (
)
type languageModel struct {
- provider string
- modelID string
- client openai.Client
- prepareCallFunc PrepareLanguageModelCallFunc
+ provider string
+ modelID string
+ client openai.Client
+ prepareCallFunc LanguageModelPrepareCallFunc
+ mapFinishReasonFunc LanguageModelMapFinishReasonFunc
+ extraContentFunc LanguageModelExtraContentFunc
+ usageFunc LanguageModelUsageFunc
+ streamUsageFunc LanguageModelStreamUsageFunc
+ streamExtraFunc LanguageModelStreamExtraFunc
+ streamProviderMetadataFunc LanguageModelStreamProviderMetadataFunc
}
type LanguageModelOption = func(*languageModel)
-func WithPrepareLanguageModelCallFunc(fn PrepareLanguageModelCallFunc) LanguageModelOption {
+func WithLanguageModelPrepareCallFunc(fn LanguageModelPrepareCallFunc) LanguageModelOption {
return func(l *languageModel) {
l.prepareCallFunc = fn
}
}
+func WithLanguageModelMapFinishReasonFunc(fn LanguageModelMapFinishReasonFunc) LanguageModelOption {
+ return func(l *languageModel) {
+ l.mapFinishReasonFunc = fn
+ }
+}
+
+func WithLanguageModelExtraContentFunc(fn LanguageModelExtraContentFunc) LanguageModelOption {
+ return func(l *languageModel) {
+ l.extraContentFunc = fn
+ }
+}
+
+func WithLanguageModelStreamExtraFunc(fn LanguageModelStreamExtraFunc) LanguageModelOption {
+ return func(l *languageModel) {
+ l.streamExtraFunc = fn
+ }
+}
+
+func WithLanguageModelUsageFunc(fn LanguageModelUsageFunc) LanguageModelOption {
+ return func(l *languageModel) {
+ l.usageFunc = fn
+ }
+}
+
+func WithLanguageModelStreamUsageFunc(fn LanguageModelStreamUsageFunc) LanguageModelOption {
+ return func(l *languageModel) {
+ l.streamUsageFunc = fn
+ }
+}
+
func newLanguageModel(modelID string, provider string, client openai.Client, opts ...LanguageModelOption) languageModel {
model := languageModel{
- modelID: modelID,
- provider: provider,
- client: client,
- prepareCallFunc: defaultPrepareLanguageModelCall,
+ modelID: modelID,
+ provider: provider,
+ client: client,
+ prepareCallFunc: defaultPrepareLanguageModelCall,
+ mapFinishReasonFunc: defaultMapFinishReason,
+ usageFunc: defaultUsage,
+ streamUsageFunc: defaultStreamUsage,
+ streamProviderMetadataFunc: defaultStreamProviderMetadataFunc,
}
for _, o := range opts {
@@ -215,7 +255,10 @@ func (o languageModel) Generate(ctx context.Context, call ai.Call) (*ai.Response
Text: text,
})
}
-
+ if o.extraContentFunc != nil {
+ extraContent := o.extraContentFunc(choice)
+ content = append(content, extraContent...)
+ }
for _, tc := range choice.Message.ToolCalls {
toolCallID := tc.ID
if toolCallID == "" {
@@ -240,36 +283,12 @@ func (o languageModel) Generate(ctx context.Context, call ai.Call) (*ai.Response
}
}
- completionTokenDetails := response.Usage.CompletionTokensDetails
- promptTokenDetails := response.Usage.PromptTokensDetails
-
- // Build provider metadata
- providerMetadata := &ProviderMetadata{}
- // Add logprobs if available
- if len(choice.Logprobs.Content) > 0 {
- providerMetadata.Logprobs = choice.Logprobs.Content
- }
-
- // Add prediction tokens if available
- if completionTokenDetails.AcceptedPredictionTokens > 0 || completionTokenDetails.RejectedPredictionTokens > 0 {
- if completionTokenDetails.AcceptedPredictionTokens > 0 {
- providerMetadata.AcceptedPredictionTokens = completionTokenDetails.AcceptedPredictionTokens
- }
- if completionTokenDetails.RejectedPredictionTokens > 0 {
- providerMetadata.RejectedPredictionTokens = completionTokenDetails.RejectedPredictionTokens
- }
- }
+ usage, providerMetadata := o.usageFunc(*response)
return &ai.Response{
- Content: content,
- Usage: ai.Usage{
- InputTokens: response.Usage.PromptTokens,
- OutputTokens: response.Usage.CompletionTokens,
- TotalTokens: response.Usage.TotalTokens,
- ReasoningTokens: completionTokenDetails.ReasoningTokens,
- CacheReadTokens: promptTokenDetails.CachedTokens,
- },
- FinishReason: mapOpenAiFinishReason(choice.FinishReason),
+ Content: content,
+ Usage: usage,
+ FinishReason: o.mapFinishReasonFunc(choice),
ProviderMetadata: ai.ProviderMetadata{
Name: providerMetadata,
},
@@ -293,8 +312,11 @@ func (o languageModel) Stream(ctx context.Context, call ai.Call) (ai.StreamRespo
toolCalls := make(map[int64]streamToolCall)
// Build provider metadata for streaming
- streamProviderMetadata := &ProviderMetadata{}
+ providerMetadata := ai.ProviderMetadata{
+ Name: &ProviderMetadata{},
+ }
acc := openai.ChatCompletionAccumulator{}
+ extraContext := make(map[string]any)
var usage ai.Usage
return func(yield func(ai.StreamPart) bool) {
if len(warnings) > 0 {
@@ -308,28 +330,7 @@ func (o languageModel) Stream(ctx context.Context, call ai.Call) (ai.StreamRespo
for stream.Next() {
chunk := stream.Current()
acc.AddChunk(chunk)
- if chunk.Usage.TotalTokens > 0 {
- // we do this here because the acc does not add prompt details
- completionTokenDetails := chunk.Usage.CompletionTokensDetails
- promptTokenDetails := chunk.Usage.PromptTokensDetails
- usage = ai.Usage{
- InputTokens: chunk.Usage.PromptTokens,
- OutputTokens: chunk.Usage.CompletionTokens,
- TotalTokens: chunk.Usage.TotalTokens,
- ReasoningTokens: completionTokenDetails.ReasoningTokens,
- CacheReadTokens: promptTokenDetails.CachedTokens,
- }
-
- // Add prediction tokens if available
- if completionTokenDetails.AcceptedPredictionTokens > 0 || completionTokenDetails.RejectedPredictionTokens > 0 {
- if completionTokenDetails.AcceptedPredictionTokens > 0 {
- streamProviderMetadata.AcceptedPredictionTokens = completionTokenDetails.AcceptedPredictionTokens
- }
- if completionTokenDetails.RejectedPredictionTokens > 0 {
- streamProviderMetadata.RejectedPredictionTokens = completionTokenDetails.RejectedPredictionTokens
- }
- }
- }
+ usage, providerMetadata = o.streamUsageFunc(chunk, extraContext, providerMetadata)
if len(chunk.Choices) == 0 {
continue
}
@@ -464,6 +465,14 @@ func (o languageModel) Stream(ctx context.Context, call ai.Call) (ai.StreamRespo
}
}
}
+
+ if o.streamExtraFunc != nil {
+ updatedContext, shouldContinue := o.streamExtraFunc(chunk, yield, extraContext)
+ if !shouldContinue {
+ return
+ }
+ extraContext = updatedContext
+ }
}
// Check for annotations in the delta's raw JSON
@@ -498,14 +507,13 @@ func (o languageModel) Stream(ctx context.Context, call ai.Call) (ai.StreamRespo
}
}
- // Add logprobs if available
- if len(acc.Choices) > 0 && len(acc.Choices[0].Logprobs.Content) > 0 {
- streamProviderMetadata.Logprobs = acc.Choices[0].Logprobs.Content
- }
-
- // Handle annotations/citations from accumulated response
if len(acc.Choices) > 0 {
- for _, annotation := range acc.Choices[0].Message.Annotations {
+ choice := acc.Choices[0]
+ // Add logprobs if available
+ providerMetadata = o.streamProviderMetadataFunc(choice, providerMetadata)
+
+ // Handle annotations/citations from accumulated response
+ for _, annotation := range choice.Message.Annotations {
if annotation.Type == "url_citation" {
if !yield(ai.StreamPart{
Type: ai.StreamPartTypeSource,
@@ -519,15 +527,15 @@ func (o languageModel) Stream(ctx context.Context, call ai.Call) (ai.StreamRespo
}
}
}
-
- finishReason := mapOpenAiFinishReason(acc.Choices[0].FinishReason)
+ finishReason := ai.FinishReasonUnknown
+ if len(acc.Choices) > 0 {
+ finishReason = o.mapFinishReasonFunc(acc.Choices[0])
+ }
yield(ai.StreamPart{
- Type: ai.StreamPartTypeFinish,
- Usage: usage,
- FinishReason: finishReason,
- ProviderMetadata: ai.ProviderMetadata{
- Name: streamProviderMetadata,
- },
+ Type: ai.StreamPartTypeFinish,
+ Usage: usage,
+ FinishReason: finishReason,
+ ProviderMetadata: providerMetadata,
})
return
} else {
@@ -540,21 +548,6 @@ func (o languageModel) Stream(ctx context.Context, call ai.Call) (ai.StreamRespo
}, nil
}
-func mapOpenAiFinishReason(finishReason string) ai.FinishReason {
- switch finishReason {
- case "stop":
- return ai.FinishReasonStop
- case "length":
- return ai.FinishReasonLength
- case "content_filter":
- return ai.FinishReasonContentFilter
- case "function_call", "tool_calls":
- return ai.FinishReasonToolCalls
- default:
- return ai.FinishReasonUnknown
- }
-}
-
func isReasoningModel(modelID string) bool {
return strings.HasPrefix(modelID, "o") || strings.HasPrefix(modelID, "gpt-5") || strings.HasPrefix(modelID, "gpt-5-chat")
}
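
To make the new extension points concrete, here is a minimal sketch of a downstream provider overriding only the finish-reason hook while inheriting the other defaults. It assumes an `openai.New` constructor analogous to the `openrouter.New` wiring shown further down, and the `"eos"` finish reason is an invented example value:

```go
package example

import (
	"github.com/charmbracelet/fantasy/ai"
	"github.com/charmbracelet/fantasy/openai"
	openaisdk "github.com/openai/openai-go/v2"
)

func newProviderWithCustomFinish() ai.Provider {
	mapFinish := func(choice openaisdk.ChatCompletionChoice) ai.FinishReason {
		// Assumption: the upstream reports "eos" where OpenAI would say "stop".
		if choice.FinishReason == "eos" {
			return ai.FinishReasonStop
		}
		return ai.FinishReasonUnknown
	}
	return openai.New( // assumed constructor, analogous to openrouter.New
		openai.WithLanguageModelOptions(
			openai.WithLanguageModelMapFinishReasonFunc(mapFinish),
		),
	)
}
```

Hooks left unset keep their defaults (`defaultMapFinishReason`, `defaultUsage`, and so on), while the nil-checked hooks (`extraContentFunc`, `streamExtraFunc`) are simply skipped.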
@@ -9,7 +9,15 @@ import (
"github.com/openai/openai-go/v2/shared"
)
-type PrepareLanguageModelCallFunc = func(model ai.LanguageModel, params *openai.ChatCompletionNewParams, call ai.Call) ([]ai.CallWarning, error)
+type (
+ LanguageModelPrepareCallFunc = func(model ai.LanguageModel, params *openai.ChatCompletionNewParams, call ai.Call) ([]ai.CallWarning, error)
+ LanguageModelMapFinishReasonFunc = func(choice openai.ChatCompletionChoice) ai.FinishReason
+ LanguageModelUsageFunc = func(response openai.ChatCompletion) (ai.Usage, ai.ProviderOptionsData)
+ LanguageModelExtraContentFunc = func(choice openai.ChatCompletionChoice) []ai.Content
+ LanguageModelStreamExtraFunc = func(chunk openai.ChatCompletionChunk, yield func(ai.StreamPart) bool, ctx map[string]any) (map[string]any, bool)
+ LanguageModelStreamUsageFunc = func(chunk openai.ChatCompletionChunk, ctx map[string]any, metadata ai.ProviderMetadata) (ai.Usage, ai.ProviderMetadata)
+ LanguageModelStreamProviderMetadataFunc = func(choice openai.ChatCompletionChoice, metadata ai.ProviderMetadata) ai.ProviderMetadata
+)
func defaultPrepareLanguageModelCall(model ai.LanguageModel, params *openai.ChatCompletionNewParams, call ai.Call) ([]ai.CallWarning, error) {
if call.ProviderOptions == nil {
@@ -147,3 +155,103 @@ func defaultPrepareLanguageModelCall(model ai.LanguageModel, params *openai.Chat
}
return warnings, nil
}
+
+func defaultMapFinishReason(choice openai.ChatCompletionChoice) ai.FinishReason {
+ finishReason := choice.FinishReason
+ switch finishReason {
+ case "stop":
+ return ai.FinishReasonStop
+ case "length":
+ return ai.FinishReasonLength
+ case "content_filter":
+ return ai.FinishReasonContentFilter
+ case "function_call", "tool_calls":
+ return ai.FinishReasonToolCalls
+ default:
+ return ai.FinishReasonUnknown
+ }
+}
+
+func defaultUsage(response openai.ChatCompletion) (ai.Usage, ai.ProviderOptionsData) {
+ if len(response.Choices) == 0 {
+ return ai.Usage{}, nil
+ }
+ choice := response.Choices[0]
+ completionTokenDetails := response.Usage.CompletionTokensDetails
+ promptTokenDetails := response.Usage.PromptTokensDetails
+
+ // Build provider metadata
+ providerMetadata := &ProviderMetadata{}
+ // Add logprobs if available
+ if len(choice.Logprobs.Content) > 0 {
+ providerMetadata.Logprobs = choice.Logprobs.Content
+ }
+
+ // Add prediction tokens if available
+ if completionTokenDetails.AcceptedPredictionTokens > 0 || completionTokenDetails.RejectedPredictionTokens > 0 {
+ if completionTokenDetails.AcceptedPredictionTokens > 0 {
+ providerMetadata.AcceptedPredictionTokens = completionTokenDetails.AcceptedPredictionTokens
+ }
+ if completionTokenDetails.RejectedPredictionTokens > 0 {
+ providerMetadata.RejectedPredictionTokens = completionTokenDetails.RejectedPredictionTokens
+ }
+ }
+ return ai.Usage{
+ InputTokens: response.Usage.PromptTokens,
+ OutputTokens: response.Usage.CompletionTokens,
+ TotalTokens: response.Usage.TotalTokens,
+ ReasoningTokens: completionTokenDetails.ReasoningTokens,
+ CacheReadTokens: promptTokenDetails.CachedTokens,
+ }, providerMetadata
+}
+
+func defaultStreamUsage(chunk openai.ChatCompletionChunk, ctx map[string]any, metadata ai.ProviderMetadata) (ai.Usage, ai.ProviderMetadata) {
+ if chunk.Usage.TotalTokens == 0 {
+ // no usage on this chunk; keep the metadata accumulated so far
+ return ai.Usage{}, metadata
+ }
+ streamProviderMetadata := &ProviderMetadata{}
+ if metadata != nil {
+ if providerMetadata, ok := metadata[Name]; ok {
+ converted, ok := providerMetadata.(*ProviderMetadata)
+ if ok {
+ streamProviderMetadata = converted
+ }
+ }
+ }
+ // computed here because the accumulator does not populate prompt token details
+ completionTokenDetails := chunk.Usage.CompletionTokensDetails
+ promptTokenDetails := chunk.Usage.PromptTokensDetails
+ usage := ai.Usage{
+ InputTokens: chunk.Usage.PromptTokens,
+ OutputTokens: chunk.Usage.CompletionTokens,
+ TotalTokens: chunk.Usage.TotalTokens,
+ ReasoningTokens: completionTokenDetails.ReasoningTokens,
+ CacheReadTokens: promptTokenDetails.CachedTokens,
+ }
+
+ // Add prediction tokens if available
+ if completionTokenDetails.AcceptedPredictionTokens > 0 || completionTokenDetails.RejectedPredictionTokens > 0 {
+ if completionTokenDetails.AcceptedPredictionTokens > 0 {
+ streamProviderMetadata.AcceptedPredictionTokens = completionTokenDetails.AcceptedPredictionTokens
+ }
+ if completionTokenDetails.RejectedPredictionTokens > 0 {
+ streamProviderMetadata.RejectedPredictionTokens = completionTokenDetails.RejectedPredictionTokens
+ }
+ }
+
+ return usage, ai.ProviderMetadata{
+ Name: streamProviderMetadata,
+ }
+}
+
+func defaultStreamProviderMetadataFunc(choice openai.ChatCompletionChoice, metadata ai.ProviderMetadata) ai.ProviderMetadata {
+ if metadata == nil { // guard against hooks that return nil metadata
+ metadata = ai.ProviderMetadata{}
+ }
+ streamProviderMetadata, ok := metadata[Name]
+ if !ok {
+ streamProviderMetadata = &ProviderMetadata{}
+ }
+ if converted, ok := streamProviderMetadata.(*ProviderMetadata); ok {
+ converted.Logprobs = choice.Logprobs.Content
+ metadata[Name] = converted
+ }
+ return metadata
+}
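
The streaming hooks are threaded through every chunk: `Stream` hands the shared `extraContext` map and the current metadata to `streamUsageFunc` and stores whatever comes back. A hedged sketch of a custom hook honoring that contract; the `"chunks"` context key is invented, and `openaisdk` is the SDK alias used in the openrouter file:

```go
package example

import (
	"github.com/charmbracelet/fantasy/ai"
	openaisdk "github.com/openai/openai-go/v2"
)

// countingStreamUsage satisfies LanguageModelStreamUsageFunc.
func countingStreamUsage(chunk openaisdk.ChatCompletionChunk, ctx map[string]any, metadata ai.ProviderMetadata) (ai.Usage, ai.ProviderMetadata) {
	// Track how many chunks we have seen in the shared context map.
	n, _ := ctx["chunks"].(int)
	ctx["chunks"] = n + 1
	if chunk.Usage.TotalTokens == 0 {
		// No usage on this chunk: return the accumulated metadata untouched.
		return ai.Usage{}, metadata
	}
	return ai.Usage{
		InputTokens:  chunk.Usage.PromptTokens,
		OutputTokens: chunk.Usage.CompletionTokens,
		TotalTokens:  chunk.Usage.TotalTokens,
	}, metadata
}
```

Returning `metadata` rather than nil on the empty path matters: the finish part reuses whatever the last invocation returned.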
@@ -1,6 +1,8 @@
package openrouter
import (
+ "encoding/json"
+ "fmt"
"maps"
"github.com/charmbracelet/fantasy/ai"
@@ -8,7 +10,9 @@ import (
"github.com/openai/openai-go/v2/packages/param"
)
-func prepareLanguageModelCall(model ai.LanguageModel, params *openaisdk.ChatCompletionNewParams, call ai.Call) ([]ai.CallWarning, error) {
+const reasoningStartedCtx = "reasoning_started"
+
+func languageModelPrepareCall(model ai.LanguageModel, params *openaisdk.ChatCompletionNewParams, call ai.Call) ([]ai.CallWarning, error) {
providerOptions := &ProviderOptions{}
if v, ok := call.ProviderOptions[Name]; ok {
providerOptions, ok = v.(*ProviderOptions)
@@ -39,6 +43,10 @@ func prepareLanguageModelCall(model ai.LanguageModel, params *openaisdk.ChatComp
extraFields["usage"] = map[string]any{
"include": *providerOptions.IncludeUsage,
}
+ } else { // default to including usage accounting
+ extraFields["usage"] = map[string]any{
+ "include": true,
+ }
}
if providerOptions.LogitBias != nil {
params.LogitBias = providerOptions.LogitBias
@@ -57,3 +65,198 @@ func prepareLanguageModelCall(model ai.LanguageModel, params *openaisdk.ChatComp
params.SetExtraFields(extraFields)
return nil, nil
}
+
+func languageModelMapFinishReason(choice openaisdk.ChatCompletionChoice) ai.FinishReason {
+ finishReason := choice.FinishReason
+ switch finishReason {
+ case "stop":
+ return ai.FinishReasonStop
+ case "length":
+ return ai.FinishReasonLength
+ case "content_filter":
+ return ai.FinishReasonContentFilter
+ case "function_call", "tool_calls":
+ return ai.FinishReasonToolCalls
+ default:
+ // For streaming responses, the OpenAI accumulator does not work as expected
+ // with some providers and can leave the finish reason empty, so handle that here.
+ if len(choice.Message.ToolCalls) > 0 {
+ return ai.FinishReasonToolCalls
+ } else if finishReason == "" {
+ return ai.FinishReasonStop
+ }
+ return ai.FinishReasonUnknown
+ }
+}
+
+func languageModelExtraContent(choice openaisdk.ChatCompletionChoice) []ai.Content {
+ var content []ai.Content
+ reasoningData := ReasoningData{}
+ err := json.Unmarshal([]byte(choice.Message.RawJSON()), &reasoningData)
+ if err != nil {
+ return content
+ }
+ for _, detail := range reasoningData.ReasoningDetails {
+ switch detail.Type {
+ case "reasoning.text":
+ content = append(content, ai.ReasoningContent{
+ Text: detail.Text,
+ })
+ case "reasoning.summary":
+ content = append(content, ai.ReasoningContent{
+ Text: detail.Summary,
+ })
+ case "reasoning.encrypted":
+ content = append(content, ai.ReasoningContent{
+ Text: "[REDACTED]",
+ })
+ }
+ }
+ return content
+}
+
+func extractReasoningContext(ctx map[string]any) bool {
+ reasoningStarted, ok := ctx[reasoningStartedCtx]
+ if !ok {
+ return false
+ }
+ b, ok := reasoningStarted.(bool)
+ if !ok {
+ return false
+ }
+ return b
+}
+
+func languageModelStreamExtra(chunk openaisdk.ChatCompletionChunk, yield func(ai.StreamPart) bool, ctx map[string]any) (map[string]any, bool) {
+ if len(chunk.Choices) == 0 {
+ return ctx, true
+ }
+
+ reasoningStarted := extractReasoningContext(ctx)
+
+ for inx, choice := range chunk.Choices {
+ reasoningData := ReasoningData{}
+ err := json.Unmarshal([]byte(choice.Delta.RawJSON()), &reasoningData)
+ if err != nil {
+ yield(ai.StreamPart{
+ Type: ai.StreamPartTypeError,
+ Error: ai.NewAIError("Unexpected", "error unmarshalling delta", err),
+ })
+ return ctx, false
+ }
+
+ emitEvent := func(reasoningContent string) bool {
+ if !reasoningStarted {
+ // mark the start so later chunks don't emit another reasoning-start part
+ ctx[reasoningStartedCtx] = true
+ reasoningStarted = true
+ shouldContinue := yield(ai.StreamPart{
+ Type: ai.StreamPartTypeReasoningStart,
+ ID: fmt.Sprintf("%d", inx),
+ })
+ if !shouldContinue {
+ return false
+ }
+ }
+
+ return yield(ai.StreamPart{
+ Type: ai.StreamPartTypeReasoningDelta,
+ ID: fmt.Sprintf("%d", inx),
+ Delta: reasoningContent,
+ })
+ }
+ if len(reasoningData.ReasoningDetails) > 0 {
+ for _, detail := range reasoningData.ReasoningDetails {
+ switch detail.Type {
+ case "reasoning.text":
+ return ctx, emitEvent(detail.Text)
+ case "reasoning.summary":
+ return ctx, emitEvent(detail.Summary)
+ case "reasoning.encrypted":
+ return ctx, emitEvent("[REDACTED]")
+ }
+ }
+ } else if reasoningData.Reasoning != "" {
+ return ctx, emitEvent(reasoningData.Reasoning)
+ }
+ if reasoningStarted && (choice.Delta.Content != "" || len(choice.Delta.ToolCalls) > 0) {
+ ctx[reasoningStartedCtx] = false
+ return ctx, yield(ai.StreamPart{
+ Type: ai.StreamPartTypeReasoningEnd,
+ ID: fmt.Sprintf("%d", inx),
+ })
+ }
+ }
+ return ctx, true
+}
+
+func languageModelUsage(response openaisdk.ChatCompletion) (ai.Usage, ai.ProviderOptionsData) {
+ if len(response.Choices) == 0 {
+ return ai.Usage{}, nil
+ }
+ openrouterUsage := UsageAccounting{}
+ usage := response.Usage
+
+ _ = json.Unmarshal([]byte(usage.RawJSON()), &openrouterUsage)
+
+ completionTokenDetails := usage.CompletionTokensDetails
+ promptTokenDetails := usage.PromptTokensDetails
+
+ var provider string
+ if p, ok := response.JSON.ExtraFields["provider"]; ok {
+ provider = p.Raw()
+ }
+
+ // Build provider metadata
+ providerMetadata := &ProviderMetadata{
+ Provider: provider,
+ Usage: openrouterUsage,
+ }
+
+ return ai.Usage{
+ InputTokens: usage.PromptTokens,
+ OutputTokens: usage.CompletionTokens,
+ TotalTokens: usage.TotalTokens,
+ ReasoningTokens: completionTokenDetails.ReasoningTokens,
+ CacheReadTokens: promptTokenDetails.CachedTokens,
+ }, providerMetadata
+}
+
+func languageModelStreamUsage(chunk openaisdk.ChatCompletionChunk, _ map[string]any, metadata ai.ProviderMetadata) (ai.Usage, ai.ProviderMetadata) {
+ usage := chunk.Usage
+ if usage.TotalTokens == 0 {
+ // no usage on this chunk; keep the metadata accumulated so far
+ return ai.Usage{}, metadata
+ }
+
+ streamProviderMetadata := &ProviderMetadata{}
+ if metadata != nil {
+ if providerMetadata, ok := metadata[Name]; ok {
+ converted, ok := providerMetadata.(*ProviderMetadata)
+ if ok {
+ streamProviderMetadata = converted
+ }
+ }
+ }
+ openrouterUsage := UsageAccounting{}
+ _ = json.Unmarshal([]byte(usage.RawJSON()), &openrouterUsage)
+ streamProviderMetadata.Usage = openrouterUsage
+
+ if p, ok := chunk.JSON.ExtraFields["provider"]; ok {
+ streamProviderMetadata.Provider = p.Raw()
+ }
+
+ // computed here because the accumulator does not populate prompt token details
+ completionTokenDetails := usage.CompletionTokensDetails
+ promptTokenDetails := usage.PromptTokensDetails
+ aiUsage := ai.Usage{
+ InputTokens: usage.PromptTokens,
+ OutputTokens: usage.CompletionTokens,
+ TotalTokens: usage.TotalTokens,
+ ReasoningTokens: completionTokenDetails.ReasoningTokens,
+ CacheReadTokens: promptTokenDetails.CachedTokens,
+ }
+
+ return aiUsage, ai.ProviderMetadata{
+ Name: streamProviderMetadata,
+ }
+}
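
For reference, this is the shape of delta payload the reasoning helpers expect. The field names come from the `ReasoningData`/`ReasoningDetail` structs defined below; the concrete values are invented:

```go
package example

import (
	"encoding/json"
	"fmt"

	"github.com/charmbracelet/fantasy/openrouter"
)

func main() {
	raw := []byte(`{
		"reasoning": "",
		"reasoning_details": [
			{"type": "reasoning.text", "text": "First, add 2 and 3."},
			{"type": "reasoning.encrypted"}
		]
	}`)
	var data openrouter.ReasoningData
	if err := json.Unmarshal(raw, &data); err != nil {
		panic(err)
	}
	// languageModelExtraContent maps this to one reasoning part carrying the
	// text and one "[REDACTED]" placeholder for the encrypted detail.
	fmt.Println(len(data.ReasoningDetails)) // 2
}
```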
@@ -14,6 +14,7 @@ type options struct {
const (
DefaultURL = "https://openrouter.ai/api/v1"
+ Name = "openrouter"
)
type Option = func(*options)
@@ -21,9 +22,15 @@ type Option = func(*options)
func New(opts ...Option) ai.Provider {
providerOptions := options{
openaiOptions: []openai.Option{
+ openai.WithName(Name),
openai.WithBaseURL(DefaultURL),
openai.WithLanguageModelOptions(
- openai.WithPrepareLanguageModelCallFunc(prepareLanguageModelCall),
+ openai.WithLanguageModelPrepareCallFunc(languageModelPrepareCall),
+ openai.WithLanguageModelUsageFunc(languageModelUsage),
+ openai.WithLanguageModelStreamUsageFunc(languageModelStreamUsage),
+ openai.WithLanguageModelStreamExtraFunc(languageModelStreamExtra),
+ openai.WithLanguageModelExtraContentFunc(languageModelExtraContent),
+ openai.WithLanguageModelMapFinishReasonFunc(languageModelMapFinishReason),
),
},
}
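
Because `usage.include` now defaults to true, callers that want the old behavior must opt out explicitly. A sketch, assuming `ai.Call.ProviderOptions` is the string-keyed map seen in the tests and that `ProviderOptions.IncludeUsage` is the `*bool` consulted by `languageModelPrepareCall`:

```go
package example

import (
	"github.com/charmbracelet/fantasy/ai"
	"github.com/charmbracelet/fantasy/openrouter"
)

// callWithoutUsageAccounting opts a single call out of OpenRouter usage
// accounting; all other Call fields are elided.
func callWithoutUsageAccounting() ai.Call {
	includeUsage := false // assumption: IncludeUsage is a *bool
	return ai.Call{
		ProviderOptions: ai.ProviderOptions{
			openrouter.Name: &openrouter.ProviderOptions{
				IncludeUsage: &includeUsage,
			},
		},
	}
}
```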
@@ -4,8 +4,6 @@ import (
"github.com/charmbracelet/fantasy/ai"
)
-const Name = "openrouter"
-
type ReasoningEffort string
const (
@@ -14,7 +12,34 @@ const (
ReasoningEffortHigh ReasoningEffort = "high"
)
-type ProviderMetadata struct{}
+type PromptTokensDetails struct {
+ CachedTokens int64 `json:"cached_tokens"`
+}
+
+type CompletionTokensDetails struct {
+ ReasoningTokens int64 `json:"reasoning_tokens"`
+}
+
+type CostDetails struct {
+ UpstreamInferenceCost float64 `json:"upstream_inference_cost"`
+ UpstreamInferencePromptCost float64 `json:"upstream_inference_prompt_cost"`
+ UpstreamInferenceCompletionsCost float64 `json:"upstream_inference_completions_cost"`
+}
+
+type UsageAccounting struct {
+ PromptTokens int64 `json:"prompt_tokens"`
+ PromptTokensDetails PromptTokensDetails `json:"prompt_tokens_details"`
+ CompletionTokens int64 `json:"completion_tokens"`
+ CompletionTokensDetails CompletionTokensDetails `json:"completion_tokens_details"`
+ TotalTokens int64 `json:"total_tokens"`
+ Cost float64 `json:"cost"`
+ CostDetails CostDetails `json:"cost_details"`
+}
+
+type ProviderMetadata struct {
+ Provider string `json:"provider"`
+ Usage UsageAccounting `json:"usage"`
+}
func (*ProviderMetadata) Options() {}
@@ -70,6 +95,16 @@ type ProviderOptions struct {
func (*ProviderOptions) Options() {}
+type ReasoningDetail struct {
+ Type string `json:"type"`
+ Text string `json:"text"`
+ Summary string `json:"summary"`
+}
+type ReasoningData struct {
+ Reasoning string `json:"reasoning"`
+ ReasoningDetails []ReasoningDetail `json:"reasoning_details"`
+}
+
func ReasoningEffortOption(e ReasoningEffort) *ReasoningEffort {
return &e
}
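
The `UsageAccounting` shape lines up with the usage object OpenRouter actually returns; the values here are copied from the recorded non-streaming kimi-k2 interaction in the cassette below:

```go
package example

import (
	"encoding/json"
	"fmt"

	"github.com/charmbracelet/fantasy/openrouter"
)

func main() {
	// Usage object from the recorded kimi-k2 completion.
	raw := []byte(`{
		"prompt_tokens": 20,
		"completion_tokens": 3,
		"total_tokens": 23,
		"cost": 0.000016,
		"prompt_tokens_details": {"cached_tokens": 0},
		"completion_tokens_details": {"reasoning_tokens": 0},
		"cost_details": {
			"upstream_inference_prompt_cost": 0.00001,
			"upstream_inference_completions_cost": 0.000006
		}
	}`)
	var u openrouter.UsageAccounting
	if err := json.Unmarshal(raw, &u); err != nil {
		panic(err)
	}
	fmt.Println(u.TotalTokens, u.Cost) // 23 1.6e-05
}
```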
@@ -9,6 +9,7 @@ import (
"github.com/charmbracelet/fantasy/anthropic"
"github.com/charmbracelet/fantasy/google"
"github.com/charmbracelet/fantasy/openai"
+ "github.com/charmbracelet/fantasy/openrouter"
"gopkg.in/dnaeon/go-vcr.v4/pkg/recorder"
)
@@ -25,12 +26,14 @@ var languageModelBuilders = []builderPair{
{"anthropic-claude-sonnet", builderAnthropicClaudeSonnet4},
{"google-gemini-2.5-flash", builderGoogleGemini25Flash},
{"google-gemini-2.5-pro", builderGoogleGemini25Pro},
+ {"openrouter-kimi-k2", builderOpenrouterKimiK2},
}
var thinkingLanguageModelBuilders = []builderPair{
{"openai-gpt-5", builderOpenaiGpt5},
{"anthropic-claude-sonnet", builderAnthropicClaudeSonnet4},
{"google-gemini-2.5-pro", builderGoogleGemini25Pro},
+ {"openrouter-glm-4.5", builderOpenrouterGLM45},
}
func builderOpenaiGpt4o(r *recorder.Recorder) (ai.LanguageModel, error) {
@@ -80,3 +83,19 @@ func builderGoogleGemini25Pro(r *recorder.Recorder) (ai.LanguageModel, error) {
)
return provider.LanguageModel("gemini-2.5-pro")
}
+
+func builderOpenrouterKimiK2(r *recorder.Recorder) (ai.LanguageModel, error) {
+ provider := openrouter.New(
+ openrouter.WithAPIKey(os.Getenv("OPENROUTER_API_KEY")),
+ openrouter.WithHTTPClient(&http.Client{Transport: r}),
+ )
+ return provider.LanguageModel("moonshotai/kimi-k2-0905")
+}
+
+func builderOpenrouterGLM45(r *recorder.Recorder) (ai.LanguageModel, error) {
+ provider := openrouter.New(
+ openrouter.WithAPIKey(os.Getenv("OPENROUTER_API_KEY")),
+ openrouter.WithHTTPClient(&http.Client{Transport: r}),
+ )
+ return provider.LanguageModel("z-ai/glm-4.5")
+}
@@ -10,6 +10,7 @@ import (
"github.com/charmbracelet/fantasy/anthropic"
"github.com/charmbracelet/fantasy/google"
"github.com/charmbracelet/fantasy/openai"
+ "github.com/charmbracelet/fantasy/openrouter"
_ "github.com/joho/godotenv/autoload"
"github.com/stretchr/testify/require"
)
@@ -119,6 +120,11 @@ func TestThinking(t *testing.T) {
"openai": &openai.ProviderOptions{
ReasoningEffort: openai.ReasoningEffortOption(openai.ReasoningEffortMedium),
},
+ "openrouter": &openrouter.ProviderOptions{
+ Reasoning: &openrouter.ReasoningOptions{
+ Effort: openrouter.ReasoningEffortOption(openrouter.ReasoningEffortHigh),
+ },
+ },
},
})
require.NoError(t, err, "failed to generate")
@@ -128,7 +134,7 @@ func TestThinking(t *testing.T) {
got := result.Response.Content.Text()
require.True(t, strings.Contains(got, want1) && strings.Contains(got, want2), "unexpected response: got %q, want %q %q", got, want1, want2)
- testThinkingSteps(t, languageModel.Provider(), result.Steps)
+ testThinking(t, languageModel.Provider(), result.Steps)
})
}
}
@@ -184,7 +190,7 @@ func TestThinkingStreaming(t *testing.T) {
got := result.Response.Content.Text()
require.True(t, strings.Contains(got, want1) && strings.Contains(got, want2), "unexpected response: got %q, want %q %q", got, want1, want2)
- testThinkingSteps(t, languageModel.Provider(), result.Steps)
+ testThinking(t, languageModel.Provider(), result.Steps)
})
}
}
@@ -299,3 +305,74 @@ func TestStreamWithTools(t *testing.T) {
})
}
}
+
+func TestStreamWithMultipleTools(t *testing.T) {
+ for _, pair := range languageModelBuilders {
+ t.Run(pair.name, func(t *testing.T) {
+ r := newRecorder(t)
+
+ languageModel, err := pair.builder(r)
+ require.NoError(t, err, "failed to build language model")
+
+ type CalculatorInput struct {
+ A int `json:"a" description:"first number"`
+ B int `json:"b" description:"second number"`
+ }
+
+ addTool := ai.NewAgentTool(
+ "add",
+ "Add two numbers",
+ func(ctx context.Context, input CalculatorInput, _ ai.ToolCall) (ai.ToolResponse, error) {
+ result := input.A + input.B
+ return ai.NewTextResponse(strings.TrimSpace(strconv.Itoa(result))), nil
+ },
+ )
+ multiplyTool := ai.NewAgentTool(
+ "multiply",
+ "Multiply two numbers",
+ func(ctx context.Context, input CalculatorInput, _ ai.ToolCall) (ai.ToolResponse, error) {
+ result := input.A * input.B
+ return ai.NewTextResponse(strings.TrimSpace(strconv.Itoa(result))), nil
+ },
+ )
+
+ agent := ai.NewAgent(
+ languageModel,
+ ai.WithSystemPrompt("You are a helpful assistant. Always use both add and multiply at the same time."),
+ ai.WithTools(addTool),
+ ai.WithTools(multiplyTool),
+ )
+
+ toolCallCount := 0
+ toolResultCount := 0
+ var collectedText strings.Builder
+
+ streamCall := ai.AgentStreamCall{
+ Prompt: "Add and multiply the number 2 and 3",
+ OnTextDelta: func(id, text string) error {
+ collectedText.WriteString(text)
+ return nil
+ },
+ OnToolCall: func(toolCall ai.ToolCallContent) error {
+ toolCallCount++
+ return nil
+ },
+ OnToolResult: func(result ai.ToolResultContent) error {
+ toolResultCount++
+ return nil
+ },
+ }
+
+ result, err := agent.Stream(t.Context(), streamCall)
+ require.NoError(t, err, "failed to stream")
+ require.Equal(t, 2, len(result.Steps), "expected all tool calls in the first step and the final answer in the second")
+ finalText := result.Response.Content.Text()
+ require.Contains(t, finalText, "5", "expected response to contain '5', got: %q", finalText)
+ require.Contains(t, finalText, "6", "expected response to contain '6', got: %q", finalText)
+
+ require.Greater(t, toolCallCount, 0, "expected at least one tool call")
+
+ require.Greater(t, toolResultCount, 0, "expected at least one tool result")
+ })
+ }
+}
@@ -0,0 +1,33 @@
+---
+version: 2
+interactions:
+ - id: 0
+ request:
+ proto: HTTP/1.1
+ proto_major: 1
+ proto_minor: 1
+ content_length: 180
+ host: ""
+ body: '{"messages":[{"content":"You are a helpful assistant","role":"system"},{"content":"Say hi in Portuguese","role":"user"}],"model":"moonshotai/kimi-k2-0905","usage":{"include":true}}'
+ headers:
+ Accept:
+ - application/json
+ Content-Type:
+ - application/json
+ User-Agent:
+ - OpenAI/Go 2.3.0
+ url: https://openrouter.ai/api/v1/chat/completions
+ method: POST
+ response:
+ proto: HTTP/2.0
+ proto_major: 2
+ proto_minor: 0
+ content_length: -1
+ uncompressed: true
+ body: '{"id":"gen-1758536663-isAYACl4o5P2svHApNjR","provider":"DeepInfra","model":"moonshotai/kimi-k2-0905","object":"chat.completion","created":1758536663,"choices":[{"logprobs":null,"finish_reason":"stop","native_finish_reason":"stop","index":0,"message":{"role":"assistant","content":"Olรก!","refusal":null,"reasoning":null}}],"usage":{"prompt_tokens":20,"completion_tokens":3,"total_tokens":23,"cost":0.000016,"is_byok":false,"prompt_tokens_details":{"cached_tokens":0,"audio_tokens":0},"cost_details":{"upstream_inference_cost":null,"upstream_inference_prompt_cost":0.00001,"upstream_inference_completions_cost":0.000006},"completion_tokens_details":{"reasoning_tokens":0,"image_tokens":0}}}'
+ headers:
+ Content-Type:
+ - application/json
+ status: 200 OK
+ code: 200
+ duration: 937.162041ms
@@ -0,0 +1,32 @@
+---
+version: 2
+interactions:
+- id: 0
+ request:
+ proto: HTTP/1.1
+ proto_major: 1
+ proto_minor: 1
+ content_length: 242
+ host: ""
+ body: "{\"messages\":[{\"content\":\"You are a helpful assistant\",\"role\":\"system\"},{\"content\":\"Count from 1 to 3 in Spanish\",\"role\":\"user\"}],\"model\":\"moonshotai/kimi-k2-0905\",\"stream_options\":{\"include_usage\":true},\"usage\":{\"include\":true},\"stream\":true}"
+ headers:
+ Accept:
+ - application/json
+ Content-Type:
+ - application/json
+ User-Agent:
+ - OpenAI/Go 2.3.0
+ url: https://openrouter.ai/api/v1/chat/completions
+ method: POST
+ response:
+ proto: HTTP/2.0
+ proto_major: 2
+ proto_minor: 0
+ content_length: -1
@@ -0,0 +1,61 @@
+---
+version: 2
+interactions:
+- id: 0
+ request:
+ proto: HTTP/1.1
+ proto_major: 1
+ proto_minor: 1
+ content_length: 812
+ host: ""
+ body: "{\"max_tokens\":4096,\"messages\":[{\"content\":[{\"text\":\"Add and multiply the number 2 and 3\",\"type\":\"text\"}],\"role\":\"user\"}],\"model\":\"claude-sonnet-4-20250514\",\"system\":[{\"text\":\"You are a helpful assistant. Always use both add and multiply at the same time.\",\"type\":\"text\"}],\"tool_choice\":{\"disable_parallel_tool_use\":false,\"type\":\"auto\"},\"tools\":[{\"input_schema\":{\"properties\":{\"a\":{\"description\":\"first number\",\"type\":\"integer\"},\"b\":{\"description\":\"second number\",\"type\":\"integer\"}},\"required\":[\"a\",\"b\"],\"type\":\"object\"},\"name\":\"add\",\"description\":\"Add two numbers\"},{\"input_schema\":{\"properties\":{\"a\":{\"description\":\"first number\",\"type\":\"integer\"},\"b\":{\"description\":\"second number\",\"type\":\"integer\"}},\"required\":[\"a\",\"b\"],\"type\":\"object\"},\"name\":\"multiply\",\"description\":\"Multiply two numbers\"}],\"stream\":true}"
+ headers:
+ Accept:
+ - application/json
+ Content-Type:
+ - application/json
+ User-Agent:
+ - Anthropic/Go 1.10.0
+ url: https://api.anthropic.com/v1/messages
+ method: POST
+ response:
+ proto: HTTP/2.0
+ proto_major: 2
+ proto_minor: 0
+ content_length: -1
@@ -0,0 +1,63 @@
+---
+version: 2
+interactions:
+- id: 0
+ request:
+ proto: HTTP/1.1
+ proto_major: 1
+ proto_minor: 1
+ content_length: 787
+ host: generativelanguage.googleapis.com
+ body: "{\"contents\":[{\"parts\":[{\"text\":\"Add and multiply the number 2 and 3\"}],\"role\":\"user\"}],\"generationConfig\":{},\"systemInstruction\":{\"parts\":[{\"text\":\"You are a helpful assistant. Always use both add and multiply at the same time.\"}],\"role\":\"user\"},\"toolConfig\":{\"functionCallingConfig\":{\"mode\":\"AUTO\"}},\"tools\":[{\"functionDeclarations\":[{\"description\":\"Add two numbers\",\"name\":\"add\",\"parameters\":{\"properties\":{\"a\":{\"description\":\"first number\",\"type\":\"INTEGER\"},\"b\":{\"description\":\"second number\",\"type\":\"INTEGER\"}},\"required\":[\"a\",\"b\"],\"type\":\"OBJECT\"}},{\"description\":\"Multiply two numbers\",\"name\":\"multiply\",\"parameters\":{\"properties\":{\"a\":{\"description\":\"first number\",\"type\":\"INTEGER\"},\"b\":{\"description\":\"second number\",\"type\":\"INTEGER\"}},\"required\":[\"a\",\"b\"],\"type\":\"OBJECT\"}}]}]}\n"
+ form:
+ alt:
+ - sse
+ headers:
+ Content-Type:
+ - application/json
+ User-Agent:
+ - google-genai-sdk/1.23.0 gl-go/go1.25.1
+ url: https://generativelanguage.googleapis.com/v1beta/models/gemini-2.5-flash:streamGenerateContent?alt=sse
+ method: POST
+ response:
+ proto: HTTP/2.0
+ proto_major: 2
+ proto_minor: 0
+ content_length: -1
+ body: "data: {\"candidates\": [{\"content\": {\"parts\": [{\"functionCall\": {\"name\": \"add\",\"args\": {\"a\": 2,\"b\": 3}},\"thoughtSignature\": \"CikB0e2Kb9Cbo981mR8v4MhGuu8kkazE7e5W2Yaehwjb6iqXgIQTnlkiEQptAdHtim/2wYeqmL8JTZVWRWzYrj1Yh72I2/C1joGgI7SMVWgEv1EcSoryWwxuWg8XnVTwE8etvpuv42AzaAjwdyTHeRpMZRFTwCsbs72ZcfZaNp5fGcdsULLk8ofUEgdjv3traQpoVe6f8gwCqgpnAdHtim8gitawv43Dk2C2hFdN2eoi7yDLD4TzcSJlI30wo3U2t3dO08m4RSdj9n8cItxy0Xgr2oMrYlx0gxObVepGmwdc0JVLiruE9sWE8FC/Jr4nq9avS6jXag4yzb1cmWSb9jqdmA==\"},{\"functionCall\": {\"name\": \"multiply\",\"args\": {\"a\": 2,\"b\": 3}}}],\"role\": \"model\"},\"finishReason\": \"STOP\",\"index\": 0}],\"usageMetadata\": {\"promptTokenCount\": 121,\"candidatesTokenCount\": 36,\"totalTokenCount\": 195,\"promptTokensDetails\": [{\"modality\": \"TEXT\",\"tokenCount\": 121}],\"thoughtsTokenCount\": 38},\"modelVersion\": \"gemini-2.5-flash\",\"responseId\": \"Cy7RaKyyIPGO28oPouv7mAg\"}\r\n\r\n"
+ headers:
+ Content-Type:
+ - text/event-stream
+ status: 200 OK
+ code: 200
+ duration: 974.596292ms
+- id: 1
+ request:
+ proto: HTTP/1.1
+ proto_major: 1
+ proto_minor: 1
+ content_length: 1134
+ host: generativelanguage.googleapis.com
@@ -0,0 +1,63 @@
+---
+version: 2
+interactions:
+- id: 0
+ request:
+ proto: HTTP/1.1
+ proto_major: 1
+ proto_minor: 1
+ content_length: 787
+ host: generativelanguage.googleapis.com
+ body: "{\"contents\":[{\"parts\":[{\"text\":\"Add and multiply the number 2 and 3\"}],\"role\":\"user\"}],\"generationConfig\":{},\"systemInstruction\":{\"parts\":[{\"text\":\"You are a helpful assistant. Always use both add and multiply at the same time.\"}],\"role\":\"user\"},\"toolConfig\":{\"functionCallingConfig\":{\"mode\":\"AUTO\"}},\"tools\":[{\"functionDeclarations\":[{\"description\":\"Add two numbers\",\"name\":\"add\",\"parameters\":{\"properties\":{\"a\":{\"description\":\"first number\",\"type\":\"INTEGER\"},\"b\":{\"description\":\"second number\",\"type\":\"INTEGER\"}},\"required\":[\"a\",\"b\"],\"type\":\"OBJECT\"}},{\"description\":\"Multiply two numbers\",\"name\":\"multiply\",\"parameters\":{\"properties\":{\"a\":{\"description\":\"first number\",\"type\":\"INTEGER\"},\"b\":{\"description\":\"second number\",\"type\":\"INTEGER\"}},\"required\":[\"a\",\"b\"],\"type\":\"OBJECT\"}}]}]}\n"
+ form:
+ alt:
+ - sse
+ headers:
+ Content-Type:
+ - application/json
+ User-Agent:
+ - google-genai-sdk/1.23.0 gl-go/go1.25.1
+ url: https://generativelanguage.googleapis.com/v1beta/models/gemini-2.5-pro:streamGenerateContent?alt=sse
+ method: POST
+ response:
+ proto: HTTP/2.0
+ proto_major: 2
+ proto_minor: 0
+ content_length: -1
@@ -0,0 +1,61 @@
+---
+version: 2
+interactions:
+- id: 0
+ request:
+ proto: HTTP/1.1
+ proto_major: 1
+ proto_minor: 1
+ content_length: 835
+ host: ""
+ body: "{\"messages\":[{\"content\":\"You are a helpful assistant. Always use both add and multiply at the same time.\",\"role\":\"system\"},{\"content\":\"Add and multiply the number 2 and 3\",\"role\":\"user\"}],\"model\":\"gpt-4o-mini\",\"stream_options\":{\"include_usage\":true},\"tool_choice\":\"auto\",\"tools\":[{\"function\":{\"name\":\"add\",\"strict\":false,\"description\":\"Add two numbers\",\"parameters\":{\"properties\":{\"a\":{\"description\":\"first number\",\"type\":\"integer\"},\"b\":{\"description\":\"second number\",\"type\":\"integer\"}},\"required\":[\"a\",\"b\"],\"type\":\"object\"}},\"type\":\"function\"},{\"function\":{\"name\":\"multiply\",\"strict\":false,\"description\":\"Multiply two numbers\",\"parameters\":{\"properties\":{\"a\":{\"description\":\"first number\",\"type\":\"integer\"},\"b\":{\"description\":\"second number\",\"type\":\"integer\"}},\"required\":[\"a\",\"b\"],\"type\":\"object\"}},\"type\":\"function\"}],\"stream\":true}"
+ headers:
+ Accept:
+ - application/json
+ Content-Type:
+ - application/json
+ User-Agent:
+ - OpenAI/Go 2.3.0
+ url: https://api.openai.com/v1/chat/completions
+ method: POST
+ response:
+ proto: HTTP/2.0
+ proto_major: 2
+ proto_minor: 0
+ content_length: -1
@@ -0,0 +1,61 @@
+---
+version: 2
+interactions:
+- id: 0
+ request:
+ proto: HTTP/1.1
+ proto_major: 1
+ proto_minor: 1
+ content_length: 830
+ host: ""
+ body: "{\"messages\":[{\"content\":\"You are a helpful assistant. Always use both add and multiply at the same time.\",\"role\":\"system\"},{\"content\":\"Add and multiply the number 2 and 3\",\"role\":\"user\"}],\"model\":\"gpt-4o\",\"stream_options\":{\"include_usage\":true},\"tool_choice\":\"auto\",\"tools\":[{\"function\":{\"name\":\"add\",\"strict\":false,\"description\":\"Add two numbers\",\"parameters\":{\"properties\":{\"a\":{\"description\":\"first number\",\"type\":\"integer\"},\"b\":{\"description\":\"second number\",\"type\":\"integer\"}},\"required\":[\"a\",\"b\"],\"type\":\"object\"}},\"type\":\"function\"},{\"function\":{\"name\":\"multiply\",\"strict\":false,\"description\":\"Multiply two numbers\",\"parameters\":{\"properties\":{\"a\":{\"description\":\"first number\",\"type\":\"integer\"},\"b\":{\"description\":\"second number\",\"type\":\"integer\"}},\"required\":[\"a\",\"b\"],\"type\":\"object\"}},\"type\":\"function\"}],\"stream\":true}"
+ headers:
+ Accept:
+ - application/json
+ Content-Type:
+ - application/json
+ User-Agent:
+ - OpenAI/Go 2.3.0
+ url: https://api.openai.com/v1/chat/completions
+ method: POST
+ response:
+ proto: HTTP/2.0
+ proto_major: 2
+ proto_minor: 0
+ content_length: -1
@@ -0,0 +1,61 @@
+---
+version: 2
+interactions:
+- id: 0
+ request:
+ proto: HTTP/1.1
+ proto_major: 1
+ proto_minor: 1
+ content_length: 872
+ host: ""
@@ -0,0 +1,90 @@
+---
+version: 2
+interactions:
+- id: 0
+ request:
+ proto: HTTP/1.1
+ proto_major: 1
+ proto_minor: 1
+ content_length: 569
+ host: ""
+ body: "{\"messages\":[{\"content\":\"You are a helpful assistant. Use the add tool to perform calculations.\",\"role\":\"system\"},{\"content\":\"What is 15 + 27?\",\"role\":\"user\"}],\"model\":\"moonshotai/kimi-k2-0905\",\"stream_options\":{\"include_usage\":true},\"tool_choice\":\"auto\",\"tools\":[{\"function\":{\"name\":\"add\",\"strict\":false,\"description\":\"Add two numbers\",\"parameters\":{\"properties\":{\"a\":{\"description\":\"first number\",\"type\":\"integer\"},\"b\":{\"description\":\"second number\",\"type\":\"integer\"}},\"required\":[\"a\",\"b\"],\"type\":\"object\"}},\"type\":\"function\"}],\"usage\":{\"include\":true},\"stream\":true}"
+ headers:
+ Accept:
+ - application/json
+ Content-Type:
+ - application/json
+ User-Agent:
+ - OpenAI/Go 2.3.0
+ url: https://openrouter.ai/api/v1/chat/completions
+ method: POST
+ response:
+ proto: HTTP/2.0
+ proto_major: 2
+ proto_minor: 0
+ content_length: -1
@@ -0,0 +1,63 @@
+---
+version: 2
+interactions:
+- id: 0
+ request:
+ proto: HTTP/1.1
+ proto_major: 1
+ proto_minor: 1
+ content_length: 492
+ host: ""
+ body: "{\"messages\":[{\"content\":\"You are a helpful assistant\",\"role\":\"system\"},{\"content\":\"What's the weather in Florence, Italy?\",\"role\":\"user\"}],\"model\":\"z-ai/glm-4.5\",\"tool_choice\":\"auto\",\"tools\":[{\"function\":{\"name\":\"weather\",\"strict\":false,\"description\":\"Get weather information for a location\",\"parameters\":{\"properties\":{\"location\":{\"description\":\"the city\",\"type\":\"string\"}},\"required\":[\"location\"],\"type\":\"object\"}},\"type\":\"function\"}],\"reasoning\":{\"effort\":\"high\"},\"usage\":{\"include\":true}}"
+ headers:
+ Accept:
+ - application/json
+ Content-Type:
+ - application/json
+ User-Agent:
+ - OpenAI/Go 2.3.0
+ url: https://openrouter.ai/api/v1/chat/completions
+ method: POST
+ response:
+ proto: HTTP/2.0
+ proto_major: 2
+ proto_minor: 0
+ content_length: -1
+ uncompressed: true
@@ -0,0 +1,61 @@
+---
+version: 2
+interactions:
+- id: 0
+ request:
+ proto: HTTP/1.1
+ proto_major: 1
+ proto_minor: 1
+ content_length: 516
+ host: ""
+ body: "{\"messages\":[{\"content\":\"You are a helpful assistant\",\"role\":\"system\"},{\"content\":\"What's the weather in Florence, Italy?\",\"role\":\"user\"}],\"model\":\"z-ai/glm-4.5\",\"stream_options\":{\"include_usage\":true},\"tool_choice\":\"auto\",\"tools\":[{\"function\":{\"name\":\"weather\",\"strict\":false,\"description\":\"Get weather information for a location\",\"parameters\":{\"properties\":{\"location\":{\"description\":\"the city\",\"type\":\"string\"}},\"required\":[\"location\"],\"type\":\"object\"}},\"type\":\"function\"}],\"usage\":{\"include\":true},\"stream\":true}"
+ headers:
+ Accept:
+ - application/json
+ Content-Type:
+ - application/json
+ User-Agent:
+ - OpenAI/Go 2.3.0
+ url: https://openrouter.ai/api/v1/chat/completions
+ method: POST
+ response:
+ proto: HTTP/2.0
+ proto_major: 2
+ proto_minor: 0
+ content_length: -1
@@ -0,0 +1,63 @@
+---
+version: 2
+interactions:
+ - id: 0
+ request:
+ proto: HTTP/1.1
+ proto_major: 1
+ proto_minor: 1
+ content_length: 466
+ host: ""
+ body: '{"messages":[{"content":"You are a helpful assistant","role":"system"},{"content":"What''s the weather in Florence?","role":"user"}],"model":"moonshotai/kimi-k2-0905","tool_choice":"auto","tools":[{"function":{"name":"weather","strict":false,"description":"Get weather information for a location","parameters":{"properties":{"location":{"description":"the city","type":"string"}},"required":["location"],"type":"object"}},"type":"function"}],"usage":{"include":true}}'
+ headers:
+ Accept:
+ - application/json
+ Content-Type:
+ - application/json
+ User-Agent:
+ - OpenAI/Go 2.3.0
+ url: https://openrouter.ai/api/v1/chat/completions
+ method: POST
+ response:
+ proto: HTTP/2.0
+ proto_major: 2
+ proto_minor: 0
+ content_length: -1
+ uncompressed: true
@@ -1,20 +1,24 @@
package providertests
import (
+ "fmt"
"testing"
"github.com/charmbracelet/fantasy/ai"
"github.com/charmbracelet/fantasy/anthropic"
"github.com/charmbracelet/fantasy/google"
+ "github.com/charmbracelet/fantasy/openrouter"
"github.com/stretchr/testify/require"
)
-func testThinkingSteps(t *testing.T, providerName string, steps []ai.StepResult) {
+func testThinking(t *testing.T, providerName string, steps []ai.StepResult) {
switch providerName {
case anthropic.Name:
testAnthropicThinking(t, steps)
case google.Name:
testGoogleThinking(t, steps)
+ case openrouter.Name:
+ testOpenrouterThinking(t, steps)
}
}
@@ -33,6 +37,22 @@ func testGoogleThinking(t *testing.T, steps []ai.StepResult) {
require.Greater(t, reasoningContentCount, 0)
}
+func testOpenrouterThinking(t *testing.T, steps []ai.StepResult) {
+ reasoningContentCount := 0
+ // ensure the steps produced reasoning content
+ for _, step := range steps {
+ for _, msg := range step.Messages {
+ for _, content := range msg.Content {
+ if content.GetType() == ai.ContentTypeReasoning {
+ reasoningContentCount++
+ }
+ }
+ }
+ }
+ require.Greater(t, reasoningContentCount, 0)
+}
+
+
func testAnthropicThinking(t *testing.T, steps []ai.StepResult) {
reasoningContentCount := 0
signaturesCount := 0