lint: enable `revive`, fix issues + add all missing doc comments (#25)

Created by Andrey Nering

Change summary

.golangci.yml                                  |  2 
agent.go                                       | 33 +++++++++++
content.go                                     | 47 +++++++++++++---
doc.go                                         |  2 
errors.go                                      | 11 +++-
examples/agent/main.go                         |  3 
examples/simple/main.go                        |  1 
examples/stream/main.go                        |  1 
examples/streaming-agent-simple/main.go        |  7 +-
examples/streaming-agent/main.go               | 25 ++++----
model.go                                       | 53 +++++++++++++++----
provider.go                                    |  1 
providers/anthropic/anthropic.go               | 19 ++++++-
providers/anthropic/provider_options.go        | 12 ++++
providers/azure/azure.go                       | 12 ++++
providers/bedrock/bedrock.go                   |  7 ++
providers/google/auth.go                       |  4 +
providers/google/google.go                     | 48 ++++++++++-------
providers/google/provider_options.go           |  7 ++
providers/openai/language_model.go             |  9 +++
providers/openai/language_model_hooks.go       | 36 +++++++++---
providers/openai/openai.go                     | 19 ++++++-
providers/openai/provider_options.go           | 22 +++++++-
providers/openai/responses_options.go          | 43 ++++++++++++---
providers/openaicompat/language_model_hooks.go |  5 +
providers/openaicompat/openaicompat.go         | 10 +++
providers/openaicompat/provider_options.go     |  6 ++
providers/openrouter/language_model_hooks.go   |  2 
providers/openrouter/openrouter.go             | 11 +++
providers/openrouter/provider_options.go       | 27 +++++++++
retry.go                                       |  6 +
util.go                                        |  2 
32 files changed, 393 insertions(+), 100 deletions(-)

Detailed changes

.golangci.yml 🔗

@@ -17,7 +17,7 @@ linters:
     - noctx
     - nolintlint
     - prealloc
-    # - revive
+    - revive
     - rowserrcheck
     - sqlclosecheck
     - tparallel

agent.go 🔗

@@ -11,11 +11,13 @@ import (
 	"sync"
 )
 
+// StepResult represents the result of a single step in an agent execution.
 type StepResult struct {
 	Response
 	Messages []Message
 }
 
+// StopCondition defines a function that determines when an agent should stop executing.
 type StopCondition = func(steps []StepResult) bool
 
 // StepCountIs returns a stop condition that stops after the specified number of steps.
@@ -80,6 +82,7 @@ func MaxTokensUsed(maxTokens int64) StopCondition {
 	}
 }
 
+// PrepareStepFunctionOptions contains the options for preparing a step in an agent execution.
 type PrepareStepFunctionOptions struct {
 	Steps      []StepResult
 	StepNumber int
@@ -87,6 +90,7 @@ type PrepareStepFunctionOptions struct {
 	Messages   []Message
 }
 
+// PrepareStepResult contains the result of preparing a step in an agent execution.
 type PrepareStepResult struct {
 	Model           LanguageModel
 	Messages        []Message
@@ -96,6 +100,7 @@ type PrepareStepResult struct {
 	DisableAllTools bool
 }
 
+// ToolCallRepairOptions contains the options for repairing a tool call.
 type ToolCallRepairOptions struct {
 	OriginalToolCall ToolCallContent
 	ValidationError  error
@@ -105,8 +110,13 @@ type ToolCallRepairOptions struct {
 }
 
 type (
-	PrepareStepFunction    = func(ctx context.Context, options PrepareStepFunctionOptions) (context.Context, PrepareStepResult, error)
+	// PrepareStepFunction defines a function that prepares a step in an agent execution.
+	PrepareStepFunction = func(ctx context.Context, options PrepareStepFunctionOptions) (context.Context, PrepareStepResult, error)
+
+	// OnStepFinishedFunction defines a function that is called when a step finishes.
 	OnStepFinishedFunction = func(step StepResult)
+
+	// RepairToolCallFunction defines a function that repairs a tool call.
 	RepairToolCallFunction = func(ctx context.Context, options ToolCallRepairOptions) (*ToolCallContent, error)
 )
 
@@ -133,6 +143,7 @@ type agentSettings struct {
 	onRetry        OnRetryCallback
 }
 
+// AgentCall represents a call to an agent.
 type AgentCall struct {
 	Prompt           string     `json:"prompt"`
 	Files            []FilePart `json:"files"`
@@ -222,6 +233,7 @@ type (
 	OnStreamFinishFunc func(usage Usage, finishReason FinishReason, providerMetadata ProviderMetadata) error
 )
 
+// AgentStreamCall represents a streaming call to an agent.
 type AgentStreamCall struct {
 	Prompt           string     `json:"prompt"`
 	Files            []FilePart `json:"files"`
@@ -268,6 +280,7 @@ type AgentStreamCall struct {
 	OnStreamFinish   OnStreamFinishFunc   // Called when stream finishes
 }
 
+// AgentResult represents the result of an agent execution.
 type AgentResult struct {
 	Steps []StepResult
 	// Final response
@@ -275,17 +288,20 @@ type AgentResult struct {
 	TotalUsage Usage
 }
 
+// Agent represents an AI agent that can generate responses and stream responses.
 type Agent interface {
 	Generate(context.Context, AgentCall) (*AgentResult, error)
 	Stream(context.Context, AgentStreamCall) (*AgentResult, error)
 }
 
+// AgentOption defines a function that configures agent settings.
 type AgentOption = func(*agentSettings)
 
 type agent struct {
 	settings agentSettings
 }
 
+// NewAgent creates a new agent with the given language model and options.
 func NewAgent(model LanguageModel, opts ...AgentOption) Agent {
 	settings := agentSettings{
 		model: model,
@@ -926,7 +942,7 @@ func (a *agent) prepareTools(tools []AgentTool, activeTools []string, disableAll
 func (a *agent) validateAndRepairToolCall(ctx context.Context, toolCall ToolCallContent, availableTools []AgentTool, systemPrompt string, messages []Message, repairFunc RepairToolCallFunction) ToolCallContent {
 	if err := a.validateToolCall(toolCall, availableTools); err == nil {
 		return toolCall
-	} else {
+	} else { //nolint: revive
 		if repairFunc != nil {
 			repairOptions := ToolCallRepairOptions{
 				OriginalToolCall: toolCall,
@@ -996,66 +1012,77 @@ func (a *agent) createPrompt(system, prompt string, messages []Message, files ..
 	return preparedPrompt, nil
 }
 
+// WithSystemPrompt sets the system prompt for the agent.
 func WithSystemPrompt(prompt string) AgentOption {
 	return func(s *agentSettings) {
 		s.systemPrompt = prompt
 	}
 }
 
+// WithMaxOutputTokens sets the maximum output tokens for the agent.
 func WithMaxOutputTokens(tokens int64) AgentOption {
 	return func(s *agentSettings) {
 		s.maxOutputTokens = &tokens
 	}
 }
 
+// WithTemperature sets the temperature for the agent.
 func WithTemperature(temp float64) AgentOption {
 	return func(s *agentSettings) {
 		s.temperature = &temp
 	}
 }
 
+// WithTopP sets the top-p value for the agent.
 func WithTopP(topP float64) AgentOption {
 	return func(s *agentSettings) {
 		s.topP = &topP
 	}
 }
 
+// WithTopK sets the top-k value for the agent.
 func WithTopK(topK int64) AgentOption {
 	return func(s *agentSettings) {
 		s.topK = &topK
 	}
 }
 
+// WithPresencePenalty sets the presence penalty for the agent.
 func WithPresencePenalty(penalty float64) AgentOption {
 	return func(s *agentSettings) {
 		s.presencePenalty = &penalty
 	}
 }
 
+// WithFrequencyPenalty sets the frequency penalty for the agent.
 func WithFrequencyPenalty(penalty float64) AgentOption {
 	return func(s *agentSettings) {
 		s.frequencyPenalty = &penalty
 	}
 }
 
+// WithTools sets the tools for the agent.
 func WithTools(tools ...AgentTool) AgentOption {
 	return func(s *agentSettings) {
 		s.tools = append(s.tools, tools...)
 	}
 }
 
+// WithStopConditions sets the stop conditions for the agent.
 func WithStopConditions(conditions ...StopCondition) AgentOption {
 	return func(s *agentSettings) {
 		s.stopWhen = append(s.stopWhen, conditions...)
 	}
 }
 
+// WithPrepareStep sets the prepare step function for the agent.
 func WithPrepareStep(fn PrepareStepFunction) AgentOption {
 	return func(s *agentSettings) {
 		s.prepareStep = fn
 	}
 }
 
+// WithRepairToolCall sets the repair tool call function for the agent.
 func WithRepairToolCall(fn RepairToolCallFunction) AgentOption {
 	return func(s *agentSettings) {
 		s.repairToolCall = fn
@@ -1313,12 +1340,14 @@ func addUsage(a, b Usage) Usage {
 	}
 }
 
+// WithHeaders sets the headers for the agent.
 func WithHeaders(headers map[string]string) AgentOption {
 	return func(s *agentSettings) {
 		s.headers = headers
 	}
 }
 
+// WithProviderOptions sets the provider options for the agent.
 func WithProviderOptions(providerOptions ProviderOptions) AgentOption {
 	return func(s *agentSettings) {
 		s.providerOptions = providerOptions
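
A minimal sketch of how the newly documented agent options compose (the model can come from any provider; the tool, prompt text, and limits are illustrative, not part of this change):

import (
	"context"
	"fmt"

	"charm.land/fantasy"
)

// runAgent wires up an agent with a few of the options documented above.
func runAgent(ctx context.Context, model fantasy.LanguageModel, weatherTool fantasy.AgentTool) error {
	agent := fantasy.NewAgent(
		model,
		fantasy.WithSystemPrompt("You are a helpful assistant."),
		fantasy.WithMaxOutputTokens(512),
		fantasy.WithTools(weatherTool),
		fantasy.WithStopConditions(fantasy.StepCountIs(3)),
	)
	result, err := agent.Generate(ctx, fantasy.AgentCall{Prompt: "What's the weather in Berlin?"})
	if err != nil {
		return err
	}
	fmt.Println("total tokens:", result.TotalUsage.TotalTokens)
	return nil
}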

content.go 🔗

@@ -54,13 +54,20 @@ type ProviderOptions map[string]ProviderOptionsData
 type FinishReason string
 
 const (
-	FinishReasonStop          FinishReason = "stop"           // model generated stop sequence
-	FinishReasonLength        FinishReason = "length"         // model generated maximum number of tokens
+	// FinishReasonStop indicates the model generated a stop sequence.
+	FinishReasonStop FinishReason = "stop" // model generated stop sequence
+	// FinishReasonLength indicates the model generated maximum number of tokens.
+	FinishReasonLength FinishReason = "length" // model generated maximum number of tokens
+	// FinishReasonContentFilter indicates content filter violation stopped the model.
 	FinishReasonContentFilter FinishReason = "content-filter" // content filter violation stopped the model
-	FinishReasonToolCalls     FinishReason = "tool-calls"     // model triggered tool calls
-	FinishReasonError         FinishReason = "error"          // model stopped because of an error
-	FinishReasonOther         FinishReason = "other"          // model stopped for other reasons
-	FinishReasonUnknown       FinishReason = "unknown"        // the model has not transmitted a finish reason
+	// FinishReasonToolCalls indicates the model triggered tool calls.
+	FinishReasonToolCalls FinishReason = "tool-calls" // model triggered tool calls
+	// FinishReasonError indicates the model stopped because of an error.
+	FinishReasonError FinishReason = "error" // model stopped because of an error
+	// FinishReasonOther indicates the model stopped for other reasons.
+	FinishReasonOther FinishReason = "other" // model stopped for other reasons
+	// FinishReasonUnknown indicates the model has not transmitted a finish reason.
+	FinishReasonUnknown FinishReason = "unknown" // the model has not transmitted a finish reason
 )
 
 // Prompt represents a list of messages for the language model.
@@ -70,10 +77,14 @@ type Prompt []Message
 type MessageRole string
 
 const (
-	MessageRoleSystem    MessageRole = "system"
-	MessageRoleUser      MessageRole = "user"
+	// MessageRoleSystem represents a system message.
+	MessageRoleSystem MessageRole = "system"
+	// MessageRoleUser represents a user message.
+	MessageRoleUser MessageRole = "user"
+	// MessageRoleAssistant represents an assistant message.
 	MessageRoleAssistant MessageRole = "assistant"
-	MessageRoleTool      MessageRole = "tool"
+	// MessageRoleTool represents a tool message.
+	MessageRoleTool MessageRole = "tool"
 )
 
 // Message represents a message in a prompt.
@@ -83,6 +94,7 @@ type Message struct {
 	ProviderOptions ProviderOptions `json:"provider_options"`
 }
 
+// AsContentType converts a Content interface to a specific content type.
 func AsContentType[T Content](content Content) (T, bool) {
 	var zero T
 	if content == nil {
@@ -98,6 +110,7 @@ func AsContentType[T Content](content Content) (T, bool) {
 	}
 }
 
+// AsMessagePart converts a MessagePart interface to a specific message part type.
 func AsMessagePart[T MessagePart](content MessagePart) (T, bool) {
 	var zero T
 	if content == nil {
@@ -130,6 +143,7 @@ func (t TextPart) GetType() ContentType {
 	return ContentTypeText
 }
 
+// Options returns the provider options for the text part.
 func (t TextPart) Options() ProviderOptions {
 	return t.ProviderOptions
 }
@@ -145,6 +159,7 @@ func (r ReasoningPart) GetType() ContentType {
 	return ContentTypeReasoning
 }
 
+// Options returns the provider options for the reasoning part.
 func (r ReasoningPart) Options() ProviderOptions {
 	return r.ProviderOptions
 }
@@ -162,6 +177,7 @@ func (f FilePart) GetType() ContentType {
 	return ContentTypeFile
 }
 
+// Options returns the provider options for the file part.
 func (f FilePart) Options() ProviderOptions {
 	return f.ProviderOptions
 }
@@ -180,6 +196,7 @@ func (t ToolCallPart) GetType() ContentType {
 	return ContentTypeToolCall
 }
 
+// Options returns the provider options for the tool call part.
 func (t ToolCallPart) Options() ProviderOptions {
 	return t.ProviderOptions
 }
@@ -196,6 +213,7 @@ func (t ToolResultPart) GetType() ContentType {
 	return ContentTypeToolResult
 }
 
+// Options returns the provider options for the tool result part.
 func (t ToolResultPart) Options() ProviderOptions {
 	return t.ProviderOptions
 }
@@ -212,35 +230,43 @@ const (
 	ToolResultContentTypeMedia ToolResultContentType = "media"
 )
 
+// ToolResultOutputContent represents the output content of a tool result.
 type ToolResultOutputContent interface {
 	GetType() ToolResultContentType
 }
 
+// ToolResultOutputContentText represents text output content of a tool result.
 type ToolResultOutputContentText struct {
 	Text string `json:"text"`
 }
 
+// GetType returns the type of the tool result output content text.
 func (t ToolResultOutputContentText) GetType() ToolResultContentType {
 	return ToolResultContentTypeText
 }
 
+// ToolResultOutputContentError represents error output content of a tool result.
 type ToolResultOutputContentError struct {
 	Error error `json:"error"`
 }
 
+// GetType returns the type of the tool result output content error.
 func (t ToolResultOutputContentError) GetType() ToolResultContentType {
 	return ToolResultContentTypeError
 }
 
+// ToolResultOutputContentMedia represents media output content of a tool result.
 type ToolResultOutputContentMedia struct {
 	Data      string `json:"data"`       // for media type (base64)
 	MediaType string `json:"media_type"` // for media type
 }
 
+// GetType returns the type of the tool result output content media.
 func (t ToolResultOutputContentMedia) GetType() ToolResultContentType {
 	return ToolResultContentTypeMedia
 }
 
+// AsToolResultOutputType converts a ToolResultOutputContent interface to a specific type.
 func AsToolResultOutputType[T ToolResultOutputContent](content ToolResultOutputContent) (T, bool) {
 	var zero T
 	if content == nil {
@@ -455,7 +481,7 @@ func (p ProviderDefinedTool) GetName() string {
 	return p.Name
 }
 
-// Helpers.
+// NewUserMessage creates a new user message with the given prompt and optional files.
 func NewUserMessage(prompt string, files ...FilePart) Message {
 	content := []MessagePart{
 		TextPart{
@@ -473,6 +499,7 @@ func NewUserMessage(prompt string, files ...FilePart) Message {
 	}
 }
 
+// NewSystemMessage creates a new system message with the given prompts.
 func NewSystemMessage(prompt ...string) Message {
 	content := make([]MessagePart, 0, len(prompt))
 	for _, p := range prompt {
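
A small sketch of the message helpers and the generic AsMessagePart conversion documented above, assuming only the charm.land/fantasy import (the prompt text and the zero-value TextPart are purely illustrative):

func buildPrompt() fantasy.Prompt {
	prompt := fantasy.Prompt{
		fantasy.NewSystemMessage("You are a helpful assistant."),
		fantasy.NewUserMessage("What's the weather in Berlin?"),
	}

	// AsMessagePart narrows a MessagePart value to a concrete part type.
	var part fantasy.MessagePart = fantasy.TextPart{}
	if textPart, ok := fantasy.AsMessagePart[fantasy.TextPart](part); ok {
		_ = textPart // the concrete TextPart is available here
	}
	return prompt
}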

doc.go 🔗

@@ -0,0 +1,2 @@
+// Package fantasy provides a unified interface for interacting with various AI language models.
+package fantasy

errors.go 🔗

@@ -190,11 +190,16 @@ func NewNoContentGeneratedError(message string) *NoContentGeneratedError {
 type ModelType string
 
 const (
-	ModelTypeLanguage      ModelType = "languageModel"
+	// ModelTypeLanguage represents a language model.
+	ModelTypeLanguage ModelType = "languageModel"
+	// ModelTypeTextEmbedding represents a text embedding model.
 	ModelTypeTextEmbedding ModelType = "textEmbeddingModel"
-	ModelTypeImage         ModelType = "imageModel"
+	// ModelTypeImage represents an image model.
+	ModelTypeImage ModelType = "imageModel"
+	// ModelTypeTranscription represents a transcription model.
 	ModelTypeTranscription ModelType = "transcriptionModel"
-	ModelTypeSpeech        ModelType = "speechModel"
+	// ModelTypeSpeech represents a speech model.
+	ModelTypeSpeech ModelType = "speechModel"
 )
 
 // NoSuchModelError represents an error when a model is not found.

examples/agent/main.go 🔗

@@ -1,3 +1,4 @@
+// Package main provides an example of using the fantasy AI SDK with an agent.
 package main
 
 import (
@@ -27,7 +28,7 @@ func main() {
 	weatherTool := fantasy.NewAgentTool(
 		"weather",
 		"Get weather information for a location",
-		func(ctx context.Context, input WeatherInput, _ fantasy.ToolCall) (fantasy.ToolResponse, error) {
+		func(_ context.Context, _ WeatherInput, _ fantasy.ToolCall) (fantasy.ToolResponse, error) {
 			return fantasy.NewTextResponse("40 C"), nil
 		},
 	)

examples/simple/main.go 🔗

@@ -1,3 +1,4 @@
+// Package main provides a simple example of using the fantasy AI SDK.
 package main
 
 import (

examples/stream/main.go 🔗

@@ -1,3 +1,4 @@
+// Package main provides a streaming example of using the fantasy AI SDK.
 package main
 
 import (

examples/streaming-agent-simple/main.go 🔗

@@ -1,3 +1,4 @@
+// Package main provides a simple streaming agent example of using the fantasy AI SDK.
 package main
 
 import (
@@ -35,7 +36,7 @@ func main() {
 	echoTool := fantasy.NewAgentTool(
 		"echo",
 		"Echo back the provided message",
-		func(ctx context.Context, input EchoInput, _ fantasy.ToolCall) (fantasy.ToolResponse, error) {
+		func(_ context.Context, input EchoInput, _ fantasy.ToolCall) (fantasy.ToolResponse, error) {
 			return fantasy.NewTextResponse("Echo: " + input.Message), nil
 		},
 	)
@@ -58,7 +59,7 @@ func main() {
 		Prompt: "Please echo back 'Hello, streaming world!'",
 
 		// Show real-time text as it streams
-		OnTextDelta: func(id, text string) error {
+		OnTextDelta: func(_ string, text string) error {
 			fmt.Print(text)
 			return nil
 		},
@@ -70,7 +71,7 @@ func main() {
 		},
 
 		// Show tool results
-		OnToolResult: func(result fantasy.ToolResultContent) error {
+		OnToolResult: func(_ fantasy.ToolResultContent) error {
 			fmt.Printf("[Tool result received]\n")
 			return nil
 		},

examples/streaming-agent/main.go 🔗

@@ -1,3 +1,4 @@
+// Package main provides a comprehensive streaming agent example of using the fantasy AI SDK.
 package main
 
 import (
@@ -45,7 +46,7 @@ func main() {
 	weatherTool := fantasy.NewAgentTool(
 		"get_weather",
 		"Get the current weather for a specific location",
-		func(ctx context.Context, input WeatherInput, _ fantasy.ToolCall) (fantasy.ToolResponse, error) {
+		func(_ context.Context, input WeatherInput, _ fantasy.ToolCall) (fantasy.ToolResponse, error) {
 			// Simulate weather lookup with some fake data
 			location := input.Location
 			if location == "" {
@@ -86,7 +87,7 @@ func main() {
 	calculatorTool := fantasy.NewAgentTool(
 		"calculate",
 		"Perform basic mathematical calculations",
-		func(ctx context.Context, input CalculatorInput, _ fantasy.ToolCall) (fantasy.ToolResponse, error) {
+		func(_ context.Context, input CalculatorInput, _ fantasy.ToolCall) (fantasy.ToolResponse, error) {
 			// Simple calculator simulation
 			expr := strings.TrimSpace(input.Expression)
 			if strings.Contains(expr, "2 + 2") || strings.Contains(expr, "2+2") {
@@ -153,43 +154,43 @@ func main() {
 			}
 			return nil
 		},
-		OnTextStart: func(id string) error {
+		OnTextStart: func(_ string) error {
 			fmt.Print("💭 Assistant: ")
 			return nil
 		},
-		OnTextDelta: func(id, text string) error {
+		OnTextDelta: func(_ string, text string) error {
 			fmt.Print(text)
 			textBuffer.WriteString(text)
 			return nil
 		},
-		OnTextEnd: func(id string) error {
+		OnTextEnd: func(_ string) error {
 			fmt.Println()
 			return nil
 		},
-		OnReasoningStart: func(id string, _ fantasy.ReasoningContent) error {
+		OnReasoningStart: func(_ string, _ fantasy.ReasoningContent) error {
 			fmt.Print("🤔 Thinking: ")
 			return nil
 		},
-		OnReasoningDelta: func(id, text string) error {
+		OnReasoningDelta: func(_ string, text string) error {
 			reasoningBuffer.WriteString(text)
 			return nil
 		},
-		OnReasoningEnd: func(id string, content fantasy.ReasoningContent) error {
+		OnReasoningEnd: func(_ string, _ fantasy.ReasoningContent) error {
 			if reasoningBuffer.Len() > 0 {
 				fmt.Printf("%s\n", reasoningBuffer.String())
 				reasoningBuffer.Reset()
 			}
 			return nil
 		},
-		OnToolInputStart: func(id, toolName string) error {
+		OnToolInputStart: func(_ string, toolName string) error {
 			fmt.Printf("🔧 Calling tool: %s\n", toolName)
 			return nil
 		},
-		OnToolInputDelta: func(id, delta string) error {
+		OnToolInputDelta: func(_ string, _ string) error {
 			// Could show tool input being built, but it's often noisy
 			return nil
 		},
-		OnToolInputEnd: func(id string) error {
+		OnToolInputEnd: func(_ string) error {
 			// Tool input complete
 			return nil
 		},
@@ -212,7 +213,7 @@ func main() {
 			fmt.Printf("📚 Source: %s (%s)\n", source.Title, source.URL)
 			return nil
 		},
-		OnStreamFinish: func(usage fantasy.Usage, finishReason fantasy.FinishReason, providerMetadata fantasy.ProviderMetadata) error {
+		OnStreamFinish: func(usage fantasy.Usage, finishReason fantasy.FinishReason, _ fantasy.ProviderMetadata) error {
 			fmt.Printf("📊 Stream finished (reason: %s, tokens: %d)\n", finishReason, usage.TotalTokens)
 			return nil
 		},

model.go 🔗

@@ -6,6 +6,7 @@ import (
 	"iter"
 )
 
+// Usage represents token usage statistics for a model call.
 type Usage struct {
 	InputTokens         int64 `json:"input_tokens"`
 	OutputTokens        int64 `json:"output_tokens"`
@@ -26,8 +27,10 @@ func (u Usage) String() string {
 	)
 }
 
+// ResponseContent represents the content of a model response.
 type ResponseContent []Content
 
+// Text returns the text content of the response.
 func (r ResponseContent) Text() string {
 	for _, c := range r {
 		if c.GetType() == ContentTypeText {
@@ -111,6 +114,7 @@ func (r ResponseContent) ToolResults() []ToolResultContent {
 	return toolResults
 }
 
+// Response represents a response from a language model.
 type Response struct {
 	Content      ResponseContent `json:"content"`
 	FinishReason FinishReason    `json:"finish_reason"`
@@ -121,27 +125,44 @@ type Response struct {
 	ProviderMetadata ProviderMetadata `json:"provider_metadata"`
 }
 
+// StreamPartType represents the type of a stream part.
 type StreamPartType string
 
 const (
-	StreamPartTypeWarnings  StreamPartType = "warnings"
+	// StreamPartTypeWarnings represents warnings stream part type.
+	StreamPartTypeWarnings StreamPartType = "warnings"
+	// StreamPartTypeTextStart represents text start stream part type.
 	StreamPartTypeTextStart StreamPartType = "text_start"
+	// StreamPartTypeTextDelta represents text delta stream part type.
 	StreamPartTypeTextDelta StreamPartType = "text_delta"
-	StreamPartTypeTextEnd   StreamPartType = "text_end"
+	// StreamPartTypeTextEnd represents text end stream part type.
+	StreamPartTypeTextEnd StreamPartType = "text_end"
 
+	// StreamPartTypeReasoningStart represents reasoning start stream part type.
 	StreamPartTypeReasoningStart StreamPartType = "reasoning_start"
+	// StreamPartTypeReasoningDelta represents reasoning delta stream part type.
 	StreamPartTypeReasoningDelta StreamPartType = "reasoning_delta"
-	StreamPartTypeReasoningEnd   StreamPartType = "reasoning_end"
+	// StreamPartTypeReasoningEnd represents reasoning end stream part type.
+	StreamPartTypeReasoningEnd StreamPartType = "reasoning_end"
+	// StreamPartTypeToolInputStart represents tool input start stream part type.
 	StreamPartTypeToolInputStart StreamPartType = "tool_input_start"
+	// StreamPartTypeToolInputDelta represents tool input delta stream part type.
 	StreamPartTypeToolInputDelta StreamPartType = "tool_input_delta"
-	StreamPartTypeToolInputEnd   StreamPartType = "tool_input_end"
-	StreamPartTypeToolCall       StreamPartType = "tool_call"
-	StreamPartTypeToolResult     StreamPartType = "tool_result"
-	StreamPartTypeSource         StreamPartType = "source"
-	StreamPartTypeFinish         StreamPartType = "finish"
-	StreamPartTypeError          StreamPartType = "error"
+	// StreamPartTypeToolInputEnd represents tool input end stream part type.
+	StreamPartTypeToolInputEnd StreamPartType = "tool_input_end"
+	// StreamPartTypeToolCall represents tool call stream part type.
+	StreamPartTypeToolCall StreamPartType = "tool_call"
+	// StreamPartTypeToolResult represents tool result stream part type.
+	StreamPartTypeToolResult StreamPartType = "tool_result"
+	// StreamPartTypeSource represents source stream part type.
+	StreamPartTypeSource StreamPartType = "source"
+	// StreamPartTypeFinish represents finish stream part type.
+	StreamPartTypeFinish StreamPartType = "finish"
+	// StreamPartTypeError represents error stream part type.
+	StreamPartTypeError StreamPartType = "error"
 )
 
+// StreamPart represents a part of a streaming response.
 type StreamPart struct {
 	Type             StreamPartType `json:"type"`
 	ID               string         `json:"id"`
@@ -161,20 +182,28 @@ type StreamPart struct {
 
 	ProviderMetadata ProviderMetadata `json:"provider_metadata"`
 }
+
+// StreamResponse represents a streaming response sequence.
 type StreamResponse = iter.Seq[StreamPart]
 
+// ToolChoice represents the tool choice preference for a model call.
 type ToolChoice string
 
 const (
-	ToolChoiceNone     ToolChoice = "none"
-	ToolChoiceAuto     ToolChoice = "auto"
+	// ToolChoiceNone indicates no tools should be used.
+	ToolChoiceNone ToolChoice = "none"
+	// ToolChoiceAuto indicates tools should be used automatically.
+	ToolChoiceAuto ToolChoice = "auto"
+	// ToolChoiceRequired indicates tools are required.
 	ToolChoiceRequired ToolChoice = "required"
 )
 
+// SpecificToolChoice creates a tool choice for a specific tool name.
 func SpecificToolChoice(name string) ToolChoice {
 	return ToolChoice(name)
 }
 
+// Call represents a call to a language model.
 type Call struct {
 	Prompt           Prompt      `json:"prompt"`
 	MaxOutputTokens  *int64      `json:"max_output_tokens"`
@@ -190,6 +219,7 @@ type Call struct {
 	ProviderOptions ProviderOptions `json:"provider_options"`
 }
 
+// CallWarningType represents the type of call warning.
 // CallWarningType represents the type of call warning.
 type CallWarningType string
 
@@ -213,6 +243,7 @@ type CallWarning struct {
 	Message string          `json:"message"`
 }
 
+// LanguageModel represents a language model that can generate responses and stream responses.
 type LanguageModel interface {
 	Generate(context.Context, Call) (*Response, error)
 	Stream(context.Context, Call) (StreamResponse, error)
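
A minimal sketch of consuming a StreamResponse with the stream part types documented above; it only touches fields visible in this change (Type, Error) and takes the model as a parameter:

import (
	"context"
	"fmt"

	"charm.land/fantasy"
)

// printStream drains a stream and reacts to a couple of the documented part types.
func printStream(ctx context.Context, model fantasy.LanguageModel) error {
	stream, err := model.Stream(ctx, fantasy.Call{
		Prompt: fantasy.Prompt{fantasy.NewUserMessage("Hello!")},
	})
	if err != nil {
		return err
	}
	for part := range stream { // StreamResponse is an iter.Seq[StreamPart]
		switch part.Type {
		case fantasy.StreamPartTypeError:
			return part.Error
		case fantasy.StreamPartTypeFinish:
			fmt.Println("stream finished")
		default:
			// text, reasoning, and tool parts arrive as the other
			// StreamPartType* values documented above
		}
	}
	return nil
}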

provider.go 🔗

@@ -1,5 +1,6 @@
 package fantasy
 
+// Provider represents a provider of language models.
 type Provider interface {
 	Name() string
 	LanguageModel(modelID string) (LanguageModel, error)

providers/anthropic/anthropic.go 🔗

@@ -1,3 +1,4 @@
+// Package anthropic provides an implementation of the fantasy AI SDK for Anthropic's language models.
 package anthropic
 
 import (
@@ -21,7 +22,9 @@ import (
 )
 
 const (
-	Name       = "anthropic"
+	// Name is the name of the Anthropic provider.
+	Name = "anthropic"
+	// DefaultURL is the default URL for the Anthropic API.
 	DefaultURL = "https://api.anthropic.com"
 )
 
@@ -43,8 +46,10 @@ type provider struct {
 	options options
 }
 
+// Option defines a function that configures Anthropic provider options.
 type Option = func(*options)
 
+// New creates a new Anthropic provider with the given options.
 func New(opts ...Option) fantasy.Provider {
 	providerOptions := options{
 		headers: map[string]string{},
@@ -58,18 +63,21 @@ func New(opts ...Option) fantasy.Provider {
 	return &provider{options: providerOptions}
 }
 
+// WithBaseURL sets the base URL for the Anthropic provider.
 func WithBaseURL(baseURL string) Option {
 	return func(o *options) {
 		o.baseURL = baseURL
 	}
 }
 
+// WithAPIKey sets the API key for the Anthropic provider.
 func WithAPIKey(apiKey string) Option {
 	return func(o *options) {
 		o.apiKey = apiKey
 	}
 }
 
+// WithVertex configures the Anthropic provider to use Vertex AI.
 func WithVertex(project, location string) Option {
 	return func(o *options) {
 		o.vertexProject = project
@@ -77,30 +85,35 @@ func WithVertex(project, location string) Option {
 	}
 }
 
+// WithSkipAuth configures whether to skip authentication for the Anthropic provider.
 func WithSkipAuth(skip bool) Option {
 	return func(o *options) {
 		o.skipAuth = skip
 	}
 }
 
+// WithBedrock configures the Anthropic provider to use AWS Bedrock.
 func WithBedrock() Option {
 	return func(o *options) {
 		o.useBedrock = true
 	}
 }
 
+// WithName sets the name for the Anthropic provider.
 func WithName(name string) Option {
 	return func(o *options) {
 		o.name = name
 	}
 }
 
+// WithHeaders sets the headers for the Anthropic provider.
 func WithHeaders(headers map[string]string) Option {
 	return func(o *options) {
 		maps.Copy(o.headers, headers)
 	}
 }
 
+// WithHTTPClient sets the HTTP client for the Anthropic provider.
 func WithHTTPClient(client option.HTTPClient) Option {
 	return func(o *options) {
 		o.client = client
@@ -673,7 +686,7 @@ func toPrompt(prompt fantasy.Prompt, sendReasoningData bool) ([]anthropic.TextBl
 	return systemBlocks, messages, warnings
 }
 
-func (o languageModel) handleError(err error) error {
+func (a languageModel) handleError(err error) error {
 	var apiErr *anthropic.Error
 	if errors.As(err, &apiErr) {
 		requestDump := apiErr.DumpRequest(true)
@@ -948,7 +961,7 @@ func (a languageModel) Stream(ctx context.Context, call fantasy.Call) (fantasy.S
 				ProviderMetadata: fantasy.ProviderMetadata{},
 			})
 			return
-		} else {
+		} else { //nolint: revive
 			yield(fantasy.StreamPart{
 				Type:  fantasy.StreamPartTypeError,
 				Error: a.handleError(err),

providers/anthropic/provider_options.go 🔗

@@ -1,48 +1,60 @@
+// Package anthropic provides an implementation of the fantasy AI SDK for Anthropic's language models.
 package anthropic
 
 import "charm.land/fantasy"
 
+// ProviderOptions represents additional options for the Anthropic provider.
 type ProviderOptions struct {
 	SendReasoning          *bool                   `json:"send_reasoning"`
 	Thinking               *ThinkingProviderOption `json:"thinking"`
 	DisableParallelToolUse *bool                   `json:"disable_parallel_tool_use"`
 }
 
+// Options implements the ProviderOptions interface.
 func (o *ProviderOptions) Options() {}
 
+// ThinkingProviderOption represents thinking options for the Anthropic provider.
 type ThinkingProviderOption struct {
 	BudgetTokens int64 `json:"budget_tokens"`
 }
 
+// ReasoningOptionMetadata represents reasoning metadata for the Anthropic provider.
 type ReasoningOptionMetadata struct {
 	Signature    string `json:"signature"`
 	RedactedData string `json:"redacted_data"`
 }
 
+// Options implements the ProviderOptions interface.
 func (*ReasoningOptionMetadata) Options() {}
 
+// ProviderCacheControlOptions represents cache control options for the Anthropic provider.
 type ProviderCacheControlOptions struct {
 	CacheControl CacheControl `json:"cache_control"`
 }
 
+// Options implements the ProviderOptions interface.
 func (*ProviderCacheControlOptions) Options() {}
 
+// CacheControl represents cache control settings for the Anthropic provider.
 type CacheControl struct {
 	Type string `json:"type"`
 }
 
+// NewProviderOptions creates new provider options for the Anthropic provider.
 func NewProviderOptions(opts *ProviderOptions) fantasy.ProviderOptions {
 	return fantasy.ProviderOptions{
 		Name: opts,
 	}
 }
 
+// NewProviderCacheControlOptions creates new cache control options for the Anthropic provider.
 func NewProviderCacheControlOptions(opts *ProviderCacheControlOptions) fantasy.ProviderOptions {
 	return fantasy.ProviderOptions{
 		Name: opts,
 	}
 }
 
+// ParseOptions parses provider options from a map for the Anthropic provider.
 func ParseOptions(data map[string]any) (*ProviderOptions, error) {
 	var options ProviderOptions
 	if err := fantasy.ParseOptions(data, &options); err != nil {
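
Taken together with the constructor options from anthropic.go above, these provider options might be wired up roughly as follows; the import path, env var, model ID, and thinking budget are illustrative assumptions:

import (
	"os"

	"charm.land/fantasy"
	"charm.land/fantasy/providers/anthropic" // assumed import path for the provider sub-package
)

func newAnthropicModel() (fantasy.LanguageModel, fantasy.ProviderOptions, error) {
	provider := anthropic.New(
		anthropic.WithAPIKey(os.Getenv("ANTHROPIC_API_KEY")), // placeholder env var
	)
	model, err := provider.LanguageModel("claude-sonnet-4") // placeholder model ID
	if err != nil {
		return nil, nil, err
	}
	// Enable extended thinking with a token budget via the documented options.
	opts := anthropic.NewProviderOptions(&anthropic.ProviderOptions{
		Thinking: &anthropic.ThinkingProviderOption{BudgetTokens: 2048},
	})
	return model, opts, nil
}

The returned options can then be supplied per call via fantasy.Call.ProviderOptions or agent-wide via fantasy.WithProviderOptions.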

providers/azure/azure.go 🔗

@@ -1,3 +1,4 @@
+// Package azure provides an implementation of the fantasy AI SDK for Azure's language models.
 package azure
 
 import (
@@ -16,12 +17,16 @@ type options struct {
 }
 
 const (
-	Name              = "azure"
+	// Name is the name of the Azure provider.
+	Name = "azure"
+	// defaultAPIVersion is the default API version for Azure.
 	defaultAPIVersion = "2025-01-01-preview"
 )
 
+// Option defines a function that configures Azure provider options.
 type Option = func(*options)
 
+// New creates a new Azure provider with the given options.
 func New(opts ...Option) fantasy.Provider {
 	o := options{
 		apiVersion: defaultAPIVersion,
@@ -41,30 +46,35 @@ func New(opts ...Option) fantasy.Provider {
 	)
 }
 
+// WithBaseURL sets the base URL for the Azure provider.
 func WithBaseURL(baseURL string) Option {
 	return func(o *options) {
 		o.baseURL = baseURL
 	}
 }
 
+// WithAPIKey sets the API key for the Azure provider.
 func WithAPIKey(apiKey string) Option {
 	return func(o *options) {
 		o.apiKey = apiKey
 	}
 }
 
+// WithHeaders sets the headers for the Azure provider.
 func WithHeaders(headers map[string]string) Option {
 	return func(o *options) {
 		o.openaiOptions = append(o.openaiOptions, openaicompat.WithHeaders(headers))
 	}
 }
 
+// WithAPIVersion sets the API version for the Azure provider.
 func WithAPIVersion(version string) Option {
 	return func(o *options) {
 		o.apiVersion = version
 	}
 }
 
+// WithHTTPClient sets the HTTP client for the Azure provider.
 func WithHTTPClient(client option.HTTPClient) Option {
 	return func(o *options) {
 		o.openaiOptions = append(o.openaiOptions, openaicompat.WithHTTPClient(client))

providers/bedrock/bedrock.go 🔗

@@ -1,3 +1,4 @@
+// Package bedrock provides an implementation of the fantasy AI SDK for AWS Bedrock's language models.
 package bedrock
 
 import (
@@ -12,11 +13,14 @@ type options struct {
 }
 
 const (
+	// Name is the name of the Bedrock provider.
 	Name = "bedrock"
 )
 
+// Option defines a function that configures Bedrock provider options.
 type Option = func(*options)
 
+// New creates a new Bedrock provider with the given options.
 func New(opts ...Option) fantasy.Provider {
 	var o options
 	for _, opt := range opts {
@@ -32,18 +36,21 @@ func New(opts ...Option) fantasy.Provider {
 	)
 }
 
+// WithHeaders sets the headers for the Bedrock provider.
 func WithHeaders(headers map[string]string) Option {
 	return func(o *options) {
 		o.anthropicOptions = append(o.anthropicOptions, anthropic.WithHeaders(headers))
 	}
 }
 
+// WithHTTPClient sets the HTTP client for the Bedrock provider.
 func WithHTTPClient(client option.HTTPClient) Option {
 	return func(o *options) {
 		o.anthropicOptions = append(o.anthropicOptions, anthropic.WithHTTPClient(client))
 	}
 }
 
+// WithSkipAuth configures whether to skip authentication for the Bedrock provider.
 func WithSkipAuth(skipAuth bool) Option {
 	return func(o *options) {
 		o.skipAuth = skipAuth

providers/google/auth.go 🔗

@@ -1,3 +1,4 @@
+// Package google provides an implementation of the fantasy AI SDK for Google's language models.
 package google
 
 import (
@@ -8,6 +9,7 @@ import (
 
 type dummyTokenProvider struct{}
 
-func (dummyTokenProvider) Token(ctx context.Context) (*auth.Token, error) {
+// Token implements the auth.TokenProvider interface.
+func (dummyTokenProvider) Token(_ context.Context) (*auth.Token, error) {
 	return &auth.Token{Value: "dummy-token"}, nil
 }

providers/google/google.go 🔗

@@ -19,6 +19,7 @@ import (
 	"google.golang.org/genai"
 )
 
+// Name is the name of the Google provider.
 const Name = "google"
 
 type provider struct {
@@ -37,8 +38,10 @@ type options struct {
 	skipAuth bool
 }
 
+// Option defines a function that configures Google provider options.
 type Option = func(*options)
 
+// New creates a new Google provider with the given options.
 func New(opts ...Option) fantasy.Provider {
 	options := options{
 		headers: map[string]string{},
@@ -54,12 +57,14 @@ func New(opts ...Option) fantasy.Provider {
 	}
 }
 
+// WithBaseURL sets the base URL for the Google provider.
 func WithBaseURL(baseURL string) Option {
 	return func(o *options) {
 		o.baseURL = baseURL
 	}
 }
 
+// WithGeminiAPIKey sets the Gemini API key for the Google provider.
 func WithGeminiAPIKey(apiKey string) Option {
 	return func(o *options) {
 		o.backend = genai.BackendGeminiAPI
@@ -69,6 +74,7 @@ func WithGeminiAPIKey(apiKey string) Option {
 	}
 }
 
+// WithVertex configures the Google provider to use Vertex AI.
 func WithVertex(project, location string) Option {
 	if project == "" || location == "" {
 		panic("project and location must be provided")
@@ -81,24 +87,28 @@ func WithVertex(project, location string) Option {
 	}
 }
 
+// WithSkipAuth configures whether to skip authentication for the Google provider.
 func WithSkipAuth(skipAuth bool) Option {
 	return func(o *options) {
 		o.skipAuth = skipAuth
 	}
 }
 
+// WithName sets the name for the Google provider.
 func WithName(name string) Option {
 	return func(o *options) {
 		o.name = name
 	}
 }
 
+// WithHeaders sets the headers for the Google provider.
 func WithHeaders(headers map[string]string) Option {
 	return func(o *options) {
 		maps.Copy(o.headers, headers)
 	}
 }
 
+// WithHTTPClient sets the HTTP client for the Google provider.
 func WithHTTPClient(client *http.Client) Option {
 	return func(o *options) {
 		o.client = client
@@ -117,33 +127,33 @@ type languageModel struct {
 }
 
 // LanguageModel implements fantasy.Provider.
-func (g *provider) LanguageModel(modelID string) (fantasy.LanguageModel, error) {
+func (a *provider) LanguageModel(modelID string) (fantasy.LanguageModel, error) {
 	if strings.Contains(modelID, "anthropic") || strings.Contains(modelID, "claude") {
 		return anthropic.New(
-			anthropic.WithVertex(g.options.project, g.options.location),
-			anthropic.WithHTTPClient(g.options.client),
-			anthropic.WithSkipAuth(g.options.skipAuth),
+			anthropic.WithVertex(a.options.project, a.options.location),
+			anthropic.WithHTTPClient(a.options.client),
+			anthropic.WithSkipAuth(a.options.skipAuth),
 		).LanguageModel(modelID)
 	}
 
 	cc := &genai.ClientConfig{
-		HTTPClient: g.options.client,
-		Backend:    g.options.backend,
-		APIKey:     g.options.apiKey,
-		Project:    g.options.project,
-		Location:   g.options.location,
+		HTTPClient: a.options.client,
+		Backend:    a.options.backend,
+		APIKey:     a.options.apiKey,
+		Project:    a.options.project,
+		Location:   a.options.location,
 	}
-	if g.options.skipAuth {
+	if a.options.skipAuth {
 		cc.Credentials = &auth.Credentials{TokenProvider: dummyTokenProvider{}}
 	}
 
-	if g.options.baseURL != "" || len(g.options.headers) > 0 {
+	if a.options.baseURL != "" || len(a.options.headers) > 0 {
 		headers := http.Header{}
-		for k, v := range g.options.headers {
+		for k, v := range a.options.headers {
 			headers.Add(k, v)
 		}
 		cc.HTTPOptions = genai.HTTPOptions{
-			BaseURL: g.options.baseURL,
+			BaseURL: a.options.baseURL,
 			Headers: headers,
 		}
 	}
@@ -153,13 +163,13 @@ func (g *provider) LanguageModel(modelID string) (fantasy.LanguageModel, error)
 	}
 	return &languageModel{
 		modelID:         modelID,
-		provider:        g.options.name,
-		providerOptions: g.options,
+		provider:        a.options.name,
+		providerOptions: a.options,
 		client:          client,
 	}, nil
 }
 
-func (a languageModel) prepareParams(call fantasy.Call) (*genai.GenerateContentConfig, []*genai.Content, []fantasy.CallWarning, error) {
+func (g languageModel) prepareParams(call fantasy.Call) (*genai.GenerateContentConfig, []*genai.Content, []fantasy.CallWarning, error) {
 	config := &genai.GenerateContentConfig{}
 
 	providerOptions := &ProviderOptions{}
@@ -175,12 +185,12 @@ func (a languageModel) prepareParams(call fantasy.Call) (*genai.GenerateContentC
 	if providerOptions.ThinkingConfig != nil {
 		if providerOptions.ThinkingConfig.IncludeThoughts != nil &&
 			*providerOptions.ThinkingConfig.IncludeThoughts &&
-			strings.HasPrefix(a.provider, "google.vertex.") {
+			strings.HasPrefix(g.provider, "google.vertex.") {
 			warnings = append(warnings, fantasy.CallWarning{
 				Type: fantasy.CallWarningTypeOther,
 				Message: "The 'includeThoughts' option is only supported with the Google Vertex provider " +
 					"and might not be supported or could behave unexpectedly with the current Google provider " +
-					fmt.Sprintf("(%s)", a.provider),
+					fmt.Sprintf("(%s)", g.provider),
 			})
 		}
 
@@ -194,7 +204,7 @@ func (a languageModel) prepareParams(call fantasy.Call) (*genai.GenerateContentC
 		}
 	}
 
-	isGemmaModel := strings.HasPrefix(strings.ToLower(a.modelID), "gemma-")
+	isGemmaModel := strings.HasPrefix(strings.ToLower(g.modelID), "gemma-")
 
 	if isGemmaModel && systemInstructions != nil && len(systemInstructions.Parts) > 0 {
 		if len(content) > 0 && content[0].Role == genai.RoleUser {

providers/google/provider_options.go 🔗

@@ -1,12 +1,15 @@
+// Package google provides an implementation of the fantasy AI SDK for Google's language models.
 package google
 
 import "charm.land/fantasy"
 
+// ThinkingConfig represents thinking configuration for the Google provider.
 type ThinkingConfig struct {
 	ThinkingBudget  *int64 `json:"thinking_budget"`
 	IncludeThoughts *bool  `json:"include_thoughts"`
 }
 
+// SafetySetting represents safety settings for the Google provider.
 type SafetySetting struct {
 	// 'HARM_CATEGORY_UNSPECIFIED',
 	// 'HARM_CATEGORY_HATE_SPEECH',
@@ -24,6 +27,8 @@ type SafetySetting struct {
 	// 'OFF',
 	Threshold string `json:"threshold"`
 }
+
+// ProviderOptions represents additional options for the Google provider.
 type ProviderOptions struct {
 	ThinkingConfig *ThinkingConfig `json:"thinking_config"`
 
@@ -43,8 +48,10 @@ type ProviderOptions struct {
 	Threshold string `json:"threshold"`
 }
 
+// Options implements the ProviderOptionsData interface for ProviderOptions.
 func (o *ProviderOptions) Options() {}
 
+// ParseOptions parses provider options from a map for the Google provider.
 func ParseOptions(data map[string]any) (*ProviderOptions, error) {
 	var options ProviderOptions
 	if err := fantasy.ParseOptions(data, &options); err != nil {

providers/openai/language_model.go 🔗

@@ -30,38 +30,45 @@ type languageModel struct {
 	streamProviderMetadataFunc LanguageModelStreamProviderMetadataFunc
 }
 
+// LanguageModelOption is a function that configures a languageModel.
 type LanguageModelOption = func(*languageModel)
 
+// WithLanguageModelPrepareCallFunc sets the prepare call function for the language model.
 func WithLanguageModelPrepareCallFunc(fn LanguageModelPrepareCallFunc) LanguageModelOption {
 	return func(l *languageModel) {
 		l.prepareCallFunc = fn
 	}
 }
 
+// WithLanguageModelMapFinishReasonFunc sets the map finish reason function for the language model.
 func WithLanguageModelMapFinishReasonFunc(fn LanguageModelMapFinishReasonFunc) LanguageModelOption {
 	return func(l *languageModel) {
 		l.mapFinishReasonFunc = fn
 	}
 }
 
+// WithLanguageModelExtraContentFunc sets the extra content function for the language model.
 func WithLanguageModelExtraContentFunc(fn LanguageModelExtraContentFunc) LanguageModelOption {
 	return func(l *languageModel) {
 		l.extraContentFunc = fn
 	}
 }
 
+// WithLanguageModelStreamExtraFunc sets the stream extra function for the language model.
 func WithLanguageModelStreamExtraFunc(fn LanguageModelStreamExtraFunc) LanguageModelOption {
 	return func(l *languageModel) {
 		l.streamExtraFunc = fn
 	}
 }
 
+// WithLanguageModelUsageFunc sets the usage function for the language model.
 func WithLanguageModelUsageFunc(fn LanguageModelUsageFunc) LanguageModelOption {
 	return func(l *languageModel) {
 		l.usageFunc = fn
 	}
 }
 
+// WithLanguageModelStreamUsageFunc sets the stream usage function for the language model.
 func WithLanguageModelStreamUsageFunc(fn LanguageModelStreamUsageFunc) LanguageModelOption {
 	return func(l *languageModel) {
 		l.streamUsageFunc = fn
@@ -546,7 +553,7 @@ func (o languageModel) Stream(ctx context.Context, call fantasy.Call) (fantasy.S
 				ProviderMetadata: providerMetadata,
 			})
 			return
-		} else {
+		} else { //nolint: revive
 			yield(fantasy.StreamPart{
 				Type:  fantasy.StreamPartTypeError,
 				Error: o.handleError(err),

providers/openai/language_model_hooks.go 🔗

@@ -9,16 +9,28 @@ import (
 	"github.com/openai/openai-go/v2/shared"
 )
 
-type (
-	LanguageModelPrepareCallFunc            = func(model fantasy.LanguageModel, params *openai.ChatCompletionNewParams, call fantasy.Call) ([]fantasy.CallWarning, error)
-	LanguageModelMapFinishReasonFunc        = func(finishReason string) fantasy.FinishReason
-	LanguageModelUsageFunc                  = func(choice openai.ChatCompletion) (fantasy.Usage, fantasy.ProviderOptionsData)
-	LanguageModelExtraContentFunc           = func(choice openai.ChatCompletionChoice) []fantasy.Content
-	LanguageModelStreamExtraFunc            = func(chunk openai.ChatCompletionChunk, yield func(fantasy.StreamPart) bool, ctx map[string]any) (map[string]any, bool)
-	LanguageModelStreamUsageFunc            = func(chunk openai.ChatCompletionChunk, ctx map[string]any, metadata fantasy.ProviderMetadata) (fantasy.Usage, fantasy.ProviderMetadata)
-	LanguageModelStreamProviderMetadataFunc = func(choice openai.ChatCompletionChoice, metadata fantasy.ProviderMetadata) fantasy.ProviderMetadata
-)
+// LanguageModelPrepareCallFunc is a function that prepares the call for the language model.
+type LanguageModelPrepareCallFunc = func(model fantasy.LanguageModel, params *openai.ChatCompletionNewParams, call fantasy.Call) ([]fantasy.CallWarning, error)
+
+// LanguageModelMapFinishReasonFunc is a function that maps the finish reason for the language model.
+type LanguageModelMapFinishReasonFunc = func(finishReason string) fantasy.FinishReason
+
+// LanguageModelUsageFunc is a function that calculates usage for the language model.
+type LanguageModelUsageFunc = func(choice openai.ChatCompletion) (fantasy.Usage, fantasy.ProviderOptionsData)
+
+// LanguageModelExtraContentFunc is a function that adds extra content for the language model.
+type LanguageModelExtraContentFunc = func(choice openai.ChatCompletionChoice) []fantasy.Content
+
+// LanguageModelStreamExtraFunc is a function that handles stream extra functionality for the language model.
+type LanguageModelStreamExtraFunc = func(chunk openai.ChatCompletionChunk, yield func(fantasy.StreamPart) bool, ctx map[string]any) (map[string]any, bool)
+
+// LanguageModelStreamUsageFunc is a function that calculates stream usage for the language model.
+type LanguageModelStreamUsageFunc = func(chunk openai.ChatCompletionChunk, ctx map[string]any, metadata fantasy.ProviderMetadata) (fantasy.Usage, fantasy.ProviderMetadata)
+
+// LanguageModelStreamProviderMetadataFunc is a function that handles stream provider metadata for the language model.
+type LanguageModelStreamProviderMetadataFunc = func(choice openai.ChatCompletionChoice, metadata fantasy.ProviderMetadata) fantasy.ProviderMetadata
 
+// DefaultPrepareCallFunc is the default implementation for preparing a call to the language model.
 func DefaultPrepareCallFunc(model fantasy.LanguageModel, params *openai.ChatCompletionNewParams, call fantasy.Call) ([]fantasy.CallWarning, error) {
 	if call.ProviderOptions == nil {
 		return nil, nil
@@ -156,6 +168,7 @@ func DefaultPrepareCallFunc(model fantasy.LanguageModel, params *openai.ChatComp
 	return warnings, nil
 }
 
+// DefaultMapFinishReasonFunc is the default implementation for mapping finish reasons.
 func DefaultMapFinishReasonFunc(finishReason string) fantasy.FinishReason {
 	switch finishReason {
 	case "stop":
@@ -171,6 +184,7 @@ func DefaultMapFinishReasonFunc(finishReason string) fantasy.FinishReason {
 	}
 }
 
+// DefaultUsageFunc is the default implementation for calculating usage.
 func DefaultUsageFunc(response openai.ChatCompletion) (fantasy.Usage, fantasy.ProviderOptionsData) {
 	completionTokenDetails := response.Usage.CompletionTokensDetails
 	promptTokenDetails := response.Usage.PromptTokensDetails
@@ -201,7 +215,8 @@ func DefaultUsageFunc(response openai.ChatCompletion) (fantasy.Usage, fantasy.Pr
 	}, providerMetadata
 }
 
-func DefaultStreamUsageFunc(chunk openai.ChatCompletionChunk, ctx map[string]any, metadata fantasy.ProviderMetadata) (fantasy.Usage, fantasy.ProviderMetadata) {
+// DefaultStreamUsageFunc is the default implementation for calculating stream usage.
+func DefaultStreamUsageFunc(chunk openai.ChatCompletionChunk, _ map[string]any, metadata fantasy.ProviderMetadata) (fantasy.Usage, fantasy.ProviderMetadata) {
 	if chunk.Usage.TotalTokens == 0 {
 		return fantasy.Usage{}, nil
 	}
@@ -240,6 +255,7 @@ func DefaultStreamUsageFunc(chunk openai.ChatCompletionChunk, ctx map[string]any
 	}
 }
 
+// DefaultStreamProviderMetadataFunc is the default implementation for handling stream provider metadata.
 func DefaultStreamProviderMetadataFunc(choice openai.ChatCompletionChoice, metadata fantasy.ProviderMetadata) fantasy.ProviderMetadata {
 	streamProviderMetadata, ok := metadata[Name]
 	if !ok {
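
As a sketch of how these hook types compose, a custom finish-reason mapper can wrap the package default (the "done" value is an illustrative, provider-specific assumption; imports of charm.land/fantasy and this openai package are assumed):

// customFinishReason handles one extra value and defers to the default otherwise.
var customFinishReason openai.LanguageModelMapFinishReasonFunc = func(finishReason string) fantasy.FinishReason {
	if finishReason == "done" { // illustrative value not covered by the default mapping
		return fantasy.FinishReasonStop
	}
	return openai.DefaultMapFinishReasonFunc(finishReason)
}

It could then be attached when constructing the provider, e.g. openai.New(openai.WithLanguageModelOptions(openai.WithLanguageModelMapFinishReasonFunc(customFinishReason))), using the options shown in openai.go below.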

providers/openai/openai.go 🔗

@@ -1,4 +1,4 @@
-// Package openai contains the openai provider
+// Package openai provides an implementation of the fantasy AI SDK for OpenAI's language models.
 package openai
 
 import (
@@ -11,7 +11,9 @@ import (
 )
 
 const (
-	Name       = "openai"
+	// Name is the name of the OpenAI provider.
+	Name = "openai"
+	// DefaultURL is the default URL for the OpenAI API.
 	DefaultURL = "https://api.openai.com/v1"
 )
 
@@ -32,8 +34,10 @@ type options struct {
 	languageModelOptions []LanguageModelOption
 }
 
+// Option defines a function that configures OpenAI provider options.
 type Option = func(*options)
 
+// New creates a new OpenAI provider with the given options.
 func New(opts ...Option) fantasy.Provider {
 	providerOptions := options{
 		headers:              map[string]string{},
@@ -56,61 +60,70 @@ func New(opts ...Option) fantasy.Provider {
 	return &provider{options: providerOptions}
 }
 
+// WithBaseURL sets the base URL for the OpenAI provider.
 func WithBaseURL(baseURL string) Option {
 	return func(o *options) {
 		o.baseURL = baseURL
 	}
 }
 
+// WithAPIKey sets the API key for the OpenAI provider.
 func WithAPIKey(apiKey string) Option {
 	return func(o *options) {
 		o.apiKey = apiKey
 	}
 }
 
+// WithOrganization sets the organization for the OpenAI provider.
 func WithOrganization(organization string) Option {
 	return func(o *options) {
 		o.organization = organization
 	}
 }
 
+// WithProject sets the project for the OpenAI provider.
 func WithProject(project string) Option {
 	return func(o *options) {
 		o.project = project
 	}
 }
 
+// WithName sets the name for the OpenAI provider.
 func WithName(name string) Option {
 	return func(o *options) {
 		o.name = name
 	}
 }
 
+// WithHeaders sets the headers for the OpenAI provider.
 func WithHeaders(headers map[string]string) Option {
 	return func(o *options) {
 		maps.Copy(o.headers, headers)
 	}
 }
 
+// WithHTTPClient sets the HTTP client for the OpenAI provider.
 func WithHTTPClient(client option.HTTPClient) Option {
 	return func(o *options) {
 		o.client = client
 	}
 }
 
+// WithSDKOptions sets the SDK options for the OpenAI provider.
 func WithSDKOptions(opts ...option.RequestOption) Option {
 	return func(o *options) {
 		o.sdkOptions = append(o.sdkOptions, opts...)
 	}
 }
 
+// WithLanguageModelOptions sets the language model options for the OpenAI provider.
 func WithLanguageModelOptions(opts ...LanguageModelOption) Option {
 	return func(o *options) {
 		o.languageModelOptions = append(o.languageModelOptions, opts...)
 	}
 }
 
-// WithUseResponsesAPI makes it so the provider uses responses API for models that support it.
+// WithUseResponsesAPI configures the provider to use the responses API for models that support it.
 func WithUseResponsesAPI() Option {
 	return func(o *options) {
 		o.useResponsesAPI = true
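
A minimal sketch of the documented constructor options, opting in to the responses API; the import path, env var, and model ID are illustrative assumptions:

import (
	"os"

	"charm.land/fantasy"
	"charm.land/fantasy/providers/openai" // assumed import path for the provider sub-package
)

func newOpenAIModel() (fantasy.LanguageModel, error) {
	provider := openai.New(
		openai.WithAPIKey(os.Getenv("OPENAI_API_KEY")), // placeholder env var
		openai.WithUseResponsesAPI(),
	)
	return provider.LanguageModel("gpt-4.1") // one of the listed responses models
}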

providers/openai/provider_options.go 🔗

@@ -1,3 +1,4 @@
+// Package openai provides an implementation of the fantasy AI SDK for OpenAI's language models.
 package openai
 
 import (
@@ -5,23 +6,31 @@ import (
 	"github.com/openai/openai-go/v2"
 )
 
+// ReasoningEffort represents the reasoning effort level for OpenAI models.
 type ReasoningEffort string
 
 const (
+	// ReasoningEffortMinimal represents minimal reasoning effort.
 	ReasoningEffortMinimal ReasoningEffort = "minimal"
-	ReasoningEffortLow     ReasoningEffort = "low"
-	ReasoningEffortMedium  ReasoningEffort = "medium"
-	ReasoningEffortHigh    ReasoningEffort = "high"
+	// ReasoningEffortLow represents low reasoning effort.
+	ReasoningEffortLow ReasoningEffort = "low"
+	// ReasoningEffortMedium represents medium reasoning effort.
+	ReasoningEffortMedium ReasoningEffort = "medium"
+	// ReasoningEffortHigh represents high reasoning effort.
+	ReasoningEffortHigh ReasoningEffort = "high"
 )
 
+// ProviderMetadata represents additional metadata from OpenAI provider.
 type ProviderMetadata struct {
 	Logprobs                 []openai.ChatCompletionTokenLogprob `json:"logprobs"`
 	AcceptedPredictionTokens int64                               `json:"accepted_prediction_tokens"`
 	RejectedPredictionTokens int64                               `json:"rejected_prediction_tokens"`
 }
 
+// Options implements the ProviderOptions interface.
 func (*ProviderMetadata) Options() {}
 
+// ProviderOptions represents additional options for OpenAI provider.
 type ProviderOptions struct {
 	LogitBias           map[string]int64 `json:"logit_bias"`
 	LogProbs            *bool            `json:"log_probs"`
@@ -40,30 +49,37 @@ type ProviderOptions struct {
 	StructuredOutputs   *bool            `json:"structured_outputs"`
 }
 
+// Options implements the ProviderOptions interface.
 func (*ProviderOptions) Options() {}
 
+// ProviderFileOptions represents file options for OpenAI provider.
 type ProviderFileOptions struct {
 	ImageDetail string `json:"image_detail"`
 }
 
+// Options implements the ProviderOptions interface.
 func (*ProviderFileOptions) Options() {}
 
+// ReasoningEffortOption creates a pointer to a ReasoningEffort value.
 func ReasoningEffortOption(e ReasoningEffort) *ReasoningEffort {
 	return &e
 }
 
+// NewProviderOptions creates new provider options for OpenAI.
 func NewProviderOptions(opts *ProviderOptions) fantasy.ProviderOptions {
 	return fantasy.ProviderOptions{
 		Name: opts,
 	}
 }
 
+// NewProviderFileOptions creates new file options for OpenAI.
 func NewProviderFileOptions(opts *ProviderFileOptions) fantasy.ProviderOptions {
 	return fantasy.ProviderOptions{
 		Name: opts,
 	}
 }
 
+// ParseOptions parses provider options from a map.
 func ParseOptions(data map[string]any) (*ProviderOptions, error) {
 	var options ProviderOptions
 	if err := fantasy.ParseOptions(data, &options); err != nil {

providers/openai/responses_options.go 🔗

@@ -1,3 +1,4 @@
+// Package openai provides an implementation of the fantasy AI SDK for OpenAI's language models.
 package openai
 
 import (
@@ -6,38 +7,53 @@ import (
 	"charm.land/fantasy"
 )
 
+// ResponsesReasoningMetadata represents reasoning metadata for OpenAI Responses API.
 type ResponsesReasoningMetadata struct {
 	ItemID           string   `json:"item_id"`
 	EncryptedContent *string  `json:"encrypted_content"`
 	Summary          []string `json:"summary"`
 }
 
+// Options implements the ProviderOptions interface.
 func (*ResponsesReasoningMetadata) Options() {}
 
+// IncludeType represents the type of content to include for OpenAI Responses API.
 type IncludeType string
 
 const (
+	// IncludeReasoningEncryptedContent includes encrypted reasoning content.
 	IncludeReasoningEncryptedContent IncludeType = "reasoning.encrypted_content"
-	IncludeFileSearchCallResults     IncludeType = "file_search_call.results"
+	// IncludeFileSearchCallResults includes file search call results.
+	IncludeFileSearchCallResults IncludeType = "file_search_call.results"
+	// IncludeMessageOutputTextLogprobs includes message output text log probabilities.
 	IncludeMessageOutputTextLogprobs IncludeType = "message.output_text.logprobs"
 )
 
+// ServiceTier represents the service tier for OpenAI Responses API.
 type ServiceTier string
 
 const (
-	ServiceTierAuto     ServiceTier = "auto"
-	ServiceTierFlex     ServiceTier = "flex"
+	// ServiceTierAuto represents the auto service tier.
+	ServiceTierAuto ServiceTier = "auto"
+	// ServiceTierFlex represents the flex service tier.
+	ServiceTierFlex ServiceTier = "flex"
+	// ServiceTierPriority represents the priority service tier.
 	ServiceTierPriority ServiceTier = "priority"
 )
 
+// TextVerbosity represents the text verbosity level for OpenAI Responses API.
 type TextVerbosity string
 
 const (
-	TextVerbosityLow    TextVerbosity = "low"
+	// TextVerbosityLow represents low text verbosity.
+	TextVerbosityLow TextVerbosity = "low"
+	// TextVerbosityMedium represents medium text verbosity.
 	TextVerbosityMedium TextVerbosity = "medium"
-	TextVerbosityHigh   TextVerbosity = "high"
+	// TextVerbosityHigh represents high text verbosity.
+	TextVerbosityHigh TextVerbosity = "high"
 )
 
+// ResponsesProviderOptions represents additional options for OpenAI Responses API.
 type ResponsesProviderOptions struct {
 	Include           []IncludeType    `json:"include"`
 	Instructions      *string          `json:"instructions"`
@@ -55,7 +71,8 @@ type ResponsesProviderOptions struct {
 	User              *string          `json:"user"`
 }
 
-var responsesReasoningModelIds = []string{
+// responsesReasoningModelIDs lists the model IDs that support reasoning for OpenAI Responses API.
+var responsesReasoningModelIDs = []string{
 	"o1",
 	"o1-2024-12-17",
 	"o3-mini",
@@ -74,7 +91,8 @@ var responsesReasoningModelIds = []string{
 	"gpt-5-codex",
 }
 
-var responsesModelIds = append([]string{
+// responsesModelIDs lists all model IDs for OpenAI Responses API.
+var responsesModelIDs = append([]string{
 	"gpt-4.1",
 	"gpt-4.1-2025-04-14",
 	"gpt-4.1-mini",
@@ -101,16 +119,19 @@ var responsesModelIds = append([]string{
 	"gpt-3.5-turbo-1106",
 	"chatgpt-4o-latest",
 	"gpt-5-chat-latest",
-}, responsesReasoningModelIds...)
+}, responsesReasoningModelIDs...)
 
+// Options implements the ProviderOptionsData interface.
 func (*ResponsesProviderOptions) Options() {}
 
+// NewResponsesProviderOptions creates new provider options for OpenAI Responses API.
 func NewResponsesProviderOptions(opts *ResponsesProviderOptions) fantasy.ProviderOptions {
 	return fantasy.ProviderOptions{
 		Name: opts,
 	}
 }
 
+// ParseResponsesOptions parses provider options from a map for OpenAI Responses API.
 func ParseResponsesOptions(data map[string]any) (*ResponsesProviderOptions, error) {
 	var options ResponsesProviderOptions
 	if err := fantasy.ParseOptions(data, &options); err != nil {
@@ -119,10 +140,12 @@ func ParseResponsesOptions(data map[string]any) (*ResponsesProviderOptions, erro
 	return &options, nil
 }
 
+// IsResponsesModel checks if a model ID is a Responses API model for OpenAI.
 func IsResponsesModel(modelID string) bool {
-	return slices.Contains(responsesModelIds, modelID)
+	return slices.Contains(responsesModelIDs, modelID)
 }
 
+// IsResponsesReasoningModel checks if a model ID is a Responses API reasoning model for OpenAI.
 func IsResponsesReasoningModel(modelID string) bool {
-	return slices.Contains(responsesReasoningModelIds, modelID)
+	return slices.Contains(responsesReasoningModelIDs, modelID)
 }
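
As a rough usage sketch (the surrounding request plumbing is assumed, not part of this change), the helpers above compose like this:

    if openai.IsResponsesModel("gpt-5-codex") {
        opts := openai.NewResponsesProviderOptions(&openai.ResponsesProviderOptions{
            Include:      []openai.IncludeType{openai.IncludeReasoningEncryptedContent},
            Instructions: fantasy.Opt("You are a terse assistant."),
        })
        _ = opts // attach to the call's provider options
    }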

providers/openaicompat/language_model_hooks.go 🔗

@@ -13,7 +13,8 @@ import (
 
 const reasoningStartedCtx = "reasoning_started"
 
-func PrepareCallFunc(model fantasy.LanguageModel, params *openaisdk.ChatCompletionNewParams, call fantasy.Call) ([]fantasy.CallWarning, error) {
+// PrepareCallFunc prepares the call for the language model.
+func PrepareCallFunc(_ fantasy.LanguageModel, params *openaisdk.ChatCompletionNewParams, call fantasy.Call) ([]fantasy.CallWarning, error) {
 	providerOptions := &ProviderOptions{}
 	if v, ok := call.ProviderOptions[Name]; ok {
 		providerOptions, ok = v.(*ProviderOptions)
@@ -43,6 +44,7 @@ func PrepareCallFunc(model fantasy.LanguageModel, params *openaisdk.ChatCompleti
 	return nil, nil
 }
 
+// ExtraContentFunc adds extra content to the response.
 func ExtraContentFunc(choice openaisdk.ChatCompletionChoice) []fantasy.Content {
 	var content []fantasy.Content
 	reasoningData := ReasoningData{}
@@ -70,6 +72,7 @@ func extractReasoningContext(ctx map[string]any) bool {
 	return b
 }
 
+// StreamExtraFunc handles extra functionality for streaming responses.
 func StreamExtraFunc(chunk openaisdk.ChatCompletionChunk, yield func(fantasy.StreamPart) bool, ctx map[string]any) (map[string]any, bool) {
 	if len(chunk.Choices) == 0 {
 		return ctx, true

providers/openaicompat/openaicompat.go 🔗

@@ -1,3 +1,4 @@
+// Package openaicompat provides an implementation of the fantasy AI SDK for OpenAI-compatible APIs.
 package openaicompat
 
 import (
@@ -13,11 +14,14 @@ type options struct {
 }
 
 const (
+	// Name is the name of the OpenAI-compatible provider.
 	Name = "openai-compat"
 )
 
+// Option defines a function that configures OpenAI-compatible provider options.
 type Option = func(*options)
 
+// New creates a new OpenAI-compatible provider with the given options.
 func New(opts ...Option) fantasy.Provider {
 	providerOptions := options{
 		openaiOptions: []openai.Option{
@@ -41,36 +45,42 @@ func New(opts ...Option) fantasy.Provider {
 	return openai.New(providerOptions.openaiOptions...)
 }
 
+// WithBaseURL sets the base URL for the OpenAI-compatible provider.
 func WithBaseURL(url string) Option {
 	return func(o *options) {
 		o.openaiOptions = append(o.openaiOptions, openai.WithBaseURL(url))
 	}
 }
 
+// WithAPIKey sets the API key for the OpenAI-compatible provider.
 func WithAPIKey(apiKey string) Option {
 	return func(o *options) {
 		o.openaiOptions = append(o.openaiOptions, openai.WithAPIKey(apiKey))
 	}
 }
 
+// WithName sets the name for the OpenAI-compatible provider.
 func WithName(name string) Option {
 	return func(o *options) {
 		o.openaiOptions = append(o.openaiOptions, openai.WithName(name))
 	}
 }
 
+// WithHeaders sets the headers for the OpenAI-compatible provider.
 func WithHeaders(headers map[string]string) Option {
 	return func(o *options) {
 		o.openaiOptions = append(o.openaiOptions, openai.WithHeaders(headers))
 	}
 }
 
+// WithHTTPClient sets the HTTP client for the OpenAI-compatible provider.
 func WithHTTPClient(client option.HTTPClient) Option {
 	return func(o *options) {
 		o.openaiOptions = append(o.openaiOptions, openai.WithHTTPClient(client))
 	}
 }
 
+// WithSDKOptions sets the SDK options for the OpenAI-compatible provider.
 func WithSDKOptions(opts ...option.RequestOption) Option {
 	return func(o *options) {
 		o.sdkOptions = append(o.sdkOptions, opts...)
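
A minimal construction sketch for the compat provider; the endpoint, key, and name below are placeholders, not values from this change:

    provider := openaicompat.New(
        openaicompat.WithBaseURL("https://llm-gateway.example.com/v1"), // placeholder endpoint
        openaicompat.WithAPIKey("sk-placeholder"),                      // placeholder key
        openaicompat.WithName("my-gateway"),
    )
    _ = provider // a fantasy.Provider backed by the shared OpenAI implementation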

providers/openaicompat/provider_options.go 🔗

@@ -1,3 +1,4 @@
+// Package openaicompat provides an implementation of the fantasy AI SDK for OpenAI-compatible APIs.
 package openaicompat
 
 import (
@@ -5,23 +6,28 @@ import (
 	"charm.land/fantasy/providers/openai"
 )
 
+// ProviderOptions represents additional options for the OpenAI-compatible provider.
 type ProviderOptions struct {
 	User            *string                 `json:"user"`
 	ReasoningEffort *openai.ReasoningEffort `json:"reasoning_effort"`
 }
 
+// ReasoningData represents reasoning data for the OpenAI-compatible provider.
 type ReasoningData struct {
 	ReasoningContent string `json:"reasoning_content"`
 }
 
+// Options implements the ProviderOptionsData interface.
 func (*ProviderOptions) Options() {}
 
+// NewProviderOptions creates new provider options for the OpenAI-compatible provider.
 func NewProviderOptions(opts *ProviderOptions) fantasy.ProviderOptions {
 	return fantasy.ProviderOptions{
 		Name: opts,
 	}
 }
 
+// ParseOptions parses provider options from a map for the OpenAI-compatible provider.
 func ParseOptions(data map[string]any) (*ProviderOptions, error) {
 	var options ProviderOptions
 	if err := fantasy.ParseOptions(data, &options); err != nil {

providers/openrouter/language_model_hooks.go 🔗

@@ -13,7 +13,7 @@ import (
 
 const reasoningStartedCtx = "reasoning_started"
 
-func languagePrepareModelCall(model fantasy.LanguageModel, params *openaisdk.ChatCompletionNewParams, call fantasy.Call) ([]fantasy.CallWarning, error) {
+func languagePrepareModelCall(_ fantasy.LanguageModel, params *openaisdk.ChatCompletionNewParams, call fantasy.Call) ([]fantasy.CallWarning, error) {
 	providerOptions := &ProviderOptions{}
 	if v, ok := call.ProviderOptions[Name]; ok {
 		providerOptions, ok = v.(*ProviderOptions)

providers/openrouter/openrouter.go 🔗

@@ -1,3 +1,4 @@
+// Package openrouter provides an implementation of the fantasy AI SDK for OpenRouter's language models.
 package openrouter
 
 import (
@@ -14,12 +15,16 @@ type options struct {
 }
 
 const (
+	// DefaultURL is the default URL for the OpenRouter API.
 	DefaultURL = "https://openrouter.ai/api/v1"
-	Name       = "openrouter"
+	// Name is the name of the OpenRouter provider.
+	Name = "openrouter"
 )
 
+// Option defines a function that configures OpenRouter provider options.
 type Option = func(*options)
 
+// New creates a new OpenRouter provider with the given options.
 func New(opts ...Option) fantasy.Provider {
 	providerOptions := options{
 		openaiOptions: []openai.Option{
@@ -42,24 +47,28 @@ func New(opts ...Option) fantasy.Provider {
 	return openai.New(providerOptions.openaiOptions...)
 }
 
+// WithAPIKey sets the API key for the OpenRouter provider.
 func WithAPIKey(apiKey string) Option {
 	return func(o *options) {
 		o.openaiOptions = append(o.openaiOptions, openai.WithAPIKey(apiKey))
 	}
 }
 
+// WithName sets the name for the OpenRouter provider.
 func WithName(name string) Option {
 	return func(o *options) {
 		o.openaiOptions = append(o.openaiOptions, openai.WithName(name))
 	}
 }
 
+// WithHeaders sets the headers for the OpenRouter provider.
 func WithHeaders(headers map[string]string) Option {
 	return func(o *options) {
 		o.openaiOptions = append(o.openaiOptions, openai.WithHeaders(headers))
 	}
 }
 
+// WithHTTPClient sets the HTTP client for the OpenRouter provider.
 func WithHTTPClient(client option.HTTPClient) Option {
 	return func(o *options) {
 		o.openaiOptions = append(o.openaiOptions, openai.WithHTTPClient(client))

providers/openrouter/provider_options.go 🔗

@@ -1,31 +1,40 @@
+// Package openrouter provides an implementation of the fantasy AI SDK for OpenRouter's language models.
 package openrouter
 
 import (
 	"charm.land/fantasy"
 )
 
+// ReasoningEffort represents the reasoning effort level for OpenRouter models.
 type ReasoningEffort string
 
 const (
-	ReasoningEffortLow    ReasoningEffort = "low"
+	// ReasoningEffortLow represents low reasoning effort.
+	ReasoningEffortLow ReasoningEffort = "low"
+	// ReasoningEffortMedium represents medium reasoning effort.
 	ReasoningEffortMedium ReasoningEffort = "medium"
-	ReasoningEffortHigh   ReasoningEffort = "high"
+	// ReasoningEffortHigh represents high reasoning effort.
+	ReasoningEffortHigh ReasoningEffort = "high"
 )
 
+// PromptTokensDetails represents details about prompt tokens for OpenRouter.
 type PromptTokensDetails struct {
 	CachedTokens int64
 }
 
+// CompletionTokensDetails represents details about completion tokens for OpenRouter.
 type CompletionTokensDetails struct {
 	ReasoningTokens int64
 }
 
+// CostDetails represents cost details for OpenRouter.
 type CostDetails struct {
 	UpstreamInferenceCost            float64 `json:"upstream_inference_cost"`
 	UpstreamInferencePromptCost      float64 `json:"upstream_inference_prompt_cost"`
 	UpstreamInferenceCompletionsCost float64 `json:"upstream_inference_completions_cost"`
 }
 
+// UsageAccounting represents usage accounting details for OpenRouter.
 type UsageAccounting struct {
 	PromptTokens            int64                   `json:"prompt_tokens"`
 	PromptTokensDetails     PromptTokensDetails     `json:"prompt_tokens_details"`
@@ -36,19 +45,24 @@ type UsageAccounting struct {
 	CostDetails             CostDetails             `json:"cost_details"`
 }
 
+// ProviderMetadata represents metadata from the OpenRouter provider.
 type ProviderMetadata struct {
 	Provider string          `json:"provider"`
 	Usage    UsageAccounting `json:"usage"`
 }
 
+// Options implements the ProviderOptionsData interface for ProviderMetadata.
 func (*ProviderMetadata) Options() {}
 
+// ReasoningMetadata represents reasoning metadata for OpenRouter.
 type ReasoningMetadata struct {
 	Signature string `json:"signature"`
 }
 
+// Options implements the ProviderOptionsData interface for ReasoningMetadata.
 func (*ReasoningMetadata) Options() {}
 
+// ReasoningOptions represents reasoning options for OpenRouter.
 type ReasoningOptions struct {
 	// Whether reasoning is enabled
 	Enabled *bool `json:"enabled,omitempty"`
@@ -60,6 +74,7 @@ type ReasoningOptions struct {
 	Effort *ReasoningEffort `json:"effort,omitempty"`
 }
 
+// Provider represents provider routing preferences for OpenRouter.
 type Provider struct {
 	// List of provider slugs to try in order (e.g. ["anthropic", "openai"])
 	Order []string `json:"order,omitempty"`
@@ -79,6 +94,7 @@ type Provider struct {
 	Sort *string `json:"sort,omitempty"`
 }
 
+// ProviderOptions represents additional options for the OpenRouter provider.
 type ProviderOptions struct {
 	Reasoning    *ReasoningOptions `json:"reasoning,omitempty"`
 	ExtraBody    map[string]any    `json:"extra_body,omitempty"`
@@ -99,29 +115,36 @@ type ProviderOptions struct {
 	// TODO: add the web search plugin config
 }
 
+// Options implements the ProviderOptionsData interface for ProviderOptions.
 func (*ProviderOptions) Options() {}
 
+// ReasoningDetail represents a reasoning detail for OpenRouter.
 type ReasoningDetail struct {
 	Type      string `json:"type"`
 	Text      string `json:"text"`
 	Summary   string `json:"summary"`
 	Signature string `json:"signature"`
 }
+
+// ReasoningData represents reasoning data for OpenRouter.
 type ReasoningData struct {
 	Reasoning        string            `json:"reasoning"`
 	ReasoningDetails []ReasoningDetail `json:"reasoning_details"`
 }
 
+// ReasoningEffortOption creates a pointer to a ReasoningEffort value for OpenRouter.
 func ReasoningEffortOption(e ReasoningEffort) *ReasoningEffort {
 	return &e
 }
 
+// NewProviderOptions creates new provider options for OpenRouter.
 func NewProviderOptions(opts *ProviderOptions) fantasy.ProviderOptions {
 	return fantasy.ProviderOptions{
 		Name: opts,
 	}
 }
 
+// ParseOptions parses provider options from a map for OpenRouter.
 func ParseOptions(data map[string]any) (*ProviderOptions, error) {
 	var options ProviderOptions
 	if err := fantasy.ParseOptions(data, &options); err != nil {
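
To illustrate (a sketch; field values are examples only, and attaching the options to a call is assumed), reasoning can be requested through these types roughly as follows:

    opts := openrouter.NewProviderOptions(&openrouter.ProviderOptions{
        Reasoning: &openrouter.ReasoningOptions{
            Enabled: fantasy.Opt(true),
            Effort:  openrouter.ReasoningEffortOption(openrouter.ReasoningEffortHigh),
        },
    })
    _ = opts // attach to the call's provider options under the openrouter name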

retry.go 🔗

@@ -18,8 +18,10 @@ type RetryFunction[T any] func(ctx context.Context, fn RetryFn[T]) (T, error)
 type RetryReason string
 
 const (
+	// RetryReasonMaxRetriesExceeded indicates the maximum number of retries was exceeded.
 	RetryReasonMaxRetriesExceeded RetryReason = "maxRetriesExceeded"
-	RetryReasonErrorNotRetryable  RetryReason = "errorNotRetryable"
+	// RetryReasonErrorNotRetryable indicates the error is not retryable.
+	RetryReasonErrorNotRetryable RetryReason = "errorNotRetryable"
 )
 
 // RetryError represents an error that occurred during retry operations.
@@ -98,8 +100,10 @@ type RetryOptions struct {
 	OnRetry        OnRetryCallback
 }
 
+// OnRetryCallback defines a function that is called when a retry occurs.
 type OnRetryCallback = func(err *APICallError, delay time.Duration)
 
 // DefaultRetryOptions returns the default retry options.
 func DefaultRetryOptions() RetryOptions {
 	return RetryOptions{
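
For context, the callback type added here can be set on the defaults as in this fragment (log and time imports omitted; how the options are then handed to the retry machinery is assumed):

    retryOpts := fantasy.DefaultRetryOptions()
    retryOpts.OnRetry = func(err *fantasy.APICallError, delay time.Duration) {
        log.Printf("retrying in %s: %v", delay, err)
    }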

util.go 🔗

@@ -2,10 +2,12 @@ package fantasy
 
 import "github.com/go-viper/mapstructure/v2"
 
+// Opt creates a pointer to the given value.
 func Opt[T any](v T) *T {
 	return &v
 }
 
+// ParseOptions parses the given options map into the provided struct.
 func ParseOptions[T any](options map[string]any, m *T) error {
 	decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
 		TagName: "json",
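
Both helpers are generic; a small sketch of how they compose (the options struct here is illustrative, not from the codebase):

    type exampleOptions struct {
        User    *string `json:"user"`
        Verbose *bool   `json:"verbose"`
    }

    var parsed exampleOptions
    raw := map[string]any{"user": "alice", "verbose": true}
    if err := fantasy.ParseOptions(raw, &parsed); err != nil {
        // handle the decode error
    }

    // Opt is handy when a pointer field needs a literal default:
    defaultUser := fantasy.Opt("anonymous")
    _ = defaultUser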