package config

import (
	"bytes"
	"cmp"
	"context"
	"errors"
	"fmt"
	"io"
	"log/slog"
	"maps"
	"net/http"
	"net/url"
	"regexp"
	"slices"
	"strings"
	"time"

	"charm.land/catwalk/pkg/catwalk"
	"github.com/charmbracelet/crush/internal/csync"
	"github.com/charmbracelet/crush/internal/env"
	"github.com/charmbracelet/crush/internal/oauth"
	"github.com/charmbracelet/crush/internal/oauth/copilot"
	"github.com/invopop/jsonschema"
)

const (
	appName              = "crush"
	defaultDataDirectory = ".crush"
	defaultInitializeAs  = "AGENTS.md"
)

// defaultContextPaths lists the context files looked up by default.
var defaultContextPaths = []string{
	".github/copilot-instructions.md",
	".cursorrules",
	".cursor/rules/",
	"CLAUDE.md",
	"CLAUDE.local.md",
	"GEMINI.md",
	"gemini.md",
	"crush.md",
	"crush.local.md",
	"Crush.md",
	"Crush.local.md",
	"CRUSH.md",
	"CRUSH.local.md",
	"AGENTS.md",
	"agents.md",
	"Agents.md",
}

// SelectedModelType identifies which configured model slot to use.
type SelectedModelType string

// String returns the string representation of the [SelectedModelType].
func (s SelectedModelType) String() string {
	return string(s)
}

const (
	SelectedModelTypeLarge SelectedModelType = "large"
	SelectedModelTypeSmall SelectedModelType = "small"
)

// Built-in agent IDs.
const (
	AgentCoder string = "coder"
	AgentTask  string = "task"
)

// SelectedModel describes a chosen model and its generation parameters.
type SelectedModel struct {
	// The model id as used by the provider API.
	// Required.
	Model string `json:"model" jsonschema:"required,description=The model ID as used by the provider API,example=gpt-4o"`
	// The model provider, same as the key/id used in the providers config.
	// Required.
	Provider string `json:"provider" jsonschema:"required,description=The model provider ID that matches a key in the providers config,example=openai"`

	// Only used by models that use the openai provider and need this set.
	ReasoningEffort string `json:"reasoning_effort,omitempty" jsonschema:"description=Reasoning effort level for OpenAI models that support it,enum=low,enum=medium,enum=high"`

	// Used by anthropic models that can reason to indicate if the model should think.
	Think bool `json:"think,omitempty" jsonschema:"description=Enable thinking mode for Anthropic models that support reasoning"`

	// Overrides the default model configuration.
	MaxTokens        int64    `json:"max_tokens,omitempty" jsonschema:"description=Maximum number of tokens for model responses,maximum=200000,example=4096"`
	Temperature      *float64 `json:"temperature,omitempty" jsonschema:"description=Sampling temperature,minimum=0,maximum=1,example=0.7"`
	TopP             *float64 `json:"top_p,omitempty" jsonschema:"description=Top-p (nucleus) sampling parameter,minimum=0,maximum=1,example=0.9"`
	TopK             *int64   `json:"top_k,omitempty" jsonschema:"description=Top-k sampling parameter"`
	FrequencyPenalty *float64 `json:"frequency_penalty,omitempty" jsonschema:"description=Frequency penalty to reduce repetition"`
	PresencePenalty  *float64 `json:"presence_penalty,omitempty" jsonschema:"description=Presence penalty to increase topic diversity"`

	// Override provider specific options.
	ProviderOptions map[string]any `json:"provider_options,omitempty" jsonschema:"description=Additional provider-specific options for the model"`
}
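
// An illustrative "models" section as it would appear in a crush.json
// (field names follow the JSON tags above; the model and provider IDs are
// examples):
//
//	"models": {
//	  "large": {"provider": "openai", "model": "gpt-4o", "reasoning_effort": "medium"},
//	  "small": {"provider": "openai", "model": "gpt-4o-mini", "max_tokens": 4096}
//	}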

// ProviderConfig configures a single AI provider and its models.
type ProviderConfig struct {
	// The provider's id.
	ID string `json:"id,omitempty" jsonschema:"description=Unique identifier for the provider,example=openai"`
	// The provider's name, used for display purposes.
	Name string `json:"name,omitempty" jsonschema:"description=Human-readable name for the provider,example=OpenAI"`
	// The provider's API endpoint.
	BaseURL string `json:"base_url,omitempty" jsonschema:"description=Base URL for the provider's API,format=uri,example=https://api.openai.com/v1"`
	// The provider type, e.g. "openai", "anthropic", etc. If empty, it
	// defaults to "openai".
	Type catwalk.Type `json:"type,omitempty" jsonschema:"description=Provider type that determines the API format,enum=openai,enum=openai-compat,enum=anthropic,enum=gemini,enum=azure,enum=vertexai,default=openai"`
	// The provider's API key.
	APIKey string `json:"api_key,omitempty" jsonschema:"description=API key for authentication with the provider,example=$OPENAI_API_KEY"`
	// The original API key template before resolution (for re-resolution on auth errors).
	APIKeyTemplate string `json:"-"`
	// OAuthToken for providers that use OAuth2 authentication.
	OAuthToken *oauth.Token `json:"oauth,omitempty" jsonschema:"description=OAuth2 token for authentication with the provider"`
	// Marks the provider as disabled.
	Disable bool `json:"disable,omitempty" jsonschema:"description=Whether this provider is disabled,default=false"`

	// Custom system prompt prefix.
	SystemPromptPrefix string `json:"system_prompt_prefix,omitempty" jsonschema:"description=Custom prefix to add to system prompts for this provider"`

	// Extra headers to send with each request to the provider.
	ExtraHeaders map[string]string `json:"extra_headers,omitempty" jsonschema:"description=Additional HTTP headers to send with requests"`
	// Extra fields to merge into request bodies (openai-compatible providers only).
	ExtraBody map[string]any `json:"extra_body,omitempty" jsonschema:"description=Additional fields to include in request bodies (openai-compatible providers only)"`

	ProviderOptions map[string]any `json:"provider_options,omitempty" jsonschema:"description=Additional provider-specific options for this provider"`

	// Used to pass extra parameters to the provider.
	ExtraParams map[string]string `json:"-"`

	// The provider models
	Models []catwalk.Model `json:"models,omitempty" jsonschema:"description=List of models available from this provider"`
}
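
// An illustrative "providers" entry for a custom OpenAI-compatible endpoint.
// The api_key value may reference an environment variable, which is resolved
// at load time (the URL and variable name here are examples):
//
//	"providers": {
//	  "my-gateway": {
//	    "type": "openai-compat",
//	    "base_url": "https://gateway.example.com/v1",
//	    "api_key": "$MY_GATEWAY_API_KEY"
//	  }
//	}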

// ToProvider converts the [ProviderConfig] to a [catwalk.Provider].
func (c *ProviderConfig) ToProvider() catwalk.Provider {
	// Convert config provider to provider.Provider format
	provider := catwalk.Provider{
		Name:   c.Name,
		ID:     catwalk.InferenceProvider(c.ID),
		Models: make([]catwalk.Model, len(c.Models)),
	}

	// Convert models
	for i, model := range c.Models {
		provider.Models[i] = catwalk.Model{
			ID:                     model.ID,
			Name:                   model.Name,
			CostPer1MIn:            model.CostPer1MIn,
			CostPer1MOut:           model.CostPer1MOut,
			CostPer1MInCached:      model.CostPer1MInCached,
			CostPer1MOutCached:     model.CostPer1MOutCached,
			ContextWindow:          model.ContextWindow,
			DefaultMaxTokens:       model.DefaultMaxTokens,
			CanReason:              model.CanReason,
			ReasoningLevels:        model.ReasoningLevels,
			DefaultReasoningEffort: model.DefaultReasoningEffort,
			SupportsImages:         model.SupportsImages,
		}
	}

	return provider
}

// SetupGitHubCopilot adds the GitHub Copilot-specific headers to the
// provider's extra headers.
func (c *ProviderConfig) SetupGitHubCopilot() {
	headers := copilot.Headers()
	if c.ExtraHeaders == nil {
		// maps.Copy panics when copying into a nil map.
		c.ExtraHeaders = make(map[string]string, len(headers))
	}
	maps.Copy(c.ExtraHeaders, headers)
}

// MCPType is the transport used to communicate with an MCP server.
type MCPType string

const (
	MCPStdio MCPType = "stdio"
	MCPSSE   MCPType = "sse"
	MCPHttp  MCPType = "http"
)

// MCPConfig configures a single Model Context Protocol server.
type MCPConfig struct {
	Command       string            `json:"command,omitempty" jsonschema:"description=Command to execute for stdio MCP servers,example=npx"`
	Env           map[string]string `json:"env,omitempty" jsonschema:"description=Environment variables to set for the MCP server"`
	Args          []string          `json:"args,omitempty" jsonschema:"description=Arguments to pass to the MCP server command"`
	Type          MCPType           `json:"type" jsonschema:"required,description=Type of MCP connection,enum=stdio,enum=sse,enum=http,default=stdio"`
	URL           string            `json:"url,omitempty" jsonschema:"description=URL for HTTP or SSE MCP servers,format=uri,example=http://localhost:3000/mcp"`
	Disabled      bool              `json:"disabled,omitempty" jsonschema:"description=Whether this MCP server is disabled,default=false"`
	DisabledTools []string          `json:"disabled_tools,omitempty" jsonschema:"description=List of tools from this MCP server to disable,example=get-library-doc"`
	Timeout       int               `json:"timeout,omitempty" jsonschema:"description=Timeout in seconds for MCP server connections,default=15,example=30,example=60,example=120"`

	// TODO: maybe make it possible to get the value from the env
	Headers map[string]string `json:"headers,omitempty" jsonschema:"description=HTTP headers for HTTP/SSE MCP servers"`
}
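
// Illustrative "mcp" entries, one per transport type (the command, package,
// URL, and token variable are examples):
//
//	"mcp": {
//	  "docs": {"type": "stdio", "command": "npx", "args": ["-y", "some-mcp-server"]},
//	  "remote": {"type": "http", "url": "http://localhost:3000/mcp", "headers": {"Authorization": "Bearer $MCP_TOKEN"}}
//	}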

// LSPConfig configures a single Language Server Protocol server.
type LSPConfig struct {
	Disabled    bool              `json:"disabled,omitempty" jsonschema:"description=Whether this LSP server is disabled,default=false"`
	Command     string            `json:"command,omitempty" jsonschema:"description=Command to execute for the LSP server,example=gopls"`
	Args        []string          `json:"args,omitempty" jsonschema:"description=Arguments to pass to the LSP server command"`
	Env         map[string]string `json:"env,omitempty" jsonschema:"description=Environment variables to set to the LSP server command"`
	FileTypes   []string          `json:"filetypes,omitempty" jsonschema:"description=File types this LSP server handles,example=go,example=mod,example=rs,example=c,example=js,example=ts"`
	RootMarkers []string          `json:"root_markers,omitempty" jsonschema:"description=Files or directories that indicate the project root,example=go.mod,example=package.json,example=Cargo.toml"`
	InitOptions map[string]any    `json:"init_options,omitempty" jsonschema:"description=Initialization options passed to the LSP server during initialize request"`
	Options     map[string]any    `json:"options,omitempty" jsonschema:"description=LSP server-specific settings passed during initialization"`
	Timeout     int               `json:"timeout,omitempty" jsonschema:"description=Timeout in seconds for LSP server initialization,default=30,example=60,example=120"`
}
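
// An illustrative "lsp" entry for gopls (values follow the examples in the
// field docs above):
//
//	"lsp": {
//	  "go": {"command": "gopls", "filetypes": ["go", "mod"], "root_markers": ["go.mod"]}
//	}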

// TUIOptions defines options for the terminal user interface.
type TUIOptions struct {
	CompactMode bool   `json:"compact_mode,omitempty" jsonschema:"description=Enable compact mode for the TUI interface,default=false"`
	DiffMode    string `json:"diff_mode,omitempty" jsonschema:"description=Diff mode for the TUI interface,enum=unified,enum=split"`
	// More TUI-related options (e.g. themes) can be added here later.

	Completions Completions `json:"completions,omitzero" jsonschema:"description=Completions UI options"`
	Transparent *bool       `json:"transparent,omitempty" jsonschema:"description=Enable transparent background for the TUI interface,default=false"`
}

// Completions defines options for the completions UI.
type Completions struct {
	MaxDepth *int `json:"max_depth,omitempty" jsonschema:"description=Maximum directory depth to scan for completions,default=0,example=10"`
	MaxItems *int `json:"max_items,omitempty" jsonschema:"description=Maximum number of completion items to return,default=1000,example=100"`
}

// Limits returns the user-defined max depth and max items, or their defaults.
func (c Completions) Limits() (depth, items int) {
	return ptrValOr(c.MaxDepth, 0), ptrValOr(c.MaxItems, 0)
}

// Permissions defines which tools may run without permission prompts.
type Permissions struct {
	AllowedTools []string `json:"allowed_tools,omitempty" jsonschema:"description=List of tools that don't require permission prompts,example=bash,example=view"`
}

// TrailerStyle controls the attribution trailer appended to commit messages.
type TrailerStyle string

const (
	TrailerStyleNone         TrailerStyle = "none"
	TrailerStyleCoAuthoredBy TrailerStyle = "co-authored-by"
	TrailerStyleAssistedBy   TrailerStyle = "assisted-by"
)

// Attribution controls how crush credits itself in generated content.
type Attribution struct {
	TrailerStyle  TrailerStyle `json:"trailer_style,omitempty" jsonschema:"description=Style of attribution trailer to add to commits,enum=none,enum=co-authored-by,enum=assisted-by,default=assisted-by"`
	CoAuthoredBy  *bool        `json:"co_authored_by,omitempty" jsonschema:"description=Deprecated: use trailer_style instead"`
	GeneratedWith bool         `json:"generated_with,omitempty" jsonschema:"description=Add Generated with Crush line to commit messages and issues and PRs,default=true"`
}

// JSONSchemaExtend marks the co_authored_by field as deprecated in the schema.
func (Attribution) JSONSchemaExtend(schema *jsonschema.Schema) {
	if schema.Properties != nil {
		if prop, ok := schema.Properties.Get("co_authored_by"); ok {
			prop.Deprecated = true
		}
	}
}

// Options holds general application options.
type Options struct {
	ContextPaths              []string     `json:"context_paths,omitempty" jsonschema:"description=Paths to files containing context information for the AI,example=.cursorrules,example=CRUSH.md"`
	SkillsPaths               []string     `json:"skills_paths,omitempty" jsonschema:"description=Paths to directories containing Agent Skills (folders with SKILL.md files),example=~/.config/crush/skills,example=./skills"`
	TUI                       *TUIOptions  `json:"tui,omitempty" jsonschema:"description=Terminal user interface options"`
	Debug                     bool         `json:"debug,omitempty" jsonschema:"description=Enable debug logging,default=false"`
	DebugLSP                  bool         `json:"debug_lsp,omitempty" jsonschema:"description=Enable debug logging for LSP servers,default=false"`
	DisableAutoSummarize      bool         `json:"disable_auto_summarize,omitempty" jsonschema:"description=Disable automatic conversation summarization,default=false"`
	DataDirectory             string       `json:"data_directory,omitempty" jsonschema:"description=Directory for storing application data (relative to working directory),default=.crush,example=.crush"` // Relative to the cwd
	DisabledTools             []string     `json:"disabled_tools,omitempty" jsonschema:"description=List of built-in tools to disable and hide from the agent,example=bash,example=sourcegraph"`
	DisableProviderAutoUpdate bool         `json:"disable_provider_auto_update,omitempty" jsonschema:"description=Disable providers auto-update,default=false"`
	DisableDefaultProviders   bool         `json:"disable_default_providers,omitempty" jsonschema:"description=Ignore all default/embedded providers. When enabled, providers must be fully specified in the config file with base_url, models, and api_key - no merging with defaults occurs,default=false"`
	Attribution               *Attribution `json:"attribution,omitempty" jsonschema:"description=Attribution settings for generated content"`
	DisableMetrics            bool         `json:"disable_metrics,omitempty" jsonschema:"description=Disable sending metrics,default=false"`
	InitializeAs              string       `json:"initialize_as,omitempty" jsonschema:"description=Name of the context file to create/update during project initialization,default=AGENTS.md,example=AGENTS.md,example=CRUSH.md,example=CLAUDE.md,example=docs/LLMs.md"`
	AutoLSP                   *bool        `json:"auto_lsp,omitempty" jsonschema:"description=Automatically setup LSPs based on root markers,default=true"`
	Progress                  *bool        `json:"progress,omitempty" jsonschema:"description=Show indeterminate progress updates during long operations,default=true"`
	DisableNotifications      bool         `json:"disable_notifications,omitempty" jsonschema:"description=Disable desktop notifications,default=false"`
	DisabledSkills            []string     `json:"disabled_skills,omitempty" jsonschema:"description=List of skill names to disable and hide from the agent,example=crush-config"`
}
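
// An illustrative "options" section (values are examples, not defaults):
//
//	"options": {
//	  "context_paths": ["AGENTS.md"],
//	  "initialize_as": "AGENTS.md",
//	  "disabled_tools": ["sourcegraph"],
//	  "attribution": {"trailer_style": "co-authored-by"}
//	}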

// MCPs maps MCP server names to their configurations.
type MCPs map[string]MCPConfig

// MCP pairs an MCP server name with its configuration.
type MCP struct {
	Name string    `json:"name"`
	MCP  MCPConfig `json:"mcp"`
}

// Sorted returns the MCP configurations sorted by name.
func (m MCPs) Sorted() []MCP {
	sorted := make([]MCP, 0, len(m))
	for k, v := range m {
		sorted = append(sorted, MCP{
			Name: k,
			MCP:  v,
		})
	}
	slices.SortFunc(sorted, func(a, b MCP) int {
		return strings.Compare(a.Name, b.Name)
	})
	return sorted
}

// LSPs maps LSP server names to their configurations.
type LSPs map[string]LSPConfig

// LSP pairs an LSP server name with its configuration.
type LSP struct {
	Name string    `json:"name"`
	LSP  LSPConfig `json:"lsp"`
}

// Sorted returns the LSP configurations sorted by name.
func (l LSPs) Sorted() []LSP {
	sorted := make([]LSP, 0, len(l))
	for k, v := range l {
		sorted = append(sorted, LSP{
			Name: k,
			LSP:  v,
		})
	}
	slices.SortFunc(sorted, func(a, b LSP) int {
		return strings.Compare(a.Name, b.Name)
	})
	return sorted
}

// ResolvedEnv returns the configured environment as KEY=VALUE pairs with
// shell variables resolved.
func (l LSPConfig) ResolvedEnv() []string {
	return resolveEnvs(l.Env)
}

// ResolvedEnv returns the configured environment as KEY=VALUE pairs with
// shell variables resolved.
func (m MCPConfig) ResolvedEnv() []string {
	return resolveEnvs(m.Env)
}

// ResolvedHeaders resolves shell variables in the configured header values in
// place and returns the resulting map.
func (m MCPConfig) ResolvedHeaders() map[string]string {
	resolver := NewShellVariableResolver(env.New())
	for e, v := range m.Headers {
		var err error
		m.Headers[e], err = resolver.ResolveValue(v)
		if err != nil {
			slog.Error("Error resolving header variable", "error", err, "variable", e, "value", v)
			continue
		}
	}
	return m.Headers
}

// Agent defines an agent and the tools, MCPs, and context available to it.
type Agent struct {
	ID          string `json:"id,omitempty"`
	Name        string `json:"name,omitempty"`
	Description string `json:"description,omitempty"`
	// Marks the agent as disabled.
	Disabled bool `json:"disabled,omitempty"`

	Model SelectedModelType `json:"model" jsonschema:"required,description=The model type to use for this agent,enum=large,enum=small,default=large"`

	// The tools available to the agent. If nil, all tools are available.
	AllowedTools []string `json:"allowed_tools,omitempty"`

	// The MCPs available to this agent. If nil, all MCPs are available.
	// Each value lists the tools the agent may use from that MCP; a nil
	// slice means all of that MCP's tools are available.
	AllowedMCP map[string][]string `json:"allowed_mcp,omitempty"`

	// Overrides the context paths for this agent
	ContextPaths []string `json:"context_paths,omitempty"`
}
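
// To illustrate the AllowedMCP semantics above, a hypothetical agent that may
// use a single tool from one MCP and everything from another:
//
//	agent.AllowedMCP = map[string][]string{
//		"docs":   {"get-library-docs"}, // only this tool from the docs MCP
//		"github": nil,                  // all tools from the github MCP
//	}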

// Tools holds configuration for the built-in tools.
type Tools struct {
	Ls   ToolLs   `json:"ls,omitzero"`
	Grep ToolGrep `json:"grep,omitzero"`
}

// ToolLs configures the ls tool.
type ToolLs struct {
	MaxDepth *int `json:"max_depth,omitempty" jsonschema:"description=Maximum depth for the ls tool,default=0,example=10"`
	MaxItems *int `json:"max_items,omitempty" jsonschema:"description=Maximum number of items to return for the ls tool,default=1000,example=100"`
}

// Limits returns the user-defined max-depth and max-items, or their defaults.
func (t ToolLs) Limits() (depth, items int) {
	return ptrValOr(t.MaxDepth, 0), ptrValOr(t.MaxItems, 0)
}

// ToolGrep configures the grep tool.
type ToolGrep struct {
	Timeout *time.Duration `json:"timeout,omitempty" jsonschema:"description=Timeout for the grep tool call,default=5s,example=10s"`
}

// GetTimeout returns the user-defined timeout or the default.
func (t ToolGrep) GetTimeout() time.Duration {
	return ptrValOr(t.Timeout, 5*time.Second)
}
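
// An illustrative "tools" section (the duration string follows the schema
// example above; all values are examples):
//
//	"tools": {
//	  "ls": {"max_depth": 10, "max_items": 100},
//	  "grep": {"timeout": "10s"}
//	}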

// HookConfig defines a user-configured shell command that fires on a hook
// event (e.g. PreToolUse).
type HookConfig struct {
	// Regex pattern tested against the tool name. Empty means match all.
	Matcher string `json:"matcher,omitempty" jsonschema:"description=Regex pattern tested against the tool name. Empty means match all tools."`
	// Shell command to execute.
	Command string `json:"command" jsonschema:"required,description=Shell command to execute when the hook fires"`
	// Timeout in seconds. Default 30.
	Timeout int `json:"timeout,omitempty" jsonschema:"description=Timeout in seconds for the hook command,default=30"`

	// Compiled matcher regex. Not serialized.
	matcherRegex *regexp.Regexp
}

// MatcherRegex returns the compiled matcher regex, or nil if no matcher is
// set.
func (h *HookConfig) MatcherRegex() *regexp.Regexp {
	return h.matcherRegex
}

// TimeoutDuration returns the hook timeout as a time.Duration, defaulting
// to 30s.
func (h *HookConfig) TimeoutDuration() time.Duration {
	if h.Timeout <= 0 {
		return 30 * time.Second
	}
	return time.Duration(h.Timeout) * time.Second
}
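
// An illustrative hook entry for a PreToolUse event (the script name is an
// example):
//
//	"hooks": {
//	  "PreToolUse": [
//	    {"matcher": "^bash$", "command": "./audit-tool-use.sh", "timeout": 10}
//	  ]
//	}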

// Config holds the configuration for crush.
type Config struct {
	Schema string `json:"$schema,omitempty"`

	// We currently only support large/small as values here.
	Models map[SelectedModelType]SelectedModel `json:"models,omitempty" jsonschema:"description=Model configurations for different model types,example={\"large\":{\"model\":\"gpt-4o\",\"provider\":\"openai\"}}"`

	// Recently used models stored in the data directory config.
	RecentModels map[SelectedModelType][]SelectedModel `json:"recent_models,omitempty" jsonschema:"-"`

	// The providers that are configured
	Providers *csync.Map[string, ProviderConfig] `json:"providers,omitempty" jsonschema:"description=AI provider configurations"`

	MCP MCPs `json:"mcp,omitempty" jsonschema:"description=Model Context Protocol server configurations"`

	LSP LSPs `json:"lsp,omitempty" jsonschema:"description=Language Server Protocol configurations"`

	Options *Options `json:"options,omitempty" jsonschema:"description=General application options"`

	Permissions *Permissions `json:"permissions,omitempty" jsonschema:"description=Permission settings for tool usage"`

	Tools Tools `json:"tools,omitzero" jsonschema:"description=Tool configurations"`

	Hooks map[string][]HookConfig `json:"hooks,omitempty" jsonschema:"description=User-defined shell commands that fire on hook events (e.g. PreToolUse)"`

	Agents map[string]Agent `json:"-"`
}
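
// A minimal illustrative crush.json combining the sections above (the schema
// URL and model ID are examples):
//
//	{
//	  "$schema": "https://charm.land/crush.json",
//	  "models": {"large": {"provider": "openai", "model": "gpt-4o"}},
//	  "lsp": {"go": {"command": "gopls"}}
//	}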

// EnabledProviders returns all providers that are not disabled.
func (c *Config) EnabledProviders() []ProviderConfig {
	var enabled []ProviderConfig
	for p := range c.Providers.Seq() {
		if !p.Disable {
			enabled = append(enabled, p)
		}
	}
	return enabled
}

// IsConfigured returns true if at least one provider is configured.
func (c *Config) IsConfigured() bool {
	return len(c.EnabledProviders()) > 0
}

// GetModel returns the model with the given ID from the given provider, or
// nil if it is not found.
func (c *Config) GetModel(provider, model string) *catwalk.Model {
	if providerConfig, ok := c.Providers.Get(provider); ok {
		for _, m := range providerConfig.Models {
			if m.ID == model {
				return &m
			}
		}
	}
	return nil
}

// GetProviderForModel returns the provider configuration for the given model
// type, or nil if it is not configured.
func (c *Config) GetProviderForModel(modelType SelectedModelType) *ProviderConfig {
	model, ok := c.Models[modelType]
	if !ok {
		return nil
	}
	if providerConfig, ok := c.Providers.Get(model.Provider); ok {
		return &providerConfig
	}
	return nil
}

// GetModelByType returns the model selected for the given model type, or nil
// if it is not configured.
func (c *Config) GetModelByType(modelType SelectedModelType) *catwalk.Model {
	model, ok := c.Models[modelType]
	if !ok {
		return nil
	}
	return c.GetModel(model.Provider, model.Model)
}

// LargeModel returns the configured large model, or nil if unset.
func (c *Config) LargeModel() *catwalk.Model {
	model, ok := c.Models[SelectedModelTypeLarge]
	if !ok {
		return nil
	}
	return c.GetModel(model.Provider, model.Model)
}

// SmallModel returns the configured small model, or nil if unset.
func (c *Config) SmallModel() *catwalk.Model {
	model, ok := c.Models[SelectedModelTypeSmall]
	if !ok {
		return nil
	}
	return c.GetModel(model.Provider, model.Model)
}

// maxRecentModelsPerType caps how many recently used models are kept per
// model type.
const maxRecentModelsPerType = 5

// allToolNames returns the names of all built-in tools.
func allToolNames() []string {
	return []string{
		"agent",
		"bash",
		"crush_info",
		"crush_logs",
		"job_output",
		"job_kill",
		"download",
		"edit",
		"multiedit",
		"lsp_diagnostics",
		"lsp_references",
		"lsp_restart",
		"fetch",
		"agentic_fetch",
		"glob",
		"grep",
		"ls",
		"sourcegraph",
		"todos",
		"view",
		"write",
		"list_mcp_resources",
		"read_mcp_resource",
	}
}

// resolveAllowedTools removes the disabled tools from allTools. A nil
// disabledTools means no tools are disabled.
func resolveAllowedTools(allTools []string, disabledTools []string) []string {
	if disabledTools == nil {
		return allTools
	}
	// filter out disabled tools (exclude mode)
	return filterSlice(allTools, disabledTools, false)
}

// resolveReadOnlyTools filters tools down to the read-only subset.
func resolveReadOnlyTools(tools []string) []string {
	readOnlyTools := []string{"glob", "grep", "ls", "sourcegraph", "view"}
	// filter to only include tools that are in allowedtools (include mode)
	return filterSlice(tools, readOnlyTools, true)
}

// filterSlice returns the items of data selected by mask: with include=true
// it keeps items present in mask; with include=false it keeps items absent
// from it.
func filterSlice(data []string, mask []string, include bool) []string {
	var filtered []string
	for _, s := range data {
		// if include is true, we include items that ARE in the mask
		// if include is false, we include items that are NOT in the mask
		if include == slices.Contains(mask, s) {
			filtered = append(filtered, s)
		}
	}
	return filtered
}
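
// For example, filterSlice([]string{"bash", "grep", "view"}, []string{"view"}, true)
// returns ["view"], while the same call with include=false returns
// ["bash", "grep"].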

// SetupAgents initializes the built-in coder and task agents.
func (c *Config) SetupAgents() {
	allowedTools := resolveAllowedTools(allToolNames(), c.Options.DisabledTools)

	agents := map[string]Agent{
		AgentCoder: {
			ID:           AgentCoder,
			Name:         "Coder",
			Description:  "An agent that helps with executing coding tasks.",
			Model:        SelectedModelTypeLarge,
			ContextPaths: c.Options.ContextPaths,
			AllowedTools: allowedTools,
		},

		AgentTask: {
			ID:           AgentTask,
			Name:         "Task",
			Description:  "An agent that helps with searching for context and finding implementation details.",
			Model:        SelectedModelTypeLarge,
			ContextPaths: c.Options.ContextPaths,
			AllowedTools: resolveReadOnlyTools(allowedTools),
			// NO MCPs or LSPs by default
			AllowedMCP: map[string][]string{},
		},
	}
	c.Agents = agents
}

// API key validation lives between this block and [ProviderConfig.TestConnection]
// below. See internal/config/VALIDATION.md for the full contract, the
// per-provider probe table, the classifier inventory, and the checklist for
// adding or changing a provider's validation behavior. Any change to
// [buildValidationProbe], the classify* functions, or
// [openaiCompatModelsAllowlist] must be reflected in that document.

// ErrValidationUnsupported is returned from [ProviderConfig.TestConnection]
// when the provider does not expose a deterministic endpoint that proves API
// key authentication without performing inference. Callers should treat this
// as "saved but not verified" rather than as a validation failure.
var ErrValidationUnsupported = errors.New("provider does not expose a deterministic validation probe")

// validationProbe describes a single HTTP request used to prove authentication
// for a given provider configuration.
type validationProbe struct {
	method   string
	url      string
	headers  map[string]string
	body     []byte
	classify func(statusCode int) error
}

// classifyAuthGated treats the probe endpoint as one that is expected to
// return 200 with a valid key and 401/403 with an invalid one. Any other
// status is considered non-deterministic and reported as unsupported so the
// UI can show "not verified" instead of a misleading "invalid key".
func classifyAuthGated(c *ProviderConfig) func(int) error {
	return func(status int) error {
		switch status {
		case http.StatusOK:
			return nil
		case http.StatusUnauthorized, http.StatusForbidden:
			return fmt.Errorf("failed to connect to provider %s: %s", c.ID, http.StatusText(status))
		default:
			return ErrValidationUnsupported
		}
	}
}

// classifyOpenAIChatMalformed classifies responses from a deliberately
// malformed POST {baseURL}/chat/completions probe. On most OpenAI-compatible
// gateways authentication happens before schema validation, so 401/403 means
// the key is bad while 400/422 means the key was accepted and only the body
// was rejected. Anything else is treated as unsupported / transient.
func classifyOpenAIChatMalformed(c *ProviderConfig) func(int) error {
	return func(status int) error {
		switch status {
		case http.StatusUnauthorized, http.StatusForbidden:
			return fmt.Errorf("failed to connect to provider %s: %s", c.ID, http.StatusText(status))
		case http.StatusBadRequest, http.StatusUnprocessableEntity:
			return nil
		default:
			return ErrValidationUnsupported
		}
	}
}

// classifyGoogleModels classifies responses from Google's
// `/v1beta/models?key=…` probe. Google returns 400 INVALID_ARGUMENT for a
// malformed or unknown API key, so 400/401/403 all indicate an invalid key.
func classifyGoogleModels(c *ProviderConfig) func(int) error {
	return func(status int) error {
		switch status {
		case http.StatusOK:
			return nil
		case http.StatusBadRequest, http.StatusUnauthorized, http.StatusForbidden:
			return fmt.Errorf("failed to connect to provider %s: %s", c.ID, http.StatusText(status))
		default:
			return ErrValidationUnsupported
		}
	}
}

// classifyZAIModels preserves the historical ZAI-specific behaviour: the
// `/models` endpoint returns a variety of non-200 statuses even with a valid
// key, but reliably returns 401 when the key is bad. Treat 401 as invalid
// and anything else as valid (the endpoint is authoritative about bad keys
// but noisy about everything else).
func classifyZAIModels(c *ProviderConfig) func(int) error {
	return func(status int) error {
		if status == http.StatusUnauthorized {
			return fmt.Errorf("failed to connect to provider %s: %s", c.ID, http.StatusText(status))
		}
		return nil
	}
}

// openaiCompatModelsAllowlist lists openai-compat providers whose `/models`
// endpoint is known to authenticate the caller (i.e. return 401/403 for a
// bad key rather than 200 with a public listing). New openai-compat
// providers should NOT be added here unless their `/models` behaviour has
// been confirmed to gate on auth — otherwise they should use the malformed
// chat-completions probe or return [ErrValidationUnsupported].
var openaiCompatModelsAllowlist = map[catwalk.InferenceProvider]struct{}{
	"deepseek":                           {},
	catwalk.InferenceProviderGROQ:        {},
	catwalk.InferenceProviderXAI:         {},
	catwalk.InferenceProviderZhipu:       {},
	catwalk.InferenceProviderZhipuCoding: {},
	catwalk.InferenceProviderCerebras:    {},
	catwalk.InferenceProviderNebius:      {},
	catwalk.InferenceProviderCopilot:     {},
}

// openaiCompatChatProbe builds a malformed-body POST /chat/completions probe
// for OpenAI-compatible providers whose chat-completions endpoint is known to
// gate on auth before validating the request body.
func openaiCompatChatProbe(c *ProviderConfig, baseURL, apiKey string) (*validationProbe, error) {
	if baseURL == "" {
		return nil, ErrValidationUnsupported
	}
	return &validationProbe{
		method: http.MethodPost,
		url:    baseURL + "/chat/completions",
		headers: map[string]string{
			"Authorization": "Bearer " + apiKey,
			"Content-Type":  "application/json",
		},
		// Intentionally malformed: required fields missing so the gateway
		// rejects the payload after authenticating the caller.
		body:     []byte(`{"__crush_probe__": true}`),
		classify: classifyOpenAIChatMalformed(c),
	}, nil
}

// buildValidationProbe returns the probe to use for this provider, or a
// sentinel error if verification is impossible without performing inference.
// A nil probe with a nil error means "the key is valid by virtue of its
// format and no network probe is necessary" (e.g. Bedrock/Vercel prefix
// checks).
func (c *ProviderConfig) buildValidationProbe(resolver VariableResolver) (*validationProbe, error) {
	providerID := catwalk.InferenceProvider(c.ID)
	apiKey, _ := resolver.ResolveValue(c.APIKey)
	baseURL, _ := resolver.ResolveValue(c.BaseURL)

	// Provider-ID-specific probes take precedence over type-based defaults.
	switch providerID {
	case catwalk.InferenceProviderMiniMax, catwalk.InferenceProviderMiniMaxChina:
		base := cmp.Or(baseURL, "https://api.minimax.io/anthropic")
		return &validationProbe{
			method: http.MethodGet,
			url:    base + "/v1/models",
			headers: map[string]string{
				"x-api-key":         apiKey,
				"anthropic-version": "2023-06-01",
			},
			classify: classifyAuthGated(c),
		}, nil
	case catwalk.InferenceProviderVenice:
		base := cmp.Or(baseURL, "https://api.venice.ai/api/v1")
		return &validationProbe{
			method: http.MethodGet,
			url:    base + "/api_keys/rate_limits",
			headers: map[string]string{
				"Authorization": "Bearer " + apiKey,
			},
			classify: classifyAuthGated(c),
		}, nil
	case catwalk.InferenceAIHubMix,
		catwalk.InferenceProviderAvian,
		catwalk.InferenceProviderCortecs,
		catwalk.InferenceProviderHuggingFace,
		catwalk.InferenceProviderIoNet,
		catwalk.InferenceProviderOpenCodeGo,
		catwalk.InferenceProviderOpenCodeZen,
		catwalk.InferenceProviderQiniuCloud,
		catwalk.InferenceProviderSynthetic:
		return openaiCompatChatProbe(c, baseURL, apiKey)
	case catwalk.InferenceProviderChutes, catwalk.InferenceProviderNeuralwatt:
		// These providers have been observed to return ambiguous responses
		// for unauthenticated requests, so we cannot safely validate.
		return nil, ErrValidationUnsupported
	case catwalk.InferenceProviderZAI:
		// ZAI's `/models` endpoint is authoritative about bad keys (always
		// 401) but returns assorted non-200 statuses for valid keys, so it
		// needs its own classifier.
		if baseURL == "" {
			return nil, ErrValidationUnsupported
		}
		return &validationProbe{
			method: http.MethodGet,
			url:    baseURL + "/models",
			headers: map[string]string{
				"Authorization": "Bearer " + apiKey,
			},
			classify: classifyZAIModels(c),
		}, nil
	}

	// Type-based defaults for providers without an explicit override.
	switch c.Type {
	case catwalk.TypeOpenAI:
		base := cmp.Or(baseURL, "https://api.openai.com/v1")
		return &validationProbe{
			method: http.MethodGet,
			url:    base + "/models",
			headers: map[string]string{
				"Authorization": "Bearer " + apiKey,
			},
			classify: classifyAuthGated(c),
		}, nil
	case catwalk.TypeOpenRouter:
		base := cmp.Or(baseURL, "https://openrouter.ai/api/v1")
		return &validationProbe{
			method: http.MethodGet,
			url:    base + "/credits",
			headers: map[string]string{
				"Authorization": "Bearer " + apiKey,
			},
			classify: classifyAuthGated(c),
		}, nil
	case catwalk.TypeAnthropic:
		base := cmp.Or(baseURL, "https://api.anthropic.com/v1")
		testURL := base + "/models"
		if providerID == catwalk.InferenceKimiCoding {
			testURL = base + "/v1/models"
		}
		return &validationProbe{
			method: http.MethodGet,
			url:    testURL,
			headers: map[string]string{
				"x-api-key":         apiKey,
				"anthropic-version": "2023-06-01",
			},
			classify: classifyAuthGated(c),
		}, nil
	case catwalk.TypeGoogle:
		base := cmp.Or(baseURL, "https://generativelanguage.googleapis.com")
		return &validationProbe{
			method:   http.MethodGet,
			url:      base + "/v1beta/models?key=" + url.QueryEscape(apiKey),
			classify: classifyGoogleModels(c),
		}, nil
	case catwalk.TypeBedrock:
		// NOTE: Bedrock has a `/foundation-models` endpoint that we could in
		// theory use, but authorization there is region-specific, so probing
		// it reliably is non-trivial. Fall back to a prefix check.
		if strings.HasPrefix(apiKey, "ABSK") {
			return nil, nil
		}
		return nil, errors.New("not a valid bedrock api key")
	case catwalk.TypeVercel:
		// NOTE: Vercel does not validate API keys on the `/models` endpoint.
		if strings.HasPrefix(apiKey, "vck_") {
			return nil, nil
		}
		return nil, errors.New("not a valid vercel api key")
	case catwalk.TypeOpenAICompat:
		// Generic openai-compat providers often expose a public /models
		// endpoint, so hitting it proves nothing about the caller's key.
		// Only providers we've confirmed to gate /models on auth use the
		// /models probe; everyone else needs an explicit override above or
		// returns ErrValidationUnsupported.
		if _, ok := openaiCompatModelsAllowlist[providerID]; !ok {
			return nil, ErrValidationUnsupported
		}
		if baseURL == "" {
			return nil, ErrValidationUnsupported
		}
		return &validationProbe{
			method: http.MethodGet,
			url:    baseURL + "/models",
			headers: map[string]string{
				"Authorization": "Bearer " + apiKey,
			},
			classify: classifyAuthGated(c),
		}, nil
	}

	return nil, ErrValidationUnsupported
}

// TestConnection attempts to prove that the configured API key authenticates
// with the provider. It returns nil on confirmed success, [ErrValidationUnsupported]
// when the provider has no deterministic validation probe, or a non-nil error
// describing the validation failure.
func (c *ProviderConfig) TestConnection(resolver VariableResolver) error {
	probe, err := c.buildValidationProbe(resolver)
	if err != nil {
		return err
	}
	if probe == nil {
		// A nil probe with no error means the configuration was accepted
		// without needing a network round-trip (e.g. Bedrock/Vercel prefix
		// checks).
		return nil
	}
	if probe.url == "" {
		return ErrValidationUnsupported
	}

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	var body io.Reader
	if len(probe.body) > 0 {
		body = bytes.NewReader(probe.body)
	}
	req, err := http.NewRequestWithContext(ctx, probe.method, probe.url, body)
	if err != nil {
		// Probe construction failures shouldn't surface as low-signal user
		// errors; treat them as "cannot verify" instead.
		return ErrValidationUnsupported
	}
	for k, v := range probe.headers {
		req.Header.Set(k, v)
	}
	for k, v := range c.ExtraHeaders {
		req.Header.Set(k, v)
	}

	client := &http.Client{}
	resp, err := client.Do(req)
	if err != nil {
		return fmt.Errorf("failed to connect to provider %s: %w", c.ID, err)
	}
	defer resp.Body.Close()

	return probe.classify(resp.StatusCode)
}
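
// An illustrative caller, assuming a resolver built from the process
// environment (this sketch is not part of the validation contract itself):
//
//	resolver := NewShellVariableResolver(env.New())
//	switch err := providerCfg.TestConnection(resolver); {
//	case err == nil:
//		// key verified
//	case errors.Is(err, ErrValidationUnsupported):
//		// saved but not verified
//	default:
//		// invalid key or transport failure
//	}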

// resolveEnvs resolves shell variables in the given env map in place and
// returns the result as KEY=VALUE pairs.
func resolveEnvs(envs map[string]string) []string {
	resolver := NewShellVariableResolver(env.New())
	for e, v := range envs {
		var err error
		envs[e], err = resolver.ResolveValue(v)
		if err != nil {
			slog.Error("Error resolving environment variable", "error", err, "variable", e, "value", v)
			continue
		}
	}

	res := make([]string, 0, len(envs))
	for k, v := range envs {
		res = append(res, fmt.Sprintf("%s=%s", k, v))
	}
	return res
}

// ptrValOr returns the value pointed to by t, or el if t is nil.
func ptrValOr[T any](t *T, el T) T {
	if t == nil {
		return el
	}
	return *t
}
