Detailed changes
@@ -114,7 +114,6 @@ require (
github.com/ncruces/julianday v1.0.0 // indirect
github.com/pierrec/lz4/v4 v4.1.22 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
- github.com/raphamorim/notify v0.9.4
github.com/rivo/uniseg v0.4.7
github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 // indirect
github.com/sethvargo/go-retry v0.3.0 // indirect
@@ -237,8 +237,6 @@ github.com/pressly/goose/v3 v3.25.0 h1:6WeYhMWGRCzpyd89SpODFnCBCKz41KrVbRT58nVjG
github.com/pressly/goose/v3 v3.25.0/go.mod h1:4hC1KrritdCxtuFsqgs1R4AU5bWtTAf+cnWvfhf2DNY=
github.com/qjebbs/go-jsons v0.0.0-20221222033332-a534c5fc1c4c h1:kmzxiX+OB0knCo1V0dkEkdPelzCdAzCURCfmFArn2/A=
github.com/qjebbs/go-jsons v0.0.0-20221222033332-a534c5fc1c4c/go.mod h1:wNJrtinHyC3YSf6giEh4FJN8+yZV7nXBjvmfjhBIcw4=
-github.com/raphamorim/notify v0.9.4 h1:JXAGOzeR/cnclKkRCZINKS4EtB47O5TD1N1iCkkarTM=
-github.com/raphamorim/notify v0.9.4/go.mod h1:3FXSIPyrunV10GCnLGPrpSxoY/Dxi+saeQb9hf+TDSo=
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
@@ -370,7 +368,6 @@ golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug=
golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
-golang.org/x/sys v0.0.0-20180926160741-c2ed4eda69e7/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -7,14 +7,11 @@ import (
"fmt"
"log/slog"
"maps"
- "os/exec"
- "strings"
"sync"
"time"
tea "github.com/charmbracelet/bubbletea/v2"
"github.com/charmbracelet/crush/internal/config"
- "github.com/charmbracelet/crush/internal/csync"
"github.com/charmbracelet/crush/internal/db"
"github.com/charmbracelet/crush/internal/format"
"github.com/charmbracelet/crush/internal/history"
@@ -23,7 +20,6 @@ import (
"github.com/charmbracelet/crush/internal/pubsub"
"github.com/charmbracelet/crush/internal/lsp"
- "github.com/charmbracelet/crush/internal/lsp/watcher"
"github.com/charmbracelet/crush/internal/message"
"github.com/charmbracelet/crush/internal/permission"
"github.com/charmbracelet/crush/internal/session"
@@ -41,9 +37,6 @@ type App struct {
clientsMutex sync.RWMutex
- watcherCancelFuncs *csync.Slice[context.CancelFunc]
- lspWatcherWG sync.WaitGroup
-
config *config.Config
serviceEventsWG *sync.WaitGroup
@@ -56,16 +49,6 @@ type App struct {
cleanupFuncs []func() error
}
-// isGitRepo checks if the current directory is a git repository
-func isGitRepo() bool {
- bts, err := exec.CommandContext(
- context.Background(),
- "git", "rev-parse",
- "--is-inside-work-tree",
- ).CombinedOutput()
- return err == nil && strings.TrimSpace(string(bts)) == "true"
-}
-
// New initializes a new application instance.
func New(ctx context.Context, conn *sql.DB, cfg *config.Config) (*App, error) {
q := db.New(conn)
@@ -89,8 +72,6 @@ func New(ctx context.Context, conn *sql.DB, cfg *config.Config) (*App, error) {
config: cfg,
- watcherCancelFuncs: csync.NewSlice[context.CancelFunc](),
-
events: make(chan tea.Msg, 100),
serviceEventsWG: &sync.WaitGroup{},
tuiWG: &sync.WaitGroup{},
@@ -98,15 +79,6 @@ func New(ctx context.Context, conn *sql.DB, cfg *config.Config) (*App, error) {
app.setupEvents()
- // Start the global watcher only if this is a git repository
- if isGitRepo() {
- if err := watcher.Start(); err != nil {
- return nil, fmt.Errorf("app: %w", err)
- }
- } else {
- slog.Warn("Not starting global watcher: not a git repository")
- }
-
// Initialize LSP clients in the background.
app.initLSPClients(ctx)
@@ -352,13 +324,6 @@ func (app *App) Shutdown() {
app.CoderAgent.CancelAll()
}
- for cancel := range app.watcherCancelFuncs.Seq() {
- cancel()
- }
-
- // Wait for all LSP watchers to finish.
- app.lspWatcherWG.Wait()
-
// Get all LSP clients.
app.clientsMutex.RLock()
clients := make(map[string]*lsp.Client, len(app.LSPClients))
@@ -374,9 +339,6 @@ func (app *App) Shutdown() {
cancel()
}
- // Shutdown the global watcher
- watcher.Shutdown()
-
// Call cleanup functions.
for _, cleanup := range app.cleanupFuncs {
if cleanup != nil {
@@ -6,9 +6,7 @@ import (
"time"
"github.com/charmbracelet/crush/internal/config"
- "github.com/charmbracelet/crush/internal/log"
"github.com/charmbracelet/crush/internal/lsp"
- "github.com/charmbracelet/crush/internal/lsp/watcher"
)
// initLSPClients initializes LSP clients.
@@ -77,64 +75,8 @@ func (app *App) createAndStartLSPClient(ctx context.Context, name string, config
slog.Info("LSP client initialized", "name", name)
- // Create a child context that can be canceled when the app is shutting
- // down.
- watchCtx, cancelFunc := context.WithCancel(ctx)
-
- // Create the workspace watcher.
- workspaceWatcher := watcher.New(name, lspClient)
-
- // Store the cancel function to be called during cleanup.
- app.watcherCancelFuncs.Append(cancelFunc)
-
// Add to map with mutex protection before starting goroutine
app.clientsMutex.Lock()
app.LSPClients[name] = lspClient
app.clientsMutex.Unlock()
-
- // Run workspace watcher.
- app.lspWatcherWG.Add(1)
- go app.runWorkspaceWatcher(watchCtx, name, workspaceWatcher)
-}
-
-// runWorkspaceWatcher executes the workspace watcher for an LSP client.
-func (app *App) runWorkspaceWatcher(ctx context.Context, name string, workspaceWatcher *watcher.Client) {
- defer app.lspWatcherWG.Done()
- defer log.RecoverPanic("LSP-"+name, func() {
- // Try to restart the client.
- app.restartLSPClient(ctx, name)
- })
-
- workspaceWatcher.Watch(ctx, app.config.WorkingDir())
- slog.Info("Workspace watcher stopped", "client", name)
-}
-
-// restartLSPClient attempts to restart a crashed or failed LSP client.
-func (app *App) restartLSPClient(ctx context.Context, name string) {
- // Get the original configuration.
- clientConfig, exists := app.config.LSP[name]
- if !exists {
- slog.Error("Cannot restart client, configuration not found", "client", name)
- return
- }
-
- // Clean up the old client if it exists.
- app.clientsMutex.Lock()
- oldClient, exists := app.LSPClients[name]
- if exists {
- // Remove from map before potentially slow shutdown.
- delete(app.LSPClients, name)
- }
- app.clientsMutex.Unlock()
-
- if exists && oldClient != nil {
- // Try to shut down client gracefully, but don't block on errors.
- shutdownCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
- _ = oldClient.Close(shutdownCtx)
- cancel()
- }
-
- // Create a new client using the shared function.
- app.createAndStartLSPClient(ctx, name, clientConfig)
- slog.Info("Successfully restarted LSP client", "client", name)
}
@@ -70,10 +70,10 @@ func (m *Map[K, V]) GetOrSet(key K, fn func() V) V {
// Take gets an item and then deletes it.
func (m *Map[K, V]) Take(key K) (V, bool) {
- m.mu.Lock()
- defer m.mu.Unlock()
- v, ok := m.inner[key]
- delete(m.inner, key)
+ v, ok := m.Get(key)
+ if ok {
+ m.Del(key)
+ }
return v, ok
}
@@ -0,0 +1,35 @@
+package csync
+
+import (
+ "sync/atomic"
+)
+
+// NewVersionedMap creates a new versioned, thread-safe map.
+func NewVersionedMap[K comparable, V any]() *VersionedMap[K, V] {
+ return &VersionedMap[K, V]{
+ Map: NewMap[K, V](),
+ }
+}
+
+// VersionedMap is a thread-safe map that keeps track of its version.
+type VersionedMap[K comparable, V any] struct {
+ *Map[K, V]
+ v atomic.Uint64
+}
+
+// Set sets the value for the specified key in the map and increments the version.
+func (m *VersionedMap[K, V]) Set(key K, value V) {
+ m.Map.Set(key, value)
+ m.v.Add(1)
+}
+
+// Del deletes the specified key from the map and increments the version.
+func (m *VersionedMap[K, V]) Del(key K) {
+ m.Map.Del(key)
+ m.v.Add(1)
+}
+
+// Version returns the current version of the map.
+func (m *VersionedMap[K, V]) Version() uint64 {
+ return m.v.Load()
+}
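
The version counter is what enables cheap change detection: a reader snapshots `Version()`, lets writers run, and compares. A minimal sketch of that polling pattern (illustrative only, not part of this change set; it uses nothing beyond the `NewVersionedMap`, `Set`, and `Version` added above):

```go
package main

import (
	"fmt"

	"github.com/charmbracelet/crush/internal/csync"
)

func main() {
	m := csync.NewVersionedMap[string, int]()

	before := m.Version() // snapshot the current version
	m.Set("answer", 42)   // every Set or Del bumps the counter

	if m.Version() != before {
		fmt.Println("map changed since the snapshot")
	}
}
```

This is the same pattern `Client.WaitForDiagnostics` relies on further down: it polls the diagnostics map's version on a ticker instead of registering a `publishDiagnostics` notification handler.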
@@ -0,0 +1,89 @@
+package csync
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestVersionedMap_Set(t *testing.T) {
+ t.Parallel()
+
+ vm := NewVersionedMap[string, int]()
+ require.Equal(t, uint64(0), vm.Version())
+
+ vm.Set("key1", 42)
+ require.Equal(t, uint64(1), vm.Version())
+
+ value, ok := vm.Get("key1")
+ require.True(t, ok)
+ require.Equal(t, 42, value)
+}
+
+func TestVersionedMap_Del(t *testing.T) {
+ t.Parallel()
+
+ vm := NewVersionedMap[string, int]()
+ vm.Set("key1", 42)
+ initialVersion := vm.Version()
+
+ vm.Del("key1")
+ require.Equal(t, initialVersion+1, vm.Version())
+
+ _, ok := vm.Get("key1")
+ require.False(t, ok)
+}
+
+func TestVersionedMap_VersionIncrement(t *testing.T) {
+ t.Parallel()
+
+ vm := NewVersionedMap[string, int]()
+ initialVersion := vm.Version()
+
+ // Setting a value should increment the version
+ vm.Set("key1", 42)
+ require.Equal(t, initialVersion+1, vm.Version())
+
+ // Deleting a value should increment the version
+ vm.Del("key1")
+ require.Equal(t, initialVersion+2, vm.Version())
+
+ // Deleting a non-existent key should still increment the version
+ vm.Del("nonexistent")
+ require.Equal(t, initialVersion+3, vm.Version())
+}
+
+func TestVersionedMap_ConcurrentAccess(t *testing.T) {
+ t.Parallel()
+
+ vm := NewVersionedMap[int, int]()
+ const numGoroutines = 100
+ const numOperations = 100
+
+ // Initial version
+ initialVersion := vm.Version()
+
+ // Perform concurrent Set and Del operations
+ for i := range numGoroutines {
+ go func(id int) {
+ for j := range numOperations {
+ key := id*numOperations + j
+ vm.Set(key, key*2)
+ vm.Del(key)
+ }
+ }(i)
+ }
+
+ // Wait for operations to complete by checking the version
+ // This is a simplified check - in a real test you might want to use sync.WaitGroup
+ expectedMinVersion := initialVersion + uint64(numGoroutines*numOperations*2)
+
+ // Allow some time for operations to complete
+ for vm.Version() < expectedMinVersion {
+ // Busy wait - in a real test you'd use proper synchronization
+ }
+
+ // Final version should be at least the expected minimum
+ require.GreaterOrEqual(t, vm.Version(), expectedMinVersion)
+ require.Equal(t, 0, vm.Len())
+}
@@ -16,6 +16,7 @@ import (
type DiagnosticsParams struct {
FilePath string `json:"file_path"`
}
+
type diagnosticsTool struct {
lspClients map[string]*lsp.Client
}
@@ -76,91 +77,26 @@ func (b *diagnosticsTool) Run(ctx context.Context, call ToolCall) (ToolResponse,
}
lsps := b.lspClients
-
if len(lsps) == 0 {
return NewTextErrorResponse("no LSP clients available"), nil
}
-
- if params.FilePath != "" {
- notifyLspOpenFile(ctx, params.FilePath, lsps)
- waitForLspDiagnostics(ctx, params.FilePath, lsps)
- }
-
+ notifyLSPs(ctx, lsps, params.FilePath)
output := getDiagnostics(params.FilePath, lsps)
-
return NewTextResponse(output), nil
}
-func notifyLspOpenFile(ctx context.Context, filePath string, lsps map[string]*lsp.Client) {
- for _, client := range lsps {
- err := client.OpenFile(ctx, filePath)
- if err != nil {
- continue
- }
- }
-}
-
-func waitForLspDiagnostics(ctx context.Context, filePath string, lsps map[string]*lsp.Client) {
- if len(lsps) == 0 {
+func notifyLSPs(ctx context.Context, lsps map[string]*lsp.Client, filepath string) {
+ if filepath == "" {
return
}
-
- diagChan := make(chan struct{}, 1)
-
for _, client := range lsps {
- originalDiags := client.GetDiagnostics()
-
- handler := func(_ context.Context, _ string, params json.RawMessage) {
- lsp.HandleDiagnostics(client, params)
- var diagParams protocol.PublishDiagnosticsParams
- if err := json.Unmarshal(params, &diagParams); err != nil {
- return
- }
-
- path, err := diagParams.URI.Path()
- if err != nil {
- slog.Error("Failed to convert diagnostic URI to path", "uri", diagParams.URI, "error", err)
- return
- }
-
- if path == filePath || hasDiagnosticsChanged(client.GetDiagnostics(), originalDiags) {
- select {
- case diagChan <- struct{}{}:
- default:
- }
- }
- }
-
- client.RegisterNotificationHandler("textDocument/publishDiagnostics", handler)
-
- if client.IsFileOpen(filePath) {
- err := client.NotifyChange(ctx, filePath)
- if err != nil {
- continue
- }
- } else {
- err := client.OpenFile(ctx, filePath)
- if err != nil {
- continue
- }
- }
- }
-
- select {
- case <-diagChan:
- case <-time.After(5 * time.Second):
- case <-ctx.Done():
- }
-}
-
-func hasDiagnosticsChanged(current, original map[protocol.DocumentURI][]protocol.Diagnostic) bool {
- for uri, diags := range current {
- origDiags, exists := original[uri]
- if !exists || len(diags) != len(origDiags) {
- return true
+ if !client.HandlesFile(filepath) {
+ continue
}
+ _ = client.OpenFileOnDemand(ctx, filepath)
+ _ = client.NotifyChange(ctx, filepath)
+ client.WaitForDiagnostics(ctx, 5*time.Second)
}
- return false
}
func getDiagnostics(filePath string, lsps map[string]*lsp.Client) string {
@@ -198,7 +134,6 @@ func getDiagnostics(filePath string, lsps map[string]*lsp.Client) string {
fileWarnings := countSeverity(fileDiagnostics, "Warn")
projectErrors := countSeverity(projectDiagnostics, "Error")
projectWarnings := countSeverity(projectDiagnostics, "Warn")
-
output.WriteString("\n<diagnostic_summary>\n")
fmt.Fprintf(&output, "Current file: %d errors, %d warnings\n", fileErrors, fileWarnings)
fmt.Fprintf(&output, "Project: %d errors, %d warnings\n", projectErrors, projectWarnings)
@@ -184,7 +184,8 @@ func (e *editTool) Run(ctx context.Context, call ToolCall) (ToolResponse, error)
return response, nil
}
- waitForLspDiagnostics(ctx, params.FilePath, e.lspClients)
+ notifyLSPs(ctx, e.lspClients, params.FilePath)
+
text := fmt.Sprintf("<result>\n%s\n</result>\n", response.Content)
text += getDiagnostics(params.FilePath, e.lspClients)
response.Content = text
@@ -188,8 +188,10 @@ func (m *multiEditTool) Run(ctx context.Context, call ToolCall) (ToolResponse, e
return response, nil
}
+ // Notify LSP clients about the change
+ notifyLSPs(ctx, m.lspClients, params.FilePath)
+
// Wait for LSP diagnostics and add them to the response
- waitForLspDiagnostics(ctx, params.FilePath, m.lspClients)
text := fmt.Sprintf("<result>\n%s\n</result>\n", response.Content)
text += getDiagnostics(params.FilePath, m.lspClients)
response.Content = text
@@ -233,7 +233,7 @@ func (v *viewTool) Run(ctx context.Context, call ToolCall) (ToolResponse, error)
return ToolResponse{}, fmt.Errorf("error reading file: %w", err)
}
- notifyLspOpenFile(ctx, filePath, v.lspClients)
+ notifyLSPs(ctx, v.lspClients, filePath)
output := "<file>\n"
// Format the output with line numbers
output += addLineNumbers(content, params.Offset+1)
@@ -221,7 +221,8 @@ func (w *writeTool) Run(ctx context.Context, call ToolCall) (ToolResponse, error
recordFileWrite(filePath)
recordFileRead(filePath)
- waitForLspDiagnostics(ctx, filePath, w.lspClients)
+
+ notifyLSPs(ctx, w.lspClients, params.FilePath)
result := fmt.Sprintf("File successfully written: %s", filePath)
result = fmt.Sprintf("<result>\n%s\n</result>", result)
@@ -34,7 +34,7 @@ type Client struct {
onDiagnosticsChanged func(name string, count int)
// Diagnostic cache
- diagnostics *csync.Map[protocol.DocumentURI, []protocol.Diagnostic]
+ diagnostics *csync.VersionedMap[protocol.DocumentURI, []protocol.Diagnostic]
// Files are currently opened by the LSP
openFiles *csync.Map[string, *OpenFileInfo]
@@ -83,7 +83,7 @@ func New(ctx context.Context, name string, config config.LSPConfig) (*Client, er
client: powernapClient,
name: name,
fileTypes: config.FileTypes,
- diagnostics: csync.NewMap[protocol.DocumentURI, []protocol.Diagnostic](),
+ diagnostics: csync.NewVersionedMap[protocol.DocumentURI, []protocol.Diagnostic](),
openFiles: csync.NewMap[string, *OpenFileInfo](),
config: config,
}
@@ -314,6 +314,8 @@ func (c *Client) NotifyChange(ctx context.Context, filepath string) error {
}
// CloseFile closes a file in the LSP server.
+//
+// NOTE: this is only ever called on LSP shutdown.
func (c *Client) CloseFile(ctx context.Context, filepath string) error {
cfg := config.Get()
uri := string(protocol.URIFromPath(filepath))
@@ -454,6 +456,26 @@ func (c *Client) openKeyConfigFiles(ctx context.Context) {
}
}
+// WaitForDiagnostics waits until diagnostics change or the timeout is reached.
+func (c *Client) WaitForDiagnostics(ctx context.Context, d time.Duration) {
+ ticker := time.NewTicker(200 * time.Millisecond)
+ defer ticker.Stop()
+ timeout := time.After(d)
+ pv := c.diagnostics.Version()
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case <-timeout:
+ return
+ case <-ticker.C:
+ if pv != c.diagnostics.Version() {
+ return
+ }
+ }
+ }
+}
+
// HasRootMarkers checks if any of the specified root marker patterns exist in the given directory.
// Uses glob patterns to match files, allowing for more flexible matching.
func HasRootMarkers(dir string, rootMarkers []string) bool {
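
For context on how the pieces above fit together: the per-client body of `notifyLSPs` in the diagnostics tool boils down to "push the change, then block until the diagnostics version moves". A hedged sketch of that caller-side flow (the helper name `refreshClientDiagnostics` is invented for illustration; `HandlesFile`, `NotifyChange`, and `WaitForDiagnostics` are the client methods shown in this diff):

```go
package example

import (
	"context"
	"time"

	"github.com/charmbracelet/crush/internal/lsp"
)

// refreshClientDiagnostics is a hypothetical helper mirroring notifyLSPs:
// skip clients that don't handle the file, send a best-effort change
// notification, then wait for the diagnostics version to change (or for
// the timeout/context to fire).
func refreshClientDiagnostics(ctx context.Context, client *lsp.Client, path string) {
	if !client.HandlesFile(path) {
		return
	}
	_ = client.NotifyChange(ctx, path)
	client.WaitForDiagnostics(ctx, 5*time.Second)
}
```

Because `WaitForDiagnostics` only compares version numbers, it returns as soon as any new diagnostics arrive for that client, which keeps the tools responsive without the old per-file handler bookkeeping.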
@@ -1,394 +0,0 @@
-package watcher
-
-import (
- "context"
- "errors"
- "fmt"
- "log/slog"
- "os"
- "path/filepath"
- "sync"
- "sync/atomic"
- "syscall"
- "time"
-
- "github.com/charmbracelet/crush/internal/config"
- "github.com/charmbracelet/crush/internal/csync"
- "github.com/charmbracelet/crush/internal/fsext"
- "github.com/charmbracelet/x/powernap/pkg/lsp/protocol"
- "github.com/raphamorim/notify"
-)
-
-// global manages file watching shared across all LSP clients.
-//
-// IMPORTANT: This implementation uses github.com/raphamorim/notify which provides
-// recursive watching on all platforms. On macOS it uses FSEvents, on Linux it
-// uses inotify (with recursion handled by the library), and on Windows it uses
-// ReadDirectoryChangesW.
-//
-// Key benefits:
-// - Single watch point for entire directory tree
-// - Automatic recursive watching without manually adding subdirectories
-// - No file descriptor exhaustion issues
-// - Built-in ignore system for filtering file events
-type global struct {
- // Channel for receiving file system events
- events chan notify.EventInfo
-
- // Map of workspace watchers by client name
- watchers *csync.Map[string, *Client]
-
- // Single workspace root directory for ignore checking
- root string
-
- started atomic.Bool
-
- // Debouncing for file events (shared across all clients)
- debounceTime time.Duration
- debounceMap *csync.Map[string, *time.Timer]
-
- // Context for shutdown
- ctx context.Context
- cancel context.CancelFunc
-
- // Wait group for cleanup
- wg sync.WaitGroup
-}
-
-// instance returns the singleton global watcher instance
-var instance = sync.OnceValue(func() *global {
- ctx, cancel := context.WithCancel(context.Background())
- gw := &global{
- events: make(chan notify.EventInfo, 4096), // Large buffer to prevent dropping events
- watchers: csync.NewMap[string, *Client](),
- debounceTime: 300 * time.Millisecond,
- debounceMap: csync.NewMap[string, *time.Timer](),
- ctx: ctx,
- cancel: cancel,
- }
-
- return gw
-})
-
-// register registers a workspace watcher with the global watcher
-func (gw *global) register(name string, watcher *Client) {
- gw.watchers.Set(name, watcher)
- slog.Debug("lsp watcher: Registered workspace watcher", "name", name)
-}
-
-// unregister removes a workspace watcher from the global watcher
-func (gw *global) unregister(name string) {
- gw.watchers.Del(name)
- slog.Debug("lsp watcher: Unregistered workspace watcher", "name", name)
-}
-
-// Start sets up recursive watching on the workspace root.
-//
-// Note: We use github.com/raphamorim/notify which provides recursive watching
-// with a single watch point. The "..." suffix means watch recursively.
-// This is much more efficient than manually walking and watching each directory.
-func Start() error {
- gw := instance()
-
- // technically workspace root is always the same...
- if gw.started.Load() {
- slog.Debug("lsp watcher: watcher already set up, skipping")
- return nil
- }
-
- cfg := config.Get()
- root := cfg.WorkingDir()
- slog.Debug("lsp watcher: set workspace directory to global watcher", "path", root)
-
- // Store the workspace root for hierarchical ignore checking
- gw.root = root
- gw.started.Store(true)
-
- // Set up ignore system
- if err := setupIgnoreSystem(root); err != nil {
- slog.Warn("lsp watcher: Failed to set up ignore system", "error", err)
- // Continue anyway, but without ignore functionality
- }
-
- // Start the event processing goroutine
- gw.wg.Add(1)
- go gw.processEvents()
-
- // Set up recursive watching on the root directory
- // The "..." suffix tells notify to watch recursively
- watchPath := filepath.Join(root, "...")
-
- // Watch for all event types we care about
- events := notify.Create | notify.Write | notify.Remove | notify.Rename
-
- if err := notify.Watch(watchPath, gw.events, events); err != nil {
- // Check if the error might be due to file descriptor limits
- if isFileLimitError(err) {
- slog.Warn("lsp watcher: Hit file descriptor limit, attempting to increase", "error", err)
- if newLimit, rlimitErr := maximizeOpenFileLimit(); rlimitErr == nil {
- slog.Info("lsp watcher: Increased file descriptor limit", "limit", newLimit)
- // Retry the watch operation
- if err = notify.Watch(watchPath, gw.events, events); err == nil {
- slog.Info("lsp watcher: Successfully set up watch after increasing limit")
- goto watchSuccess
- }
- err = fmt.Errorf("still failed after increasing limit: %w", err)
- } else {
- slog.Warn("lsp watcher: Failed to increase file descriptor limit", "error", rlimitErr)
- }
- }
- return fmt.Errorf("lsp watcher: error setting up recursive watch on %s: %w", root, err)
- }
-watchSuccess:
-
- slog.Info("lsp watcher: Started recursive watching", "root", root)
- return nil
-}
-
-// processEvents processes file system events from the notify library.
-// Since notify handles recursive watching for us, we don't need to manually
-// add new directories - they're automatically included.
-func (gw *global) processEvents() {
- defer gw.wg.Done()
- cfg := config.Get()
-
- if !gw.started.Load() {
- slog.Error("lsp watcher: Global watcher not initialized")
- return
- }
-
- for {
- select {
- case <-gw.ctx.Done():
- return
-
- case event, ok := <-gw.events:
- if !ok {
- return
- }
-
- path := event.Path()
-
- if cfg != nil && cfg.Options.DebugLSP {
- slog.Debug("lsp watcher: Global watcher received event", "path", path, "event", event.Event().String())
- }
-
- // Convert notify event to our internal format and handle it
- gw.handleFileEvent(event)
- }
- }
-}
-
-// handleFileEvent processes a file system event and distributes notifications to relevant clients
-func (gw *global) handleFileEvent(event notify.EventInfo) {
- cfg := config.Get()
- path := event.Path()
- uri := string(protocol.URIFromPath(path))
-
- // Map notify events to our change types
- var changeType protocol.FileChangeType
- var watchKindNeeded protocol.WatchKind
-
- switch event.Event() {
- case notify.Create:
- changeType = protocol.FileChangeType(protocol.Created)
- watchKindNeeded = protocol.WatchCreate
- // Handle file creation for all relevant clients
- if !isDir(path) && !fsext.ShouldExcludeFile(gw.root, path) {
- gw.openMatchingFileForClients(path)
- }
- case notify.Write:
- changeType = protocol.FileChangeType(protocol.Changed)
- watchKindNeeded = protocol.WatchChange
- case notify.Remove:
- changeType = protocol.FileChangeType(protocol.Deleted)
- watchKindNeeded = protocol.WatchDelete
- case notify.Rename:
- // Treat rename as delete + create
- // First handle as delete
- for _, watcher := range gw.watchers.Seq2() {
- if !watcher.client.HandlesFile(path) {
- continue
- }
- if watched, watchKind := watcher.isPathWatched(path); watched {
- if watchKind&protocol.WatchDelete != 0 {
- gw.handleFileEventForClient(watcher, uri, protocol.FileChangeType(protocol.Deleted))
- }
- }
- }
- // Then check if renamed file exists and treat as create
- if !isDir(path) {
- changeType = protocol.FileChangeType(protocol.Created)
- watchKindNeeded = protocol.WatchCreate
- } else {
- return // Already handled delete, nothing more to do for directories
- }
- default:
- // Unknown event type, skip
- return
- }
-
- // Process the event for each relevant client
- for client, watcher := range gw.watchers.Seq2() {
- if !watcher.client.HandlesFile(path) {
- continue // client doesn't handle this filetype
- }
-
- // Debug logging per client
- if cfg.Options.DebugLSP {
- matched, kind := watcher.isPathWatched(path)
- slog.Debug("lsp watcher: File event for client",
- "path", path,
- "event", event.Event().String(),
- "watched", matched,
- "kind", kind,
- "client", client,
- )
- }
-
- // Check if this path should be watched according to server registrations
- if watched, watchKind := watcher.isPathWatched(path); watched {
- if watchKind&watchKindNeeded != 0 {
- // Skip directory events for non-delete operations
- if changeType != protocol.FileChangeType(protocol.Deleted) && isDir(path) {
- continue
- }
-
- if changeType == protocol.FileChangeType(protocol.Deleted) {
- // Don't debounce deletes
- gw.handleFileEventForClient(watcher, uri, changeType)
- } else {
- // Debounce creates and changes
- gw.debounceHandleFileEventForClient(watcher, uri, changeType)
- }
- }
- }
- }
-}
-
-// isDir checks if a path is a directory
-func isDir(path string) bool {
- info, err := os.Stat(path)
- return err == nil && info.IsDir()
-}
-
-// openMatchingFileForClients opens a newly created file for all clients that handle it (only once per file)
-func (gw *global) openMatchingFileForClients(path string) {
- // Skip directories
- info, err := os.Stat(path)
- if err != nil || info.IsDir() {
- return
- }
-
- // Skip excluded files
- if fsext.ShouldExcludeFile(gw.root, path) {
- return
- }
-
- // Open the file for each client that handles it and has matching patterns
- for _, watcher := range gw.watchers.Seq2() {
- if watcher.client.HandlesFile(path) {
- watcher.openMatchingFile(gw.ctx, path)
- }
- }
-}
-
-// debounceHandleFileEventForClient handles file events with debouncing for a specific client
-func (gw *global) debounceHandleFileEventForClient(watcher *Client, uri string, changeType protocol.FileChangeType) {
- // Create a unique key based on URI, change type, and client name
- key := fmt.Sprintf("%s:%d:%s", uri, changeType, watcher.name)
-
- // Cancel existing timer if any
- if timer, exists := gw.debounceMap.Get(key); exists {
- timer.Stop()
- }
-
- // Create new timer
- gw.debounceMap.Set(key, time.AfterFunc(gw.debounceTime, func() {
- gw.handleFileEventForClient(watcher, uri, changeType)
-
- // Cleanup timer after execution
- gw.debounceMap.Del(key)
- }))
-}
-
-// handleFileEventForClient sends file change notifications to a specific client
-func (gw *global) handleFileEventForClient(watcher *Client, uri string, changeType protocol.FileChangeType) {
- // If the file is open and it's a change event, use didChange notification
- filePath, err := protocol.DocumentURI(uri).Path()
- if err != nil {
- slog.Error("lsp watcher: Error converting URI to path", "uri", uri, "error", err)
- return
- }
-
- if changeType == protocol.FileChangeType(protocol.Deleted) {
- watcher.client.ClearDiagnosticsForURI(protocol.DocumentURI(uri))
- } else if changeType == protocol.FileChangeType(protocol.Changed) && watcher.client.IsFileOpen(filePath) {
- err := watcher.client.NotifyChange(gw.ctx, filePath)
- if err != nil {
- slog.Error("lsp watcher: Error notifying change", "error", err)
- }
- return
- }
-
- // Notify LSP server about the file event using didChangeWatchedFiles
- if err := watcher.notifyFileEvent(gw.ctx, uri, changeType); err != nil {
- slog.Error("lsp watcher: Error notifying LSP server about file event", "error", err)
- }
-}
-
-// shutdown gracefully shuts down the global watcher
-func (gw *global) shutdown() {
- if gw.cancel != nil {
- gw.cancel()
- }
-
- // Stop watching and close the event channel
- notify.Stop(gw.events)
- close(gw.events)
-
- gw.wg.Wait()
- slog.Debug("lsp watcher: Global watcher shutdown complete")
-}
-
-// Shutdown shuts down the singleton global watcher
-func Shutdown() {
- instance().shutdown()
-}
-
-// isFileLimitError checks if an error is related to file descriptor limits
-func isFileLimitError(err error) bool {
- if err == nil {
- return false
- }
- // Check for common file limit errors
- return errors.Is(err, syscall.EMFILE) || errors.Is(err, syscall.ENFILE)
-}
-
-// setupIgnoreSystem configures the notify library's ignore system
-// to use .crushignore and .gitignore files for filtering file events
-func setupIgnoreSystem(root string) error {
- // Create a new ignore matcher for the workspace root
- im := notify.NewIgnoreMatcher(root)
-
- // Load .crushignore file if it exists
- crushignorePath := filepath.Join(root, ".crushignore")
- if _, err := os.Stat(crushignorePath); err == nil {
- if err := im.LoadIgnoreFile(crushignorePath); err != nil {
- slog.Warn("lsp watcher: Failed to load .crushignore file", "error", err)
- }
- }
-
- // Load .gitignore file if it exists
- gitignorePath := filepath.Join(root, ".gitignore")
- if _, err := os.Stat(gitignorePath); err == nil {
- if err := im.LoadIgnoreFile(gitignorePath); err != nil {
- slog.Warn("lsp watcher: Failed to load .gitignore file", "error", err)
- }
- }
-
- // Set as the global ignore matcher
- notify.SetIgnoreMatcher(im)
-
- return nil
-}
@@ -1,302 +0,0 @@
-package watcher
-
-import (
- "context"
- "os"
- "path/filepath"
- "testing"
- "time"
-
- "github.com/charmbracelet/crush/internal/csync"
- "github.com/raphamorim/notify"
-)
-
-func TestGlobalWatcher(t *testing.T) {
- t.Parallel()
-
- // Test that we can get the global watcher instance
- gw1 := instance()
- if gw1 == nil {
- t.Fatal("Expected global watcher instance, got nil")
- }
-
- // Test that subsequent calls return the same instance (singleton)
- gw2 := instance()
- if gw1 != gw2 {
- t.Fatal("Expected same global watcher instance, got different instances")
- }
-
- // Test registration and unregistration
- mockWatcher := &Client{
- name: "test-watcher",
- }
-
- gw1.register("test", mockWatcher)
-
- // Check that it was registered
- registered, _ := gw1.watchers.Get("test")
-
- if registered != mockWatcher {
- t.Fatal("Expected workspace watcher to be registered")
- }
-
- // Test unregistration
- gw1.unregister("test")
-
- unregistered, _ := gw1.watchers.Get("test")
-
- if unregistered != nil {
- t.Fatal("Expected workspace watcher to be unregistered")
- }
-}
-
-func TestGlobalWatcherWorkspaceIdempotent(t *testing.T) {
- t.Parallel()
-
- // Create a temporary directory for testing
- tempDir := t.TempDir()
-
- // Create a new global watcher instance for this test
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
-
- gw := &global{
- events: make(chan notify.EventInfo, 100),
- watchers: csync.NewMap[string, *Client](),
- debounceTime: 300 * time.Millisecond,
- debounceMap: csync.NewMap[string, *time.Timer](),
- ctx: ctx,
- cancel: cancel,
- }
-
- // Test that watching the same workspace multiple times is safe (idempotent)
- // With notify, we use recursive watching with "..."
- watchPath := filepath.Join(tempDir, "...")
-
- err1 := notify.Watch(watchPath, gw.events, notify.All)
- if err1 != nil {
- t.Fatalf("First Watch call failed: %v", err1)
- }
- defer notify.Stop(gw.events)
-
- // Watching the same path again should be safe (notify handles this)
- err2 := notify.Watch(watchPath, gw.events, notify.All)
- if err2 != nil {
- t.Fatalf("Second Watch call failed: %v", err2)
- }
-
- err3 := notify.Watch(watchPath, gw.events, notify.All)
- if err3 != nil {
- t.Fatalf("Third Watch call failed: %v", err3)
- }
-
- // All calls should succeed - notify handles deduplication internally
- // This test verifies that multiple Watch calls are safe
-}
-
-func TestGlobalWatcherRecursiveWatching(t *testing.T) {
- t.Parallel()
-
- // Create a temporary directory structure for testing
- tempDir := t.TempDir()
- subDir := filepath.Join(tempDir, "subdir")
- if err := os.Mkdir(subDir, 0o755); err != nil {
- t.Fatalf("Failed to create subdirectory: %v", err)
- }
-
- // Create some files
- file1 := filepath.Join(tempDir, "file1.txt")
- file2 := filepath.Join(subDir, "file2.txt")
- if err := os.WriteFile(file1, []byte("content1"), 0o644); err != nil {
- t.Fatalf("Failed to create file1: %v", err)
- }
- if err := os.WriteFile(file2, []byte("content2"), 0o644); err != nil {
- t.Fatalf("Failed to create file2: %v", err)
- }
-
- // Create a new global watcher instance for this test
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
-
- gw := &global{
- events: make(chan notify.EventInfo, 100),
- watchers: csync.NewMap[string, *Client](),
- debounceTime: 300 * time.Millisecond,
- debounceMap: csync.NewMap[string, *time.Timer](),
- ctx: ctx,
- cancel: cancel,
- root: tempDir,
- }
-
- // Set up recursive watching on the root directory
- watchPath := filepath.Join(tempDir, "...")
- if err := notify.Watch(watchPath, gw.events, notify.All); err != nil {
- t.Fatalf("Failed to set up recursive watch: %v", err)
- }
- defer notify.Stop(gw.events)
-
- // Verify that our expected directories and files exist
- expectedDirs := []string{tempDir, subDir}
-
- for _, expectedDir := range expectedDirs {
- info, err := os.Stat(expectedDir)
- if err != nil {
- t.Fatalf("Expected directory %s doesn't exist: %v", expectedDir, err)
- }
- if !info.IsDir() {
- t.Fatalf("Expected %s to be a directory, but it's not", expectedDir)
- }
- }
-
- // Verify that files exist
- testFiles := []string{file1, file2}
- for _, file := range testFiles {
- info, err := os.Stat(file)
- if err != nil {
- t.Fatalf("Test file %s doesn't exist: %v", file, err)
- }
- if info.IsDir() {
- t.Fatalf("Expected %s to be a file, but it's a directory", file)
- }
- }
-
- // Create a new file in the subdirectory to test recursive watching
- newFile := filepath.Join(subDir, "new.txt")
- if err := os.WriteFile(newFile, []byte("new content"), 0o644); err != nil {
- t.Fatalf("Failed to create new file: %v", err)
- }
-
- // We should receive an event for the file creation
- select {
- case event := <-gw.events:
- // On macOS, paths might have /private prefix, so we need to compare the real paths
- eventPath, _ := filepath.EvalSymlinks(event.Path())
- expectedPath, _ := filepath.EvalSymlinks(newFile)
- if eventPath != expectedPath {
- // Also try comparing just the base names as a fallback
- if filepath.Base(event.Path()) != filepath.Base(newFile) {
- t.Errorf("Expected event for %s, got %s", newFile, event.Path())
- }
- }
- case <-time.After(2 * time.Second):
- t.Fatal("Timeout waiting for file creation event")
- }
-}
-
-func TestNotifyDeduplication(t *testing.T) {
- t.Parallel()
-
- // Create a temporary directory for testing
- tempDir := t.TempDir()
-
- // Create an event channel
- events := make(chan notify.EventInfo, 100)
- defer close(events)
-
- // Add the same directory multiple times with recursive watching
- watchPath := filepath.Join(tempDir, "...")
-
- err1 := notify.Watch(watchPath, events, notify.All)
- if err1 != nil {
- t.Fatalf("First Watch failed: %v", err1)
- }
- defer notify.Stop(events)
-
- err2 := notify.Watch(watchPath, events, notify.All)
- if err2 != nil {
- t.Fatalf("Second Watch failed: %v", err2)
- }
-
- err3 := notify.Watch(watchPath, events, notify.All)
- if err3 != nil {
- t.Fatalf("Third Watch failed: %v", err3)
- }
-
- // All should succeed - notify handles deduplication internally
- // This test verifies the notify behavior we're relying on
-}
-
-func TestGlobalWatcherRespectsIgnoreFiles(t *testing.T) {
- t.Parallel()
-
- // Create a temporary directory structure for testing
- tempDir := t.TempDir()
-
- // Create directories that should be ignored
- nodeModules := filepath.Join(tempDir, "node_modules")
- target := filepath.Join(tempDir, "target")
- customIgnored := filepath.Join(tempDir, "custom_ignored")
- normalDir := filepath.Join(tempDir, "src")
-
- for _, dir := range []string{nodeModules, target, customIgnored, normalDir} {
- if err := os.MkdirAll(dir, 0o755); err != nil {
- t.Fatalf("Failed to create directory %s: %v", dir, err)
- }
- }
-
- // Create .gitignore file
- gitignoreContent := "node_modules/\ntarget/\n"
- if err := os.WriteFile(filepath.Join(tempDir, ".gitignore"), []byte(gitignoreContent), 0o644); err != nil {
- t.Fatalf("Failed to create .gitignore: %v", err)
- }
-
- // Create .crushignore file
- crushignoreContent := "custom_ignored/\n"
- if err := os.WriteFile(filepath.Join(tempDir, ".crushignore"), []byte(crushignoreContent), 0o644); err != nil {
- t.Fatalf("Failed to create .crushignore: %v", err)
- }
-
- // Create a new global watcher instance for this test
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
-
- gw := &global{
- events: make(chan notify.EventInfo, 100),
- watchers: csync.NewMap[string, *Client](),
- debounceTime: 300 * time.Millisecond,
- debounceMap: csync.NewMap[string, *time.Timer](),
- ctx: ctx,
- cancel: cancel,
- root: tempDir,
- }
-
- // Set up recursive watching
- watchPath := filepath.Join(tempDir, "...")
- if err := notify.Watch(watchPath, gw.events, notify.All); err != nil {
- t.Fatalf("Failed to set up recursive watch: %v", err)
- }
- defer notify.Stop(gw.events)
-
- // The notify library watches everything, but our processEvents
- // function should filter out ignored files using fsext.ShouldExcludeFile
- // This test verifies that the structure is set up correctly
-}
-
-func TestGlobalWatcherShutdown(t *testing.T) {
- t.Parallel()
-
- // Create a new context for this test
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
-
- // Create a temporary global watcher for testing
- gw := &global{
- events: make(chan notify.EventInfo, 100),
- watchers: csync.NewMap[string, *Client](),
- debounceTime: 300 * time.Millisecond,
- debounceMap: csync.NewMap[string, *time.Timer](),
- ctx: ctx,
- cancel: cancel,
- }
-
- // Test shutdown doesn't panic
- gw.shutdown()
-
- // Verify context was cancelled
- select {
- case <-gw.ctx.Done():
- // Expected
- case <-time.After(100 * time.Millisecond):
- t.Fatal("Expected context to be cancelled after shutdown")
- }
-}
@@ -1,12 +0,0 @@
-//go:build !unix
-
-package watcher
-
-// maximizeOpenFileLimit is a no-op on non-Unix systems.
-// Returns a high value to indicate no practical limit.
-func maximizeOpenFileLimit() (int, error) {
- // Windows and other non-Unix systems don't have file descriptor limits
- // in the same way Unix systems do. Return a very high value to indicate
- // there's no practical limit to worry about.
- return 10000000, nil // 10M handles - way more than any process would use
-}
@@ -1,57 +0,0 @@
-//go:build unix
-
-// This file contains code inspired by Syncthing's rlimit implementation
-// Syncthing is licensed under the Mozilla Public License Version 2.0
-// See: https://github.com/syncthing/syncthing/blob/main/LICENSE
-
-package watcher
-
-import (
- "runtime"
- "syscall"
-)
-
-const (
- // macOS has a specific limit for RLIMIT_NOFILE
- darwinOpenMax = 10240
-)
-
-// maximizeOpenFileLimit tries to set the resource limit RLIMIT_NOFILE (number
-// of open file descriptors) to the max (hard limit), if the current (soft
-// limit) is below the max. Returns the new (though possibly unchanged) limit,
-// or an error if it could not be changed.
-func maximizeOpenFileLimit() (int, error) {
- // Get the current limit on number of open files.
- var lim syscall.Rlimit
- if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &lim); err != nil {
- return 0, err
- }
-
- // If we're already at max, there's no need to try to raise the limit.
- if lim.Cur >= lim.Max {
- return int(lim.Cur), nil
- }
-
- // macOS doesn't like a soft limit greater than OPEN_MAX
- if runtime.GOOS == "darwin" && lim.Max > darwinOpenMax {
- lim.Max = darwinOpenMax
- }
-
- // Try to increase the limit to the max.
- oldLimit := lim.Cur
- lim.Cur = lim.Max
- if err := syscall.Setrlimit(syscall.RLIMIT_NOFILE, &lim); err != nil {
- return int(oldLimit), err
- }
-
- // If the set succeeded, perform a new get to see what happened. We might
- // have gotten a value lower than the one in lim.Max, if lim.Max was
- // something that indicated "unlimited" (i.e. intmax).
- if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &lim); err != nil {
- // We don't really know the correct value here since Getrlimit
- // mysteriously failed after working once... Shouldn't ever happen.
- return 0, err
- }
-
- return int(lim.Cur), nil
-}
@@ -1,548 +0,0 @@
-package watcher
-
-import (
- "context"
- "fmt"
- "log/slog"
- "os"
- "path/filepath"
- "strings"
- "time"
-
- "github.com/bmatcuk/doublestar/v4"
- "github.com/charmbracelet/crush/internal/config"
- "github.com/charmbracelet/crush/internal/csync"
- "github.com/charmbracelet/crush/internal/lsp"
- "github.com/charmbracelet/x/powernap/pkg/lsp/protocol"
-)
-
-// Client manages LSP file watching for a specific client
-// It now delegates actual file watching to the GlobalWatcher
-type Client struct {
- client *lsp.Client
- name string
- workspacePath string
-
- // File watchers registered by the server
- registrations *csync.Slice[protocol.FileSystemWatcher]
-}
-
-// New creates a new workspace watcher for the given client.
-func New(name string, client *lsp.Client) *Client {
- return &Client{
- name: name,
- client: client,
- registrations: csync.NewSlice[protocol.FileSystemWatcher](),
- }
-}
-
-// register adds file watchers to track
-func (w *Client) register(ctx context.Context, id string, watchers []protocol.FileSystemWatcher) {
- cfg := config.Get()
-
- w.registrations.Append(watchers...)
-
- if cfg.Options.DebugLSP {
- slog.Debug("Adding file watcher registrations",
- "id", id,
- "watchers", len(watchers),
- "total", w.registrations.Len(),
- )
-
- for i, watcher := range watchers {
- slog.Debug("Registration", "index", i+1)
-
- // Log the GlobPattern
- switch v := watcher.GlobPattern.Value.(type) {
- case string:
- slog.Debug("GlobPattern", "pattern", v)
- case protocol.RelativePattern:
- slog.Debug("GlobPattern", "pattern", v.Pattern)
-
- // Log BaseURI details
- switch u := v.BaseURI.Value.(type) {
- case string:
- slog.Debug("BaseURI", "baseURI", u)
- case protocol.DocumentURI:
- slog.Debug("BaseURI", "baseURI", u)
- default:
- slog.Debug("BaseURI", "baseURI", u)
- }
- default:
- slog.Debug("GlobPattern unknown type", "type", fmt.Sprintf("%T", v))
- }
-
- // Log WatchKind
- watchKind := protocol.WatchKind(protocol.WatchChange | protocol.WatchCreate | protocol.WatchDelete)
- if watcher.Kind != nil {
- watchKind = *watcher.Kind
- }
-
- slog.Debug("WatchKind", "kind", watchKind)
- }
- }
-
- // For servers that need file preloading, open high-priority files only
- if shouldPreloadFiles(w.name) {
- go func() {
- highPriorityFilesOpened := w.openHighPriorityFiles(ctx, w.name)
- if cfg.Options.DebugLSP {
- slog.Debug("Opened high-priority files",
- "count", highPriorityFilesOpened,
- "serverName", w.name)
- }
- }()
- }
-}
-
-// openHighPriorityFiles opens important files for the server type
-// Returns the number of files opened
-func (w *Client) openHighPriorityFiles(ctx context.Context, serverName string) int {
- cfg := config.Get()
- filesOpened := 0
-
- // Define patterns for high-priority files based on server type
- var patterns []string
-
- // TODO: move this to LSP config
- switch serverName {
- case "typescript", "typescript-language-server", "tsserver", "vtsls":
- patterns = []string{
- "**/tsconfig.json",
- "**/package.json",
- "**/jsconfig.json",
- "**/index.ts",
- "**/index.js",
- "**/main.ts",
- "**/main.js",
- }
- case "gopls":
- patterns = []string{
- "**/go.mod",
- "**/go.sum",
- "**/main.go",
- }
- case "rust-analyzer":
- patterns = []string{
- "**/Cargo.toml",
- "**/Cargo.lock",
- "**/src/lib.rs",
- "**/src/main.rs",
- }
- case "python", "pyright", "pylsp":
- patterns = []string{
- "**/pyproject.toml",
- "**/setup.py",
- "**/requirements.txt",
- "**/__init__.py",
- "**/__main__.py",
- }
- case "clangd":
- patterns = []string{
- "**/CMakeLists.txt",
- "**/Makefile",
- "**/compile_commands.json",
- }
- case "java", "jdtls":
- patterns = []string{
- "**/pom.xml",
- "**/build.gradle",
- "**/src/main/java/**/*.java",
- }
- default:
- // For unknown servers, use common configuration files
- patterns = []string{
- "**/package.json",
- "**/Makefile",
- "**/CMakeLists.txt",
- "**/.editorconfig",
- }
- }
-
- // Collect all files to open first
- var filesToOpen []string
-
- // For each pattern, find matching files
- for _, pattern := range patterns {
- // Use doublestar.Glob to find files matching the pattern (supports ** patterns)
- matches, err := doublestar.Glob(os.DirFS(w.workspacePath), pattern)
- if err != nil {
- if cfg.Options.DebugLSP {
- slog.Debug("Error finding high-priority files", "pattern", pattern, "error", err)
- }
- continue
- }
-
- for _, match := range matches {
- // Convert relative path to absolute
- fullPath := filepath.Join(w.workspacePath, match)
-
- // Skip directories and excluded files
- info, err := os.Stat(fullPath)
- if err != nil || info.IsDir() || shouldExcludeFile(fullPath) {
- continue
- }
-
- filesToOpen = append(filesToOpen, fullPath)
-
- // Limit the number of files per pattern
- if len(filesToOpen) >= 5 && (serverName != "java" && serverName != "jdtls") {
- break
- }
- }
- }
-
- // Open files in batches to reduce overhead
- batchSize := 3
- for i := 0; i < len(filesToOpen); i += batchSize {
- end := min(i+batchSize, len(filesToOpen))
-
- // Open batch of files
- for j := i; j < end; j++ {
- fullPath := filesToOpen[j]
- if err := w.client.OpenFile(ctx, fullPath); err != nil {
- if cfg.Options.DebugLSP {
- slog.Debug("Error opening high-priority file", "path", fullPath, "error", err)
- }
- } else {
- filesOpened++
- if cfg.Options.DebugLSP {
- slog.Debug("Opened high-priority file", "path", fullPath)
- }
- }
- }
-
- // Only add delay between batches, not individual files
- if end < len(filesToOpen) {
- time.Sleep(50 * time.Millisecond)
- }
- }
-
- return filesOpened
-}
-
-// Watch sets up file watching for a workspace using the global watcher
-func (w *Client) Watch(ctx context.Context, workspacePath string) {
- w.workspacePath = workspacePath
-
- slog.Debug("Starting workspace watcher", "workspacePath", workspacePath, "serverName", w.name)
-
- // Register this workspace watcher with the global watcher
- instance().register(w.name, w)
- defer instance().unregister(w.name)
-
- // Register handler for file watcher registrations from the server
- lsp.RegisterFileWatchHandler(func(id string, watchers []protocol.FileSystemWatcher) {
- w.register(ctx, id, watchers)
- })
-
- // Wait for context cancellation
- <-ctx.Done()
- slog.Debug("Workspace watcher stopped", "name", w.name)
-}
-
-// isPathWatched checks if a path should be watched based on server registrations
-// If no explicit registrations, watch everything
-func (w *Client) isPathWatched(path string) (bool, protocol.WatchKind) {
- if w.registrations.Len() == 0 {
- return true, protocol.WatchKind(protocol.WatchChange | protocol.WatchCreate | protocol.WatchDelete)
- }
-
- // Check each registration
- for reg := range w.registrations.Seq() {
- isMatch := w.matchesPattern(path, reg.GlobPattern)
- if isMatch {
- kind := protocol.WatchKind(protocol.WatchChange | protocol.WatchCreate | protocol.WatchDelete)
- if reg.Kind != nil {
- kind = *reg.Kind
- }
- return true, kind
- }
- }
-
- return false, 0
-}
-
-// matchesGlob handles glob patterns using the doublestar library
-func matchesGlob(pattern, path string) bool {
- // Use doublestar for all glob matching - it handles ** and other complex patterns
- matched, err := doublestar.Match(pattern, path)
- if err != nil {
- slog.Error("Error matching pattern", "pattern", pattern, "path", path, "error", err)
- return false
- }
- return matched
-}
-
-// matchesPattern checks if a path matches the glob pattern
-func (w *Client) matchesPattern(path string, pattern protocol.GlobPattern) bool {
- patternInfo, err := pattern.AsPattern()
- if err != nil {
- slog.Error("Error parsing pattern", "pattern", pattern, "error", err)
- return false
- }
-
- basePath := patternInfo.GetBasePath()
- patternText := patternInfo.GetPattern()
-
- path = filepath.ToSlash(path)
-
- // For simple patterns without base path
- if basePath == "" {
- // Check if the pattern matches the full path or just the file extension
- fullPathMatch := matchesGlob(patternText, path)
- baseNameMatch := matchesGlob(patternText, filepath.Base(path))
-
- return fullPathMatch || baseNameMatch
- }
-
-
- // Make path relative to basePath for matching
- relPath, err := filepath.Rel(basePath, path)
- if err != nil {
- slog.Error("Error getting relative path", "path", path, "basePath", basePath, "error", err, "server", w.name)
- return false
- }
- relPath = filepath.ToSlash(relPath)
-
- isMatch := matchesGlob(patternText, relPath)
-
- return isMatch
-}
-
-// notifyFileEvent sends a didChangeWatchedFiles notification for a file event
-func (w *Client) notifyFileEvent(ctx context.Context, uri string, changeType protocol.FileChangeType) error {
- cfg := config.Get()
- if cfg.Options.DebugLSP {
- slog.Debug("Notifying file event",
- "uri", uri,
- "changeType", changeType,
- )
- }
-
- params := protocol.DidChangeWatchedFilesParams{
- Changes: []protocol.FileEvent{
- {
- URI: protocol.DocumentURI(uri),
- Type: changeType,
- },
- },
- }
-
- return w.client.DidChangeWatchedFiles(ctx, params)
-}
-
-// shouldPreloadFiles determines if we should preload files for a specific language server
-// Some servers work better with preloaded files, others don't need it
-func shouldPreloadFiles(serverName string) bool {
- // TypeScript/JavaScript servers typically need some files preloaded
- // to properly resolve imports and provide intellisense
- switch serverName {
- case "typescript", "typescript-language-server", "tsserver", "vtsls":
- return true
- case "java", "jdtls":
- // Java servers often need to see source files to build the project model
- return true
- default:
- // For most servers, we'll use lazy loading by default
- return false
- }
-}
-
-// Common patterns for directories and files to exclude
-// TODO: make configurable
-var (
- excludedFileExtensions = map[string]bool{
- ".swp": true,
- ".swo": true,
- ".tmp": true,
- ".temp": true,
- ".bak": true,
- ".log": true,
- ".o": true, // Object files
- ".so": true, // Shared libraries
- ".dylib": true, // macOS shared libraries
- ".dll": true, // Windows shared libraries
- ".a": true, // Static libraries
- ".exe": true, // Windows executables
- ".lock": true, // Lock files
- }
-
- // Large binary files that shouldn't be opened
- largeBinaryExtensions = map[string]bool{
- ".png": true,
- ".jpg": true,
- ".jpeg": true,
- ".gif": true,
- ".bmp": true,
- ".ico": true,
- ".zip": true,
- ".tar": true,
- ".gz": true,
- ".rar": true,
- ".7z": true,
- ".pdf": true,
- ".mp3": true,
- ".mp4": true,
- ".mov": true,
- ".wav": true,
- ".wasm": true,
- }
-
- // Maximum file size to open (5MB)
- maxFileSize int64 = 5 * 1024 * 1024
-)
-
-// shouldExcludeFile returns true if the file should be excluded from opening
-func shouldExcludeFile(filePath string) bool {
- fileName := filepath.Base(filePath)
- cfg := config.Get()
-
- // Skip dot files
- if strings.HasPrefix(fileName, ".") {
- return true
- }
-
- // Check file extension
- ext := strings.ToLower(filepath.Ext(filePath))
- if excludedFileExtensions[ext] || largeBinaryExtensions[ext] {
- return true
- }
-
- info, err := os.Stat(filePath)
- if err != nil {
- // If we can't stat the file, skip it
- return true
- }
-
- // Skip large files
- if info.Size() > maxFileSize {
- if cfg.Options.DebugLSP {
- slog.Debug("Skipping large file",
- "path", filePath,
- "size", info.Size(),
- "maxSize", maxFileSize,
- "debug", cfg.Options.Debug,
- "sizeMB", float64(info.Size())/(1024*1024),
- "maxSizeMB", float64(maxFileSize)/(1024*1024),
- )
- }
- return true
- }
-
- return false
-}
-
-// openMatchingFile opens a file if it matches any of the registered patterns
-func (w *Client) openMatchingFile(ctx context.Context, path string) {
- cfg := config.Get()
- // Skip directories
- info, err := os.Stat(path)
- if err != nil || info.IsDir() {
- return
- }
-
- // Skip excluded files
- if shouldExcludeFile(path) {
- return
- }
-
- // Check if this path should be watched according to server registrations
- if watched, _ := w.isPathWatched(path); !watched {
- return
- }
-
- serverName := w.name
-
- // Get server name for specialized handling
- // Check if the file is a high-priority file that should be opened immediately
- // This helps with project initialization for certain language servers
- if isHighPriorityFile(path, serverName) {
- if cfg.Options.DebugLSP {
- slog.Debug("Opening high-priority file", "path", path, "serverName", serverName)
- }
- if err := w.client.OpenFile(ctx, path); err != nil && cfg.Options.DebugLSP {
- slog.Error("Error opening high-priority file", "path", path, "error", err)
- }
- return
- }
-
- // For non-high-priority files, we'll use different strategies based on server type
- if !shouldPreloadFiles(serverName) {
- return
- }
- // For servers that benefit from preloading, open files but with limits
-
- // Check file size - for preloading we're more conservative
- if info.Size() > (1 * 1024 * 1024) { // 1MB limit for preloaded files
- if cfg.Options.DebugLSP {
- slog.Debug("Skipping large file for preloading", "path", path, "size", info.Size())
- }
- return
- }
-
- // File type is already validated by HandlesFile() and isPathWatched() checks earlier,
- // so we know this client handles this file type. Just open it.
- if err := w.client.OpenFile(ctx, path); err != nil && cfg.Options.DebugLSP {
- slog.Error("Error opening file", "path", path, "error", err)
- }
-}
-
-// isHighPriorityFile determines if a file should be opened immediately
-// regardless of the preloading strategy
-func isHighPriorityFile(path string, serverName string) bool {
- fileName := filepath.Base(path)
- ext := filepath.Ext(path)
-
- switch serverName {
- case "typescript", "typescript-language-server", "tsserver", "vtsls":
- // For TypeScript, we want to open configuration files immediately
- return fileName == "tsconfig.json" ||
- fileName == "package.json" ||
- fileName == "jsconfig.json" ||
- // Also open main entry points
- fileName == "index.ts" ||
- fileName == "index.js" ||
- fileName == "main.ts" ||
- fileName == "main.js"
- case "gopls":
- // For Go, we want to open go.mod files immediately
- return fileName == "go.mod" ||
- fileName == "go.sum" ||
- // Also open main.go files
- fileName == "main.go"
- case "rust-analyzer":
- // For Rust, we want to open Cargo.toml files immediately
- return fileName == "Cargo.toml" ||
- fileName == "Cargo.lock" ||
- // Also open lib.rs and main.rs
- fileName == "lib.rs" ||
- fileName == "main.rs"
- case "python", "pyright", "pylsp":
- // For Python, open key project files
- return fileName == "pyproject.toml" ||
- fileName == "setup.py" ||
- fileName == "requirements.txt" ||
- fileName == "__init__.py" ||
- fileName == "__main__.py"
- case "clangd":
- // For C/C++, open key project files
- return fileName == "CMakeLists.txt" ||
- fileName == "Makefile" ||
- fileName == "compile_commands.json"
- case "java", "jdtls":
- // For Java, open key project files
- return fileName == "pom.xml" ||
- fileName == "build.gradle" ||
- ext == ".java" // Java servers often need to see source files
- }
-
- // For unknown servers, prioritize common configuration files
- return fileName == "package.json" ||
- fileName == "Makefile" ||
- fileName == "CMakeLists.txt" ||
- fileName == ".editorconfig"
-}