wip: refactoring tui

This commit is contained in:
adamdottv
2025-05-29 15:10:44 -05:00
parent 2a132f86d6
commit 37c0c1f358
43 changed files with 348 additions and 20254 deletions

View File

@@ -1,348 +0,0 @@
package tools
import (
"context"
"encoding/json"
"fmt"
"strings"
"time"
"github.com/sst/opencode/internal/config"
"github.com/sst/opencode/internal/llm/tools/shell"
"github.com/sst/opencode/internal/permission"
)
type BashParams struct {
Command string `json:"command"`
Timeout int `json:"timeout"`
}
type BashPermissionsParams struct {
Command string `json:"command"`
Timeout int `json:"timeout"`
}
type BashResponseMetadata struct {
StartTime int64 `json:"start_time"`
EndTime int64 `json:"end_time"`
}
type bashTool struct {
permissions permission.Service
}
const (
BashToolName = "bash"
DefaultTimeout = 1 * 60 * 1000 // 1 minute in milliseconds
MaxTimeout = 10 * 60 * 1000 // 10 minutes in milliseconds
MaxOutputLength = 30000
)
var bannedCommands = []string{
"alias", "curl", "curlie", "wget", "axel", "aria2c",
"nc", "telnet", "lynx", "w3m", "links", "httpie", "xh",
"http-prompt", "chrome", "firefox", "safari",
}
var safeReadOnlyCommands = []string{
"ls", "echo", "pwd", "date", "cal", "uptime", "whoami", "id", "groups", "env", "printenv", "set", "unset", "which", "type", "whereis",
"whatis", "uname", "hostname", "df", "du", "free", "top", "ps", "kill", "killall", "nice", "nohup", "time", "timeout",
"git status", "git log", "git diff", "git show", "git branch", "git tag", "git remote", "git ls-files", "git ls-remote",
"git rev-parse", "git config --get", "git config --list", "git describe", "git blame", "git grep", "git shortlog",
"go version", "go help", "go list", "go env", "go doc", "go vet", "go fmt", "go mod", "go test", "go build", "go run", "go install", "go clean",
}
func bashDescription() string {
bannedCommandsStr := strings.Join(bannedCommands, ", ")
return fmt.Sprintf(`Executes a given bash command in a persistent shell session with optional timeout, ensuring proper handling and security measures.
Before executing the command, please follow these steps:
1. Directory Verification:
- If the command will create new directories or files, first use the LS tool to verify the parent directory exists and is the correct location
- For example, before running "mkdir foo/bar", first use LS to check that "foo" exists and is the intended parent directory
2. Security Check:
- For security and to limit the threat of a prompt injection attack, some commands are limited or banned. If you use a disallowed command, you will receive an error message explaining the restriction. Explain the error to the User.
- Verify that the command is not one of the banned commands: %s.
3. Command Execution:
- After ensuring proper quoting, execute the command.
- Capture the output of the command.
4. Output Processing:
- If the output exceeds %d characters, output will be truncated before being returned to you.
- Prepare the output for display to the user.
5. Return Result:
- Provide the processed output of the command.
- If any errors occurred during execution, include those in the output.
Usage notes:
- The command argument is required.
- You can specify an optional timeout in milliseconds (up to 600000ms / 10 minutes). If not specified, commands will time out after 30 minutes.
- VERY IMPORTANT: You MUST avoid using search commands like 'find' and 'grep'. Instead use Grep, Glob, or Agent tools to search. You MUST avoid read tools like 'cat', 'head', 'tail', and 'ls', and use FileRead and LS tools to read files.
- When issuing multiple commands, use the ';' or '&&' operator to separate them. DO NOT use newlines (newlines are ok in quoted strings).
- IMPORTANT: All commands share the same shell session. Shell state (environment variables, virtual environments, current directory, etc.) persist between commands. For example, if you set an environment variable as part of a command, the environment variable will persist for subsequent commands.
- Try to maintain your current working directory throughout the session by using absolute paths and avoiding usage of 'cd'. You may use 'cd' if the User explicitly requests it.
<good-example>
pytest /foo/bar/tests
</good-example>
<bad-example>
cd /foo/bar && pytest tests
</bad-example>
# Committing changes with git
When the user asks you to create a new git commit, follow these steps carefully:
1. Start with a single message that contains exactly three tool_use blocks that do the following (it is VERY IMPORTANT that you send these tool_use blocks in a single message, otherwise it will feel slow to the user!):
- Run a git status command to see all untracked files.
- Run a git diff command to see both staged and unstaged changes that will be committed.
- Run a git log command to see recent commit messages, so that you can follow this repository's commit message style.
2. Use the git context at the start of this conversation to determine which files are relevant to your commit. Add relevant untracked files to the staging area. Do not commit files that were already modified at the start of this conversation, if they are not relevant to your commit.
3. Analyze all staged changes (both previously staged and newly added) and draft a commit message. Wrap your analysis process in <commit_analysis> tags:
<commit_analysis>
- List the files that have been changed or added
- Summarize the nature of the changes (e.g. new feature, enhancement to an existing feature, bug fix, refactoring, test, docs, etc.)
- Brainstorm the purpose or motivation behind these changes
- Do not use tools to explore code, beyond what is available in the git context
- Assess the impact of these changes on the overall project
- Check for any sensitive information that shouldn't be committed
- Draft a concise (1-2 sentences) commit message that focuses on the "why" rather than the "what"
- Ensure your language is clear, concise, and to the point
- Ensure the message accurately reflects the changes and their purpose (i.e. "add" means a wholly new feature, "update" means an enhancement to an existing feature, "fix" means a bug fix, etc.)
- Ensure the message is not generic (avoid words like "Update" or "Fix" without context)
- Review the draft message to ensure it accurately reflects the changes and their purpose
</commit_analysis>
4. Create the commit with a message ending with:
🤖 Generated with opencode
Co-Authored-By: opencode <noreply@opencode.ai>
- In order to ensure good formatting, ALWAYS pass the commit message via a HEREDOC, a la this example:
<example>
git commit -m "$(cat <<'EOF'
Commit message here.
🤖 Generated with opencode
Co-Authored-By: opencode <noreply@opencode.ai>
EOF
)"
</example>
5. If the commit fails due to pre-commit hook changes, retry the commit ONCE to include these automated changes. If it fails again, it usually means a pre-commit hook is preventing the commit. If the commit succeeds but you notice that files were modified by the pre-commit hook, you MUST amend your commit to include them.
6. Finally, run git status to make sure the commit succeeded.
Important notes:
- When possible, combine the "git add" and "git commit" commands into a single "git commit -am" command, to speed things up
- However, be careful not to stage files that aren't part of the change (e.g. with 'git add .'); the user may have untracked files they want to keep around but not commit.
- NEVER update the git config
- DO NOT push to the remote repository
- IMPORTANT: Never use git commands with the -i flag (like git rebase -i or git add -i) since they require interactive input which is not supported.
- If there are no changes to commit (i.e., no untracked files and no modifications), do not create an empty commit
- Ensure your commit message is meaningful and concise. It should explain the purpose of the changes, not just describe them.
- Return an empty response - the user will see the git output directly
# Creating pull requests
Use the gh command via the Bash tool for ALL GitHub-related tasks including working with issues, pull requests, checks, and releases. If given a GitHub URL, use the gh command to get the information needed.
IMPORTANT: When the user asks you to create a pull request, follow these steps carefully:
1. Understand the current state of the branch. Remember to send a single message that contains multiple tool_use blocks (it is VERY IMPORTANT that you do this in a single message, otherwise it will feel slow to the user!):
- Run a git status command to see all untracked files.
- Run a git diff command to see both staged and unstaged changes that will be committed.
- Check if the current branch tracks a remote branch and is up to date with the remote, so you know if you need to push to the remote
- Run a git log command and 'git diff main...HEAD' to understand the full commit history for the current branch (from the time it diverged from the 'main' branch.)
2. Create new branch if needed
3. Commit changes if needed
4. Push to remote with -u flag if needed
5. Analyze all changes that will be included in the pull request, making sure to look at all relevant commits (not just the latest commit, but all commits that will be included in the pull request!), and draft a pull request summary. Wrap your analysis process in <pr_analysis> tags:
<pr_analysis>
- List the commits since diverging from the main branch
- Summarize the nature of the changes (e.g. new feature, enhancement to an existing feature, bug fix, refactoring, test, docs, etc.)
- Brainstorm the purpose or motivation behind these changes
- Assess the impact of these changes on the overall project
- Do not use tools to explore code, beyond what is available in the git context
- Check for any sensitive information that shouldn't be committed
- Draft a concise (1-2 bullet points) pull request summary that focuses on the "why" rather than the "what"
- Ensure the summary accurately reflects all changes since diverging from the main branch
- Ensure your language is clear, concise, and to the point
- Ensure the summary accurately reflects the changes and their purpose (i.e. "add" means a wholly new feature, "update" means an enhancement to an existing feature, "fix" means a bug fix, etc.)
- Ensure the summary is not generic (avoid words like "Update" or "Fix" without context)
- Review the draft summary to ensure it accurately reflects the changes and their purpose
</pr_analysis>
6. Create PR using gh pr create with the format below. Use a HEREDOC to pass the body to ensure correct formatting.
<example>
gh pr create --title "the pr title" --body "$(cat <<'EOF'
## Summary
<1-3 bullet points>
## Test plan
[Checklist of TODOs for testing the pull request...]
🤖 Generated with opencode
EOF
)"
</example>
Important:
- Return an empty response - the user will see the gh output directly
- Never update git config`, bannedCommandsStr, MaxOutputLength)
}
func NewBashTool(permission permission.Service) BaseTool {
return &bashTool{
permissions: permission,
}
}
func (b *bashTool) Info() ToolInfo {
return ToolInfo{
Name: BashToolName,
Description: bashDescription(),
Parameters: map[string]any{
"command": map[string]any{
"type": "string",
"description": "The command to execute",
},
"timeout": map[string]any{
"type": "number",
"description": "Optional timeout in milliseconds (max 600000)",
},
},
Required: []string{"command"},
}
}
func (b *bashTool) Run(ctx context.Context, call ToolCall) (ToolResponse, error) {
var params BashParams
if err := json.Unmarshal([]byte(call.Input), &params); err != nil {
return NewTextErrorResponse("invalid parameters"), nil
}
if params.Timeout > MaxTimeout {
params.Timeout = MaxTimeout
} else if params.Timeout <= 0 {
params.Timeout = DefaultTimeout
}
if params.Command == "" {
return NewTextErrorResponse("missing command"), nil
}
baseCmd := strings.Fields(params.Command)[0]
for _, banned := range bannedCommands {
if strings.EqualFold(baseCmd, banned) {
return NewTextErrorResponse(fmt.Sprintf("command '%s' is not allowed", baseCmd)), nil
}
}
isSafeReadOnly := false
cmdLower := strings.ToLower(params.Command)
for _, safe := range safeReadOnlyCommands {
if strings.HasPrefix(cmdLower, strings.ToLower(safe)) {
if len(cmdLower) == len(safe) || cmdLower[len(safe)] == ' ' || cmdLower[len(safe)] == '-' {
isSafeReadOnly = true
break
}
}
}
sessionID, messageID := GetContextValues(ctx)
if sessionID == "" || messageID == "" {
return ToolResponse{}, fmt.Errorf("session ID and message ID are required for creating a new file")
}
if !isSafeReadOnly {
p := b.permissions.Request(
ctx,
permission.CreatePermissionRequest{
SessionID: sessionID,
Path: config.WorkingDirectory(),
ToolName: BashToolName,
Action: "execute",
Description: fmt.Sprintf("Execute command: %s", params.Command),
Params: BashPermissionsParams{
Command: params.Command,
},
},
)
if !p {
return ToolResponse{}, permission.ErrorPermissionDenied
}
}
startTime := time.Now()
shell := shell.GetPersistentShell(config.WorkingDirectory())
stdout, stderr, exitCode, interrupted, err := shell.Exec(ctx, params.Command, params.Timeout)
if err != nil {
return ToolResponse{}, fmt.Errorf("error executing command: %w", err)
}
stdout = truncateOutput(stdout)
stderr = truncateOutput(stderr)
errorMessage := stderr
if interrupted {
if errorMessage != "" {
errorMessage += "\n"
}
errorMessage += "Command was aborted before completion"
} else if exitCode != 0 {
if errorMessage != "" {
errorMessage += "\n"
}
errorMessage += fmt.Sprintf("Exit code %d", exitCode)
}
hasBothOutputs := stdout != "" && stderr != ""
if hasBothOutputs {
stdout += "\n"
}
if errorMessage != "" {
stdout += "\n" + errorMessage
}
metadata := BashResponseMetadata{
StartTime: startTime.UnixMilli(),
EndTime: time.Now().UnixMilli(),
}
if stdout == "" {
return WithResponseMetadata(NewTextResponse("no output"), metadata), nil
}
return WithResponseMetadata(NewTextResponse(stdout), metadata), nil
}
func truncateOutput(content string) string {
if len(content) <= MaxOutputLength {
return content
}
halfLength := MaxOutputLength / 2
start := content[:halfLength]
end := content[len(content)-halfLength:]
truncatedLinesCount := countLines(content[halfLength : len(content)-halfLength])
return fmt.Sprintf("%s\n\n... [%d lines truncated] ...\n\n%s", start, truncatedLinesCount, end)
}
func countLines(s string) int {
if s == "" {
return 0
}
return len(strings.Split(s, "\n"))
}
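For context, the read-only allowlist above is matched by prefix with a boundary check, so that, for example, "git status --short" passes but a command merely starting with those letters does not. A minimal same-package sketch of that check (the function name isSafeCommand is ours, not part of this file; it relies on the strings import already present):

// isSafeCommand reports whether cmd begins with an allowlisted read-only
// prefix followed by end-of-string, a space, or a '-' flag.
func isSafeCommand(cmd string, allowlist []string) bool {
	lower := strings.ToLower(cmd)
	for _, safe := range allowlist {
		prefix := strings.ToLower(safe)
		if !strings.HasPrefix(lower, prefix) {
			continue
		}
		if len(lower) == len(prefix) || lower[len(prefix)] == ' ' || lower[len(prefix)] == '-' {
			return true
		}
	}
	return false
}

// isSafeCommand("git status --short", safeReadOnlyCommands) == true
// isSafeCommand("git push", safeReadOnlyCommands)           == false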

View File

@@ -1,191 +0,0 @@
package tools
import (
"context"
"encoding/json"
"fmt"
"strings"
"sync"
)
type BatchToolCall struct {
Name string `json:"name"`
Input json.RawMessage `json:"input"`
}
type BatchParams struct {
Calls []BatchToolCall `json:"calls"`
}
type BatchToolResult struct {
ToolName string `json:"tool_name"`
ToolInput json.RawMessage `json:"tool_input"`
Result json.RawMessage `json:"result"`
Error string `json:"error,omitempty"`
// Added for better formatting and separation between results
Separator string `json:"separator,omitempty"`
}
type BatchResult struct {
Results []BatchToolResult `json:"results"`
}
type batchTool struct {
tools map[string]BaseTool
}
const (
BatchToolName = "batch"
BatchToolDescription = `Executes multiple tool calls in parallel and returns their results.
WHEN TO USE THIS TOOL:
- Use when you need to run multiple independent tool calls at once
- Helpful for improving performance by parallelizing operations
- Great for gathering information from multiple sources simultaneously
HOW TO USE:
- Provide an array of tool calls, each with a name and input
- Each tool call will be executed in parallel
- Results are returned in the same order as the input calls
FEATURES:
- Runs tool calls concurrently for better performance
- Returns both results and errors for each call
- Maintains the order of results to match input calls
LIMITATIONS:
- All tools must be available in the current context
- Complex error handling may be required for some use cases
- Not suitable for tool calls that depend on each other's results
TIPS:
- Use for independent operations like multiple file reads or searches
- Great for batch operations like searching multiple directories
- Combine with other tools for more complex workflows`
)
func NewBatchTool(tools map[string]BaseTool) BaseTool {
return &batchTool{
tools: tools,
}
}
func (b *batchTool) Info() ToolInfo {
return ToolInfo{
Name: BatchToolName,
Description: BatchToolDescription,
Parameters: map[string]any{
"calls": map[string]any{
"type": "array",
"description": "Array of tool calls to execute in parallel",
"items": map[string]any{
"type": "object",
"properties": map[string]any{
"name": map[string]any{
"type": "string",
"description": "Name of the tool to call",
},
"input": map[string]any{
"type": "object",
"description": "Input parameters for the tool",
},
},
"required": []string{"name", "input"},
},
},
},
Required: []string{"calls"},
}
}
func (b *batchTool) Run(ctx context.Context, call ToolCall) (ToolResponse, error) {
var params BatchParams
if err := json.Unmarshal([]byte(call.Input), &params); err != nil {
return NewTextErrorResponse(fmt.Sprintf("error parsing parameters: %s", err)), nil
}
if len(params.Calls) == 0 {
return NewTextErrorResponse("no tool calls provided"), nil
}
var wg sync.WaitGroup
results := make([]BatchToolResult, len(params.Calls))
for i, toolCall := range params.Calls {
wg.Add(1)
go func(index int, tc BatchToolCall) {
defer wg.Done()
// Create separator for better visual distinction between results
separator := ""
if index > 0 {
separator = fmt.Sprintf("\n%s\n", strings.Repeat("=", 80))
}
result := BatchToolResult{
ToolName: tc.Name,
ToolInput: tc.Input,
Separator: separator,
}
tool, ok := b.tools[tc.Name]
if !ok {
result.Error = fmt.Sprintf("tool not found: %s", tc.Name)
results[index] = result
return
}
// Create a proper ToolCall object
callObj := ToolCall{
ID: fmt.Sprintf("batch-%d", index),
Name: tc.Name,
Input: string(tc.Input),
}
response, err := tool.Run(ctx, callObj)
if err != nil {
result.Error = fmt.Sprintf("error executing tool %s: %s", tc.Name, err)
results[index] = result
return
}
// Standardize metadata format if present
if response.Metadata != "" {
var metadata map[string]interface{}
if err := json.Unmarshal([]byte(response.Metadata), &metadata); err == nil {
// Add tool name to metadata for better context
metadata["tool"] = tc.Name
// Re-marshal with consistent formatting
if metadataBytes, err := json.MarshalIndent(metadata, "", " "); err == nil {
response.Metadata = string(metadataBytes)
}
}
}
// Convert the response to JSON
responseJSON, err := json.Marshal(response)
if err != nil {
result.Error = fmt.Sprintf("error marshaling response: %s", err)
results[index] = result
return
}
result.Result = responseJSON
results[index] = result
}(i, toolCall)
}
wg.Wait()
batchResult := BatchResult{
Results: results,
}
resultJSON, err := json.Marshal(batchResult)
if err != nil {
return NewTextErrorResponse(fmt.Sprintf("error marshaling batch result: %s", err)), nil
}
return NewTextResponse(string(resultJSON)), nil
}
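The ordering guarantee in the description comes from each goroutine writing only to its own index of a pre-sized results slice, so no mutex is needed and outputs line up with the input calls. A self-contained sketch of that fan-out pattern (the string work here is purely illustrative, not part of this package):

package main

import (
	"fmt"
	"sync"
)

func main() {
	inputs := []string{"a", "b", "c"}
	results := make([]string, len(inputs)) // pre-sized: each goroutine owns one slot
	var wg sync.WaitGroup
	for i, in := range inputs {
		wg.Add(1)
		go func(idx int, v string) {
			defer wg.Done()
			results[idx] = "done:" + v // distinct index per goroutine, no shared-slot races
		}(i, in)
	}
	wg.Wait()
	fmt.Println(results) // [done:a done:b done:c] — same order as the inputs
}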

View File

@@ -1,224 +0,0 @@
package tools
import (
"context"
"encoding/json"
"testing"
"github.com/stretchr/testify/assert"
)
// MockTool is a simple tool implementation for testing
type MockTool struct {
name string
description string
response ToolResponse
err error
}
func (m *MockTool) Info() ToolInfo {
return ToolInfo{
Name: m.name,
Description: m.description,
Parameters: map[string]any{},
Required: []string{},
}
}
func (m *MockTool) Run(ctx context.Context, call ToolCall) (ToolResponse, error) {
return m.response, m.err
}
func TestBatchTool(t *testing.T) {
t.Parallel()
t.Run("successful batch execution", func(t *testing.T) {
t.Parallel()
// Create mock tools
mockTools := map[string]BaseTool{
"tool1": &MockTool{
name: "tool1",
description: "Mock Tool 1",
response: NewTextResponse("Tool 1 Response"),
err: nil,
},
"tool2": &MockTool{
name: "tool2",
description: "Mock Tool 2",
response: NewTextResponse("Tool 2 Response"),
err: nil,
},
}
// Create batch tool
batchTool := NewBatchTool(mockTools)
// Create batch call
input := `{
"calls": [
{
"name": "tool1",
"input": {}
},
{
"name": "tool2",
"input": {}
}
]
}`
call := ToolCall{
ID: "test-batch",
Name: "batch",
Input: input,
}
// Execute batch
response, err := batchTool.Run(context.Background(), call)
// Verify results
assert.NoError(t, err)
assert.Equal(t, ToolResponseTypeText, response.Type)
assert.False(t, response.IsError)
// Parse the response
var batchResult BatchResult
err = json.Unmarshal([]byte(response.Content), &batchResult)
assert.NoError(t, err)
// Verify batch results
assert.Len(t, batchResult.Results, 2)
assert.Empty(t, batchResult.Results[0].Error)
assert.Empty(t, batchResult.Results[1].Error)
assert.Empty(t, batchResult.Results[0].Separator)
assert.NotEmpty(t, batchResult.Results[1].Separator)
// Verify individual results
var result1 ToolResponse
err = json.Unmarshal(batchResult.Results[0].Result, &result1)
assert.NoError(t, err)
assert.Equal(t, "Tool 1 Response", result1.Content)
var result2 ToolResponse
err = json.Unmarshal(batchResult.Results[1].Result, &result2)
assert.NoError(t, err)
assert.Equal(t, "Tool 2 Response", result2.Content)
})
t.Run("tool not found", func(t *testing.T) {
t.Parallel()
// Create mock tools
mockTools := map[string]BaseTool{
"tool1": &MockTool{
name: "tool1",
description: "Mock Tool 1",
response: NewTextResponse("Tool 1 Response"),
err: nil,
},
}
// Create batch tool
batchTool := NewBatchTool(mockTools)
// Create batch call with non-existent tool
input := `{
"calls": [
{
"name": "tool1",
"input": {}
},
{
"name": "nonexistent",
"input": {}
}
]
}`
call := ToolCall{
ID: "test-batch",
Name: "batch",
Input: input,
}
// Execute batch
response, err := batchTool.Run(context.Background(), call)
// Verify results
assert.NoError(t, err)
assert.Equal(t, ToolResponseTypeText, response.Type)
assert.False(t, response.IsError)
// Parse the response
var batchResult BatchResult
err = json.Unmarshal([]byte(response.Content), &batchResult)
assert.NoError(t, err)
// Verify batch results
assert.Len(t, batchResult.Results, 2)
assert.Empty(t, batchResult.Results[0].Error)
assert.Contains(t, batchResult.Results[1].Error, "tool not found: nonexistent")
})
t.Run("empty calls", func(t *testing.T) {
t.Parallel()
// Create batch tool with empty tools map
batchTool := NewBatchTool(map[string]BaseTool{})
// Create batch call with empty calls
input := `{
"calls": []
}`
call := ToolCall{
ID: "test-batch",
Name: "batch",
Input: input,
}
// Execute batch
response, err := batchTool.Run(context.Background(), call)
// Verify results
assert.NoError(t, err)
assert.Equal(t, ToolResponseTypeText, response.Type)
assert.True(t, response.IsError)
assert.Contains(t, response.Content, "no tool calls provided")
})
t.Run("invalid input", func(t *testing.T) {
t.Parallel()
// Create batch tool with empty tools map
batchTool := NewBatchTool(map[string]BaseTool{})
// Create batch call with invalid JSON
input := `{
"calls": [
{
"name": "tool1",
"input": {
"invalid": json
}
}
]
}`
call := ToolCall{
ID: "test-batch",
Name: "batch",
Input: input,
}
// Execute batch
response, err := batchTool.Run(context.Background(), call)
// Verify results
assert.NoError(t, err)
assert.Equal(t, ToolResponseTypeText, response.Type)
assert.True(t, response.IsError)
assert.Contains(t, response.Content, "error parsing parameters")
})
}

View File

@@ -1,477 +0,0 @@
package tools
import (
"context"
"encoding/json"
"fmt"
"os"
"path/filepath"
"strings"
"time"
"github.com/sst/opencode/internal/config"
"github.com/sst/opencode/internal/diff"
"github.com/sst/opencode/internal/history"
"github.com/sst/opencode/internal/lsp"
"github.com/sst/opencode/internal/permission"
"log/slog"
)
type EditParams struct {
FilePath string `json:"file_path"`
OldString string `json:"old_string"`
NewString string `json:"new_string"`
}
type EditPermissionsParams struct {
FilePath string `json:"file_path"`
Diff string `json:"diff"`
}
type EditResponseMetadata struct {
Diff string `json:"diff"`
Additions int `json:"additions"`
Removals int `json:"removals"`
}
type editTool struct {
lspClients map[string]*lsp.Client
permissions permission.Service
history history.Service
}
const (
EditToolName = "edit"
editDescription = `Edits files by replacing text, creating new files, or deleting content. For moving or renaming files, use the Bash tool with the 'mv' command instead. For larger file edits, use the FileWrite tool to overwrite files.
Before using this tool:
1. Use the FileRead tool to understand the file's contents and context
2. Verify the directory path is correct (only applicable when creating new files):
- Use the LS tool to verify the parent directory exists and is the correct location
To make a file edit, provide the following:
1. file_path: The absolute path to the file to modify (must be absolute, not relative)
2. old_string: The text to replace (must be unique within the file, and must match the file contents exactly, including all whitespace and indentation)
3. new_string: The edited text to replace the old_string
Special cases:
- To create a new file: provide file_path and new_string, leave old_string empty
- To delete content: provide file_path and old_string, leave new_string empty
The tool will replace ONE occurrence of old_string with new_string in the specified file.
CRITICAL REQUIREMENTS FOR USING THIS TOOL:
1. UNIQUENESS: The old_string MUST uniquely identify the specific instance you want to change. This means:
- Include AT LEAST 3-5 lines of context BEFORE the change point
- Include AT LEAST 3-5 lines of context AFTER the change point
- Include all whitespace, indentation, and surrounding code exactly as it appears in the file
2. SINGLE INSTANCE: This tool can only change ONE instance at a time. If you need to change multiple instances:
- Make separate calls to this tool for each instance
- Each call must uniquely identify its specific instance using extensive context
3. VERIFICATION: Before using this tool:
- Check how many instances of the target text exist in the file
- If multiple instances exist, gather enough context to uniquely identify each one
- Plan separate tool calls for each instance
WARNING: If you do not follow these requirements:
- The tool will fail if old_string matches multiple locations
- The tool will fail if old_string doesn't match exactly (including whitespace)
- You may change the wrong instance if you don't include enough context
When making edits:
- Ensure the edit results in idiomatic, correct code
- Do not leave the code in a broken state
- Always use absolute file paths (starting with /)
Remember: when making multiple file edits in a row to the same file, you should prefer to send all edits in a single message with multiple calls to this tool, rather than multiple messages with a single call each.`
)
func NewEditTool(lspClients map[string]*lsp.Client, permissions permission.Service, files history.Service) BaseTool {
return &editTool{
lspClients: lspClients,
permissions: permissions,
history: files,
}
}
func (e *editTool) Info() ToolInfo {
return ToolInfo{
Name: EditToolName,
Description: editDescription,
Parameters: map[string]any{
"file_path": map[string]any{
"type": "string",
"description": "The absolute path to the file to modify",
},
"old_string": map[string]any{
"type": "string",
"description": "The text to replace",
},
"new_string": map[string]any{
"type": "string",
"description": "The text to replace it with",
},
},
Required: []string{"file_path", "old_string", "new_string"},
}
}
func (e *editTool) Run(ctx context.Context, call ToolCall) (ToolResponse, error) {
var params EditParams
if err := json.Unmarshal([]byte(call.Input), &params); err != nil {
return NewTextErrorResponse("invalid parameters"), nil
}
if params.FilePath == "" {
return NewTextErrorResponse("file_path is required"), nil
}
if !filepath.IsAbs(params.FilePath) {
wd := config.WorkingDirectory()
params.FilePath = filepath.Join(wd, params.FilePath)
}
var response ToolResponse
var err error
// Exactly one edit mode applies: create when old_string is empty, delete when new_string is empty, otherwise replace.
if params.OldString == "" {
response, err = e.createNewFile(ctx, params.FilePath, params.NewString)
} else if params.NewString == "" {
response, err = e.deleteContent(ctx, params.FilePath, params.OldString)
} else {
response, err = e.replaceContent(ctx, params.FilePath, params.OldString, params.NewString)
}
if err != nil {
return response, err
}
if response.IsError {
// Return early if there was an error during content replacement
// This prevents unnecessary LSP diagnostics processing
return response, nil
}
waitForLspDiagnostics(ctx, params.FilePath, e.lspClients)
text := fmt.Sprintf("<result>\n%s\n</result>\n", response.Content)
text += getDiagnostics(params.FilePath, e.lspClients)
response.Content = text
return response, nil
}
func (e *editTool) createNewFile(ctx context.Context, filePath, content string) (ToolResponse, error) {
fileInfo, err := os.Stat(filePath)
if err == nil {
if fileInfo.IsDir() {
return NewTextErrorResponse(fmt.Sprintf("path is a directory, not a file: %s", filePath)), nil
}
return NewTextErrorResponse(fmt.Sprintf("file already exists: %s", filePath)), nil
} else if !os.IsNotExist(err) {
return ToolResponse{}, fmt.Errorf("failed to access file: %w", err)
}
dir := filepath.Dir(filePath)
if err = os.MkdirAll(dir, 0o755); err != nil {
return ToolResponse{}, fmt.Errorf("failed to create parent directories: %w", err)
}
sessionID, messageID := GetContextValues(ctx)
if sessionID == "" || messageID == "" {
return ToolResponse{}, fmt.Errorf("session ID and message ID are required for creating a new file")
}
diff, additions, removals := diff.GenerateDiff(
"",
content,
filePath,
)
p := e.permissions.Request(
ctx,
permission.CreatePermissionRequest{
SessionID: sessionID,
Path: filePath,
ToolName: EditToolName,
Action: "write",
Description: fmt.Sprintf("Create file %s", filePath),
Params: EditPermissionsParams{
FilePath: filePath,
Diff: diff,
},
},
)
if !p {
return ToolResponse{}, permission.ErrorPermissionDenied
}
err = os.WriteFile(filePath, []byte(content), 0o644)
if err != nil {
return ToolResponse{}, fmt.Errorf("failed to write file: %w", err)
}
// File can't be in the history so we create a new file history
_, err = e.history.Create(ctx, sessionID, filePath, "")
if err != nil {
// Creating the history record failed; return the error
return ToolResponse{}, fmt.Errorf("error creating file history: %w", err)
}
// Add the new content to the file history
_, err = e.history.CreateVersion(ctx, sessionID, filePath, content)
if err != nil {
// Log error but don't fail the operation
slog.Debug("Error creating file history version", "error", err)
}
recordFileWrite(filePath)
recordFileRead(filePath)
return WithResponseMetadata(
NewTextResponse("File created: "+filePath),
EditResponseMetadata{
Diff: diff,
Additions: additions,
Removals: removals,
},
), nil
}
func (e *editTool) deleteContent(ctx context.Context, filePath, oldString string) (ToolResponse, error) {
fileInfo, err := os.Stat(filePath)
if err != nil {
if os.IsNotExist(err) {
return NewTextErrorResponse(fmt.Sprintf("file not found: %s", filePath)), nil
}
return ToolResponse{}, fmt.Errorf("failed to access file: %w", err)
}
if fileInfo.IsDir() {
return NewTextErrorResponse(fmt.Sprintf("path is a directory, not a file: %s", filePath)), nil
}
if getLastReadTime(filePath).IsZero() {
return NewTextErrorResponse("you must read the file before editing it. Use the View tool first"), nil
}
modTime := fileInfo.ModTime()
lastRead := getLastReadTime(filePath)
if modTime.After(lastRead) {
return NewTextErrorResponse(
fmt.Sprintf("file %s has been modified since it was last read (mod time: %s, last read: %s)",
filePath, modTime.Format(time.RFC3339), lastRead.Format(time.RFC3339),
)), nil
}
content, err := os.ReadFile(filePath)
if err != nil {
return ToolResponse{}, fmt.Errorf("failed to read file: %w", err)
}
oldContent := string(content)
index := strings.Index(oldContent, oldString)
if index == -1 {
return NewTextErrorResponse("old_string not found in file. Make sure it matches exactly, including whitespace and line breaks"), nil
}
lastIndex := strings.LastIndex(oldContent, oldString)
if index != lastIndex {
return NewTextErrorResponse("old_string appears multiple times in the file. Please provide more context to ensure a unique match"), nil
}
newContent := oldContent[:index] + oldContent[index+len(oldString):]
sessionID, messageID := GetContextValues(ctx)
if sessionID == "" || messageID == "" {
return ToolResponse{}, fmt.Errorf("session ID and message ID are required for creating a new file")
}
diff, additions, removals := diff.GenerateDiff(
oldContent,
newContent,
filePath,
)
p := e.permissions.Request(
ctx,
permission.CreatePermissionRequest{
SessionID: sessionID,
Path: filePath,
ToolName: EditToolName,
Action: "write",
Description: fmt.Sprintf("Delete content from file %s", filePath),
Params: EditPermissionsParams{
FilePath: filePath,
Diff: diff,
},
},
)
if !p {
return ToolResponse{}, permission.ErrorPermissionDenied
}
err = os.WriteFile(filePath, []byte(newContent), 0o644)
if err != nil {
return ToolResponse{}, fmt.Errorf("failed to write file: %w", err)
}
// Check if file exists in history
file, err := e.history.GetLatestByPathAndSession(ctx, filePath, sessionID)
if err != nil {
_, err = e.history.Create(ctx, sessionID, filePath, oldContent)
if err != nil {
// Creating the history record failed; return the error
return ToolResponse{}, fmt.Errorf("error creating file history: %w", err)
}
}
if file.Content != oldContent {
// User manually changed the content; store an intermediate version
_, err = e.history.CreateVersion(ctx, sessionID, filePath, oldContent)
if err != nil {
slog.Debug("Error creating file history version", "error", err)
}
}
// Store the new version
_, err = e.history.CreateVersion(ctx, sessionID, filePath, "")
if err != nil {
slog.Debug("Error creating file history version", "error", err)
}
recordFileWrite(filePath)
recordFileRead(filePath)
return WithResponseMetadata(
NewTextResponse("Content deleted from file: "+filePath),
EditResponseMetadata{
Diff: diff,
Additions: additions,
Removals: removals,
},
), nil
}
func (e *editTool) replaceContent(ctx context.Context, filePath, oldString, newString string) (ToolResponse, error) {
fileInfo, err := os.Stat(filePath)
if err != nil {
if os.IsNotExist(err) {
return NewTextErrorResponse(fmt.Sprintf("file not found: %s", filePath)), nil
}
return ToolResponse{}, fmt.Errorf("failed to access file: %w", err)
}
if fileInfo.IsDir() {
return NewTextErrorResponse(fmt.Sprintf("path is a directory, not a file: %s", filePath)), nil
}
if getLastReadTime(filePath).IsZero() {
return NewTextErrorResponse("you must read the file before editing it. Use the View tool first"), nil
}
modTime := fileInfo.ModTime()
lastRead := getLastReadTime(filePath)
if modTime.After(lastRead) {
return NewTextErrorResponse(
fmt.Sprintf("file %s has been modified since it was last read (mod time: %s, last read: %s)",
filePath, modTime.Format(time.RFC3339), lastRead.Format(time.RFC3339),
)), nil
}
content, err := os.ReadFile(filePath)
if err != nil {
return ToolResponse{}, fmt.Errorf("failed to read file: %w", err)
}
oldContent := string(content)
index := strings.Index(oldContent, oldString)
if index == -1 {
return NewTextErrorResponse("old_string not found in file. Make sure it matches exactly, including whitespace and line breaks"), nil
}
lastIndex := strings.LastIndex(oldContent, oldString)
if index != lastIndex {
return NewTextErrorResponse("old_string appears multiple times in the file. Please provide more context to ensure a unique match"), nil
}
newContent := oldContent[:index] + newString + oldContent[index+len(oldString):]
if oldContent == newContent {
return NewTextErrorResponse("new content is the same as old content. No changes made."), nil
}
sessionID, messageID := GetContextValues(ctx)
if sessionID == "" || messageID == "" {
return ToolResponse{}, fmt.Errorf("session ID and message ID are required for creating a new file")
}
diff, additions, removals := diff.GenerateDiff(
oldContent,
newContent,
filePath,
)
p := e.permissions.Request(
ctx,
permission.CreatePermissionRequest{
SessionID: sessionID,
Path: filePath,
ToolName: EditToolName,
Action: "write",
Description: fmt.Sprintf("Replace content in file %s", filePath),
Params: EditPermissionsParams{
FilePath: filePath,
Diff: diff,
},
},
)
if !p {
return ToolResponse{}, permission.ErrorPermissionDenied
}
err = os.WriteFile(filePath, []byte(newContent), 0o644)
if err != nil {
return ToolResponse{}, fmt.Errorf("failed to write file: %w", err)
}
// Check if file exists in history
file, err := e.history.GetLatestByPathAndSession(ctx, filePath, sessionID)
if err != nil {
_, err = e.history.Create(ctx, sessionID, filePath, oldContent)
if err != nil {
// Creating the history record failed; return the error
return ToolResponse{}, fmt.Errorf("error creating file history: %w", err)
}
}
if file.Content != oldContent {
// User manually changed the content; store an intermediate version
_, err = e.history.CreateVersion(ctx, sessionID, filePath, oldContent)
if err != nil {
slog.Debug("Error creating file history version", "error", err)
}
}
// Store the new version
_, err = e.history.CreateVersion(ctx, sessionID, filePath, newContent)
if err != nil {
slog.Debug("Error creating file history version", "error", err)
}
recordFileWrite(filePath)
recordFileRead(filePath)
return WithResponseMetadata(
NewTextResponse("Content replaced in file: "+filePath),
EditResponseMetadata{
Diff: diff,
Additions: additions,
Removals: removals,
}), nil
}
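The uniqueness requirement in the description is enforced by comparing the first and last occurrence of old_string: if they differ, the string appears more than once. A small same-package sketch of that check and the single-occurrence replacement (replaceUnique is our name; it uses the strings and fmt imports already present in this file):

// replaceUnique replaces old with replacement only if old occurs exactly once in content.
func replaceUnique(content, old, replacement string) (string, error) {
	first := strings.Index(content, old)
	if first == -1 {
		return "", fmt.Errorf("old_string not found")
	}
	if first != strings.LastIndex(content, old) {
		return "", fmt.Errorf("old_string is not unique")
	}
	return content[:first] + replacement + content[first+len(old):], nil
}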

View File

@@ -1,228 +0,0 @@
package tools
import (
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"strings"
"time"
md "github.com/JohannesKaufmann/html-to-markdown"
"github.com/PuerkitoBio/goquery"
"github.com/sst/opencode/internal/config"
"github.com/sst/opencode/internal/permission"
)
type FetchParams struct {
URL string `json:"url"`
Format string `json:"format"`
Timeout int `json:"timeout,omitempty"`
}
type FetchPermissionsParams struct {
URL string `json:"url"`
Format string `json:"format"`
Timeout int `json:"timeout,omitempty"`
}
type fetchTool struct {
client *http.Client
permissions permission.Service
}
const (
FetchToolName = "fetch"
fetchToolDescription = `Fetches content from a URL and returns it in the specified format.
WHEN TO USE THIS TOOL:
- Use when you need to download content from a URL
- Helpful for retrieving documentation, API responses, or web content
- Useful for getting external information to assist with tasks
HOW TO USE:
- Provide the URL to fetch content from
- Specify the desired output format (text, markdown, or html)
- Optionally set a timeout for the request
FEATURES:
- Supports three output formats: text, markdown, and html
- Automatically handles HTTP redirects
- Sets reasonable timeouts to prevent hanging
- Validates input parameters before making requests
LIMITATIONS:
- Maximum response size is 5MB
- Only supports HTTP and HTTPS protocols
- Cannot handle authentication or cookies
- Some websites may block automated requests
TIPS:
- Use text format for plain text content or simple API responses
- Use markdown format for content that should be rendered with formatting
- Use html format when you need the raw HTML structure
- Set appropriate timeouts for potentially slow websites`
)
func NewFetchTool(permissions permission.Service) BaseTool {
return &fetchTool{
client: &http.Client{
Timeout: 30 * time.Second,
},
permissions: permissions,
}
}
func (t *fetchTool) Info() ToolInfo {
return ToolInfo{
Name: FetchToolName,
Description: fetchToolDescription,
Parameters: map[string]any{
"url": map[string]any{
"type": "string",
"description": "The URL to fetch content from",
},
"format": map[string]any{
"type": "string",
"description": "The format to return the content in (text, markdown, or html)",
"enum": []string{"text", "markdown", "html"},
},
"timeout": map[string]any{
"type": "number",
"description": "Optional timeout in seconds (max 120)",
},
},
Required: []string{"url", "format"},
}
}
func (t *fetchTool) Run(ctx context.Context, call ToolCall) (ToolResponse, error) {
var params FetchParams
if err := json.Unmarshal([]byte(call.Input), &params); err != nil {
return NewTextErrorResponse("Failed to parse fetch parameters: " + err.Error()), nil
}
if params.URL == "" {
return NewTextErrorResponse("URL parameter is required"), nil
}
format := strings.ToLower(params.Format)
if format != "text" && format != "markdown" && format != "html" {
return NewTextErrorResponse("Format must be one of: text, markdown, html"), nil
}
if !strings.HasPrefix(params.URL, "http://") && !strings.HasPrefix(params.URL, "https://") {
return NewTextErrorResponse("URL must start with http:// or https://"), nil
}
sessionID, messageID := GetContextValues(ctx)
if sessionID == "" || messageID == "" {
return ToolResponse{}, fmt.Errorf("session ID and message ID are required for creating a new file")
}
p := t.permissions.Request(
ctx,
permission.CreatePermissionRequest{
SessionID: sessionID,
Path: config.WorkingDirectory(),
ToolName: FetchToolName,
Action: "fetch",
Description: fmt.Sprintf("Fetch content from URL: %s", params.URL),
Params: FetchPermissionsParams(params),
},
)
if !p {
return ToolResponse{}, permission.ErrorPermissionDenied
}
client := t.client
if params.Timeout > 0 {
maxTimeout := 120 // 2 minutes
if params.Timeout > maxTimeout {
params.Timeout = maxTimeout
}
client = &http.Client{
Timeout: time.Duration(params.Timeout) * time.Second,
}
}
req, err := http.NewRequestWithContext(ctx, "GET", params.URL, nil)
if err != nil {
return ToolResponse{}, fmt.Errorf("failed to create request: %w", err)
}
req.Header.Set("User-Agent", "opencode/1.0")
resp, err := client.Do(req)
if err != nil {
return ToolResponse{}, fmt.Errorf("failed to fetch URL: %w", err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return NewTextErrorResponse(fmt.Sprintf("Request failed with status code: %d", resp.StatusCode)), nil
}
maxSize := int64(5 * 1024 * 1024) // 5MB
body, err := io.ReadAll(io.LimitReader(resp.Body, maxSize))
if err != nil {
return NewTextErrorResponse("Failed to read response body: " + err.Error()), nil
}
content := string(body)
contentType := resp.Header.Get("Content-Type")
switch format {
case "text":
if strings.Contains(contentType, "text/html") {
text, err := extractTextFromHTML(content)
if err != nil {
return NewTextErrorResponse("Failed to extract text from HTML: " + err.Error()), nil
}
return NewTextResponse(text), nil
}
return NewTextResponse(content), nil
case "markdown":
if strings.Contains(contentType, "text/html") {
markdown, err := convertHTMLToMarkdown(content)
if err != nil {
return NewTextErrorResponse("Failed to convert HTML to Markdown: " + err.Error()), nil
}
return NewTextResponse(markdown), nil
}
return NewTextResponse("```\n" + content + "\n```"), nil
case "html":
return NewTextResponse(content), nil
default:
return NewTextResponse(content), nil
}
}
func extractTextFromHTML(html string) (string, error) {
doc, err := goquery.NewDocumentFromReader(strings.NewReader(html))
if err != nil {
return "", err
}
text := doc.Text()
text = strings.Join(strings.Fields(text), " ")
return text, nil
}
func convertHTMLToMarkdown(html string) (string, error) {
converter := md.NewConverter("", true, nil)
markdown, err := converter.ConvertString(html)
if err != nil {
return "", err
}
return markdown, nil
}
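Note that the 5MB cap is enforced purely client-side with io.LimitReader, so an oversized body is silently truncated rather than rejected. A standalone sketch of the same bounded-read pattern (the URL is a placeholder, not one used by the tool):

package main

import (
	"fmt"
	"io"
	"net/http"
	"time"
)

func main() {
	client := &http.Client{Timeout: 30 * time.Second}
	resp, err := client.Get("https://example.com") // placeholder URL
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	const maxSize = 5 * 1024 * 1024 // same 5MB cap as the fetch tool
	body, err := io.ReadAll(io.LimitReader(resp.Body, maxSize))
	if err != nil {
		panic(err)
	}
	fmt.Printf("read %d bytes (content-type %s)\n", len(body), resp.Header.Get("Content-Type"))
}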

View File

@@ -1,53 +0,0 @@
package tools
import (
"sync"
"time"
)
// File record to track when files were read/written
type fileRecord struct {
path string
readTime time.Time
writeTime time.Time
}
var (
fileRecords = make(map[string]fileRecord)
fileRecordMutex sync.RWMutex
)
func recordFileRead(path string) {
fileRecordMutex.Lock()
defer fileRecordMutex.Unlock()
record, exists := fileRecords[path]
if !exists {
record = fileRecord{path: path}
}
record.readTime = time.Now()
fileRecords[path] = record
}
func getLastReadTime(path string) time.Time {
fileRecordMutex.RLock()
defer fileRecordMutex.RUnlock()
record, exists := fileRecords[path]
if !exists {
return time.Time{}
}
return record.readTime
}
func recordFileWrite(path string) {
fileRecordMutex.Lock()
defer fileRecordMutex.Unlock()
record, exists := fileRecords[path]
if !exists {
record = fileRecord{path: path}
}
record.writeTime = time.Now()
fileRecords[path] = record
}
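These records back the read-before-edit checks in the edit tool: an edit is refused if the file was never read, or if it changed on disk after the last recorded read. A sketch of that freshness check, assuming it lives alongside this file in the tools package and that fmt is imported (checkFreshness is our name, not part of the package):

// checkFreshness returns an error if path was never read, or was modified
// on disk after the last recorded read.
func checkFreshness(path string, modTime time.Time) error {
	lastRead := getLastReadTime(path)
	if lastRead.IsZero() {
		return fmt.Errorf("file must be read before editing: %s", path)
	}
	if modTime.After(lastRead) {
		return fmt.Errorf("file modified since last read: %s", path)
	}
	return nil
}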

View File

@@ -1,175 +0,0 @@
package tools
import (
"bytes"
"context"
"encoding/json"
"fmt"
"os/exec"
"path/filepath"
"sort"
"strings"
"github.com/sst/opencode/internal/config"
"github.com/sst/opencode/internal/fileutil"
"github.com/sst/opencode/internal/status"
)
const (
GlobToolName = "glob"
globDescription = `Fast file pattern matching tool that finds files by name and pattern, returning matching paths sorted by modification time (newest first).
WHEN TO USE THIS TOOL:
- Use when you need to find files by name patterns or extensions
- Great for finding specific file types across a directory structure
- Useful for discovering files that match certain naming conventions
HOW TO USE:
- Provide a glob pattern to match against file paths
- Optionally specify a starting directory (defaults to current working directory)
- Results are sorted with most recently modified files first
GLOB PATTERN SYNTAX:
- '*' matches any sequence of non-separator characters
- '**' matches any sequence of characters, including separators
- '?' matches any single non-separator character
- '[...]' matches any character in the brackets
- '[!...]' matches any character not in the brackets
COMMON PATTERN EXAMPLES:
- '*.js' - Find all JavaScript files in the current directory
- '**/*.js' - Find all JavaScript files in any subdirectory
- 'src/**/*.{ts,tsx}' - Find all TypeScript files in the src directory
- '*.{html,css,js}' - Find all HTML, CSS, and JS files
LIMITATIONS:
- Results are limited to 100 files (newest first)
- Does not search file contents (use Grep tool for that)
- Hidden files (starting with '.') are skipped
TIPS:
- For the most useful results, combine with the Grep tool: first find files with Glob, then search their contents with Grep
- When doing iterative exploration that may require multiple rounds of searching, consider using the Agent tool instead
- Always check if results are truncated and refine your search pattern if needed`
)
type GlobParams struct {
Pattern string `json:"pattern"`
Path string `json:"path"`
}
type GlobResponseMetadata struct {
NumberOfFiles int `json:"number_of_files"`
Truncated bool `json:"truncated"`
}
type globTool struct{}
func NewGlobTool() BaseTool {
return &globTool{}
}
func (g *globTool) Info() ToolInfo {
return ToolInfo{
Name: GlobToolName,
Description: globDescription,
Parameters: map[string]any{
"pattern": map[string]any{
"type": "string",
"description": "The glob pattern to match files against",
},
"path": map[string]any{
"type": "string",
"description": "The directory to search in. Defaults to the current working directory.",
},
},
Required: []string{"pattern"},
}
}
func (g *globTool) Run(ctx context.Context, call ToolCall) (ToolResponse, error) {
var params GlobParams
if err := json.Unmarshal([]byte(call.Input), &params); err != nil {
return NewTextErrorResponse(fmt.Sprintf("error parsing parameters: %s", err)), nil
}
if params.Pattern == "" {
return NewTextErrorResponse("pattern is required"), nil
}
searchPath := params.Path
if searchPath == "" {
searchPath = config.WorkingDirectory()
}
files, truncated, err := globFiles(params.Pattern, searchPath, 100)
if err != nil {
return ToolResponse{}, fmt.Errorf("error finding files: %w", err)
}
var output string
if len(files) == 0 {
output = "No files found"
} else {
output = strings.Join(files, "\n")
if truncated {
output += "\n\n(Results are truncated. Consider using a more specific path or pattern.)"
}
}
return WithResponseMetadata(
NewTextResponse(output),
GlobResponseMetadata{
NumberOfFiles: len(files),
Truncated: truncated,
},
), nil
}
func globFiles(pattern, searchPath string, limit int) ([]string, bool, error) {
cmdRg := fileutil.GetRgCmd(pattern)
if cmdRg != nil {
cmdRg.Dir = searchPath
matches, err := runRipgrep(cmdRg, searchPath, limit)
if err == nil {
return matches, len(matches) >= limit && limit > 0, nil
}
status.Warn(fmt.Sprintf("Ripgrep execution failed: %v. Falling back to doublestar.", err))
}
return fileutil.GlobWithDoublestar(pattern, searchPath, limit)
}
func runRipgrep(cmd *exec.Cmd, searchRoot string, limit int) ([]string, error) {
out, err := cmd.CombinedOutput()
if err != nil {
if ee, ok := err.(*exec.ExitError); ok && ee.ExitCode() == 1 {
return nil, nil
}
return nil, fmt.Errorf("ripgrep: %w\n%s", err, out)
}
var matches []string
for _, p := range bytes.Split(out, []byte{0}) {
if len(p) == 0 {
continue
}
absPath := string(p)
if !filepath.IsAbs(absPath) {
absPath = filepath.Join(searchRoot, absPath)
}
if fileutil.SkipHidden(absPath) {
continue
}
matches = append(matches, absPath)
}
sort.SliceStable(matches, func(i, j int) bool {
return len(matches[i]) < len(matches[j])
})
if limit > 0 && len(matches) > limit {
matches = matches[:limit]
}
return matches, nil
}
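runRipgrep above relies on two ripgrep conventions: exit code 1 means "no matches" (not a failure), and output split on NUL bytes yields one path per entry. A self-contained sketch of those conventions, invoking rg directly with flags we assume here (--files, --glob, -0); the command actually used by the tool is built by fileutil.GetRgCmd, which is not shown in this diff:

package main

import (
	"bytes"
	"fmt"
	"os/exec"
)

func main() {
	// --files lists files, --glob filters them, -0 separates paths with NUL bytes.
	cmd := exec.Command("rg", "--files", "--glob", "*.go", "-0")
	out, err := cmd.Output()
	if ee, ok := err.(*exec.ExitError); ok && ee.ExitCode() == 1 {
		fmt.Println("no matches") // rg exits 1 when nothing matched
		return
	} else if err != nil {
		panic(err)
	}
	for _, p := range bytes.Split(out, []byte{0}) {
		if len(p) > 0 {
			fmt.Println(string(p))
		}
	}
}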

View File

@@ -1,359 +0,0 @@
package tools
import (
"bufio"
"context"
"encoding/json"
"fmt"
"os"
"os/exec"
"path/filepath"
"regexp"
"sort"
"strconv"
"strings"
"time"
"github.com/sst/opencode/internal/config"
"github.com/sst/opencode/internal/fileutil"
)
type GrepParams struct {
Pattern string `json:"pattern"`
Path string `json:"path"`
Include string `json:"include"`
LiteralText bool `json:"literal_text"`
}
type grepMatch struct {
path string
modTime time.Time
lineNum int
lineText string
}
type GrepResponseMetadata struct {
NumberOfMatches int `json:"number_of_matches"`
Truncated bool `json:"truncated"`
}
type grepTool struct{}
const (
GrepToolName = "grep"
grepDescription = `Fast content search tool that finds files containing specific text or patterns, returning matching file paths sorted by modification time (newest first).
WHEN TO USE THIS TOOL:
- Use when you need to find files containing specific text or patterns
- Great for searching code bases for function names, variable declarations, or error messages
- Useful for finding all files that use a particular API or pattern
HOW TO USE:
- Provide a regex pattern to search for within file contents
- Set literal_text=true if you want to search for the exact text with special characters (recommended for non-regex users)
- Optionally specify a starting directory (defaults to current working directory)
- Optionally provide an include pattern to filter which files to search
- Results are sorted with most recently modified files first
REGEX PATTERN SYNTAX (when literal_text=false):
- Supports standard regular expression syntax
- 'function' searches for the literal text "function"
- 'log\..*Error' finds text starting with "log." and ending with "Error"
- 'import\s+.*\s+from' finds import statements in JavaScript/TypeScript
COMMON INCLUDE PATTERN EXAMPLES:
- '*.js' - Only search JavaScript files
- '*.{ts,tsx}' - Only search TypeScript files
- '*.go' - Only search Go files
LIMITATIONS:
- Results are limited to 100 files (newest first)
- Performance depends on the number of files being searched
- Very large binary files may be skipped
- Hidden files (starting with '.') are skipped
TIPS:
- For faster, more targeted searches, first use Glob to find relevant files, then use Grep
- When doing iterative exploration that may require multiple rounds of searching, consider using the Agent tool instead
- Always check if results are truncated and refine your search pattern if needed
- Use literal_text=true when searching for exact text containing special characters like dots, parentheses, etc.`
)
func NewGrepTool() BaseTool {
return &grepTool{}
}
func (g *grepTool) Info() ToolInfo {
return ToolInfo{
Name: GrepToolName,
Description: grepDescription,
Parameters: map[string]any{
"pattern": map[string]any{
"type": "string",
"description": "The regex pattern to search for in file contents",
},
"path": map[string]any{
"type": "string",
"description": "The directory to search in. Defaults to the current working directory.",
},
"include": map[string]any{
"type": "string",
"description": "File pattern to include in the search (e.g. \"*.js\", \"*.{ts,tsx}\")",
},
"literal_text": map[string]any{
"type": "boolean",
"description": "If true, the pattern will be treated as literal text with special regex characters escaped. Default is false.",
},
},
Required: []string{"pattern"},
}
}
// escapeRegexPattern escapes special regex characters so they're treated as literal characters
func escapeRegexPattern(pattern string) string {
specialChars := []string{"\\", ".", "+", "*", "?", "(", ")", "[", "]", "{", "}", "^", "$", "|"}
escaped := pattern
for _, char := range specialChars {
escaped = strings.ReplaceAll(escaped, char, "\\"+char)
}
return escaped
}
func (g *grepTool) Run(ctx context.Context, call ToolCall) (ToolResponse, error) {
var params GrepParams
if err := json.Unmarshal([]byte(call.Input), &params); err != nil {
return NewTextErrorResponse(fmt.Sprintf("error parsing parameters: %s", err)), nil
}
if params.Pattern == "" {
return NewTextErrorResponse("pattern is required"), nil
}
// If literal_text is true, escape the pattern
searchPattern := params.Pattern
if params.LiteralText {
searchPattern = escapeRegexPattern(params.Pattern)
}
searchPath := params.Path
if searchPath == "" {
searchPath = config.WorkingDirectory()
}
matches, truncated, err := searchFiles(searchPattern, searchPath, params.Include, 100)
if err != nil {
return ToolResponse{}, fmt.Errorf("error searching files: %w", err)
}
var output string
if len(matches) == 0 {
output = "No files found"
} else {
output = fmt.Sprintf("Found %d matches\n", len(matches))
currentFile := ""
for _, match := range matches {
if currentFile != match.path {
if currentFile != "" {
output += "\n"
}
currentFile = match.path
output += fmt.Sprintf("%s:\n", match.path)
}
if match.lineNum > 0 {
output += fmt.Sprintf(" Line %d: %s\n", match.lineNum, match.lineText)
} else {
output += fmt.Sprintf(" %s\n", match.path)
}
}
if truncated {
output += "\n(Results are truncated. Consider using a more specific path or pattern.)"
}
}
return WithResponseMetadata(
NewTextResponse(output),
GrepResponseMetadata{
NumberOfMatches: len(matches),
Truncated: truncated,
},
), nil
}
func searchFiles(pattern, rootPath, include string, limit int) ([]grepMatch, bool, error) {
matches, err := searchWithRipgrep(pattern, rootPath, include)
if err != nil {
matches, err = searchFilesWithRegex(pattern, rootPath, include)
if err != nil {
return nil, false, err
}
}
sort.Slice(matches, func(i, j int) bool {
return matches[i].modTime.After(matches[j].modTime)
})
truncated := len(matches) > limit
if truncated {
matches = matches[:limit]
}
return matches, truncated, nil
}
func searchWithRipgrep(pattern, path, include string) ([]grepMatch, error) {
_, err := exec.LookPath("rg")
if err != nil {
return nil, fmt.Errorf("ripgrep not found: %w", err)
}
// Use -n to show line numbers and include the matched line
args := []string{"-n", pattern}
if include != "" {
args = append(args, "--glob", include)
}
args = append(args, path)
cmd := exec.Command("rg", args...)
output, err := cmd.Output()
if err != nil {
if exitErr, ok := err.(*exec.ExitError); ok && exitErr.ExitCode() == 1 {
return []grepMatch{}, nil
}
return nil, err
}
lines := strings.Split(strings.TrimSpace(string(output)), "\n")
matches := make([]grepMatch, 0, len(lines))
for _, line := range lines {
if line == "" {
continue
}
// Parse ripgrep output format: file:line:content
parts := strings.SplitN(line, ":", 3)
if len(parts) < 3 {
continue
}
filePath := parts[0]
lineNum, err := strconv.Atoi(parts[1])
if err != nil {
continue
}
lineText := parts[2]
fileInfo, err := os.Stat(filePath)
if err != nil {
continue // Skip files we can't access
}
matches = append(matches, grepMatch{
path: filePath,
modTime: fileInfo.ModTime(),
lineNum: lineNum,
lineText: lineText,
})
}
return matches, nil
}
func searchFilesWithRegex(pattern, rootPath, include string) ([]grepMatch, error) {
matches := []grepMatch{}
regex, err := regexp.Compile(pattern)
if err != nil {
return nil, fmt.Errorf("invalid regex pattern: %w", err)
}
var includePattern *regexp.Regexp
if include != "" {
regexPattern := globToRegex(include)
includePattern, err = regexp.Compile(regexPattern)
if err != nil {
return nil, fmt.Errorf("invalid include pattern: %w", err)
}
}
err = filepath.Walk(rootPath, func(path string, info os.FileInfo, err error) error {
if err != nil {
return nil // Skip errors
}
if info.IsDir() {
return nil // Skip directories
}
if fileutil.SkipHidden(path) {
return nil
}
if includePattern != nil && !includePattern.MatchString(path) {
return nil
}
match, lineNum, lineText, err := fileContainsPattern(path, regex)
if err != nil {
return nil // Skip files we can't read
}
if match {
matches = append(matches, grepMatch{
path: path,
modTime: info.ModTime(),
lineNum: lineNum,
lineText: lineText,
})
if len(matches) >= 200 {
return filepath.SkipAll
}
}
return nil
})
if err != nil {
return nil, err
}
return matches, nil
}
func fileContainsPattern(filePath string, pattern *regexp.Regexp) (bool, int, string, error) {
file, err := os.Open(filePath)
if err != nil {
return false, 0, "", err
}
defer file.Close()
scanner := bufio.NewScanner(file)
lineNum := 0
for scanner.Scan() {
lineNum++
line := scanner.Text()
if pattern.MatchString(line) {
return true, lineNum, line, nil
}
}
return false, 0, "", scanner.Err()
}
func globToRegex(glob string) string {
regexPattern := strings.ReplaceAll(glob, ".", "\\.")
regexPattern = strings.ReplaceAll(regexPattern, "*", ".*")
regexPattern = strings.ReplaceAll(regexPattern, "?", ".")
re := regexp.MustCompile(`\{([^}]+)\}`)
regexPattern = re.ReplaceAllStringFunc(regexPattern, func(match string) string {
inner := match[1 : len(match)-1]
return "(" + strings.ReplaceAll(inner, ",", "|") + ")"
})
return regexPattern
}
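// Editor's note: the snippet below is an illustrative sketch, not part of the
// original file. It shows the kind of expression globToRegex is expected to
// produce for typical include patterns; note the result is unanchored, so it
// matches anywhere in the file path.
func exampleGlobToRegex() {
	fmt.Println(globToRegex("*.{go,md}")) // .*\.(go|md)
	fmt.Println(globToRegex("cmd/?.go"))  // cmd/.\.go
}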

View File

@@ -1,316 +0,0 @@
package tools
import (
"context"
"encoding/json"
"fmt"
"os"
"path/filepath"
"strings"
"github.com/sst/opencode/internal/config"
)
type LSParams struct {
Path string `json:"path"`
Ignore []string `json:"ignore"`
}
type TreeNode struct {
Name string `json:"name"`
Path string `json:"path"`
Type string `json:"type"` // "file" or "directory"
Children []*TreeNode `json:"children,omitempty"`
}
type LSResponseMetadata struct {
NumberOfFiles int `json:"number_of_files"`
Truncated bool `json:"truncated"`
}
type lsTool struct{}
const (
LSToolName = "ls"
MaxLSFiles = 1000
lsDescription = `Directory listing tool that shows files and subdirectories in a tree structure, helping you explore and understand the project organization.
WHEN TO USE THIS TOOL:
- Use when you need to explore the structure of a directory
- Helpful for understanding the organization of a project
- Good first step when getting familiar with a new codebase
HOW TO USE:
- Provide a path to list (defaults to current working directory)
- Optionally specify glob patterns to ignore
- Results are displayed in a tree structure
FEATURES:
- Displays a hierarchical view of files and directories
- Automatically skips hidden files/directories (starting with '.')
- Skips common system directories like __pycache__
- Can filter out files matching specific patterns
LIMITATIONS:
- Results are limited to 1000 files
- Very large directories will be truncated
- Does not show file sizes or permissions
- Cannot recursively list all directories in a large project
TIPS:
- Use Glob tool for finding files by name patterns instead of browsing
- Use Grep tool for searching file contents
- Combine with other tools for more effective exploration`
)
func NewLsTool() BaseTool {
return &lsTool{}
}
func (l *lsTool) Info() ToolInfo {
return ToolInfo{
Name: LSToolName,
Description: lsDescription,
Parameters: map[string]any{
"path": map[string]any{
"type": "string",
"description": "The path to the directory to list (defaults to current working directory)",
},
"ignore": map[string]any{
"type": "array",
"description": "List of glob patterns to ignore",
"items": map[string]any{
"type": "string",
},
},
},
Required: []string{"path"},
}
}
func (l *lsTool) Run(ctx context.Context, call ToolCall) (ToolResponse, error) {
var params LSParams
if err := json.Unmarshal([]byte(call.Input), &params); err != nil {
return NewTextErrorResponse(fmt.Sprintf("error parsing parameters: %s", err)), nil
}
searchPath := params.Path
if searchPath == "" {
searchPath = config.WorkingDirectory()
}
if !filepath.IsAbs(searchPath) {
searchPath = filepath.Join(config.WorkingDirectory(), searchPath)
}
if _, err := os.Stat(searchPath); os.IsNotExist(err) {
return NewTextErrorResponse(fmt.Sprintf("path does not exist: %s", searchPath)), nil
}
files, truncated, err := listDirectory(searchPath, params.Ignore, MaxLSFiles)
if err != nil {
return ToolResponse{}, fmt.Errorf("error listing directory: %w", err)
}
tree := createFileTree(files)
output := printTree(tree, searchPath)
if truncated {
output = fmt.Sprintf("There are more than %d files in the directory. Use a more specific path or use the Glob tool to find specific files. The first %d files and directories are included below:\n\n%s", MaxLSFiles, MaxLSFiles, output)
}
return WithResponseMetadata(
NewTextResponse(output),
LSResponseMetadata{
NumberOfFiles: len(files),
Truncated: truncated,
},
), nil
}
func listDirectory(initialPath string, ignorePatterns []string, limit int) ([]string, bool, error) {
var results []string
truncated := false
err := filepath.Walk(initialPath, func(path string, info os.FileInfo, err error) error {
if err != nil {
return nil // Skip files we don't have permission to access
}
if shouldSkip(path, ignorePatterns) {
if info.IsDir() {
return filepath.SkipDir
}
return nil
}
if path != initialPath {
if info.IsDir() {
path = path + string(filepath.Separator)
}
results = append(results, path)
}
if len(results) >= limit {
truncated = true
return filepath.SkipAll
}
return nil
})
if err != nil {
return nil, truncated, err
}
return results, truncated, nil
}
func shouldSkip(path string, ignorePatterns []string) bool {
base := filepath.Base(path)
if base != "." && strings.HasPrefix(base, ".") {
return true
}
commonIgnored := []string{
"__pycache__",
"node_modules",
"dist",
"build",
"target",
"vendor",
"bin",
"obj",
".git",
".idea",
".vscode",
".DS_Store",
"*.pyc",
"*.pyo",
"*.pyd",
"*.so",
"*.dll",
"*.exe",
}
if strings.Contains(path, filepath.Join("__pycache__", "")) {
return true
}
for _, ignored := range commonIgnored {
if strings.HasSuffix(ignored, "/") {
if strings.Contains(path, filepath.Join(ignored[:len(ignored)-1], "")) {
return true
}
} else if strings.HasPrefix(ignored, "*.") {
if strings.HasSuffix(base, ignored[1:]) {
return true
}
} else {
if base == ignored {
return true
}
}
}
for _, pattern := range ignorePatterns {
matched, err := filepath.Match(pattern, base)
if err == nil && matched {
return true
}
}
return false
}
func createFileTree(sortedPaths []string) []*TreeNode {
root := []*TreeNode{}
pathMap := make(map[string]*TreeNode)
for _, path := range sortedPaths {
parts := strings.Split(path, string(filepath.Separator))
currentPath := ""
var parentPath string
var cleanParts []string
for _, part := range parts {
if part != "" {
cleanParts = append(cleanParts, part)
}
}
parts = cleanParts
if len(parts) == 0 {
continue
}
for i, part := range parts {
if currentPath == "" {
currentPath = part
} else {
currentPath = filepath.Join(currentPath, part)
}
if _, exists := pathMap[currentPath]; exists {
parentPath = currentPath
continue
}
isLastPart := i == len(parts)-1
isDir := !isLastPart || strings.HasSuffix(path, string(filepath.Separator))
nodeType := "file"
if isDir {
nodeType = "directory"
}
newNode := &TreeNode{
Name: part,
Path: currentPath,
Type: nodeType,
Children: []*TreeNode{},
}
pathMap[currentPath] = newNode
if i > 0 && parentPath != "" {
if parent, ok := pathMap[parentPath]; ok {
parent.Children = append(parent.Children, newNode)
}
} else {
root = append(root, newNode)
}
parentPath = currentPath
}
}
return root
}
func printTree(tree []*TreeNode, rootPath string) string {
var result strings.Builder
result.WriteString(fmt.Sprintf("- %s%s\n", rootPath, string(filepath.Separator)))
for _, node := range tree {
printNode(&result, node, 1)
}
return result.String()
}
func printNode(builder *strings.Builder, node *TreeNode, level int) {
indent := strings.Repeat(" ", level)
nodeName := node.Name
if node.Type == "directory" {
nodeName += string(filepath.Separator)
}
fmt.Fprintf(builder, "%s- %s\n", indent, nodeName)
if node.Type == "directory" && len(node.Children) > 0 {
for _, child := range node.Children {
printNode(builder, child, level+1)
}
}
}
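// Editor's note: the snippet below is an illustrative sketch, not part of the
// original file. It shows the tree-shaped output produced by createFileTree
// and printTree for a small hypothetical listing, assuming Unix-style path
// separators.
func examplePrintTree() {
	files := []string{"cmd/", "cmd/main.go", "README.md"}
	fmt.Print(printTree(createFileTree(files), "/repo"))
	// - /repo/
	//   - cmd/
	//     - main.go
	//   - README.md
}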

View File

@@ -1,457 +0,0 @@
package tools
import (
"context"
"encoding/json"
"os"
"path/filepath"
"strings"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestLsTool_Info(t *testing.T) {
tool := NewLsTool()
info := tool.Info()
assert.Equal(t, LSToolName, info.Name)
assert.NotEmpty(t, info.Description)
assert.Contains(t, info.Parameters, "path")
assert.Contains(t, info.Parameters, "ignore")
assert.Contains(t, info.Required, "path")
}
func TestLsTool_Run(t *testing.T) {
// Create a temporary directory for testing
tempDir, err := os.MkdirTemp("", "ls_tool_test")
require.NoError(t, err)
defer os.RemoveAll(tempDir)
// Create a test directory structure
testDirs := []string{
"dir1",
"dir2",
"dir2/subdir1",
"dir2/subdir2",
"dir3",
"dir3/.hidden_dir",
"__pycache__",
}
testFiles := []string{
"file1.txt",
"file2.txt",
"dir1/file3.txt",
"dir2/file4.txt",
"dir2/subdir1/file5.txt",
"dir2/subdir2/file6.txt",
"dir3/file7.txt",
"dir3/.hidden_file.txt",
"__pycache__/cache.pyc",
".hidden_root_file.txt",
}
// Create directories
for _, dir := range testDirs {
dirPath := filepath.Join(tempDir, dir)
err := os.MkdirAll(dirPath, 0755)
require.NoError(t, err)
}
// Create files
for _, file := range testFiles {
filePath := filepath.Join(tempDir, file)
err := os.WriteFile(filePath, []byte("test content"), 0644)
require.NoError(t, err)
}
t.Run("lists directory successfully", func(t *testing.T) {
tool := NewLsTool()
params := LSParams{
Path: tempDir,
}
paramsJSON, err := json.Marshal(params)
require.NoError(t, err)
call := ToolCall{
Name: LSToolName,
Input: string(paramsJSON),
}
response, err := tool.Run(context.Background(), call)
require.NoError(t, err)
// Check that visible directories and files are included
assert.Contains(t, response.Content, "dir1")
assert.Contains(t, response.Content, "dir2")
assert.Contains(t, response.Content, "dir3")
assert.Contains(t, response.Content, "file1.txt")
assert.Contains(t, response.Content, "file2.txt")
// Check that hidden files and directories are not included
assert.NotContains(t, response.Content, ".hidden_dir")
assert.NotContains(t, response.Content, ".hidden_file.txt")
assert.NotContains(t, response.Content, ".hidden_root_file.txt")
// Check that __pycache__ is not included
assert.NotContains(t, response.Content, "__pycache__")
})
t.Run("handles non-existent path", func(t *testing.T) {
tool := NewLsTool()
params := LSParams{
Path: filepath.Join(tempDir, "non_existent_dir"),
}
paramsJSON, err := json.Marshal(params)
require.NoError(t, err)
call := ToolCall{
Name: LSToolName,
Input: string(paramsJSON),
}
response, err := tool.Run(context.Background(), call)
require.NoError(t, err)
assert.Contains(t, response.Content, "path does not exist")
})
t.Run("handles empty path parameter", func(t *testing.T) {
// An empty path falls back to config.WorkingDirectory, which we can't easily mock here,
// so we only assert that the tool returns a non-empty response.
tool := NewLsTool()
params := LSParams{
Path: "",
}
paramsJSON, err := json.Marshal(params)
require.NoError(t, err)
call := ToolCall{
Name: LSToolName,
Input: string(paramsJSON),
}
response, err := tool.Run(context.Background(), call)
require.NoError(t, err)
// The response should either contain a valid directory listing or an error
// We'll just check that it's not empty
assert.NotEmpty(t, response.Content)
})
t.Run("handles invalid parameters", func(t *testing.T) {
tool := NewLsTool()
call := ToolCall{
Name: LSToolName,
Input: "invalid json",
}
response, err := tool.Run(context.Background(), call)
require.NoError(t, err)
assert.Contains(t, response.Content, "error parsing parameters")
})
t.Run("respects ignore patterns", func(t *testing.T) {
tool := NewLsTool()
params := LSParams{
Path: tempDir,
Ignore: []string{"file1.txt", "dir1"},
}
paramsJSON, err := json.Marshal(params)
require.NoError(t, err)
call := ToolCall{
Name: LSToolName,
Input: string(paramsJSON),
}
response, err := tool.Run(context.Background(), call)
require.NoError(t, err)
// The output format is a tree, so we need to check for specific patterns
// Check that file1.txt is not directly mentioned
assert.NotContains(t, response.Content, "- file1.txt")
// Check that dir1/ is not directly mentioned
assert.NotContains(t, response.Content, "- dir1/")
})
t.Run("handles relative path", func(t *testing.T) {
// Save original working directory
origWd, err := os.Getwd()
require.NoError(t, err)
defer func() {
os.Chdir(origWd)
}()
// Change to a directory above the temp directory
parentDir := filepath.Dir(tempDir)
err = os.Chdir(parentDir)
require.NoError(t, err)
tool := NewLsTool()
params := LSParams{
Path: filepath.Base(tempDir),
}
paramsJSON, err := json.Marshal(params)
require.NoError(t, err)
call := ToolCall{
Name: LSToolName,
Input: string(paramsJSON),
}
response, err := tool.Run(context.Background(), call)
require.NoError(t, err)
// Should list the temp directory contents
assert.Contains(t, response.Content, "dir1")
assert.Contains(t, response.Content, "file1.txt")
})
}
func TestShouldSkip(t *testing.T) {
testCases := []struct {
name string
path string
ignorePatterns []string
expected bool
}{
{
name: "hidden file",
path: "/path/to/.hidden_file",
ignorePatterns: []string{},
expected: true,
},
{
name: "hidden directory",
path: "/path/to/.hidden_dir",
ignorePatterns: []string{},
expected: true,
},
{
name: "pycache directory",
path: "/path/to/__pycache__/file.pyc",
ignorePatterns: []string{},
expected: true,
},
{
name: "node_modules directory",
path: "/path/to/node_modules/package",
ignorePatterns: []string{},
expected: false, // shouldSkip matches only the base name ("package" here), so node_modules in the parent path is not detected
},
{
name: "normal file",
path: "/path/to/normal_file.txt",
ignorePatterns: []string{},
expected: false,
},
{
name: "normal directory",
path: "/path/to/normal_dir",
ignorePatterns: []string{},
expected: false,
},
{
name: "ignored by pattern",
path: "/path/to/ignore_me.txt",
ignorePatterns: []string{"ignore_*.txt"},
expected: true,
},
{
name: "not ignored by pattern",
path: "/path/to/keep_me.txt",
ignorePatterns: []string{"ignore_*.txt"},
expected: false,
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
result := shouldSkip(tc.path, tc.ignorePatterns)
assert.Equal(t, tc.expected, result)
})
}
}
func TestCreateFileTree(t *testing.T) {
paths := []string{
"/path/to/file1.txt",
"/path/to/dir1/file2.txt",
"/path/to/dir1/subdir/file3.txt",
"/path/to/dir2/file4.txt",
}
tree := createFileTree(paths)
// Check the structure of the tree
assert.Len(t, tree, 1) // Should have one root node
// Check the root node
rootNode := tree[0]
assert.Equal(t, "path", rootNode.Name)
assert.Equal(t, "directory", rootNode.Type)
assert.Len(t, rootNode.Children, 1)
// Check the "to" node
toNode := rootNode.Children[0]
assert.Equal(t, "to", toNode.Name)
assert.Equal(t, "directory", toNode.Type)
assert.Len(t, toNode.Children, 3) // file1.txt, dir1, dir2
// Find the dir1 node
var dir1Node *TreeNode
for _, child := range toNode.Children {
if child.Name == "dir1" {
dir1Node = child
break
}
}
require.NotNil(t, dir1Node)
assert.Equal(t, "directory", dir1Node.Type)
assert.Len(t, dir1Node.Children, 2) // file2.txt and subdir
}
func TestPrintTree(t *testing.T) {
// Create a simple tree
tree := []*TreeNode{
{
Name: "dir1",
Path: "dir1",
Type: "directory",
Children: []*TreeNode{
{
Name: "file1.txt",
Path: "dir1/file1.txt",
Type: "file",
},
{
Name: "subdir",
Path: "dir1/subdir",
Type: "directory",
Children: []*TreeNode{
{
Name: "file2.txt",
Path: "dir1/subdir/file2.txt",
Type: "file",
},
},
},
},
},
{
Name: "file3.txt",
Path: "file3.txt",
Type: "file",
},
}
result := printTree(tree, "/root")
// Check the output format
assert.Contains(t, result, "- /root/")
assert.Contains(t, result, " - dir1/")
assert.Contains(t, result, " - file1.txt")
assert.Contains(t, result, " - subdir/")
assert.Contains(t, result, " - file2.txt")
assert.Contains(t, result, " - file3.txt")
}
func TestListDirectory(t *testing.T) {
// Create a temporary directory for testing
tempDir, err := os.MkdirTemp("", "list_directory_test")
require.NoError(t, err)
defer os.RemoveAll(tempDir)
// Create a test directory structure
testDirs := []string{
"dir1",
"dir1/subdir1",
".hidden_dir",
}
testFiles := []string{
"file1.txt",
"file2.txt",
"dir1/file3.txt",
"dir1/subdir1/file4.txt",
".hidden_file.txt",
}
// Create directories
for _, dir := range testDirs {
dirPath := filepath.Join(tempDir, dir)
err := os.MkdirAll(dirPath, 0755)
require.NoError(t, err)
}
// Create files
for _, file := range testFiles {
filePath := filepath.Join(tempDir, file)
err := os.WriteFile(filePath, []byte("test content"), 0644)
require.NoError(t, err)
}
t.Run("lists files with no limit", func(t *testing.T) {
files, truncated, err := listDirectory(tempDir, []string{}, 1000)
require.NoError(t, err)
assert.False(t, truncated)
// Check that visible files and directories are included
containsPath := func(paths []string, target string) bool {
targetPath := filepath.Join(tempDir, target)
for _, path := range paths {
if strings.HasPrefix(path, targetPath) {
return true
}
}
return false
}
assert.True(t, containsPath(files, "dir1"))
assert.True(t, containsPath(files, "file1.txt"))
assert.True(t, containsPath(files, "file2.txt"))
assert.True(t, containsPath(files, "dir1/file3.txt"))
// Check that hidden files and directories are not included
assert.False(t, containsPath(files, ".hidden_dir"))
assert.False(t, containsPath(files, ".hidden_file.txt"))
})
t.Run("respects limit and returns truncated flag", func(t *testing.T) {
files, truncated, err := listDirectory(tempDir, []string{}, 2)
require.NoError(t, err)
assert.True(t, truncated)
assert.Len(t, files, 2)
})
t.Run("respects ignore patterns", func(t *testing.T) {
files, truncated, err := listDirectory(tempDir, []string{"*.txt"}, 1000)
require.NoError(t, err)
assert.False(t, truncated)
// Check that no .txt files are included
for _, file := range files {
assert.False(t, strings.HasSuffix(file, ".txt"), "Found .txt file: %s", file)
}
// But directories should still be included
containsDir := false
for _, file := range files {
if strings.Contains(file, "dir1") {
containsDir = true
break
}
}
assert.True(t, containsDir)
})
}

View File

@@ -1,350 +0,0 @@
package tools
import (
"context"
"encoding/json"
"fmt"
"strings"
"github.com/sst/opencode/internal/lsp"
"github.com/sst/opencode/internal/lsp/protocol"
"github.com/sst/opencode/internal/lsp/util"
)
type CodeActionParams struct {
FilePath string `json:"file_path"`
Line int `json:"line"`
Column int `json:"column"`
EndLine int `json:"end_line,omitempty"`
EndColumn int `json:"end_column,omitempty"`
ActionID int `json:"action_id,omitempty"`
LspName string `json:"lsp_name,omitempty"`
}
type codeActionTool struct {
lspClients map[string]*lsp.Client
}
const (
CodeActionToolName = "codeAction"
codeActionDescription = `Get available code actions at a specific position or range in a file.
WHEN TO USE THIS TOOL:
- Use when you need to find available fixes or refactorings for code issues
- Helpful for resolving errors, warnings, or improving code quality
- Great for discovering automated code transformations
HOW TO USE:
- Provide the path to the file containing the code
- Specify the line number (1-based) where the action should be applied
- Specify the column number (1-based) where the action should be applied
- Optionally specify end_line and end_column to define a range
- Results show available code actions with their titles and kinds
TO EXECUTE A CODE ACTION:
- After getting the list of available actions, call the tool again with the same parameters
- Add action_id parameter with the number of the action you want to execute (e.g., 1 for the first action)
- Add lsp_name parameter with the name of the LSP server that provided the action
FEATURES:
- Finds quick fixes for errors and warnings
- Discovers available refactorings
- Shows code organization actions
- Returns detailed information about each action
- Can execute selected code actions
LIMITATIONS:
- Requires a functioning LSP server for the file type
- May not work for all code issues depending on LSP capabilities
- Results depend on the accuracy of the LSP server
TIPS:
- Use in conjunction with Diagnostics tool to find issues that can be fixed
- First call without action_id to see available actions, then call again with action_id to execute
`
)
func NewCodeActionTool(lspClients map[string]*lsp.Client) BaseTool {
return &codeActionTool{
lspClients,
}
}
func (b *codeActionTool) Info() ToolInfo {
return ToolInfo{
Name: CodeActionToolName,
Description: codeActionDescription,
Parameters: map[string]any{
"file_path": map[string]any{
"type": "string",
"description": "The path to the file containing the code",
},
"line": map[string]any{
"type": "integer",
"description": "The line number (1-based) where the action should be applied",
},
"column": map[string]any{
"type": "integer",
"description": "The column number (1-based) where the action should be applied",
},
"end_line": map[string]any{
"type": "integer",
"description": "The ending line number (1-based) for a range (optional)",
},
"end_column": map[string]any{
"type": "integer",
"description": "The ending column number (1-based) for a range (optional)",
},
"action_id": map[string]any{
"type": "integer",
"description": "The ID of the code action to execute (optional)",
},
"lsp_name": map[string]any{
"type": "string",
"description": "The name of the LSP server that provided the action (optional)",
},
},
Required: []string{"file_path", "line", "column"},
}
}
func (b *codeActionTool) Run(ctx context.Context, call ToolCall) (ToolResponse, error) {
var params CodeActionParams
if err := json.Unmarshal([]byte(call.Input), &params); err != nil {
return NewTextErrorResponse(fmt.Sprintf("error parsing parameters: %s", err)), nil
}
lsps := b.lspClients
if len(lsps) == 0 {
return NewTextResponse("\nLSP clients are still initializing. Code actions will be available once they're ready.\n"), nil
}
// Ensure file is open in LSP
notifyLspOpenFile(ctx, params.FilePath, lsps)
// Convert 1-based line/column to 0-based for LSP protocol
line := max(0, params.Line-1)
column := max(0, params.Column-1)
// Handle optional end line/column
endLine := line
endColumn := column
if params.EndLine > 0 {
endLine = max(0, params.EndLine-1)
}
if params.EndColumn > 0 {
endColumn = max(0, params.EndColumn-1)
}
// Check if we're executing a specific action
if params.ActionID > 0 && params.LspName != "" {
return executeCodeAction(ctx, params.FilePath, line, column, endLine, endColumn, params.ActionID, params.LspName, lsps)
}
// Otherwise, just list available actions
output := getCodeActions(ctx, params.FilePath, line, column, endLine, endColumn, lsps)
return NewTextResponse(output), nil
}
func getCodeActions(ctx context.Context, filePath string, line, column, endLine, endColumn int, lsps map[string]*lsp.Client) string {
var results []string
for lspName, client := range lsps {
// Create code action params
uri := fmt.Sprintf("file://%s", filePath)
codeActionParams := protocol.CodeActionParams{
TextDocument: protocol.TextDocumentIdentifier{
URI: protocol.DocumentUri(uri),
},
Range: protocol.Range{
Start: protocol.Position{
Line: uint32(line),
Character: uint32(column),
},
End: protocol.Position{
Line: uint32(endLine),
Character: uint32(endColumn),
},
},
Context: protocol.CodeActionContext{
// Request all kinds of code actions
Only: []protocol.CodeActionKind{
protocol.QuickFix,
protocol.Refactor,
protocol.RefactorExtract,
protocol.RefactorInline,
protocol.RefactorRewrite,
protocol.Source,
protocol.SourceOrganizeImports,
protocol.SourceFixAll,
},
},
}
// Get code actions
codeActions, err := client.CodeAction(ctx, codeActionParams)
if err != nil {
results = append(results, fmt.Sprintf("Error from %s: %s", lspName, err))
continue
}
if len(codeActions) == 0 {
results = append(results, fmt.Sprintf("No code actions found by %s", lspName))
continue
}
// Format the code actions
results = append(results, fmt.Sprintf("Code actions found by %s:", lspName))
for i, action := range codeActions {
actionInfo := formatCodeAction(action, i+1)
results = append(results, actionInfo)
}
}
if len(results) == 0 {
return "No code actions found at the specified position."
}
return strings.Join(results, "\n")
}
func formatCodeAction(action protocol.Or_Result_textDocument_codeAction_Item0_Elem, index int) string {
switch v := action.Value.(type) {
case protocol.CodeAction:
kind := "Unknown"
if v.Kind != "" {
kind = string(v.Kind)
}
var details []string
// Add edit information if available
if v.Edit != nil {
numChanges := 0
if v.Edit.Changes != nil {
numChanges = len(v.Edit.Changes)
}
if v.Edit.DocumentChanges != nil {
numChanges = len(v.Edit.DocumentChanges)
}
details = append(details, fmt.Sprintf("Edits: %d changes", numChanges))
}
// Add command information if available
if v.Command != nil {
details = append(details, fmt.Sprintf("Command: %s", v.Command.Title))
}
// Add diagnostics information if available
if v.Diagnostics != nil && len(v.Diagnostics) > 0 {
details = append(details, fmt.Sprintf("Fixes: %d diagnostics", len(v.Diagnostics)))
}
detailsStr := ""
if len(details) > 0 {
detailsStr = " (" + strings.Join(details, ", ") + ")"
}
return fmt.Sprintf(" %d. %s [%s]%s", index, v.Title, kind, detailsStr)
case protocol.Command:
return fmt.Sprintf(" %d. %s [Command]", index, v.Title)
}
return fmt.Sprintf(" %d. Unknown code action type", index)
}
func executeCodeAction(ctx context.Context, filePath string, line, column, endLine, endColumn, actionID int, lspName string, lsps map[string]*lsp.Client) (ToolResponse, error) {
client, ok := lsps[lspName]
if !ok {
return NewTextErrorResponse(fmt.Sprintf("LSP server '%s' not found", lspName)), nil
}
// Create code action params
uri := fmt.Sprintf("file://%s", filePath)
codeActionParams := protocol.CodeActionParams{
TextDocument: protocol.TextDocumentIdentifier{
URI: protocol.DocumentUri(uri),
},
Range: protocol.Range{
Start: protocol.Position{
Line: uint32(line),
Character: uint32(column),
},
End: protocol.Position{
Line: uint32(endLine),
Character: uint32(endColumn),
},
},
Context: protocol.CodeActionContext{
// Request all kinds of code actions
Only: []protocol.CodeActionKind{
protocol.QuickFix,
protocol.Refactor,
protocol.RefactorExtract,
protocol.RefactorInline,
protocol.RefactorRewrite,
protocol.Source,
protocol.SourceOrganizeImports,
protocol.SourceFixAll,
},
},
}
// Get code actions
codeActions, err := client.CodeAction(ctx, codeActionParams)
if err != nil {
return NewTextErrorResponse(fmt.Sprintf("Error getting code actions: %s", err)), nil
}
if len(codeActions) == 0 {
return NewTextErrorResponse("No code actions found"), nil
}
// Check if the requested action ID is valid
if actionID < 1 || actionID > len(codeActions) {
return NewTextErrorResponse(fmt.Sprintf("Invalid action ID: %d. Available actions: 1-%d", actionID, len(codeActions))), nil
}
// Get the selected action (adjust for 0-based index)
selectedAction := codeActions[actionID-1]
// Execute the action based on its type
switch v := selectedAction.Value.(type) {
case protocol.CodeAction:
// Apply workspace edit if available
if v.Edit != nil {
err := util.ApplyWorkspaceEdit(*v.Edit)
if err != nil {
return NewTextErrorResponse(fmt.Sprintf("Error applying edit: %s", err)), nil
}
}
// Execute command if available
if v.Command != nil {
_, err := client.ExecuteCommand(ctx, protocol.ExecuteCommandParams{
Command: v.Command.Command,
Arguments: v.Command.Arguments,
})
if err != nil {
return NewTextErrorResponse(fmt.Sprintf("Error executing command: %s", err)), nil
}
}
return NewTextResponse(fmt.Sprintf("Successfully executed code action: %s", v.Title)), nil
case protocol.Command:
// Execute the command
_, err := client.ExecuteCommand(ctx, protocol.ExecuteCommandParams{
Command: v.Command,
Arguments: v.Arguments,
})
if err != nil {
return NewTextErrorResponse(fmt.Sprintf("Error executing command: %s", err)), nil
}
return NewTextResponse(fmt.Sprintf("Successfully executed command: %s", v.Title)), nil
}
return NewTextErrorResponse("Unknown code action type"), nil
}
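// Editor's note: the snippet below is an illustrative sketch, not part of the
// original file. It expresses the two-step flow described in
// codeActionDescription as tool-call inputs; the file path, position, and
// "gopls" server name are hypothetical.
func exampleCodeActionCalls() {
	list, _ := json.Marshal(CodeActionParams{FilePath: "/repo/main.go", Line: 42, Column: 10})
	run, _ := json.Marshal(CodeActionParams{FilePath: "/repo/main.go", Line: 42, Column: 10, ActionID: 1, LspName: "gopls"})
	fmt.Println(string(list)) // first call: list the available actions
	fmt.Println(string(run))  // second call: execute action #1 reported by gopls
}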

View File

@@ -1,198 +0,0 @@
package tools
import (
"context"
"encoding/json"
"fmt"
"log/slog"
"strings"
"github.com/sst/opencode/internal/lsp"
"github.com/sst/opencode/internal/lsp/protocol"
)
type DefinitionParams struct {
FilePath string `json:"file_path"`
Line int `json:"line"`
Column int `json:"column"`
}
type definitionTool struct {
lspClients map[string]*lsp.Client
}
const (
DefinitionToolName = "definition"
definitionDescription = `Find the definition of a symbol at a specific position in a file.
WHEN TO USE THIS TOOL:
- Use when you need to find where a symbol is defined
- Helpful for understanding code structure and relationships
- Great for navigating between implementation and interface
HOW TO USE:
- Provide the path to the file containing the symbol
- Specify the line number (1-based) where the symbol appears
- Specify the column number (1-based) where the symbol appears
- Results show the location of the symbol's definition
FEATURES:
- Finds definitions across files in the project
- Works with variables, functions, classes, interfaces, etc.
- Returns file path, line, and column of the definition
LIMITATIONS:
- Requires a functioning LSP server for the file type
- May not work for all symbols depending on LSP capabilities
- Results depend on the accuracy of the LSP server
TIPS:
- Use in conjunction with References tool to understand usage
- Combine with View tool to examine the definition
`
)
func NewDefinitionTool(lspClients map[string]*lsp.Client) BaseTool {
return &definitionTool{
lspClients,
}
}
func (b *definitionTool) Info() ToolInfo {
return ToolInfo{
Name: DefinitionToolName,
Description: definitionDescription,
Parameters: map[string]any{
"file_path": map[string]any{
"type": "string",
"description": "The path to the file containing the symbol",
},
"line": map[string]any{
"type": "integer",
"description": "The line number (1-based) where the symbol appears",
},
"column": map[string]any{
"type": "integer",
"description": "The column number (1-based) where the symbol appears",
},
},
Required: []string{"file_path", "line", "column"},
}
}
func (b *definitionTool) Run(ctx context.Context, call ToolCall) (ToolResponse, error) {
var params DefinitionParams
if err := json.Unmarshal([]byte(call.Input), &params); err != nil {
return NewTextErrorResponse(fmt.Sprintf("error parsing parameters: %s", err)), nil
}
lsps := b.lspClients
if len(lsps) == 0 {
return NewTextResponse("\nLSP clients are still initializing. Definition lookup will be available once they're ready.\n"), nil
}
// Ensure file is open in LSP
notifyLspOpenFile(ctx, params.FilePath, lsps)
// Convert 1-based line/column to 0-based for LSP protocol
line := max(0, params.Line-1)
column := max(0, params.Column-1)
output := getDefinition(ctx, params.FilePath, line, column, lsps)
return NewTextResponse(output), nil
}
func getDefinition(ctx context.Context, filePath string, line, column int, lsps map[string]*lsp.Client) string {
var results []string
slog.Debug(fmt.Sprintf("Looking for definition in %s at line %d, column %d", filePath, line+1, column+1))
slog.Debug(fmt.Sprintf("Available LSP clients: %v", getClientNames(lsps)))
for lspName, client := range lsps {
slog.Debug(fmt.Sprintf("Trying LSP client: %s", lspName))
// Create definition params
uri := fmt.Sprintf("file://%s", filePath)
definitionParams := protocol.DefinitionParams{
TextDocumentPositionParams: protocol.TextDocumentPositionParams{
TextDocument: protocol.TextDocumentIdentifier{
URI: protocol.DocumentUri(uri),
},
Position: protocol.Position{
Line: uint32(line),
Character: uint32(column),
},
},
}
slog.Debug(fmt.Sprintf("Sending definition request with params: %+v", definitionParams))
// Get definition
definition, err := client.Definition(ctx, definitionParams)
if err != nil {
slog.Debug(fmt.Sprintf("Error from %s: %s", lspName, err))
results = append(results, fmt.Sprintf("Error from %s: %s", lspName, err))
continue
}
slog.Debug(fmt.Sprintf("Got definition result type: %T", definition.Value))
// Process the definition result
locations := processDefinitionResult(definition)
slog.Debug(fmt.Sprintf("Processed locations count: %d", len(locations)))
if len(locations) == 0 {
results = append(results, fmt.Sprintf("No definition found by %s", lspName))
continue
}
// Format the locations
for _, loc := range locations {
path := strings.TrimPrefix(string(loc.URI), "file://")
// Convert 0-based line/column to 1-based for display
defLine := loc.Range.Start.Line + 1
defColumn := loc.Range.Start.Character + 1
slog.Debug(fmt.Sprintf("Found definition at %s:%d:%d", path, defLine, defColumn))
results = append(results, fmt.Sprintf("Definition found by %s: %s:%d:%d", lspName, path, defLine, defColumn))
}
}
if len(results) == 0 {
return "No definition found for the symbol at the specified position."
}
return strings.Join(results, "\n")
}
func processDefinitionResult(result protocol.Or_Result_textDocument_definition) []protocol.Location {
var locations []protocol.Location
switch v := result.Value.(type) {
case protocol.Location:
locations = append(locations, v)
case []protocol.Location:
locations = append(locations, v...)
case []protocol.DefinitionLink:
for _, link := range v {
locations = append(locations, protocol.Location{
URI: link.TargetURI,
Range: link.TargetRange,
})
}
case protocol.Or_Definition:
switch d := v.Value.(type) {
case protocol.Location:
locations = append(locations, d)
case []protocol.Location:
locations = append(locations, d...)
}
}
return locations
}
// Helper function to get LSP client names for debugging
func getClientNames(lsps map[string]*lsp.Client) []string {
names := make([]string, 0, len(lsps))
for name := range lsps {
names = append(names, name)
}
return names
}

View File

@@ -1,296 +0,0 @@
package tools
import (
"context"
"encoding/json"
"fmt"
"maps"
"sort"
"strings"
"time"
"github.com/sst/opencode/internal/lsp"
"github.com/sst/opencode/internal/lsp/protocol"
)
type DiagnosticsParams struct {
FilePath string `json:"file_path"`
}
type diagnosticsTool struct {
lspClients map[string]*lsp.Client
}
const (
DiagnosticsToolName = "diagnostics"
diagnosticsDescription = `Get diagnostics for a file and/or project.
WHEN TO USE THIS TOOL:
- Use when you need to check for errors or warnings in your code
- Helpful for debugging and ensuring code quality
- Good for getting a quick overview of issues in a file or project
HOW TO USE:
- Provide a path to a file to get diagnostics for that file
- Leave the path empty to get diagnostics for the entire project
- Results are displayed in a structured format with severity levels
FEATURES:
- Displays errors, warnings, and hints
- Groups diagnostics by severity
- Provides detailed information about each diagnostic
LIMITATIONS:
- Results are limited to the diagnostics provided by the LSP clients
- May not cover all possible issues in the code
- Does not provide suggestions for fixing issues
TIPS:
- Use in conjunction with other tools for a comprehensive code review
- Combine with the LSP client for real-time diagnostics
`
)
func NewDiagnosticsTool(lspClients map[string]*lsp.Client) BaseTool {
return &diagnosticsTool{
lspClients,
}
}
func (b *diagnosticsTool) Info() ToolInfo {
return ToolInfo{
Name: DiagnosticsToolName,
Description: diagnosticsDescription,
Parameters: map[string]any{
"file_path": map[string]any{
"type": "string",
"description": "The path to the file to get diagnostics for (leave w empty for project diagnostics)",
},
},
Required: []string{},
}
}
func (b *diagnosticsTool) Run(ctx context.Context, call ToolCall) (ToolResponse, error) {
var params DiagnosticsParams
if err := json.Unmarshal([]byte(call.Input), &params); err != nil {
return NewTextErrorResponse(fmt.Sprintf("error parsing parameters: %s", err)), nil
}
lsps := b.lspClients
if len(lsps) == 0 {
// Return a more helpful message when LSP clients aren't ready yet
return NewTextResponse("\n<diagnostic_summary>\nLSP clients are still initializing. Diagnostics will be available once they're ready.\n</diagnostic_summary>\n"), nil
}
if params.FilePath != "" {
notifyLspOpenFile(ctx, params.FilePath, lsps)
waitForLspDiagnostics(ctx, params.FilePath, lsps)
}
output := getDiagnostics(params.FilePath, lsps)
return NewTextResponse(output), nil
}
func notifyLspOpenFile(ctx context.Context, filePath string, lsps map[string]*lsp.Client) {
for _, client := range lsps {
err := client.OpenFile(ctx, filePath)
if err != nil {
continue
}
}
}
func waitForLspDiagnostics(ctx context.Context, filePath string, lsps map[string]*lsp.Client) {
if len(lsps) == 0 {
return
}
diagChan := make(chan struct{}, 1)
for _, client := range lsps {
originalDiags := make(map[protocol.DocumentUri][]protocol.Diagnostic)
maps.Copy(originalDiags, client.GetDiagnostics())
handler := func(params json.RawMessage) {
lsp.HandleDiagnostics(client, params)
var diagParams protocol.PublishDiagnosticsParams
if err := json.Unmarshal(params, &diagParams); err != nil {
return
}
if diagParams.URI.Path() == filePath || hasDiagnosticsChanged(client.GetDiagnostics(), originalDiags) {
select {
case diagChan <- struct{}{}:
default:
}
}
}
client.RegisterNotificationHandler("textDocument/publishDiagnostics", handler)
if client.IsFileOpen(filePath) {
err := client.NotifyChange(ctx, filePath)
if err != nil {
continue
}
} else {
err := client.OpenFile(ctx, filePath)
if err != nil {
continue
}
}
}
select {
case <-diagChan:
case <-time.After(5 * time.Second):
case <-ctx.Done():
}
}
func hasDiagnosticsChanged(current, original map[protocol.DocumentUri][]protocol.Diagnostic) bool {
for uri, diags := range current {
origDiags, exists := original[uri]
if !exists || len(diags) != len(origDiags) {
return true
}
}
return false
}
func getDiagnostics(filePath string, lsps map[string]*lsp.Client) string {
fileDiagnostics := []string{}
projectDiagnostics := []string{}
formatDiagnostic := func(pth string, diagnostic protocol.Diagnostic, source string) string {
severity := "Info"
switch diagnostic.Severity {
case protocol.SeverityError:
severity = "Error"
case protocol.SeverityWarning:
severity = "Warn"
case protocol.SeverityHint:
severity = "Hint"
}
location := fmt.Sprintf("%s:%d:%d", pth, diagnostic.Range.Start.Line+1, diagnostic.Range.Start.Character+1)
sourceInfo := ""
if diagnostic.Source != "" {
sourceInfo = diagnostic.Source
} else if source != "" {
sourceInfo = source
}
codeInfo := ""
if diagnostic.Code != nil {
codeInfo = fmt.Sprintf("[%v]", diagnostic.Code)
}
tagsInfo := ""
if len(diagnostic.Tags) > 0 {
tags := []string{}
for _, tag := range diagnostic.Tags {
switch tag {
case protocol.Unnecessary:
tags = append(tags, "unnecessary")
case protocol.Deprecated:
tags = append(tags, "deprecated")
}
}
if len(tags) > 0 {
tagsInfo = fmt.Sprintf(" (%s)", strings.Join(tags, ", "))
}
}
return fmt.Sprintf("%s: %s [%s]%s%s %s",
severity,
location,
sourceInfo,
codeInfo,
tagsInfo,
diagnostic.Message)
}
for lspName, client := range lsps {
diagnostics := client.GetDiagnostics()
if len(diagnostics) > 0 {
for location, diags := range diagnostics {
isCurrentFile := location.Path() == filePath
for _, diag := range diags {
formattedDiag := formatDiagnostic(location.Path(), diag, lspName)
if isCurrentFile {
fileDiagnostics = append(fileDiagnostics, formattedDiag)
} else {
projectDiagnostics = append(projectDiagnostics, formattedDiag)
}
}
}
}
}
sort.Slice(fileDiagnostics, func(i, j int) bool {
iIsError := strings.HasPrefix(fileDiagnostics[i], "Error")
jIsError := strings.HasPrefix(fileDiagnostics[j], "Error")
if iIsError != jIsError {
return iIsError // Errors come first
}
return fileDiagnostics[i] < fileDiagnostics[j] // Then alphabetically
})
sort.Slice(projectDiagnostics, func(i, j int) bool {
iIsError := strings.HasPrefix(projectDiagnostics[i], "Error")
jIsError := strings.HasPrefix(projectDiagnostics[j], "Error")
if iIsError != jIsError {
return iIsError
}
return projectDiagnostics[i] < projectDiagnostics[j]
})
output := ""
if len(fileDiagnostics) > 0 {
output += "\n<file_diagnostics>\n"
if len(fileDiagnostics) > 10 {
output += strings.Join(fileDiagnostics[:10], "\n")
output += fmt.Sprintf("\n... and %d more diagnostics", len(fileDiagnostics)-10)
} else {
output += strings.Join(fileDiagnostics, "\n")
}
output += "\n</file_diagnostics>\n"
}
if len(projectDiagnostics) > 0 {
output += "\n<project_diagnostics>\n"
if len(projectDiagnostics) > 10 {
output += strings.Join(projectDiagnostics[:10], "\n")
output += fmt.Sprintf("\n... and %d more diagnostics", len(projectDiagnostics)-10)
} else {
output += strings.Join(projectDiagnostics, "\n")
}
output += "\n</project_diagnostics>\n"
}
if len(fileDiagnostics) > 0 || len(projectDiagnostics) > 0 {
fileErrors := countSeverity(fileDiagnostics, "Error")
fileWarnings := countSeverity(fileDiagnostics, "Warn")
projectErrors := countSeverity(projectDiagnostics, "Error")
projectWarnings := countSeverity(projectDiagnostics, "Warn")
output += "\n<diagnostic_summary>\n"
output += fmt.Sprintf("Current file: %d errors, %d warnings\n", fileErrors, fileWarnings)
output += fmt.Sprintf("Project: %d errors, %d warnings\n", projectErrors, projectWarnings)
output += "</diagnostic_summary>\n"
}
return output
}
func countSeverity(diagnostics []string, severity string) int {
count := 0
for _, diag := range diagnostics {
if strings.HasPrefix(diag, severity) {
count++
}
}
return count
}
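// Editor's note: the comment below is an illustrative sketch, not part of the
// original file. It shows the rough shape of getDiagnostics output for a file
// with a single error; the path, source, and message are hypothetical.
//
//	<file_diagnostics>
//	Error: /repo/main.go:12:5 [gopls] declared and not used: x
//	</file_diagnostics>
//
//	<diagnostic_summary>
//	Current file: 1 errors, 0 warnings
//	Project: 0 errors, 0 warnings
//	</diagnostic_summary>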

View File

@@ -1,204 +0,0 @@
package tools
import (
"context"
"encoding/json"
"fmt"
"strings"
"github.com/sst/opencode/internal/lsp"
"github.com/sst/opencode/internal/lsp/protocol"
)
type DocSymbolsParams struct {
FilePath string `json:"file_path"`
}
type docSymbolsTool struct {
lspClients map[string]*lsp.Client
}
const (
DocSymbolsToolName = "docSymbols"
docSymbolsDescription = `Get document symbols for a file.
WHEN TO USE THIS TOOL:
- Use when you need to understand the structure of a file
- Helpful for finding classes, functions, methods, and variables in a file
- Great for getting an overview of a file's organization
HOW TO USE:
- Provide the path to the file to get symbols for
- Results show all symbols defined in the file with their kind and location
FEATURES:
- Lists all symbols in a hierarchical structure
- Shows symbol types (function, class, variable, etc.)
- Provides location information for each symbol
- Organizes symbols by their scope and relationship
LIMITATIONS:
- Requires a functioning LSP server for the file type
- Results depend on the accuracy of the LSP server
- May not work for all file types
TIPS:
- Use to quickly understand the structure of a large file
- Combine with Definition and References tools for deeper code exploration
`
)
func NewDocSymbolsTool(lspClients map[string]*lsp.Client) BaseTool {
return &docSymbolsTool{
lspClients,
}
}
func (b *docSymbolsTool) Info() ToolInfo {
return ToolInfo{
Name: DocSymbolsToolName,
Description: docSymbolsDescription,
Parameters: map[string]any{
"file_path": map[string]any{
"type": "string",
"description": "The path to the file to get symbols for",
},
},
Required: []string{"file_path"},
}
}
func (b *docSymbolsTool) Run(ctx context.Context, call ToolCall) (ToolResponse, error) {
var params DocSymbolsParams
if err := json.Unmarshal([]byte(call.Input), &params); err != nil {
return NewTextErrorResponse(fmt.Sprintf("error parsing parameters: %s", err)), nil
}
lsps := b.lspClients
if len(lsps) == 0 {
return NewTextResponse("\nLSP clients are still initializing. Document symbols lookup will be available once they're ready.\n"), nil
}
// Ensure file is open in LSP
notifyLspOpenFile(ctx, params.FilePath, lsps)
output := getDocumentSymbols(ctx, params.FilePath, lsps)
return NewTextResponse(output), nil
}
func getDocumentSymbols(ctx context.Context, filePath string, lsps map[string]*lsp.Client) string {
var results []string
for lspName, client := range lsps {
// Create document symbol params
uri := fmt.Sprintf("file://%s", filePath)
symbolParams := protocol.DocumentSymbolParams{
TextDocument: protocol.TextDocumentIdentifier{
URI: protocol.DocumentUri(uri),
},
}
// Get document symbols
symbolResult, err := client.DocumentSymbol(ctx, symbolParams)
if err != nil {
results = append(results, fmt.Sprintf("Error from %s: %s", lspName, err))
continue
}
// Process the symbol result
symbols := processDocumentSymbolResult(symbolResult)
if len(symbols) == 0 {
results = append(results, fmt.Sprintf("No symbols found by %s", lspName))
continue
}
// Format the symbols
results = append(results, fmt.Sprintf("Symbols found by %s:", lspName))
for _, symbol := range symbols {
results = append(results, formatSymbol(symbol, 1))
}
}
if len(results) == 0 {
return "No symbols found in the specified file."
}
return strings.Join(results, "\n")
}
func processDocumentSymbolResult(result protocol.Or_Result_textDocument_documentSymbol) []SymbolInfo {
var symbols []SymbolInfo
switch v := result.Value.(type) {
case []protocol.SymbolInformation:
for _, si := range v {
symbols = append(symbols, SymbolInfo{
Name: si.Name,
Kind: symbolKindToString(si.Kind),
Location: locationToString(si.Location),
Children: nil,
})
}
case []protocol.DocumentSymbol:
for _, ds := range v {
symbols = append(symbols, documentSymbolToSymbolInfo(ds))
}
}
return symbols
}
// SymbolInfo represents a symbol in a document
type SymbolInfo struct {
Name string
Kind string
Location string
Children []SymbolInfo
}
func documentSymbolToSymbolInfo(symbol protocol.DocumentSymbol) SymbolInfo {
info := SymbolInfo{
Name: symbol.Name,
Kind: symbolKindToString(symbol.Kind),
Location: fmt.Sprintf("Line %d-%d",
symbol.Range.Start.Line+1,
symbol.Range.End.Line+1),
Children: []SymbolInfo{},
}
for _, child := range symbol.Children {
info.Children = append(info.Children, documentSymbolToSymbolInfo(child))
}
return info
}
func locationToString(location protocol.Location) string {
return fmt.Sprintf("Line %d-%d",
location.Range.Start.Line+1,
location.Range.End.Line+1)
}
func symbolKindToString(kind protocol.SymbolKind) string {
if kindStr, ok := protocol.TableKindMap[kind]; ok {
return kindStr
}
return "Unknown"
}
func formatSymbol(symbol SymbolInfo, level int) string {
indent := strings.Repeat(" ", level)
result := fmt.Sprintf("%s- %s (%s) %s", indent, symbol.Name, symbol.Kind, symbol.Location)
var childResults []string
for _, child := range symbol.Children {
childResults = append(childResults, formatSymbol(child, level+1))
}
if len(childResults) > 0 {
return result + "\n" + strings.Join(childResults, "\n")
}
return result
}

View File

@@ -1,161 +0,0 @@
package tools
import (
"context"
"encoding/json"
"fmt"
"strings"
"github.com/sst/opencode/internal/lsp"
"github.com/sst/opencode/internal/lsp/protocol"
)
type ReferencesParams struct {
FilePath string `json:"file_path"`
Line int `json:"line"`
Column int `json:"column"`
IncludeDeclaration bool `json:"include_declaration"`
}
type referencesTool struct {
lspClients map[string]*lsp.Client
}
const (
ReferencesToolName = "references"
referencesDescription = `Find all references to a symbol at a specific position in a file.
WHEN TO USE THIS TOOL:
- Use when you need to find all places where a symbol is used
- Helpful for understanding code usage and dependencies
- Great for refactoring and impact analysis
HOW TO USE:
- Provide the path to the file containing the symbol
- Specify the line number (1-based) where the symbol appears
- Specify the column number (1-based) where the symbol appears
- Optionally set include_declaration to include the declaration in results
- Results show all locations where the symbol is referenced
FEATURES:
- Finds references across files in the project
- Works with variables, functions, classes, interfaces, etc.
- Returns file paths, lines, and columns of all references
LIMITATIONS:
- Requires a functioning LSP server for the file type
- May not find all references depending on LSP capabilities
- Results depend on the accuracy of the LSP server
TIPS:
- Use in conjunction with Definition tool to understand symbol origins
- Combine with View tool to examine the references
`
)
func NewReferencesTool(lspClients map[string]*lsp.Client) BaseTool {
return &referencesTool{
lspClients,
}
}
func (b *referencesTool) Info() ToolInfo {
return ToolInfo{
Name: ReferencesToolName,
Description: referencesDescription,
Parameters: map[string]any{
"file_path": map[string]any{
"type": "string",
"description": "The path to the file containing the symbol",
},
"line": map[string]any{
"type": "integer",
"description": "The line number (1-based) where the symbol appears",
},
"column": map[string]any{
"type": "integer",
"description": "The column number (1-based) where the symbol appears",
},
"include_declaration": map[string]any{
"type": "boolean",
"description": "Whether to include the declaration in the results",
},
},
Required: []string{"file_path", "line", "column"},
}
}
func (b *referencesTool) Run(ctx context.Context, call ToolCall) (ToolResponse, error) {
var params ReferencesParams
if err := json.Unmarshal([]byte(call.Input), &params); err != nil {
return NewTextErrorResponse(fmt.Sprintf("error parsing parameters: %s", err)), nil
}
lsps := b.lspClients
if len(lsps) == 0 {
return NewTextResponse("\nLSP clients are still initializing. References lookup will be available once they're ready.\n"), nil
}
// Ensure file is open in LSP
notifyLspOpenFile(ctx, params.FilePath, lsps)
// Convert 1-based line/column to 0-based for LSP protocol
line := max(0, params.Line-1)
column := max(0, params.Column-1)
output := getReferences(ctx, params.FilePath, line, column, params.IncludeDeclaration, lsps)
return NewTextResponse(output), nil
}
func getReferences(ctx context.Context, filePath string, line, column int, includeDeclaration bool, lsps map[string]*lsp.Client) string {
var results []string
for lspName, client := range lsps {
// Create references params
uri := fmt.Sprintf("file://%s", filePath)
referencesParams := protocol.ReferenceParams{
TextDocumentPositionParams: protocol.TextDocumentPositionParams{
TextDocument: protocol.TextDocumentIdentifier{
URI: protocol.DocumentUri(uri),
},
Position: protocol.Position{
Line: uint32(line),
Character: uint32(column),
},
},
Context: protocol.ReferenceContext{
IncludeDeclaration: includeDeclaration,
},
}
// Get references
references, err := client.References(ctx, referencesParams)
if err != nil {
results = append(results, fmt.Sprintf("Error from %s: %s", lspName, err))
continue
}
if len(references) == 0 {
results = append(results, fmt.Sprintf("No references found by %s", lspName))
continue
}
// Format the locations
results = append(results, fmt.Sprintf("References found by %s:", lspName))
for _, loc := range references {
path := strings.TrimPrefix(string(loc.URI), "file://")
// Convert 0-based line/column to 1-based for display
refLine := loc.Range.Start.Line + 1
refColumn := loc.Range.Start.Character + 1
results = append(results, fmt.Sprintf(" %s:%d:%d", path, refLine, refColumn))
}
}
if len(results) == 0 {
return "No references found for the symbol at the specified position."
}
return strings.Join(results, "\n")
}

View File

@@ -1,162 +0,0 @@
package tools
import (
"context"
"encoding/json"
"fmt"
"strings"
"github.com/sst/opencode/internal/lsp"
"github.com/sst/opencode/internal/lsp/protocol"
)
type WorkspaceSymbolsParams struct {
Query string `json:"query"`
}
type workspaceSymbolsTool struct {
lspClients map[string]*lsp.Client
}
const (
WorkspaceSymbolsToolName = "workspaceSymbols"
workspaceSymbolsDescription = `Find symbols across the workspace matching a query.
WHEN TO USE THIS TOOL:
- Use when you need to find symbols across multiple files
- Helpful for locating classes, functions, or variables in a project
- Great for exploring large codebases
HOW TO USE:
- Provide a query string to search for symbols
- Results show matching symbols from across the workspace
FEATURES:
- Searches across all files in the workspace
- Shows symbol types (function, class, variable, etc.)
- Provides location information for each symbol
- Works with partial matches and fuzzy search (depending on LSP server)
LIMITATIONS:
- Requires a functioning LSP server for the file types
- Results depend on the accuracy of the LSP server
- Query capabilities vary by language server
- May not work for all file types
TIPS:
- Use specific queries to narrow down results
- Combine with DocSymbols tool for detailed file exploration
- Use with Definition tool to jump to symbol definitions
`
)
func NewWorkspaceSymbolsTool(lspClients map[string]*lsp.Client) BaseTool {
return &workspaceSymbolsTool{
lspClients,
}
}
func (b *workspaceSymbolsTool) Info() ToolInfo {
return ToolInfo{
Name: WorkspaceSymbolsToolName,
Description: workspaceSymbolsDescription,
Parameters: map[string]any{
"query": map[string]any{
"type": "string",
"description": "The query string to search for symbols",
},
},
Required: []string{"query"},
}
}
func (b *workspaceSymbolsTool) Run(ctx context.Context, call ToolCall) (ToolResponse, error) {
var params WorkspaceSymbolsParams
if err := json.Unmarshal([]byte(call.Input), &params); err != nil {
return NewTextErrorResponse(fmt.Sprintf("error parsing parameters: %s", err)), nil
}
lsps := b.lspClients
if len(lsps) == 0 {
return NewTextResponse("\nLSP clients are still initializing. Workspace symbols lookup will be available once they're ready.\n"), nil
}
output := getWorkspaceSymbols(ctx, params.Query, lsps)
return NewTextResponse(output), nil
}
func getWorkspaceSymbols(ctx context.Context, query string, lsps map[string]*lsp.Client) string {
var results []string
for lspName, client := range lsps {
// Create workspace symbol params
symbolParams := protocol.WorkspaceSymbolParams{
Query: query,
}
// Get workspace symbols
symbolResult, err := client.Symbol(ctx, symbolParams)
if err != nil {
results = append(results, fmt.Sprintf("Error from %s: %s", lspName, err))
continue
}
// Process the symbol result
symbols := processWorkspaceSymbolResult(symbolResult)
if len(symbols) == 0 {
results = append(results, fmt.Sprintf("No symbols found by %s for query '%s'", lspName, query))
continue
}
// Format the symbols
results = append(results, fmt.Sprintf("Symbols found by %s for query '%s':", lspName, query))
for _, symbol := range symbols {
results = append(results, fmt.Sprintf(" %s (%s) - %s", symbol.Name, symbol.Kind, symbol.Location))
}
}
if len(results) == 0 {
return fmt.Sprintf("No symbols found matching query '%s'.", query)
}
return strings.Join(results, "\n")
}
func processWorkspaceSymbolResult(result protocol.Or_Result_workspace_symbol) []SymbolInfo {
var symbols []SymbolInfo
switch v := result.Value.(type) {
case []protocol.SymbolInformation:
for _, si := range v {
symbols = append(symbols, SymbolInfo{
Name: si.Name,
Kind: symbolKindToString(si.Kind),
Location: formatWorkspaceLocation(si.Location),
Children: nil,
})
}
case []protocol.WorkspaceSymbol:
for _, ws := range v {
location := "Unknown location"
if ws.Location.Value != nil {
if loc, ok := ws.Location.Value.(protocol.Location); ok {
location = formatWorkspaceLocation(loc)
}
}
symbols = append(symbols, SymbolInfo{
Name: ws.Name,
Kind: symbolKindToString(ws.Kind),
Location: location,
Children: nil,
})
}
}
return symbols
}
func formatWorkspaceLocation(location protocol.Location) string {
path := strings.TrimPrefix(string(location.URI), "file://")
return fmt.Sprintf("%s:%d:%d", path, location.Range.Start.Line+1, location.Range.Start.Character+1)
}

View File

@@ -1,375 +0,0 @@
package tools
import (
"context"
"encoding/json"
"fmt"
"os"
"path/filepath"
"time"
"github.com/sst/opencode/internal/config"
"github.com/sst/opencode/internal/diff"
"github.com/sst/opencode/internal/history"
"github.com/sst/opencode/internal/lsp"
"github.com/sst/opencode/internal/permission"
"log/slog"
)
type PatchParams struct {
PatchText string `json:"patch_text"`
}
type PatchResponseMetadata struct {
FilesChanged []string `json:"files_changed"`
Additions int `json:"additions"`
Removals int `json:"removals"`
}
type patchTool struct {
lspClients map[string]*lsp.Client
permissions permission.Service
files history.Service
}
const (
PatchToolName = "patch"
patchDescription = `Applies a patch to multiple files in one operation. This tool is useful for making coordinated changes across multiple files.
The patch text must follow this format:
*** Begin Patch
*** Update File: /path/to/file
@@ Context line (unique within the file)
Line to keep
-Line to remove
+Line to add
Line to keep
*** Add File: /path/to/new/file
+Content of the new file
+More content
*** Delete File: /path/to/file/to/delete
*** End Patch
Before using this tool:
1. Use the FileRead tool to understand the files' contents and context
2. Verify all file paths are correct (use the LS tool)
CRITICAL REQUIREMENTS FOR USING THIS TOOL:
1. UNIQUENESS: Context lines MUST uniquely identify the specific sections you want to change
2. PRECISION: All whitespace, indentation, and surrounding code must match exactly
3. VALIDATION: Ensure edits result in idiomatic, correct code
4. PATHS: Always use absolute file paths (starting with /)
The tool will apply all changes in a single atomic operation.`
)
func NewPatchTool(lspClients map[string]*lsp.Client, permissions permission.Service, files history.Service) BaseTool {
return &patchTool{
lspClients: lspClients,
permissions: permissions,
files: files,
}
}
func (p *patchTool) Info() ToolInfo {
return ToolInfo{
Name: PatchToolName,
Description: patchDescription,
Parameters: map[string]any{
"patch_text": map[string]any{
"type": "string",
"description": "The full patch text that describes all changes to be made",
},
},
Required: []string{"patch_text"},
}
}
func (p *patchTool) Run(ctx context.Context, call ToolCall) (ToolResponse, error) {
var params PatchParams
if err := json.Unmarshal([]byte(call.Input), &params); err != nil {
return NewTextErrorResponse("invalid parameters"), nil
}
if params.PatchText == "" {
return NewTextErrorResponse("patch_text is required"), nil
}
// Identify all files needed for the patch and verify they've been read
filesToRead := diff.IdentifyFilesNeeded(params.PatchText)
for _, filePath := range filesToRead {
absPath := filePath
if !filepath.IsAbs(absPath) {
wd := config.WorkingDirectory()
absPath = filepath.Join(wd, absPath)
}
if getLastReadTime(absPath).IsZero() {
return NewTextErrorResponse(fmt.Sprintf("you must read the file %s before patching it. Use the FileRead tool first", filePath)), nil
}
fileInfo, err := os.Stat(absPath)
if err != nil {
if os.IsNotExist(err) {
return NewTextErrorResponse(fmt.Sprintf("file not found: %s", absPath)), nil
}
return ToolResponse{}, fmt.Errorf("failed to access file: %w", err)
}
if fileInfo.IsDir() {
return NewTextErrorResponse(fmt.Sprintf("path is a directory, not a file: %s", absPath)), nil
}
modTime := fileInfo.ModTime()
lastRead := getLastReadTime(absPath)
if modTime.After(lastRead) {
return NewTextErrorResponse(
fmt.Sprintf("file %s has been modified since it was last read (mod time: %s, last read: %s)",
absPath, modTime.Format(time.RFC3339), lastRead.Format(time.RFC3339),
)), nil
}
}
// Check for new files to ensure they don't already exist
filesToAdd := diff.IdentifyFilesAdded(params.PatchText)
for _, filePath := range filesToAdd {
absPath := filePath
if !filepath.IsAbs(absPath) {
wd := config.WorkingDirectory()
absPath = filepath.Join(wd, absPath)
}
_, err := os.Stat(absPath)
if err == nil {
return NewTextErrorResponse(fmt.Sprintf("file already exists and cannot be added: %s", absPath)), nil
} else if !os.IsNotExist(err) {
return ToolResponse{}, fmt.Errorf("failed to check file: %w", err)
}
}
// Load all required files
currentFiles := make(map[string]string)
for _, filePath := range filesToRead {
absPath := filePath
if !filepath.IsAbs(absPath) {
wd := config.WorkingDirectory()
absPath = filepath.Join(wd, absPath)
}
content, err := os.ReadFile(absPath)
if err != nil {
return ToolResponse{}, fmt.Errorf("failed to read file %s: %w", absPath, err)
}
currentFiles[filePath] = string(content)
}
// Process the patch
patch, fuzz, err := diff.TextToPatch(params.PatchText, currentFiles)
if err != nil {
return NewTextErrorResponse(fmt.Sprintf("failed to parse patch: %s", err)), nil
}
if fuzz > 3 {
return NewTextErrorResponse(fmt.Sprintf("patch contains fuzzy matches (fuzz level: %d). Please make your context lines more precise", fuzz)), nil
}
// Convert patch to commit
commit, err := diff.PatchToCommit(patch, currentFiles)
if err != nil {
return NewTextErrorResponse(fmt.Sprintf("failed to create commit from patch: %s", err)), nil
}
// Get session ID and message ID
sessionID, messageID := GetContextValues(ctx)
if sessionID == "" || messageID == "" {
return ToolResponse{}, fmt.Errorf("session ID and message ID are required for creating a patch")
}
// Request permission for all changes
for path, change := range commit.Changes {
switch change.Type {
case diff.ActionAdd:
dir := filepath.Dir(path)
patchDiff, _, _ := diff.GenerateDiff("", *change.NewContent, path)
p := p.permissions.Request(
ctx,
permission.CreatePermissionRequest{
SessionID: sessionID,
Path: dir,
ToolName: PatchToolName,
Action: "create",
Description: fmt.Sprintf("Create file %s", path),
Params: EditPermissionsParams{
FilePath: path,
Diff: patchDiff,
},
},
)
if !p {
return ToolResponse{}, permission.ErrorPermissionDenied
}
case diff.ActionUpdate:
currentContent := ""
if change.OldContent != nil {
currentContent = *change.OldContent
}
newContent := ""
if change.NewContent != nil {
newContent = *change.NewContent
}
patchDiff, _, _ := diff.GenerateDiff(currentContent, newContent, path)
dir := filepath.Dir(path)
p := p.permissions.Request(
ctx,
permission.CreatePermissionRequest{
SessionID: sessionID,
Path: dir,
ToolName: PatchToolName,
Action: "update",
Description: fmt.Sprintf("Update file %s", path),
Params: EditPermissionsParams{
FilePath: path,
Diff: patchDiff,
},
},
)
if !p {
return ToolResponse{}, permission.ErrorPermissionDenied
}
case diff.ActionDelete:
dir := filepath.Dir(path)
patchDiff, _, _ := diff.GenerateDiff(*change.OldContent, "", path)
p := p.permissions.Request(
ctx,
permission.CreatePermissionRequest{
SessionID: sessionID,
Path: dir,
ToolName: PatchToolName,
Action: "delete",
Description: fmt.Sprintf("Delete file %s", path),
Params: EditPermissionsParams{
FilePath: path,
Diff: patchDiff,
},
},
)
if !p {
return ToolResponse{}, permission.ErrorPermissionDenied
}
}
}
// Apply the changes to the filesystem
err = diff.ApplyCommit(commit, func(path string, content string) error {
absPath := path
if !filepath.IsAbs(absPath) {
wd := config.WorkingDirectory()
absPath = filepath.Join(wd, absPath)
}
// Create parent directories if needed
dir := filepath.Dir(absPath)
if err := os.MkdirAll(dir, 0o755); err != nil {
return fmt.Errorf("failed to create parent directories for %s: %w", absPath, err)
}
return os.WriteFile(absPath, []byte(content), 0o644)
}, func(path string) error {
absPath := path
if !filepath.IsAbs(absPath) {
wd := config.WorkingDirectory()
absPath = filepath.Join(wd, absPath)
}
return os.Remove(absPath)
})
if err != nil {
return NewTextErrorResponse(fmt.Sprintf("failed to apply patch: %s", err)), nil
}
// Update file history for all modified files
changedFiles := []string{}
totalAdditions := 0
totalRemovals := 0
for path, change := range commit.Changes {
absPath := path
if !filepath.IsAbs(absPath) {
wd := config.WorkingDirectory()
absPath = filepath.Join(wd, absPath)
}
changedFiles = append(changedFiles, absPath)
oldContent := ""
if change.OldContent != nil {
oldContent = *change.OldContent
}
newContent := ""
if change.NewContent != nil {
newContent = *change.NewContent
}
// Calculate diff statistics
_, additions, removals := diff.GenerateDiff(oldContent, newContent, path)
totalAdditions += additions
totalRemovals += removals
// Update history
file, err := p.files.GetLatestByPathAndSession(ctx, absPath, sessionID)
if err != nil && change.Type != diff.ActionAdd {
// If not adding a file, create history entry for existing file
_, err = p.files.Create(ctx, sessionID, absPath, oldContent)
if err != nil {
slog.Debug("Error creating file history", "error", err)
}
}
if err == nil && change.Type != diff.ActionAdd && file.Content != oldContent {
// User manually changed content, store intermediate version
_, err = p.files.CreateVersion(ctx, sessionID, absPath, oldContent)
if err != nil {
slog.Debug("Error creating file history version", "error", err)
}
}
// Store new version
if change.Type == diff.ActionDelete {
_, err = p.files.CreateVersion(ctx, sessionID, absPath, "")
} else {
_, err = p.files.CreateVersion(ctx, sessionID, absPath, newContent)
}
if err != nil {
slog.Debug("Error creating file history version", "error", err)
}
// Record file operations
recordFileWrite(absPath)
recordFileRead(absPath)
}
// Run LSP diagnostics on all changed files
for _, filePath := range changedFiles {
waitForLspDiagnostics(ctx, filePath, p.lspClients)
}
result := fmt.Sprintf("Patch applied successfully. %d files changed, %d additions, %d removals",
len(changedFiles), totalAdditions, totalRemovals)
diagnosticsText := ""
for _, filePath := range changedFiles {
diagnosticsText += getDiagnostics(filePath, p.lspClients)
}
if diagnosticsText != "" {
result += "\n\nDiagnostics:\n" + diagnosticsText
}
return WithResponseMetadata(
NewTextResponse(result),
PatchResponseMetadata{
FilesChanged: changedFiles,
Additions: totalAdditions,
Removals: totalRemovals,
}), nil
}
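To make the patch format concrete, here is a minimal sketch of a patch_text payload as a Go raw string; the file path and code lines are hypothetical and only illustrate the shape of the format shown in the tool description above:

const examplePatchText = `*** Begin Patch
*** Update File: /home/user/project/main.go
@@ func main() {
fmt.Println("starting")
-fmt.Println("hello")
+fmt.Println("hello, world")
fmt.Println("done")
*** End Patch`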

View File

@@ -1,324 +0,0 @@
package shell
import (
"context"
"errors"
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
"sync"
"syscall"
"time"
"github.com/sst/opencode/internal/config"
"github.com/sst/opencode/internal/status"
)
type PersistentShell struct {
cmd *exec.Cmd
stdin io.WriteCloser
isAlive bool
cwd string
mu sync.Mutex
commandQueue chan *commandExecution
}
type commandExecution struct {
command string
timeout time.Duration
resultChan chan commandResult
ctx context.Context
}
type commandResult struct {
stdout string
stderr string
exitCode int
interrupted bool
err error
}
var (
shellInstance *PersistentShell
shellInstanceOnce sync.Once
)
func GetPersistentShell(workingDir string) *PersistentShell {
shellInstanceOnce.Do(func() {
shellInstance = newPersistentShell(workingDir)
})
if shellInstance == nil {
shellInstance = newPersistentShell(workingDir)
} else if !shellInstance.isAlive {
shellInstance = newPersistentShell(shellInstance.cwd)
}
return shellInstance
}
func newPersistentShell(cwd string) *PersistentShell {
cfg := config.Get()
// Use shell from config if specified
shellPath := ""
shellArgs := []string{"-l"}
if cfg != nil && cfg.Shell.Path != "" {
shellPath = cfg.Shell.Path
if len(cfg.Shell.Args) > 0 {
shellArgs = cfg.Shell.Args
}
} else {
// Fall back to environment variable
shellPath = os.Getenv("SHELL")
if shellPath == "" {
// Default to bash if neither config nor environment variable is set
shellPath = "/bin/bash"
}
}
cmd := exec.Command(shellPath, shellArgs...)
cmd.Dir = cwd
stdinPipe, err := cmd.StdinPipe()
if err != nil {
return nil
}
cmd.Env = append(os.Environ(), "GIT_EDITOR=true")
err = cmd.Start()
if err != nil {
return nil
}
shell := &PersistentShell{
cmd: cmd,
stdin: stdinPipe, // StdinPipe returns an io.WriteCloser, not an *os.File
isAlive: true,
cwd: cwd,
commandQueue: make(chan *commandExecution, 10),
}
go func() {
defer func() {
if r := recover(); r != nil {
fmt.Fprintf(os.Stderr, "Panic in shell command processor: %v\n", r)
shell.isAlive = false
close(shell.commandQueue)
}
}()
shell.processCommands()
}()
go func() {
err := cmd.Wait()
if err != nil {
status.Error(fmt.Sprintf("Shell process exited with error: %v", err))
}
shell.isAlive = false
close(shell.commandQueue)
}()
return shell
}
func (s *PersistentShell) processCommands() {
for cmd := range s.commandQueue {
result := s.execCommand(cmd.command, cmd.timeout, cmd.ctx)
cmd.resultChan <- result
}
}
func (s *PersistentShell) execCommand(command string, timeout time.Duration, ctx context.Context) commandResult {
s.mu.Lock()
defer s.mu.Unlock()
if !s.isAlive {
return commandResult{
stderr: "Shell is not alive",
exitCode: 1,
err: errors.New("shell is not alive"),
}
}
tempDir := os.TempDir()
stdoutFile := filepath.Join(tempDir, fmt.Sprintf("opencode-stdout-%d", time.Now().UnixNano()))
stderrFile := filepath.Join(tempDir, fmt.Sprintf("opencode-stderr-%d", time.Now().UnixNano()))
statusFile := filepath.Join(tempDir, fmt.Sprintf("opencode-status-%d", time.Now().UnixNano()))
cwdFile := filepath.Join(tempDir, fmt.Sprintf("opencode-cwd-%d", time.Now().UnixNano()))
defer func() {
os.Remove(stdoutFile)
os.Remove(stderrFile)
os.Remove(statusFile)
os.Remove(cwdFile)
}()
fullCommand := fmt.Sprintf(`
eval %s < /dev/null > %s 2> %s
EXEC_EXIT_CODE=$?
pwd > %s
echo $EXEC_EXIT_CODE > %s
`,
shellQuote(command),
shellQuote(stdoutFile),
shellQuote(stderrFile),
shellQuote(cwdFile),
shellQuote(statusFile),
)
_, err := s.stdin.Write([]byte(fullCommand + "\n"))
if err != nil {
return commandResult{
stderr: fmt.Sprintf("Failed to write command to shell: %v", err),
exitCode: 1,
err: err,
}
}
interrupted := false
startTime := time.Now()
done := make(chan bool)
go func() {
for {
select {
case <-ctx.Done():
s.killChildren()
interrupted = true
done <- true
return
case <-time.After(10 * time.Millisecond):
if fileExists(statusFile) && fileSize(statusFile) > 0 {
done <- true
return
}
if timeout > 0 {
elapsed := time.Since(startTime)
if elapsed > timeout {
s.killChildren()
interrupted = true
done <- true
return
}
}
}
}
}()
<-done
stdout := readFileOrEmpty(stdoutFile)
stderr := readFileOrEmpty(stderrFile)
exitCodeStr := readFileOrEmpty(statusFile)
newCwd := readFileOrEmpty(cwdFile)
exitCode := 0
if exitCodeStr != "" {
fmt.Sscanf(exitCodeStr, "%d", &exitCode)
} else if interrupted {
exitCode = 143
stderr += "\nCommand execution timed out or was interrupted"
}
if newCwd != "" {
s.cwd = strings.TrimSpace(newCwd)
}
return commandResult{
stdout: stdout,
stderr: stderr,
exitCode: exitCode,
interrupted: interrupted,
}
}
func (s *PersistentShell) killChildren() {
if s.cmd == nil || s.cmd.Process == nil {
return
}
pgrepCmd := exec.Command("pgrep", "-P", fmt.Sprintf("%d", s.cmd.Process.Pid))
output, err := pgrepCmd.Output()
if err != nil {
return
}
for pidStr := range strings.SplitSeq(string(output), "\n") {
if pidStr = strings.TrimSpace(pidStr); pidStr != "" {
var pid int
fmt.Sscanf(pidStr, "%d", &pid)
if pid > 0 {
proc, err := os.FindProcess(pid)
if err == nil {
proc.Signal(syscall.SIGTERM)
}
}
}
}
}
func (s *PersistentShell) Exec(ctx context.Context, command string, timeoutMs int) (string, string, int, bool, error) {
if !s.isAlive {
return "", "Shell is not alive", 1, false, errors.New("shell is not alive")
}
timeout := time.Duration(timeoutMs) * time.Millisecond
resultChan := make(chan commandResult)
s.commandQueue <- &commandExecution{
command: command,
timeout: timeout,
resultChan: resultChan,
ctx: ctx,
}
result := <-resultChan
return result.stdout, result.stderr, result.exitCode, result.interrupted, result.err
}
func (s *PersistentShell) Close() {
s.mu.Lock()
defer s.mu.Unlock()
if !s.isAlive {
return
}
s.stdin.Write([]byte("exit\n"))
s.cmd.Process.Kill()
s.isAlive = false
}
func shellQuote(s string) string {
return "'" + strings.ReplaceAll(s, "'", "'\\''") + "'"
}
func readFileOrEmpty(path string) string {
content, err := os.ReadFile(path)
if err != nil {
return ""
}
return string(content)
}
func fileExists(path string) bool {
_, err := os.Stat(path)
return err == nil
}
func fileSize(path string) int64 {
info, err := os.Stat(path)
if err != nil {
return 0
}
return info.Size()
}
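A minimal usage sketch from a caller's point of view; the working directory and command are hypothetical. Exec queues the command on the persistent shell and blocks until it finishes or the timeout elapses:

func runGitStatus(ctx context.Context) (string, error) {
    sh := GetPersistentShell("/home/user/project") // hypothetical working directory
    stdout, stderr, exitCode, _, err := sh.Exec(ctx, "git status", 30_000)
    if err != nil {
        return "", err
    }
    if exitCode != 0 {
        return "", fmt.Errorf("command failed with exit code %d: %s", exitCode, stderr)
    }
    return stdout, nil
}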

View File

@@ -1,84 +0,0 @@
package tools
import (
"context"
"encoding/json"
)
type ToolInfo struct {
Name string
Description string
Parameters map[string]any
Required []string
}
type toolResponseType string
type (
sessionIDContextKey string
messageIDContextKey string
)
const (
ToolResponseTypeText toolResponseType = "text"
ToolResponseTypeImage toolResponseType = "image"
SessionIDContextKey sessionIDContextKey = "session_id"
MessageIDContextKey messageIDContextKey = "message_id"
)
type ToolResponse struct {
Type toolResponseType `json:"type"`
Content string `json:"content"`
Metadata string `json:"metadata,omitempty"`
IsError bool `json:"is_error"`
}
func NewTextResponse(content string) ToolResponse {
return ToolResponse{
Type: ToolResponseTypeText,
Content: content,
}
}
func WithResponseMetadata(response ToolResponse, metadata any) ToolResponse {
if metadata != nil {
metadataBytes, err := json.Marshal(metadata)
if err != nil {
return response
}
response.Metadata = string(metadataBytes)
}
return response
}
func NewTextErrorResponse(content string) ToolResponse {
return ToolResponse{
Type: ToolResponseTypeText,
Content: content,
IsError: true,
}
}
type ToolCall struct {
ID string `json:"id"`
Name string `json:"name"`
Input string `json:"input"`
}
type BaseTool interface {
Info() ToolInfo
Run(ctx context.Context, params ToolCall) (ToolResponse, error)
}
func GetContextValues(ctx context.Context) (string, string) {
sessionID := ctx.Value(SessionIDContextKey)
messageID := ctx.Value(MessageIDContextKey)
if sessionID == nil {
return "", ""
}
if messageID == nil {
return sessionID.(string), ""
}
return sessionID.(string), messageID.(string)
}
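GetContextValues expects callers to have threaded both IDs through the context under the typed keys above; a minimal sketch with hypothetical ID values:

ctx := context.WithValue(context.Background(), SessionIDContextKey, "session-123")
ctx = context.WithValue(ctx, MessageIDContextKey, "message-456")
sessionID, messageID := GetContextValues(ctx) // "session-123", "message-456"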

View File

@@ -1,312 +0,0 @@
package tools
import (
"bufio"
"context"
"encoding/json"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"github.com/sst/opencode/internal/config"
"github.com/sst/opencode/internal/lsp"
)
type ViewParams struct {
FilePath string `json:"file_path"`
Offset int `json:"offset"`
Limit int `json:"limit"`
}
type viewTool struct {
lspClients map[string]*lsp.Client
}
type ViewResponseMetadata struct {
FilePath string `json:"file_path"`
Content string `json:"content"`
}
const (
ViewToolName = "view"
MaxReadSize = 250 * 1024
DefaultReadLimit = 2000
MaxLineLength = 2000
viewDescription = `File viewing tool that reads and displays the contents of files with line numbers, allowing you to examine code, logs, or text data.
WHEN TO USE THIS TOOL:
- Use when you need to read the contents of a specific file
- Helpful for examining source code, configuration files, or log files
- Perfect for looking at text-based file formats
HOW TO USE:
- Provide the path to the file you want to view
- Optionally specify an offset to start reading from a specific line
- Optionally specify a limit to control how many lines are read
FEATURES:
- Displays file contents with line numbers for easy reference
- Can read from any position in a file using the offset parameter
- Handles large files by limiting the number of lines read
- Automatically truncates very long lines for better display
- Suggests similar file names when the requested file isn't found
LIMITATIONS:
- Maximum file size is 250KB
- Default reading limit is 2000 lines
- Lines longer than 2000 characters are truncated
- Cannot display binary files or images
- Images can be identified but not displayed
TIPS:
- Use with Glob tool to first find files you want to view
- For code exploration, first use Grep to find relevant files, then View to examine them
- When viewing large files, use the offset parameter to read specific sections`
)
func NewViewTool(lspClients map[string]*lsp.Client) BaseTool {
return &viewTool{
lspClients,
}
}
func (v *viewTool) Info() ToolInfo {
return ToolInfo{
Name: ViewToolName,
Description: viewDescription,
Parameters: map[string]any{
"file_path": map[string]any{
"type": "string",
"description": "The path to the file to read",
},
"offset": map[string]any{
"type": "integer",
"description": "The line number to start reading from (0-based)",
},
"limit": map[string]any{
"type": "integer",
"description": "The number of lines to read (defaults to 2000)",
},
},
Required: []string{"file_path"},
}
}
// Run implements Tool.
func (v *viewTool) Run(ctx context.Context, call ToolCall) (ToolResponse, error) {
var params ViewParams
if err := json.Unmarshal([]byte(call.Input), &params); err != nil {
return NewTextErrorResponse(fmt.Sprintf("error parsing parameters: %s", err)), nil
}
if params.FilePath == "" {
return NewTextErrorResponse("file_path is required"), nil
}
// Handle relative paths
filePath := params.FilePath
if !filepath.IsAbs(filePath) {
filePath = filepath.Join(config.WorkingDirectory(), filePath)
}
// Check if file exists
fileInfo, err := os.Stat(filePath)
if err != nil {
if os.IsNotExist(err) {
// Try to offer suggestions for similarly named files
dir := filepath.Dir(filePath)
base := filepath.Base(filePath)
dirEntries, dirErr := os.ReadDir(dir)
if dirErr == nil {
var suggestions []string
for _, entry := range dirEntries {
if strings.Contains(strings.ToLower(entry.Name()), strings.ToLower(base)) ||
strings.Contains(strings.ToLower(base), strings.ToLower(entry.Name())) {
suggestions = append(suggestions, filepath.Join(dir, entry.Name()))
if len(suggestions) >= 3 {
break
}
}
}
if len(suggestions) > 0 {
return NewTextErrorResponse(fmt.Sprintf("File not found: %s\n\nDid you mean one of these?\n%s",
filePath, strings.Join(suggestions, "\n"))), nil
}
}
return NewTextErrorResponse(fmt.Sprintf("File not found: %s", filePath)), nil
}
return ToolResponse{}, fmt.Errorf("error accessing file: %w", err)
}
// Check if it's a directory
if fileInfo.IsDir() {
return NewTextErrorResponse(fmt.Sprintf("Path is a directory, not a file: %s", filePath)), nil
}
// Check file size
if fileInfo.Size() > MaxReadSize {
return NewTextErrorResponse(fmt.Sprintf("File is too large (%d bytes). Maximum size is %d bytes",
fileInfo.Size(), MaxReadSize)), nil
}
// Set default limit if not provided
if params.Limit <= 0 {
params.Limit = DefaultReadLimit
}
// Check if it's an image file
isImage, imageType := isImageFile(filePath)
// TODO: handle images
if isImage {
return NewTextErrorResponse(fmt.Sprintf("This is an image file of type: %s\nUse a different tool to process images", imageType)), nil
}
// Read the file content
content, lineCount, err := readTextFile(filePath, params.Offset, params.Limit)
if err != nil {
return ToolResponse{}, fmt.Errorf("error reading file: %w", err)
}
notifyLspOpenFile(ctx, filePath, v.lspClients)
output := "<file>\n"
// Format the output with line numbers
output += addLineNumbers(content, params.Offset+1)
// Add a note if the content was truncated
if lineCount > params.Offset+len(strings.Split(content, "\n")) {
output += fmt.Sprintf("\n\n(File has more lines. Use 'offset' parameter to read beyond line %d)",
params.Offset+len(strings.Split(content, "\n")))
}
output += "\n</file>\n"
output += getDiagnostics(filePath, v.lspClients)
recordFileRead(filePath)
return WithResponseMetadata(
NewTextResponse(output),
ViewResponseMetadata{
FilePath: filePath,
Content: content,
},
), nil
}
func addLineNumbers(content string, startLine int) string {
if content == "" {
return ""
}
lines := strings.Split(content, "\n")
var result []string
for i, line := range lines {
line = strings.TrimSuffix(line, "\r")
lineNum := i + startLine
numStr := fmt.Sprintf("%d", lineNum)
if len(numStr) >= 6 {
result = append(result, fmt.Sprintf("%s|%s", numStr, line))
} else {
paddedNum := fmt.Sprintf("%6s", numStr)
result = append(result, fmt.Sprintf("%s|%s", paddedNum, line))
}
}
return strings.Join(result, "\n")
}
func readTextFile(filePath string, offset, limit int) (string, int, error) {
file, err := os.Open(filePath)
if err != nil {
return "", 0, err
}
defer file.Close()
lineCount := 0
scanner := NewLineScanner(file)
if offset > 0 {
for lineCount < offset && scanner.Scan() {
lineCount++
}
if err = scanner.Err(); err != nil {
return "", 0, err
}
}
if offset == 0 {
_, err = file.Seek(0, io.SeekStart)
if err != nil {
return "", 0, err
}
}
var lines []string
lineCount = offset
for scanner.Scan() && len(lines) < limit {
lineCount++
lineText := scanner.Text()
if len(lineText) > MaxLineLength {
lineText = lineText[:MaxLineLength] + "..."
}
lines = append(lines, lineText)
}
// Continue scanning to get total line count
for scanner.Scan() {
lineCount++
}
if err := scanner.Err(); err != nil {
return "", 0, err
}
return strings.Join(lines, "\n"), lineCount, nil
}
func isImageFile(filePath string) (bool, string) {
ext := strings.ToLower(filepath.Ext(filePath))
switch ext {
case ".jpg", ".jpeg":
return true, "JPEG"
case ".png":
return true, "PNG"
case ".gif":
return true, "GIF"
case ".bmp":
return true, "BMP"
case ".svg":
return true, "SVG"
case ".webp":
return true, "WebP"
default:
return false, ""
}
}
type LineScanner struct {
scanner *bufio.Scanner
}
func NewLineScanner(r io.Reader) *LineScanner {
return &LineScanner{
scanner: bufio.NewScanner(r),
}
}
func (s *LineScanner) Scan() bool {
return s.scanner.Scan()
}
func (s *LineScanner) Text() string {
return s.scanner.Text()
}
func (s *LineScanner) Err() error {
return s.scanner.Err()
}
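A small sketch of the formatting produced by addLineNumbers; the content is hypothetical. Line numbers are right-aligned to six columns, and Run passes params.Offset+1 as the starting line so the numbering matches the file:

fmt.Println(addLineNumbers("package main\n\nfunc main() {}", 1))
// Output:
//      1|package main
//      2|
//      3|func main() {}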

View File

@@ -1,222 +0,0 @@
package tools
import (
"context"
"encoding/json"
"fmt"
"os"
"path/filepath"
"time"
"github.com/sst/opencode/internal/config"
"github.com/sst/opencode/internal/diff"
"github.com/sst/opencode/internal/history"
"github.com/sst/opencode/internal/lsp"
"github.com/sst/opencode/internal/permission"
"log/slog"
)
type WriteParams struct {
FilePath string `json:"file_path"`
Content string `json:"content"`
}
type WritePermissionsParams struct {
FilePath string `json:"file_path"`
Diff string `json:"diff"`
}
type writeTool struct {
lspClients map[string]*lsp.Client
permissions permission.Service
files history.Service
}
type WriteResponseMetadata struct {
Diff string `json:"diff"`
Additions int `json:"additions"`
Removals int `json:"removals"`
}
const (
WriteToolName = "write"
writeDescription = `File writing tool that creates or updates files in the filesystem, allowing you to save or modify text content.
WHEN TO USE THIS TOOL:
- Use when you need to create a new file
- Helpful for updating existing files with modified content
- Perfect for saving generated code, configurations, or text data
HOW TO USE:
- Provide the path to the file you want to write
- Include the content to be written to the file
- The tool will create any necessary parent directories
FEATURES:
- Can create new files or overwrite existing ones
- Creates parent directories automatically if they don't exist
- Checks if the file has been modified since last read for safety
- Avoids unnecessary writes when content hasn't changed
LIMITATIONS:
- You should read a file before writing to it to avoid conflicts
- Cannot append to files (rewrites the entire file)
TIPS:
- Use the View tool first to examine existing files before modifying them
- Use the LS tool to verify the correct location when creating new files
- Combine with Glob and Grep tools to find and modify multiple files
- Always include descriptive comments when making changes to existing code`
)
func NewWriteTool(lspClients map[string]*lsp.Client, permissions permission.Service, files history.Service) BaseTool {
return &writeTool{
lspClients: lspClients,
permissions: permissions,
files: files,
}
}
func (w *writeTool) Info() ToolInfo {
return ToolInfo{
Name: WriteToolName,
Description: writeDescription,
Parameters: map[string]any{
"file_path": map[string]any{
"type": "string",
"description": "The path to the file to write",
},
"content": map[string]any{
"type": "string",
"description": "The content to write to the file",
},
},
Required: []string{"file_path", "content"},
}
}
func (w *writeTool) Run(ctx context.Context, call ToolCall) (ToolResponse, error) {
var params WriteParams
if err := json.Unmarshal([]byte(call.Input), &params); err != nil {
return NewTextErrorResponse(fmt.Sprintf("error parsing parameters: %s", err)), nil
}
if params.FilePath == "" {
return NewTextErrorResponse("file_path is required"), nil
}
if params.Content == "" {
return NewTextErrorResponse("content is required"), nil
}
filePath := params.FilePath
if !filepath.IsAbs(filePath) {
filePath = filepath.Join(config.WorkingDirectory(), filePath)
}
fileInfo, err := os.Stat(filePath)
if err == nil {
if fileInfo.IsDir() {
return NewTextErrorResponse(fmt.Sprintf("Path is a directory, not a file: %s", filePath)), nil
}
modTime := fileInfo.ModTime()
lastRead := getLastReadTime(filePath)
if modTime.After(lastRead) {
return NewTextErrorResponse(fmt.Sprintf("File %s has been modified since it was last read.\nLast modification: %s\nLast read: %s\n\nPlease read the file again before modifying it.",
filePath, modTime.Format(time.RFC3339), lastRead.Format(time.RFC3339))), nil
}
oldContent, readErr := os.ReadFile(filePath)
if readErr == nil && string(oldContent) == params.Content {
return NewTextErrorResponse(fmt.Sprintf("File %s already contains the exact content. No changes made.", filePath)), nil
}
} else if !os.IsNotExist(err) {
return ToolResponse{}, fmt.Errorf("error checking file: %w", err)
}
dir := filepath.Dir(filePath)
if err = os.MkdirAll(dir, 0o755); err != nil {
return ToolResponse{}, fmt.Errorf("error creating directory: %w", err)
}
oldContent := ""
if fileInfo != nil && !fileInfo.IsDir() {
oldBytes, readErr := os.ReadFile(filePath)
if readErr == nil {
oldContent = string(oldBytes)
}
}
sessionID, messageID := GetContextValues(ctx)
if sessionID == "" || messageID == "" {
return ToolResponse{}, fmt.Errorf("session_id and message_id are required")
}
fileDiff, additions, removals := diff.GenerateDiff(
oldContent,
params.Content,
filePath,
)
p := w.permissions.Request(
ctx,
permission.CreatePermissionRequest{
SessionID: sessionID,
Path: filePath,
ToolName: WriteToolName,
Action: "write",
Description: fmt.Sprintf("Create file %s", filePath),
Params: WritePermissionsParams{
FilePath: filePath,
Diff: fileDiff,
},
},
)
if !p {
return ToolResponse{}, permission.ErrorPermissionDenied
}
err = os.WriteFile(filePath, []byte(params.Content), 0o644)
if err != nil {
return ToolResponse{}, fmt.Errorf("error writing file: %w", err)
}
// Check if file exists in history
file, err := w.files.GetLatestByPathAndSession(ctx, filePath, sessionID)
if err != nil {
_, err = w.files.Create(ctx, sessionID, filePath, oldContent)
if err != nil {
// Fail the write if the file's prior content cannot be recorded in history
return ToolResponse{}, fmt.Errorf("error creating file history: %w", err)
}
}
if file.Content != oldContent {
// User manually changed the content; store an intermediate version
_, err = w.files.CreateVersion(ctx, sessionID, filePath, oldContent)
if err != nil {
slog.Debug("Error creating file history version", "error", err)
}
}
// Store the new version
_, err = w.files.CreateVersion(ctx, sessionID, filePath, params.Content)
if err != nil {
slog.Debug("Error creating file history version", "error", err)
}
recordFileWrite(filePath)
recordFileRead(filePath)
waitForLspDiagnostics(ctx, filePath, w.lspClients)
result := fmt.Sprintf("File successfully written: %s", filePath)
result = fmt.Sprintf("<result>\n%s\n</result>", result)
result += getDiagnostics(filePath, w.lspClients)
return WithResponseMetadata(NewTextResponse(result),
WriteResponseMetadata{
Diff: fileDiff,
Additions: additions,
Removals: removals,
},
), nil
}
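A minimal end-to-end sketch of invoking the write tool; the session ID, message ID, file path, and the pre-constructed lspClients, permissions, and files values are all hypothetical. Input carries JSON-encoded WriteParams, and the context must hold the IDs read by GetContextValues:

tool := NewWriteTool(lspClients, permissions, files)
ctx := context.WithValue(context.Background(), SessionIDContextKey, "session-123")
ctx = context.WithValue(ctx, MessageIDContextKey, "message-456")
resp, err := tool.Run(ctx, ToolCall{
    ID:    "call-1",
    Name:  WriteToolName,
    Input: `{"file_path": "/home/user/project/notes.txt", "content": "hello\n"}`,
})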