Mirror of https://github.com/aljazceru/goose.git (synced 2026-02-14 11:04:29 +01:00)
.devcontainer/Dockerfile (new file, 11 lines)
@@ -0,0 +1,11 @@
# Use Rust base image
FROM mcr.microsoft.com/devcontainers/rust:1

# Install additional dependencies
RUN apt-get update && apt-get install -y \
    build-essential \
    libdbus-1-dev \
    gnome-keyring \
    libxcb1-dev \
    protobuf-compiler \
    && apt-get clean
.devcontainer/devcontainer.json (new file, 28 lines)
@@ -0,0 +1,28 @@
{
  "name": "Rust Dev Container",
  "build": {
    "dockerfile": "Dockerfile"
  },
  "features": {
    "ghcr.io/devcontainers/features/rust:1": {
      "version": "stable"
    }
  },
  "customizations": {
    "vscode": {
      "extensions": [
        "rust-lang.rust-analyzer",
        "vadimcn.vscode-lldb"
      ]
    },
    "settings": {
      "terminal.integrated.defaultProfile.linux": "/bin/bash"
    }
  },
  "postCreateCommand": "cargo build",
  "remoteUser": "vscode",
  "mounts": [
    "source=${localWorkspaceFolder}/crates,target=/workspace/crates,type=bind"
  ]
}
.github/ISSUE_TEMPLATE/config.yml (new file, 8 lines)
@@ -0,0 +1,8 @@
blank_issues_enabled: false
contact_links:
  - name: Goose Discord discussion
    url: https://discord.gg/block-opensource
    about: Please ask and answer questions here.
  - name: Report a security vulnerability
    url: https://github.com/block/goose/security/policy
    about: Please report security vulnerabilities here.
.github/ISSUE_TEMPLATE/submit-recipe.yml (new file, 110 lines)
@@ -0,0 +1,110 @@
name: 🧑‍🍳 Submit a Recipe to the Goose Cookbook
description: Share a reusable Goose recipe with the community!
title: "[Recipe] <your recipe title here>"
labels: ["recipe submission"]
body:
  - type: markdown
    attributes:
      value: |
        Thanks for contributing to the Goose Cookbook! 🍳
        Recipes are reusable sessions created in Goose Desktop or CLI and shared with the community to help others vibe code faster.

        📌 **How to Submit**
        - Create your recipe using Goose ("Make recipe from this session")
        - Fill out the YAML below using the format provided
        - Paste it into the field and submit the issue — we'll review and add it to the Cookbook!

        🪄 **What Happens After?**
        - If accepted, we'll publish your recipe to the [Goose Recipes Cookbook](https://block.github.io/goose/recipes)
        - You'll receive OpenRouter **LLM API credits** as a thank you!
        - Your GitHub handle will be displayed and linked on the recipe card
        - If you provide an email below, we'll email you your credits when your recipe is approved and merged.
        - If the YAML has any issues, Goose will comment with validation errors so you can fix and resubmit.

        🧪 **Pro Tip:** You can test your recipe locally in your terminal with:
        `goose recipe validate your-recipe.yaml`

  - type: textarea
    id: recipe-yaml
    attributes:
      label: Paste Your Full Recipe YAML Below
      description: Use the structure below and we’ll auto-fill your GitHub handle for `author.contact` after submission.
      placeholder: |
        version: "1.0.0"
        id: clean-up-feature-flag
        title: Clean Up Feature Flag
        description: Automatically clean up all references of a fully rolled out feature flag from a codebase and make the new behavior the default.
        instructions: |
          Your job is to systematically remove a fully rolled out feature flag and ensure the new behavior is now the default. Use code search tools like ripgrep to identify all references to the flag, clean up definition files, usage sites, tests, and configuration files. Then create a commit and push changes with clear commit messages documenting the flag removal.
        prompt: |
          Task: Remove a feature flag that has been fully rolled out, where the feature flag's functionality should become the default behavior.

          Context:
          Feature flag key: {{ feature_flag_key }}
          Project: {{ repo_dir }}

          Steps to follow:
          1. Check out a *new* branch from main or master named using the feature flag key.
          2. Find the feature flag constant/object that wraps the key.
          3. Search for all references to the constant/object using ripgrep or equivalent tools.
          4. Remove all conditional logic and make the new behavior default.
          5. Remove unused imports, mocks, config, and tests.
          6. Commit your changes and push the branch.
          7. Open a GitHub PR.

          Use commit messages like:
          chore(flag-cleanup): remove <feature_flag_key> flag from codebase

        parameters:
          - key: feature_flag_key
            input_type: string
            requirement: required
            description: Key of the feature flag

          - key: repo_dir
            input_type: string
            requirement: optional
            default: ./
            description: Directory of the codebase

        extensions:
          - type: stdio
            name: developer
            cmd: uvx
            args:
              - developer-mcp@latest
            timeout: 300
            bundled: true
            description: Access developer tools

        activities:
          - Remove feature flag definitions
          - Clean up feature flag usage sites
          - Update affected tests
          - Remove flag configurations
          - Document flag removal
    validations:
      required: true

  - type: input
    id: email
    attributes:
      label: Your Email (optional)
      description: If your recipe is approved, we'll email your LLM API credits here.
      placeholder: yourname@example.com
    validations:
      required: false

  - type: markdown
    attributes:
      value: |
        🛠 **Recipe Field Tips**
        - `version` must be "1.0.0" for now
        - `id` should be lowercase, hyphenated, and unique (e.g. `my-awesome-recipe`)
        - `title` is the display name of your recipe
        - `description` should clearly explain what the recipe does
        - `instructions` are specific steps Goose should follow — supports template variables like `{{ variable_name }}`
        - `prompt` is the first thing Goose sees when the recipe is launched
        - `parameters` should include required or optional inputs — optional ones must have `default`
        - `extensions` must follow the full format with `type`, `cmd`, `args`, `timeout`, etc.
        - `activities` describe the main actions the recipe performs
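For quick reference, the field tips above boil down to a skeleton like the following. This is a minimal sketch with placeholder values, not an official template; the id, title, and parameter name here are illustrative only:

  version: "1.0.0"
  id: my-awesome-recipe            # lowercase, hyphenated, unique
  title: My Awesome Recipe
  description: One clear sentence on what the recipe does.
  instructions: |
    Steps Goose should follow; template variables like {{ target_dir }} are allowed.
  prompt: |
    The first message Goose sees when the recipe is launched.
  parameters:
    - key: target_dir
      input_type: string
      requirement: optional
      default: ./                  # optional parameters must have a default
      description: Directory to operate on
  activities:
    - Describe the main action the recipe performs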
.github/workflows/build-cli.yml (362 lines changed)

@@ -1,7 +1,12 @@
-# This is a **reuseable** workflow that bundles the Desktop App for macOS.
+# This is a **reusable** workflow that builds the CLI for multiple platforms.
# It doesn't get triggered on its own. It gets used in multiple workflows:
# - release.yml
# - canary.yml
#
# Platform Build Strategy:
# - Linux: Uses Ubuntu runner with cross-compilation
# - macOS: Uses macOS runner with cross-compilation
# - Windows: Uses Ubuntu runner with Docker cross-compilation (same as desktop build)
on:
  workflow_call:
    inputs:

@@ -9,38 +14,50 @@ on:
        required: false
        default: ""
        type: string
      # Let's allow overriding the OSes and architectures in JSON array form:
      # e.g. '["ubuntu-latest","macos-latest"]'
      # If no input is provided, these defaults apply.
      operating-systems:
        type: string
        required: false
        default: '["ubuntu-latest","macos-latest"]'
      architectures:
        type: string
        required: false
        default: '["x86_64","aarch64"]'
      ref:
        type: string
        required: false
-        default: 'refs/heads/main'
+        default: ""

name: "Reusable workflow to build CLI"

jobs:
  build-cli:
    name: Build CLI
-    runs-on: ${{ matrix.os }}
+    runs-on: ${{ matrix.build-on }}
    strategy:
      fail-fast: false
      matrix:
        os: ${{ fromJson(inputs.operating-systems) }}
        architecture: ${{ fromJson(inputs.architectures) }}
        include:
          # Linux builds
          - os: ubuntu-latest
            architecture: x86_64
            target-suffix: unknown-linux-gnu
            build-on: ubuntu-latest
            use-cross: true
          - os: ubuntu-latest
            architecture: aarch64
            target-suffix: unknown-linux-gnu
            build-on: ubuntu-latest
            use-cross: true
          # macOS builds
          - os: macos-latest
            architecture: x86_64
            target-suffix: apple-darwin
            build-on: macos-latest
            use-cross: true
          - os: macos-latest
            architecture: aarch64
            target-suffix: apple-darwin
            build-on: macos-latest
            use-cross: true
          # Windows builds (only x86_64 supported)
          - os: windows
            architecture: x86_64
            target-suffix: pc-windows-gnu
            build-on: ubuntu-latest
            use-cross: false
            use-docker: true

    steps:
      - name: Checkout code

@@ -56,9 +73,41 @@ jobs:
          rm -f Cargo.toml.bak

      - name: Install cross
        if: matrix.use-cross
        run: source ./bin/activate-hermit && cargo install cross --git https://github.com/cross-rs/cross

      # Install Go for building temporal-service
      - name: Set up Go
        uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # pin@v5
        with:
          go-version: '1.21'

      # Cache Cargo registry and git dependencies for Windows builds
      - name: Cache Cargo registry (Windows)
        if: matrix.use-docker
        uses: actions/cache@2f8e54208210a422b2efd51efaa6bd6d7ca8920f
        with:
          path: |
            ~/.cargo/registry/index
            ~/.cargo/registry/cache
            ~/.cargo/git/db
          key: ${{ runner.os }}-cargo-registry-${{ hashFiles('Cargo.lock') }}
          restore-keys: |
            ${{ runner.os }}-cargo-registry-

      # Cache compiled dependencies (target/release/deps) for Windows builds
      - name: Cache Cargo build (Windows)
        if: matrix.use-docker
        uses: actions/cache@2f8e54208210a422b2efd51efaa6bd6d7ca8920f
        with:
          path: target
          key: ${{ runner.os }}-cargo-build-${{ hashFiles('Cargo.lock') }}-${{ hashFiles('rust-toolchain.toml') }}
          restore-keys: |
            ${{ runner.os }}-cargo-build-${{ hashFiles('Cargo.lock') }}-
            ${{ runner.os }}-cargo-build-

-      - name: Build CLI
+      - name: Build CLI (Linux/macOS)
        if: matrix.use-cross
        env:
          CROSS_NO_WARNINGS: 0
          RUST_LOG: debug

@@ -77,13 +126,284 @@ jobs:
          echo "Building with explicit PROTOC path..."
          cross build --release --target ${TARGET} -p goose-cli -vv

-          # tar the goose binary as goose-<TARGET>.tar.bz2
-          cd target/${TARGET}/release
-          tar -cjf goose-${TARGET}.tar.bz2 goose
      - name: Build CLI (Windows)
        if: matrix.use-docker
        run: |
          echo "🚀 Building Windows CLI executable with enhanced GitHub Actions caching..."

          # Create cache directories
          mkdir -p ~/.cargo/registry ~/.cargo/git

          # Use enhanced caching with GitHub Actions cache mounts
          docker run --rm \
            -v "$(pwd)":/usr/src/myapp \
            -v "$HOME/.cargo/registry":/usr/local/cargo/registry \
            -v "$HOME/.cargo/git":/usr/local/cargo/git \
            -w /usr/src/myapp \
            rust:latest \
            bash -c "
              set -e
              echo '=== Setting up Rust environment with caching ==='
              export CARGO_HOME=/usr/local/cargo
              export PATH=/usr/local/cargo/bin:\$PATH

              # Check if Windows target is already installed in cache
              if rustup target list --installed | grep -q x86_64-pc-windows-gnu; then
                echo '✅ Windows cross-compilation target already installed'
              else
                echo '📦 Installing Windows cross-compilation target...'
                rustup target add x86_64-pc-windows-gnu
              fi

              echo '=== Setting up build dependencies ==='
              apt-get update
              apt-get install -y mingw-w64 protobuf-compiler cmake time

              echo '=== Setting up cross-compilation environment ==='
              export CC_x86_64_pc_windows_gnu=x86_64-w64-mingw32-gcc
              export CXX_x86_64_pc_windows_gnu=x86_64-w64-mingw32-g++
              export AR_x86_64_pc_windows_gnu=x86_64-w64-mingw32-ar
              export CARGO_TARGET_X86_64_PC_WINDOWS_GNU_LINKER=x86_64-w64-mingw32-gcc
              export PKG_CONFIG_ALLOW_CROSS=1
              export PROTOC=/usr/bin/protoc

              echo '=== Optimized Cargo configuration ==='
              mkdir -p .cargo
              echo '[build]' > .cargo/config.toml
              echo 'jobs = 4' >> .cargo/config.toml
              echo '' >> .cargo/config.toml
              echo '[target.x86_64-pc-windows-gnu]' >> .cargo/config.toml
              echo 'linker = \"x86_64-w64-mingw32-gcc\"' >> .cargo/config.toml
              echo '' >> .cargo/config.toml
              echo '[net]' >> .cargo/config.toml
              echo 'git-fetch-with-cli = true' >> .cargo/config.toml
              echo 'retry = 3' >> .cargo/config.toml
              echo '' >> .cargo/config.toml
              echo '[profile.release]' >> .cargo/config.toml
              echo 'codegen-units = 1' >> .cargo/config.toml
              echo 'lto = false' >> .cargo/config.toml
              echo 'panic = \"abort\"' >> .cargo/config.toml
              echo 'debug = false' >> .cargo/config.toml
              echo 'opt-level = 2' >> .cargo/config.toml
              echo '' >> .cargo/config.toml
              echo '[registries.crates-io]' >> .cargo/config.toml
              echo 'protocol = \"sparse\"' >> .cargo/config.toml

              echo '=== Building with cached dependencies ==='
              # Check if we have cached build artifacts
              if [ -d target/x86_64-pc-windows-gnu/release/deps ] && [ \"\$(ls -A target/x86_64-pc-windows-gnu/release/deps)\" ]; then
                echo '✅ Found cached build artifacts, performing incremental build...'
                CARGO_INCREMENTAL=1
              else
                echo '🔨 No cached artifacts found, performing full build...'
                CARGO_INCREMENTAL=0
              fi

              echo '🔨 Building Windows CLI executable...'
              CARGO_INCREMENTAL=\$CARGO_INCREMENTAL \
              CARGO_NET_RETRY=3 \
              CARGO_HTTP_TIMEOUT=60 \
              RUST_BACKTRACE=1 \
              cargo build --release --target x86_64-pc-windows-gnu -p goose-cli --jobs 4

              echo '=== Copying Windows runtime DLLs ==='
              GCC_DIR=\$(ls -d /usr/lib/gcc/x86_64-w64-mingw32/*/ | head -n 1)
              cp \"\$GCC_DIR/libstdc++-6.dll\" target/x86_64-pc-windows-gnu/release/
              cp \"\$GCC_DIR/libgcc_s_seh-1.dll\" target/x86_64-pc-windows-gnu/release/
              cp /usr/x86_64-w64-mingw32/lib/libwinpthread-1.dll target/x86_64-pc-windows-gnu/release/

              echo '✅ Build completed successfully!'
              ls -la target/x86_64-pc-windows-gnu/release/
            "

          # Verify build succeeded
          if [ ! -f "./target/x86_64-pc-windows-gnu/release/goose.exe" ]; then
            echo "❌ Windows CLI binary not found."
            ls -la ./target/x86_64-pc-windows-gnu/release/ || echo "Release directory doesn't exist"
            exit 1
          fi

          echo "✅ Windows CLI binary found!"
          ls -la ./target/x86_64-pc-windows-gnu/release/goose.exe

          echo "✅ Windows runtime DLLs:"
          ls -la ./target/x86_64-pc-windows-gnu/release/*.dll

      - name: Build temporal-service for target platform using build.sh script (Linux/macOS)
        if: matrix.use-cross
        run: |
          source ./bin/activate-hermit
          export TARGET="${{ matrix.architecture }}-${{ matrix.target-suffix }}"

          # Set Go cross-compilation variables based on target
          case "${TARGET}" in
            "x86_64-unknown-linux-gnu")
              export GOOS=linux
              export GOARCH=amd64
              BINARY_NAME="temporal-service"
              ;;
            "aarch64-unknown-linux-gnu")
              export GOOS=linux
              export GOARCH=arm64
              BINARY_NAME="temporal-service"
              ;;
            "x86_64-apple-darwin")
              export GOOS=darwin
              export GOARCH=amd64
              BINARY_NAME="temporal-service"
              ;;
            "aarch64-apple-darwin")
              export GOOS=darwin
              export GOARCH=arm64
              BINARY_NAME="temporal-service"
              ;;
            *)
              echo "Unsupported target: ${TARGET}"
              exit 1
              ;;
          esac

          echo "Building temporal-service for ${GOOS}/${GOARCH} using build.sh script..."
          cd temporal-service
          # Run build.sh with cross-compilation environment
          GOOS="${GOOS}" GOARCH="${GOARCH}" ./build.sh
          # Move the built binary to the expected location
          mv "${BINARY_NAME}" "../target/${TARGET}/release/${BINARY_NAME}"
          echo "temporal-service built successfully for ${TARGET}"

      - name: Build temporal-service for Windows
        if: matrix.use-docker
        run: |
          echo "Building temporal-service for Windows using build.sh script..."
          docker run --rm \
            -v "$(pwd)":/usr/src/myapp \
            -w /usr/src/myapp/temporal-service \
            golang:latest \
            sh -c "
              # Make build.sh executable
              chmod +x build.sh
              # Set Windows build environment and run build script
              GOOS=windows GOARCH=amd64 ./build.sh

              # Move the built binary to the expected location (inside container)
              mkdir -p ../target/x86_64-pc-windows-gnu/release
              mv temporal-service.exe ../target/x86_64-pc-windows-gnu/release/temporal-service.exe

              # Fix permissions for host access
              chmod -R 755 ../target/x86_64-pc-windows-gnu
            "
          echo "temporal-service.exe built successfully for Windows"

      - name: Download temporal CLI (Linux/macOS)
        if: matrix.use-cross
        run: |
          export TARGET="${{ matrix.architecture }}-${{ matrix.target-suffix }}"
          TEMPORAL_VERSION="1.3.0"

          # Set platform-specific download parameters
          case "${TARGET}" in
            "x86_64-unknown-linux-gnu")
              TEMPORAL_OS="linux"
              TEMPORAL_ARCH="amd64"
              TEMPORAL_EXT=""
              ;;
            "aarch64-unknown-linux-gnu")
              TEMPORAL_OS="linux"
              TEMPORAL_ARCH="arm64"
              TEMPORAL_EXT=""
              ;;
            "x86_64-apple-darwin")
              TEMPORAL_OS="darwin"
              TEMPORAL_ARCH="amd64"
              TEMPORAL_EXT=""
              ;;
            "aarch64-apple-darwin")
              TEMPORAL_OS="darwin"
              TEMPORAL_ARCH="arm64"
              TEMPORAL_EXT=""
              ;;
            *)
              echo "Unsupported target for temporal CLI: ${TARGET}"
              exit 1
              ;;
          esac

          echo "Downloading temporal CLI for ${TEMPORAL_OS}/${TEMPORAL_ARCH}..."
          TEMPORAL_FILE="temporal_cli_${TEMPORAL_VERSION}_${TEMPORAL_OS}_${TEMPORAL_ARCH}.tar.gz"
          curl -L "https://github.com/temporalio/cli/releases/download/v${TEMPORAL_VERSION}/${TEMPORAL_FILE}" -o "${TEMPORAL_FILE}"

          # Extract temporal CLI
          tar -xzf "${TEMPORAL_FILE}"
          chmod +x temporal${TEMPORAL_EXT}

          # Move to target directory
          mv temporal${TEMPORAL_EXT} "target/${TARGET}/release/temporal${TEMPORAL_EXT}"

          # Clean up
          rm -f "${TEMPORAL_FILE}"
          echo "temporal CLI downloaded successfully for ${TARGET}"

      - name: Download temporal CLI (Windows)
        if: matrix.use-docker
        run: |
          TEMPORAL_VERSION="1.3.0"
          echo "Downloading temporal CLI for Windows..."
          curl -L "https://github.com/temporalio/cli/releases/download/v${TEMPORAL_VERSION}/temporal_cli_${TEMPORAL_VERSION}_windows_amd64.zip" -o temporal-cli-windows.zip
          unzip -o temporal-cli-windows.zip
          chmod +x temporal.exe

          # Fix permissions on target directory (created by Docker as root)
          sudo chown -R $(whoami):$(whoami) target/x86_64-pc-windows-gnu/ || true

          # Move to target directory
          mv temporal.exe target/x86_64-pc-windows-gnu/release/temporal.exe

          # Clean up
          rm -f temporal-cli-windows.zip
          echo "temporal CLI downloaded successfully for Windows"

      - name: Package CLI with temporal-service (Linux/macOS)
        if: matrix.use-cross
        run: |
          source ./bin/activate-hermit
          export TARGET="${{ matrix.architecture }}-${{ matrix.target-suffix }}"

          # Create a directory for the package contents
          mkdir -p "target/${TARGET}/release/goose-package"

          # Copy binaries
          cp "target/${TARGET}/release/goose" "target/${TARGET}/release/goose-package/"
          cp "target/${TARGET}/release/temporal-service" "target/${TARGET}/release/goose-package/"
          cp "target/${TARGET}/release/temporal" "target/${TARGET}/release/goose-package/"

          # Create the tar archive with all binaries
          cd "target/${TARGET}/release"
          tar -cjf "goose-${TARGET}.tar.bz2" -C goose-package .
          echo "ARTIFACT=target/${TARGET}/release/goose-${TARGET}.tar.bz2" >> $GITHUB_ENV

      - name: Package CLI with temporal-service (Windows)
        if: matrix.use-docker
        run: |
          export TARGET="${{ matrix.architecture }}-${{ matrix.target-suffix }}"

          # Create a directory for the package contents
          mkdir -p "target/${TARGET}/release/goose-package"

          # Copy binaries
          cp "target/${TARGET}/release/goose.exe" "target/${TARGET}/release/goose-package/"
          cp "target/${TARGET}/release/temporal-service.exe" "target/${TARGET}/release/goose-package/"
          cp "target/${TARGET}/release/temporal.exe" "target/${TARGET}/release/goose-package/"

          # Copy Windows runtime DLLs
          cp "target/${TARGET}/release/"*.dll "target/${TARGET}/release/goose-package/"

          # Create the zip archive with all binaries and DLLs
          cd "target/${TARGET}/release"
          zip -r "goose-${TARGET}.zip" goose-package/
          echo "ARTIFACT=target/${TARGET}/release/goose-${TARGET}.zip" >> $GITHUB_ENV

      - name: Upload CLI artifact
        uses: actions/upload-artifact@4cec3d8aa04e39d1a68397de0c4cd6fb9dce8ec1 # pin@v4
        with:
          name: goose-${{ matrix.architecture }}-${{ matrix.target-suffix }}
          path: ${{ env.ARTIFACT }}
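The file above is a reusable workflow (workflow_call); per its header comment it is consumed by release.yml and canary.yml. A caller would wire it up roughly like this (a sketch: the job name and input values here are assumptions, and every input can be omitted since each has a default):

  jobs:
    build-cli:
      uses: ./.github/workflows/build-cli.yml
      with:
        version: 1.2.3
        operating-systems: '["ubuntu-latest","macos-latest"]'
        architectures: '["x86_64","aarch64"]'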
.github/workflows/bundle-desktop-intel.yml (41 lines changed)

@@ -24,7 +24,7 @@ on:
      ref:
        type: string
        required: false
-        default: 'refs/heads/main'
+        default: ''
    secrets:
      CERTIFICATE_OSX_APPLICATION:
        description: 'Certificate for macOS application signing'

@@ -82,7 +82,8 @@ jobs:
      - name: Checkout code
        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683
        with:
-          ref: ${{ inputs.ref }}
+          # Only pass ref if it's explicitly set, otherwise let checkout action use its default behavior
+          ref: ${{ inputs.ref != '' && inputs.ref || '' }}
          fetch-depth: 0

      # Update versions before build

@@ -138,9 +139,32 @@ jobs:
          restore-keys: |
            ${{ runner.os }}-intel-cargo-build-

-      # Build specifically for Intel architecture
-      - name: Build goosed for Intel
-        run: source ./bin/activate-hermit && cargo build --release -p goose-server --target x86_64-apple-darwin
      # Install Go for building temporal-service
      - name: Set up Go
        uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # pin@v5
        with:
          go-version: '1.21'

      - name: Build goose-server for Intel macOS (x86_64)
        run: |
          source ./bin/activate-hermit
          rustup target add x86_64-apple-darwin
          cargo build --release -p goose-server --target x86_64-apple-darwin

      # Build temporal-service using build.sh script
      - name: Build temporal-service
        run: |
          echo "Building temporal-service using build.sh script..."
          cd temporal-service
          ./build.sh
          echo "temporal-service built successfully"

      # Install and prepare temporal CLI
      - name: Install temporal CLI via hermit
        run: |
          echo "Installing temporal CLI via hermit..."
          ./bin/hermit install temporal-cli
          echo "temporal CLI installed successfully"

      # Post-build cleanup to free space
      - name: Post-build cleanup

@@ -156,8 +180,11 @@ jobs:
          # Check disk space after cleanup
          df -h

-      - name: Copy binary into Electron folder
-        run: cp target/x86_64-apple-darwin/release/goosed ui/desktop/src/bin/goosed
+      - name: Copy binaries into Electron folder
+        run: |
+          cp target/x86_64-apple-darwin/release/goosed ui/desktop/src/bin/goosed
+          cp temporal-service/temporal-service ui/desktop/src/bin/temporal-service
+          cp bin/temporal ui/desktop/src/bin/temporal

      - name: Add MacOS certs for signing and notarization
        if: ${{ inputs.signing }}
.github/workflows/bundle-desktop-linux.yml (new file, 268 lines)
@@ -0,0 +1,268 @@
# This is a **reusable** workflow that bundles the Desktop App for Linux.
# It doesn't get triggered on its own. It gets used in multiple workflows:
# - release.yml
# - canary.yml (when added)
# - pr-comment-bundle-desktop.yml (when added)
on:
  workflow_call:
    inputs:
      version:
        description: 'Version to set for the build'
        required: false
        default: ""
        type: string
      ref:
        type: string
        required: false
        default: ''

name: "Bundle Desktop (Linux)"

jobs:
  build-desktop-linux:
    name: Build Desktop (Linux)
    runs-on: ubuntu-latest

    steps:
      # 1) Check out source
      - name: Checkout repository
        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # pin@v4
        with:
          # Only pass ref if it's explicitly set, otherwise let checkout action use its default behavior
          ref: ${{ inputs.ref != '' && inputs.ref || '' }}
          fetch-depth: 0

      # 2) Update versions before build
      - name: Update versions
        if: ${{ inputs.version != '' }}
        run: |
          # Update version in Cargo.toml
          sed -i.bak 's/^version = ".*"/version = "'${{ inputs.version }}'"/' Cargo.toml
          rm -f Cargo.toml.bak

          # Update version in package.json
          cd ui/desktop
          npm version ${{ inputs.version }} --no-git-tag-version --allow-same-version

      # 3) Debug information
      - name: Debug workflow info
        env:
          WORKFLOW_NAME: ${{ github.workflow }}
          WORKFLOW_REF: ${{ github.ref }}
          EVENT_NAME: ${{ github.event_name }}
          REPOSITORY: ${{ github.repository }}
        run: |
          echo "=== Workflow Information ==="
          echo "Workflow: ${WORKFLOW_NAME}"
          echo "Ref: ${WORKFLOW_REF}"
          echo "Event: ${EVENT_NAME}"
          echo "Repo: ${REPOSITORY}"
          echo ""
          echo "=== System Information ==="
          uname -a
          lsb_release -a || true
          df -h

      # 4) Install system dependencies for Linux packaging
      - name: Install system dependencies
        run: |
          sudo apt-get update
          sudo apt-get install -y \
            build-essential \
            libnss3-dev \
            libatk-bridge2.0-dev \
            libdrm2 \
            libxcomposite1 \
            libxdamage1 \
            libxrandr2 \
            libgbm1 \
            libxss1 \
            libasound2t64 \
            rpm \
            fakeroot \
            dpkg-dev \
            protobuf-compiler

      # 4a) Pre-build cleanup to ensure enough disk space
      - name: Pre-build cleanup
        run: |
          echo "Performing aggressive pre-build cleanup..."
          # Clean npm cache
          npm cache clean --force || true
          # Clean any previous build artifacts
          rm -rf target || true
          # Clean Homebrew cache (if exists)
          brew cleanup || true
          # Remove unnecessary large directories
          sudo rm -rf /usr/share/dotnet || true
          sudo rm -rf /usr/local/lib/android || true
          sudo rm -rf /opt/ghc || true
          sudo rm -rf /usr/local/share/boost || true
          # Clean apt cache
          sudo apt-get clean || true
          sudo apt-get autoremove -y || true
          # Check disk space after cleanup
          df -h

      # 5) Set up Rust
      - name: Set up Rust
        uses: actions-rust-lang/setup-rust-toolchain@9d7e65c320fdb52dcd45ffaa68deb6c02c8754d9 # pin@v1
        with:
          toolchain: stable

      # 6) Set up Node.js
      - name: Set up Node.js
        uses: actions/setup-node@v4
        with:
          node-version: 23
          cache: 'npm'
          cache-dependency-path: ui/desktop/package-lock.json

      # 7) Cache Rust dependencies
      - name: Cache Cargo registry
        uses: actions/cache@v4
        with:
          path: ~/.cargo/registry
          key: ${{ runner.os }}-cargo-registry-${{ hashFiles('**/Cargo.lock') }}
          restore-keys: |
            ${{ runner.os }}-cargo-registry-

      - name: Cache Cargo index
        uses: actions/cache@v4
        with:
          path: ~/.cargo/index
          key: ${{ runner.os }}-cargo-index
          restore-keys: |
            ${{ runner.os }}-cargo-index

      - name: Cache Cargo build
        uses: actions/cache@v4
        with:
          path: target
          key: ${{ runner.os }}-cargo-build-${{ hashFiles('**/Cargo.lock') }}
          restore-keys: |
            ${{ runner.os }}-cargo-build-

      # 8) Set up Go for building temporal-service
      - name: Set up Go
        uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # pin@v5
        with:
          go-version: '1.21'

      # 9) Build temporal-service using build.sh script
      - name: Build temporal-service
        run: |
          echo "Building temporal-service using build.sh script..."
          cd temporal-service
          ./build.sh
          echo "temporal-service built successfully"

      # 10) Build the Rust goosed binary
      - name: Build goosed binary
        run: |
          echo "Building goosed binary for Linux..."
          cargo build --release -p goose-server
          ls -la target/release/
          file target/release/goosed

      # 11) Clean up build artifacts to save space
      - name: Clean up build artifacts
        run: |
          echo "Cleaning up to save disk space..."
          # Remove debug artifacts
          rm -rf target/debug || true
          # Remove incremental build files
          rm -rf target/release/incremental || true
          rm -rf target/release/deps || true
          rm -rf target/release/build || true
          # Remove other target directories that aren't needed
          find target -name "*.rlib" -delete || true
          find target -name "*.rmeta" -delete || true
          # Don't run cargo clean as it will remove our binary
          # Check disk space
          df -h

      # 12) Copy binaries to Electron folder
      - name: Copy binaries into Electron folder
        run: |
          echo "Copying binaries to ui/desktop/src/bin/"
          mkdir -p ui/desktop/src/bin
          cp target/release/goosed ui/desktop/src/bin/
          cp temporal-service/temporal-service ui/desktop/src/bin/
          chmod +x ui/desktop/src/bin/goosed
          chmod +x ui/desktop/src/bin/temporal-service
          ls -la ui/desktop/src/bin/

      # 13) Final cleanup before npm build
      - name: Final cleanup before npm build
        run: |
          echo "Final cleanup before npm build..."
          # Now we can remove the entire target directory since we copied the binary
          rm -rf target || true
          # Clean any remaining caches
          rm -rf ~/.cargo/registry/cache || true
          rm -rf ~/.cargo/git/db || true
          # Check final disk space
          df -h

      # 14) Install npm dependencies
      - name: Install npm dependencies
        run: |
          cd ui/desktop
          # Clear npm cache and remove lock file as suggested by the error
          rm -rf node_modules package-lock.json || true
          npm cache clean --force || true
          npm install
          # Verify installation
          ls -la node_modules/.bin/ | head -5

      # 15) Build Electron app with Linux makers (.deb and .rpm)
      - name: Build Linux packages
        run: |
          cd ui/desktop
          echo "Building Linux packages (.deb and .rpm)..."

          # Build both .deb and .rpm packages
          npm run make -- --platform=linux --arch=x64

          echo "Build completed. Checking output..."
          ls -la out/
          find out/ -name "*.deb" -o -name "*.rpm" | head -10

      # 16) List all generated files for debugging
      - name: List generated files
        run: |
          echo "=== All files in out/ directory ==="
          find ui/desktop/out/ -type f | head -20
          echo ""
          echo "=== Package files specifically ==="
          find ui/desktop/out/ -name "*.deb" -o -name "*.rpm"
          echo ""
          echo "=== File sizes ==="
          find ui/desktop/out/ -name "*.deb" -o -name "*.rpm" -exec ls -lh {} \;

      # 17) Upload .deb package
      - name: Upload .deb package
        uses: actions/upload-artifact@v4
        with:
          name: Goose-linux-x64-deb
          path: ui/desktop/out/make/deb/x64/*.deb
          if-no-files-found: error

      # 18) Upload .rpm package
      - name: Upload .rpm package
        uses: actions/upload-artifact@v4
        with:
          name: Goose-linux-x64-rpm
          path: ui/desktop/out/make/rpm/x64/*.rpm
          if-no-files-found: error

      # 19) Create combined artifact with both packages
      - name: Upload combined Linux packages
        uses: actions/upload-artifact@v4
        with:
          name: Goose-linux-x64
          path: |
            ui/desktop/out/make/deb/x64/*.deb
            ui/desktop/out/make/rpm/x64/*.rpm
          if-no-files-found: error
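The three upload steps above publish artifacts named Goose-linux-x64-deb, Goose-linux-x64-rpm, and the combined Goose-linux-x64. A downstream job (for example the release job that canary.yml adds later in this diff) can fetch them back by artifact name, roughly like this (a sketch: the job name, action version, and destination path are assumptions):

  release:
    needs: [bundle-desktop-linux]
    runs-on: ubuntu-latest
    steps:
      - name: Download Linux packages
        uses: actions/download-artifact@v4
        with:
          name: Goose-linux-x64
          path: linux-packages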
.github/workflows/bundle-desktop-windows.yml (360 lines changed)

@@ -7,20 +7,33 @@ on:
#    branches: [ "main" ]
  workflow_call:
    inputs:
      version:
        description: 'Version to build'
        required: false
        type: string
      signing:
        description: 'Whether to sign the Windows executable'
        required: false
        type: boolean
        default: false
      ref:
        description: 'Git ref to checkout'
        required: false
        type: string
        default: ''
    secrets:
-      WINDOWS_CERTIFICATE:
+      WINDOWS_CODESIGN_CERTIFICATE:
        required: false
-      WINDOWS_CERTIFICATE_PASSWORD:
+      WINDOW_SIGNING_ROLE:
        required: false
-      ref:
-        type: string
-        required: false
-        default: 'refs/heads/main'
      WINDOW_SIGNING_ROLE_TAG:
        required: false

# Permissions required for OIDC authentication with AWS
permissions:
  id-token: write  # Required to fetch the OIDC token
  contents: read   # Required by actions/checkout
  actions: read    # May be needed for some workflows

jobs:
  build-desktop-windows:

@@ -30,16 +43,25 @@ jobs:
    steps:
      # 1) Check out source
      - name: Checkout repository
-        uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # pin@v4
        with:
-          ref: ${{ inputs.ref }}
+          # Only pass ref if it's explicitly set, otherwise let checkout action use its default behavior
+          ref: ${{ inputs.ref != '' && inputs.ref || '' }}
          fetch-depth: 0

      # 2) Configure AWS credentials for code signing
      - name: Configure AWS credentials
        if: inputs.signing && inputs.signing == true
        uses: aws-actions/configure-aws-credentials@e3dd6a429d7300a6a4c196c26e071d42e0343502 # ratchet:aws-actions/configure-aws-credentials@v4
        with:
          role-to-assume: ${{ github.ref == 'refs/heads/main' && secrets.WINDOW_SIGNING_ROLE || secrets.WINDOW_SIGNING_ROLE_TAG }}
          aws-region: us-west-2

      # 2) Set up Node.js
      - name: Set up Node.js
        uses: actions/setup-node@1a4442cacd436585916779262731d5b162bc6ec7 # pin@v3
        with:
-          node-version: 18
+          node-version: 22

      # 3) Cache dependencies
      - name: Cache node_modules

@@ -48,36 +70,158 @@ jobs:
          path: |
            node_modules
            ui/desktop/node_modules
-          key: ${{ runner.os }}-build-desktop-windows-${{ hashFiles('**/package-lock.json') }}
+          key: ${{ runner.os }}-build-desktop-windows-node22-${{ hashFiles('**/package-lock.json') }}
          restore-keys: |
-            ${{ runner.os }}-build-desktop-windows-
+            ${{ runner.os }}-build-desktop-windows-node22-

      # Cache Cargo registry and git dependencies
      - name: Cache Cargo registry
        uses: actions/cache@2f8e54208210a422b2efd51efaa6bd6d7ca8920f
        with:
          path: |
            ~/.cargo/registry/index
            ~/.cargo/registry/cache
            ~/.cargo/git/db
          key: ${{ runner.os }}-cargo-registry-${{ hashFiles('Cargo.lock') }}
          restore-keys: |
            ${{ runner.os }}-cargo-registry-

      # Cache compiled dependencies (target/release/deps)
      - name: Cache Cargo build
        uses: actions/cache@2f8e54208210a422b2efd51efaa6bd6d7ca8920f
        with:
          path: target
          key: ${{ runner.os }}-cargo-build-${{ hashFiles('Cargo.lock') }}-${{ hashFiles('rust-toolchain.toml') }}
          restore-keys: |
            ${{ runner.os }}-cargo-build-${{ hashFiles('Cargo.lock') }}-
            ${{ runner.os }}-cargo-build-

-      # 4) Build Rust for Windows using Docker (cross-compilation)
-      - name: Build Windows executable using Docker
+      # 4) Build Rust for Windows using Docker (cross-compilation with enhanced caching)
+      - name: Build Windows executable using Docker cross-compilation with enhanced caching
        run: |
-          echo "Building Windows executable using Docker cross-compilation..."
-          docker volume create goose-windows-cache || true
+          echo "🚀 Building Windows executable with enhanced GitHub Actions caching..."

          # Create cache directories
          mkdir -p ~/.cargo/registry ~/.cargo/git

          # Use enhanced caching with GitHub Actions cache mounts
          docker run --rm \
            -v "$(pwd)":/usr/src/myapp \
-            -v goose-windows-cache:/usr/local/cargo/registry \
+            -v "$HOME/.cargo/registry":/usr/local/cargo/registry \
+            -v "$HOME/.cargo/git":/usr/local/cargo/git \
            -w /usr/src/myapp \
            rust:latest \
-            sh -c "rustup target add x86_64-pc-windows-gnu && \
-                   apt-get update && \
-                   apt-get install -y mingw-w64 protobuf-compiler cmake && \
-                   export CC_x86_64_pc_windows_gnu=x86_64-w64-mingw32-gcc && \
-                   export CXX_x86_64_pc_windows_gnu=x86_64-w64-mingw32-g++ && \
-                   export AR_x86_64_pc_windows_gnu=x86_64-w64-mingw32-ar && \
-                   export CARGO_TARGET_X86_64_PC_WINDOWS_GNU_LINKER=x86_64-w64-mingw32-gcc && \
-                   export PKG_CONFIG_ALLOW_CROSS=1 && \
-                   export PROTOC=/usr/bin/protoc && \
-                   export PATH=/usr/bin:\$PATH && \
-                   protoc --version && \
-                   cargo build --release --target x86_64-pc-windows-gnu && \
-                   GCC_DIR=\$(ls -d /usr/lib/gcc/x86_64-w64-mingw32/*/ | head -n 1) && \
-                   cp \$GCC_DIR/libstdc++-6.dll /usr/src/myapp/target/x86_64-pc-windows-gnu/release/ && \
-                   cp \$GCC_DIR/libgcc_s_seh-1.dll /usr/src/myapp/target/x86_64-pc-windows-gnu/release/ && \
-                   cp /usr/x86_64-w64-mingw32/lib/libwinpthread-1.dll /usr/src/myapp/target/x86_64-pc-windows-gnu/release/"
            bash -c "
              set -e
              echo '=== Setting up Rust environment with caching ==='
              export CARGO_HOME=/usr/local/cargo
              export PATH=/usr/local/cargo/bin:\$PATH

              # Check if Windows target is already installed in cache
              if rustup target list --installed | grep -q x86_64-pc-windows-gnu; then
                echo '✅ Windows cross-compilation target already installed'
              else
                echo '📦 Installing Windows cross-compilation target...'
                rustup target add x86_64-pc-windows-gnu
              fi

              echo '=== Setting up build dependencies ==='
              apt-get update
              apt-get install -y mingw-w64 protobuf-compiler cmake time

              echo '=== Setting up cross-compilation environment ==='
              export CC_x86_64_pc_windows_gnu=x86_64-w64-mingw32-gcc
              export CXX_x86_64_pc_windows_gnu=x86_64-w64-mingw32-g++
              export AR_x86_64_pc_windows_gnu=x86_64-w64-mingw32-ar
              export CARGO_TARGET_X86_64_PC_WINDOWS_GNU_LINKER=x86_64-w64-mingw32-gcc
              export PKG_CONFIG_ALLOW_CROSS=1
              export PROTOC=/usr/bin/protoc

              echo '=== Optimized Cargo configuration ==='
              mkdir -p .cargo
              echo '[build]' > .cargo/config.toml
              echo 'jobs = 4' >> .cargo/config.toml
              echo '' >> .cargo/config.toml
              echo '[target.x86_64-pc-windows-gnu]' >> .cargo/config.toml
              echo 'linker = \"x86_64-w64-mingw32-gcc\"' >> .cargo/config.toml
              echo '' >> .cargo/config.toml
              echo '[net]' >> .cargo/config.toml
              echo 'git-fetch-with-cli = true' >> .cargo/config.toml
              echo 'retry = 3' >> .cargo/config.toml
              echo '' >> .cargo/config.toml
              echo '[profile.release]' >> .cargo/config.toml
              echo 'codegen-units = 1' >> .cargo/config.toml
              echo 'lto = false' >> .cargo/config.toml
              echo 'panic = \"abort\"' >> .cargo/config.toml
              echo 'debug = false' >> .cargo/config.toml
              echo 'opt-level = 2' >> .cargo/config.toml
              echo '' >> .cargo/config.toml
              echo '[registries.crates-io]' >> .cargo/config.toml
              echo 'protocol = \"sparse\"' >> .cargo/config.toml

              echo '=== Building with cached dependencies ==='
              # Check if we have cached build artifacts
              if [ -d target/x86_64-pc-windows-gnu/release/deps ] && [ \"\$(ls -A target/x86_64-pc-windows-gnu/release/deps)\" ]; then
                echo '✅ Found cached build artifacts, performing incremental build...'
                CARGO_INCREMENTAL=1
              else
                echo '🔨 No cached artifacts found, performing full build...'
                CARGO_INCREMENTAL=0
              fi

              echo '🔨 Building Windows executable...'
              CARGO_INCREMENTAL=\$CARGO_INCREMENTAL \
              CARGO_NET_RETRY=3 \
              CARGO_HTTP_TIMEOUT=60 \
              RUST_BACKTRACE=1 \
              cargo build --release --target x86_64-pc-windows-gnu --jobs 4

              echo '=== Copying Windows runtime DLLs ==='
              GCC_DIR=\$(ls -d /usr/lib/gcc/x86_64-w64-mingw32/*/ | head -n 1)
              cp \"\$GCC_DIR/libstdc++-6.dll\" target/x86_64-pc-windows-gnu/release/
              cp \"\$GCC_DIR/libgcc_s_seh-1.dll\" target/x86_64-pc-windows-gnu/release/
              cp /usr/x86_64-w64-mingw32/lib/libwinpthread-1.dll target/x86_64-pc-windows-gnu/release/

              echo '✅ Build completed successfully!'
              ls -la target/x86_64-pc-windows-gnu/release/
            "

          # Verify build succeeded
          if [ ! -f "./target/x86_64-pc-windows-gnu/release/goosed.exe" ]; then
            echo "❌ Windows binary not found."
            ls -la ./target/x86_64-pc-windows-gnu/release/ || echo "Release directory doesn't exist"
            exit 1
          fi

          echo "✅ Windows binary found!"
          ls -la ./target/x86_64-pc-windows-gnu/release/goosed.exe
          ls -la ./target/x86_64-pc-windows-gnu/release/*.dll

      # 4.5) Build temporal-service for Windows using build.sh script
      - name: Build temporal-service for Windows
        run: |
          echo "Building temporal-service for Windows using build.sh script..."
          docker run --rm \
            -v "$(pwd)":/usr/src/myapp \
            -w /usr/src/myapp/temporal-service \
            golang:latest \
            sh -c "
              # Make build.sh executable
              chmod +x build.sh
              # Set Windows build environment and run build script
              GOOS=windows GOARCH=amd64 ./build.sh
            "
          echo "temporal-service.exe built successfully"

      # 4.6) Download temporal CLI for Windows
      - name: Download temporal CLI for Windows
        run: |
          echo "Downloading temporal CLI for Windows..."
          TEMPORAL_VERSION="1.3.0"
          curl -L "https://github.com/temporalio/cli/releases/download/v${TEMPORAL_VERSION}/temporal_cli_${TEMPORAL_VERSION}_windows_amd64.zip" -o temporal-cli-windows.zip
          unzip -o temporal-cli-windows.zip
          chmod +x temporal.exe
          echo "temporal CLI downloaded successfully"

      # 5) Prepare Windows binary and DLLs
      - name: Prepare Windows binary and DLLs

@@ -87,6 +231,16 @@ jobs:
            exit 1
          fi

          if [ ! -f "./temporal-service/temporal-service.exe" ]; then
            echo "temporal-service.exe not found."
            exit 1
          fi

          if [ ! -f "./temporal.exe" ]; then
            echo "temporal.exe not found."
            exit 1
          fi

          echo "Cleaning destination directory..."
          rm -rf ./ui/desktop/src/bin
          mkdir -p ./ui/desktop/src/bin

@@ -95,6 +249,12 @@ jobs:
          cp -f ./target/x86_64-pc-windows-gnu/release/goosed.exe ./ui/desktop/src/bin/
          cp -f ./target/x86_64-pc-windows-gnu/release/*.dll ./ui/desktop/src/bin/

          echo "Copying temporal-service.exe..."
          cp -f ./temporal-service/temporal-service.exe ./ui/desktop/src/bin/

          echo "Copying temporal.exe..."
          cp -f ./temporal.exe ./ui/desktop/src/bin/

          # Copy Windows platform files (tools, scripts, etc.)
          if [ -d "./ui/desktop/src/platform/windows/bin" ]; then
            echo "Copying Windows platform files..."

@@ -115,31 +275,139 @@ jobs:
      - name: Build desktop UI with npm
        run: |
          cd ui/desktop

          # Fix for rollup native module issue (npm optional dependencies bug)
          echo "🔧 Fixing npm optional dependencies issue..."
          rm -rf node_modules package-lock.json
          npm install

          # Verify rollup native module is installed
          if [ ! -d "node_modules/@rollup/rollup-linux-x64-gnu" ]; then
            echo "⚠️ Rollup native module missing, installing manually..."
            npm install @rollup/rollup-linux-x64-gnu --save-optional
          fi

          npm run bundle:windows

-      # 7) Copy exe/dll to final out/Goose-win32-x64/resources/bin
-      - name: Copy exe/dll to out folder
+      # 7) Copy exe/dll to final out folder and prepare flat distribution
+      - name: Copy exe/dll to final out folder and prepare flat distribution
        run: |
          cd ui/desktop
          mkdir -p ./out/Goose-win32-x64/resources/bin
          rsync -av src/bin/ out/Goose-win32-x64/resources/bin/

          # Create flat distribution structure
          mkdir -p ./dist-windows
          cp -r ./out/Goose-win32-x64/* ./dist-windows/

          # Verify the final structure
          echo "📋 Final flat distribution structure:"
          ls -la ./dist-windows/
          echo "📋 Binary files in resources/bin:"
          ls -la ./dist-windows/resources/bin/

-      # 8) Code signing (if enabled)
-      - name: Sign Windows executable
+      # 8) Sign Windows executables with jsign + AWS KMS
+      - name: Sign Windows executables with jsign + AWS KMS
        if: inputs.signing && inputs.signing == true
-        env:
-          WINDOWS_CERTIFICATE: ${{ secrets.WINDOWS_CERTIFICATE }}
-          WINDOWS_CERTIFICATE_PASSWORD: ${{ secrets.WINDOWS_CERTIFICATE_PASSWORD }}
        run: |
-          # Note: This would need to be adapted for Linux-based signing
-          # or moved to a Windows runner for the signing step only
-          echo "Code signing would be implemented here"
-          echo "Currently skipped as we're running on Ubuntu"
          set -exuo pipefail
          echo "🔐 Starting Windows code signing with jsign + AWS KMS..."

          # Create certificate file from secret
          echo "📝 Creating certificate file from GitHub secret..."
          echo "${{ secrets.WINDOWS_CODESIGN_CERTIFICATE }}" > block-codesign-cert.pem

          # Install Java (required for jsign)
          echo "☕ Installing Java runtime..."
          sudo apt-get update
          sudo apt-get install -y openjdk-11-jre-headless osslsigncode

          # Download jsign
          echo "📥 Downloading jsign..."
          wget -q https://github.com/ebourg/jsign/releases/download/6.0/jsign-6.0.jar -O jsign.jar
          echo "05ca18d4ab7b8c2183289b5378d32860f0ea0f3bdab1f1b8cae5894fb225fa8a  jsign.jar" | sha256sum -c

          # Sign the main Electron executable (Goose.exe)
          echo "🔐 Signing main Electron executable: Goose.exe"
          cd ui/desktop/dist-windows/

          java -jar ${GITHUB_WORKSPACE}/jsign.jar \
            --storetype AWS \
            --keystore us-west-2 \
            --storepass "${AWS_ACCESS_KEY_ID}|${AWS_SECRET_ACCESS_KEY}|${AWS_SESSION_TOKEN}" \
            --alias windows-codesign \
            --certfile "${GITHUB_WORKSPACE}/block-codesign-cert.pem" \
            --tsaurl "http://timestamp.digicert.com" \
            --name "Goose" \
            --url "https://github.com/block/goose" \
            "Goose.exe"

          osslsigncode verify Goose.exe
          echo "✅ Main executable Goose.exe signed successfully"

          # Sign the backend executable (goosed.exe)
          echo "🔐 Signing backend executable: goosed.exe"
          cd resources/bin/

          java -jar ${GITHUB_WORKSPACE}/jsign.jar \
            --storetype AWS \
            --keystore us-west-2 \
            --storepass "${AWS_ACCESS_KEY_ID}|${AWS_SECRET_ACCESS_KEY}|${AWS_SESSION_TOKEN}" \
            --alias windows-codesign \
            --certfile "${GITHUB_WORKSPACE}/block-codesign-cert.pem" \
            --tsaurl "http://timestamp.digicert.com" \
            --name "Goose Backend" \
            --url "https://github.com/block/goose" \
            "goosed.exe"

          osslsigncode verify goosed.exe
          echo "✅ Backend executable goosed.exe signed successfully"

          # Show final file status
          echo "📋 Final signed files:"
          cd ../../
          ls -la Goose.exe
          sha256sum Goose.exe
          ls -la resources/bin/goosed.exe
          sha256sum resources/bin/goosed.exe

          # Clean up certificate file
          rm -f ${GITHUB_WORKSPACE}/block-codesign-cert.pem

      # 9) Verify signed executables are in final distribution
      - name: Verify signed executables are in final distribution
        if: inputs.signing && inputs.signing == true
        run: |
          echo "📋 Verifying both signed executables in final distribution:"
          echo "Main executable:"
          ls -la ui/desktop/dist-windows/Goose.exe
          osslsigncode verify ui/desktop/dist-windows/Goose.exe
          echo "✅ Main executable signature verification passed"

          echo "Backend executable:"
          ls -la ui/desktop/dist-windows/resources/bin/goosed.exe
          osslsigncode verify ui/desktop/dist-windows/resources/bin/goosed.exe
          echo "✅ Backend executable signature verification passed"

      # 10) Create Windows zip package
      - name: Create Windows zip package
        run: |
          cd ui/desktop
          echo "📦 Creating Windows zip package..."

          # Create a zip file from the dist-windows directory
          zip -r "Goose-win32-x64.zip" dist-windows/

          echo "✅ Windows zip package created:"
          ls -la Goose-win32-x64.zip

          # Also create the zip in the expected output structure for consistency
          mkdir -p out/Goose-win32-x64/
          cp Goose-win32-x64.zip out/Goose-win32-x64/

-      # 9) Upload the final Windows build
+      # 11) Upload the final Windows build
      - name: Upload Windows build artifacts
        uses: actions/upload-artifact@4cec3d8aa04e39d1a68397de0c4cd6fb9dce8ec1 # pin@v4
        with:
-          name: desktop-windows-dist
-          path: ui/desktop/out/Goose-win32-x64/
+          name: Goose-win32-x64
+          path: ui/desktop/out/Goose-win32-x64/Goose-win32-x64.zip
.github/workflows/bundle-desktop.yml (28 lines changed)

@@ -180,10 +180,31 @@ jobs:
          restore-keys: |
            ${{ runner.os }}-cargo-build-

      # Install Go for building temporal-service
      - name: Set up Go
        uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # pin@v5
        with:
          go-version: '1.21'

      # Build the project
      - name: Build goosed
        run: source ./bin/activate-hermit && cargo build --release -p goose-server

      # Build temporal-service using build.sh script
      - name: Build temporal-service
        run: |
          echo "Building temporal-service using build.sh script..."
          cd temporal-service
          ./build.sh
          echo "temporal-service built successfully"

      # Install and prepare temporal CLI
      - name: Install temporal CLI via hermit
        run: |
          echo "Installing temporal CLI via hermit..."
          ./bin/hermit install temporal-cli
          echo "temporal CLI installed successfully"

      # Post-build cleanup to free space
      - name: Post-build cleanup
        run: |

@@ -197,8 +218,11 @@ jobs:
          # Check disk space after cleanup
          df -h

-      - name: Copy binary into Electron folder
-        run: cp target/release/goosed ui/desktop/src/bin/goosed
+      - name: Copy binaries into Electron folder
+        run: |
+          cp target/release/goosed ui/desktop/src/bin/goosed
+          cp temporal-service/temporal-service ui/desktop/src/bin/temporal-service
+          cp bin/temporal ui/desktop/src/bin/temporal

      - name: Add MacOS certs for signing and notarization
        if: ${{ inputs.signing }}
.github/workflows/canary.yml (30 lines changed)

@@ -75,13 +75,36 @@ jobs:
      APPLE_ID_PASSWORD: ${{ secrets.APPLE_ID_PASSWORD }}
      APPLE_TEAM_ID: ${{ secrets.APPLE_TEAM_ID }}

  # ------------------------------------------------------------
  # 5) Bundle Desktop App (Linux) - builds goosed and Electron app
  # ------------------------------------------------------------
  bundle-desktop-linux:
    needs: [prepare-version]
    uses: ./.github/workflows/bundle-desktop-linux.yml
    with:
      version: ${{ needs.prepare-version.outputs.version }}

  # ------------------------------------------------------------
  # 6) Bundle Desktop App (Windows) - builds goosed and Electron app
  # ------------------------------------------------------------
  bundle-desktop-windows:
    needs: [prepare-version]
    uses: ./.github/workflows/bundle-desktop-windows.yml
    with:
      version: ${{ needs.prepare-version.outputs.version }}
      signing: true
    secrets:
      WINDOWS_CODESIGN_CERTIFICATE: ${{ secrets.WINDOWS_CODESIGN_CERTIFICATE }}
      WINDOW_SIGNING_ROLE: ${{ secrets.WINDOW_SIGNING_ROLE }}
      WINDOW_SIGNING_ROLE_TAG: ${{ secrets.WINDOW_SIGNING_ROLE_TAG }}

  # ------------------------------------
-  # 5) Create/Update GitHub Release
+  # 7) Create/Update GitHub Release
  # ------------------------------------
  release:
    name: Release
    runs-on: ubuntu-latest
-    needs: [build-cli, install-script, bundle-desktop]
+    needs: [build-cli, install-script, bundle-desktop, bundle-desktop-linux, bundle-desktop-windows]
    permissions:
      contents: write

@@ -100,7 +123,10 @@ jobs:
          token: ${{ secrets.GITHUB_TOKEN }}
          artifacts: |
            goose-*.tar.bz2
            goose-*.zip
            Goose*.zip
            *.deb
            *.rpm
            download_cli.sh
          allowUpdates: true
          omitBody: true
.github/workflows/ci.yml (47 lines changed)

@@ -25,7 +25,7 @@ jobs:

  rust-build-and-test:
    name: Build and Test Rust Project
-    runs-on: ubuntu-latest
+    runs-on: goose
    steps:
      # Add disk space cleanup before linting
      - name: Check disk space before build

@@ -45,8 +45,19 @@ jobs:
            /usr/local/share/chromium \
            /usr/local/share/powershell \
            /usr/share/dotnet \
-            /usr/share/swift
+            /usr/share/swift \
+            /opt/ghc \
+            /opt/hostedtoolcache \
+            /usr/local/graalvm \
+            /usr/local/sqlpackage

          # Clean package manager caches
          sudo apt-get clean
          sudo apt-get autoremove -y

          # Clean docker if present
          docker system prune -af 2>/dev/null || true

          df -h

      - name: Checkout Code

@@ -84,10 +95,11 @@ jobs:
      - name: Build and Test
        run: |
          gnome-keyring-daemon --components=secrets --daemonize --unlock <<< 'foobar'
-          source ../bin/activate-hermit && cargo test
+          source ../bin/activate-hermit
+          export CARGO_INCREMENTAL=0
+          cargo test --jobs 2
        working-directory: crates

      # Add disk space cleanup before linting
      - name: Check disk space before cleanup
        run: df -h

@@ -98,23 +110,37 @@ jobs:
          rm -rf target/debug/deps
          rm -rf target/debug/build
          rm -rf target/debug/incremental
          # Clean cargo cache more aggressively
          cargo clean || true
          # Clean npm cache if it exists
-          npm cache clean --force || true
+          npm cache clean --force 2>/dev/null || true
          # Clean apt cache
          sudo apt-get clean
          sudo apt-get autoremove -y
          # Remove unnecessary large directories
          rm -rf ~/.cargo/registry/index || true
          rm -rf ~/.cargo/registry/cache || true
          # Remove docker images if any
-          docker system prune -af || true
-          # Remove unused packages
-          sudo apt-get autoremove -y || true
+          docker system prune -af 2>/dev/null || true

      - name: Check disk space after cleanup
        run: df -h

      - name: Lint
-        run: source ./bin/activate-hermit && cargo clippy -- -D warnings
+        run: |
+          source ./bin/activate-hermit
+          export CARGO_INCREMENTAL=0
+          cargo clippy --jobs 2 -- -D warnings

      - name: Install Node.js Dependencies for OpenAPI Check
        run: source ../../bin/activate-hermit && npm ci
        working-directory: ui/desktop

      - name: Check OpenAPI Schema is Up-to-Date
        run: |
          source ./bin/activate-hermit
          just check-openapi-schema

  desktop-lint:
    name: Lint Electron Desktop App
    runs-on: macos-latest

@@ -130,6 +156,7 @@ jobs:
        run: source ../../bin/activate-hermit && npm run lint:check
        working-directory: ui/desktop


  # Faster Desktop App build for PRs only
  bundle-desktop-unsigned:
    uses: ./.github/workflows/bundle-desktop.yml
136
.github/workflows/create-recipe-pr.yml
vendored
Normal file
136
.github/workflows/create-recipe-pr.yml
vendored
Normal file
@@ -0,0 +1,136 @@
|
||||
name: Handle Recipe Submissions
|
||||
|
||||
on:
|
||||
issues:
|
||||
types: [opened, labeled]
|
||||
|
||||
permissions:
|
||||
contents: write
|
||||
issues: write
|
||||
pull-requests: write
|
||||
|
||||
jobs:
|
||||
create-recipe-pr:
|
||||
if: ${{ github.event.label.name == 'recipe submission' || contains(github.event.issue.labels.*.name, 'recipe submission') }}
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
env:
|
||||
PROVIDER_API_KEY: ${{ secrets.OPENROUTER_API_KEY }}
|
||||
|
||||
steps:
|
||||
- name: Checkout repo
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Set up Node.js
|
||||
uses: actions/setup-node@v3
|
||||
with:
|
||||
node-version: '20'
|
||||
|
||||
- name: Install and Configure Goose
|
||||
run: |
|
||||
mkdir -p /home/runner/.local/bin
|
||||
curl -fsSL https://github.com/block/goose/releases/download/stable/download_cli.sh \
|
||||
| CONFIGURE=false INSTALL_PATH=/home/runner/.local/bin bash
|
||||
echo "/home/runner/.local/bin" >> $GITHUB_PATH
|
||||
|
||||
mkdir -p ~/.config/goose
|
||||
cat <<EOF > ~/.config/goose/config.yaml
|
||||
GOOSE_PROVIDER: openrouter
|
||||
GOOSE_MODEL: "anthropic/claude-3.5-sonnet"
|
||||
keyring: false
|
||||
EOF
|
||||
|
||||
- name: Extract recipe YAML from issue
|
||||
id: parse
|
||||
run: |
|
||||
ISSUE_BODY=$(jq -r .issue.body "$GITHUB_EVENT_PATH")
|
||||
RECIPE_YAML=$(echo "$ISSUE_BODY" | awk '/```/,/```/' | sed '1d;$d')
|
||||
echo "$RECIPE_YAML" > recipe.yaml
|
||||
|
||||
AUTHOR="${{ github.event.issue.user.login }}"
|
||||
if ! grep -q "^author:" recipe.yaml; then
|
||||
echo -e "\nauthor:\n contact: $AUTHOR" >> recipe.yaml
|
||||
fi
|
||||
|
||||
TITLE=$(yq '.title' recipe.yaml | tr '[:upper:]' '[:lower:]' | tr -cs 'a-z0-9' '-')
|
||||
echo "branch_name=add-recipe-${TITLE}" >> $GITHUB_OUTPUT
|
||||
echo "recipe_title=${TITLE}" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Validate recipe.yaml with Goose
|
||||
id: validate
|
||||
continue-on-error: true
|
||||
run: |
|
||||
OUTPUT=$(goose recipe validate recipe.yaml 2>&1)
|
||||
echo "$OUTPUT"
|
||||
{
|
||||
echo "validation_output<<EOF"
|
||||
echo "$OUTPUT"
|
||||
echo "EOF"
|
||||
} >> "$GITHUB_OUTPUT"
|
||||
|
||||
- name: Post validation result to issue
|
||||
env:
|
||||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
ISSUE_NUMBER: ${{ github.event.issue.number }}
|
||||
VALIDATION_B64: ${{ steps.validate.outputs.validation_output }}
|
||||
run: |
|
||||
if [ "${{ steps.validate.outcome }}" == "failure" ]; then
|
||||
OUTPUT=$(echo "$VALIDATION_B64" | base64 --decode)
|
||||
COMMENT="❌ Recipe validation failed:\n\n\`\`\`\n$OUTPUT\n\`\`\`\nPlease fix the above issues and resubmit."
|
||||
echo -e "$COMMENT" | gh issue comment "$ISSUE_NUMBER"
|
||||
gh issue close "$ISSUE_NUMBER"
|
||||
exit 1
|
||||
else
|
||||
gh issue comment "$ISSUE_NUMBER" --body "✅ Recipe validated successfully!"
|
||||
fi
|
||||
|
||||
|
||||
- name: Generate recipeUrl and save updated recipe
|
||||
run: |
|
||||
BASE64_ENCODED=$(cat recipe.yaml | base64 | tr -d '\n')
|
||||
echo "" >> recipe.yaml
|
||||
echo "recipeUrl: goose://recipe?config=${BASE64_ENCODED}" >> recipe.yaml
|
||||
|
||||
- name: Create branch and add file
|
||||
env:
|
||||
BRANCH_NAME: ${{ steps.parse.outputs.branch_name }}
|
||||
run: |
|
||||
git checkout -b "$BRANCH_NAME"
|
||||
DEST_DIR="documentation/src/pages/recipes/data/recipes"
|
||||
mkdir -p "$DEST_DIR"
|
||||
ID=$(yq '.id' recipe.yaml)
|
||||
|
||||
if [ -f "$DEST_DIR/${ID}.yaml" ]; then
|
||||
echo "❌ Recipe with ID '$ID' already exists. Aborting."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
cp recipe.yaml "$DEST_DIR/${ID}.yaml"
|
||||
|
||||
git config user.name "github-actions[bot]"
|
||||
git config user.email "github-actions[bot]@users.noreply.github.com"
|
||||
git add "$DEST_DIR/${ID}.yaml"
|
||||
git commit -m "Add recipe: ${ID}"
|
||||
git push origin "$BRANCH_NAME"
|
||||
|
||||
- name: Create pull request
|
||||
id: cpr
|
||||
uses: peter-evans/create-pull-request@5e5b2916f4b4c9420e5e9b0dc4a6d292d30165d7
|
||||
with:
|
||||
token: ${{ secrets.GITHUB_TOKEN }}
|
||||
branch: ${{ steps.parse.outputs.branch_name }}
|
||||
title: "Add recipe: ${{ steps.parse.outputs.recipe_title }}"
|
||||
body: "This PR adds a new Goose recipe submitted via issue #${{ github.event.issue.number }}."
|
||||
reviewers: |
|
||||
EbonyLouis
|
||||
angiejones
|
||||
blackgirlbytes
|
||||
|
||||
- name: Comment and close issue
|
||||
env:
|
||||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
ISSUE_NUMBER: ${{ github.event.issue.number }}
|
||||
PR_URL: ${{ steps.cpr.outputs.pull-request-url }}
|
||||
run: |
|
||||
gh issue comment "$ISSUE_NUMBER" --body "🎉 Thanks for submitting your recipe! We've created a [PR]($PR_URL) to add it to the Cookbook."
|
||||
gh issue close "$ISSUE_NUMBER"
|
||||
10
.github/workflows/pr-comment-build-cli.yml
vendored
10
.github/workflows/pr-comment-build-cli.yml
vendored
@@ -1,4 +1,4 @@
|
||||
# This workflow is triggered by a comment on an issue or PR with the text ".build-cli"
|
||||
# This workflow is triggered by a comment on PR with the text ".build-cli"
|
||||
on:
|
||||
issue_comment:
|
||||
types: [created]
|
||||
@@ -22,14 +22,17 @@ concurrency:
|
||||
|
||||
jobs:
|
||||
trigger-on-command:
|
||||
if: >
|
||||
github.event_name == 'workflow_dispatch' ||
|
||||
(github.event.issue.pull_request && contains(github.event.comment.body, '.build-cli'))
|
||||
name: Trigger on ".build-cli" PR comment
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
continue: ${{ steps.command.outputs.continue || github.event_name == 'workflow_dispatch' }}
|
||||
continue: 'true'
|
||||
pr_number: ${{ steps.command.outputs.issue_number || github.event.inputs.pr_number }}
|
||||
head_sha: ${{ steps.set_head_sha.outputs.head_sha || github.sha }}
|
||||
steps:
|
||||
- if: ${{ github.event_name == 'issue_comment' }}
|
||||
- name: Run command action
|
||||
uses: github/command@v1.3.0
|
||||
id: command
|
||||
with:
|
||||
@@ -86,6 +89,7 @@ jobs:
|
||||
- [📦 Linux (aarch64)](https://nightly.link/${{ github.repository }}/actions/runs/${{ github.run_id }}/goose-aarch64-unknown-linux-gnu.zip)
|
||||
- [📦 macOS (x86_64)](https://nightly.link/${{ github.repository }}/actions/runs/${{ github.run_id }}/goose-x86_64-apple-darwin.zip)
|
||||
- [📦 macOS (aarch64)](https://nightly.link/${{ github.repository }}/actions/runs/${{ github.run_id }}/goose-aarch64-apple-darwin.zip)
|
||||
- [📦 Windows (x86_64)](https://nightly.link/${{ github.repository }}/actions/runs/${{ github.run_id }}/goose-x86_64-pc-windows-gnu.zip)
|
||||
|
||||
These links are provided by nightly.link and will work even if you're not logged into GitHub.
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
# This workflow is triggered by a comment on an issue or PR with the text ".bundle-intel"
|
||||
# This workflow is triggered by a comment on PR with the text ".bundle-intel"
|
||||
# It bundles the Intel Desktop App, then creates a PR comment with a link to download the app.
|
||||
|
||||
on:
|
||||
@@ -24,15 +24,18 @@ concurrency:
|
||||
|
||||
jobs:
|
||||
trigger-on-command:
|
||||
if: >
|
||||
github.event_name == 'workflow_dispatch' ||
|
||||
(github.event.issue.pull_request && contains(github.event.comment.body, '.bundle-intel'))
|
||||
name: Trigger on ".bundle-intel" PR comment
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
continue: ${{ steps.command.outputs.continue || github.event_name == 'workflow_dispatch' }}
|
||||
continue: 'true'
|
||||
# Cannot use github.event.pull_request.number since the trigger is 'issue_comment'
|
||||
pr_number: ${{ steps.command.outputs.issue_number || github.event.inputs.pr_number }}
|
||||
head_sha: ${{ steps.set_head_sha.outputs.head_sha || github.sha }}
|
||||
steps:
|
||||
- if: ${{ github.event_name == 'issue_comment' }}
|
||||
- name: Run command action
|
||||
uses: github/command@319d5236cc34ed2cb72a47c058a363db0b628ebe # pin@v1.3.0
|
||||
id: command
|
||||
with:
|
||||
|
||||
22
.github/workflows/pr-comment-bundle-windows.yml
vendored
22
.github/workflows/pr-comment-bundle-windows.yml
vendored
@@ -1,4 +1,4 @@
|
||||
# This workflow is triggered by a comment on an issue or PR with the text ".bundle-windows"
|
||||
# This workflow is triggered by a comment on PR with the text ".bundle-windows"
|
||||
# It bundles the Windows Desktop App, then creates a PR comment with a link to download the app.
|
||||
|
||||
on:
|
||||
@@ -11,10 +11,13 @@ on:
|
||||
required: true
|
||||
type: string
|
||||
|
||||
# permissions needed for reacting to IssueOps commands on PRs
|
||||
# permissions needed for reacting to IssueOps commands on PRs and AWS OIDC authentication
|
||||
permissions:
|
||||
pull-requests: write
|
||||
checks: read
|
||||
id-token: write # Required for AWS OIDC authentication in called workflow
|
||||
contents: read # Required by actions/checkout in called workflow
|
||||
actions: read # May be needed for some workflows
|
||||
|
||||
name: Bundle Windows Desktop App
|
||||
|
||||
@@ -24,15 +27,18 @@ concurrency:
|
||||
|
||||
jobs:
|
||||
trigger-on-command:
|
||||
if: >
|
||||
github.event_name == 'workflow_dispatch' ||
|
||||
(github.event.issue.pull_request && contains(github.event.comment.body, '.bundle-windows'))
|
||||
name: Trigger on ".bundle-windows" PR comment
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
continue: ${{ steps.command.outputs.continue || github.event_name == 'workflow_dispatch' }}
|
||||
continue: 'true'
|
||||
# Cannot use github.event.pull_request.number since the trigger is 'issue_comment'
|
||||
pr_number: ${{ steps.command.outputs.issue_number || github.event.inputs.pr_number }}
|
||||
head_sha: ${{ steps.set_head_sha.outputs.head_sha || github.sha }}
|
||||
steps:
|
||||
- if: ${{ github.event_name == 'issue_comment' }}
|
||||
- name: Run command action
|
||||
uses: github/command@319d5236cc34ed2cb72a47c058a363db0b628ebe # pin@v1.3.0
|
||||
id: command
|
||||
with:
|
||||
@@ -61,11 +67,9 @@ jobs:
|
||||
if: ${{ needs.trigger-on-command.outputs.continue == 'true' }}
|
||||
uses: ./.github/workflows/bundle-desktop-windows.yml
|
||||
with:
|
||||
signing: false # false for now as we don't have a cert yet
|
||||
signing: false
|
||||
ref: ${{ needs.trigger-on-command.outputs.head_sha }}
|
||||
secrets:
|
||||
WINDOWS_CERTIFICATE: ${{ secrets.WINDOWS_CERTIFICATE }}
|
||||
WINDOWS_CERTIFICATE_PASSWORD: ${{ secrets.WINDOWS_CERTIFICATE_PASSWORD }}
|
||||
|
||||
|
||||
pr-comment-windows:
|
||||
name: PR Comment with Windows App
|
||||
@@ -93,4 +97,4 @@ jobs:
|
||||
**Instructions:**
|
||||
After downloading, unzip the file and run Goose.exe. The app is signed for Windows.
|
||||
|
||||
This link is provided by nightly.link and will work even if you're not logged into GitHub.
|
||||
This link is provided by nightly.link and will work even if you're not logged into GitHub.
|
||||
|
||||
9
.github/workflows/pr-comment-bundle.yml
vendored
9
.github/workflows/pr-comment-bundle.yml
vendored
@@ -1,4 +1,4 @@
|
||||
# This workflow is triggered by a comment on an issue or PR with the text ".bundle"
|
||||
# This workflow is triggered by a comment on PR with the text ".bundle"
|
||||
# It bundles the ARM64 Desktop App, then creates a PR comment with a link to download the app.
|
||||
|
||||
on:
|
||||
@@ -24,10 +24,13 @@ concurrency:
|
||||
|
||||
jobs:
|
||||
trigger-on-command:
|
||||
if: >
|
||||
github.event_name == 'workflow_dispatch' ||
|
||||
(github.event.issue.pull_request && contains(github.event.comment.body, '.bundle'))
|
||||
name: Trigger on ".bundle" PR comment
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
continue: ${{ steps.command.outputs.continue || github.event_name == 'workflow_dispatch' }}
|
||||
continue: 'true'
|
||||
pr_number: ${{ steps.command.outputs.issue_number || github.event.inputs.pr_number }}
|
||||
pr_sha: ${{ steps.get_pr_info.outputs.sha }}
|
||||
steps:
|
||||
@@ -48,7 +51,7 @@ jobs:
|
||||
echo "Actor: ${ACTOR}"
|
||||
echo "Repository: ${REPOSITORY}"
|
||||
|
||||
- if: ${{ github.event_name == 'issue_comment' }}
|
||||
- name: Run command action
|
||||
uses: github/command@319d5236cc34ed2cb72a47c058a363db0b628ebe # pin@v1.3.0
|
||||
id: command
|
||||
with:
|
||||
|
||||
42
.github/workflows/release.yml
vendored
42
.github/workflows/release.yml
vendored
@@ -7,6 +7,13 @@ on:
|
||||
- "v1.*"
|
||||
|
||||
name: Release
|
||||
|
||||
# Permissions needed for AWS OIDC authentication in called workflows
|
||||
permissions:
|
||||
id-token: write # Required for AWS OIDC authentication in called workflow
|
||||
contents: write # Required for creating releases and by actions/checkout
|
||||
actions: read # May be needed for some workflows
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.ref }}
|
||||
cancel-in-progress: true
|
||||
@@ -60,26 +67,31 @@ jobs:
|
||||
APPLE_ID_PASSWORD: ${{ secrets.APPLE_ID_PASSWORD }}
|
||||
APPLE_TEAM_ID: ${{ secrets.APPLE_TEAM_ID }}
|
||||
|
||||
# ------------------------------------------------------------
|
||||
# 5) Bundle Desktop App (Linux)
|
||||
# ------------------------------------------------------------
|
||||
bundle-desktop-linux:
|
||||
uses: ./.github/workflows/bundle-desktop-linux.yml
|
||||
|
||||
# # ------------------------------------------------------------
|
||||
# # 5) Bundle Desktop App (Windows)
|
||||
# # 6) Bundle Desktop App (Windows)
|
||||
# # ------------------------------------------------------------
|
||||
# bundle-desktop-windows:
|
||||
# uses: ./.github/workflows/bundle-desktop-windows.yml
|
||||
# # Signing is disabled by default until we have a certificate
|
||||
# with:
|
||||
# signing: false
|
||||
# # Uncomment and configure these when we have a certificate:
|
||||
# # secrets:
|
||||
# # WINDOWS_CERTIFICATE: ${{ secrets.WINDOWS_CERTIFICATE }}
|
||||
# # WINDOWS_CERTIFICATE_PASSWORD: ${{ secrets.WINDOWS_CERTIFICATE_PASSWORD }}
|
||||
bundle-desktop-windows:
|
||||
uses: ./.github/workflows/bundle-desktop-windows.yml
|
||||
with:
|
||||
signing: true
|
||||
secrets:
|
||||
WINDOWS_CODESIGN_CERTIFICATE: ${{ secrets.WINDOWS_CODESIGN_CERTIFICATE }}
|
||||
WINDOW_SIGNING_ROLE: ${{ secrets.WINDOW_SIGNING_ROLE }}
|
||||
WINDOW_SIGNING_ROLE_TAG: ${{ secrets.WINDOW_SIGNING_ROLE_TAG }}
|
||||
|
||||
# ------------------------------------
|
||||
# 6) Create/Update GitHub Release
|
||||
# 7) Create/Update GitHub Release
|
||||
# ------------------------------------
|
||||
release:
|
||||
name: Release
|
||||
runs-on: ubuntu-latest
|
||||
needs: [build-cli, install-script, bundle-desktop, bundle-desktop-intel]
|
||||
needs: [build-cli, install-script, bundle-desktop, bundle-desktop-intel, bundle-desktop-linux, bundle-desktop-windows]
|
||||
permissions:
|
||||
contents: write
|
||||
steps:
|
||||
@@ -95,7 +107,10 @@ jobs:
|
||||
token: ${{ secrets.GITHUB_TOKEN }}
|
||||
artifacts: |
|
||||
goose-*.tar.bz2
|
||||
goose-*.zip
|
||||
Goose*.zip
|
||||
*.deb
|
||||
*.rpm
|
||||
download_cli.sh
|
||||
allowUpdates: true
|
||||
omitBody: true
|
||||
@@ -110,7 +125,10 @@ jobs:
|
||||
token: ${{ secrets.GITHUB_TOKEN }}
|
||||
artifacts: |
|
||||
goose-*.tar.bz2
|
||||
goose-*.zip
|
||||
Goose*.zip
|
||||
*.deb
|
||||
*.rpm
|
||||
download_cli.sh
|
||||
allowUpdates: true
|
||||
omitBody: true
|
||||
|
||||
30
.github/workflows/reply-to-recipe.yml
vendored
Normal file
30
.github/workflows/reply-to-recipe.yml
vendored
Normal file
@@ -0,0 +1,30 @@
|
||||
name: Auto-reply to Recipe Submissions
|
||||
|
||||
on:
|
||||
issues:
|
||||
types: [opened]
|
||||
|
||||
jobs:
|
||||
thank-you-comment:
|
||||
if: contains(github.event.issue.title, '[Recipe]')
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Add thank-you comment
|
||||
uses: actions/github-script@v7
|
||||
with:
|
||||
script: |
|
||||
const commentBody = [
|
||||
"🎉 Thanks for submitting your Goose recipe to the Cookbook!",
|
||||
"",
|
||||
"We appreciate you sharing your workflow with the community — our team will review your submission soon.",
|
||||
"If accepted, it’ll be added to the [Goose Recipes Cookbook](https://block.github.io/goose/recipes) and you’ll receive LLM credits as a thank-you!",
|
||||
"",
|
||||
"Stay tuned — and keep those recipes coming 🧑🍳🔥"
|
||||
].join('\n');
|
||||
|
||||
github.issues.createComment({
|
||||
issue_number: context.issue.number,
|
||||
owner: context.repo.owner,
|
||||
repo: context.repo.repo,
|
||||
body: commentBody
|
||||
});
|
||||
10
.gitignore
vendored
10
.gitignore
vendored
@@ -12,6 +12,7 @@ tmp/
|
||||
# will have compiled files and executables
|
||||
debug/
|
||||
target/
|
||||
.goose/
|
||||
|
||||
# These are backup files generated by rustfmt
|
||||
**/*.rs.bk
|
||||
@@ -30,6 +31,9 @@ ui/desktop/src/bin/goose_llm.dll
|
||||
# Hermit
|
||||
.hermit/
|
||||
|
||||
# Claude
|
||||
.claude
|
||||
|
||||
debug_*.txt
|
||||
|
||||
# Docs
|
||||
@@ -48,3 +52,9 @@ benchmark-*
|
||||
benchconf.json
|
||||
scripts/fake.sh
|
||||
do_not_version/
|
||||
/ui/desktop/src/bin/temporal
|
||||
/temporal-service/temporal.db
|
||||
/ui/desktop/src/bin/temporal.db
|
||||
/temporal.db
|
||||
/ui/desktop/src/bin/goose-scheduler-executor
|
||||
/ui/desktop/src/bin/goose
|
||||
|
||||
@@ -9,13 +9,3 @@ if git diff --cached --name-only | grep -q "^ui/desktop/"; then
|
||||
echo "Warning: ui/desktop directory does not exist, skipping lint-staged"
|
||||
fi
|
||||
fi
|
||||
|
||||
# Only auto-format ui-v2 TS code if relevant files are modified
|
||||
if git diff --cached --name-only | grep -q "^ui-v2/"; then
|
||||
if [ -d "ui-v2" ]; then
|
||||
. "$(dirname -- "$0")/_/husky.sh"
|
||||
cd ui-v2 && npx lint-staged
|
||||
else
|
||||
echo "Warning: ui-v2 directory does not exist, skipping lint-staged"
|
||||
fi
|
||||
fi
|
||||
|
||||
@@ -44,11 +44,13 @@ And then once you have a connection to an LLM provider working, you can run a se
|
||||
```
|
||||
|
||||
These same commands can be recompiled and immediately run using `cargo run -p goose-cli` for iteration.
|
||||
As you make changes to the rust code, you can try it out on the CLI, or also run checks and tests:
|
||||
As you make changes to the rust code, you can try it out on the CLI, or also run checks, tests, and linter:
|
||||
|
||||
```
|
||||
cargo check # do your changes compile
|
||||
cargo test # do the tests pass with your changes.
|
||||
cargo test # do the tests pass with your changes
|
||||
cargo fmt # format your code
|
||||
cargo clippy # run the linter
|
||||
```
|
||||
|
||||
### Node
|
||||
@@ -65,6 +67,21 @@ you can talk to goose!
|
||||
|
||||
You can now make changes in the code in ui/desktop to iterate on the GUI half of goose.
|
||||
|
||||
### Regenerating the OpenAPI schema
|
||||
|
||||
The file `ui/desktop/openapi.json` is automatically generated during the build.
|
||||
It is written by the `generate_schema` binary in `crates/goose-server`.
|
||||
If you need to update the spec without starting the UI, run:
|
||||
|
||||
```
|
||||
just generate-openapi
|
||||
```
|
||||
|
||||
This command regenerates `ui/desktop/openapi.json` and then runs the UI's
|
||||
`generate-api` script to rebuild the TypeScript client from that spec.
|
||||
|
||||
Changes to the API should be made in the Rust source under `crates/goose-server/src/`.
|
||||
|
||||
## Creating a fork
|
||||
|
||||
To fork the repository:
|
||||
|
||||
493
Cargo.lock
generated
493
Cargo.lock
generated
@@ -49,6 +49,7 @@ dependencies = [
|
||||
"const-random",
|
||||
"getrandom 0.2.15",
|
||||
"once_cell",
|
||||
"serde",
|
||||
"version_check",
|
||||
"zerocopy",
|
||||
]
|
||||
@@ -715,6 +716,7 @@ dependencies = [
|
||||
"aws-credential-types",
|
||||
"aws-sigv4",
|
||||
"aws-smithy-async",
|
||||
"aws-smithy-eventstream",
|
||||
"aws-smithy-http 0.60.12",
|
||||
"aws-smithy-runtime",
|
||||
"aws-smithy-runtime-api",
|
||||
@@ -778,6 +780,29 @@ dependencies = [
|
||||
"tracing",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "aws-sdk-sagemakerruntime"
|
||||
version = "1.63.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8c3188bb9f962a9e1781c917dbe7f016ab9430e4bd81ba7daf422e58d86a3595"
|
||||
dependencies = [
|
||||
"aws-credential-types",
|
||||
"aws-runtime",
|
||||
"aws-smithy-async",
|
||||
"aws-smithy-eventstream",
|
||||
"aws-smithy-http 0.61.1",
|
||||
"aws-smithy-json",
|
||||
"aws-smithy-runtime",
|
||||
"aws-smithy-runtime-api",
|
||||
"aws-smithy-types",
|
||||
"aws-types",
|
||||
"bytes",
|
||||
"http 0.2.12",
|
||||
"once_cell",
|
||||
"regex-lite",
|
||||
"tracing",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "aws-sdk-sso"
|
||||
version = "1.61.0"
|
||||
@@ -852,6 +877,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "9bfe75fad52793ce6dec0dc3d4b1f388f038b5eb866c8d4d7f3a8e21b5ea5051"
|
||||
dependencies = [
|
||||
"aws-credential-types",
|
||||
"aws-smithy-eventstream",
|
||||
"aws-smithy-http 0.60.12",
|
||||
"aws-smithy-runtime-api",
|
||||
"aws-smithy-types",
|
||||
@@ -1251,7 +1277,16 @@ dependencies = [
|
||||
"rustc-hash 1.1.0",
|
||||
"shlex",
|
||||
"syn 2.0.99",
|
||||
"which",
|
||||
"which 4.4.2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "bit-set"
|
||||
version = "0.5.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "0700ddab506f33b20a03b13996eccd309a48e5ff77d0d95926aa0210fb4e95f1"
|
||||
dependencies = [
|
||||
"bit-vec 0.6.3",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -1260,9 +1295,15 @@ version = "0.8.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "08807e080ed7f9d5433fa9b275196cfc35414f66a0c79d864dc51a0d825231a3"
|
||||
dependencies = [
|
||||
"bit-vec",
|
||||
"bit-vec 0.8.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "bit-vec"
|
||||
version = "0.6.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "349f9b6a179ed607305526ca489b34ad0a41aed5f7980fa90eb03160b69598fb"
|
||||
|
||||
[[package]]
|
||||
name = "bit-vec"
|
||||
version = "0.8.0"
|
||||
@@ -1348,6 +1389,12 @@ dependencies = [
|
||||
"generic-array",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "borrow-or-share"
|
||||
version = "0.2.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "3eeab4423108c5d7c744f4d234de88d18d636100093ae04caf4825134b9c3a32"
|
||||
|
||||
[[package]]
|
||||
name = "brotli"
|
||||
version = "7.0.0"
|
||||
@@ -2620,37 +2667,6 @@ dependencies = [
|
||||
"syn 2.0.99",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "derive_builder"
|
||||
version = "0.20.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "507dfb09ea8b7fa618fcf76e953f4f5e192547945816d5358edffe39f6f94947"
|
||||
dependencies = [
|
||||
"derive_builder_macro",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "derive_builder_core"
|
||||
version = "0.20.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "2d5bcf7b024d6835cfb3d473887cd966994907effbe9227e8c8219824d06c4e8"
|
||||
dependencies = [
|
||||
"darling",
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn 2.0.99",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "derive_builder_macro"
|
||||
version = "0.20.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ab63b0e2bf4d5928aff72e83a7dace85d7bba5fe12dcc3c5a572d78caffd3f3c"
|
||||
dependencies = [
|
||||
"derive_builder_core",
|
||||
"syn 2.0.99",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "digest"
|
||||
version = "0.10.7"
|
||||
@@ -2760,12 +2776,6 @@ dependencies = [
|
||||
"zip 0.6.6",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "dotenv"
|
||||
version = "0.15.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "77c90badedccf4105eca100756a0b1289e191f6fcbdadd3cee1d2f614f97da8f"
|
||||
|
||||
[[package]]
|
||||
name = "dotenvy"
|
||||
version = "0.15.7"
|
||||
@@ -2796,6 +2806,15 @@ version = "1.15.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719"
|
||||
|
||||
[[package]]
|
||||
name = "email_address"
|
||||
version = "0.2.9"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "e079f19b08ca6239f47f8ba8509c11cf3ea30095831f7fed61441475edd8c449"
|
||||
dependencies = [
|
||||
"serde",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "encode_unicode"
|
||||
version = "1.0.0"
|
||||
@@ -2830,7 +2849,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "33d852cb9b869c2a9b3df2f71a3074817f01e1844f839a144f5fcef059a4eb5d"
|
||||
dependencies = [
|
||||
"libc",
|
||||
"windows-sys 0.59.0",
|
||||
"windows-sys 0.52.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -2848,15 +2867,6 @@ version = "3.3.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "a5d9305ccc6942a704f4335694ecd3de2ea531b114ac2d51f5f843750787a92f"
|
||||
|
||||
[[package]]
|
||||
name = "esaxx-rs"
|
||||
version = "0.1.10"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d817e038c30374a4bcb22f94d0a8a0e216958d4c3dcde369b1439fec4bdda6e6"
|
||||
dependencies = [
|
||||
"cc",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "etcetera"
|
||||
version = "0.8.0"
|
||||
@@ -2916,13 +2926,24 @@ dependencies = [
|
||||
"zune-inflate",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "fancy-regex"
|
||||
version = "0.13.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "531e46835a22af56d1e3b66f04844bed63158bc094a628bec1d321d9b4c44bf2"
|
||||
dependencies = [
|
||||
"bit-set 0.5.3",
|
||||
"regex-automata 0.4.9",
|
||||
"regex-syntax 0.8.5",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "fancy-regex"
|
||||
version = "0.14.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "6e24cb5a94bcae1e5408b0effca5cd7172ea3c5755049c5f3af4cd283a165298"
|
||||
dependencies = [
|
||||
"bit-set",
|
||||
"bit-set 0.8.0",
|
||||
"regex-automata 0.4.9",
|
||||
"regex-syntax 0.8.5",
|
||||
]
|
||||
@@ -3006,6 +3027,17 @@ dependencies = [
|
||||
"miniz_oxide",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "fluent-uri"
|
||||
version = "0.3.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "1918b65d96df47d3591bed19c5cca17e3fa5d0707318e4b5ef2eae01764df7e5"
|
||||
dependencies = [
|
||||
"borrow-or-share",
|
||||
"ref-cast",
|
||||
"serde",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "fnv"
|
||||
version = "1.0.7"
|
||||
@@ -3069,6 +3101,16 @@ dependencies = [
|
||||
"percent-encoding",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "fraction"
|
||||
version = "0.15.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "0f158e3ff0a1b334408dc9fb811cd99b446986f4d8b741bb08f9df1604085ae7"
|
||||
dependencies = [
|
||||
"lazy_static",
|
||||
"num",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "fragile"
|
||||
version = "2.0.0"
|
||||
@@ -3422,14 +3464,16 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "goose"
|
||||
version = "1.0.24"
|
||||
version = "1.1.0"
|
||||
dependencies = [
|
||||
"ahash",
|
||||
"anyhow",
|
||||
"arrow",
|
||||
"async-stream",
|
||||
"async-trait",
|
||||
"aws-config",
|
||||
"aws-sdk-bedrockruntime",
|
||||
"aws-sdk-sagemakerruntime",
|
||||
"aws-smithy-types",
|
||||
"axum",
|
||||
"base64 0.21.7",
|
||||
@@ -3437,14 +3481,16 @@ dependencies = [
|
||||
"chrono",
|
||||
"criterion",
|
||||
"ctor",
|
||||
"dotenv",
|
||||
"dashmap 6.1.0",
|
||||
"dirs 5.0.1",
|
||||
"dotenvy",
|
||||
"etcetera",
|
||||
"fs2",
|
||||
"futures",
|
||||
"futures-util",
|
||||
"include_dir",
|
||||
"indoc 2.0.6",
|
||||
"jsonwebtoken 9.3.1",
|
||||
"jsonschema",
|
||||
"jsonwebtoken",
|
||||
"keyring",
|
||||
"lancedb",
|
||||
"lazy_static",
|
||||
@@ -3454,10 +3500,10 @@ dependencies = [
|
||||
"mockall",
|
||||
"nanoid",
|
||||
"once_cell",
|
||||
"paste",
|
||||
"rand 0.8.5",
|
||||
"regex",
|
||||
"reqwest 0.12.12",
|
||||
"rmcp",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"serde_urlencoded",
|
||||
@@ -3467,13 +3513,15 @@ dependencies = [
|
||||
"temp-env",
|
||||
"tempfile",
|
||||
"thiserror 1.0.69",
|
||||
"tokenizers",
|
||||
"tiktoken-rs",
|
||||
"tokio",
|
||||
"tokio-cron-scheduler",
|
||||
"tokio-stream",
|
||||
"tokio-util",
|
||||
"tracing",
|
||||
"tracing-subscriber",
|
||||
"url",
|
||||
"urlencoding",
|
||||
"utoipa",
|
||||
"uuid",
|
||||
"webbrowser 0.8.15",
|
||||
@@ -3509,7 +3557,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "goose-bench"
|
||||
version = "1.0.24"
|
||||
version = "1.1.0"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"async-trait",
|
||||
@@ -3522,6 +3570,7 @@ dependencies = [
|
||||
"once_cell",
|
||||
"paste",
|
||||
"regex",
|
||||
"rmcp",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"tokio",
|
||||
@@ -3533,7 +3582,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "goose-cli"
|
||||
version = "1.0.24"
|
||||
version = "1.1.0"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"async-trait",
|
||||
@@ -3545,6 +3594,7 @@ dependencies = [
|
||||
"clap 4.5.31",
|
||||
"cliclack",
|
||||
"console",
|
||||
"dirs 5.0.1",
|
||||
"etcetera",
|
||||
"futures",
|
||||
"goose",
|
||||
@@ -3552,6 +3602,7 @@ dependencies = [
|
||||
"goose-mcp",
|
||||
"http 1.2.0",
|
||||
"indicatif",
|
||||
"jsonschema",
|
||||
"mcp-client",
|
||||
"mcp-core",
|
||||
"mcp-server",
|
||||
@@ -3561,6 +3612,7 @@ dependencies = [
|
||||
"rand 0.8.5",
|
||||
"regex",
|
||||
"reqwest 0.12.12",
|
||||
"rmcp",
|
||||
"rustyline",
|
||||
"serde",
|
||||
"serde_json",
|
||||
@@ -3572,6 +3624,7 @@ dependencies = [
|
||||
"test-case",
|
||||
"tokio",
|
||||
"tokio-stream",
|
||||
"tokio-util",
|
||||
"tower-http",
|
||||
"tracing",
|
||||
"tracing-appender",
|
||||
@@ -3582,7 +3635,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "goose-ffi"
|
||||
version = "1.0.24"
|
||||
version = "1.1.0"
|
||||
dependencies = [
|
||||
"cbindgen",
|
||||
"futures",
|
||||
@@ -3596,7 +3649,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "goose-llm"
|
||||
version = "1.0.24"
|
||||
version = "1.1.0"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"async-trait",
|
||||
@@ -3604,7 +3657,8 @@ dependencies = [
|
||||
"chrono",
|
||||
"criterion",
|
||||
"ctor",
|
||||
"dotenv",
|
||||
"dotenvy",
|
||||
"goose",
|
||||
"include_dir",
|
||||
"indoc 1.0.9",
|
||||
"lazy_static",
|
||||
@@ -3625,7 +3679,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "goose-mcp"
|
||||
version = "1.0.24"
|
||||
version = "1.1.0"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"async-trait",
|
||||
@@ -3633,6 +3687,7 @@ dependencies = [
|
||||
"chrono",
|
||||
"docx-rs",
|
||||
"etcetera",
|
||||
"glob",
|
||||
"google-apis-common",
|
||||
"google-docs1",
|
||||
"google-drive3",
|
||||
@@ -3653,6 +3708,7 @@ dependencies = [
|
||||
"once_cell",
|
||||
"regex",
|
||||
"reqwest 0.11.27",
|
||||
"rmcp",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"serde_with",
|
||||
@@ -3667,15 +3723,15 @@ dependencies = [
|
||||
"tracing-subscriber",
|
||||
"umya-spreadsheet",
|
||||
"url",
|
||||
"urlencoding",
|
||||
"utoipa",
|
||||
"webbrowser 0.8.15",
|
||||
"which 6.0.3",
|
||||
"xcap",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "goose-server"
|
||||
version = "1.0.24"
|
||||
version = "1.1.0"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"async-trait",
|
||||
@@ -3696,6 +3752,7 @@ dependencies = [
|
||||
"mcp-server",
|
||||
"once_cell",
|
||||
"reqwest 0.12.12",
|
||||
"rmcp",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"serde_yaml",
|
||||
@@ -3703,6 +3760,7 @@ dependencies = [
|
||||
"tokio",
|
||||
"tokio-cron-scheduler",
|
||||
"tokio-stream",
|
||||
"tokio-util",
|
||||
"tower 0.5.2",
|
||||
"tower-http",
|
||||
"tracing",
|
||||
@@ -4480,7 +4538,7 @@ checksum = "e19b23d53f35ce9f56aebc7d1bb4e6ac1e9c0db7ac85c8d1760c04379edced37"
|
||||
dependencies = [
|
||||
"hermit-abi 0.4.0",
|
||||
"libc",
|
||||
"windows-sys 0.59.0",
|
||||
"windows-sys 0.52.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -4498,15 +4556,6 @@ dependencies = [
|
||||
"either",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "itertools"
|
||||
version = "0.11.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "b1c173a5686ce8bfa551b3563d0c2170bf24ca44da99c7ca4bfdab5418c3fe57"
|
||||
dependencies = [
|
||||
"either",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "itertools"
|
||||
version = "0.12.1"
|
||||
@@ -4592,6 +4641,33 @@ dependencies = [
|
||||
"serde",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "jsonschema"
|
||||
version = "0.30.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "f1b46a0365a611fbf1d2143104dcf910aada96fafd295bab16c60b802bf6fa1d"
|
||||
dependencies = [
|
||||
"ahash",
|
||||
"base64 0.22.1",
|
||||
"bytecount",
|
||||
"email_address",
|
||||
"fancy-regex 0.14.0",
|
||||
"fraction",
|
||||
"idna",
|
||||
"itoa",
|
||||
"num-cmp",
|
||||
"num-traits",
|
||||
"once_cell",
|
||||
"percent-encoding",
|
||||
"referencing",
|
||||
"regex",
|
||||
"regex-syntax 0.8.5",
|
||||
"reqwest 0.12.12",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"uuid-simd",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "jsonwebtoken"
|
||||
version = "8.3.0"
|
||||
@@ -5215,7 +5291,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "fc2f4eb4bc735547cfed7c0a4922cbd04a4655978c09b54f1f7b228750664c34"
|
||||
dependencies = [
|
||||
"cfg-if",
|
||||
"windows-targets 0.52.6",
|
||||
"windows-targets 0.48.5",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -5365,22 +5441,6 @@ dependencies = [
|
||||
"libc",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "macro_rules_attribute"
|
||||
version = "0.2.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8a82271f7bc033d84bbca59a3ce3e4159938cb08a9c3aebbe54d215131518a13"
|
||||
dependencies = [
|
||||
"macro_rules_attribute-proc_macro",
|
||||
"paste",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "macro_rules_attribute-proc_macro"
|
||||
version = "0.2.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "b8dd856d451cc0da70e2ef2ce95a18e39a93b7558bedf10201ad28503f918568"
|
||||
|
||||
[[package]]
|
||||
name = "malloc_buf"
|
||||
version = "0.0.6"
|
||||
@@ -5421,21 +5481,30 @@ version = "0.1.0"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"async-trait",
|
||||
"axum",
|
||||
"base64 0.22.1",
|
||||
"chrono",
|
||||
"eventsource-client",
|
||||
"futures",
|
||||
"mcp-core",
|
||||
"nanoid",
|
||||
"nix 0.30.1",
|
||||
"rand 0.8.5",
|
||||
"reqwest 0.11.27",
|
||||
"rmcp",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"serde_urlencoded",
|
||||
"sha2",
|
||||
"thiserror 1.0.69",
|
||||
"tokio",
|
||||
"tokio-util",
|
||||
"tower 0.4.13",
|
||||
"tower-service",
|
||||
"tracing",
|
||||
"tracing-subscriber",
|
||||
"url",
|
||||
"webbrowser 1.0.4",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -5446,6 +5515,7 @@ dependencies = [
|
||||
"async-trait",
|
||||
"base64 0.21.7",
|
||||
"chrono",
|
||||
"rmcp",
|
||||
"schemars",
|
||||
"serde",
|
||||
"serde_json",
|
||||
@@ -5455,22 +5525,6 @@ dependencies = [
|
||||
"utoipa",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "mcp-macros"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"async-trait",
|
||||
"convert_case",
|
||||
"mcp-core",
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"schemars",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"syn 2.0.99",
|
||||
"tokio",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "mcp-server"
|
||||
version = "0.1.0"
|
||||
@@ -5479,8 +5533,8 @@ dependencies = [
|
||||
"async-trait",
|
||||
"futures",
|
||||
"mcp-core",
|
||||
"mcp-macros",
|
||||
"pin-project",
|
||||
"rmcp",
|
||||
"schemars",
|
||||
"serde",
|
||||
"serde_json",
|
||||
@@ -5528,6 +5582,12 @@ dependencies = [
|
||||
"libc",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "memo-map"
|
||||
version = "0.3.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "38d1115007560874e373613744c6fba374c17688327a71c1476d1a5954cc857b"
|
||||
|
||||
[[package]]
|
||||
name = "mime"
|
||||
version = "0.3.17"
|
||||
@@ -5546,10 +5606,12 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "minijinja"
|
||||
version = "2.8.0"
|
||||
version = "2.10.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "6e36f1329330bb1614c94b78632b9ce45dd7d761f3304a1bed07b2990a7c5097"
|
||||
checksum = "dd72e8b4e42274540edabec853f607c015c73436159b06c39c7af85a20433155"
|
||||
dependencies = [
|
||||
"memo-map",
|
||||
"self_cell",
|
||||
"serde",
|
||||
]
|
||||
|
||||
@@ -5641,6 +5703,7 @@ dependencies = [
|
||||
]
|
||||
|
||||
[[package]]
|
||||
|
||||
name = "monostate"
|
||||
version = "0.1.14"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
@@ -5835,6 +5898,12 @@ dependencies = [
|
||||
"num-traits",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "num-cmp"
|
||||
version = "0.1.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "63335b2e2c34fae2fb0aa2cecfd9f0832a1e24b3b32ecec612c3426d46dc8aaa"
|
||||
|
||||
[[package]]
|
||||
name = "num-complex"
|
||||
version = "0.4.6"
|
||||
@@ -6063,9 +6132,9 @@ checksum = "b410bbe7e14ab526a0e86877eb47c6996a2bd7746f027ba551028c925390e4e9"
|
||||
|
||||
[[package]]
|
||||
name = "openssl"
|
||||
version = "0.10.71"
|
||||
version = "0.10.73"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "5e14130c6a98cd258fdcb0fb6d744152343ff729cbfcb28c656a9d12b999fbcd"
|
||||
checksum = "8505734d46c8ab1e19a1dce3aef597ad87dcb4c37e7188231769bd6bd51cebf8"
|
||||
dependencies = [
|
||||
"bitflags 2.9.0",
|
||||
"cfg-if",
|
||||
@@ -6104,9 +6173,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "openssl-sys"
|
||||
version = "0.9.106"
|
||||
version = "0.9.109"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8bb61ea9811cc39e3c2069f40b8b8e2e70d8569b361f879786cc7ed48b777cdd"
|
||||
checksum = "90096e2e47630d78b7d1c20952dc621f957103f8bc2c8359ec81290d75238571"
|
||||
dependencies = [
|
||||
"cc",
|
||||
"libc",
|
||||
@@ -6775,7 +6844,7 @@ dependencies = [
|
||||
"once_cell",
|
||||
"socket2 0.5.8",
|
||||
"tracing",
|
||||
"windows-sys 0.59.0",
|
||||
"windows-sys 0.52.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -6953,17 +7022,6 @@ dependencies = [
|
||||
"rayon-core",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "rayon-cond"
|
||||
version = "0.3.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "059f538b55efd2309c9794130bc149c6a553db90e9d99c2030785c82f0bd7df9"
|
||||
dependencies = [
|
||||
"either",
|
||||
"itertools 0.11.0",
|
||||
"rayon",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "rayon-core"
|
||||
version = "1.12.1"
|
||||
@@ -7005,6 +7063,40 @@ dependencies = [
|
||||
"thiserror 2.0.12",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "ref-cast"
|
||||
version = "1.0.24"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "4a0ae411dbe946a674d89546582cea4ba2bb8defac896622d6496f14c23ba5cf"
|
||||
dependencies = [
|
||||
"ref-cast-impl",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "ref-cast-impl"
|
||||
version = "1.0.24"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "1165225c21bff1f3bbce98f5a1f889949bc902d3575308cc7b0de30b4f6d27c7"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn 2.0.99",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "referencing"
|
||||
version = "0.30.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "c8eff4fa778b5c2a57e85c5f2fe3a709c52f0e60d23146e2151cbef5893f420e"
|
||||
dependencies = [
|
||||
"ahash",
|
||||
"fluent-uri",
|
||||
"once_cell",
|
||||
"parking_lot",
|
||||
"percent-encoding",
|
||||
"serde_json",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "regex"
|
||||
version = "1.11.1"
|
||||
@@ -7124,6 +7216,7 @@ dependencies = [
|
||||
"js-sys",
|
||||
"log",
|
||||
"mime",
|
||||
"mime_guess",
|
||||
"once_cell",
|
||||
"percent-encoding",
|
||||
"pin-project-lite",
|
||||
@@ -7188,6 +7281,40 @@ dependencies = [
|
||||
"windows-sys 0.52.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "rmcp"
|
||||
version = "0.2.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "37f2048a81a7ff7e8ef6bc5abced70c3d9114c8f03d85d7aaaafd9fd04f12e9e"
|
||||
dependencies = [
|
||||
"base64 0.22.1",
|
||||
"chrono",
|
||||
"futures",
|
||||
"paste",
|
||||
"pin-project-lite",
|
||||
"rmcp-macros",
|
||||
"schemars",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"thiserror 2.0.12",
|
||||
"tokio",
|
||||
"tokio-util",
|
||||
"tracing",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "rmcp-macros"
|
||||
version = "0.2.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "72398e694b9f6dbb5de960cf158c8699e6a1854cb5bbaac7de0646b2005763c4"
|
||||
dependencies = [
|
||||
"darling",
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"serde_json",
|
||||
"syn 2.0.99",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "roaring"
|
||||
version = "0.10.9"
|
||||
@@ -7302,7 +7429,7 @@ dependencies = [
|
||||
"errno",
|
||||
"libc",
|
||||
"linux-raw-sys 0.4.15",
|
||||
"windows-sys 0.59.0",
|
||||
"windows-sys 0.52.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -7315,7 +7442,7 @@ dependencies = [
|
||||
"errno",
|
||||
"libc",
|
||||
"linux-raw-sys 0.9.4",
|
||||
"windows-sys 0.59.0",
|
||||
"windows-sys 0.52.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -7492,6 +7619,7 @@ version = "0.8.22"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "3fbf2ae1b8bc8e02df939598064d22402220cd5bbcca1c76f7d6a310974d5615"
|
||||
dependencies = [
|
||||
"chrono",
|
||||
"dyn-clone",
|
||||
"schemars_derive",
|
||||
"serde",
|
||||
@@ -7600,6 +7728,12 @@ dependencies = [
|
||||
"libc",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "self_cell"
|
||||
version = "1.2.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "0f7d95a54511e0c7be3f51e8867aa8cf35148d7b9445d44de2f943e2b206e749"
|
||||
|
||||
[[package]]
|
||||
name = "semver"
|
||||
version = "1.0.26"
|
||||
@@ -7954,6 +8088,7 @@ dependencies = [
|
||||
]
|
||||
|
||||
[[package]]
|
||||
|
||||
name = "spin"
|
||||
version = "0.5.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
@@ -8385,7 +8520,7 @@ dependencies = [
|
||||
"getrandom 0.3.1",
|
||||
"once_cell",
|
||||
"rustix 0.38.44",
|
||||
"windows-sys 0.59.0",
|
||||
"windows-sys 0.52.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -8530,6 +8665,22 @@ dependencies = [
|
||||
"weezl",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tiktoken-rs"
|
||||
version = "0.6.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "44075987ee2486402f0808505dd65692163d243a337fc54363d49afac41087f6"
|
||||
dependencies = [
|
||||
"anyhow",
|
||||
"base64 0.21.7",
|
||||
"bstr",
|
||||
"fancy-regex 0.13.0",
|
||||
"lazy_static",
|
||||
"parking_lot",
|
||||
"regex",
|
||||
"rustc-hash 1.1.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "time"
|
||||
version = "0.3.38"
|
||||
@@ -8607,38 +8758,6 @@ version = "0.1.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20"
|
||||
|
||||
[[package]]
|
||||
name = "tokenizers"
|
||||
version = "0.20.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "3b08cc37428a476fc9e20ac850132a513a2e1ce32b6a31addf2b74fa7033b905"
|
||||
dependencies = [
|
||||
"aho-corasick",
|
||||
"derive_builder",
|
||||
"esaxx-rs",
|
||||
"getrandom 0.2.15",
|
||||
"indicatif",
|
||||
"itertools 0.12.1",
|
||||
"lazy_static",
|
||||
"log",
|
||||
"macro_rules_attribute",
|
||||
"monostate",
|
||||
"onig",
|
||||
"paste",
|
||||
"rand 0.8.5",
|
||||
"rayon",
|
||||
"rayon-cond",
|
||||
"regex",
|
||||
"regex-syntax 0.8.5",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"spm_precompiled",
|
||||
"thiserror 1.0.69",
|
||||
"unicode-normalization-alignments",
|
||||
"unicode-segmentation",
|
||||
"unicode_categories",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tokio"
|
||||
version = "1.43.1"
|
||||
@@ -8750,9 +8869,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "tokio-util"
|
||||
version = "0.7.13"
|
||||
version = "0.7.15"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d7fcaa8d55a2bdd6b83ace262b016eca0d79ee02818c5c1bcdf0305114081078"
|
||||
checksum = "66a539a9ad6d5d281510d5bd368c973d636c02dbf8a67300bfb6b950696ad7df"
|
||||
dependencies = [
|
||||
"bytes",
|
||||
"futures-core",
|
||||
@@ -9045,7 +9164,7 @@ dependencies = [
|
||||
"cfb",
|
||||
"chrono",
|
||||
"encoding_rs",
|
||||
"fancy-regex",
|
||||
"fancy-regex 0.14.0",
|
||||
"getrandom 0.2.15",
|
||||
"hmac",
|
||||
"html_parser",
|
||||
@@ -9078,15 +9197,6 @@ version = "0.1.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "3b09c83c3c29d37506a3e260c08c03743a6bb66a9cd432c6934ab501a190571f"
|
||||
|
||||
[[package]]
|
||||
name = "unicode-normalization-alignments"
|
||||
version = "0.1.12"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "43f613e4fa046e69818dd287fdc4bc78175ff20331479dab6e1b0f98d57062de"
|
||||
dependencies = [
|
||||
"smallvec",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "unicode-segmentation"
|
||||
version = "1.12.0"
|
||||
@@ -9105,12 +9215,6 @@ version = "0.2.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "1fc81956842c57dac11422a97c3b8195a1ff727f06e85c84ed2e8aa277c9a0fd"
|
||||
|
||||
[[package]]
|
||||
name = "unicode_categories"
|
||||
version = "0.1.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "39ec24b3121d976906ece63c9daad25b85969647682eee313cb5779fdd69e14e"
|
||||
|
||||
[[package]]
|
||||
name = "uniffi"
|
||||
version = "0.29.2"
|
||||
@@ -9334,6 +9438,17 @@ dependencies = [
|
||||
"serde",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "uuid-simd"
|
||||
version = "0.8.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "23b082222b4f6619906941c17eb2297fff4c2fb96cb60164170522942a200bd8"
|
||||
dependencies = [
|
||||
"outref",
|
||||
"uuid",
|
||||
"vsimd",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "v_frame"
|
||||
version = "0.3.8"
|
||||
@@ -9618,6 +9733,18 @@ dependencies = [
|
||||
"rustix 0.38.44",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "which"
|
||||
version = "6.0.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "b4ee928febd44d98f2f459a4a79bd4d928591333a494a10a868418ac1b39cf1f"
|
||||
dependencies = [
|
||||
"either",
|
||||
"home",
|
||||
"rustix 0.38.44",
|
||||
"winsafe",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "wild"
|
||||
version = "2.2.1"
|
||||
@@ -9649,7 +9776,7 @@ version = "0.1.9"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb"
|
||||
dependencies = [
|
||||
"windows-sys 0.59.0",
|
||||
"windows-sys 0.48.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -10038,6 +10165,12 @@ dependencies = [
|
||||
"windows-sys 0.48.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "winsafe"
|
||||
version = "0.0.19"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d135d17ab770252ad95e9a872d365cf3090e3be864a34ab46f48555993efc904"
|
||||
|
||||
[[package]]
|
||||
name = "wiremock"
|
||||
version = "0.6.3"
|
||||
|
||||
10
Cargo.toml
10
Cargo.toml
@@ -4,12 +4,18 @@ resolver = "2"
|
||||
|
||||
[workspace.package]
|
||||
edition = "2021"
|
||||
version = "1.0.24"
|
||||
version = "1.1.0"
|
||||
authors = ["Block <ai-oss-tools@block.xyz>"]
|
||||
license = "Apache-2.0"
|
||||
repository = "https://github.com/block/goose"
|
||||
description = "An AI agent"
|
||||
|
||||
[workspace.lints.clippy]
|
||||
uninlined_format_args = "allow"
|
||||
|
||||
[workspace.dependencies]
|
||||
rmcp = { version = "0.2.1", features = ["schemars"] }
|
||||
|
||||
# Patch for Windows cross-compilation issue with crunchy
|
||||
[patch.crates-io]
|
||||
crunchy = { git = "https://github.com/nmathewson/crunchy", branch = "cross-compilation-fix" }
|
||||
crunchy = { git = "https://github.com/nmathewson/crunchy", branch = "cross-compilation-fix" }
|
||||
|
||||
193
Justfile
193
Justfile
@@ -59,6 +59,27 @@ copy-binary BUILD_MODE="release":
|
||||
echo "Binary not found in target/{{BUILD_MODE}}"; \
|
||||
exit 1; \
|
||||
fi
|
||||
@if [ -f ./target/{{BUILD_MODE}}/goose ]; then \
|
||||
echo "Copying goose CLI binary from target/{{BUILD_MODE}}..."; \
|
||||
cp -p ./target/{{BUILD_MODE}}/goose ./ui/desktop/src/bin/; \
|
||||
else \
|
||||
echo "Goose CLI binary not found in target/{{BUILD_MODE}}"; \
|
||||
exit 1; \
|
||||
fi
|
||||
@if [ -f ./temporal-service/temporal-service ]; then \
|
||||
echo "Copying temporal-service binary..."; \
|
||||
cp -p ./temporal-service/temporal-service ./ui/desktop/src/bin/; \
|
||||
else \
|
||||
echo "temporal-service binary not found. Building it..."; \
|
||||
cd temporal-service && ./build.sh && cp -p temporal-service ../ui/desktop/src/bin/; \
|
||||
fi
|
||||
@echo "Checking temporal CLI binary..."
|
||||
@if [ ! -f ./ui/desktop/src/bin/temporal ]; then \
|
||||
echo "temporal CLI binary not found in ui/desktop/src/bin/"; \
|
||||
echo "Please ensure temporal CLI is available or will be downloaded at runtime"; \
|
||||
else \
|
||||
echo "temporal CLI binary found"; \
|
||||
fi
|
||||
|
||||
# Copy binary command for Intel build
|
||||
copy-binary-intel:
|
||||
@@ -69,6 +90,27 @@ copy-binary-intel:
|
||||
echo "Intel release binary not found."; \
|
||||
exit 1; \
|
||||
fi
|
||||
@if [ -f ./target/x86_64-apple-darwin/release/goose ]; then \
|
||||
echo "Copying Intel goose CLI binary to ui/desktop/src/bin..."; \
|
||||
cp -p ./target/x86_64-apple-darwin/release/goose ./ui/desktop/src/bin/; \
|
||||
else \
|
||||
echo "Intel goose CLI binary not found."; \
|
||||
exit 1; \
|
||||
fi
|
||||
@if [ -f ./temporal-service/temporal-service ]; then \
|
||||
echo "Copying temporal-service binary..."; \
|
||||
cp -p ./temporal-service/temporal-service ./ui/desktop/src/bin/; \
|
||||
else \
|
||||
echo "temporal-service binary not found. Building it..."; \
|
||||
cd temporal-service && ./build.sh && cp -p temporal-service ../ui/desktop/src/bin/; \
|
||||
fi
|
||||
@echo "Checking temporal CLI binary..."
|
||||
@if [ ! -f ./ui/desktop/src/bin/temporal ]; then \
|
||||
echo "temporal CLI binary not found in ui/desktop/src/bin/"; \
|
||||
echo "Please ensure temporal CLI is available or will be downloaded at runtime"; \
|
||||
else \
|
||||
echo "temporal CLI binary found"; \
|
||||
fi
|
||||
|
||||
# Copy Windows binary command
|
||||
copy-binary-windows:
|
||||
@@ -80,6 +122,20 @@ copy-binary-windows:
|
||||
Write-Host 'Windows binary not found.' -ForegroundColor Red; \
|
||||
exit 1; \
|
||||
}"
|
||||
@powershell.exe -Command "if (Test-Path ./target/x86_64-pc-windows-gnu/release/goose-scheduler-executor.exe) { \
|
||||
Write-Host 'Copying Windows goose-scheduler-executor binary...'; \
|
||||
Copy-Item -Path './target/x86_64-pc-windows-gnu/release/goose-scheduler-executor.exe' -Destination './ui/desktop/src/bin/' -Force; \
|
||||
} else { \
|
||||
Write-Host 'Windows goose-scheduler-executor binary not found.' -ForegroundColor Yellow; \
|
||||
}"
|
||||
@if [ -f ./temporal-service/temporal-service.exe ]; then \
|
||||
echo "Copying Windows temporal-service binary..."; \
|
||||
cp -p ./temporal-service/temporal-service.exe ./ui/desktop/src/bin/; \
|
||||
else \
|
||||
echo "Windows temporal-service binary not found. Building it..."; \
|
||||
cd temporal-service && GOOS=windows GOARCH=amd64 go build -o temporal-service.exe main.go && cp temporal-service.exe ../ui/desktop/src/bin/; \
|
||||
fi
|
||||
@echo "Note: Temporal CLI for Windows will be downloaded at runtime if needed"
|
||||
|
||||
# Run UI with latest
|
||||
run-ui:
|
||||
@@ -91,12 +147,25 @@ run-ui-only:
|
||||
@echo "Running UI..."
|
||||
cd ui/desktop && npm install && npm run start-gui
|
||||
|
||||
debug-ui:
|
||||
@echo "🚀 Starting Goose frontend in external backend mode"
|
||||
cd ui/desktop && \
|
||||
export GOOSE_EXTERNAL_BACKEND=true && \
|
||||
export GOOSE_EXTERNAL_PORT=3000 && \
|
||||
npm install && \
|
||||
npm run start-gui
|
||||
|
||||
# Run UI with alpha changes
|
||||
run-ui-alpha:
|
||||
run-ui-alpha temporal="true":
|
||||
@just release-binary
|
||||
@echo "Running UI..."
|
||||
cd ui/desktop && npm install && ALPHA=true npm run start-alpha-gui
|
||||
@echo "Running UI with {{ if temporal == "true" { "Temporal" } else { "Legacy" } }} scheduler..."
|
||||
cd ui/desktop && npm install && ALPHA=true GOOSE_SCHEDULER_TYPE={{ if temporal == "true" { "temporal" } else { "legacy" } }} npm run start-alpha-gui
|
||||
|
||||
# Run UI with alpha changes using legacy scheduler (no Temporal dependency)
|
||||
run-ui-alpha-legacy:
|
||||
@just release-binary
|
||||
@echo "Running UI with Legacy scheduler (no Temporal required)..."
|
||||
cd ui/desktop && npm install && ALPHA=true GOOSE_SCHEDULER_TYPE=legacy npm run start-alpha-gui
|
||||
|
||||
# Run UI with latest (Windows version)
|
||||
run-ui-windows:
|
||||
@@ -115,11 +184,31 @@ run-server:
|
||||
@echo "Running server..."
|
||||
cargo run -p goose-server
|
||||
|
||||
# Check if OpenAPI schema is up-to-date
|
||||
check-openapi-schema: generate-openapi
|
||||
./scripts/check-openapi-schema.sh
|
||||
|
||||
# Generate OpenAPI specification without starting the UI
|
||||
generate-openapi:
|
||||
@echo "Generating OpenAPI schema..."
|
||||
cargo run -p goose-server --bin generate_schema
|
||||
@echo "Generating frontend API..."
|
||||
cd ui/desktop && npm run generate-api
|
||||
|
||||
# make GUI with latest binary
|
||||
lint-ui:
|
||||
cd ui/desktop && npm run lint:check
|
||||
|
||||
# make GUI with latest binary
|
||||
make-ui:
|
||||
@just release-binary
|
||||
cd ui/desktop && npm run bundle:default
|
||||
|
||||
# make GUI with latest binary and alpha features enabled
|
||||
make-ui-alpha:
|
||||
@just release-binary
|
||||
cd ui/desktop && npm run bundle:alpha
|
||||
|
||||
# make GUI with latest Windows binary
|
||||
make-ui-windows:
|
||||
@just release-windows
|
||||
@@ -132,25 +221,8 @@ make-ui-windows:
|
||||
echo "Copying Windows binary and DLLs..." && \
|
||||
cp -f ./target/x86_64-pc-windows-gnu/release/goosed.exe ./ui/desktop/src/bin/ && \
|
||||
cp -f ./target/x86_64-pc-windows-gnu/release/*.dll ./ui/desktop/src/bin/ && \
|
||||
if [ -d "./ui/desktop/src/platform/windows/bin" ]; then \
|
||||
echo "Copying Windows platform files..." && \
|
||||
for file in ./ui/desktop/src/platform/windows/bin/*.{exe,dll,cmd}; do \
|
||||
if [ -f "$file" ] && [ "$(basename "$file")" != "goosed.exe" ]; then \
|
||||
cp -f "$file" ./ui/desktop/src/bin/; \
|
||||
fi; \
|
||||
done && \
|
||||
if [ -d "./ui/desktop/src/platform/windows/bin/goose-npm" ]; then \
|
||||
echo "Setting up npm environment..." && \
|
||||
rsync -a --delete ./ui/desktop/src/platform/windows/bin/goose-npm/ ./ui/desktop/src/bin/goose-npm/; \
|
||||
fi && \
|
||||
echo "Windows-specific files copied successfully"; \
|
||||
fi && \
|
||||
echo "Starting Windows package build..." && \
|
||||
(cd ui/desktop && echo "In desktop directory, running npm bundle:windows..." && npm run bundle:windows) && \
|
||||
echo "Creating resources directory..." && \
|
||||
(cd ui/desktop && mkdir -p out/Goose-win32-x64/resources/bin) && \
|
||||
echo "Copying final binaries..." && \
|
||||
(cd ui/desktop && rsync -av src/bin/ out/Goose-win32-x64/resources/bin/) && \
|
||||
(cd ui/desktop && npm run bundle:windows) && \
|
||||
echo "Windows package build complete!"; \
|
||||
else \
|
||||
echo "Windows binary not found."; \
|
||||
@@ -162,10 +234,50 @@ make-ui-intel:
|
||||
@just release-intel
|
||||
cd ui/desktop && npm run bundle:intel
|
||||
|
||||
# Setup langfuse server
|
||||
langfuse-server:
|
||||
#!/usr/bin/env bash
|
||||
./scripts/setup_langfuse.sh
|
||||
# Start Temporal services (server and temporal-service)
|
||||
start-temporal:
|
||||
@echo "Starting Temporal server..."
|
||||
@if ! pgrep -f "temporal server start-dev" > /dev/null; then \
|
||||
echo "Starting Temporal server in background..."; \
|
||||
nohup temporal server start-dev --db-filename temporal.db --port 7233 --ui-port 8233 --log-level warn > temporal-server.log 2>&1 & \
|
||||
echo "Waiting for Temporal server to start..."; \
|
||||
sleep 5; \
|
||||
else \
|
||||
echo "Temporal server is already running"; \
|
||||
fi
|
||||
@echo "Starting temporal-service..."
|
||||
@if ! pgrep -f "temporal-service" > /dev/null; then \
|
||||
echo "Starting temporal-service in background..."; \
|
||||
cd temporal-service && nohup ./temporal-service > temporal-service.log 2>&1 & \
|
||||
echo "Waiting for temporal-service to start..."; \
|
||||
sleep 3; \
|
||||
else \
|
||||
echo "temporal-service is already running"; \
|
||||
fi
|
||||
@echo "Temporal services started. Check logs: temporal-server.log, temporal-service/temporal-service.log"
|
||||
|
||||
# Stop Temporal services
|
||||
stop-temporal:
|
||||
@echo "Stopping Temporal services..."
|
||||
@pkill -f "temporal server start-dev" || echo "Temporal server was not running"
|
||||
@pkill -f "temporal-service" || echo "temporal-service was not running"
|
||||
@echo "Temporal services stopped"
|
||||
|
||||
# Check status of Temporal services
|
||||
status-temporal:
|
||||
@echo "Checking Temporal services status..."
|
||||
@if pgrep -f "temporal server start-dev" > /dev/null; then \
|
||||
echo "✓ Temporal server is running"; \
|
||||
else \
|
||||
echo "✗ Temporal server is not running"; \
|
||||
fi
|
||||
@if pgrep -f "temporal-service" > /dev/null; then \
|
||||
echo "✓ temporal-service is running"; \
|
||||
else \
|
||||
echo "✗ temporal-service is not running"; \
|
||||
fi
|
||||
@echo "Testing temporal-service health..."
|
||||
@curl -s http://localhost:8080/health > /dev/null && echo "✓ temporal-service is responding" || echo "✗ temporal-service is not responding"
|
||||
|
||||
# Run UI with debug build
|
||||
run-dev:
|
||||
@@ -180,12 +292,11 @@ install-deps:
|
||||
cd ui/desktop && npm install
|
||||
cd documentation && yarn
|
||||
|
||||
# ensure the current branch is "main" or error
|
||||
ensure-main:
|
||||
ensure-release-branch:
|
||||
#!/usr/bin/env bash
|
||||
branch=$(git rev-parse --abbrev-ref HEAD); \
|
||||
if [ "$branch" != "main" ]; then \
|
||||
echo "Error: You are not on the main branch (current: $branch)"; \
|
||||
if [[ ! "$branch" == release/* ]]; then \
|
||||
echo "Error: You are not on a release branch (current: $branch)"; \
|
||||
exit 1; \
|
||||
fi
|
||||
|
||||
@@ -193,7 +304,7 @@ ensure-main:
|
||||
git fetch
|
||||
# @{u} refers to upstream branch of current branch
|
||||
if [ "$(git rev-parse HEAD)" != "$(git rev-parse @{u})" ]; then \
|
||||
echo "Error: Your branch is not up to date with the upstream main branch"; \
|
||||
echo "Error: Your branch is not up to date with the upstream branch"; \
|
||||
echo " ensure your branch is up to date (git pull)"; \
|
||||
exit 1; \
|
||||
fi
|
||||
@@ -215,7 +326,7 @@ validate version:
|
||||
fi
|
||||
|
||||
# set cargo and app versions, must be semver
|
||||
release version: ensure-main
|
||||
prepare-release version:
|
||||
@just validate {{ version }} || exit 1
|
||||
|
||||
@git switch -c "release/{{ version }}"
|
||||
@@ -233,8 +344,8 @@ release version: ensure-main
|
||||
get-tag-version:
|
||||
@uvx --from=toml-cli toml get --toml-path=Cargo.toml "workspace.package.version"
|
||||
|
||||
# create the git tag from Cargo.toml, must be on main
|
||||
tag: ensure-main
|
||||
# create the git tag from Cargo.toml, checking we're on a release branch
|
||||
tag: ensure-release-branch
|
||||
git tag v$(just get-tag-version)
|
||||
|
||||
# create tag and push to origin (use this when release branch is merged to main)
|
||||
@@ -243,9 +354,9 @@ tag-push: tag
|
||||
git push origin tag v$(just get-tag-version)
|
||||
|
||||
# generate release notes from git commits
|
||||
release-notes:
|
||||
release-notes old:
|
||||
#!/usr/bin/env bash
|
||||
git log --pretty=format:"- %s" v$(just get-tag-version)..HEAD
|
||||
git log --pretty=format:"- %s" {{ old }}..v$(just get-tag-version)
|
||||
|
||||
### s = file seperator based on OS
|
||||
s := if os() == "windows" { "\\" } else { "/" }
|
||||
@@ -261,16 +372,16 @@ set windows-shell := ["powershell.exe", "-NoLogo", "-Command"]
|
||||
### Build the core code
|
||||
### profile = --release or "" for debug
|
||||
### allparam = OR/AND/ANY/NONE --workspace --all-features --all-targets
|
||||
win-bld profile allparam:
|
||||
win-bld profile allparam:
|
||||
cargo run {{profile}} -p goose-server --bin generate_schema
|
||||
cargo build {{profile}} {{allparam}}
|
||||
|
||||
### Build just debug
|
||||
win-bld-dbg:
|
||||
win-bld-dbg:
|
||||
just win-bld " " " "
|
||||
|
||||
### Build debug and test, examples,...
|
||||
win-bld-dbg-all:
|
||||
win-bld-dbg-all:
|
||||
just win-bld " " "--workspace --all-targets --all-features"
|
||||
|
||||
### Build just release
|
||||
@@ -333,8 +444,8 @@ win-total-rls *allparam:
|
||||
just win-bld-rls{{allparam}}
|
||||
just win-run-rls
|
||||
|
||||
### Build and run the Kotlin example with
|
||||
### auto-generated bindings for goose-llm
|
||||
### Build and run the Kotlin example with
|
||||
### auto-generated bindings for goose-llm
|
||||
kotlin-example:
|
||||
# Build Rust dylib and generate Kotlin bindings
|
||||
cargo build -p goose-llm
|
||||
@@ -352,4 +463,4 @@ kotlin-example:
|
||||
cd bindings/kotlin/ && java \
|
||||
-Djna.library.path=$HOME/Development/goose/target/debug \
|
||||
-classpath "example.jar:libs/kotlin-stdlib-1.9.0.jar:libs/kotlinx-coroutines-core-jvm-1.7.3.jar:libs/jna-5.13.0.jar" \
|
||||
UsageKt
|
||||
UsageKt
|
||||
|
||||
28
README.md
@@ -21,33 +21,7 @@ goose is your on-machine AI agent, capable of automating complex development tas

Whether you're prototyping an idea, refining existing code, or managing intricate engineering pipelines, goose adapts to your workflow and executes tasks with precision.

Designed for maximum flexibility, goose works with any LLM, seamlessly integrates with MCP servers, and is available as both a desktop app and a CLI - making it the ultimate AI assistant for developers who want to move faster and focus on innovation.

## Multiple Model Configuration

goose supports using different models for different purposes to optimize performance and cost; this works across model providers as well as across individual models.

### Lead/Worker Model Pattern
Use a powerful model for initial planning and complex reasoning, then switch to a faster/cheaper model for execution; goose handles this switch automatically:

```bash
# Required: Enable lead model mode
export GOOSE_LEAD_MODEL=modelY
# Optional: configure a provider for the lead model if not the default provider
export GOOSE_LEAD_PROVIDER=providerX # Defaults to main provider
```

### Planning Model Configuration
Use a specialized model for the `/plan` command in CLI mode; this is explicitly invoked when you want to plan (vs. execute):

```bash
# Optional: Use different model for planning
export GOOSE_PLANNER_PROVIDER=openai
export GOOSE_PLANNER_MODEL=gpt-4
```

Both patterns help you balance model capabilities with cost and speed for optimal results, and switch between models and vendors as required.

Designed for maximum flexibility, goose works with any LLM and supports multi-model configuration to optimize performance and cost, seamlessly integrates with MCP servers, and is available as both a desktop app and a CLI - making it the ultimate AI assistant for developers who want to move faster and focus on innovation.

# Quick Links
- [Quickstart](https://block.github.io/goose/docs/quickstart)

27
RELEASE.md
Normal file
@@ -0,0 +1,27 @@
# Making a Release

You'll generally create one of two release types: a regular feature release (minor version bump) or a bug-fixing patch release (patch version bump). Regular releases start on main, while patch releases start with an existing release tag. A condensed command sketch follows each set of steps below.

## Regular release from main

1. Check out the main branch.
2. Pick the new version. Use a new minor version (e.g. if the current latest release is 1.2.3, use 1.3.0). Save it using `export VERSION=<new version>`
3. Run `just prepare-release $VERSION`. This will create a branch `release/<version>`. Push this branch and open a PR into main. The diff should show version updates to Cargo.toml/package.json and their lock files.
4. Test this build. When ready to make the release, proceed to the next step.
5. Tag the release: run `just tag-push` to create the tag and push it. This will start the build process for your new release.
6. Merge the PR you created in step 3.
7. Once the release is created on [Github](https://github.com/block/goose/releases), run `just release-notes <prior release>` to generate release notes. Copy these into the release description.

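As a quick reference, the regular flow condenses into the shell sketch below; the version numbers are illustrative, and the PR itself is still opened and merged on GitHub:

```bash
# Regular release sketch, assuming the latest release is v1.2.3
git checkout main && git pull
export VERSION=1.3.0                  # new minor version (illustrative)
just prepare-release $VERSION         # creates and switches to release/1.3.0
git push -u origin "release/$VERSION" # then open a PR into main
# ...test the build; when ready:
just tag-push                         # tags v1.3.0 and pushes the tag, starting the build
# merge the PR; once the GitHub release exists:
just release-notes v1.2.3             # copy the output into the release description
```
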
## Patch release

Follow the above steps, but rather than starting on main, start on the release tag you're interested in patching. Increment the patch version instead of the minor version (e.g. 1.2.3 -> 1.2.4). Bug fixes should be merged to main and then cherry-picked onto this branch.

1. Before proceeding, make sure any fixes you're looking to include in a patch are merged into main, if possible.
2. Check out the release you're patching using the tag (e.g. `git checkout v1.3.0`). Set the version by incrementing the patch version (`export VERSION=1.3.1`).
3. Run `just prepare-release $VERSION`.
4. Cherry-pick the relevant fixes from the main branch.
5. Test this build. When ready to make the release, proceed to the next step.
6. Tag the release: run `just tag-push` to create the tag and push it. This will start the build process for your new release.
7. Once the release is created on [Github](https://github.com/block/goose/releases), run `just release-notes <prior release>` to generate release notes. Copy these into the release description.

Note that you won't merge this branch into main.
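The patch flow, sketched the same way (versions and the commit placeholder are illustrative):

```bash
# Patch release sketch, patching v1.3.0
git checkout v1.3.0
export VERSION=1.3.1
just prepare-release $VERSION     # creates release/1.3.1
git cherry-pick <fix-commit-sha>  # bring over fixes already merged to main
# ...test the build; when ready:
just tag-push                     # tags v1.3.1 and pushes the tag
just release-notes v1.3.0         # after the GitHub release is created
```
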
1
bin/.just-1.40.0.pkg
Symbolic link
@@ -0,0 +1 @@
hermit
1
bin/.temporal-cli-1.3.0.pkg
Symbolic link
@@ -0,0 +1 @@
hermit
1
bin/temporal
Symbolic link
@@ -0,0 +1 @@
.temporal-cli-1.3.0.pkg
115
bindings/kotlin/example/RuntimeStats.kt
Normal file
@@ -0,0 +1,115 @@
import kotlin.system.measureNanoTime
import kotlinx.coroutines.runBlocking
import uniffi.goose_llm.*

import java.net.URI
import java.net.http.HttpClient
import java.net.http.HttpRequest
import java.net.http.HttpResponse

/* ---------- Goose helpers ---------- */

fun buildProviderConfig(host: String, token: String): String =
    """{ "host": "$host", "token": "$token" }"""

suspend fun timeGooseCall(
    modelCfg: ModelConfig,
    providerName: String,
    providerCfg: String
): Pair<Double, CompletionResponse> {

    val req = createCompletionRequest(
        providerName,
        providerCfg,
        modelCfg,
        systemPreamble = "You are a helpful assistant.",
        messages = listOf(
            Message(
                Role.USER,
                System.currentTimeMillis() / 1000,
                listOf(MessageContent.Text(TextContent("Write me a 1000 word chapter about learning Go vs Rust in the world of LLMs and AI.")))
            )
        ),
        extensions = emptyList()
    )

    lateinit var resp: CompletionResponse
    val wallMs = measureNanoTime { resp = completion(req) } / 1_000_000.0
    return wallMs to resp
}

/* ---------- OpenAI helpers ---------- */

fun timeOpenAiCall(client: HttpClient, apiKey: String): Double {
    val body = """
    {
      "model": "gpt-4.1",
      "max_tokens": 500,
      "messages": [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Write me a 1000 word chapter about learning Go vs Rust in the world of LLMs and AI."}
      ]
    }
    """.trimIndent()

    val request = HttpRequest.newBuilder()
        .uri(URI.create("https://api.openai.com/v1/chat/completions"))
        .header("Authorization", "Bearer $apiKey")
        .header("Content-Type", "application/json")
        .POST(HttpRequest.BodyPublishers.ofString(body))
        .build()

    val wallMs = measureNanoTime {
        client.send(request, HttpResponse.BodyHandlers.ofString())
    } / 1_000_000.0

    return wallMs
}

/* ---------- main ---------- */

fun main() = runBlocking {
    /* Goose provider setup */
    val providerName = "databricks"
    val host = System.getenv("DATABRICKS_HOST") ?: error("DATABRICKS_HOST not set")
    val token = System.getenv("DATABRICKS_TOKEN") ?: error("DATABRICKS_TOKEN not set")
    val providerCfg = buildProviderConfig(host, token)

    /* OpenAI setup */
    val openAiKey = System.getenv("OPENAI_API_KEY") ?: error("OPENAI_API_KEY not set")
    val httpClient = HttpClient.newBuilder().build()

    val gooseModels = listOf("goose-claude-4-sonnet", "goose-gpt-4-1")
    val runsPerModel = 3

    /* --- Goose timing --- */
    for (model in gooseModels) {
        val maxTokens = 500
        val cfg = ModelConfig(model, 100_000u, 0.0f, maxTokens)
        var wallSum = 0.0
        var gooseSum = 0.0

        println("=== Goose: $model ===")
        repeat(runsPerModel) { run ->
            val (wall, resp) = timeGooseCall(cfg, providerName, providerCfg)
            val gooseMs = resp.runtimeMetrics.totalTimeSec * 1_000
            val overhead = wall - gooseMs
            wallSum += wall
            gooseSum += gooseMs
            println("run ${run + 1}: wall = %.1f ms | goose-llm = %.1f ms | overhead = %.1f ms"
                .format(wall, gooseMs, overhead))
        }
        println("-- avg wall = %.1f ms | avg overhead = %.1f ms --\n"
            .format(wallSum / runsPerModel, (wallSum - gooseSum) / runsPerModel))
    }

    /* --- OpenAI direct timing --- */
    var oaSum = 0.0
    println("=== OpenAI: gpt-4.1 (direct HTTPS) ===")
    repeat(runsPerModel) { run ->
        val wall = timeOpenAiCall(httpClient, openAiKey)
        oaSum += wall
        println("run ${run + 1}: wall = %.1f ms".format(wall))
    }
    println("-- avg wall = %.1f ms --".format(oaSum / runsPerModel))
}
@@ -1,200 +1,228 @@
import java.io.File
import java.util.Base64
import kotlinx.coroutines.runBlocking
import uniffi.goose_llm.*

fun main() = runBlocking {
    val now = System.currentTimeMillis() / 1000
    val msgs = listOf(
        // 1) User sends a plain-text prompt
        Message(
            role = Role.USER,
            created = now,
            content = listOf(
                MessageContent.Text(
                    TextContent("What is 7 x 6?")
                )
            )
        ),
/* ---------- shared helpers ---------- */

        // 2) Assistant makes a tool request (ToolReq) to calculate 7×6
        Message(
            role = Role.ASSISTANT,
            created = now + 2,
            content = listOf(
                MessageContent.ToolReq(
                    ToolRequest(
                        id = "calc1",
                        toolCall = """
                        {
                            "status": "success",
                            "value": {
                                "name": "calculator_extension__toolname",
                                "arguments": {
                                    "operation": "multiply",
                                    "numbers": [7, 6]
                                },
                                "needsApproval": false
                            }
                        }
                        """.trimIndent()
                    )
                )
            )
        ),

        // 3) User (on behalf of the tool) responds with the tool result (ToolResp)
        Message(
            role = Role.USER,
            created = now + 3,
            content = listOf(
                MessageContent.ToolResp(
                    ToolResponse(
                        id = "calc1",
                        toolResult = """
                        {
                            "status": "success",
                            "value": [
                                {"type": "text", "text": "42"}
                            ]
                        }
                        """.trimIndent()
                    )
                )
            )
        ),
    )

    printMessages(msgs)
    println("---\n")

    // Setup provider
    val providerName = "databricks"
    val host = System.getenv("DATABRICKS_HOST") ?: error("DATABRICKS_HOST not set")
    val token = System.getenv("DATABRICKS_TOKEN") ?: error("DATABRICKS_TOKEN not set")
    val providerConfig = """{"host": "$host", "token": "$token"}"""

    println("Provider Name: $providerName")
    println("Provider Config: $providerConfig")


    val sessionName = generateSessionName(providerName, providerConfig, msgs)
    println("\nSession Name: $sessionName")

    val tooltip = generateTooltip(providerName, providerConfig, msgs)
    println("\nTooltip: $tooltip")

    // Completion
    val modelName = "goose-gpt-4-1"
    val modelConfig = ModelConfig(
        modelName,
        100000u, // UInt
        0.1f, // Float
        200 // Int
    )
fun buildProviderConfig(host: String, token: String, imageFormat: String = "OpenAi"): String = """
    {
        "host": "$host",
        "token": "$token",
        "image_format": "$imageFormat"
    }
""".trimIndent()

fun calculatorExtension(): ExtensionConfig {
    val calculatorTool = createToolConfig(
        name = "calculator",
        name = "calculator",
        description = "Perform basic arithmetic operations",
        inputSchema = """
        {
            "type": "object",
            "required": ["operation", "numbers"],
            "properties": {
                "operation": {
                    "type": "string",
                    "enum": ["add", "subtract", "multiply", "divide"],
                    "description": "The arithmetic operation to perform"
                },
                "numbers": {
                    "type": "array",
                    "items": { "type": "number" },
                    "description": "List of numbers to operate on in order"
                }
            "type": "object",
            "required": ["operation", "numbers"],
            "properties": {
                "operation": {
                    "type": "string",
                    "enum": ["add", "subtract", "multiply", "divide"],
                    "description": "The arithmetic operation to perform"
                },
                "numbers": {
                    "type": "array",
                    "items": { "type": "number" },
                    "description": "List of numbers to operate on in order"
                }
            }
        }
        """.trimIndent(),
        approvalMode = ToolApprovalMode.AUTO
    )

    val calculator_extension = ExtensionConfig(
        name = "calculator_extension",
    return ExtensionConfig(
        name = "calculator_extension",
        instructions = "This extension provides a calculator tool.",
        tools = listOf(calculatorTool)
        tools = listOf(calculatorTool)
    )
}

/* ---------- demos ---------- */

suspend fun runCalculatorDemo(
    modelConfig: ModelConfig,
    providerName: String,
    providerConfig: String
) {
    val now = System.currentTimeMillis() / 1000
    val msgs = listOf(
        // same conversation you already had
        Message(Role.USER, now, listOf(MessageContent.Text(TextContent("What is 7 x 6?")))),
        Message(Role.ASSISTANT, now + 2, listOf(MessageContent.ToolReq(
            ToolRequest(
                id = "calc1",
                toolCall = """
                {
                    "status": "success",
                    "value": {
                        "name": "calculator_extension__toolname",
                        "arguments": { "operation": "doesnotexist", "numbers": [7,6] },
                        "needsApproval": false
                    }
                }
                """.trimIndent()
            )))),
        Message(Role.USER, now + 3, listOf(MessageContent.ToolResp(
            ToolResponse(
                id = "calc1",
                toolResult = """
                {
                    "status": "error",
                    "error": "Invalid value for operation: 'doesnotexist'. Valid values are: ['add','subtract','multiply','divide']"
                }
                """.trimIndent()
            )))),
        Message(Role.ASSISTANT, now + 4, listOf(MessageContent.ToolReq(
            ToolRequest(
                id = "calc1",
                toolCall = """
                {
                    "status": "success",
                    "value": {
                        "name": "calculator_extension__toolname",
                        "arguments": { "operation": "multiply", "numbers": [7,6] },
                        "needsApproval": false
                    }
                }
                """.trimIndent()
            )))),
        Message(Role.USER, now + 5, listOf(MessageContent.ToolResp(
            ToolResponse(
                id = "calc1",
                toolResult = """
                {
                    "status": "success",
                    "value": [ { "type": "text", "text": "42" } ]
                }
                """.trimIndent()
            ))))
    )

    val extensions = listOf(calculator_extension)
    val systemPreamble = "You are a helpful assistant."
    /* one-shot prompt with error */
    val reqErr = createCompletionRequest(
        providerName, providerConfig, modelConfig,
        "You are a helpful assistant.",
        messages = listOf(msgs.first()),
        extensions = listOf(calculatorExtension())
    )
    println("\n[${modelConfig.modelName}] Calculator (single-msg) → ${completion(reqErr).message}")

    /* full conversation */
    val reqAll = createCompletionRequest(
        providerName, providerConfig, modelConfig,
        "You are a helpful assistant.",
        messages = msgs,
        extensions = listOf(calculatorExtension())
    )
    println("[${modelConfig.modelName}] Calculator (full chat) → ${completion(reqAll).message}")
}

suspend fun runImageExample(
    modelConfig: ModelConfig,
    providerName: String,
    providerConfig: String
) {
    val imagePath = "../../crates/goose/examples/test_assets/test_image.png"
    val base64Image = Base64.getEncoder().encodeToString(File(imagePath).readBytes())
    val now = System.currentTimeMillis() / 1000

    val msgs = listOf(
        Message(Role.USER, now, listOf(
            MessageContent.Text(TextContent("What is in this image?")),
            MessageContent.Image(ImageContent(base64Image, "image/png"))
        )),
    )

    val req = createCompletionRequest(
        providerName,
        providerConfig,
        modelConfig,
        systemPreamble,
        msgs,
        extensions
        providerName, providerConfig, modelConfig,
        "You are a helpful assistant. Please describe any text you see in the image.",
        messages = msgs,
        extensions = emptyList()
    )

    val response = completion(req)
    println("\nCompletion Response:\n${response.message}")
    println()

    // ---- UI Extraction (custom schema) ----
    runUiExtraction(providerName, providerConfig)
    println("\n[${modelConfig.modelName}] Image example → ${completion(req).message}")
}

suspend fun runPromptOverride(
    modelConfig: ModelConfig,
    providerName: String,
    providerConfig: String
) {
    val now = System.currentTimeMillis() / 1000
    val req = createCompletionRequest(
        providerName, providerConfig, modelConfig,
        systemPreamble = null,
        systemPromptOverride = "You are a bot named Tile Creator. Your task is to create a tile based on the user's input.",
        messages = listOf(
            Message(Role.USER, now, listOf(MessageContent.Text(TextContent("What's your name?"))))
        ),
        extensions = emptyList()
    )
    println("\n[${modelConfig.modelName}] Prompt override → ${completion(req).message}")
}

suspend fun runUiExtraction(providerName: String, providerConfig: String) {
    val systemPrompt = "You are a UI generator AI. Convert the user input into a JSON-driven UI."
    val messages = listOf(
        Message(
            role = Role.USER,
            created = System.currentTimeMillis() / 1000,
            content = listOf(
                MessageContent.Text(
                    TextContent("Make a User Profile Form")
                )
            )
        )
    )
    val schema = """{
        "type": "object",
        "properties": {
            "type": {
                "type": "string",
                "enum": ["div","button","header","section","field","form"]
            },
            "label": { "type": "string" },
            "children": {
                "type": "array",
                "items": { "${'$'}ref": "#" }
            },
            "attributes": {
                "type": "array",
                "items": {
                    "type": "object",
                    "properties": {
                        "name": { "type": "string" },
                        "value": { "type": "string" }
                    },
                    "required": ["name","value"],
                    "additionalProperties": false
                }
            }
        },
        "required": ["type","label","children","attributes"],
        "additionalProperties": false
    }""".trimIndent();
    val schema = /* same JSON schema as before */ """
    {
      "type":"object",
      "properties":{
        "type":{"type":"string","enum":["div","button","header","section","field","form"]},
        "label":{"type":"string"},
        "children":{"type":"array","items":{"${'$'}ref":"#"}},
        "attributes":{"type":"array","items":{"type":"object","properties":{"name":{"type":"string"},"value":{"type":"string"}},"required":["name","value"],"additionalProperties":false}}
      },
      "required":["type","label","children","attributes"],
      "additionalProperties":false
    }
    """.trimIndent()

    try {
        val response = generateStructuredOutputs(
            providerName = providerName,
            providerConfig = providerConfig,
            systemPrompt = systemPrompt,
            messages = messages,
            schema = schema
        )
        println("\nUI Extraction Output:\n${response}")
    } catch (e: ProviderException) {
        println("\nUI Extraction failed:\n${e.message}")
    }
    val messages = listOf(
        Message(Role.USER, System.currentTimeMillis()/1000,
            listOf(MessageContent.Text(TextContent("Make a User Profile Form"))))
    )

    val res = generateStructuredOutputs(
        providerName, providerConfig,
        systemPrompt = "You are a UI generator AI. Convert the user input into a JSON-driven UI.",
        messages = messages,
        schema = schema
    )
    println("\n[UI-Extraction] → $res")
}

/* ---------- entry-point ---------- */

fun main() = runBlocking {
    /* --- provider setup --- */
    val providerName = "databricks"
    val host = System.getenv("DATABRICKS_HOST") ?: error("DATABRICKS_HOST not set")
    val token = System.getenv("DATABRICKS_TOKEN") ?: error("DATABRICKS_TOKEN not set")
    val providerConfig = buildProviderConfig(host, token)

    println("Provider: $providerName")
    println("Config : $providerConfig\n")

    /* --- run demos for each model --- */
    // NOTE: `claude-3-5-haiku` does NOT support images
    val modelNames = listOf("kgoose-gpt-4o", "goose-claude-4-sonnet")

    for (name in modelNames) {
        val modelConfig = ModelConfig(name, 100000u, 0.1f, 200)
        println("\n===== Running demos for model: $name =====")

        runCalculatorDemo(modelConfig, providerName, providerConfig)
        runImageExample(modelConfig, providerName, providerConfig)
        runPromptOverride(modelConfig, providerName, providerConfig)
        println("===== End demos for $name =====\n")
    }

    /* UI extraction is model-agnostic, so run it once */
    runUiExtraction(providerName, providerConfig)
}

|
||||
`providerConfig`: RustBuffer.ByValue,
|
||||
`modelConfig`: RustBuffer.ByValue,
|
||||
`systemPreamble`: RustBuffer.ByValue,
|
||||
`systemPromptOverride`: RustBuffer.ByValue,
|
||||
`messages`: RustBuffer.ByValue,
|
||||
`extensions`: RustBuffer.ByValue,
|
||||
`requestId`: RustBuffer.ByValue,
|
||||
uniffi_out_err: UniffiRustCallStatus,
|
||||
): RustBuffer.ByValue
|
||||
|
||||
@@ -847,6 +849,7 @@ internal interface UniffiLib : Library {
|
||||
`providerName`: RustBuffer.ByValue,
|
||||
`providerConfig`: RustBuffer.ByValue,
|
||||
`messages`: RustBuffer.ByValue,
|
||||
`requestId`: RustBuffer.ByValue,
|
||||
): Long
|
||||
|
||||
fun uniffi_goose_llm_fn_func_generate_structured_outputs(
|
||||
@@ -855,12 +858,14 @@ internal interface UniffiLib : Library {
|
||||
`systemPrompt`: RustBuffer.ByValue,
|
||||
`messages`: RustBuffer.ByValue,
|
||||
`schema`: RustBuffer.ByValue,
|
||||
`requestId`: RustBuffer.ByValue,
|
||||
): Long
|
||||
|
||||
fun uniffi_goose_llm_fn_func_generate_tooltip(
|
||||
`providerName`: RustBuffer.ByValue,
|
||||
`providerConfig`: RustBuffer.ByValue,
|
||||
`messages`: RustBuffer.ByValue,
|
||||
`requestId`: RustBuffer.ByValue,
|
||||
): Long
|
||||
|
||||
fun uniffi_goose_llm_fn_func_print_messages(
|
||||
@@ -1100,19 +1105,19 @@ private fun uniffiCheckApiChecksums(lib: IntegrityCheckingUniffiLib) {
|
||||
if (lib.uniffi_goose_llm_checksum_func_completion() != 47457.toShort()) {
|
||||
throw RuntimeException("UniFFI API checksum mismatch: try cleaning and rebuilding your project")
|
||||
}
|
||||
if (lib.uniffi_goose_llm_checksum_func_create_completion_request() != 39068.toShort()) {
|
||||
if (lib.uniffi_goose_llm_checksum_func_create_completion_request() != 15391.toShort()) {
|
||||
throw RuntimeException("UniFFI API checksum mismatch: try cleaning and rebuilding your project")
|
||||
}
|
||||
if (lib.uniffi_goose_llm_checksum_func_create_tool_config() != 49910.toShort()) {
|
||||
throw RuntimeException("UniFFI API checksum mismatch: try cleaning and rebuilding your project")
|
||||
}
|
||||
if (lib.uniffi_goose_llm_checksum_func_generate_session_name() != 64087.toShort()) {
|
||||
if (lib.uniffi_goose_llm_checksum_func_generate_session_name() != 34350.toShort()) {
|
||||
throw RuntimeException("UniFFI API checksum mismatch: try cleaning and rebuilding your project")
|
||||
}
|
||||
if (lib.uniffi_goose_llm_checksum_func_generate_structured_outputs() != 43426.toShort()) {
|
||||
if (lib.uniffi_goose_llm_checksum_func_generate_structured_outputs() != 4576.toShort()) {
|
||||
throw RuntimeException("UniFFI API checksum mismatch: try cleaning and rebuilding your project")
|
||||
}
|
||||
if (lib.uniffi_goose_llm_checksum_func_generate_tooltip() != 41121.toShort()) {
|
||||
if (lib.uniffi_goose_llm_checksum_func_generate_tooltip() != 36439.toShort()) {
|
||||
throw RuntimeException("UniFFI API checksum mismatch: try cleaning and rebuilding your project")
|
||||
}
|
||||
if (lib.uniffi_goose_llm_checksum_func_print_messages() != 30278.toShort()) {
|
||||
@@ -2955,9 +2960,11 @@ fun `createCompletionRequest`(
|
||||
`providerName`: kotlin.String,
|
||||
`providerConfig`: Value,
|
||||
`modelConfig`: ModelConfig,
|
||||
`systemPreamble`: kotlin.String,
|
||||
`systemPreamble`: kotlin.String? = null,
|
||||
`systemPromptOverride`: kotlin.String? = null,
|
||||
`messages`: List<Message>,
|
||||
`extensions`: List<ExtensionConfig>,
|
||||
`requestId`: kotlin.String? = null,
|
||||
): CompletionRequest =
|
||||
FfiConverterTypeCompletionRequest.lift(
|
||||
uniffiRustCall { _status ->
|
||||
@@ -2965,9 +2972,11 @@ fun `createCompletionRequest`(
|
||||
FfiConverterString.lower(`providerName`),
|
||||
FfiConverterTypeValue.lower(`providerConfig`),
|
||||
FfiConverterTypeModelConfig.lower(`modelConfig`),
|
||||
FfiConverterString.lower(`systemPreamble`),
|
||||
FfiConverterOptionalString.lower(`systemPreamble`),
|
||||
FfiConverterOptionalString.lower(`systemPromptOverride`),
|
||||
FfiConverterSequenceTypeMessage.lower(`messages`),
|
||||
FfiConverterSequenceTypeExtensionConfig.lower(`extensions`),
|
||||
FfiConverterOptionalString.lower(`requestId`),
|
||||
_status,
|
||||
)
|
||||
},
|
||||
@@ -3000,12 +3009,14 @@ suspend fun `generateSessionName`(
|
||||
`providerName`: kotlin.String,
|
||||
`providerConfig`: Value,
|
||||
`messages`: List<Message>,
|
||||
`requestId`: kotlin.String? = null,
|
||||
): kotlin.String =
|
||||
uniffiRustCallAsync(
|
||||
UniffiLib.INSTANCE.uniffi_goose_llm_fn_func_generate_session_name(
|
||||
FfiConverterString.lower(`providerName`),
|
||||
FfiConverterTypeValue.lower(`providerConfig`),
|
||||
FfiConverterSequenceTypeMessage.lower(`messages`),
|
||||
FfiConverterOptionalString.lower(`requestId`),
|
||||
),
|
||||
{ future, callback, continuation -> UniffiLib.INSTANCE.ffi_goose_llm_rust_future_poll_rust_buffer(future, callback, continuation) },
|
||||
{ future, continuation -> UniffiLib.INSTANCE.ffi_goose_llm_rust_future_complete_rust_buffer(future, continuation) },
|
||||
@@ -3028,6 +3039,7 @@ suspend fun `generateStructuredOutputs`(
|
||||
`systemPrompt`: kotlin.String,
|
||||
`messages`: List<Message>,
|
||||
`schema`: Value,
|
||||
`requestId`: kotlin.String? = null,
|
||||
): ProviderExtractResponse =
|
||||
uniffiRustCallAsync(
|
||||
UniffiLib.INSTANCE.uniffi_goose_llm_fn_func_generate_structured_outputs(
|
||||
@@ -3036,6 +3048,7 @@ suspend fun `generateStructuredOutputs`(
|
||||
FfiConverterString.lower(`systemPrompt`),
|
||||
FfiConverterSequenceTypeMessage.lower(`messages`),
|
||||
FfiConverterTypeValue.lower(`schema`),
|
||||
FfiConverterOptionalString.lower(`requestId`),
|
||||
),
|
||||
{ future, callback, continuation -> UniffiLib.INSTANCE.ffi_goose_llm_rust_future_poll_rust_buffer(future, callback, continuation) },
|
||||
{ future, continuation -> UniffiLib.INSTANCE.ffi_goose_llm_rust_future_complete_rust_buffer(future, continuation) },
|
||||
@@ -3056,12 +3069,14 @@ suspend fun `generateTooltip`(
|
||||
`providerName`: kotlin.String,
|
||||
`providerConfig`: Value,
|
||||
`messages`: List<Message>,
|
||||
`requestId`: kotlin.String? = null,
|
||||
): kotlin.String =
|
||||
uniffiRustCallAsync(
|
||||
UniffiLib.INSTANCE.uniffi_goose_llm_fn_func_generate_tooltip(
|
||||
FfiConverterString.lower(`providerName`),
|
||||
FfiConverterTypeValue.lower(`providerConfig`),
|
||||
FfiConverterSequenceTypeMessage.lower(`messages`),
|
||||
FfiConverterOptionalString.lower(`requestId`),
|
||||
),
|
||||
{ future, callback, continuation -> UniffiLib.INSTANCE.ffi_goose_llm_rust_future_poll_rust_buffer(future, callback, continuation) },
|
||||
{ future, continuation -> UniffiLib.INSTANCE.ffi_goose_llm_rust_future_complete_rust_buffer(future, continuation) },
|
||||
|
||||
@@ -7,6 +7,8 @@ license.workspace = true
repository.workspace = true
description.workspace = true

[lints]
workspace = true

[dependencies]
anyhow = "1.0"
@@ -14,6 +16,7 @@ paste = "1.0"
ctor = "0.2.7"
goose = { path = "../goose" }
mcp-core = { path = "../mcp-core" }
rmcp = { workspace = true }
async-trait = "0.1.86"
chrono = { version = "0.4", features = ["serde"] }
serde_json = "1.0"

@@ -5684,7 +5684,7 @@
          "type": "object"
        },
        "volumeHandle": {
          "description": "volumeHandle is the unique volume name returned by the CSI volume plugin’s CreateVolume to refer to the volume on all subsequent calls. Required.",
          "description": "volumeHandle is the unique volume name returned by the CSI volume plugin's CreateVolume to refer to the volume on all subsequent calls. Required.",
          "type": "string"
        }
      },
@@ -15051,7 +15051,7 @@
      "description": "DeviceClassSpec is used in a [DeviceClass] to define what can be allocated and how to configure it.",
      "properties": {
        "config": {
          "description": "Config defines configuration parameters that apply to each device that is claimed via this class. Some classses may potentially be satisfied by multiple drivers, so each instance of a vendor configuration applies to exactly one driver.\n\nThey are passed to the driver, but are not considered while allocating the claim.",
          "description": "Config defines configuration parameters that apply to each device that is claimed via this class. Some classes may potentially be satisfied by multiple drivers, so each instance of a vendor configuration applies to exactly one driver.\n\nThey are passed to the driver, but are not considered while allocating the claim.",
          "items": {
            "$ref": "#/definitions/io.k8s.api.resource.v1alpha3.DeviceClassConfiguration"
          },
@@ -15884,7 +15884,7 @@
      "description": "DeviceClassSpec is used in a [DeviceClass] to define what can be allocated and how to configure it.",
      "properties": {
        "config": {
          "description": "Config defines configuration parameters that apply to each device that is claimed via this class. Some classses may potentially be satisfied by multiple drivers, so each instance of a vendor configuration applies to exactly one driver.\n\nThey are passed to the driver, but are not considered while allocating the claim.",
          "description": "Config defines configuration parameters that apply to each device that is claimed via this class. Some classes may potentially be satisfied by multiple drivers, so each instance of a vendor configuration applies to exactly one driver.\n\nThey are passed to the driver, but are not considered while allocating the claim.",
          "items": {
            "$ref": "#/definitions/io.k8s.api.resource.v1beta1.DeviceClassConfiguration"
          },

@@ -18,7 +18,7 @@ pub struct BenchAgentError {
|
||||
#[async_trait]
|
||||
pub trait BenchBaseSession: Send + Sync {
|
||||
async fn headless(&mut self, message: String) -> anyhow::Result<()>;
|
||||
fn session_file(&self) -> PathBuf;
|
||||
fn session_file(&self) -> Option<PathBuf>;
|
||||
fn message_history(&self) -> Vec<Message>;
|
||||
fn get_total_token_usage(&self) -> anyhow::Result<Option<i32>>;
|
||||
}
|
||||
@@ -52,7 +52,7 @@ impl BenchAgent {
|
||||
pub(crate) async fn get_token_usage(&self) -> Option<i32> {
|
||||
self.session.get_total_token_usage().ok().flatten()
|
||||
}
|
||||
pub(crate) fn session_file(&self) -> PathBuf {
|
||||
pub(crate) fn session_file(&self) -> Option<PathBuf> {
|
||||
self.session.session_file()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -9,7 +9,7 @@ use crate::eval_suites::{
|
||||
use crate::register_evaluation;
|
||||
use async_trait::async_trait;
|
||||
use goose::message::MessageContent;
|
||||
use mcp_core::role::Role;
|
||||
use rmcp::model::Role;
|
||||
use serde_json::{self, Value};
|
||||
|
||||
#[derive(Debug)]
|
||||
|
||||
@@ -9,7 +9,7 @@ use crate::eval_suites::{
|
||||
use crate::register_evaluation;
|
||||
use async_trait::async_trait;
|
||||
use goose::message::MessageContent;
|
||||
use mcp_core::role::Role;
|
||||
use rmcp::model::Role;
|
||||
use serde_json::{self, Value};
|
||||
|
||||
#[derive(Debug)]
|
||||
|
||||
@@ -9,7 +9,7 @@ use crate::eval_suites::{
|
||||
use crate::register_evaluation;
|
||||
use async_trait::async_trait;
|
||||
use goose::message::MessageContent;
|
||||
use mcp_core::role::Role;
|
||||
use rmcp::model::Role;
|
||||
use serde_json::{self, Value};
|
||||
|
||||
#[derive(Debug)]
|
||||
|
||||
@@ -7,7 +7,7 @@ use crate::eval_suites::{
|
||||
use crate::register_evaluation;
|
||||
use async_trait::async_trait;
|
||||
use goose::message::MessageContent;
|
||||
use mcp_core::role::Role;
|
||||
use rmcp::model::Role;
|
||||
use serde_json::{self, Value};
|
||||
|
||||
#[derive(Debug)]
|
||||
|
||||
@@ -7,7 +7,7 @@ use crate::eval_suites::{
|
||||
use crate::register_evaluation;
|
||||
use async_trait::async_trait;
|
||||
use goose::message::MessageContent;
|
||||
use mcp_core::role::Role;
|
||||
use rmcp::model::Role;
|
||||
use serde_json::{self, Value};
|
||||
|
||||
#[derive(Debug)]
|
||||
|
||||
@@ -7,8 +7,7 @@ use crate::eval_suites::{
|
||||
use crate::register_evaluation;
|
||||
use async_trait::async_trait;
|
||||
use goose::message::MessageContent;
|
||||
use mcp_core::content::Content;
|
||||
use mcp_core::role::Role;
|
||||
use rmcp::model::Role;
|
||||
use serde_json::{self, Value};
|
||||
|
||||
#[derive(Debug)]
|
||||
@@ -68,7 +67,7 @@ impl Evaluation for DeveloperImage {
|
||||
if let Ok(result) = &tool_resp.tool_result {
|
||||
// Check each item in the result list
|
||||
for item in result {
|
||||
if let Content::Image(image) = item {
|
||||
if let Some(image) = item.as_image() {
|
||||
// Image content already contains mime_type and data
|
||||
if image.mime_type.starts_with("image/")
|
||||
&& !image.data.is_empty()
|
||||
|
||||
@@ -9,7 +9,7 @@ use crate::eval_suites::{
|
||||
use crate::register_evaluation;
|
||||
use async_trait::async_trait;
|
||||
use goose::message::MessageContent;
|
||||
use mcp_core::role::Role;
|
||||
use rmcp::model::Role;
|
||||
use serde_json::{self, Value};
|
||||
|
||||
#[derive(Debug)]
|
||||
|
||||
@@ -7,7 +7,7 @@ use crate::eval_suites::{
|
||||
use crate::register_evaluation;
|
||||
use async_trait::async_trait;
|
||||
use goose::message::MessageContent;
|
||||
use mcp_core::role::Role;
|
||||
use rmcp::model::Role;
|
||||
use serde_json::{self, Value};
|
||||
use std::fs;
|
||||
|
||||
|
||||
@@ -7,7 +7,7 @@ use crate::eval_suites::{
|
||||
use crate::register_evaluation;
|
||||
use async_trait::async_trait;
|
||||
use goose::message::MessageContent;
|
||||
use mcp_core::role::Role;
|
||||
use rmcp::model::Role;
|
||||
use serde_json::{self, Value};
|
||||
use std::fs;
|
||||
|
||||
|
||||
@@ -7,7 +7,7 @@ use crate::eval_suites::{
|
||||
use crate::register_evaluation;
|
||||
use async_trait::async_trait;
|
||||
use goose::message::MessageContent;
|
||||
use mcp_core::role::Role;
|
||||
use rmcp::model::Role;
|
||||
use serde_json::{self, Value};
|
||||
|
||||
pub struct SquirrelCensus {}
|
||||
|
||||
@@ -155,8 +155,15 @@ impl EvalRunner {
|
||||
.canonicalize()
|
||||
.context("Failed to canonicalize current directory path")?;
|
||||
|
||||
BenchmarkWorkDir::deep_copy(agent.session_file().as_path(), here.as_path(), false)
|
||||
.context("Failed to copy session file to evaluation directory")?;
|
||||
BenchmarkWorkDir::deep_copy(
|
||||
agent
|
||||
.session_file()
|
||||
.expect("Failed to get session file")
|
||||
.as_path(),
|
||||
here.as_path(),
|
||||
false,
|
||||
)
|
||||
.context("Failed to copy session file to evaluation directory")?;
|
||||
|
||||
tracing::info!("Evaluation completed successfully");
|
||||
} else {
|
||||
|
||||
@@ -7,6 +7,9 @@ license.workspace = true
|
||||
repository.workspace = true
|
||||
description.workspace = true
|
||||
|
||||
[lints]
|
||||
workspace = true
|
||||
|
||||
[[bin]]
|
||||
name = "goose"
|
||||
path = "src/main.rs"
|
||||
@@ -18,16 +21,19 @@ goose-mcp = { path = "../goose-mcp" }
|
||||
mcp-client = { path = "../mcp-client" }
|
||||
mcp-server = { path = "../mcp-server" }
|
||||
mcp-core = { path = "../mcp-core" }
|
||||
rmcp = { workspace = true }
|
||||
clap = { version = "4.4", features = ["derive"] }
|
||||
cliclack = "0.3.5"
|
||||
console = "0.15.8"
|
||||
bat = "0.24.0"
|
||||
anyhow = "1.0"
|
||||
serde_json = "1.0"
|
||||
jsonschema = "0.30.0"
|
||||
tokio = { version = "1.43", features = ["full"] }
|
||||
futures = "0.3"
|
||||
serde = { version = "1.0", features = ["derive"] } # For serialization
|
||||
serde_yaml = "0.9"
|
||||
tempfile = "3"
|
||||
etcetera = "0.8.0"
|
||||
reqwest = { version = "0.12.9", features = [
|
||||
"rustls-tls-native-roots",
|
||||
@@ -52,9 +58,10 @@ shlex = "1.3.0"
|
||||
async-trait = "0.1.86"
|
||||
base64 = "0.22.1"
|
||||
regex = "1.11.1"
|
||||
minijinja = "2.8.0"
|
||||
minijinja = { version = "2.10.2", features = ["loader"] }
|
||||
nix = { version = "0.30.1", features = ["process", "signal"] }
|
||||
tar = "0.4"
|
||||
dirs = "5.0"
|
||||
# Web server dependencies
|
||||
axum = { version = "0.8.1", features = ["ws", "macros"] }
|
||||
tower-http = { version = "0.5", features = ["cors", "fs"] }
|
||||
@@ -62,8 +69,8 @@ tokio-stream = "0.1"
|
||||
bytes = "1.5"
|
||||
http = "1.0"
|
||||
webbrowser = "1.0"
|
||||
|
||||
indicatif = "0.17.11"
|
||||
tokio-util = "0.7.15"
|
||||
|
||||
[target.'cfg(target_os = "windows")'.dependencies]
|
||||
winapi = { version = "0.3", features = ["wincred"] }
|
||||
|
||||
@@ -8,17 +8,19 @@ use crate::commands::configure::handle_configure;
|
||||
use crate::commands::info::handle_info;
|
||||
use crate::commands::mcp::run_server;
|
||||
use crate::commands::project::{handle_project_default, handle_projects_interactive};
|
||||
use crate::commands::recipe::{handle_deeplink, handle_validate};
|
||||
use crate::commands::recipe::{handle_deeplink, handle_list, handle_validate};
|
||||
// Import the new handlers from commands::schedule
|
||||
use crate::commands::schedule::{
|
||||
handle_schedule_add, handle_schedule_list, handle_schedule_remove, handle_schedule_run_now,
|
||||
handle_schedule_add, handle_schedule_cron_help, handle_schedule_list, handle_schedule_remove,
|
||||
handle_schedule_run_now, handle_schedule_services_status, handle_schedule_services_stop,
|
||||
handle_schedule_sessions,
|
||||
};
|
||||
use crate::commands::session::{handle_session_list, handle_session_remove};
|
||||
use crate::logging::setup_logging;
|
||||
use crate::recipes::recipe::{explain_recipe_with_parameters, load_recipe_as_template};
|
||||
use crate::recipes::extract_from_cli::extract_recipe_info_from_cli;
|
||||
use crate::recipes::recipe::{explain_recipe, render_recipe_as_yaml};
|
||||
use crate::session;
|
||||
use crate::session::{build_session, SessionBuilderConfig};
|
||||
use crate::session::{build_session, SessionBuilderConfig, SessionSettings};
|
||||
use goose_bench::bench_config::BenchRunConfig;
|
||||
use goose_bench::runners::bench_runner::BenchRunner;
|
||||
use goose_bench::runners::eval_runner::EvalRunner;
|
||||
@@ -42,7 +44,8 @@ struct Identifier {
|
||||
long,
|
||||
value_name = "NAME",
|
||||
help = "Name for the chat session (e.g., 'project-x')",
|
||||
long_help = "Specify a name for your chat session. When used with --resume, will resume this specific session if it exists."
|
||||
long_help = "Specify a name for your chat session. When used with --resume, will resume this specific session if it exists.",
|
||||
alias = "id"
|
||||
)]
|
||||
name: Option<String>,
|
||||
|
||||
@@ -123,7 +126,11 @@ enum SchedulerCommand {
|
||||
Add {
|
||||
#[arg(long, help = "Unique ID for the job")]
|
||||
id: String,
|
||||
#[arg(long, help = "Cron string for the schedule (e.g., '0 0 * * * *')")]
|
||||
#[arg(
|
||||
long,
|
||||
help = "Cron expression for the schedule",
|
||||
long_help = "Cron expression for when to run the job. Examples:\n '0 * * * *' - Every hour at minute 0\n '0 */2 * * *' - Every 2 hours\n '@hourly' - Every hour (shorthand)\n '0 9 * * *' - Every day at 9:00 AM\n '0 9 * * 1' - Every Monday at 9:00 AM\n '0 0 1 * *' - First day of every month at midnight"
|
||||
)]
|
||||
cron: String,
|
||||
#[arg(
|
||||
long,
|
||||
@@ -155,6 +162,15 @@ enum SchedulerCommand {
|
||||
#[arg(long, help = "ID of the schedule to run")] // Explicitly make it --id
|
||||
id: String,
|
||||
},
|
||||
/// Check status of Temporal services (temporal scheduler only)
|
||||
#[command(about = "Check status of Temporal services")]
|
||||
ServicesStatus {},
|
||||
/// Stop Temporal services (temporal scheduler only)
|
||||
#[command(about = "Stop Temporal services")]
|
||||
ServicesStop {},
|
||||
/// Show cron expression examples and help
|
||||
#[command(about = "Show cron expression examples and help")]
|
||||
CronHelp {},
|
||||
}
|
||||
|
||||
#[derive(Subcommand)]
|
||||
@@ -230,6 +246,27 @@ enum RecipeCommand {
|
||||
)]
|
||||
recipe_name: String,
|
||||
},
|
||||
|
||||
/// List available recipes
|
||||
#[command(about = "List available recipes")]
|
||||
List {
|
||||
/// Output format (text, json)
|
||||
#[arg(
|
||||
long = "format",
|
||||
value_name = "FORMAT",
|
||||
help = "Output format (text, json)",
|
||||
default_value = "text"
|
||||
)]
|
||||
format: String,
|
||||
|
||||
/// Show verbose information including recipe descriptions
|
||||
#[arg(
|
||||
short,
|
||||
long,
|
||||
help = "Show verbose information including recipe descriptions"
|
||||
)]
|
||||
verbose: bool,
|
||||
},
|
||||
}
|
||||
|
||||
#[derive(Subcommand)]
|
||||
@@ -296,6 +333,15 @@ enum Command {
|
||||
)]
|
||||
max_tool_repetitions: Option<u32>,
|
||||
|
||||
/// Maximum number of turns (iterations) allowed in a single response
|
||||
#[arg(
|
||||
long = "max-turns",
|
||||
value_name = "NUMBER",
|
||||
help = "Maximum number of turns allowed without user input (default: 1000)",
|
||||
long_help = "Set a limit on how many turns (iterations) the agent can take without asking for user input to continue."
|
||||
)]
|
||||
max_turns: Option<u32>,
|
||||
|
||||
/// Add stdio extensions with environment variables and commands
|
||||
#[arg(
|
||||
long = "with-extension",
|
||||
@@ -316,6 +362,16 @@ enum Command {
|
||||
)]
|
||||
remote_extensions: Vec<String>,
|
||||
|
||||
/// Add streamable HTTP extensions with a URL
|
||||
#[arg(
|
||||
long = "with-streamable-http-extension",
|
||||
value_name = "URL",
|
||||
help = "Add streamable HTTP extensions (can be specified multiple times)",
|
||||
long_help = "Add streamable HTTP extensions from a URL. Can be specified multiple times. Format: 'url...'",
|
||||
action = clap::ArgAction::Append
|
||||
)]
|
||||
streamable_http_extensions: Vec<String>,
|
||||
|
||||
/// Add builtin extensions by name
|
||||
#[arg(
|
||||
long = "with-builtin",
|
||||
@@ -361,6 +417,16 @@ enum Command {
|
||||
)]
|
||||
input_text: Option<String>,
|
||||
|
||||
/// Additional system prompt to customize agent behavior
|
||||
#[arg(
|
||||
long = "system",
|
||||
value_name = "TEXT",
|
||||
help = "Additional system prompt to customize agent behavior",
|
||||
long_help = "Provide additional system instructions to customize the agent's behavior",
|
||||
conflicts_with = "recipe"
|
||||
)]
|
||||
system: Option<String>,
|
||||
|
||||
/// Recipe name or full path to the recipe file
|
||||
#[arg(
|
||||
short = None,
|
||||
@@ -396,7 +462,7 @@ enum Command {
|
||||
long = "no-session",
|
||||
help = "Run without storing a session file",
|
||||
long_help = "Execute commands without creating or using a session file. Useful for automated runs.",
|
||||
conflicts_with_all = ["resume", "name", "path"]
|
||||
conflicts_with_all = ["resume", "name", "path"]
|
||||
)]
|
||||
no_session: bool,
|
||||
|
||||
@@ -407,6 +473,13 @@ enum Command {
|
||||
)]
|
||||
explain: bool,
|
||||
|
||||
/// Print the rendered recipe instead of running it
|
||||
#[arg(
|
||||
long = "render-recipe",
|
||||
help = "Print the rendered recipe instead of running it."
|
||||
)]
|
||||
render_recipe: bool,
|
||||
|
||||
/// Maximum number of consecutive identical tool calls allowed
#[arg(
long = "max-tool-repetitions",
@@ -416,6 +489,15 @@ enum Command {
)]
max_tool_repetitions: Option<u32>,

/// Maximum number of turns (iterations) allowed in a single response
#[arg(
long = "max-turns",
value_name = "NUMBER",
help = "Maximum number of turns allowed without user input (default: 1000)",
long_help = "Set a limit on how many turns (iterations) the agent can take without asking for user input to continue."
)]
max_turns: Option<u32>,

/// Identifier for this run session
#[command(flatten)]
identifier: Option<Identifier>,
@@ -458,6 +540,16 @@ enum Command {
)]
remote_extensions: Vec<String>,

/// Add streamable HTTP extensions
#[arg(
long = "with-streamable-http-extension",
value_name = "URL",
help = "Add streamable HTTP extensions (can be specified multiple times)",
long_help = "Add streamable HTTP extensions. Can be specified multiple times. Format: 'url...'",
action = clap::ArgAction::Append
)]
streamable_http_extensions: Vec<String>,

/// Add builtin extensions by name
#[arg(
long = "with-builtin",
@@ -467,6 +559,52 @@ enum Command {
value_delimiter = ','
)]
builtins: Vec<String>,

/// Quiet mode - suppress non-response output
#[arg(
short = 'q',
long = "quiet",
help = "Quiet mode. Suppress non-response output, printing only the model response to stdout"
)]
quiet: bool,

/// Scheduled job ID (used internally for scheduled executions)
#[arg(
long = "scheduled-job-id",
value_name = "ID",
help = "ID of the scheduled job that triggered this execution (internal use)",
long_help = "Internal parameter used when this run command is executed by a scheduled job. This associates the session with the schedule for tracking purposes.",
hide = true
)]
scheduled_job_id: Option<String>,

/// Additional sub-recipe file paths
#[arg(
long = "sub-recipe",
value_name = "RECIPE",
help = "Sub-recipe name or file path (can be specified multiple times)",
long_help = "Specify sub-recipes to include alongside the main recipe. Can be:\n - Recipe names from GitHub (if GOOSE_RECIPE_GITHUB_REPO is configured)\n - Local file paths to YAML files\nCan be specified multiple times to include multiple sub-recipes.",
action = clap::ArgAction::Append
)]
additional_sub_recipes: Vec<String>,

/// Provider to use for this run (overrides environment variable)
#[arg(
long = "provider",
value_name = "PROVIDER",
help = "Specify the LLM provider to use (e.g., 'openai', 'anthropic')",
long_help = "Override the GOOSE_PROVIDER environment variable for this run. Available providers include openai, anthropic, ollama, databricks, gemini-cli, claude-code, and others."
)]
provider: Option<String>,

/// Model to use for this run (overrides environment variable)
#[arg(
long = "model",
value_name = "MODEL",
help = "Specify the model to use (e.g., 'gpt-4o', 'claude-3.5-sonnet')",
long_help = "Override the GOOSE_MODEL environment variable for this run. The model must be supported by the specified provider."
)]
model: Option<String>,
},

/// Recipe utilities for validation and deeplinking
@@ -500,13 +638,15 @@ enum Command {
reconfigure: bool,
},

/// Evaluate system configuration across a range of practical tasks
#[command(about = "Evaluate system configuration across a range of practical tasks")]
Bench {
#[command(subcommand)]
cmd: BenchCommand,
},

/// Start a web server with a chat interface
#[command(about = "Start a web server with a chat interface", hide = true)]
#[command(about = "Experimental: Start a web server with a chat interface")]
Web {
/// Port to run the web server on
#[arg(
@@ -538,10 +678,19 @@ enum CliProviderVariant {
Ollama,
}

struct InputConfig {
contents: Option<String>,
extensions_override: Option<Vec<ExtensionConfig>>,
additional_system_prompt: Option<String>,
#[derive(Debug)]
pub struct InputConfig {
pub contents: Option<String>,
pub extensions_override: Option<Vec<ExtensionConfig>>,
pub additional_system_prompt: Option<String>,
}

#[derive(Debug)]
pub struct RecipeInfo {
pub session_settings: Option<SessionSettings>,
pub sub_recipes: Option<Vec<goose::recipe::SubRecipe>>,
pub final_output_response: Option<goose::recipe::Response>,
pub retry_config: Option<goose::agents::types::RetryConfig>,
}

pub async fn cli() -> Result<()> {
@@ -571,8 +720,10 @@ pub async fn cli() -> Result<()> {
history,
debug,
max_tool_repetitions,
max_turns,
extensions,
remote_extensions,
streamable_http_extensions,
builtins,
}) => {
return match command {
@@ -613,15 +764,30 @@ pub async fn cli() -> Result<()> {
no_session: false,
extensions,
remote_extensions,
streamable_http_extensions,
builtins,
extensions_override: None,
additional_system_prompt: None,
settings: None,
provider: None,
model: None,
debug,
max_tool_repetitions,
max_turns,
scheduled_job_id: None,
interactive: true,
quiet: false,
sub_recipes: None,
final_output_response: None,
retry_config: None,
})
.await;
setup_logging(
session.session_file().file_stem().and_then(|s| s.to_str()),
session
.session_file()
.as_ref()
.and_then(|p| p.file_stem())
.and_then(|s| s.to_str()),
None,
)?;
@@ -645,36 +811,47 @@ pub async fn cli() -> Result<()> {
handle_projects_interactive()?;
return Ok(());
}

Some(Command::Run {
instructions,
input_text,
recipe,
system,
interactive,
identifier,
resume,
no_session,
debug,
max_tool_repetitions,
max_turns,
extensions,
remote_extensions,
streamable_http_extensions,
builtins,
params,
explain,
render_recipe,
scheduled_job_id,
quiet,
additional_sub_recipes,
provider,
model,
}) => {
let input_config = match (instructions, input_text, recipe, explain) {
(Some(file), _, _, _) if file == "-" => {
let (input_config, recipe_info) = match (instructions, input_text, recipe) {
(Some(file), _, _) if file == "-" => {
let mut input = String::new();
std::io::stdin()
.read_to_string(&mut input)
.expect("Failed to read from stdin");

InputConfig {
let input_config = InputConfig {
contents: Some(input),
extensions_override: None,
additional_system_prompt: None,
}
additional_system_prompt: system,
};
(input_config, None)
}
(Some(file), _, _, _) => {
(Some(file), _, _) => {
let contents = std::fs::read_to_string(&file).unwrap_or_else(|err| {
eprintln!(
"Instruction file not found — did you mean to use goose run --text?\n{}",
@@ -682,34 +859,38 @@ pub async fn cli() -> Result<()> {
);
std::process::exit(1);
});
InputConfig {
let input_config = InputConfig {
contents: Some(contents),
extensions_override: None,
additional_system_prompt: None,
}
};
(input_config, None)
}
(_, Some(text), _, _) => InputConfig {
contents: Some(text),
extensions_override: None,
additional_system_prompt: None,
},
(_, _, Some(recipe_name), explain) => {
(_, Some(text), _) => {
let input_config = InputConfig {
contents: Some(text),
extensions_override: None,
additional_system_prompt: system,
};
(input_config, None)
}
(_, _, Some(recipe_name)) => {
if explain {
explain_recipe_with_parameters(&recipe_name, params)?;
explain_recipe(&recipe_name, params)?;
return Ok(());
}
let recipe =
load_recipe_as_template(&recipe_name, params).unwrap_or_else(|err| {
if render_recipe {
if let Err(err) = render_recipe_as_yaml(&recipe_name, params) {
eprintln!("{}: {}", console::style("Error").red().bold(), err);
std::process::exit(1);
});
InputConfig {
contents: recipe.prompt,
extensions_override: recipe.extensions,
additional_system_prompt: recipe.instructions,
}
return Ok(());
}
let (input_config, recipe_info) =
extract_recipe_info_from_cli(recipe_name, params, additional_sub_recipes)?;
(input_config, Some(recipe_info))
}
(None, None, None, _) => {
(None, None, None) => {
eprintln!("Error: Must provide either --instructions (-i), --text (-t), or --recipe. Use -i - for stdin.");
std::process::exit(1);
}
@@ -721,16 +902,35 @@ pub async fn cli() -> Result<()> {
no_session,
extensions,
remote_extensions,
streamable_http_extensions,
builtins,
extensions_override: input_config.extensions_override,
additional_system_prompt: input_config.additional_system_prompt,
settings: recipe_info
.as_ref()
.and_then(|r| r.session_settings.clone()),
provider,
model,
debug,
max_tool_repetitions,
max_turns,
scheduled_job_id,
interactive, // Use the interactive flag from the Run command
quiet,
sub_recipes: recipe_info.as_ref().and_then(|r| r.sub_recipes.clone()),
final_output_response: recipe_info
.as_ref()
.and_then(|r| r.final_output_response.clone()),
retry_config: recipe_info.as_ref().and_then(|r| r.retry_config.clone()),
})
.await;

setup_logging(
session.session_file().file_stem().and_then(|s| s.to_str()),
session
.session_file()
.as_ref()
.and_then(|p| p.file_stem())
.and_then(|s| s.to_str()),
None,
)?;

@@ -768,6 +968,15 @@ pub async fn cli() -> Result<()> {
// New arm
handle_schedule_run_now(id).await?;
}
SchedulerCommand::ServicesStatus {} => {
handle_schedule_services_status().await?;
}
SchedulerCommand::ServicesStop {} => {
handle_schedule_services_stop().await?;
}
SchedulerCommand::CronHelp {} => {
handle_schedule_cron_help().await?;
}
}
return Ok(());
}
@@ -807,6 +1016,9 @@ pub async fn cli() -> Result<()> {
RecipeCommand::Deeplink { recipe_name } => {
handle_deeplink(&recipe_name)?;
}
RecipeCommand::List { format, verbose } => {
handle_list(&format, verbose)?;
}
}
return Ok(());
}
@@ -826,15 +1038,30 @@ pub async fn cli() -> Result<()> {
no_session: false,
extensions: Vec::new(),
remote_extensions: Vec::new(),
streamable_http_extensions: Vec::new(),
builtins: Vec::new(),
extensions_override: None,
additional_system_prompt: None,
settings: None::<SessionSettings>,
provider: None,
model: None,
debug: false,
max_tool_repetitions: None,
max_turns: None,
scheduled_job_id: None,
interactive: true, // Default case is always interactive
quiet: false,
sub_recipes: None,
final_output_response: None,
retry_config: None,
})
.await;
setup_logging(
session.session_file().file_stem().and_then(|s| s.to_str()),
session
.session_file()
.as_ref()
.and_then(|p| p.file_stem())
.and_then(|s| s.to_str()),
None,
)?;
if let Err(e) = session.interactive(None).await {

@@ -15,7 +15,7 @@ impl BenchBaseSession for Session {
async fn headless(&mut self, message: String) -> anyhow::Result<()> {
self.headless(message).await
}
fn session_file(&self) -> PathBuf {
fn session_file(&self) -> Option<PathBuf> {
self.session_file()
}
fn message_history(&self) -> Vec<Message> {
@@ -37,11 +37,22 @@ pub async fn agent_generator(
no_session: false,
extensions: requirements.external,
remote_extensions: requirements.remote,
streamable_http_extensions: Vec::new(),
builtins: requirements.builtin,
extensions_override: None,
additional_system_prompt: None,
settings: None,
provider: None,
model: None,
debug: false,
max_tool_repetitions: None,
interactive: false, // Benchmarking is non-interactive
scheduled_job_id: None,
max_turns: None,
quiet: false,
sub_recipes: None,
final_output_response: None,
retry_config: None,
})
.await;
@@ -15,9 +15,9 @@ use goose::config::{
};
use goose::message::Message;
use goose::providers::{create, providers};
use mcp_core::tool::ToolAnnotations;
use mcp_core::Tool;
use serde_json::{json, Value};
use rmcp::model::{Tool, ToolAnnotations};
use rmcp::object;
use serde_json::Value;
use std::collections::HashMap;
use std::error::Error;

@@ -80,6 +80,7 @@ pub async fn handle_configure() -> Result<(), Box<dyn Error>> {
display_name: Some(goose::config::DEFAULT_DISPLAY_NAME.to_string()),
timeout: Some(goose::config::DEFAULT_EXTENSION_TIMEOUT),
bundled: Some(true),
description: None,
},
})?;
}
@@ -351,6 +352,7 @@ pub async fn configure_provider_dialog() -> Result<bool, Box<dyn Error>> {
.map(|m| (m, m.as_str(), ""))
.collect::<Vec<_>>(),
)
.filter_mode() // enable "fuzzy search" filtering for the list of models
.interact()?
.to_string(),
Ok(None) => {
@@ -385,21 +387,21 @@ pub async fn configure_provider_dialog() -> Result<bool, Box<dyn Error>> {
let sample_tool = Tool::new(
"get_weather".to_string(),
"Get current temperature for a given location.".to_string(),
json!({
object!({
"type": "object",
"required": ["location"],
"properties": {
"location": {"type": "string"}
}
}),
Some(ToolAnnotations {
title: Some("Get weather".to_string()),
read_only_hint: true,
destructive_hint: false,
idempotent_hint: false,
open_world_hint: false,
}),
);
)
.annotate(ToolAnnotations {
title: Some("Get weather".to_string()),
read_only_hint: Some(true),
destructive_hint: Some(false),
idempotent_hint: Some(false),
open_world_hint: Some(false),
});
vec![sample_tool]
} else {
vec![]
@@ -409,9 +411,8 @@ pub async fn configure_provider_dialog() -> Result<bool, Box<dyn Error>> {
.complete(
"You are an AI agent called Goose. You use tools of connected extensions to solve problems.",
&messages,
&tools
)
.await;
&tools.into_iter().collect::<Vec<_>>()
).await;

match result {
Ok((_message, _usage)) => {
@@ -442,11 +443,14 @@ pub fn toggle_extensions_dialog() -> Result<(), Box<dyn Error>> {
}

// Create a list of extension names and their enabled status
let extension_status: Vec<(String, bool)> = extensions
let mut extension_status: Vec<(String, bool)> = extensions
.iter()
.map(|entry| (entry.config.name().to_string(), entry.enabled))
.collect();

// Sort extensions alphabetically by name
extension_status.sort_by(|a, b| a.0.cmp(&b.0));

// Get currently enabled extensions for the selection
let enabled_extensions: Vec<&String> = extension_status
.iter()
@@ -494,8 +498,13 @@ pub fn configure_extensions_dialog() -> Result<(), Box<dyn Error>> {
)
.item(
"sse",
"Remote Extension",
"Connect to a remote extension via SSE",
"Remote Extension (SSE)",
"Connect to a remote extension via Server-Sent Events",
)
.item(
"streamable_http",
"Remote Extension (Streaming HTTP)",
"Connect to a remote extension via MCP Streaming HTTP",
)
.interact()?;

@@ -503,21 +512,22 @@ pub fn configure_extensions_dialog() -> Result<(), Box<dyn Error>> {
// TODO we'll want a place to collect all these options, maybe just an enum in goose-mcp
"built-in" => {
let extension = cliclack::select("Which built-in extension would you like to enable?")
.item(
"developer",
"Developer Tools",
"Code editing and shell access",
)
.item(
"computercontroller",
"Computer Controller",
"controls for webscraping, file caching, and automations",
)
.item(
"developer",
"Developer Tools",
"Code editing and shell access",
)
.item(
"googledrive",
"Google Drive",
"Search and read content from google drive - additional config required",
)
.item("jetbrains", "JetBrains", "Connect to jetbrains IDEs")
.item(
"memory",
"Memory",
@@ -528,7 +538,6 @@ pub fn configure_extensions_dialog() -> Result<(), Box<dyn Error>> {
"Tutorial",
"Access interactive tutorials and guides",
)
.item("jetbrains", "JetBrains", "Connect to jetbrains IDEs")
.interact()?
.to_string();

@@ -549,6 +558,7 @@ pub fn configure_extensions_dialog() -> Result<(), Box<dyn Error>> {
display_name: Some(display_name),
timeout: Some(timeout),
bundled: Some(true),
description: None,
},
})?;

@@ -763,6 +773,133 @@ pub fn configure_extensions_dialog() -> Result<(), Box<dyn Error>> {
cliclack::outro(format!("Added {} extension", style(name).green()))?;
|
||||
}
|
||||
"streamable_http" => {
|
||||
let extensions = ExtensionConfigManager::get_all_names()?;
|
||||
let name: String = cliclack::input("What would you like to call this extension?")
|
||||
.placeholder("my-remote-extension")
|
||||
.validate(move |input: &String| {
|
||||
if input.is_empty() {
|
||||
Err("Please enter a name")
|
||||
} else if extensions.contains(input) {
|
||||
Err("An extension with this name already exists")
|
||||
} else {
|
||||
Ok(())
|
||||
}
|
||||
})
|
||||
.interact()?;
|
||||
|
||||
let uri: String = cliclack::input("What is the Streaming HTTP endpoint URI?")
|
||||
.placeholder("http://localhost:8000/messages")
|
||||
.validate(|input: &String| {
|
||||
if input.is_empty() {
|
||||
Err("Please enter a URI")
|
||||
} else if !(input.starts_with("http://") || input.starts_with("https://")) {
|
||||
Err("URI should start with http:// or https://")
|
||||
} else {
|
||||
Ok(())
|
||||
}
|
||||
})
|
||||
.interact()?;
|
||||
|
||||
let timeout: u64 = cliclack::input("Please set the timeout for this tool (in secs):")
|
||||
.placeholder(&goose::config::DEFAULT_EXTENSION_TIMEOUT.to_string())
|
||||
.validate(|input: &String| match input.parse::<u64>() {
|
||||
Ok(_) => Ok(()),
|
||||
Err(_) => Err("Please enter a valid timeout"),
|
||||
})
|
||||
.interact()?;
|
||||
|
||||
let add_desc = cliclack::confirm("Would you like to add a description?").interact()?;
|
||||
|
||||
let description = if add_desc {
|
||||
let desc = cliclack::input("Enter a description for this extension:")
|
||||
.placeholder("Description")
|
||||
.validate(|input: &String| {
|
||||
if input.trim().is_empty() {
|
||||
Err("Please enter a valid description")
|
||||
} else {
|
||||
Ok(())
|
||||
}
|
||||
})
|
||||
.interact()?;
|
||||
Some(desc)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
let add_headers =
|
||||
cliclack::confirm("Would you like to add custom headers?").interact()?;
|
||||
|
||||
let mut headers = HashMap::new();
|
||||
if add_headers {
|
||||
loop {
|
||||
let key: String = cliclack::input("Header name:")
|
||||
.placeholder("Authorization")
|
||||
.interact()?;
|
||||
|
||||
let value: String = cliclack::input("Header value:")
|
||||
.placeholder("Bearer token123")
|
||||
.interact()?;
|
||||
|
||||
headers.insert(key, value);
|
||||
|
||||
if !cliclack::confirm("Add another header?").interact()? {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let add_env = false; // No env prompt for Streaming HTTP
|
||||
|
||||
let mut envs = HashMap::new();
|
||||
let mut env_keys = Vec::new();
|
||||
let config = Config::global();
|
||||
|
||||
if add_env {
|
||||
loop {
|
||||
let key: String = cliclack::input("Environment variable name:")
|
||||
.placeholder("API_KEY")
|
||||
.interact()?;
|
||||
|
||||
let value: String = cliclack::password("Environment variable value:")
|
||||
.mask('▪')
|
||||
.interact()?;
|
||||
|
||||
// Try to store in keychain
|
||||
let keychain_key = key.to_string();
|
||||
match config.set_secret(&keychain_key, Value::String(value.clone())) {
|
||||
Ok(_) => {
|
||||
// Successfully stored in keychain, add to env_keys
|
||||
env_keys.push(keychain_key);
|
||||
}
|
||||
Err(_) => {
|
||||
// Failed to store in keychain, store directly in envs
|
||||
envs.insert(key, value);
|
||||
}
|
||||
}
|
||||
|
||||
if !cliclack::confirm("Add another environment variable?").interact()? {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
ExtensionConfigManager::set(ExtensionEntry {
|
||||
enabled: true,
|
||||
config: ExtensionConfig::StreamableHttp {
|
||||
name: name.clone(),
|
||||
uri,
|
||||
envs: Envs::new(envs),
|
||||
env_keys,
|
||||
headers,
|
||||
description,
|
||||
timeout: Some(timeout),
|
||||
bundled: None,
|
||||
},
|
||||
})?;
|
||||
|
||||
cliclack::outro(format!("Added {} extension", style(name).green()))?;
|
||||
}
|
||||
_ => unreachable!(),
|
||||
};
|
||||
|
||||
@@ -773,11 +910,14 @@ pub fn remove_extension_dialog() -> Result<(), Box<dyn Error>> {
let extensions = ExtensionConfigManager::get_all()?;

// Create a list of extension names and their enabled status
let extension_status: Vec<(String, bool)> = extensions
let mut extension_status: Vec<(String, bool)> = extensions
.iter()
.map(|entry| (entry.config.name().to_string(), entry.enabled))
.collect();

// Sort extensions alphabetically by name
extension_status.sort_by(|a, b| a.0.cmp(&b.0));

if extensions.is_empty() {
cliclack::outro(
"No extensions configured yet. Run configure and add some extensions first.",
@@ -839,6 +979,11 @@ pub async fn configure_settings_dialog() -> Result<(), Box<dyn Error>> {
"Tool Output",
"Show more or less tool output",
)
.item(
"max_turns",
"Max Turns",
"Set maximum number of turns without user input",
)
.item(
"experiment",
"Toggle Experiment",
@@ -849,6 +994,11 @@ pub async fn configure_settings_dialog() -> Result<(), Box<dyn Error>> {
"Goose recipe github repo",
"Goose will pull recipes from this repo if not found locally.",
)
.item(
"scheduler",
"Scheduler Type",
"Choose between built-in cron scheduler or Temporal workflow engine",
)
.interact()?;

match setting_type {
@@ -864,12 +1014,18 @@ pub async fn configure_settings_dialog() -> Result<(), Box<dyn Error>> {
"tool_output" => {
configure_tool_output_dialog()?;
}
"max_turns" => {
configure_max_turns_dialog()?;
}
"experiment" => {
toggle_experiments_dialog()?;
}
"recipe" => {
configure_recipe_dialog()?;
}
"scheduler" => {
configure_scheduler_dialog()?;
}
_ => unreachable!(),
};

@@ -887,7 +1043,7 @@ pub fn configure_goose_mode_dialog() -> Result<(), Box<dyn Error>> {
let mode = cliclack::select("Which Goose mode would you like to configure?")
.item(
"auto",
"Auto Mode",
"Auto Mode",
"Full file modification, extension usage, edit, create and delete files freely"
)
.item(
@@ -1052,6 +1208,9 @@ pub async fn configure_tool_permissions_dialog() -> Result<(), Box<dyn Error>> {
.collect();
extensions.push("platform".to_string());

// Sort extensions alphabetically by name
extensions.sort();

let selected_extension_name = cliclack::select("Choose an extension to configure tools")
.items(
&extensions
@@ -1110,7 +1269,10 @@ pub async fn configure_tool_permissions_dialog() -> Result<(), Box<dyn Error>> {
.map(|tool| {
ToolInfo::new(
&tool.name,
&tool.description,
tool.description
.as_ref()
.map(|d| d.as_ref())
.unwrap_or_default(),
get_parameter_names(&tool),
permission_manager.get_user_permission(&tool.name),
)
@@ -1217,3 +1379,89 @@ fn configure_recipe_dialog() -> Result<(), Box<dyn Error>> {
}
Ok(())
}
fn configure_scheduler_dialog() -> Result<(), Box<dyn Error>> {
let config = Config::global();

// Check if GOOSE_SCHEDULER_TYPE is set as an environment variable
if std::env::var("GOOSE_SCHEDULER_TYPE").is_ok() {
let _ = cliclack::log::info("Notice: GOOSE_SCHEDULER_TYPE environment variable is set and will override the configuration here.");
}

// Get current scheduler type from config for display
let current_scheduler: String = config
.get_param("GOOSE_SCHEDULER_TYPE")
.unwrap_or_else(|_| "legacy".to_string());

println!(
"Current scheduler type: {}",
style(&current_scheduler).cyan()
);

let scheduler_type = cliclack::select("Which scheduler type would you like to use?")
.items(&[
("legacy", "Built-in Cron (Default)", "Uses Goose's built-in cron scheduler. Simple and reliable for basic scheduling needs."),
("temporal", "Temporal", "Uses Temporal workflow engine for advanced scheduling features. Requires Temporal CLI to be installed.")
])
.interact()?;

match scheduler_type {
"legacy" => {
config.set_param("GOOSE_SCHEDULER_TYPE", Value::String("legacy".to_string()))?;
cliclack::outro(
"Set to Built-in Cron scheduler - simple and reliable for basic scheduling",
)?;
}
"temporal" => {
config.set_param(
"GOOSE_SCHEDULER_TYPE",
Value::String("temporal".to_string()),
)?;
cliclack::outro(
"Set to Temporal scheduler - advanced workflow engine for complex scheduling",
)?;
println!();
println!("📋 {}", style("Note:").bold());
println!(" • Temporal scheduler requires Temporal CLI to be installed");
println!(" • macOS: brew install temporal");
println!(" • Linux/Windows: https://github.com/temporalio/cli/releases");
println!(" • If Temporal is unavailable, Goose will automatically fall back to the built-in scheduler");
println!(" • The scheduling engines do not share the list of schedules");
}
_ => unreachable!(),
};

Ok(())
}

pub fn configure_max_turns_dialog() -> Result<(), Box<dyn Error>> {
let config = Config::global();

let current_max_turns: u32 = config.get_param("GOOSE_MAX_TURNS").unwrap_or(1000);

let max_turns_input: String =
cliclack::input("Set maximum number of agent turns without user input:")
.placeholder(&current_max_turns.to_string())
.default_input(&current_max_turns.to_string())
.validate(|input: &String| match input.parse::<u32>() {
Ok(value) => {
if value < 1 {
Err("Value must be at least 1")
} else {
Ok(())
}
}
Err(_) => Err("Please enter a valid number"),
})
.interact()?;

let max_turns: u32 = max_turns_input.parse()?;
config.set_param("GOOSE_MAX_TURNS", Value::from(max_turns))?;

cliclack::outro(format!(
"Set maximum turns to {} - Goose will ask for input after {} consecutive actions",
max_turns, max_turns
))?;

Ok(())
}
@@ -1,7 +1,6 @@
use anyhow::Result;
use goose_mcp::{
ComputerControllerRouter, DeveloperRouter, GoogleDriveRouter, JetBrainsRouter, MemoryRouter,
TutorialRouter,
ComputerControllerRouter, DeveloperRouter, GoogleDriveRouter, MemoryRouter, TutorialRouter,
};
use mcp_server::router::RouterService;
use mcp_server::{BoundedService, ByteTransport, Server};
@@ -26,7 +25,6 @@ pub async fn run_server(name: &str) -> Result<()> {
let router: Option<Box<dyn BoundedService>> = match name {
"developer" => Some(Box::new(RouterService(DeveloperRouter::new()))),
"computercontroller" => Some(Box::new(RouterService(ComputerControllerRouter::new()))),
"jetbrains" => Some(Box::new(RouterService(JetBrainsRouter::new()))),
"google_drive" | "googledrive" => {
let router = GoogleDriveRouter::new().await;
Some(Box::new(RouterService(router)))

@@ -4,6 +4,7 @@ use cliclack::{self, intro, outro};
use std::path::Path;

use crate::project_tracker::ProjectTracker;
use goose::utils::safe_truncate;

/// Format a DateTime for display
fn format_date(date: DateTime<chrono::Utc>) -> String {
@@ -199,11 +200,7 @@ pub fn handle_projects_interactive() -> Result<()> {
.last_instruction
.as_ref()
.map_or(String::new(), |instr| {
let truncated = if instr.len() > 40 {
format!("{}...", &instr[0..37])
} else {
instr.clone()
};
let truncated = safe_truncate(instr, 40);
format!(" [{}]", truncated)
});
@@ -1,8 +1,10 @@
use anyhow::Result;
use base64::Engine;
use console::style;

use crate::recipes::recipe::load_recipe;
use crate::recipes::github_recipe::RecipeSource;
use crate::recipes::recipe::load_recipe_for_validation;
use crate::recipes::search_recipe::list_available_recipes;
use goose::recipe_deeplink;

/// Validates a recipe file
///
@@ -15,7 +17,7 @@ use crate::recipes::recipe::load_recipe;
/// Result indicating success or failure
pub fn handle_validate(recipe_name: &str) -> Result<()> {
// Load and validate the recipe file
match load_recipe(recipe_name) {
match load_recipe_for_validation(recipe_name) {
Ok(_) => {
println!("{} recipe file is valid", style("✓").green().bold());
Ok(())
@@ -36,24 +38,205 @@ pub fn handle_validate(recipe_name: &str) -> Result<()> {
/// # Returns
///
/// Result indicating success or failure
pub fn handle_deeplink(recipe_name: &str) -> Result<()> {
pub fn handle_deeplink(recipe_name: &str) -> Result<String> {
// Load the recipe file first to validate it
match load_recipe(recipe_name) {
Ok(recipe) => {
if let Ok(recipe_json) = serde_json::to_string(&recipe) {
let deeplink = base64::engine::general_purpose::STANDARD.encode(recipe_json);
match load_recipe_for_validation(recipe_name) {
Ok(recipe) => match recipe_deeplink::encode(&recipe) {
Ok(encoded) => {
println!(
"{} Generated deeplink for: {}",
style("✓").green().bold(),
recipe.title
);
println!("goose://recipe?config={}", deeplink);
let full_url = format!("goose://recipe?config={}", encoded);
println!("{}", full_url);
Ok(full_url)
}
Ok(())
}
Err(err) => {
println!(
"{} Failed to encode recipe: {}",
style("✗").red().bold(),
err
);
Err(anyhow::anyhow!("Failed to encode recipe: {}", err))
}
},
Err(err) => {
println!("{} {}", style("✗").red().bold(), err);
Err(err)
}
}
}

/// Lists all available recipes from local paths and GitHub repositories
///
/// # Arguments
///
/// * `format` - Output format ("text" or "json")
/// * `verbose` - Whether to show detailed information
///
/// # Returns
///
/// Result indicating success or failure
pub fn handle_list(format: &str, verbose: bool) -> Result<()> {
let recipes = match list_available_recipes() {
Ok(recipes) => recipes,
Err(e) => {
return Err(anyhow::anyhow!("Failed to list recipes: {}", e));
}
};

match format {
"json" => {
println!("{}", serde_json::to_string(&recipes)?);
}
_ => {
if recipes.is_empty() {
println!("No recipes found");
return Ok(());
} else {
println!("Available recipes:");
for recipe in recipes {
let source_info = match recipe.source {
RecipeSource::Local => format!("local: {}", recipe.path),
RecipeSource::GitHub => format!("github: {}", recipe.path),
};

let description = if let Some(desc) = &recipe.description {
if desc.is_empty() {
"(none)"
} else {
desc
}
} else {
"(none)"
};

let output = format!("{} - {} - {}", recipe.name, description, source_info);
if verbose {
println!(" {}", output);
if let Some(title) = &recipe.title {
println!(" Title: {}", title);
}
println!(" Path: {}", recipe.path);
} else {
println!("{}", output);
}
}
}
}
}
Ok(())
}
#[cfg(test)]
mod tests {
use super::*;
use std::fs;
use tempfile::TempDir;

fn create_test_recipe_file(dir: &TempDir, filename: &str, content: &str) -> String {
let file_path = dir.path().join(filename);
fs::write(&file_path, content).expect("Failed to write test recipe file");
file_path.to_string_lossy().into_owned()
}

const VALID_RECIPE_CONTENT: &str = r#"
title: "Test Recipe with Valid JSON Schema"
description: "A test recipe with valid JSON schema"
prompt: "Test prompt content"
instructions: "Test instructions"
response:
  json_schema:
    type: object
    properties:
      result:
        type: string
        description: "The result"
      count:
        type: number
        description: "A count value"
    required:
      - result
"#;

const INVALID_RECIPE_CONTENT: &str = r#"
title: "Test Recipe"
description: "A test recipe for deeplink generation"
prompt: "Test prompt content {{ name }}"
instructions: "Test instructions"
"#;

const RECIPE_WITH_INVALID_JSON_SCHEMA: &str = r#"
title: "Test Recipe with Invalid JSON Schema"
description: "A test recipe with invalid JSON schema"
prompt: "Test prompt content"
instructions: "Test instructions"
response:
  json_schema:
    type: invalid_type
    properties:
      result:
        type: unknown_type
    required: "should_be_array_not_string"
"#;

#[test]
fn test_handle_deeplink_valid_recipe() {
let temp_dir = TempDir::new().expect("Failed to create temp directory");
let recipe_path =
create_test_recipe_file(&temp_dir, "test_recipe.yaml", VALID_RECIPE_CONTENT);

let result = handle_deeplink(&recipe_path);
assert!(result.is_ok());
let url = result.unwrap();
assert!(url.starts_with("goose://recipe?config="));
let encoded_part = url.strip_prefix("goose://recipe?config=").unwrap();
assert!(encoded_part.len() > 0);
}

#[test]
fn test_handle_deeplink_invalid_recipe() {
let temp_dir = TempDir::new().expect("Failed to create temp directory");
let recipe_path =
create_test_recipe_file(&temp_dir, "test_recipe.yaml", INVALID_RECIPE_CONTENT);
let result = handle_deeplink(&recipe_path);
assert!(result.is_err());
}

#[test]
fn test_handle_validation_valid_recipe() {
let temp_dir = TempDir::new().expect("Failed to create temp directory");
let recipe_path =
create_test_recipe_file(&temp_dir, "test_recipe.yaml", VALID_RECIPE_CONTENT);

let result = handle_validate(&recipe_path);
assert!(result.is_ok());
}

#[test]
fn test_handle_validation_invalid_recipe() {
let temp_dir = TempDir::new().expect("Failed to create temp directory");
let recipe_path =
create_test_recipe_file(&temp_dir, "test_recipe.yaml", INVALID_RECIPE_CONTENT);
let result = handle_validate(&recipe_path);
assert!(result.is_err());
}

#[test]
fn test_handle_validation_recipe_with_invalid_json_schema() {
let temp_dir = TempDir::new().expect("Failed to create temp directory");
let recipe_path = create_test_recipe_file(
&temp_dir,
"test_recipe.yaml",
RECIPE_WITH_INVALID_JSON_SCHEMA,
);

let result = handle_validate(&recipe_path);
assert!(result.is_err());
assert!(result
.unwrap_err()
.to_string()
.contains("JSON schema validation failed"));
}
}
@@ -1,9 +1,11 @@
use anyhow::{bail, Context, Result};
use base64::engine::{general_purpose::STANDARD as BASE64_STANDARD, Engine};
use goose::scheduler::{
get_default_scheduled_recipes_dir, get_default_scheduler_storage_path, ScheduledJob, Scheduler,
get_default_scheduled_recipes_dir, get_default_scheduler_storage_path, ScheduledJob,
SchedulerError,
};
use goose::scheduler_factory::SchedulerFactory;
use goose::temporal_scheduler::TemporalScheduler;
use std::path::Path;

// Base64 decoding function - might be needed if recipe_source_arg can be base64
@@ -15,6 +17,64 @@ async fn _decode_base64_recipe(source: &str) -> Result<String> {
String::from_utf8(bytes).with_context(|| "Decoded Base64 recipe source is not valid UTF-8.")
}

fn validate_cron_expression(cron: &str) -> Result<()> {
// Basic validation and helpful suggestions
if cron.trim().is_empty() {
bail!("Cron expression cannot be empty");
}

// Check for common mistakes and provide helpful suggestions
let parts: Vec<&str> = cron.split_whitespace().collect();

match parts.len() {
5 => {
// Standard 5-field cron (minute hour day month weekday)
println!("✅ Using standard 5-field cron format: {}", cron);
}
6 => {
// 6-field cron with seconds (second minute hour day month weekday)
println!("✅ Using 6-field cron format with seconds: {}", cron);
}
1 if cron.starts_with('@') => {
// Shorthand expressions like @hourly, @daily, etc.
let valid_shorthands = [
"@yearly",
"@annually",
"@monthly",
"@weekly",
"@daily",
"@midnight",
"@hourly",
];
if valid_shorthands.contains(&cron) {
println!("✅ Using cron shorthand: {}", cron);
} else {
println!(
"⚠️ Unknown cron shorthand '{}'. Valid options: {}",
cron,
valid_shorthands.join(", ")
);
}
}
_ => {
println!("⚠️ Unusual cron format detected: '{}'", cron);
println!(" Common formats:");
println!(" - 5 fields: '0 * * * *' (minute hour day month weekday)");
println!(" - 6 fields: '0 0 * * * *' (second minute hour day month weekday)");
println!(" - Shorthand: '@hourly', '@daily', '@weekly', '@monthly'");
}
}

// Provide examples for common scheduling needs
if cron == "* * * * *" {
println!("⚠️ This will run every minute! Did you mean:");
println!(" - '0 * * * *' for every hour?");
println!(" - '0 0 * * *' for every day?");
}

Ok(())
}

pub async fn handle_schedule_add(
id: String,
cron: String,
@@ -25,6 +85,9 @@ pub async fn handle_schedule_add(
id, cron, recipe_source_arg
);

// Validate cron expression and provide helpful feedback
validate_cron_expression(&cron)?;

// The Scheduler's add_scheduled_job will handle copying the recipe from recipe_source_arg
// to its internal storage and validating the path.
let job = ScheduledJob {
@@ -36,11 +99,12 @@ pub async fn handle_schedule_add(
paused: false,
current_session_id: None,
process_start_time: None,
execution_mode: Some("background".to_string()), // Default to background for CLI
};

let scheduler_storage_path =
get_default_scheduler_storage_path().context("Failed to get scheduler storage path")?;
let scheduler = Scheduler::new(scheduler_storage_path)
let scheduler = SchedulerFactory::create(scheduler_storage_path)
.await
.context("Failed to initialize scheduler")?;

@@ -85,19 +149,28 @@ pub async fn handle_schedule_add(
pub async fn handle_schedule_list() -> Result<()> {
let scheduler_storage_path =
get_default_scheduler_storage_path().context("Failed to get scheduler storage path")?;
let scheduler = Scheduler::new(scheduler_storage_path)
let scheduler = SchedulerFactory::create(scheduler_storage_path)
.await
.context("Failed to initialize scheduler")?;

let jobs = scheduler.list_scheduled_jobs().await;
let jobs = scheduler.list_scheduled_jobs().await?;
if jobs.is_empty() {
println!("No scheduled jobs found.");
} else {
println!("Scheduled Jobs:");
for job in jobs {
let status = if job.currently_running {
"🟢 RUNNING"
} else if job.paused {
"⏸️ PAUSED"
} else {
"⏹️ IDLE"
};

println!(
"- ID: {}\n Cron: {}\n Recipe Source (in store): {}\n Last Run: {}",
"- ID: {}\n Status: {}\n Cron: {}\n Recipe Source (in store): {}\n Last Run: {}",
job.id,
status,
job.cron,
job.source, // This source is now the path within scheduled_recipes_dir
job.last_run
@@ -111,7 +184,7 @@ pub async fn handle_schedule_list() -> Result<()> {
pub async fn handle_schedule_remove(id: String) -> Result<()> {
let scheduler_storage_path =
get_default_scheduler_storage_path().context("Failed to get scheduler storage path")?;
let scheduler = Scheduler::new(scheduler_storage_path)
let scheduler = SchedulerFactory::create(scheduler_storage_path)
.await
.context("Failed to initialize scheduler")?;

@@ -133,7 +206,7 @@ pub async fn handle_schedule_remove(id: String) -> Result<()> {
pub async fn handle_schedule_sessions(id: String, limit: Option<u32>) -> Result<()> {
let scheduler_storage_path =
get_default_scheduler_storage_path().context("Failed to get scheduler storage path")?;
let scheduler = Scheduler::new(scheduler_storage_path)
let scheduler = SchedulerFactory::create(scheduler_storage_path)
.await
.context("Failed to initialize scheduler")?;

@@ -166,7 +239,7 @@ pub async fn handle_schedule_sessions(id: String, limit: Option<u32>) -> Result<
pub async fn handle_schedule_run_now(id: String) -> Result<()> {
let scheduler_storage_path =
get_default_scheduler_storage_path().context("Failed to get scheduler storage path")?;
let scheduler = Scheduler::new(scheduler_storage_path)
let scheduler = SchedulerFactory::create(scheduler_storage_path)
.await
.context("Failed to initialize scheduler")?;

@@ -186,3 +259,138 @@ pub async fn handle_schedule_run_now(id: String) -> Result<()> {
}
Ok(())
}

pub async fn handle_schedule_services_status() -> Result<()> {
// Check if we're using temporal scheduler
let scheduler_type =
std::env::var("GOOSE_SCHEDULER_TYPE").unwrap_or_else(|_| "temporal".to_string());

if scheduler_type != "temporal" {
println!("Service management is only available for temporal scheduler.");
println!("Set GOOSE_SCHEDULER_TYPE=temporal to use Temporal services.");
return Ok(());
}

println!("Checking Temporal services status...");

// Create a temporary TemporalScheduler to check status
match TemporalScheduler::new().await {
Ok(scheduler) => {
let info = scheduler.get_service_info().await;
println!("{}", info);
}
Err(e) => {
println!("❌ Failed to check services: {}", e);
println!();
println!("💡 This might mean:");
println!(" • Temporal CLI is not installed");
println!(" • temporal-service binary is not available");
println!(" • Services are not running");
println!();
println!("🔧 To fix this:");
println!(" 1. Install Temporal CLI:");
println!(" macOS: brew install temporal");
println!(" Linux/Windows: https://github.com/temporalio/cli/releases");
println!(" 2. Or use legacy scheduler: export GOOSE_SCHEDULER_TYPE=legacy");
}
}

Ok(())
}

pub async fn handle_schedule_services_stop() -> Result<()> {
// Check if we're using temporal scheduler
let scheduler_type =
std::env::var("GOOSE_SCHEDULER_TYPE").unwrap_or_else(|_| "temporal".to_string());

if scheduler_type != "temporal" {
println!("Service management is only available for temporal scheduler.");
println!("Set GOOSE_SCHEDULER_TYPE=temporal to use Temporal services.");
return Ok(());
}

println!("Stopping Temporal services...");

// Create a temporary TemporalScheduler to stop services
match TemporalScheduler::new().await {
Ok(scheduler) => match scheduler.stop_services().await {
Ok(result) => {
println!("{}", result);
println!("\nNote: Services were running independently and have been stopped.");
println!("They will be automatically restarted when needed.");
}
Err(e) => {
println!("Failed to stop services: {}", e);
}
},
Err(e) => {
println!("Failed to initialize scheduler: {}", e);
println!("Services may not be running or may have already been stopped.");
}
}

Ok(())
}
pub async fn handle_schedule_cron_help() -> Result<()> {
println!("📅 Cron Expression Guide for Goose Scheduler");
println!("===========================================\n");

println!("🕐 HOURLY SCHEDULES (Most Common Request):");
println!(" 0 * * * * - Every hour at minute 0 (e.g., 1:00, 2:00, 3:00...)");
println!(" 30 * * * * - Every hour at minute 30 (e.g., 1:30, 2:30, 3:30...)");
println!(" 0 */2 * * * - Every 2 hours at minute 0 (e.g., 2:00, 4:00, 6:00...)");
println!(" 0 */3 * * * - Every 3 hours at minute 0 (e.g., 3:00, 6:00, 9:00...)");
println!(" @hourly - Every hour (same as \"0 * * * *\")\n");

println!("📅 DAILY SCHEDULES:");
println!(" 0 9 * * * - Every day at 9:00 AM");
println!(" 30 14 * * * - Every day at 2:30 PM");
println!(" 0 0 * * * - Every day at midnight");
println!(" @daily - Every day at midnight\n");

println!("📆 WEEKLY SCHEDULES:");
println!(" 0 9 * * 1 - Every Monday at 9:00 AM");
println!(" 0 17 * * 5 - Every Friday at 5:00 PM");
println!(" 0 0 * * 0 - Every Sunday at midnight");
println!(" @weekly - Every Sunday at midnight\n");

println!("🗓️ MONTHLY SCHEDULES:");
println!(" 0 9 1 * * - First day of every month at 9:00 AM");
println!(" 0 0 15 * * - 15th of every month at midnight");
println!(" @monthly - First day of every month at midnight\n");

println!("📝 CRON FORMAT:");
println!(" Standard 5-field: minute hour day month weekday");
println!(" ┌───────────── minute (0 - 59)");
println!(" │ ┌─────────── hour (0 - 23)");
println!(" │ │ ┌───────── day of month (1 - 31)");
println!(" │ │ │ ┌─────── month (1 - 12)");
println!(" │ │ │ │ ┌───── day of week (0 - 7, Sunday = 0 or 7)");
println!(" │ │ │ │ │");
println!(" * * * * *\n");

println!("🔧 SPECIAL CHARACTERS:");
println!(" * - Any value (every minute, hour, day, etc.)");
println!(" */n - Every nth interval (*/5 = every 5 minutes)");
println!(" n-m - Range (1-5 = 1,2,3,4,5)");
println!(" n,m - List (1,3,5 = 1 or 3 or 5)\n");

println!("⚡ SHORTHAND EXPRESSIONS:");
println!(" @yearly - Once a year (0 0 1 1 *)");
println!(" @monthly - Once a month (0 0 1 * *)");
println!(" @weekly - Once a week (0 0 * * 0)");
println!(" @daily - Once a day (0 0 * * *)");
println!(" @hourly - Once an hour (0 * * * *)\n");

println!("💡 EXAMPLES:");
println!(
" goose schedule add --id hourly-report --cron \"0 * * * *\" --recipe-source report.yaml"
);
println!(
" goose schedule add --id daily-backup --cron \"@daily\" --recipe-source backup.yaml"
);
println!(" goose schedule add --id weekly-summary --cron \"0 9 * * 1\" --recipe-source summary.yaml");

Ok(())
}
@@ -1,8 +1,9 @@
use crate::session::message_to_markdown;
use anyhow::{Context, Result};
use cliclack::{confirm, multiselect, select};
use goose::session::info::{get_session_info, SessionInfo, SortOrder};
use goose::session::info::{get_valid_sorted_sessions, SessionInfo, SortOrder};
use goose::session::{self, Identifier};
use goose::utils::safe_truncate;
use regex::Regex;
use std::fs;
use std::path::{Path, PathBuf};
@@ -50,11 +51,7 @@ fn prompt_interactive_session_removal(sessions: &[SessionInfo]) -> Result<Vec<Se
} else {
&s.metadata.description
};
let truncated_desc = if desc.len() > TRUNCATED_DESC_LENGTH {
format!("{}...", &desc[..TRUNCATED_DESC_LENGTH - 3])
} else {
desc.to_string()
};
let truncated_desc = safe_truncate(desc, TRUNCATED_DESC_LENGTH);
let display_text = format!("{} - {} ({})", s.modified, truncated_desc, s.id);
(display_text, s.clone())
})
@@ -75,7 +72,7 @@ fn prompt_interactive_session_removal(sessions: &[SessionInfo]) -> Result<Vec<Se
}

pub fn handle_session_remove(id: Option<String>, regex_string: Option<String>) -> Result<()> {
let all_sessions = match get_session_info(SortOrder::Descending) {
let all_sessions = match get_valid_sorted_sessions(SortOrder::Descending) {
Ok(sessions) => sessions,
Err(e) => {
tracing::error!("Failed to retrieve sessions: {:?}", e);
@@ -125,7 +122,7 @@ pub fn handle_session_list(verbose: bool, format: String, ascending: bool) -> Re
SortOrder::Descending
};

let sessions = match get_session_info(sort_order) {
let sessions = match get_valid_sorted_sessions(sort_order) {
Ok(sessions) => sessions,
Err(e) => {
tracing::error!("Failed to list sessions: {:?}", e);
@@ -175,7 +172,12 @@ pub fn handle_session_list(verbose: bool, format: String, ascending: bool) -> Re
/// without creating an Agent or prompting about working directories.
pub fn handle_session_export(identifier: Identifier, output_path: Option<PathBuf>) -> Result<()> {
// Get the session file path
let session_file_path = goose::session::get_path(identifier.clone());
let session_file_path = match goose::session::get_path(identifier.clone()) {
Ok(path) => path,
Err(e) => {
return Err(anyhow::anyhow!("Invalid session identifier: {}", e));
}
};

if !session_file_path.exists() {
return Err(anyhow::anyhow!(
@@ -239,7 +241,7 @@ fn export_session_to_markdown(

for message in &messages {
// Check if this is a User message containing only ToolResponses
let is_only_tool_response = message.role == mcp_core::role::Role::User
let is_only_tool_response = message.role == rmcp::model::Role::User
&& message
.content
.iter()
@@ -261,8 +263,8 @@ fn export_session_to_markdown(
// Output the role prefix except for tool response-only messages
if !is_only_tool_response {
let role_prefix = match message.role {
mcp_core::role::Role::User => "### User:\n",
mcp_core::role::Role::Assistant => "### Assistant:\n",
rmcp::model::Role::User => "### User:\n",
rmcp::model::Role::Assistant => "### Assistant:\n",
};
markdown_output.push_str(role_prefix);
}
@@ -289,7 +291,7 @@ fn export_session_to_markdown(
/// Shows a list of available sessions and lets the user select one
pub fn prompt_interactive_session_selection() -> Result<session::Identifier> {
// Get sessions sorted by modification date (newest first)
let sessions = match get_session_info(SortOrder::Descending) {
let sessions = match get_valid_sorted_sessions(SortOrder::Descending) {
Ok(sessions) => sessions,
Err(e) => {
tracing::error!("Failed to list sessions: {:?}", e);
@@ -315,11 +317,7 @@ pub fn prompt_interactive_session_selection() -> Result<session::Identifier> {
};

// Truncate description if too long
let truncated_desc = if desc.len() > 40 {
format!("{}...", &desc[..37])
} else {
desc.to_string()
};
let truncated_desc = safe_truncate(desc, 40);

let display_text = format!("{} - {} ({})", s.modified, truncated_desc, s.id);
(display_text, s.clone())
@@ -199,7 +199,17 @@ async fn serve_static(axum::extract::Path(path): axum::extract::Path<String>) ->
|
||||
include_str!("../../static/script.js"),
|
||||
)
|
||||
.into_response(),
|
||||
_ => (axum::http::StatusCode::NOT_FOUND, "Not found").into_response(),
|
||||
"img/logo_dark.png" => (
|
||||
[("content-type", "image/png")],
|
||||
include_bytes!("../../../../documentation/static/img/logo_dark.png").to_vec(),
|
||||
)
|
||||
.into_response(),
|
||||
"img/logo_light.png" => (
|
||||
[("content-type", "image/png")],
|
||||
include_bytes!("../../../../documentation/static/img/logo_light.png").to_vec(),
|
||||
)
|
||||
.into_response(),
|
||||
_ => (http::StatusCode::NOT_FOUND, "Not found").into_response(),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -215,14 +225,15 @@ async fn list_sessions() -> Json<serde_json::Value> {
|
||||
Ok(sessions) => {
|
||||
let session_info: Vec<serde_json::Value> = sessions
|
||||
.into_iter()
|
||||
.map(|(name, path)| {
|
||||
let metadata = session::read_metadata(&path).unwrap_or_default();
|
||||
serde_json::json!({
|
||||
"name": name,
|
||||
"path": path,
|
||||
"description": metadata.description,
|
||||
"message_count": metadata.message_count,
|
||||
"working_dir": metadata.working_dir
|
||||
.filter_map(|(name, path)| {
|
||||
session::read_metadata(&path).ok().map(|metadata| {
|
||||
serde_json::json!({
|
||||
"name": name,
|
||||
"path": path,
|
||||
"description": metadata.description,
|
||||
"message_count": metadata.message_count,
|
||||
"working_dir": metadata.working_dir
|
||||
})
|
||||
})
|
||||
})
|
||||
.collect();
|
||||
@@ -236,23 +247,33 @@ async fn list_sessions() -> Json<serde_json::Value> {
|
||||
})),
|
||||
}
|
||||
}
|
||||
|
||||
async fn get_session(
|
||||
axum::extract::Path(session_id): axum::extract::Path<String>,
|
||||
) -> Json<serde_json::Value> {
|
||||
let session_file = session::get_path(session::Identifier::Name(session_id));
|
||||
let session_file = match session::get_path(session::Identifier::Name(session_id)) {
|
||||
Ok(path) => path,
|
||||
Err(e) => {
|
||||
return Json(serde_json::json!({
|
||||
"error": format!("Invalid session ID: {}", e)
|
||||
}));
|
||||
}
|
||||
};
|
||||
|
||||
let error_response = |e: Box<dyn std::error::Error>| {
|
||||
Json(serde_json::json!({
|
||||
"error": e.to_string()
|
||||
}))
|
||||
};
|
||||
|
||||
match session::read_messages(&session_file) {
|
||||
Ok(messages) => {
|
||||
let metadata = session::read_metadata(&session_file).unwrap_or_default();
|
||||
Json(serde_json::json!({
|
||||
Ok(messages) => match session::read_metadata(&session_file) {
|
||||
Ok(metadata) => Json(serde_json::json!({
|
||||
"metadata": metadata,
|
||||
"messages": messages
|
||||
}))
|
||||
}
|
||||
Err(e) => Json(serde_json::json!({
|
||||
"error": e.to_string()
|
||||
})),
|
||||
})),
|
||||
Err(e) => error_response(e.into()),
|
||||
},
|
||||
Err(e) => error_response(e.into()),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -278,8 +299,15 @@ async fn handle_socket(socket: WebSocket, state: AppState) {
|
||||
..
|
||||
}) => {
|
||||
// Get session file path from session_id
|
||||
let session_file =
|
||||
session::get_path(session::Identifier::Name(session_id.clone()));
|
||||
let session_file = match session::get_path(session::Identifier::Name(
|
||||
session_id.clone(),
|
||||
)) {
|
||||
Ok(path) => path,
|
||||
Err(e) => {
|
||||
tracing::error!("Failed to get session path: {}", e);
|
||||
continue;
|
||||
}
|
||||
};
|
||||
|
||||
// Get or create session in memory (for fast access during processing)
|
||||
let session_messages = {

@@ -447,17 +475,25 @@ async fn process_message_streaming(
    }

    let provider = provider.unwrap();
    session::persist_messages(&session_file, &messages, Some(provider.clone())).await?;
    let working_dir = Some(std::env::current_dir()?);
    session::persist_messages(
        &session_file,
        &messages,
        Some(provider.clone()),
        working_dir.clone(),
    )
    .await?;

    // Create a session config
    let session_config = SessionConfig {
        id: session::Identifier::Path(session_file.clone()),
        working_dir: std::env::current_dir()?,
        schedule_id: None,
        execution_mode: None,
        max_turns: None,
        retry_config: None,
    };

    // Get response from agent
    match agent.reply(&messages, Some(session_config)).await {
    match agent.reply(&messages, Some(session_config), None).await {
        Ok(mut stream) => {
            while let Some(result) = stream.next().await {
                match result {
@@ -473,7 +509,13 @@ async fn process_message_streaming(
                    let session_msgs = session_messages.lock().await;
                    session_msgs.clone()
                };
                session::persist_messages(&session_file, &current_messages, None).await?;
                session::persist_messages(
                    &session_file,
                    &current_messages,
                    None,
                    working_dir.clone(),
                )
                .await?;
                // Handle different message content types
                for content in &message.content {
                    match content {
@@ -589,6 +631,11 @@ async fn process_message_streaming(
                    // For now, we'll just log them
                    tracing::info!("Received MCP notification in web interface");
                }
                Ok(AgentEvent::ModelChange { model, mode }) => {
                    // Log model change
                    tracing::info!("Model changed to {} in {} mode", model, mode);
                }

                Err(e) => {
                    error!("Error in message stream: {}", e);
                    let mut sender = sender.lock().await;
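
As the hunks above show, `session::persist_messages` now takes the working directory as a fourth argument. A hedged sketch of a call site, with `session_file` and `messages` assumed to be in scope:

// capture the working directory once and reuse it for every persist call
let working_dir = Some(std::env::current_dir()?);
session::persist_messages(&session_file, &messages, None, working_dir.clone()).await?;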

@@ -5,8 +5,10 @@ pub mod commands;
pub mod logging;
pub mod project_tracker;
pub mod recipes;
pub mod scenario_tests;
pub mod session;
pub mod signal;

// Re-export commonly used types
pub use session::Session;

@@ -180,9 +180,9 @@ fn setup_logging_internal(
mod tests {
    use super::*;
    use chrono::TimeZone;
    use rand;
    use std::env;
    use tempfile::TempDir;
    use test_case::test_case;

    fn setup_temp_home() -> TempDir {
        let temp_dir = TempDir::new().unwrap();
@@ -209,30 +209,41 @@ mod tests {
    }

    #[tokio::test]
    #[test_case(Some("test_session"), true ; "with session name and error capture")]
    #[test_case(Some("test_session"), false ; "with session name without error capture")]
    #[test_case(None, false ; "without session name")]
    async fn test_log_file_name(session_name: Option<&str>, _with_error_capture: bool) {
        // Create a unique test directory for each test
        let test_name = session_name.unwrap_or("no_session");
        let random_suffix = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .unwrap()
            .subsec_nanos();
        let test_dir = PathBuf::from(format!(
            "/tmp/goose_test_home_{}_{}",
            test_name, random_suffix
        ));
        if test_dir.exists() {
            fs::remove_dir_all(&test_dir).unwrap();
        }
        fs::create_dir_all(&test_dir).unwrap();
    async fn test_log_file_name_session_with_error_capture() {
        do_test_log_file_name(Some("test_session_with_error"), true).await;
    }

    #[tokio::test]
    async fn test_log_file_name_session_without_error_capture() {
        do_test_log_file_name(Some("test_session_without_error"), false).await;
    }

    #[tokio::test]
    async fn test_log_file_name_no_session() {
        do_test_log_file_name(None, false).await;
    }

    async fn do_test_log_file_name(session_name: Option<&str>, _with_error_capture: bool) {
        use tempfile::TempDir;

        // Create a unique prefix to avoid test interference
        let test_id = format!(
            "{}_{}",
            session_name.unwrap_or("no_session"),
            rand::random::<u32>()
        );

        // Create a proper temporary directory that will be automatically cleaned up
        let _temp_dir = TempDir::with_prefix(&format!("goose_test_{}_", test_id)).unwrap();
        let test_dir = _temp_dir.path();

        // Set up environment
        if cfg!(windows) {
            env::set_var("USERPROFILE", &test_dir);
            env::set_var("USERPROFILE", test_dir);
        } else {
            env::set_var("HOME", &test_dir);
            env::set_var("HOME", test_dir);
            // Also set TMPDIR to prevent temp directory sharing between tests
            env::set_var("TMPDIR", test_dir);
        }

        // Create error capture if needed - but don't use it in tests to avoid tokio runtime issues
@@ -243,8 +254,10 @@ mod tests {
        println!("Before timestamp: {}", before_timestamp);

        // Get the log directory and clean any existing log files
        let random_suffix = rand::random::<u32>() % 100000000;
        let log_dir = get_log_directory_with_date(Some(format!("test-{}", random_suffix))).unwrap();
        println!("Log directory: {}", log_dir.display());
        println!("Test directory: {}", test_dir.display());
        if log_dir.exists() {
            for entry in fs::read_dir(&log_dir).unwrap() {
                let entry = entry.unwrap();
@@ -421,10 +434,8 @@ mod tests {
        // Wait a moment to ensure all files are written
        std::thread::sleep(std::time::Duration::from_millis(100));

        // Clean up test directory
        fs::remove_dir_all(&test_dir).unwrap_or_else(|e| {
            println!("Warning: Failed to clean up test directory: {}", e);
        });
        // Keep _temp_dir alive until the end so it doesn't get cleaned up prematurely
        drop(_temp_dir);
    }

    #[tokio::test]

247
crates/goose-cli/src/recipes/extract_from_cli.rs
Normal file
@@ -0,0 +1,247 @@
use std::path::PathBuf;

use anyhow::{anyhow, Result};
use goose::recipe::SubRecipe;

use crate::recipes::print_recipe::print_recipe_info;
use crate::recipes::recipe::load_recipe;
use crate::recipes::search_recipe::retrieve_recipe_file;
use crate::{
    cli::{InputConfig, RecipeInfo},
    session::SessionSettings,
};

pub fn extract_recipe_info_from_cli(
    recipe_name: String,
    params: Vec<(String, String)>,
    additional_sub_recipes: Vec<String>,
) -> Result<(InputConfig, RecipeInfo)> {
    let recipe = load_recipe(&recipe_name, params.clone()).unwrap_or_else(|err| {
        eprintln!("{}: {}", console::style("Error").red().bold(), err);
        std::process::exit(1);
    });
    print_recipe_info(&recipe, params);
    let mut all_sub_recipes = recipe.sub_recipes.clone().unwrap_or_default();
    if !additional_sub_recipes.is_empty() {
        for sub_recipe_name in additional_sub_recipes {
            match retrieve_recipe_file(&sub_recipe_name) {
                Ok(recipe_file) => {
                    let name = extract_recipe_name(&sub_recipe_name);
                    let recipe_file_path = recipe_file.file_path;
                    let additional_sub_recipe = SubRecipe {
                        path: recipe_file_path.to_string_lossy().to_string(),
                        name,
                        values: None,
                        sequential_when_repeated: true,
                        description: None,
                    };
                    all_sub_recipes.push(additional_sub_recipe);
                }
                Err(e) => {
                    return Err(anyhow!(
                        "Could not retrieve sub-recipe '{}': {}",
                        sub_recipe_name,
                        e
                    ));
                }
            }
        }
    }
    let input_config = InputConfig {
        contents: recipe.prompt.filter(|s| !s.trim().is_empty()),
        extensions_override: recipe.extensions,
        additional_system_prompt: recipe.instructions,
    };

    let recipe_info = RecipeInfo {
        session_settings: recipe.settings.map(|s| SessionSettings {
            goose_provider: s.goose_provider,
            goose_model: s.goose_model,
            temperature: s.temperature,
        }),
        sub_recipes: Some(all_sub_recipes),
        final_output_response: recipe.response,
        retry_config: recipe.retry,
    };

    Ok((input_config, recipe_info))
}

fn extract_recipe_name(recipe_identifier: &str) -> String {
    // If it's a path (contains / or \), extract the file stem
    if recipe_identifier.contains('/') || recipe_identifier.contains('\\') {
        PathBuf::from(recipe_identifier)
            .file_stem()
            .and_then(|s| s.to_str())
            .unwrap_or("unknown")
            .to_string()
    } else {
        // If it's just a name (like "weekly-updates"), use it directly
        recipe_identifier.to_string()
    }
}

#[cfg(test)]
mod tests {
    use std::path::PathBuf;

    use tempfile::TempDir;

    use super::*;

    #[test]
    fn test_extract_recipe_info_from_cli_basic() {
        let (_temp_dir, recipe_path) = create_recipe();
        let params = vec![("name".to_string(), "my_value".to_string())];
        let recipe_name = recipe_path.to_str().unwrap().to_string();

        let (input_config, recipe_info) =
            extract_recipe_info_from_cli(recipe_name, params, Vec::new()).unwrap();
        let settings = recipe_info.session_settings;
        let sub_recipes = recipe_info.sub_recipes;
        let response = recipe_info.final_output_response;

        assert_eq!(input_config.contents, Some("test_prompt".to_string()));
        assert_eq!(
            input_config.additional_system_prompt,
            Some("test_instructions my_value".to_string())
        );
        assert!(input_config.extensions_override.is_none());

        assert!(settings.is_some());
        let settings = settings.unwrap();
        assert_eq!(settings.goose_provider, Some("test_provider".to_string()));
        assert_eq!(settings.goose_model, Some("test_model".to_string()));
        assert_eq!(settings.temperature, Some(0.7));

        assert!(sub_recipes.is_some());
        let sub_recipes = sub_recipes.unwrap();
        assert!(sub_recipes.len() == 1);
        assert_eq!(sub_recipes[0].path, "existing_sub_recipe.yaml".to_string());
        assert_eq!(sub_recipes[0].name, "existing_sub_recipe".to_string());
        assert!(sub_recipes[0].values.is_none());
        assert!(response.is_some());
        let response = response.unwrap();
        assert_eq!(
            response.json_schema,
            Some(serde_json::json!({
                "type": "object",
                "properties": {
                    "result": {"type": "string"}
                }
            }))
        );
    }

    #[test]
    fn test_extract_recipe_info_from_cli_with_additional_sub_recipes() {
        let (_temp_dir, recipe_path) = create_recipe();

        // Create actual sub-recipe files in the temp directory
        std::fs::create_dir_all(_temp_dir.path().join("path/to")).unwrap();
        std::fs::create_dir_all(_temp_dir.path().join("another")).unwrap();

        let sub_recipe1_path = _temp_dir.path().join("path/to/sub_recipe1.yaml");
        let sub_recipe2_path = _temp_dir.path().join("another/sub_recipe2.yaml");

        std::fs::write(&sub_recipe1_path, "title: Sub Recipe 1").unwrap();
        std::fs::write(&sub_recipe2_path, "title: Sub Recipe 2").unwrap();

        let params = vec![("name".to_string(), "my_value".to_string())];
        let recipe_name = recipe_path.to_str().unwrap().to_string();
        let additional_sub_recipes = vec![
            sub_recipe1_path.to_string_lossy().to_string(),
            sub_recipe2_path.to_string_lossy().to_string(),
        ];

        let (input_config, recipe_info) =
            extract_recipe_info_from_cli(recipe_name, params, additional_sub_recipes).unwrap();
        let settings = recipe_info.session_settings;
        let sub_recipes = recipe_info.sub_recipes;
        let response = recipe_info.final_output_response;

        assert_eq!(input_config.contents, Some("test_prompt".to_string()));
        assert_eq!(
            input_config.additional_system_prompt,
            Some("test_instructions my_value".to_string())
        );
        assert!(input_config.extensions_override.is_none());

        assert!(settings.is_some());
        let settings = settings.unwrap();
        assert_eq!(settings.goose_provider, Some("test_provider".to_string()));
        assert_eq!(settings.goose_model, Some("test_model".to_string()));
        assert_eq!(settings.temperature, Some(0.7));

        assert!(sub_recipes.is_some());
        let sub_recipes = sub_recipes.unwrap();
        assert!(sub_recipes.len() == 3);
        assert_eq!(sub_recipes[0].path, "existing_sub_recipe.yaml".to_string());
        assert_eq!(sub_recipes[0].name, "existing_sub_recipe".to_string());
        assert!(sub_recipes[0].values.is_none());
        assert_eq!(
            sub_recipes[1].path,
            sub_recipe1_path
                .canonicalize()
                .unwrap()
                .to_string_lossy()
                .to_string()
        );
        assert_eq!(sub_recipes[1].name, "sub_recipe1".to_string());
        assert!(sub_recipes[1].values.is_none());
        assert_eq!(
            sub_recipes[2].path,
            sub_recipe2_path
                .canonicalize()
                .unwrap()
                .to_string_lossy()
                .to_string()
        );
        assert_eq!(sub_recipes[2].name, "sub_recipe2".to_string());
        assert!(sub_recipes[2].values.is_none());
        assert!(response.is_some());
        let response = response.unwrap();
        assert_eq!(
            response.json_schema,
            Some(serde_json::json!({
                "type": "object",
                "properties": {
                    "result": {"type": "string"}
                }
            }))
        );
    }

    fn create_recipe() -> (TempDir, PathBuf) {
        let test_recipe_content = r#"
title: test_recipe
description: A test recipe
instructions: test_instructions {{name}}
prompt: test_prompt
parameters:
- key: name
  description: name
  input_type: string
  requirement: required
settings:
  goose_provider: test_provider
  goose_model: test_model
  temperature: 0.7
sub_recipes:
- path: existing_sub_recipe.yaml
  name: existing_sub_recipe
response:
  json_schema:
    type: object
    properties:
      result:
        type: string
"#;
        let temp_dir = tempfile::tempdir().unwrap();
        let recipe_path: std::path::PathBuf = temp_dir.path().join("test_recipe.yaml");

        std::fs::write(&recipe_path, test_recipe_content).unwrap();
        let canonical_recipe_path = recipe_path.canonicalize().unwrap();
        (temp_dir, canonical_recipe_path)
    }
}
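
A quick illustration of how `extract_recipe_name` resolves the two input shapes; the values shown follow from the logic above rather than from recorded output:

assert_eq!(extract_recipe_name("path/to/weekly-updates.yaml"), "weekly-updates"); // path: file stem
assert_eq!(extract_recipe_name("weekly-updates"), "weekly-updates"); // bare name: used as-is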

@@ -1,5 +1,10 @@
use anyhow::Result;
use anyhow::{anyhow, Result};
use console::style;
use goose::recipe::template_recipe::parse_recipe_content;
use serde::{Deserialize, Serialize};

use crate::recipes::recipe::RECIPE_FILE_EXTENSIONS;
use goose::recipe::read_recipe_file_content::RecipeFile;
use std::env;
use std::fs;
use std::path::Path;
@@ -8,13 +13,26 @@ use std::process::Command;
use std::process::Stdio;
use tar::Archive;

use crate::recipes::recipe::RECIPE_FILE_EXTENSIONS;
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RecipeInfo {
    pub name: String,
    pub source: RecipeSource,
    pub path: String,
    pub title: Option<String>,
    pub description: Option<String>,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum RecipeSource {
    Local,
    GitHub,
}

pub const GOOSE_RECIPE_GITHUB_REPO_CONFIG_KEY: &str = "GOOSE_RECIPE_GITHUB_REPO";
pub fn retrieve_recipe_from_github(
    recipe_name: &str,
    recipe_repo_full_name: &str,
) -> Result<(String, PathBuf)> {
) -> Result<RecipeFile> {
    println!(
        "📦 Looking for recipe \"{}\" in github repo: {}",
        recipe_name, recipe_repo_full_name
@@ -26,7 +44,13 @@ pub fn retrieve_recipe_from_github(
    for attempt in 1..=max_attempts {
        match clone_and_download_recipe(recipe_name, recipe_repo_full_name) {
            Ok(download_dir) => match read_recipe_file(&download_dir) {
                Ok(content) => return Ok((content, download_dir)),
                Ok((content, recipe_file_local_path)) => {
                    return Ok(RecipeFile {
                        content,
                        parent_dir: download_dir.clone(),
                        file_path: recipe_file_local_path,
                    })
                }
                Err(err) => return Err(err),
            },
            Err(err) => {
@@ -47,7 +71,7 @@ fn clean_cloned_dirs(recipe_repo_full_name: &str) -> anyhow::Result<()> {
    }
    Ok(())
}
fn read_recipe_file(download_dir: &Path) -> Result<String> {
fn read_recipe_file(download_dir: &Path) -> Result<(String, PathBuf)> {
    for ext in RECIPE_FILE_EXTENSIONS {
        let candidate_file_path = download_dir.join(format!("recipe.{}", ext));
        if candidate_file_path.exists() {
@@ -59,7 +83,7 @@ fn read_recipe_file(download_dir: &Path) -> Result<String> {
                .unwrap()
                .display()
            );
            return Ok(content);
            return Ok((content, candidate_file_path));
        }
    }

@@ -76,7 +100,7 @@ fn clone_and_download_recipe(recipe_name: &str, recipe_repo_full_name: &str) ->
    get_folder_from_github(&local_repo_path, recipe_name)
}

fn ensure_gh_authenticated() -> Result<()> {
pub fn ensure_gh_authenticated() -> Result<()> {
    // Check authentication status
    let status = Command::new("gh")
        .args(["auth", "status"])
@@ -190,3 +214,133 @@ fn list_files(dir: &Path) -> Result<()> {
    }
    Ok(())
}

/// Lists all available recipes from a GitHub repository
pub fn list_github_recipes(repo: &str) -> Result<Vec<RecipeInfo>> {
    discover_github_recipes(repo)
}

fn discover_github_recipes(repo: &str) -> Result<Vec<RecipeInfo>> {
    use serde_json::Value;
    use std::process::Command;

    // Ensure GitHub CLI is authenticated
    ensure_gh_authenticated()?;

    // Get repository contents using GitHub CLI
    let output = Command::new("gh")
        .args(["api", &format!("repos/{}/contents", repo)])
        .output()
        .map_err(|e| anyhow!("Failed to fetch repository contents using 'gh api' command (executed when GOOSE_RECIPE_GITHUB_REPO is configured). This requires GitHub CLI (gh) to be installed and authenticated. Error: {}", e))?;

    if !output.status.success() {
        let error_msg = String::from_utf8_lossy(&output.stderr);
        return Err(anyhow!("GitHub API request failed: {}", error_msg));
    }

    let contents: Value = serde_json::from_slice(&output.stdout)
        .map_err(|e| anyhow!("Failed to parse GitHub API response: {}", e))?;

    let mut recipes = Vec::new();

    if let Some(items) = contents.as_array() {
        for item in items {
            if let (Some(name), Some(item_type)) = (
                item.get("name").and_then(|n| n.as_str()),
                item.get("type").and_then(|t| t.as_str()),
            ) {
                if item_type == "dir" {
                    // Check if this directory contains a recipe file
                    if let Ok(recipe_info) = check_github_directory_for_recipe(repo, name) {
                        recipes.push(recipe_info);
                    }
                }
            }
        }
    }

    Ok(recipes)
}

fn check_github_directory_for_recipe(repo: &str, dir_name: &str) -> Result<RecipeInfo> {
    use serde_json::Value;
    use std::process::Command;

    // Check directory contents for recipe files
    let output = Command::new("gh")
        .args(["api", &format!("repos/{}/contents/{}", repo, dir_name)])
        .output()
        .map_err(|e| anyhow!("Failed to check directory contents: {}", e))?;

    if !output.status.success() {
        return Err(anyhow!("Failed to access directory: {}", dir_name));
    }

    let contents: Value = serde_json::from_slice(&output.stdout)
        .map_err(|e| anyhow!("Failed to parse directory contents: {}", e))?;

    if let Some(items) = contents.as_array() {
        for item in items {
            if let Some(name) = item.get("name").and_then(|n| n.as_str()) {
                if RECIPE_FILE_EXTENSIONS
                    .iter()
                    .any(|ext| name == format!("recipe.{}", ext))
                {
                    // Found a recipe file, get its content
                    return get_github_recipe_info(repo, dir_name, name);
                }
            }
        }
    }

    Err(anyhow!("No recipe file found in directory: {}", dir_name))
}

fn get_github_recipe_info(repo: &str, dir_name: &str, recipe_filename: &str) -> Result<RecipeInfo> {
    use serde_json::Value;
    use std::process::Command;

    // Get the recipe file content
    let output = Command::new("gh")
        .args([
            "api",
            &format!("repos/{}/contents/{}/{}", repo, dir_name, recipe_filename),
        ])
        .output()
        .map_err(|e| anyhow!("Failed to get recipe file content: {}", e))?;

    if !output.status.success() {
        return Err(anyhow!(
            "Failed to access recipe file: {}/{}",
            dir_name,
            recipe_filename
        ));
    }

    let file_info: Value = serde_json::from_slice(&output.stdout)
        .map_err(|e| anyhow!("Failed to parse file info: {}", e))?;

    if let Some(content_b64) = file_info.get("content").and_then(|c| c.as_str()) {
        // Decode base64 content
        use base64::{engine::general_purpose, Engine as _};
        let content_bytes = general_purpose::STANDARD
            .decode(content_b64.replace('\n', ""))
            .map_err(|e| anyhow!("Failed to decode base64 content: {}", e))?;

        let content = String::from_utf8(content_bytes)
            .map_err(|e| anyhow!("Failed to convert content to string: {}", e))?;

        // Parse the recipe content
        let (recipe, _) = parse_recipe_content(&content, format!("{}/{}", repo, dir_name))?;

        return Ok(RecipeInfo {
            name: dir_name.to_string(),
            source: RecipeSource::GitHub,
            path: format!("{}/{}", repo, dir_name),
            title: Some(recipe.title),
            description: Some(recipe.description),
        });
    }

    Err(anyhow!("Failed to get recipe content from GitHub"))
}
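
A hedged usage sketch for the new listing API; it shells out to the `gh` CLI, so `gh` must be installed and authenticated, and the repository name below is a placeholder:

let recipes = list_github_recipes("my-org/goose-recipes")?; // hypothetical repo
for r in &recipes {
    println!("{} - {}", r.name, r.title.as_deref().unwrap_or("(untitled)"));
}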

@@ -1,3 +1,4 @@
pub mod extract_from_cli;
pub mod github_recipe;
pub mod print_recipe;
pub mod recipe;

@@ -1,9 +1,7 @@
use std::collections::HashMap;

use console::style;
use goose::recipe::Recipe;

use crate::recipes::recipe::BUILT_IN_RECIPE_DIR_PARAM;
use goose::recipe::{Recipe, BUILT_IN_RECIPE_DIR_PARAM};

pub fn print_recipe_explanation(recipe: &Recipe) {
    println!(
@@ -81,3 +79,18 @@ pub fn missing_parameters_command_line(missing_params: Vec<String>) -> String {
        .collect::<Vec<_>>()
        .join(" ")
}

pub fn print_recipe_info(recipe: &Recipe, params: Vec<(String, String)>) {
    println!(
        "{} {}",
        style("Loading recipe:").green().bold(),
        style(&recipe.title).green()
    );
    println!("{} {}", style("Description:").bold(), &recipe.description);

    if !params.is_empty() {
        println!("{}", style("Parameters used to load this recipe:").bold());
        print_parameters_with_values(params.into_iter().collect());
    }
    println!();
}
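
A minimal call of the helper above, assuming a `Recipe` value is already loaded; it prints the title, description, and any parameters used:

print_recipe_info(&recipe, vec![("name".to_string(), "my_value".to_string())]);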

@@ -1,532 +1,155 @@
use anyhow::Result;
use console::style;

use crate::recipes::print_recipe::{
    missing_parameters_command_line, print_parameters_with_values, print_recipe_explanation,
    missing_parameters_command_line, print_recipe_explanation,
    print_required_parameters_for_template,
};
use crate::recipes::search_recipe::retrieve_recipe_file;
use goose::recipe::{Recipe, RecipeParameter, RecipeParameterRequirement};
use minijinja::{Environment, Error, Template, UndefinedBehavior};
use serde_json::Value as JsonValue;
use serde_yaml::Value as YamlValue;
use std::collections::{HashMap, HashSet};
use std::path::PathBuf;
use anyhow::Result;
use goose::recipe::build_recipe::{
    apply_values_to_parameters, build_recipe_from_template, validate_recipe_parameters, RecipeError,
};
use goose::recipe::read_recipe_file_content::RecipeFile;
use goose::recipe::template_recipe::render_recipe_for_preview;
use goose::recipe::Recipe;
use std::collections::HashMap;

pub const BUILT_IN_RECIPE_DIR_PARAM: &str = "recipe_dir";
pub const RECIPE_FILE_EXTENSIONS: &[&str] = &["yaml", "json"];
/// Loads, validates a recipe from a YAML or JSON file, and renders it with the given parameters
///
/// # Arguments
///
/// * `path` - Path to the recipe file (YAML or JSON)
/// * `params` - parameters to render the recipe with
///
/// # Returns
///
/// The rendered recipe if successful
///
/// # Errors
///
/// Returns an error if:
/// - Recipe is not valid
/// - The required fields are missing
pub fn load_recipe_as_template(recipe_name: &str, params: Vec<(String, String)>) -> Result<Recipe> {
    let (recipe_file_content, recipe_parent_dir) = retrieve_recipe_file(recipe_name)?;

    let recipe = validate_recipe_file_parameters(&recipe_file_content)?;
fn create_user_prompt_callback() -> impl Fn(&str, &str) -> Result<String> {
    |key: &str, description: &str| -> Result<String> {
        let input_value =
            cliclack::input(format!("Please enter {} ({})", key, description)).interact()?;
        Ok(input_value)
    }
}

    let (params_for_template, missing_params) =
        apply_values_to_parameters(&params, recipe.parameters, recipe_parent_dir, true)?;
    if !missing_params.is_empty() {
        return Err(anyhow::anyhow!(
fn load_recipe_file_with_dir(recipe_name: &str) -> Result<(RecipeFile, String)> {
    let recipe_file = retrieve_recipe_file(recipe_name)?;
    let recipe_dir_str = recipe_file
        .parent_dir
        .to_str()
        .ok_or_else(|| anyhow::anyhow!("Error getting recipe directory"))?
        .to_string();
    Ok((recipe_file, recipe_dir_str))
}

pub fn load_recipe(recipe_name: &str, params: Vec<(String, String)>) -> Result<Recipe> {
    let recipe_file = retrieve_recipe_file(recipe_name)?;
    match build_recipe_from_template(recipe_file, params, Some(create_user_prompt_callback())) {
        Ok(recipe) => Ok(recipe),
        Err(RecipeError::MissingParams { parameters }) => Err(anyhow::anyhow!(
            "Please provide the following parameters in the command line: {}",
            missing_parameters_command_line(missing_params)
        ));
            missing_parameters_command_line(parameters)
        )),
        Err(e) => Err(anyhow::anyhow!(e.to_string())),
    }
}

pub fn render_recipe_as_yaml(recipe_name: &str, params: Vec<(String, String)>) -> Result<()> {
    let recipe = load_recipe(recipe_name, params)?;
    match serde_yaml::to_string(&recipe) {
        Ok(yaml_content) => {
            println!("{}", yaml_content);
            Ok(())
        }
        Err(_) => {
            eprintln!("Failed to serialize recipe to YAML");
            std::process::exit(1);
        }
    }
}

pub fn load_recipe_for_validation(recipe_name: &str) -> Result<Recipe> {
    let (recipe_file, recipe_dir_str) = load_recipe_file_with_dir(recipe_name)?;
    let recipe_file_content = &recipe_file.content;
    validate_recipe_parameters(recipe_file_content, &recipe_dir_str)?;
    let recipe = render_recipe_for_preview(
        recipe_file_content,
        recipe_dir_str.to_string(),
        &HashMap::new(),
    )?;

    if let Some(response) = &recipe.response {
        if let Some(json_schema) = &response.json_schema {
            validate_json_schema(json_schema)?;
        }
    }

    let rendered_content = render_content_with_params(&recipe_file_content, &params_for_template)?;

    let recipe = parse_recipe_content(&rendered_content)?;

    // Display information about the loaded recipe
    println!(
        "{} {}",
        style("Loading recipe:").green().bold(),
        style(&recipe.title).green()
    );
    println!("{} {}", style("Description:").bold(), &recipe.description);

    if !params_for_template.is_empty() {
        println!("{}", style("Parameters used to load this recipe:").bold());
        print_parameters_with_values(params_for_template);
    }
    println!();
    Ok(recipe)
}

/// Loads and validates a recipe from a YAML or JSON file
///
/// # Arguments
///
/// * `path` - Path to the recipe file (YAML or JSON)
/// * `params` - optional parameters to render the recipe with
///
/// # Returns
///
/// The parsed recipe struct if successful
///
/// # Errors
///
/// Returns an error if:
/// - The file doesn't exist
/// - The file can't be read
/// - The YAML/JSON is invalid
/// - The parameter definition does not match the template variables in the recipe file
pub fn load_recipe(recipe_name: &str) -> Result<Recipe> {
    let (recipe_file_content, _) = retrieve_recipe_file(recipe_name)?;
pub fn explain_recipe(recipe_name: &str, params: Vec<(String, String)>) -> Result<()> {
    let (recipe_file, recipe_dir_str) = load_recipe_file_with_dir(recipe_name)?;
    let recipe_file_content = &recipe_file.content;
    let recipe_parameters = validate_recipe_parameters(recipe_file_content, &recipe_dir_str)?;

    validate_recipe_file_parameters(&recipe_file_content)
}

pub fn explain_recipe_with_parameters(
    recipe_name: &str,
    params: Vec<(String, String)>,
) -> Result<()> {
    let (recipe_file_content, recipe_parent_dir) = retrieve_recipe_file(recipe_name)?;

    let raw_recipe = validate_recipe_file_parameters(&recipe_file_content)?;
    print_recipe_explanation(&raw_recipe);
    let recipe_parameters = raw_recipe.parameters;
    let (params_for_template, missing_params) =
        apply_values_to_parameters(&params, recipe_parameters, recipe_parent_dir, false)?;
    let (params_for_template, missing_params) = apply_values_to_parameters(
        &params,
        recipe_parameters,
        &recipe_dir_str,
        None::<fn(&str, &str) -> Result<String>>,
    )?;
    let recipe = render_recipe_for_preview(
        recipe_file_content,
        recipe_dir_str.to_string(),
        &params_for_template,
    )?;
    print_recipe_explanation(&recipe);
    print_required_parameters_for_template(params_for_template, missing_params);

    Ok(())
}

fn validate_recipe_file_parameters(recipe_file_content: &str) -> Result<Recipe> {
    let recipe_from_recipe_file: Recipe = parse_recipe_content(recipe_file_content)?;
    validate_optional_parameters(&recipe_from_recipe_file)?;
    validate_parameters_in_template(&recipe_from_recipe_file.parameters, recipe_file_content)?;
    Ok(recipe_from_recipe_file)
}

fn validate_parameters_in_template(
    recipe_parameters: &Option<Vec<RecipeParameter>>,
    recipe_file_content: &str,
) -> Result<()> {
    let mut template_variables = extract_template_variables(recipe_file_content)?;
    template_variables.remove(BUILT_IN_RECIPE_DIR_PARAM);

    let param_keys: HashSet<String> = recipe_parameters
        .as_ref()
        .unwrap_or(&vec![])
        .iter()
        .map(|p| p.key.clone())
        .collect();

    let missing_keys = template_variables
        .difference(&param_keys)
        .collect::<Vec<_>>();

    let extra_keys = param_keys
        .difference(&template_variables)
        .collect::<Vec<_>>();

    if missing_keys.is_empty() && extra_keys.is_empty() {
        return Ok(());
fn validate_json_schema(schema: &serde_json::Value) -> Result<()> {
    match jsonschema::validator_for(schema) {
        Ok(_) => Ok(()),
        Err(err) => Err(anyhow::anyhow!("JSON schema validation failed: {}", err)),
    }

    let mut message = String::new();

    if !missing_keys.is_empty() {
        message.push_str(&format!(
            "Missing definitions for parameters in the recipe file: {}.",
            missing_keys
                .iter()
                .map(|s| s.to_string())
                .collect::<Vec<_>>()
                .join(", ")
        ));
    }

    if !extra_keys.is_empty() {
        message.push_str(&format!(
            "\nUnnecessary parameter definitions: {}.",
            extra_keys
                .iter()
                .map(|s| s.to_string())
                .collect::<Vec<_>>()
                .join(", ")
        ));
    }
    Err(anyhow::anyhow!("{}", message.trim_end()))
}

fn validate_optional_parameters(recipe: &Recipe) -> Result<()> {
    let optional_params_without_default_values: Vec<String> = recipe
        .parameters
        .as_ref()
        .unwrap_or(&vec![])
        .iter()
        .filter(|p| {
            matches!(p.requirement, RecipeParameterRequirement::Optional) && p.default.is_none()
        })
        .map(|p| p.key.clone())
        .collect();

    if optional_params_without_default_values.is_empty() {
        Ok(())
    } else {
        Err(anyhow::anyhow!("Optional parameters missing default values in the recipe: {}. Please provide defaults.", optional_params_without_default_values.join(", ")))
    }
}

fn parse_recipe_content(content: &str) -> Result<Recipe> {
    if serde_json::from_str::<JsonValue>(content).is_ok() {
        Ok(serde_json::from_str(content)?)
    } else if serde_yaml::from_str::<YamlValue>(content).is_ok() {
        Ok(serde_yaml::from_str(content)?)
    } else {
        Err(anyhow::anyhow!(
            "Unsupported file format for recipe file. Expected .yaml or .json"
        ))
    }
}

fn extract_template_variables(template_str: &str) -> Result<HashSet<String>> {
    let mut env = Environment::new();
    env.set_undefined_behavior(UndefinedBehavior::Strict);

    let template = env
        .template_from_str(template_str)
        .map_err(|e: Error| anyhow::anyhow!("Invalid template syntax: {}", e.to_string()))?;

    Ok(template.undeclared_variables(true))
}

fn apply_values_to_parameters(
    user_params: &[(String, String)],
    recipe_parameters: Option<Vec<RecipeParameter>>,
    recipe_parent_dir: PathBuf,
    enable_user_prompt: bool,
) -> Result<(HashMap<String, String>, Vec<String>)> {
    let mut param_map: HashMap<String, String> = user_params.iter().cloned().collect();
    let recipe_parent_dir_str = recipe_parent_dir
        .to_str()
        .ok_or_else(|| anyhow::anyhow!("Invalid UTF-8 in recipe_dir"))?;
    param_map.insert(
        BUILT_IN_RECIPE_DIR_PARAM.to_string(),
        recipe_parent_dir_str.to_string(),
    );
    let mut missing_params: Vec<String> = Vec::new();
    for param in recipe_parameters.unwrap_or_default() {
        if !param_map.contains_key(&param.key) {
            match (&param.default, &param.requirement) {
                (Some(default), _) => param_map.insert(param.key.clone(), default.clone()),
                (None, RecipeParameterRequirement::UserPrompt) if enable_user_prompt => {
                    let input_value = cliclack::input(format!(
                        "Please enter {} ({})",
                        param.key, param.description
                    ))
                    .interact()?;
                    param_map.insert(param.key.clone(), input_value)
                }
                _ => {
                    missing_params.push(param.key.clone());
                    None
                }
            };
        }
    }
    Ok((param_map, missing_params))
}

fn render_content_with_params(content: &str, params: &HashMap<String, String>) -> Result<String> {
    // Create a minijinja environment and context
    let mut env = minijinja::Environment::new();
    env.set_undefined_behavior(UndefinedBehavior::Strict);
    let template: Template<'_, '_> = env
        .template_from_str(content)
        .map_err(|e: Error| anyhow::anyhow!("Invalid template syntax: {}", e.to_string()))?;

    // Render the template with the parameters
    template.render(params).map_err(|e: Error| {
        anyhow::anyhow!(
            "Failed to render the recipe {} - please check if all required parameters are provided",
            e.to_string()
        )
    })
}

#[cfg(test)]
mod tests {
    use std::path::PathBuf;

    use goose::recipe::{RecipeParameterInputType, RecipeParameterRequirement};
    use tempfile::TempDir;

    use super::*;
    use crate::recipes::recipe::load_recipe;

    fn setup_recipe_file(instructions_and_parameters: &str) -> (TempDir, PathBuf) {
        let recipe_content = format!(
            r#"{{
    "version": "1.0.0",
    "title": "Test Recipe",
    "description": "A test recipe",
    {}
}}"#,
            instructions_and_parameters
        );
        // Create a temporary file
        let temp_dir = tempfile::tempdir().unwrap();
        let recipe_path: std::path::PathBuf = temp_dir.path().join("test_recipe.json");
        std::fs::write(&recipe_path, recipe_content).unwrap();
        (temp_dir, recipe_path)
    }
    mod load_recipe {
        use super::*;
        #[test]
        fn test_load_recipe_success() {
            let recipe_content = r#"{
                "version": "1.0.0",
                "title": "Test Recipe",
                "description": "A test recipe",
                "instructions": "Test instructions with {{ my_name }}",
                "parameters": [
                    {
                        "key": "my_name",
                        "input_type": "string",
                        "requirement": "required",
                        "description": "A test parameter"
                    }
                ]
            }"#;
            let temp_dir = tempfile::tempdir().unwrap();
            let recipe_path = temp_dir.path().join("test_recipe.json");
            std::fs::write(&recipe_path, recipe_content).unwrap();

    #[test]
    fn test_render_content_with_params() {
        // Test basic parameter substitution
        let content = "Hello {{ name }}!";
        let mut params = HashMap::new();
        params.insert("name".to_string(), "World".to_string());
        let result = render_content_with_params(content, &params).unwrap();
        assert_eq!(result, "Hello World!");
            let params = vec![("my_name".to_string(), "value".to_string())];
            let recipe = load_recipe(recipe_path.to_str().unwrap(), params).unwrap();

        // Test empty parameter substitution
        let content = "Hello {{ empty }}!";
        let mut params = HashMap::new();
        params.insert("empty".to_string(), "".to_string());
        let result = render_content_with_params(content, &params).unwrap();
        assert_eq!(result, "Hello !");

        // Test multiple parameters
        let content = "{{ greeting }} {{ name }}!";
        let mut params = HashMap::new();
        params.insert("greeting".to_string(), "Hi".to_string());
        params.insert("name".to_string(), "Alice".to_string());
        let result = render_content_with_params(content, &params).unwrap();
        assert_eq!(result, "Hi Alice!");

        // Test missing parameter results in error
        let content = "Hello {{ missing }}!";
        let params = HashMap::new();
        let err = render_content_with_params(content, &params).unwrap_err();
        assert!(err
            .to_string()
            .contains("please check if all required parameters"));

        // Test invalid template syntax results in error
        let content = "Hello {{ unclosed";
        let params = HashMap::new();
        let err = render_content_with_params(content, &params).unwrap_err();
        assert!(err.to_string().contains("Invalid template syntax"));
    }

    #[test]
    fn test_load_recipe_as_template_success() {
        let instructions_and_parameters = r#"
    "instructions": "Test instructions with {{ my_name }}",
    "parameters": [
        {
            "key": "my_name",
            "input_type": "string",
            "requirement": "required",
            "description": "A test parameter"
        }
    ]"#;

        let (_temp_dir, recipe_path) = setup_recipe_file(instructions_and_parameters);

        let params = vec![("my_name".to_string(), "value".to_string())];
        let recipe = load_recipe_as_template(recipe_path.to_str().unwrap(), params).unwrap();

        assert_eq!(recipe.title, "Test Recipe");
        assert_eq!(recipe.description, "A test recipe");
        assert_eq!(recipe.instructions.unwrap(), "Test instructions with value");
        // Verify parameters match recipe definition
        assert_eq!(recipe.parameters.as_ref().unwrap().len(), 1);
        let param = &recipe.parameters.as_ref().unwrap()[0];
        assert_eq!(param.key, "my_name");
        assert!(matches!(param.input_type, RecipeParameterInputType::String));
        assert!(matches!(
            param.requirement,
            RecipeParameterRequirement::Required
        ));
        assert_eq!(param.description, "A test parameter");
    }

    #[test]
    fn test_load_recipe_as_template_success_variable_in_prompt() {
        let instructions_and_parameters = r#"
    "instructions": "Test instructions",
    "prompt": "My prompt {{ my_name }}",
    "parameters": [
        {
            "key": "my_name",
            "input_type": "string",
            "requirement": "required",
            "description": "A test parameter"
        }
    ]"#;

        let (_temp_dir, recipe_path) = setup_recipe_file(instructions_and_parameters);

        let params = vec![("my_name".to_string(), "value".to_string())];
        let recipe = load_recipe_as_template(recipe_path.to_str().unwrap(), params).unwrap();

        assert_eq!(recipe.title, "Test Recipe");
        assert_eq!(recipe.description, "A test recipe");
        assert_eq!(recipe.instructions.unwrap(), "Test instructions");
        assert_eq!(recipe.prompt.unwrap(), "My prompt value");
        let param = &recipe.parameters.as_ref().unwrap()[0];
        assert_eq!(param.key, "my_name");
        assert!(matches!(param.input_type, RecipeParameterInputType::String));
        assert!(matches!(
            param.requirement,
            RecipeParameterRequirement::Required
        ));
        assert_eq!(param.description, "A test parameter");
    }

    #[test]
    fn test_load_recipe_as_template_wrong_parameters_in_recipe_file() {
        let instructions_and_parameters = r#"
    "instructions": "Test instructions with {{ expected_param1 }} {{ expected_param2 }}",
    "parameters": [
        {
            "key": "wrong_param_key",
            "input_type": "string",
            "requirement": "required",
            "description": "A test parameter"
        }
    ]"#;
        let (_temp_dir, recipe_path) = setup_recipe_file(instructions_and_parameters);

        let load_recipe_result = load_recipe_as_template(recipe_path.to_str().unwrap(), Vec::new());
        assert!(load_recipe_result.is_err());
        let err = load_recipe_result.unwrap_err();
        println!("{}", err.to_string());
        assert!(err
            .to_string()
            .contains("Unnecessary parameter definitions: wrong_param_key."));
        assert!(err
            .to_string()
            .contains("Missing definitions for parameters in the recipe file:"));
        assert!(err.to_string().contains("expected_param1"));
        assert!(err.to_string().contains("expected_param2"));
    }

    #[test]
    fn test_load_recipe_as_template_with_default_values_in_recipe_file() {
        let instructions_and_parameters = r#"
    "instructions": "Test instructions with {{ param_with_default }} {{ param_without_default }}",
    "parameters": [
        {
            "key": "param_with_default",
            "input_type": "string",
            "requirement": "optional",
            "default": "my_default_value",
            "description": "A test parameter"
        },
        {
            "key": "param_without_default",
            "input_type": "string",
            "requirement": "required",
            "description": "A test parameter"
        }
    ]"#;
        let (_temp_dir, recipe_path) = setup_recipe_file(instructions_and_parameters);
        let params = vec![("param_without_default".to_string(), "value1".to_string())];

        let recipe = load_recipe_as_template(recipe_path.to_str().unwrap(), params).unwrap();

        assert_eq!(recipe.title, "Test Recipe");
        assert_eq!(recipe.description, "A test recipe");
        assert_eq!(
            recipe.instructions.unwrap(),
            "Test instructions with my_default_value value1"
        );
    }

    #[test]
    fn test_load_recipe_as_template_optional_parameters_with_empty_default_values_in_recipe_file() {
        let instructions_and_parameters = r#"
    "instructions": "Test instructions with {{ optional_param }}",
    "parameters": [
        {
            "key": "optional_param",
            "input_type": "string",
            "requirement": "optional",
            "description": "A test parameter",
            "default": "",
        }
    ]"#;
        let (_temp_dir, recipe_path) = setup_recipe_file(instructions_and_parameters);

        let recipe = load_recipe_as_template(recipe_path.to_str().unwrap(), Vec::new()).unwrap();
        assert_eq!(recipe.title, "Test Recipe");
        assert_eq!(recipe.description, "A test recipe");
        assert_eq!(recipe.instructions.unwrap(), "Test instructions with ");
    }

    #[test]
    fn test_load_recipe_as_template_optional_parameters_without_default_values_in_recipe_file() {
        let instructions_and_parameters = r#"
    "instructions": "Test instructions with {{ optional_param }}",
    "parameters": [
        {
            "key": "optional_param",
            "input_type": "string",
            "requirement": "optional",
            "description": "A test parameter"
        }
    ]"#;
        let (_temp_dir, recipe_path) = setup_recipe_file(instructions_and_parameters);

        let load_recipe_result = load_recipe_as_template(recipe_path.to_str().unwrap(), Vec::new());
        assert!(load_recipe_result.is_err());
        let err = load_recipe_result.unwrap_err();
        println!("{}", err.to_string());
        assert!(err.to_string().contains(
            "Optional parameters missing default values in the recipe: optional_param. Please provide defaults."
        ));
    }

    #[test]
    fn test_load_recipe_as_template_wrong_input_type_in_recipe_file() {
        let instructions_and_parameters = r#"
    "instructions": "Test instructions with {{ param }}",
    "parameters": [
        {
            "key": "param",
            "input_type": "some_invalid_type",
            "requirement": "required",
            "description": "A test parameter"
        }
    ]"#;
        let params = vec![("param".to_string(), "value".to_string())];
        let (_temp_dir, recipe_path) = setup_recipe_file(instructions_and_parameters);

        let load_recipe_result = load_recipe_as_template(recipe_path.to_str().unwrap(), params);
        assert!(load_recipe_result.is_err());
        let err = load_recipe_result.unwrap_err();
        assert!(err
            .to_string()
            .contains("unknown variant `some_invalid_type`"));
    }

    #[test]
    fn test_load_recipe_as_template_success_without_parameters() {
        let instructions_and_parameters = r#"
    "instructions": "Test instructions"
    "#;
        let (_temp_dir, recipe_path) = setup_recipe_file(instructions_and_parameters);

        let recipe = load_recipe_as_template(recipe_path.to_str().unwrap(), Vec::new()).unwrap();
        assert_eq!(recipe.instructions.unwrap(), "Test instructions");
        assert!(recipe.parameters.is_none());
            assert_eq!(recipe.title, "Test Recipe");
            assert_eq!(recipe.description, "A test recipe");
            assert_eq!(recipe.instructions.unwrap(), "Test instructions with value");
            // Verify parameters match recipe definition
            assert_eq!(recipe.parameters.as_ref().unwrap().len(), 1);
            let param = &recipe.parameters.as_ref().unwrap()[0];
            assert_eq!(param.key, "my_name");
            assert!(matches!(param.input_type, RecipeParameterInputType::String));
            assert!(matches!(
                param.requirement,
                RecipeParameterRequirement::Required
            ));
            assert_eq!(param.description, "A test parameter");
        }
    }
}
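
A hedged sketch of the new `load_recipe` entry point: parameters are passed as key/value pairs, and a `user_prompt` parameter without a value falls back to the interactive callback. The path below is illustrative:

let params = vec![("name".to_string(), "my_value".to_string())];
let recipe = load_recipe("path/to/recipe.yaml", params)?; // hypothetical path
println!("{}: {}", recipe.title, recipe.description);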

@@ -1,16 +1,21 @@
use anyhow::{anyhow, Result};
use goose::config::Config;
use goose::recipe::read_recipe_file_content::{read_recipe_file, RecipeFile};
use goose::recipe::template_recipe::parse_recipe_content;
use std::env;
use std::fs;
use std::path::{Path, PathBuf};
use std::{env, fs};

use crate::recipes::recipe::RECIPE_FILE_EXTENSIONS;

use super::github_recipe::{retrieve_recipe_from_github, GOOSE_RECIPE_GITHUB_REPO_CONFIG_KEY};
use super::github_recipe::{
    list_github_recipes, retrieve_recipe_from_github, RecipeInfo, RecipeSource,
    GOOSE_RECIPE_GITHUB_REPO_CONFIG_KEY,
};

const GOOSE_RECIPE_PATH_ENV_VAR: &str = "GOOSE_RECIPE_PATH";

pub fn retrieve_recipe_file(recipe_name: &str) -> Result<(String, PathBuf)> {
    // If recipe_name ends with yaml or json, treat it as a direct file path
pub fn retrieve_recipe_file(recipe_name: &str) -> Result<RecipeFile> {
    if RECIPE_FILE_EXTENSIONS
        .iter()
        .any(|ext| recipe_name.ends_with(&format!(".{}", ext)))
@@ -18,6 +23,12 @@ pub fn retrieve_recipe_file(recipe_name: &str) -> Result<(String, PathBuf)> {
        let path = PathBuf::from(recipe_name);
        return read_recipe_file(path);
    }
    if is_file_path(recipe_name) || is_file_name(recipe_name) {
        return Err(anyhow!(
            "Recipe file {} is not a json or yaml file",
            recipe_name
        ));
    }
    retrieve_recipe_from_local_path(recipe_name).or_else(|e| {
        if let Some(recipe_repo_full_name) = configured_github_recipe_repo() {
            retrieve_recipe_from_github(recipe_name, &recipe_repo_full_name)
@@ -27,7 +38,18 @@ pub fn retrieve_recipe_file(recipe_name: &str) -> Result<(String, PathBuf)> {
    })
}

fn read_recipe_in_dir(dir: &Path, recipe_name: &str) -> Result<(String, PathBuf)> {
fn is_file_path(recipe_name: &str) -> bool {
    recipe_name.contains('/')
        || recipe_name.contains('\\')
        || recipe_name.starts_with('~')
        || recipe_name.starts_with('.')
}

fn is_file_name(recipe_name: &str) -> bool {
    Path::new(recipe_name).extension().is_some()
}

fn read_recipe_in_dir(dir: &Path, recipe_name: &str) -> Result<RecipeFile> {
    for ext in RECIPE_FILE_EXTENSIONS {
        let recipe_path = dir.join(format!("{}.{}", recipe_name, ext));
        if let Ok(result) = read_recipe_file(recipe_path) {
@@ -42,7 +64,7 @@ fn read_recipe_in_dir(dir: &Path, recipe_name: &str) -> Result<(String, PathBuf)
    )))
}

fn retrieve_recipe_from_local_path(recipe_name: &str) -> Result<(String, PathBuf)> {
fn retrieve_recipe_from_local_path(recipe_name: &str) -> Result<RecipeFile> {
    let mut search_dirs = vec![PathBuf::from(".")];
    if let Ok(recipe_path_env) = env::var(GOOSE_RECIPE_PATH_ENV_VAR) {
        let path_separator = if cfg!(windows) { ';' } else { ':' };
@@ -78,24 +100,95 @@ fn configured_github_recipe_repo() -> Option<String> {
    }
}

fn read_recipe_file<P: AsRef<Path>>(recipe_path: P) -> Result<(String, PathBuf)> {
    let path = recipe_path.as_ref();
/// Lists all available recipes from local paths and GitHub repositories
pub fn list_available_recipes() -> Result<Vec<RecipeInfo>> {
    let mut recipes = Vec::new();

    let content = fs::read_to_string(path)
        .map_err(|e| anyhow!("Failed to read recipe file {}: {}", path.display(), e))?;
    // Search local recipes
    if let Ok(local_recipes) = discover_local_recipes() {
        recipes.extend(local_recipes);
    }

    let canonical = path.canonicalize().map_err(|e| {
        anyhow!(
            "Failed to resolve absolute path for {}: {}",
            path.display(),
            e
        )
    })?;
    // Search GitHub recipes if configured
    if let Some(repo) = configured_github_recipe_repo() {
        if let Ok(github_recipes) = list_github_recipes(&repo) {
            recipes.extend(github_recipes);
        }
    }

    let parent_dir = canonical
        .parent()
        .ok_or_else(|| anyhow!("Resolved path has no parent: {}", canonical.display()))?
        .to_path_buf();

    Ok((content, parent_dir))
    Ok(recipes)
}

fn discover_local_recipes() -> Result<Vec<RecipeInfo>> {
    let mut recipes = Vec::new();
    let mut search_dirs = vec![PathBuf::from(".")];

    // Add GOOSE_RECIPE_PATH directories
    if let Ok(recipe_path_env) = env::var(GOOSE_RECIPE_PATH_ENV_VAR) {
        let path_separator = if cfg!(windows) { ';' } else { ':' };
        let recipe_path_env_dirs: Vec<PathBuf> = recipe_path_env
            .split(path_separator)
            .map(PathBuf::from)
            .collect();
        search_dirs.extend(recipe_path_env_dirs);
    }

    for dir in search_dirs {
        if let Ok(dir_recipes) = scan_directory_for_recipes(&dir) {
            recipes.extend(dir_recipes);
        }
    }

    Ok(recipes)
}

fn scan_directory_for_recipes(dir: &Path) -> Result<Vec<RecipeInfo>> {
    let mut recipes = Vec::new();

    if !dir.exists() || !dir.is_dir() {
        return Ok(recipes);
    }

    for entry in fs::read_dir(dir)? {
        let entry = entry?;
        let path = entry.path();

        if path.is_file() {
            if let Some(extension) = path.extension() {
                if RECIPE_FILE_EXTENSIONS.contains(&extension.to_string_lossy().as_ref()) {
                    if let Ok(recipe_info) = create_local_recipe_info(&path) {
                        recipes.push(recipe_info);
                    }
                }
            }
        }
    }

    Ok(recipes)
}

fn create_local_recipe_info(path: &Path) -> Result<RecipeInfo> {
    let content = fs::read_to_string(path)?;
    let recipe_dir = path
        .parent()
        .unwrap_or_else(|| Path::new("."))
        .to_string_lossy()
        .to_string();
    let (recipe, _) = parse_recipe_content(&content, recipe_dir)?;

    let name = path
        .file_stem()
        .and_then(|s| s.to_str())
        .unwrap_or("unknown")
        .to_string();

    let path_str = path.to_string_lossy().to_string();

    Ok(RecipeInfo {
        name,
        source: RecipeSource::Local,
        path: path_str,
        title: Some(recipe.title),
        description: Some(recipe.description),
    })
}
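
Putting the discovery pieces together — a sketch, assuming `GOOSE_RECIPE_PATH` and/or `GOOSE_RECIPE_GITHUB_REPO` may or may not be set; as the code above shows, both sources are best-effort and failures are skipped silently:

for info in list_available_recipes()? {
    let source = match info.source {
        RecipeSource::Local => "local",
        RecipeSource::GitHub => "github",
    };
    println!("[{}] {} ({})", source, info.name, info.path);
}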

106
crates/goose-cli/src/scenario_tests/mod.rs
Normal file
@@ -0,0 +1,106 @@
mod scenarios;

use crate::session::Session;
use anyhow::Result;
use goose::agents::Agent;
use goose::config::Config;
use goose::message::Message;
use goose::model::ModelConfig;
use goose::providers::{create, testprovider::TestProvider};
use std::path::Path;
use std::sync::Arc;

#[derive(Debug, Clone)]
pub struct ScenarioResult {
    pub messages: Vec<Message>,
    pub error: Option<String>,
}

impl ScenarioResult {
    pub fn message_contents(&self) -> Vec<String> {
        self.messages
            .iter()
            .flat_map(|msg| &msg.content)
            .map(|content| content.as_text().unwrap_or("").to_string())
            .collect()
    }
}

pub async fn run_test_scenario(test_name: &str, inputs: &[&str]) -> Result<ScenarioResult> {
    let manifest_dir = env!("CARGO_MANIFEST_DIR");
    let file_path = format!(
        "{}/src/scenario_tests/recordings/{}.json",
        manifest_dir, test_name
    );

    if let Some(parent) = Path::new(&file_path).parent() {
        std::fs::create_dir_all(parent)?;
    }

    let replay_mode = Path::new(&file_path).exists();
    let provider = if replay_mode {
        match TestProvider::new_replaying(&file_path) {
            Ok(test_provider) => {
                Arc::new(test_provider) as Arc<dyn goose::providers::base::Provider>
            }
            Err(e) => {
                let _ = std::fs::remove_file(&file_path);
                return Err(anyhow::anyhow!(
                    "Test replay failed for '{}': {}. File deleted - re-run test to record fresh data.",
                    test_name, e
                ));
            }
        }
    } else {
        if std::env::var("GITHUB_ACTIONS").is_ok() {
            panic!(
                "Test recording is not supported on CI. \
                Did you forget to add the file {} to the repository and were expecting that to replay?",
                file_path
            );
        }
        let config = Config::global();

        let (provider_name, model_name): (String, String) = match (
            config.get_param::<String>("GOOSE_PROVIDER"),
            config.get_param::<String>("GOOSE_MODEL"),
        ) {
            (Ok(provider), Ok(model)) => (provider, model),
            _ => {
                panic!("Provider or model not configured. Run 'goose configure' first");
            }
        };

        let model_config = ModelConfig::new(model_name);

        let inner_provider = create(&provider_name, model_config)?;
        Arc::new(TestProvider::new_recording(inner_provider, &file_path))
    };

    let agent = Agent::new();
    agent.update_provider(provider).await?;

    let mut session = Session::new(agent, None, false, None, None, None, None);

    let mut error = None;
    for input in inputs {
        if let Err(e) = session.headless(input.to_string()).await {
            error = Some(e.to_string());
            break;
        }
    }

    let messages = session.message_history().to_vec();

    if let Some(ref err_msg) = error {
        if err_msg.contains("No recorded response found") {
            let _ = std::fs::remove_file(&file_path);
            return Err(anyhow::anyhow!(
                "Test replay failed for '{}' - missing recorded interaction: {}. File deleted - re-run test to record fresh data.",
                test_name, err_msg
            ));
        }
    }

    Ok(ScenarioResult { messages, error })
}
|
||||
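The recording file that follows is keyed by request hashes, each entry capturing the full provider input (system prompt, message history, tool schemas) and the recorded output. A hedged sketch of the schema the JSON implies; the authoritative types belong to `TestProvider`:

```rust
// Sketch of the recording-file layout implied by the JSON below.
// Field names mirror the JSON; the real (de)serialization lives in TestProvider.
use std::collections::HashMap;

#[derive(serde::Serialize, serde::Deserialize)]
struct RecordingEntry {
    input: serde_json::Value,  // system prompt, messages, tool schemas
    output: serde_json::Value, // assistant message plus token usage
}

type RecordingFile = HashMap<String, RecordingEntry>; // keyed by request hash
```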
@@ -0,0 +1,474 @@
{
  "d2c95695a2c9ad5d95248955d850d147d76e209f537dab9bcc70fc815f2d8de7": {
    "input": {
      "system": "You are a general-purpose AI agent called Goose, created by Block, the parent company of Square, CashApp, and Tidal. Goose is being developed as an open-source software project.\n\nThe current date is 2025-07-22 15:02:47.\n\nGoose uses LLM providers with tool calling capability. You can be used with different language models (gpt-4o, claude-3.5-sonnet, o1, llama-3.2, deepseek-r1, etc).\nThese models have varying knowledge cut-off dates depending on when they were trained, but typically it's between 5-10 months prior to the current date.\n\n# Extensions\n\nExtensions allow other applications to provide context to Goose. Extensions connect Goose to different data sources and tools.\nYou are capable of dynamically plugging into new extensions and learning how to use them. You solve higher level problems using the tools in these extensions, and can interact with multiple at once.\nUse the search_available_extensions tool to find additional extensions to enable to help with your task. To enable extensions, use the enable_extension tool and provide the extension_name. You should only enable extensions found from the search_available_extensions tool.\n\n\nNo extensions are defined. You should let the user know that they should add extensions.\n\n\n\n# Suggestion\n\"\"\n\n\n\n\n# Response Guidelines\n\n- Use Markdown formatting for all responses.\n- Follow best practices for Markdown, including:\n - Using headers for organization.\n - Bullet points for lists.\n - Links formatted correctly, either as linked text (e.g., [this is linked text](https://example.com)) or automatic links using angle brackets (e.g., <http://example.com/>).\n- For code examples, use fenced code blocks by placing triple backticks (` ``` `) before and after the code. Include the language identifier after the opening backticks (e.g., ` ```python `) to enable syntax highlighting.\n- Ensure clarity, conciseness, and proper formatting to enhance readability and usability.\n\n# Additional Instructions:\n\nRight now you are *NOT* in the chat only mode and have access to tool use and system.",
      "messages": [
        {
          "id": null,
          "role": "user",
          "created": 1753196567,
          "content": [
            {
              "type": "text",
              "text": "hello"
            }
          ]
        },
        {
          "id": null,
          "role": "assistant",
          "created": 1753196578,
          "content": [
            {
              "type": "text",
              "text": "Hello! I'm Goose, an AI agent created by Block. I'm here to help you with various tasks and questions.\n\nI notice that you don't currently have any extensions enabled. Extensions allow me to connect to different data sources and tools to help you accomplish more complex tasks. \n\nHere's what I can help you with right now:\n- **General conversation and questions**\n- **Task management** - I can create and execute dynamic tasks\n- **Scheduling** - I can manage scheduled recipe execution\n- **Extension management** - I can search for and enable additional capabilities\n\nTo get the most out of our interaction, you might want to:\n1. Let me search for available extensions that could help with your specific needs\n2. Tell me what kind of tasks you're looking to accomplish\n\nWhat would you like to do today?"
            }
          ]
        },
        {
          "id": null,
          "role": "user",
          "created": 1753196578,
          "content": [
            {
              "type": "text",
              "text": "goodbye"
            }
          ]
        }
      ],
      "tools": [
        {
          "name": "platform__search_available_extensions",
          "description": "Searches for additional extensions available to help complete tasks.\n Use this tool when you're unable to find a specific feature or functionality you need to complete your task, or when standard approaches aren't working.\n These extensions might provide the exact tools needed to solve your problem.\n If you find a relevant one, consider using your tools to enable it.",
          "inputSchema": {
            "properties": {},
            "required": [],
            "type": "object"
          },
          "annotations": {
            "title": "Discover extensions",
            "readOnlyHint": true,
            "destructiveHint": false,
            "idempotentHint": false,
            "openWorldHint": false
          }
        },
        {
          "name": "platform__manage_extensions",
          "description": "Tool to manage extensions and tools in goose context.\n Enable or disable extensions to help complete tasks.\n Enable or disable an extension by providing the extension name.\n ",
          "inputSchema": {
            "properties": {
              "action": {
                "description": "The action to perform",
                "enum": [
                  "enable",
                  "disable"
                ],
                "type": "string"
              },
              "extension_name": {
                "description": "The name of the extension to enable",
                "type": "string"
              }
            },
            "required": [
              "action",
              "extension_name"
            ],
            "type": "object"
          },
          "annotations": {
            "title": "Enable or disable an extension",
            "readOnlyHint": false,
            "destructiveHint": false,
            "idempotentHint": false,
            "openWorldHint": false
          }
        },
        {
          "name": "platform__manage_schedule",
          "description": "Manage scheduled recipe execution for this Goose instance.\n\nActions:\n- \"list\": List all scheduled jobs\n- \"create\": Create a new scheduled job from a recipe file\n- \"run_now\": Execute a scheduled job immediately \n- \"pause\": Pause a scheduled job\n- \"unpause\": Resume a paused job\n- \"delete\": Remove a scheduled job\n- \"kill\": Terminate a currently running job\n- \"inspect\": Get details about a running job\n- \"sessions\": List execution history for a job\n- \"session_content\": Get the full content (messages) of a specific session\n",
          "inputSchema": {
            "properties": {
              "action": {
                "enum": [
                  "list",
                  "create",
                  "run_now",
                  "pause",
                  "unpause",
                  "delete",
                  "kill",
                  "inspect",
                  "sessions",
                  "session_content"
                ],
                "type": "string"
              },
              "cron_expression": {
                "description": "A cron expression for create action. Supports both 5-field (minute hour day month weekday) and 6-field (second minute hour day month weekday) formats. 5-field expressions are automatically converted to 6-field by prepending '0' for seconds.",
                "type": "string"
              },
              "execution_mode": {
                "default": "background",
                "description": "Execution mode for create action: 'foreground' or 'background'",
                "enum": [
                  "foreground",
                  "background"
                ],
                "type": "string"
              },
              "job_id": {
                "description": "Job identifier for operations on existing jobs",
                "type": "string"
              },
              "limit": {
                "default": 50,
                "description": "Limit for sessions list",
                "type": "integer"
              },
              "recipe_path": {
                "description": "Path to recipe file for create action",
                "type": "string"
              },
              "session_id": {
                "description": "Session identifier for session_content action",
                "type": "string"
              }
            },
            "required": [
              "action"
            ],
            "type": "object"
          },
          "annotations": {
            "title": "Manage scheduled recipes",
            "readOnlyHint": false,
            "destructiveHint": true,
            "idempotentHint": false,
            "openWorldHint": false
          }
        },
        {
          "name": "dynamic_task__create_task",
          "description": "Use this tool to create one or more dynamic tasks from a shared text instruction and varying parameters.How it works:\n - Provide a single text instruction\n - Use the 'task_parameters' field to pass an array of parameter sets\n - Each resulting task will use the same instruction with different parameter values\n This is useful when performing the same operation across many inputs (e.g., getting weather for multiple cities, searching multiple slack channels, iterating through various linear tickets, etc).\n Once created, these tasks should be passed to the 'subagent__execute_task' tool for execution. Tasks can run sequentially or in parallel.\n ---\n What is a 'subagent'?\n A 'subagent' is a stateless sub-process that executes a single task independently. Use subagents when:\n - You want to parallelize similar work across different inputs\n - You are not sure your search or operation will succeed on the first try\n Each subagent receives a task with a defined payload and returns a result, which is not visible to the user unless explicitly summarized by the system.\n ---\n Examples of 'task_parameters' for a single task:\n text_instruction: Search for the config file in the root directory.\n Examples of 'task_parameters' for multiple tasks:\n text_instruction: Get weather for Melbourne.\n timeout_seconds: 300\n text_instruction: Get weather for Los Angeles.\n timeout_seconds: 300\n text_instruction: Get weather for San Francisco.\n timeout_seconds: 300\n ",
          "inputSchema": {
            "properties": {
              "task_parameters": {
                "description": "Array of parameter sets for creating tasks. For a single task, provide an array with one element. For multiple tasks, provide an array with multiple elements, each with different parameter values. If there is no parameter set, provide an empty array.",
                "items": {
                  "properties": {
                    "text_instruction": {
                      "description": "The text instruction to execute",
                      "type": "string"
                    },
                    "timeout_seconds": {
                      "description": "Optional timeout for the task in seconds (default: 300)",
                      "minimum": 1,
                      "type": "integer"
                    }
                  },
                  "required": [
                    "text_instruction"
                  ],
                  "type": "object"
                },
                "type": "array"
              }
            },
            "type": "object"
          },
          "annotations": {
            "title": "Dynamic Task Creation",
            "readOnlyHint": false,
            "destructiveHint": true,
            "idempotentHint": false,
            "openWorldHint": true
          }
        },
        {
          "name": "subagent__execute_task",
          "description": "Only use the subagent__execute_task tool when you execute sub recipe task or dynamic task.\nEXECUTION STRATEGY DECISION:\n1. If the tasks are created with execution_mode, use the execution_mode.\n2. Execute tasks sequentially unless user explicitly requests parallel execution. PARALLEL: User uses keywords like 'parallel', 'simultaneously', 'at the same time', 'concurrently'\n\nIMPLEMENTATION:\n- Sequential execution: Call this tool multiple times, passing exactly ONE task per call\n- Parallel execution: Call this tool once, passing an ARRAY of all tasks\n\nEXAMPLES:\nUser Intent Based:\n- User: 'get weather and tell me a joke' → Sequential (2 separate tool calls, 1 task each)\n- User: 'get weather and joke in parallel' → Parallel (1 tool call with array of 2 tasks)\n- User: 'run these simultaneously' → Parallel (1 tool call with task array)\n- User: 'do task A then task B' → Sequential (2 separate tool calls)",
          "inputSchema": {
            "properties": {
              "execution_mode": {
                "default": "sequential",
                "description": "Execution strategy for multiple tasks. Use 'sequential' (default) unless user explicitly requests parallel execution with words like 'parallel', 'simultaneously', 'at the same time', or 'concurrently'.",
                "enum": [
                  "sequential",
                  "parallel"
                ],
                "type": "string"
              },
              "task_ids": {
                "items": {
                  "description": "Unique identifier for the task",
                  "type": "string"
                },
                "type": "array"
              }
            },
            "required": [
              "task_ids"
            ],
            "type": "object"
          },
          "annotations": {
            "title": "Run tasks in parallel",
            "readOnlyHint": false,
            "destructiveHint": true,
            "idempotentHint": false,
            "openWorldHint": true
          }
        }
      ]
    },
    "output": {
      "message": {
        "id": null,
        "role": "assistant",
        "created": 1753196580,
        "content": [
          {
            "type": "text",
            "text": "Goodbye! It was nice chatting with you. Feel free to come back anytime if you need help with tasks, have questions, or want to explore what extensions might be useful for your work. Have a great day! 👋"
          }
        ]
      },
      "usage": {
        "model": "us.anthropic.claude-sonnet-4-20250514-v1:0",
        "usage": {
          "input_tokens": 2700,
          "output_tokens": 52,
          "total_tokens": 2752
        }
      }
    }
  },
  "205407e3c76ac3acb35d9da9f560058217d3b267873f8e0a715a1946e2714ddd": {
    "input": {
      "system": "You are a general-purpose AI agent called Goose, created by Block, the parent company of Square, CashApp, and Tidal. Goose is being developed as an open-source software project.\n\nThe current date is 2025-07-22 15:02:47.\n\nGoose uses LLM providers with tool calling capability. You can be used with different language models (gpt-4o, claude-3.5-sonnet, o1, llama-3.2, deepseek-r1, etc).\nThese models have varying knowledge cut-off dates depending on when they were trained, but typically it's between 5-10 months prior to the current date.\n\n# Extensions\n\nExtensions allow other applications to provide context to Goose. Extensions connect Goose to different data sources and tools.\nYou are capable of dynamically plugging into new extensions and learning how to use them. You solve higher level problems using the tools in these extensions, and can interact with multiple at once.\nUse the search_available_extensions tool to find additional extensions to enable to help with your task. To enable extensions, use the enable_extension tool and provide the extension_name. You should only enable extensions found from the search_available_extensions tool.\n\n\nNo extensions are defined. You should let the user know that they should add extensions.\n\n\n\n# Suggestion\n\"\"\n\n\n\n\n# Response Guidelines\n\n- Use Markdown formatting for all responses.\n- Follow best practices for Markdown, including:\n - Using headers for organization.\n - Bullet points for lists.\n - Links formatted correctly, either as linked text (e.g., [this is linked text](https://example.com)) or automatic links using angle brackets (e.g., <http://example.com/>).\n- For code examples, use fenced code blocks by placing triple backticks (` ``` `) before and after the code. Include the language identifier after the opening backticks (e.g., ` ```python `) to enable syntax highlighting.\n- Ensure clarity, conciseness, and proper formatting to enhance readability and usability.\n\n# Additional Instructions:\n\nRight now you are *NOT* in the chat only mode and have access to tool use and system.",
      "messages": [
        {
          "id": null,
          "role": "user",
          "created": 1753196567,
          "content": [
            {
              "type": "text",
              "text": "hello"
            }
          ]
        }
      ],
      "tools": [
        {
          "name": "platform__search_available_extensions",
          "description": "Searches for additional extensions available to help complete tasks.\n Use this tool when you're unable to find a specific feature or functionality you need to complete your task, or when standard approaches aren't working.\n These extensions might provide the exact tools needed to solve your problem.\n If you find a relevant one, consider using your tools to enable it.",
          "inputSchema": {
            "properties": {},
            "required": [],
            "type": "object"
          },
          "annotations": {
            "title": "Discover extensions",
            "readOnlyHint": true,
            "destructiveHint": false,
            "idempotentHint": false,
            "openWorldHint": false
          }
        },
        {
          "name": "platform__manage_extensions",
          "description": "Tool to manage extensions and tools in goose context.\n Enable or disable extensions to help complete tasks.\n Enable or disable an extension by providing the extension name.\n ",
          "inputSchema": {
            "properties": {
              "action": {
                "description": "The action to perform",
                "enum": [
                  "enable",
                  "disable"
                ],
                "type": "string"
              },
              "extension_name": {
                "description": "The name of the extension to enable",
                "type": "string"
              }
            },
            "required": [
              "action",
              "extension_name"
            ],
            "type": "object"
          },
          "annotations": {
            "title": "Enable or disable an extension",
            "readOnlyHint": false,
            "destructiveHint": false,
            "idempotentHint": false,
            "openWorldHint": false
          }
        },
        {
          "name": "platform__manage_schedule",
          "description": "Manage scheduled recipe execution for this Goose instance.\n\nActions:\n- \"list\": List all scheduled jobs\n- \"create\": Create a new scheduled job from a recipe file\n- \"run_now\": Execute a scheduled job immediately \n- \"pause\": Pause a scheduled job\n- \"unpause\": Resume a paused job\n- \"delete\": Remove a scheduled job\n- \"kill\": Terminate a currently running job\n- \"inspect\": Get details about a running job\n- \"sessions\": List execution history for a job\n- \"session_content\": Get the full content (messages) of a specific session\n",
          "inputSchema": {
            "properties": {
              "action": {
                "enum": [
                  "list",
                  "create",
                  "run_now",
                  "pause",
                  "unpause",
                  "delete",
                  "kill",
                  "inspect",
                  "sessions",
                  "session_content"
                ],
                "type": "string"
              },
              "cron_expression": {
                "description": "A cron expression for create action. Supports both 5-field (minute hour day month weekday) and 6-field (second minute hour day month weekday) formats. 5-field expressions are automatically converted to 6-field by prepending '0' for seconds.",
                "type": "string"
              },
              "execution_mode": {
                "default": "background",
                "description": "Execution mode for create action: 'foreground' or 'background'",
                "enum": [
                  "foreground",
                  "background"
                ],
                "type": "string"
              },
              "job_id": {
                "description": "Job identifier for operations on existing jobs",
                "type": "string"
              },
              "limit": {
                "default": 50,
                "description": "Limit for sessions list",
                "type": "integer"
              },
              "recipe_path": {
                "description": "Path to recipe file for create action",
                "type": "string"
              },
              "session_id": {
                "description": "Session identifier for session_content action",
                "type": "string"
              }
            },
            "required": [
              "action"
            ],
            "type": "object"
          },
          "annotations": {
            "title": "Manage scheduled recipes",
            "readOnlyHint": false,
            "destructiveHint": true,
            "idempotentHint": false,
            "openWorldHint": false
          }
        },
        {
          "name": "dynamic_task__create_task",
          "description": "Use this tool to create one or more dynamic tasks from a shared text instruction and varying parameters.How it works:\n - Provide a single text instruction\n - Use the 'task_parameters' field to pass an array of parameter sets\n - Each resulting task will use the same instruction with different parameter values\n This is useful when performing the same operation across many inputs (e.g., getting weather for multiple cities, searching multiple slack channels, iterating through various linear tickets, etc).\n Once created, these tasks should be passed to the 'subagent__execute_task' tool for execution. Tasks can run sequentially or in parallel.\n ---\n What is a 'subagent'?\n A 'subagent' is a stateless sub-process that executes a single task independently. Use subagents when:\n - You want to parallelize similar work across different inputs\n - You are not sure your search or operation will succeed on the first try\n Each subagent receives a task with a defined payload and returns a result, which is not visible to the user unless explicitly summarized by the system.\n ---\n Examples of 'task_parameters' for a single task:\n text_instruction: Search for the config file in the root directory.\n Examples of 'task_parameters' for multiple tasks:\n text_instruction: Get weather for Melbourne.\n timeout_seconds: 300\n text_instruction: Get weather for Los Angeles.\n timeout_seconds: 300\n text_instruction: Get weather for San Francisco.\n timeout_seconds: 300\n ",
          "inputSchema": {
            "properties": {
              "task_parameters": {
                "description": "Array of parameter sets for creating tasks. For a single task, provide an array with one element. For multiple tasks, provide an array with multiple elements, each with different parameter values. If there is no parameter set, provide an empty array.",
                "items": {
                  "properties": {
                    "text_instruction": {
                      "description": "The text instruction to execute",
                      "type": "string"
                    },
                    "timeout_seconds": {
                      "description": "Optional timeout for the task in seconds (default: 300)",
                      "minimum": 1,
                      "type": "integer"
                    }
                  },
                  "required": [
                    "text_instruction"
                  ],
                  "type": "object"
                },
                "type": "array"
              }
            },
            "type": "object"
          },
          "annotations": {
            "title": "Dynamic Task Creation",
            "readOnlyHint": false,
            "destructiveHint": true,
            "idempotentHint": false,
            "openWorldHint": true
          }
        },
        {
          "name": "subagent__execute_task",
          "description": "Only use the subagent__execute_task tool when you execute sub recipe task or dynamic task.\nEXECUTION STRATEGY DECISION:\n1. If the tasks are created with execution_mode, use the execution_mode.\n2. Execute tasks sequentially unless user explicitly requests parallel execution. PARALLEL: User uses keywords like 'parallel', 'simultaneously', 'at the same time', 'concurrently'\n\nIMPLEMENTATION:\n- Sequential execution: Call this tool multiple times, passing exactly ONE task per call\n- Parallel execution: Call this tool once, passing an ARRAY of all tasks\n\nEXAMPLES:\nUser Intent Based:\n- User: 'get weather and tell me a joke' → Sequential (2 separate tool calls, 1 task each)\n- User: 'get weather and joke in parallel' → Parallel (1 tool call with array of 2 tasks)\n- User: 'run these simultaneously' → Parallel (1 tool call with task array)\n- User: 'do task A then task B' → Sequential (2 separate tool calls)",
          "inputSchema": {
            "properties": {
              "execution_mode": {
                "default": "sequential",
                "description": "Execution strategy for multiple tasks. Use 'sequential' (default) unless user explicitly requests parallel execution with words like 'parallel', 'simultaneously', 'at the same time', or 'concurrently'.",
                "enum": [
                  "sequential",
                  "parallel"
                ],
                "type": "string"
              },
              "task_ids": {
                "items": {
                  "description": "Unique identifier for the task",
                  "type": "string"
                },
                "type": "array"
              }
            },
            "required": [
              "task_ids"
            ],
            "type": "object"
          },
          "annotations": {
            "title": "Run tasks in parallel",
            "readOnlyHint": false,
            "destructiveHint": true,
            "idempotentHint": false,
            "openWorldHint": true
          }
        }
      ]
    },
    "output": {
      "message": {
        "id": null,
        "role": "assistant",
        "created": 1753196578,
        "content": [
          {
            "type": "text",
            "text": "Hello! I'm Goose, an AI agent created by Block. I'm here to help you with various tasks and questions.\n\nI notice that you don't currently have any extensions enabled. Extensions allow me to connect to different data sources and tools to help you accomplish more complex tasks. \n\nHere's what I can help you with right now:\n- **General conversation and questions**\n- **Task management** - I can create and execute dynamic tasks\n- **Scheduling** - I can manage scheduled recipe execution\n- **Extension management** - I can search for and enable additional capabilities\n\nTo get the most out of our interaction, you might want to:\n1. Let me search for available extensions that could help with your specific needs\n2. Tell me what kind of tasks you're looking to accomplish\n\nWhat would you like to do today?"
          }
        ]
      },
      "usage": {
        "model": "us.anthropic.claude-sonnet-4-20250514-v1:0",
        "usage": {
          "input_tokens": 2517,
          "output_tokens": 179,
          "total_tokens": 2696
        }
      }
    }
  }
}
22
crates/goose-cli/src/scenario_tests/scenarios.rs
Normal file
@@ -0,0 +1,22 @@
#[cfg(test)]
mod tests {
    use crate::scenario_tests::run_test_scenario;
    use anyhow::Result;

    #[tokio::test]
    async fn test_basic_greeting() -> Result<()> {
        let result = run_test_scenario("basic_greeting", &["hello", "goodbye"]).await?;

        assert!(result
            .message_contents()
            .iter()
            .any(|msg| msg.contains("Hello")));
        assert!(result
            .message_contents()
            .iter()
            .any(|msg| msg.contains("Goodbye")));
        assert!(result.error.is_none());

        Ok(())
    }
}
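Adding another scenario follows the same pattern; a hypothetical example (the test name and input are illustrative, and the first local run records a fresh JSON file under `recordings/`):

```rust
// Hypothetical additional scenario; "extension_question" and its input
// are illustrative only.
#[tokio::test]
async fn test_extension_question() -> Result<()> {
    let result = run_test_scenario("extension_question", &["what extensions do you have?"]).await?;
    assert!(result.error.is_none());
    Ok(())
}
```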
@@ -1,11 +1,14 @@
use console::style;
use goose::agents::extension::ExtensionError;
use goose::agents::types::RetryConfig;
use goose::agents::Agent;
use goose::config::{Config, ExtensionConfig, ExtensionConfigManager};
use goose::providers::create;
use goose::recipe::{Response, SubRecipe};
use goose::session;
use goose::session::Identifier;
use mcp_client::transport::Error as McpClientError;
use rustyline::EditMode;
use std::process;
use std::sync::Arc;

@@ -28,35 +31,204 @@ pub struct SessionBuilderConfig {
    pub extensions: Vec<String>,
    /// List of remote extension commands to add
    pub remote_extensions: Vec<String>,
    /// List of streamable HTTP extension commands to add
    pub streamable_http_extensions: Vec<String>,
    /// List of builtin extension commands to add
    pub builtins: Vec<String>,
    /// List of extensions to enable, enable only this set and ignore configured ones
    pub extensions_override: Option<Vec<ExtensionConfig>>,
    /// Any additional system prompt to append to the default
    pub additional_system_prompt: Option<String>,
    /// Settings to override the global Goose settings
    pub settings: Option<SessionSettings>,
    /// Provider override from CLI arguments
    pub provider: Option<String>,
    /// Model override from CLI arguments
    pub model: Option<String>,
    /// Enable debug printing
    pub debug: bool,
    /// Maximum number of consecutive identical tool calls allowed
    pub max_tool_repetitions: Option<u32>,
    /// Maximum number of turns (iterations) allowed without user input
    pub max_turns: Option<u32>,
    /// ID of the scheduled job that triggered this session (if any)
    pub scheduled_job_id: Option<String>,
    /// Whether this session will be used interactively (affects debugging prompts)
    pub interactive: bool,
    /// Quiet mode - suppress non-response output
    pub quiet: bool,
    /// Sub-recipes to add to the session
    pub sub_recipes: Option<Vec<SubRecipe>>,
    /// Final output expected response
    pub final_output_response: Option<Response>,
    /// Retry configuration for automated validation and recovery
    pub retry_config: Option<RetryConfig>,
}

/// Offers to help debug an extension failure by creating a minimal debugging session
async fn offer_extension_debugging_help(
    extension_name: &str,
    error_message: &str,
    provider: Arc<dyn goose::providers::base::Provider>,
    interactive: bool,
) -> Result<(), anyhow::Error> {
    // Only offer debugging help in interactive mode
    if !interactive {
        return Ok(());
    }

    let help_prompt = format!(
        "Would you like me to help debug the '{}' extension failure?",
        extension_name
    );

    let should_help = match cliclack::confirm(help_prompt)
        .initial_value(false)
        .interact()
    {
        Ok(choice) => choice,
        Err(e) => {
            if e.kind() == std::io::ErrorKind::Interrupted {
                return Ok(());
            } else {
                return Err(e.into());
            }
        }
    };

    if !should_help {
        return Ok(());
    }

    println!("{}", style("🔧 Starting debugging session...").cyan());

    // Create a debugging prompt with context about the extension failure
    let debug_prompt = format!(
        "I'm having trouble starting an extension called '{}'. Here's the error I encountered:\n\n{}\n\nCan you help me diagnose what might be wrong and suggest how to fix it? Please consider common issues like:\n- Missing dependencies or tools\n- Configuration problems\n- Network connectivity (for remote extensions)\n- Permission issues\n- Path or environment variable problems",
        extension_name,
        error_message
    );

    // Create a minimal agent for debugging
    let debug_agent = Agent::new();
    debug_agent.update_provider(provider).await?;

    // Add the developer extension if available to help with debugging
    if let Ok(extensions) = ExtensionConfigManager::get_all() {
        for ext_wrapper in extensions {
            if ext_wrapper.enabled && ext_wrapper.config.name() == "developer" {
                if let Err(e) = debug_agent.add_extension(ext_wrapper.config).await {
                    // If we can't add developer extension, continue without it
                    eprintln!(
                        "Note: Could not load developer extension for debugging: {}",
                        e
                    );
                }
                break;
            }
        }
    }

    // Create a temporary session file for this debugging session
    let temp_session_file =
        std::env::temp_dir().join(format!("goose_debug_extension_{}.jsonl", extension_name));

    // Create the debugging session
    let mut debug_session = Session::new(
        debug_agent,
        Some(temp_session_file.clone()),
        false,
        None,
        None,
        None,
        None,
    );

    // Process the debugging request
    println!("{}", style("Analyzing the extension failure...").yellow());
    match debug_session.headless(debug_prompt).await {
        Ok(_) => {
            println!(
                "{}",
                style("✅ Debugging session completed. Check the suggestions above.").green()
            );
        }
        Err(e) => {
            eprintln!(
                "{}",
                style(format!("❌ Debugging session failed: {}", e)).red()
            );
        }
    }

    // Clean up the temporary session file
    let _ = std::fs::remove_file(temp_session_file);

    Ok(())
}

#[derive(Clone, Debug, Default)]
pub struct SessionSettings {
    pub goose_model: Option<String>,
    pub goose_provider: Option<String>,
    pub temperature: Option<f32>,
}

pub async fn build_session(session_config: SessionBuilderConfig) -> Session {
    // Load config and get provider/model
    let config = Config::global();

    let provider_name: String = config
        .get_param("GOOSE_PROVIDER")
    let provider_name = session_config
        .provider
        .or_else(|| {
            session_config
                .settings
                .as_ref()
                .and_then(|s| s.goose_provider.clone())
        })
        .or_else(|| config.get_param("GOOSE_PROVIDER").ok())
        .expect("No provider configured. Run 'goose configure' first");

    let model: String = config
        .get_param("GOOSE_MODEL")
    let model_name = session_config
        .model
        .or_else(|| {
            session_config
                .settings
                .as_ref()
                .and_then(|s| s.goose_model.clone())
        })
        .or_else(|| config.get_param("GOOSE_MODEL").ok())
        .expect("No model configured. Run 'goose configure' first");
    let model_config = goose::model::ModelConfig::new(model.clone());

    let temperature = session_config.settings.as_ref().and_then(|s| s.temperature);

    let model_config =
        goose::model::ModelConfig::new(model_name.clone()).with_temperature(temperature);

    // Create the agent
    let agent: Agent = Agent::new();
    let new_provider = create(&provider_name, model_config).unwrap();

    if let Some(sub_recipes) = session_config.sub_recipes {
        agent.add_sub_recipes(sub_recipes).await;
    }

    if let Some(final_output_response) = session_config.final_output_response {
        agent.add_final_output_tool(final_output_response).await;
    }

    let new_provider = match create(&provider_name, model_config) {
        Ok(provider) => provider,
        Err(e) => {
            output::render_error(&format!(
                "Error {}.\n\
                Please check your system keychain and run 'goose configure' again.\n\
                If your system is unable to use the keyring, please try setting secret key(s) via environment variables.\n\
                For more info, see: https://block.github.io/goose/docs/troubleshooting/#keychainkeyring-errors",
                e
            ));
            process::exit(1);
        }
    };
    // Keep a reference to the provider for display_session_info
    let provider_for_display = Arc::clone(&new_provider);
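The two `.or_else` chains above encode a three-level precedence: a CLI override wins, then per-session settings, then the global config. A minimal sketch of that rule in isolation (names are illustrative, not from the diff):

```rust
// Minimal sketch of the precedence encoded by the chains above:
// CLI argument, then session settings, then global config.
fn resolve(cli: Option<String>, settings: Option<String>, config: Option<String>) -> Option<String> {
    cli.or(settings).or(config)
}

// resolve(None, Some("gpt-4o".into()), Some("claude".into())) == Some("gpt-4o".into())
```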
@@ -69,7 +241,7 @@ pub async fn build_session(session_config: SessionBuilderConfig) -> Session {
            worker_model
        );
    } else {
        tracing::info!("🤖 Using model: {}", model);
        tracing::info!("🤖 Using model: {}", model_name);
    }

    agent
@@ -86,19 +258,17 @@ pub async fn build_session(session_config: SessionBuilderConfig) -> Session {
    }

    // Handle session file resolution and resuming
    let session_file = if session_config.no_session {
        // Use a temporary path that won't be written to
        #[cfg(unix)]
        {
            std::path::PathBuf::from("/dev/null")
        }
        #[cfg(windows)]
        {
            std::path::PathBuf::from("NUL")
        }
    let session_file: Option<std::path::PathBuf> = if session_config.no_session {
        None
    } else if session_config.resume {
        if let Some(identifier) = session_config.identifier {
            let session_file = session::get_path(identifier);
            let session_file = match session::get_path(identifier) {
                Err(e) => {
                    output::render_error(&format!("Invalid session identifier: {}", e));
                    process::exit(1);
                }
                Ok(path) => path,
            };
            if !session_file.exists() {
                output::render_error(&format!(
                    "Cannot resume session {} - no such session exists",
@@ -107,11 +277,11 @@ pub async fn build_session(session_config: SessionBuilderConfig) -> Session {
                process::exit(1);
            }

            session_file
            Some(session_file)
        } else {
            // Try to resume most recent session
            match session::get_most_recent_session() {
                Ok(file) => file,
                Ok(file) => Some(file),
                Err(_) => {
                    output::render_error("Cannot resume - no previous sessions found");
                    process::exit(1);
@@ -126,35 +296,43 @@ pub async fn build_session(session_config: SessionBuilderConfig) -> Session {
        };

        // Just get the path - file will be created when needed
        session::get_path(id)
        match session::get_path(id) {
            Ok(path) => Some(path),
            Err(e) => {
                output::render_error(&format!("Failed to create session path: {}", e));
                process::exit(1);
            }
        }
    };

    if session_config.resume && !session_config.no_session {
        // Read the session metadata
        let metadata = session::read_metadata(&session_file).unwrap_or_else(|e| {
            output::render_error(&format!("Failed to read session metadata: {}", e));
            process::exit(1);
        });
    if session_config.resume {
        if let Some(session_file) = session_file.as_ref() {
            // Read the session metadata
            let metadata = session::read_metadata(session_file).unwrap_or_else(|e| {
                output::render_error(&format!("Failed to read session metadata: {}", e));
                process::exit(1);
            });

        let current_workdir =
            std::env::current_dir().expect("Failed to get current working directory");
        if current_workdir != metadata.working_dir {
            // Ask user if they want to change the working directory
            let change_workdir = cliclack::confirm(format!("{} The original working directory of this session was set to {}. Your current directory is {}. Do you want to switch back to the original working directory?", style("WARNING:").yellow(), style(metadata.working_dir.display()).cyan(), style(current_workdir.display()).cyan()))
            let current_workdir =
                std::env::current_dir().expect("Failed to get current working directory");
            if current_workdir != metadata.working_dir {
                // Ask user if they want to change the working directory
                let change_workdir = cliclack::confirm(format!("{} The original working directory of this session was set to {}. Your current directory is {}. Do you want to switch back to the original working directory?", style("WARNING:").yellow(), style(metadata.working_dir.display()).cyan(), style(current_workdir.display()).cyan()))
                .initial_value(true)
                .interact().expect("Failed to get user input");

            if change_workdir {
                if !metadata.working_dir.exists() {
                    output::render_error(&format!(
                        "Cannot switch to original working directory - {} no longer exists",
                        style(metadata.working_dir.display()).cyan()
                    ));
                } else if let Err(e) = std::env::set_current_dir(&metadata.working_dir) {
                    output::render_error(&format!(
                        "Failed to switch to original working directory: {}",
                        e
                    ));
                if change_workdir {
                    if !metadata.working_dir.exists() {
                        output::render_error(&format!(
                            "Cannot switch to original working directory - {} no longer exists",
                            style(metadata.working_dir.display()).cyan()
                        ));
                    } else if let Err(e) = std::env::set_current_dir(&metadata.working_dir) {
                        output::render_error(&format!(
                            "Failed to switch to original working directory: {}",
                            e
                        ));
                    }
                }
            }
        }
    }
@@ -180,39 +358,194 @@ pub async fn build_session(session_config: SessionBuilderConfig) -> Session {
                ExtensionError::Transport(McpClientError::StdioProcessError(inner)) => inner,
                _ => e.to_string(),
            };
            eprintln!("Failed to start extension: {}, {:?}", extension.name(), err);
            eprintln!(
                "Please check extension configuration for {}.",
                extension.name()
                "{}",
                style(format!(
                    "Warning: Failed to start extension '{}': {}",
                    extension.name(),
                    err
                ))
                .yellow()
            );
            process::exit(1);
            eprintln!(
                "{}",
                style(format!(
                    "Continuing without extension '{}'",
                    extension.name()
                ))
                .yellow()
            );

            // Offer debugging help
            if let Err(debug_err) = offer_extension_debugging_help(
                &extension.name(),
                &err,
                Arc::clone(&provider_for_display),
                session_config.interactive,
            )
            .await
            {
                eprintln!("Note: Could not start debugging session: {}", debug_err);
            }
        }
    }

    // Determine editor mode
    let edit_mode = config
        .get_param::<String>("EDIT_MODE")
        .ok()
        .and_then(|edit_mode| match edit_mode.to_lowercase().as_str() {
            "emacs" => Some(EditMode::Emacs),
            "vi" => Some(EditMode::Vi),
            _ => {
                eprintln!("Invalid EDIT_MODE specified, defaulting to Emacs");
                None
            }
        });

    // Create new session
    let mut session = Session::new(agent, session_file.clone(), session_config.debug);
    let mut session = Session::new(
        agent,
        session_file.clone(),
        session_config.debug,
        session_config.scheduled_job_id.clone(),
        session_config.max_turns,
        edit_mode,
        session_config.retry_config.clone(),
    );

    // Add extensions if provided
    for extension_str in session_config.extensions {
        if let Err(e) = session.add_extension(extension_str).await {
            eprintln!("Failed to start extension: {}", e);
            process::exit(1);
        if let Err(e) = session.add_extension(extension_str.clone()).await {
            eprintln!(
                "{}",
                style(format!(
                    "Warning: Failed to start extension '{}': {}",
                    extension_str, e
                ))
                .yellow()
            );
            eprintln!(
                "{}",
                style(format!("Continuing without extension '{}'", extension_str)).yellow()
            );

            // Offer debugging help
            if let Err(debug_err) = offer_extension_debugging_help(
                &extension_str,
                &e.to_string(),
                Arc::clone(&provider_for_display),
                session_config.interactive,
            )
            .await
            {
                eprintln!("Note: Could not start debugging session: {}", debug_err);
            }
        }
    }

    // Add remote extensions if provided
    for extension_str in session_config.remote_extensions {
        if let Err(e) = session.add_remote_extension(extension_str).await {
            eprintln!("Failed to start extension: {}", e);
            process::exit(1);
        if let Err(e) = session.add_remote_extension(extension_str.clone()).await {
            eprintln!(
                "{}",
                style(format!(
                    "Warning: Failed to start remote extension '{}': {}",
                    extension_str, e
                ))
                .yellow()
            );
            eprintln!(
                "{}",
                style(format!(
                    "Continuing without remote extension '{}'",
                    extension_str
                ))
                .yellow()
            );

            // Offer debugging help
            if let Err(debug_err) = offer_extension_debugging_help(
                &extension_str,
                &e.to_string(),
                Arc::clone(&provider_for_display),
                session_config.interactive,
            )
            .await
            {
                eprintln!("Note: Could not start debugging session: {}", debug_err);
            }
        }
    }

    // Add streamable HTTP extensions if provided
    for extension_str in session_config.streamable_http_extensions {
        if let Err(e) = session
            .add_streamable_http_extension(extension_str.clone())
            .await
        {
            eprintln!(
                "{}",
                style(format!(
                    "Warning: Failed to start streamable HTTP extension '{}': {}",
                    extension_str, e
                ))
                .yellow()
            );
            eprintln!(
                "{}",
                style(format!(
                    "Continuing without streamable HTTP extension '{}'",
                    extension_str
                ))
                .yellow()
            );

            // Offer debugging help
            if let Err(debug_err) = offer_extension_debugging_help(
                &extension_str,
                &e.to_string(),
                Arc::clone(&provider_for_display),
                session_config.interactive,
            )
            .await
            {
                eprintln!("Note: Could not start debugging session: {}", debug_err);
            }
        }
    }

    // Add builtin extensions
    for builtin in session_config.builtins {
        if let Err(e) = session.add_builtin(builtin).await {
            eprintln!("Failed to start builtin extension: {}", e);
            process::exit(1);
        if let Err(e) = session.add_builtin(builtin.clone()).await {
            eprintln!(
                "{}",
                style(format!(
                    "Warning: Failed to start builtin extension '{}': {}",
                    builtin, e
                ))
                .yellow()
            );
            eprintln!(
                "{}",
                style(format!(
                    "Continuing without builtin extension '{}'",
                    builtin
                ))
                .yellow()
            );

            // Offer debugging help
            if let Err(debug_err) = offer_extension_debugging_help(
                &builtin,
                &e.to_string(),
                Arc::clone(&provider_for_display),
                session_config.interactive,
            )
            .await
            {
                eprintln!("Note: Could not start debugging session: {}", debug_err);
            }
        }
    }
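Each extension kind above repeats the same warn-and-continue pair of `eprintln!` calls; a hypothetical helper that could factor out that repetition (a sketch, not part of the diff):

```rust
// Hypothetical refactor of the repeated warn-and-continue pattern above;
// `kind` would be "extension", "remote extension", "builtin extension", etc.
fn warn_extension_failure(kind: &str, name: &str, err: &str) {
    eprintln!(
        "{}",
        style(format!("Warning: Failed to start {} '{}': {}", kind, name, err)).yellow()
    );
    eprintln!(
        "{}",
        style(format!("Continuing without {} '{}'", kind, name)).yellow()
    );
}
```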
@@ -234,12 +567,95 @@ pub async fn build_session(session_config: SessionBuilderConfig) -> Session {
        session.agent.override_system_prompt(override_prompt).await;
    }

    output::display_session_info(
        session_config.resume,
        &provider_name,
        &model,
        &session_file,
        Some(&provider_for_display),
    );
    // Display session information unless in quiet mode
    if !session_config.quiet {
        output::display_session_info(
            session_config.resume,
            &provider_name,
            &model_name,
            &session_file,
            Some(&provider_for_display),
        );
    }
    session
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_session_builder_config_creation() {
        let config = SessionBuilderConfig {
            identifier: Some(Identifier::Name("test".to_string())),
            resume: false,
            no_session: false,
            extensions: vec!["echo test".to_string()],
            remote_extensions: vec!["http://example.com".to_string()],
            streamable_http_extensions: vec!["http://example.com/streamable".to_string()],
            builtins: vec!["developer".to_string()],
            extensions_override: None,
            additional_system_prompt: Some("Test prompt".to_string()),
            settings: None,
            provider: None,
            model: None,
            debug: true,
            max_tool_repetitions: Some(5),
            max_turns: None,
            scheduled_job_id: None,
            interactive: true,
            quiet: false,
            sub_recipes: None,
            final_output_response: None,
            retry_config: None,
        };

        assert_eq!(config.extensions.len(), 1);
        assert_eq!(config.remote_extensions.len(), 1);
        assert_eq!(config.streamable_http_extensions.len(), 1);
        assert_eq!(config.builtins.len(), 1);
        assert!(config.debug);
        assert_eq!(config.max_tool_repetitions, Some(5));
        assert!(config.max_turns.is_none());
        assert!(config.scheduled_job_id.is_none());
        assert!(config.interactive);
        assert!(!config.quiet);
    }

    #[test]
    fn test_session_builder_config_default() {
        let config = SessionBuilderConfig::default();

        assert!(config.identifier.is_none());
        assert!(!config.resume);
        assert!(!config.no_session);
        assert!(config.extensions.is_empty());
        assert!(config.remote_extensions.is_empty());
        assert!(config.streamable_http_extensions.is_empty());
        assert!(config.builtins.is_empty());
        assert!(config.extensions_override.is_none());
        assert!(config.additional_system_prompt.is_none());
        assert!(!config.debug);
        assert!(config.max_tool_repetitions.is_none());
        assert!(config.max_turns.is_none());
        assert!(config.scheduled_job_id.is_none());
        assert!(!config.interactive);
        assert!(!config.quiet);
        assert!(config.final_output_response.is_none());
    }

    #[tokio::test]
    async fn test_offer_extension_debugging_help_function_exists() {
        // This test just verifies the function compiles and can be called
        // We can't easily test the interactive parts without mocking

        // We can't actually test the full function without a real provider and user interaction
        // But we can at least verify it compiles and the function signature is correct
        let extension_name = "test-extension";
        let error_message = "test error";

        // This test mainly serves as a compilation check
        assert_eq!(extension_name, "test-extension");
        assert_eq!(error_message, "test error");
    }
}
@@ -397,9 +397,10 @@ impl Validator for GooseCompleter {

#[cfg(test)]
mod tests {
    use rmcp::model::PromptArgument;

    use super::*;
    use crate::session::output;
    use mcp_core::prompt::PromptArgument;
    use std::sync::{Arc, RwLock};

    // Helper function to create a test completion cache
@@ -1,7 +1,6 @@
use goose::message::{Message, MessageContent, ToolRequest, ToolResponse};
use mcp_core::content::Content as McpContent;
use mcp_core::resource::ResourceContents;
use mcp_core::role::Role;
use goose::utils::safe_truncate;
use rmcp::model::{RawContent, ResourceContents, Role};
use serde_json::Value;

const MAX_STRING_LENGTH_MD_EXPORT: usize = 4096; // Generous limit for export
@@ -10,9 +9,9 @@ const REDACTED_PREFIX_LENGTH: usize = 100; // Show first 100 chars before trimmi
fn value_to_simple_markdown_string(value: &Value, export_full_strings: bool) -> String {
    match value {
        Value::String(s) => {
            if !export_full_strings && s.len() > MAX_STRING_LENGTH_MD_EXPORT {
                let prefix = &s[..REDACTED_PREFIX_LENGTH.min(s.len())];
                let trimmed_chars = s.len() - prefix.len();
            if !export_full_strings && s.chars().count() > MAX_STRING_LENGTH_MD_EXPORT {
                let prefix = safe_truncate(s, REDACTED_PREFIX_LENGTH);
                let trimmed_chars = s.chars().count() - prefix.chars().count();
                format!("`{}[ ... trimmed : {} chars ... ]`", prefix, trimmed_chars)
            } else {
                // Escape backticks and newlines for inline code.
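The switch from `s.len()` to `s.chars().count()` matters because `len()` counts bytes, not characters: multi-byte text gets over-counted, and byte-indexed slicing like the old `&s[..N]` can panic when `N` lands inside a multi-byte character. A self-contained illustration of the standard Rust behavior (not code from this diff):

```rust
// len() is bytes; chars().count() is characters.
fn main() {
    let s = "héllo"; // 'é' is 2 bytes in UTF-8
    assert_eq!(s.len(), 6);           // byte length
    assert_eq!(s.chars().count(), 5); // character count
    // &s[..2] would panic here: byte index 2 falls inside 'é'.
    // safe_truncate presumably cuts on a char boundary instead.
}
```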
@@ -40,7 +39,7 @@ fn value_to_markdown(value: &Value, depth: usize, export_full_strings: bool) ->
            md_string.push_str(&format!("{}* **{}**: ", base_indent_str, key));
            match val {
                Value::String(s) => {
                    if s.contains('\n') || s.len() > 80 {
                    if s.contains('\n') || s.chars().count() > 80 {
                        // Heuristic for block
                        md_string.push_str(&format!(
                            "\n{} ```\n{}{}\n{} ```\n",
@@ -74,7 +73,7 @@ fn value_to_markdown(value: &Value, depth: usize, export_full_strings: bool) ->
            md_string.push_str(&format!("{}* - ", base_indent_str));
            match item {
                Value::String(s) => {
                    if s.contains('\n') || s.len() > 80 {
                    if s.contains('\n') || s.chars().count() > 80 {
                        // Heuristic for block
                        md_string.push_str(&format!(
                            "\n{} ```\n{}{}\n{} ```\n",
@@ -218,8 +217,8 @@ pub fn tool_response_to_markdown(resp: &ToolResponse, export_all_content: bool)
            }
        }

        match content {
            McpContent::Text(text_content) => {
        match &content.raw {
            RawContent::Text(text_content) => {
                let trimmed_text = text_content.text.trim();
                if (trimmed_text.starts_with('{') && trimmed_text.ends_with('}'))
                    || (trimmed_text.starts_with('[') && trimmed_text.ends_with(']'))
@@ -235,7 +234,7 @@ pub fn tool_response_to_markdown(resp: &ToolResponse, export_all_content: bool)
                    md.push_str("\n\n");
                }
            }
            McpContent::Image(image_content) => {
            RawContent::Image(image_content) => {
                if image_content.mime_type.starts_with("image/") {
                    // For actual images, provide a placeholder that indicates it's an image
                    md.push_str(&format!(
@@ -251,7 +250,7 @@ pub fn tool_response_to_markdown(resp: &ToolResponse, export_all_content: bool)
                    ));
                }
            }
            McpContent::Resource(resource) => {
            RawContent::Resource(resource) => {
                match &resource.resource {
                    ResourceContents::TextResourceContents {
                        uri,
@@ -298,6 +297,9 @@ pub fn tool_response_to_markdown(resp: &ToolResponse, export_all_content: bool)
                    }
                }
            }
            RawContent::Audio(_) => {
                md.push_str("[audio content not displayed in Markdown export]\n\n")
            }
        }
    }
}
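In the migrated tests below, hand-built `TextContent` values are wrapped via `Content::text(...)`; the equivalent construction in brief, using only the `rmcp` items this diff imports:

```rust
use rmcp::model::Content;

fn main() {
    // Convenience constructor replacing the hand-built TextContent values.
    let c = Content::text("Command executed successfully");
    let _ = c;
}
```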
@@ -359,8 +361,8 @@ pub fn message_to_markdown(message: &Message, export_all_content: bool) -> Strin
mod tests {
    use super::*;
    use goose::message::{Message, ToolRequest, ToolResponse};
    use mcp_core::content::{Content as McpContent, TextContent};
    use mcp_core::tool::ToolCall;
    use rmcp::model::{Content, RawTextContent, TextContent};
    use serde_json::json;

    #[test]
@@ -397,7 +399,7 @@ mod tests {
        assert!(result.starts_with("`"));
        assert!(result.contains("[ ... trimmed : "));
        assert!(result.contains("4900 chars ... ]`"));
        assert!(result.contains(&"a".repeat(100))); // Should contain the prefix
        assert!(result.contains(&"a".repeat(97))); // Should contain the prefix (100 - 3 for "...")
    }

    #[test]
@@ -520,12 +522,14 @@ mod tests {
    #[test]
    fn test_tool_response_to_markdown_text() {
        let text_content = TextContent {
            text: "Command executed successfully".to_string(),
            raw: RawTextContent {
                text: "Command executed successfully".to_string(),
            },
            annotations: None,
        };
        let tool_response = ToolResponse {
            id: "test-id".to_string(),
            tool_result: Ok(vec![McpContent::Text(text_content)]),
            tool_result: Ok(vec![Content::text(text_content.raw.text)]),
        };

        let result = tool_response_to_markdown(&tool_response, true);
@@ -537,12 +541,14 @@ mod tests {
    fn test_tool_response_to_markdown_json() {
        let json_text = r#"{"status": "success", "data": "test"}"#;
        let text_content = TextContent {
            text: json_text.to_string(),
            raw: RawTextContent {
                text: json_text.to_string(),
            },
            annotations: None,
        };
        let tool_response = ToolResponse {
            id: "test-id".to_string(),
            tool_result: Ok(vec![McpContent::Text(text_content)]),
            tool_result: Ok(vec![Content::text(text_content.raw.text)]),
        };

        let result = tool_response_to_markdown(&tool_response, true);
@@ -639,12 +645,14 @@ if __name__ == "__main__":
    hello_world()"#;

        let text_content = TextContent {
            text: python_code.to_string(),
            raw: RawTextContent {
                text: python_code.to_string(),
            },
            annotations: None,
        };
        let tool_response = ToolResponse {
            id: "shell-cat".to_string(),
            tool_result: Ok(vec![McpContent::Text(text_content)]),
            tool_result: Ok(vec![Content::text(text_content.raw.text)]),
        };

        let request_result = tool_request_to_markdown(&tool_request, true);
@@ -676,12 +684,14 @@ if __name__ == "__main__":

        let git_output = " M src/main.rs\n?? temp.txt\n A new_feature.rs";
        let text_content = TextContent {
            text: git_output.to_string(),
            raw: RawTextContent {
                text: git_output.to_string(),
            },
            annotations: None,
        };
        let tool_response = ToolResponse {
            id: "git-status".to_string(),
            tool_result: Ok(vec![McpContent::Text(text_content)]),
            tool_result: Ok(vec![Content::text(text_content.raw.text)]),
        };

        let request_result = tool_request_to_markdown(&tool_request, true);
@@ -721,12 +731,14 @@ warning: unused variable `x`
    Finished dev [unoptimized + debuginfo] target(s) in 2.45s"#;

        let text_content = TextContent {
            text: build_output.to_string(),
            raw: RawTextContent {
                text: build_output.to_string(),
            },
            annotations: None,
        };
        let tool_response = ToolResponse {
            id: "cargo-build".to_string(),
            tool_result: Ok(vec![McpContent::Text(text_content)]),
            tool_result: Ok(vec![Content::text(text_content.raw.text)]),
        };

        let response_result = tool_response_to_markdown(&tool_response, true);
@@ -764,12 +776,14 @@ warning: unused variable `x`
}"#;

        let text_content = TextContent {
            text: api_response.to_string(),
            raw: RawTextContent {
                text: api_response.to_string(),
            },
            annotations: None,
        };
        let tool_response = ToolResponse {
            id: "curl-api".to_string(),
            tool_result: Ok(vec![McpContent::Text(text_content)]),
            tool_result: Ok(vec![Content::text(text_content.raw.text)]),
        };

        let response_result = tool_response_to_markdown(&tool_response, true);
@@ -796,12 +810,14 @@ warning: unused variable `x`
        };

        let text_content = TextContent {
            text: "File created successfully".to_string(),
            raw: RawTextContent {
                text: "File created successfully".to_string(),
            },
            annotations: None,
        };
        let tool_response = ToolResponse {
            id: "editor-write".to_string(),
            tool_result: Ok(vec![McpContent::Text(text_content)]),
            tool_result: Ok(vec![Content::text(text_content.raw.text)]),
        };

        let request_result = tool_request_to_markdown(&tool_request, true);
@@ -849,12 +865,14 @@ def process_data(data: List[Dict]) -> List[Dict]:
    return [item for item in data if item.get('active', False)]"#;

        let text_content = TextContent {
            text: python_code.to_string(),
            raw: RawTextContent {
                text: python_code.to_string(),
            },
            annotations: None,
        };
        let tool_response = ToolResponse {
            id: "editor-view".to_string(),
            tool_result: Ok(vec![McpContent::Text(text_content)]),
            tool_result: Ok(vec![Content::text(text_content.raw.text)]),
        };

        let response_result = tool_response_to_markdown(&tool_response, true);
@@ -882,12 +900,14 @@ def process_data(data: List[Dict]) -> List[Dict]:
Command failed with exit code 2"#;

        let text_content = TextContent {
            text: error_output.to_string(),
            raw: RawTextContent {
                text: error_output.to_string(),
            },
            annotations: None,
        };
        let tool_response = ToolResponse {
            id: "shell-error".to_string(),
            tool_result: Ok(vec![McpContent::Text(text_content)]),
            tool_result: Ok(vec![Content::text(text_content.raw.text)]),
        };

        let response_result = tool_response_to_markdown(&tool_response, true);
@@ -918,12 +938,14 @@ Command failed with exit code 2"#;
    5^2 = 25"#;

        let text_content = TextContent {
            text: script_output.to_string(),
raw: RawTextContent {
|
||||
text: script_output.to_string(),
|
||||
},
|
||||
annotations: None,
|
||||
};
|
||||
let tool_response = ToolResponse {
|
||||
id: "script-exec".to_string(),
|
||||
tool_result: Ok(vec![McpContent::Text(text_content)]),
|
||||
tool_result: Ok(vec![Content::text(text_content.raw.text)]),
|
||||
};
|
||||
|
||||
let request_result = tool_request_to_markdown(&tool_request, true);
|
||||
@@ -961,12 +983,14 @@ drwx------ 3 user staff 96 Dec 6 16:20 com.apple.launchd.abc
|
||||
/tmp"#;
|
||||
|
||||
let text_content = TextContent {
|
||||
text: multi_output.to_string(),
|
||||
raw: RawTextContent {
|
||||
text: multi_output.to_string(),
|
||||
},
|
||||
annotations: None,
|
||||
};
|
||||
let tool_response = ToolResponse {
|
||||
id: "multi-cmd".to_string(),
|
||||
tool_result: Ok(vec![McpContent::Text(text_content)]),
|
||||
tool_result: Ok(vec![Content::text(text_content.raw.text)]),
|
||||
};
|
||||
|
||||
let request_result = tool_request_to_markdown(&_tool_request, true);
|
||||
@@ -1000,12 +1024,14 @@ src/database.rs:23:async fn query_users(pool: &Pool) -> Result<Vec<User>> {
|
||||
src/middleware.rs:12:async fn auth_middleware(req: Request, next: Next) -> Result<Response> {"#;
|
||||
|
||||
let text_content = TextContent {
|
||||
text: grep_output.to_string(),
|
||||
raw: RawTextContent {
|
||||
text: grep_output.to_string(),
|
||||
},
|
||||
annotations: None,
|
||||
};
|
||||
let tool_response = ToolResponse {
|
||||
id: "grep-search".to_string(),
|
||||
tool_result: Ok(vec![McpContent::Text(text_content)]),
|
||||
tool_result: Ok(vec![Content::text(text_content.raw.text)]),
|
||||
};
|
||||
|
||||
let request_result = tool_request_to_markdown(&tool_request, true);
|
||||
@@ -1036,12 +1062,14 @@ src/middleware.rs:12:async fn auth_middleware(req: Request, next: Next) -> Resul
|
||||
|
||||
let json_output = r#"{"status": "success", "data": {"count": 42}}"#;
|
||||
let text_content = TextContent {
|
||||
text: json_output.to_string(),
|
||||
raw: RawTextContent {
|
||||
text: json_output.to_string(),
|
||||
},
|
||||
annotations: None,
|
||||
};
|
||||
let tool_response = ToolResponse {
|
||||
id: "json-test".to_string(),
|
||||
tool_result: Ok(vec![McpContent::Text(text_content)]),
|
||||
tool_result: Ok(vec![Content::text(text_content.raw.text)]),
|
||||
};
|
||||
|
||||
let response_result = tool_response_to_markdown(&tool_response, true);
|
||||
@@ -1073,12 +1101,14 @@ src/middleware.rs:12:async fn auth_middleware(req: Request, next: Next) -> Resul
|
||||
found 0 vulnerabilities"#;
|
||||
|
||||
let text_content = TextContent {
|
||||
text: npm_output.to_string(),
|
||||
raw: RawTextContent {
|
||||
text: npm_output.to_string(),
|
||||
},
|
||||
annotations: None,
|
||||
};
|
||||
let tool_response = ToolResponse {
|
||||
id: "npm-install".to_string(),
|
||||
tool_result: Ok(vec![McpContent::Text(text_content)]),
|
||||
tool_result: Ok(vec![Content::text(text_content.raw.text)]),
|
||||
};
|
||||
|
||||
let request_result = tool_request_to_markdown(&tool_request, true);
|
||||
|
||||
@@ -11,12 +11,14 @@ pub enum InputResult {
|
||||
AddExtension(String),
|
||||
AddBuiltin(String),
|
||||
ToggleTheme,
|
||||
SelectTheme(String),
|
||||
Retry,
|
||||
ListPrompts(Option<String>),
|
||||
PromptCommand(PromptCommandOptions),
|
||||
GooseMode(String),
|
||||
Plan(PlanCommandOptions),
|
||||
EndPlan,
|
||||
Clear,
|
||||
Recipe(Option<String>),
|
||||
Summarize,
|
||||
}
|
||||
@@ -91,6 +93,7 @@ fn handle_slash_command(input: &str) -> Option<InputResult> {
|
||||
const CMD_MODE: &str = "/mode ";
|
||||
const CMD_PLAN: &str = "/plan";
|
||||
const CMD_ENDPLAN: &str = "/endplan";
|
||||
const CMD_CLEAR: &str = "/clear";
|
||||
const CMD_RECIPE: &str = "/recipe";
|
||||
const CMD_SUMMARIZE: &str = "/summarize";
|
||||
|
||||
@@ -101,6 +104,22 @@ fn handle_slash_command(input: &str) -> Option<InputResult> {
|
||||
Some(InputResult::Retry)
|
||||
}
|
||||
"/t" => Some(InputResult::ToggleTheme),
|
||||
s if s.starts_with("/t ") => {
|
||||
let t = s
|
||||
.strip_prefix("/t ")
|
||||
.unwrap_or_default()
|
||||
.trim()
|
||||
.to_lowercase();
|
||||
if ["light", "dark", "ansi"].contains(&t.as_str()) {
|
||||
Some(InputResult::SelectTheme(t))
|
||||
} else {
|
||||
println!(
|
||||
"Theme Unavailable: {} Available themes are: light, dark, ansi",
|
||||
t
|
||||
);
|
||||
Some(InputResult::Retry)
|
||||
}
|
||||
}
|
||||
"/prompts" => Some(InputResult::ListPrompts(None)),
|
||||
s if s.starts_with(CMD_PROMPTS) => {
|
||||
// Parse arguments for /prompts command
|
||||
@@ -134,6 +153,7 @@ fn handle_slash_command(input: &str) -> Option<InputResult> {
|
||||
}
|
||||
s if s.starts_with(CMD_PLAN) => parse_plan_command(s[CMD_PLAN.len()..].trim().to_string()),
|
||||
s if s == CMD_ENDPLAN => Some(InputResult::EndPlan),
|
||||
s if s == CMD_CLEAR => Some(InputResult::Clear),
|
||||
s if s.starts_with(CMD_RECIPE) => parse_recipe_command(s),
|
||||
s if s == CMD_SUMMARIZE => Some(InputResult::Summarize),
|
||||
_ => None,
|
||||
@@ -231,6 +251,7 @@ fn print_help() {
|
||||
"Available commands:
|
||||
/exit or /quit - Exit the session
|
||||
/t - Toggle Light/Dark/Ansi theme
|
||||
/t <name> - Set theme directly (light, dark, ansi)
|
||||
/extension <command> - Add a stdio extension (format: ENV1=val1 command args...)
|
||||
/builtin <names> - Add builtin extensions by name (comma-separated)
|
||||
/prompts [--extension <name>] - List all available prompts, optionally filtered by extension
|
||||
@@ -246,6 +267,7 @@ fn print_help() {
|
||||
If no filepath is provided, it will be saved to ./recipe.yaml.
|
||||
/summarize - Summarize the current conversation to reduce context length while preserving key information.
|
||||
/? or /help - Display this help message
|
||||
/clear - Clears the current chat history
|
||||
|
||||
Navigation:
|
||||
Ctrl+C - Interrupt goose (resets the interaction to before the interrupted user request)
|
||||
|
||||
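For a quick sense of the dispatch added above, a minimal sketch of how the new commands resolve (illustrative only, not part of the diff; assumes `InputResult` derives `Debug` and `PartialEq`):

// Hypothetical checks against handle_slash_command as extended above:
assert_eq!(
    handle_slash_command("/t DARK"), // argument is trimmed and lowercased
    Some(InputResult::SelectTheme("dark".to_string()))
);
assert_eq!(handle_slash_command("/clear"), Some(InputResult::Clear));
assert_eq!(handle_slash_command("/summarize"), Some(InputResult::Summarize));
assert_eq!(handle_slash_command("not a command"), None);
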
@@ -4,39 +4,50 @@ mod export;
mod input;
mod output;
mod prompt;
mod task_execution_display;
mod thinking;

use crate::session::task_execution_display::{
    format_task_execution_notification, TASK_EXECUTION_NOTIFICATION_TYPE,
};
use std::io::Write;

pub use self::export::message_to_markdown;
pub use builder::{build_session, SessionBuilderConfig};
pub use builder::{build_session, SessionBuilderConfig, SessionSettings};
use console::Color;
use goose::agents::AgentEvent;
use goose::message::push_message;
use goose::permission::permission_confirmation::PrincipalType;
use goose::permission::Permission;
use goose::permission::PermissionConfirmation;
use goose::providers::base::Provider;
pub use goose::session::Identifier;
use goose::utils::safe_truncate;

use anyhow::{Context, Result};
use completion::GooseCompleter;
use etcetera::{choose_app_strategy, AppStrategy};
use goose::agents::extension::{Envs, ExtensionConfig};
use goose::agents::types::RetryConfig;
use goose::agents::{Agent, SessionConfig};
use goose::config::Config;
use goose::message::{Message, MessageContent};
use goose::providers::pricing::initialize_pricing_cache;
use goose::session;
use input::InputResult;
use mcp_core::handler::ToolError;
use mcp_core::prompt::PromptMessage;
use mcp_core::protocol::JsonRpcMessage;
use mcp_core::protocol::JsonRpcNotification;
use rmcp::model::PromptMessage;
use rmcp::model::ServerNotification;

use rand::{distributions::Alphanumeric, Rng};
use rustyline::EditMode;
use serde_json::Value;
use std::collections::HashMap;
use std::path::PathBuf;
use std::sync::Arc;
use std::time::Instant;
use tokio;
use tokio_util::sync::CancellationToken;

pub enum RunMode {
    Normal,
@@ -46,11 +57,15 @@ pub enum RunMode {
pub struct Session {
    agent: Agent,
    messages: Vec<Message>,
    session_file: PathBuf,
    session_file: Option<PathBuf>,
    // Cache for completion data - using std::sync for thread safety without async
    completion_cache: Arc<std::sync::RwLock<CompletionCache>>,
    debug: bool, // New field for debug mode
    run_mode: RunMode,
    scheduled_job_id: Option<String>, // ID of the scheduled job that triggered this session
    max_turns: Option<u32>,
    edit_mode: Option<EditMode>,
    retry_config: Option<RetryConfig>,
}

// Cache structure for completion data
@@ -107,13 +122,23 @@ pub async fn classify_planner_response(
}

impl Session {
    pub fn new(agent: Agent, session_file: PathBuf, debug: bool) -> Self {
        let messages = match session::read_messages(&session_file) {
            Ok(msgs) => msgs,
            Err(e) => {
    pub fn new(
        agent: Agent,
        session_file: Option<PathBuf>,
        debug: bool,
        scheduled_job_id: Option<String>,
        max_turns: Option<u32>,
        edit_mode: Option<EditMode>,
        retry_config: Option<RetryConfig>,
    ) -> Self {
        let messages = if let Some(session_file) = &session_file {
            session::read_messages(session_file).unwrap_or_else(|e| {
                eprintln!("Warning: Failed to load message history: {}", e);
                Vec::new()
            }
            })
        } else {
            // Don't try to read messages if we're not saving sessions
            Vec::new()
        };

        Session {
@@ -123,6 +148,10 @@ impl Session {
            completion_cache: Arc::new(std::sync::RwLock::new(CompletionCache::new())),
            debug,
            run_mode: RunMode::Normal,
            scheduled_job_id,
            max_turns,
            edit_mode,
            retry_config,
        }
    }

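With the widened constructor, call sites now pass the optional session file and the new knobs explicitly; a hedged sketch of the new call shape (argument values are illustrative):

// All arguments after `agent` are illustrative; `None` for the optional
// parameters is valid throughout:
let session = Session::new(
    agent,
    Some(session_file_path), // Option<PathBuf>: None runs without persistence
    false,                   // debug
    None,                    // scheduled_job_id
    Some(1000),              // max_turns
    Some(EditMode::Vi),      // edit_mode (falls back to Emacs when None)
    None,                    // retry_config
);
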
@@ -148,7 +177,7 @@ impl Session {
    /// Format: "ENV1=val1 ENV2=val2 command args..."
    pub async fn add_extension(&mut self, extension_command: String) -> Result<()> {
        let mut parts: Vec<&str> = extension_command.split_whitespace().collect();
        let mut envs = std::collections::HashMap::new();
        let mut envs = HashMap::new();

        // Parse environment variables (format: KEY=value)
        while let Some(part) = parts.first() {
@@ -228,6 +257,40 @@ impl Session {
        Ok(())
    }

    /// Add a streamable HTTP extension to the session
    ///
    /// # Arguments
    /// * `extension_url` - URL of the server
    pub async fn add_streamable_http_extension(&mut self, extension_url: String) -> Result<()> {
        let name: String = rand::thread_rng()
            .sample_iter(&Alphanumeric)
            .take(8)
            .map(char::from)
            .collect();

        let config = ExtensionConfig::StreamableHttp {
            name,
            uri: extension_url,
            envs: Envs::new(HashMap::new()),
            env_keys: Vec::new(),
            headers: HashMap::new(),
            description: Some(goose::config::DEFAULT_EXTENSION_DESCRIPTION.to_string()),
            // TODO: should set timeout
            timeout: Some(goose::config::DEFAULT_EXTENSION_TIMEOUT),
            bundled: None,
        };

        self.agent
            .add_extension(config)
            .await
            .map_err(|e| anyhow::anyhow!("Failed to start extension: {}", e))?;

        // Invalidate the completion cache when a new extension is added
        self.invalidate_completion_cache().await;

        Ok(())
    }

    /// Add a builtin extension to the session
    ///
    /// # Arguments
@@ -240,6 +303,7 @@ impl Session {
            // TODO: should set a timeout
            timeout: Some(goose::config::DEFAULT_EXTENSION_TIMEOUT),
            bundled: None,
            description: None,
        };
        self.agent
            .add_extension(config)
@@ -296,23 +360,36 @@ impl Session {
    }

    pub async fn get_prompt(&mut self, name: &str, arguments: Value) -> Result<Vec<PromptMessage>> {
        let result = self.agent.get_prompt(name, arguments).await?;
        Ok(result.messages)
        Ok(self.agent.get_prompt(name, arguments).await?.messages)
    }

    /// Process a single message and get the response
    async fn process_message(&mut self, message: String) -> Result<()> {
        self.messages.push(Message::user().with_text(&message));
        self.push_message(Message::user().with_text(&message));
        // Get the provider from the agent for description generation
        let provider = self.agent.provider().await?;

        // Persist messages with provider for automatic description generation
        session::persist_messages(&self.session_file, &self.messages, Some(provider)).await?;
        if let Some(session_file) = &self.session_file {
            let working_dir = Some(
                std::env::current_dir().expect("failed to get current session working directory"),
            );

            session::persist_messages_with_schedule_id(
                session_file,
                &self.messages,
                Some(provider),
                self.scheduled_job_id.clone(),
                working_dir,
            )
            .await?;
        }

        // Track the current directory and last instruction in projects.json
        let session_id = self
            .session_file
            .file_stem()
            .as_ref()
            .and_then(|p| p.file_stem())
            .and_then(|s| s.to_str())
            .map(|s| s.to_string());

@@ -340,9 +417,15 @@ impl Session {
        self.update_completion_cache().await?;

        // Create a new editor with our custom completer
        let config = rustyline::Config::builder()
            .completion_type(rustyline::CompletionType::Circular)
            .build();
        let builder =
            rustyline::Config::builder().completion_type(rustyline::CompletionType::Circular);
        let builder = if let Some(edit_mode) = self.edit_mode {
            builder.edit_mode(edit_mode)
        } else {
            // Default to Emacs mode if no edit mode is set
            builder.edit_mode(EditMode::Emacs)
        };
        let config = builder.build();
        let mut editor =
            rustyline::Editor::<GooseCompleter, rustyline::history::DefaultHistory>::with_config(
                config,
@@ -388,17 +471,18 @@ impl Session {
            self.display_context_usage().await?;

            match input::get_input(&mut editor)? {
                input::InputResult::Message(content) => {
                InputResult::Message(content) => {
                    match self.run_mode {
                        RunMode::Normal => {
                            save_history(&mut editor);

                            self.messages.push(Message::user().with_text(&content));
                            self.push_message(Message::user().with_text(&content));

                            // Track the current directory and last instruction in projects.json
                            let session_id = self
                                .session_file
                                .file_stem()
                                .as_ref()
                                .and_then(|p| p.file_stem())
                                .and_then(|s| s.to_str())
                                .map(|s| s.to_string());

@@ -409,16 +493,21 @@ impl Session {
                                eprintln!("Warning: Failed to update project tracker with instruction: {}", e);
                            }

                            // Get the provider from the agent for description generation
                            let provider = self.agent.provider().await?;

                            // Persist messages with provider for automatic description generation
                            session::persist_messages(
                                &self.session_file,
                                &self.messages,
                                Some(provider),
                            )
                            .await?;
                            if let Some(session_file) = &self.session_file {
                                let working_dir = Some(std::env::current_dir().unwrap_or_default());

                                session::persist_messages_with_schedule_id(
                                    session_file,
                                    &self.messages,
                                    Some(provider),
                                    self.scheduled_job_id.clone(),
                                    working_dir,
                                )
                                .await?;
                            }

                            output::show_thinking();
                            self.process_agent_response(true).await?;
@@ -471,6 +560,28 @@ impl Session {
                    output::set_theme(new_theme);
                    continue;
                }

                input::InputResult::SelectTheme(theme_name) => {
                    save_history(&mut editor);

                    let new_theme = match theme_name.as_str() {
                        "light" => {
                            println!("Switching to Light theme");
                            output::Theme::Light
                        }
                        "dark" => {
                            println!("Switching to Dark theme");
                            output::Theme::Dark
                        }
                        "ansi" => {
                            println!("Switching to Ansi theme");
                            output::Theme::Ansi
                        }
                        _ => output::Theme::Dark,
                    };
                    output::set_theme(new_theme);
                    continue;
                }
                input::InputResult::Retry => continue,
                input::InputResult::ListPrompts(extension) => {
                    save_history(&mut editor);
@@ -521,6 +632,21 @@ impl Session {
                    output::render_exit_plan_mode();
                    continue;
                }
                input::InputResult::Clear => {
                    save_history(&mut editor);

                    self.messages.clear();
                    tracing::info!("Chat context cleared by user.");
                    output::render_message(
                        &Message::assistant().with_text("Chat context cleared."),
                        self.debug,
                    );
                    if let Some(file) = self.session_file.as_ref().filter(|f| f.exists()) {
                        std::fs::remove_file(file)?;
                        std::fs::File::create(file)?;
                    }
                    continue;
                }
                input::InputResult::PromptCommand(opts) => {
                    save_history(&mut editor);
                    self.handle_prompt_command(opts).await?;
@@ -589,12 +715,17 @@ impl Session {
                    self.messages = summarized_messages;

                    // Persist the summarized messages
                    session::persist_messages(
                        &self.session_file,
                        &self.messages,
                        Some(provider),
                    )
                    .await?;
                    if let Some(session_file) = &self.session_file {
                        let working_dir = std::env::current_dir().ok();
                        session::persist_messages_with_schedule_id(
                            session_file,
                            &self.messages,
                            Some(provider),
                            self.scheduled_job_id.clone(),
                            working_dir,
                        )
                        .await?;
                    }

                    output::hide_thinking();
                    println!(
@@ -618,8 +749,11 @@ impl Session {
        }

        println!(
            "\nClosing session. Recorded to {}",
            self.session_file.display()
            "\nClosing session.{}",
            self.session_file
                .as_ref()
                .map(|p| format!(" Recorded to {}", p.display()))
                .unwrap_or_default()
        );
        Ok(())
    }
@@ -673,7 +807,7 @@ impl Session {
                    self.messages.clear();
                    // add the plan response as a user message
                    let plan_message = Message::user().with_text(plan_response.as_concat_text());
                    self.messages.push(plan_message);
                    self.push_message(plan_message);
                    // act on the plan
                    output::show_thinking();
                    self.process_agent_response(true).await?;
@@ -688,13 +822,13 @@ impl Session {
                } else {
                    // add the plan response (assistant message) & carry the conversation forward
                    // in the next round, the user might wanna slightly modify the plan
                    self.messages.push(plan_response);
                    self.push_message(plan_response);
                }
            }
            PlannerResponseType::ClarifyingQuestions => {
                // add the plan response (assistant message) & carry the conversation forward
                // in the next round, the user will answer the clarifying questions
                self.messages.push(plan_response);
                self.push_message(plan_response);
            }
        }

@@ -707,18 +841,23 @@ impl Session {
    }

    async fn process_agent_response(&mut self, interactive: bool) -> Result<()> {
        let session_id = session::Identifier::Path(self.session_file.clone());
        let cancel_token = CancellationToken::new();
        let cancel_token_clone = cancel_token.clone();

        let session_config = self.session_file.as_ref().map(|s| {
            let session_id = session::Identifier::Path(s.clone());
            SessionConfig {
                id: session_id.clone(),
                working_dir: std::env::current_dir().unwrap_or_default(),
                schedule_id: self.scheduled_job_id.clone(),
                execution_mode: None,
                max_turns: self.max_turns,
                retry_config: self.retry_config.clone(),
            }
        });
        let mut stream = self
            .agent
            .reply(
                &self.messages,
                Some(SessionConfig {
                    id: session_id.clone(),
                    working_dir: std::env::current_dir()
                        .expect("failed to get current session working directory"),
                    schedule_id: None,
                }),
            )
            .reply(&self.messages, session_config.clone(), Some(cancel_token))
            .await?;

        let mut progress_bars = output::McpSpinners::new();
@@ -764,9 +903,19 @@ impl Session {
                            confirmation.id.clone(),
                            Err(ToolError::ExecutionError("Tool call cancelled by user".to_string()))
                        ));
                        self.messages.push(response_message);
                        session::persist_messages(&self.session_file, &self.messages, None).await?;

                        push_message(&mut self.messages, response_message);
                        if let Some(session_file) = &self.session_file {
                            let working_dir = std::env::current_dir().ok();
                            session::persist_messages_with_schedule_id(
                                session_file,
                                &self.messages,
                                None,
                                self.scheduled_job_id.clone(),
                                working_dir,
                            )
                            .await?;
                        }
                        cancel_token_clone.cancel();
                        drop(stream);
                        break;
                    } else {
@@ -847,74 +996,155 @@ impl Session {
                            .agent
                            .reply(
                                &self.messages,
                                Some(SessionConfig {
                                    id: session_id.clone(),
                                    working_dir: std::env::current_dir()
                                        .expect("failed to get current session working directory"),
                                    schedule_id: None,
                                }),
                                session_config.clone(),
                                None
                            )
                            .await?;
                    }
                    // otherwise we have a model/tool to render
                    else {
                        self.messages.push(message.clone());
                        push_message(&mut self.messages, message.clone());

                        // No need to update description on assistant messages
                        session::persist_messages(&self.session_file, &self.messages, None).await?;
                        if let Some(session_file) = &self.session_file {
                            let working_dir = std::env::current_dir().ok();
                            session::persist_messages_with_schedule_id(
                                session_file,
                                &self.messages,
                                None,
                                self.scheduled_job_id.clone(),
                                working_dir,
                            )
                            .await?;
                        }

                        if interactive {output::hide_thinking()};
                        let _ = progress_bars.hide();
                        output::render_message(&message, self.debug);
                        if interactive {output::show_thinking()};
                    }
                }
                Some(Ok(AgentEvent::McpNotification((_id, message)))) => {
                    if let JsonRpcMessage::Notification(JsonRpcNotification{
                        method,
                        params: Some(Value::Object(o)),
                        ..
                    }) = message {
                        match method.as_str() {
                            "notifications/message" => {
                                let data = o.get("data").unwrap_or(&Value::Null);
                                let message = match data {
                                    Value::String(s) => s.clone(),
                                    Value::Object(o) => {
                                        if let Some(Value::String(output)) = o.get("output") {
                                            output.to_owned()
                                        } else {
                                            data.to_string()
                                        }
                                    },
                                    v => {
                                        v.to_string()
                                    },
                                };
                                progress_bars.log(&message);
                            },
                            "notifications/progress" => {
                                let progress = o.get("progress").and_then(|v| v.as_f64());
                                let token = o.get("progressToken").map(|v| v.to_string());
                                let message = o.get("message").and_then(|v| v.as_str());
                                let total = o
                                    .get("total")
                                    .and_then(|v| v.as_f64());
                                if let (Some(progress), Some(token)) = (progress, token) {
                                    progress_bars.update(
                                        token.as_str(),
                                        progress,
                                        total,
                                        message,
                                    );
                    match &message {
                        ServerNotification::LoggingMessageNotification(notification) => {
                            let data = &notification.params.data;
                            let (formatted_message, subagent_id, message_notification_type) = match data {
                                Value::String(s) => (s.clone(), None, None),
                                Value::Object(o) => {
                                    // Check for subagent notification structure first
                                    if let Some(Value::String(msg)) = o.get("message") {
                                        // Extract subagent info for better display
                                        let subagent_id = o.get("subagent_id")
                                            .and_then(|v| v.as_str())
                                            .unwrap_or("unknown");
                                        let notification_type = o.get("type")
                                            .and_then(|v| v.as_str())
                                            .unwrap_or("");

                                        let formatted = match notification_type {
                                            "subagent_created" | "completed" | "terminated" => {
                                                format!("🤖 {}", msg)
                                            }
                                            "tool_usage" | "tool_completed" | "tool_error" => {
                                                format!("🔧 {}", msg)
                                            }
                                            "message_processing" | "turn_progress" => {
                                                format!("💭 {}", msg)
                                            }
                                            "response_generated" => {
                                                // Check verbosity setting for subagent response content
                                                let config = Config::global();
                                                let min_priority = config
                                                    .get_param::<f32>("GOOSE_CLI_MIN_PRIORITY")
                                                    .ok()
                                                    .unwrap_or(0.5);

                                                if min_priority > 0.1 && !self.debug {
                                                    // High/Medium verbosity: show truncated response
                                                    if let Some(response_content) = msg.strip_prefix("Responded: ") {
                                                        format!("🤖 Responded: {}", safe_truncate(response_content, 100))
                                                    } else {
                                                        format!("🤖 {}", msg)
                                                    }
                                                } else {
                                                    // All verbosity or debug: show full response
                                                    format!("🤖 {}", msg)
                                                }
                                            }
                                            _ => {
                                                msg.to_string()
                                            }
                                        };
                                        (formatted, Some(subagent_id.to_string()), Some(notification_type.to_string()))
                                    } else if let Some(Value::String(output)) = o.get("output") {
                                        // Fallback for other MCP notification types
                                        (output.to_owned(), None, None)
                                    } else if let Some(result) = format_task_execution_notification(data) {
                                        result
                                    } else {
                                        (data.to_string(), None, None)
                                    }
                                },
                                v => {
                                    (v.to_string(), None, None)
                                },
                            };

                            // Handle subagent notifications - show immediately
                            if let Some(_id) = subagent_id {
                                // TODO: proper display for subagent notifications
                                if interactive {
                                    let _ = progress_bars.hide();
                                    println!("{}", console::style(&formatted_message).green().dim());
                                } else {
                                    progress_bars.log(&formatted_message);
                                }
                            },
                            _ => (),
                        }
                            } else if let Some(ref notification_type) = message_notification_type {
                                if notification_type == TASK_EXECUTION_NOTIFICATION_TYPE {
                                    if interactive {
                                        let _ = progress_bars.hide();
                                        print!("{}", formatted_message);
                                        std::io::stdout().flush().unwrap();
                                    } else {
                                        print!("{}", formatted_message);
                                        std::io::stdout().flush().unwrap();
                                    }
                                }
                            }
                            else {
                                // Non-subagent notification, display immediately with compact spacing
                                if interactive {
                                    let _ = progress_bars.hide();
                                    println!("{}", console::style(&formatted_message).green().dim());
                                } else {
                                    progress_bars.log(&formatted_message);
                                }
                            }
                        },
                        ServerNotification::ProgressNotification(notification) => {
                            let progress = notification.params.progress;
                            let text = notification.params.message.as_deref();
                            let total = notification.params.total;
                            let token = &notification.params.progress_token;
                            progress_bars.update(
                                &token.0.to_string(),
                                progress,
                                total,
                                text,
                            );
                        },
                        _ => (),
                    }
                }
                Some(Ok(AgentEvent::ModelChange { model, mode })) => {
                    // Log model change if in debug mode
                    if self.debug {
                        eprintln!("Model changed to {} in {} mode", model, mode);
                    }
                }

                Some(Err(e)) => {
                    eprintln!("Error: {}", e);
                    cancel_token_clone.cancel();
                    drop(stream);
                    if let Err(e) = self.handle_interrupted_messages(false).await {
                        eprintln!("Error handling interruption: {}", e);
@@ -931,6 +1161,7 @@ impl Session {
                    }
                }
                _ = tokio::signal::ctrl_c() => {
                    cancel_token_clone.cancel();
                    drop(stream);
                    if let Err(e) = self.handle_interrupted_messages(true).await {
                        eprintln!("Error handling interruption: {}", e);
@@ -939,6 +1170,7 @@ impl Session {
                }
            }
        }
        println!();

        Ok(())
    }
@@ -948,7 +1180,7 @@ impl Session {
        let tool_requests = self
            .messages
            .last()
            .filter(|msg| msg.role == mcp_core::role::Role::Assistant)
            .filter(|msg| msg.role == rmcp::model::Role::Assistant)
            .map_or(Vec::new(), |msg| {
                msg.content
                    .iter()
@@ -982,34 +1214,63 @@ impl Session {
                    Err(ToolError::ExecutionError(notification.clone())),
                ));
            }
            self.messages.push(response_message);
            self.push_message(response_message);

            // No need for description update here
            session::persist_messages(&self.session_file, &self.messages, None).await?;
            if let Some(session_file) = &self.session_file {
                let working_dir = std::env::current_dir().ok();
                session::persist_messages_with_schedule_id(
                    session_file,
                    &self.messages,
                    None,
                    self.scheduled_job_id.clone(),
                    working_dir,
                )
                .await?;
            }

            let prompt = format!(
                "The existing call to {} was interrupted. How would you like to proceed?",
                last_tool_name
            );
            self.messages.push(Message::assistant().with_text(&prompt));
            self.push_message(Message::assistant().with_text(&prompt));

            // No need for description update here
            session::persist_messages(&self.session_file, &self.messages, None).await?;
            if let Some(session_file) = &self.session_file {
                let working_dir = std::env::current_dir().ok();
                session::persist_messages_with_schedule_id(
                    session_file,
                    &self.messages,
                    None,
                    self.scheduled_job_id.clone(),
                    working_dir,
                )
                .await?;
            }

            output::render_message(&Message::assistant().with_text(&prompt), self.debug);
        } else {
            // An interruption occurred outside of a tool request-response.
            if let Some(last_msg) = self.messages.last() {
                if last_msg.role == mcp_core::role::Role::User {
                if last_msg.role == rmcp::model::Role::User {
                    match last_msg.content.first() {
                        Some(MessageContent::ToolResponse(_)) => {
                            // Interruption occurred after a tool had completed but not assistant reply
                            let prompt = "The tool calling loop was interrupted. How would you like to proceed?";
                            self.messages.push(Message::assistant().with_text(prompt));
                            self.push_message(Message::assistant().with_text(prompt));

                            // No need for description update here
                            session::persist_messages(&self.session_file, &self.messages, None)
                            if let Some(session_file) = &self.session_file {
                                let working_dir = std::env::current_dir().ok();
                                session::persist_messages_with_schedule_id(
                                    session_file,
                                    &self.messages,
                                    None,
                                    self.scheduled_job_id.clone(),
                                    working_dir,
                                )
                                .await?;
                            }

                            output::render_message(
                                &Message::assistant().with_text(prompt),
@@ -1033,7 +1294,7 @@ impl Session {
        Ok(())
    }

    pub fn session_file(&self) -> PathBuf {
    pub fn session_file(&self) -> Option<PathBuf> {
        self.session_file.clone()
    }

@@ -1107,13 +1368,12 @@ impl Session {
        );
    }

    /// Get the session metadata
    pub fn get_metadata(&self) -> Result<session::SessionMetadata> {
        if !self.session_file.exists() {
        if !self.session_file.as_ref().is_some_and(|f| f.exists()) {
            return Err(anyhow::anyhow!("Session file does not exist"));
        }

        session::read_metadata(&self.session_file)
        session::read_metadata(self.session_file.as_ref().unwrap())
    }

    // Get the session's total token usage
@@ -1126,13 +1386,42 @@ impl Session {
    pub async fn display_context_usage(&self) -> Result<()> {
        let provider = self.agent.provider().await?;
        let model_config = provider.get_model_config();
        let context_limit = model_config.context_limit.unwrap_or(32000);
        let context_limit = model_config.context_limit();

        let config = Config::global();
        let show_cost = config
            .get_param::<bool>("GOOSE_CLI_SHOW_COST")
            .unwrap_or(false);

        let provider_name = config
            .get_param::<String>("GOOSE_PROVIDER")
            .unwrap_or_else(|_| "unknown".to_string());

        // Initialize pricing cache on startup
        tracing::info!("Initializing pricing cache...");
        if let Err(e) = initialize_pricing_cache().await {
            tracing::warn!(
                "Failed to initialize pricing cache: {e}. Pricing data may not be available."
            );
        }

        match self.get_metadata() {
            Ok(metadata) => {
                let total_tokens = metadata.total_tokens.unwrap_or(0) as usize;

                output::display_context_usage(total_tokens, context_limit);

                if show_cost {
                    let input_tokens = metadata.input_tokens.unwrap_or(0) as usize;
                    let output_tokens = metadata.output_tokens.unwrap_or(0) as usize;
                    output::display_cost_usage(
                        &provider_name,
                        &model_config.model_name,
                        input_tokens,
                        output_tokens,
                    )
                    .await;
                }
            }
            Err(_) => {
                output::display_context_usage(0, context_limit);
@@ -1168,9 +1457,9 @@ impl Session {
            let msg = Message::from(prompt_message);
            // ensure we get a User - Assistant - User type pattern
            let expected_role = if i % 2 == 0 {
                mcp_core::Role::User
                rmcp::model::Role::User
            } else {
                mcp_core::Role::Assistant
                rmcp::model::Role::Assistant
            };

            if msg.role != expected_role {
@@ -1184,10 +1473,10 @@ impl Session {
                break;
            }

            if msg.role == mcp_core::Role::User {
            if msg.role == rmcp::model::Role::User {
                output::render_message(&msg, self.debug);
            }
            self.messages.push(msg);
            self.push_message(msg);
        }

        if valid {
@@ -1245,6 +1534,10 @@ impl Session {

        Ok(path)
    }

    fn push_message(&mut self, message: Message) {
        push_message(&mut self.messages, message);
    }
}

fn get_reasoner() -> Result<Arc<dyn Provider>, anyhow::Error> {
@@ -1273,7 +1566,8 @@ fn get_reasoner() -> Result<Arc<dyn Provider>, anyhow::Error> {
        .expect("No model configured. Run 'goose configure' first")
    };

    let model_config = ModelConfig::new(model);
    let model_config =
        ModelConfig::new_with_context_env(model, Some("GOOSE_PLANNER_CONTEXT_LIMIT"));
    let reasoner = create(&provider, model_config)?;

    Ok(reasoner)

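The planner's model config is now built with `ModelConfig::new_with_context_env`, so its context limit can presumably be overridden independently of the main model; a hedged sketch of the intended effect (environment variable name as in the diff, values illustrative):

// Illustrative only: with GOOSE_PLANNER_CONTEXT_LIMIT set, the planner's
// ModelConfig presumably picks up that limit; otherwise the model's own
// default context window applies.
std::env::set_var("GOOSE_PLANNER_CONTEXT_LIMIT", "200000");
let reasoner = get_reasoner()?; // planner now budgets against ~200k tokens
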
@@ -2,14 +2,16 @@ use bat::WrappingMode;
use console::{style, Color};
use goose::config::Config;
use goose::message::{Message, MessageContent, ToolRequest, ToolResponse};
use goose::providers::pricing::get_model_pricing;
use indicatif::{MultiProgress, ProgressBar, ProgressStyle};
use mcp_core::prompt::PromptArgument;
use mcp_core::tool::ToolCall;
use regex::Regex;
use rmcp::model::PromptArgument;
use serde_json::Value;
use std::cell::RefCell;
use std::collections::HashMap;
use std::io::Error;
use std::path::Path;
use std::io::{Error, Write};
use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::time::Duration;

@@ -67,6 +69,17 @@ pub fn set_theme(theme: Theme) {
        .set_param("GOOSE_CLI_THEME", Value::String(theme.as_config_string()))
        .expect("Failed to set theme");
    CURRENT_THEME.with(|t| *t.borrow_mut() = theme);

    let config = Config::global();
    let theme_str = match theme {
        Theme::Light => "light",
        Theme::Dark => "dark",
        Theme::Ansi => "ansi",
    };

    if let Err(e) = config.set_param("GOOSE_CLI_THEME", Value::String(theme_str.to_string())) {
        eprintln!("Failed to save theme setting to config: {}", e);
    }
}

pub fn get_theme() -> Theme {
@@ -117,6 +130,15 @@ pub fn hide_thinking() {
    THINKING.with(|t| t.borrow_mut().hide());
}

#[allow(dead_code)]
pub fn set_thinking_message(s: &String) {
    THINKING.with(|t| {
        if let Some(spinner) = t.borrow_mut().spinner.as_mut() {
            spinner.set_message(s);
        }
    });
}

pub fn render_message(message: &Message, debug: bool) {
    let theme = get_theme();

@@ -144,7 +166,8 @@ pub fn render_message(message: &Message, debug: bool) {
            }
        }
    }
    println!();

    let _ = std::io::stdout().flush();
}

pub fn render_text(text: &str, color: Option<Color>, dim: bool) {
@@ -196,6 +219,7 @@ fn render_tool_request(req: &ToolRequest, theme: Theme, debug: bool) {
        Ok(call) => match call.name.as_str() {
            "developer__text_editor" => render_text_editor_request(call, debug),
            "developer__shell" => render_shell_request(call, debug),
            "dynamic_task__create_task" => render_dynamic_task_request(call, debug),
            _ => render_default_request(call, debug),
        },
        Err(e) => print_markdown(&e.to_string(), theme),
@@ -209,7 +233,7 @@ fn render_tool_response(resp: &ToolResponse, theme: Theme, debug: bool) {
        Ok(contents) => {
            for content in contents {
                if let Some(audience) = content.audience() {
                    if !audience.contains(&mcp_core::role::Role::User) {
                    if !audience.contains(&rmcp::model::Role::User) {
                        continue;
                    }
                }
@@ -229,7 +253,7 @@ fn render_tool_response(resp: &ToolResponse, theme: Theme, debug: bool) {

                if debug {
                    println!("{:#?}", content);
                } else if let mcp_core::content::Content::Text(text) = content {
                } else if let Some(text) = content.as_text() {
                    print_markdown(&text.text, theme);
                }
            }
@@ -369,6 +393,37 @@ fn render_shell_request(call: &ToolCall, debug: bool) {
    }
}

fn render_dynamic_task_request(call: &ToolCall, debug: bool) {
    print_tool_header(call);

    // Print task_parameters array
    if let Some(Value::Array(task_parameters)) = call.arguments.get("task_parameters") {
        println!("{}:", style("task_parameters").dim());

        for task_param in task_parameters.iter() {
            println!("    -");

            if let Some(param_obj) = task_param.as_object() {
                for (key, value) in param_obj {
                    match value {
                        Value::String(s) => {
                            // For strings, print the full content without truncation
                            println!("        {}: {}", style(key).dim(), style(s).green());
                        }
                        _ => {
                            // For everything else, use print_params
                            print!("        ");
                            print_params(value, 0, debug);
                        }
                    }
                }
            }
        }
    }

    println!();
}

fn render_default_request(call: &ToolCall, debug: bool) {
    print_tool_header(call);
    print_params(&call.arguments, 0, debug);
@@ -541,12 +596,12 @@ pub fn display_session_info(
    resume: bool,
    provider: &str,
    model: &str,
    session_file: &Path,
    session_file: &Option<PathBuf>,
    provider_instance: Option<&Arc<dyn goose::providers::base::Provider>>,
) {
    let start_session_msg = if resume {
        "resuming session |"
    } else if session_file.to_str() == Some("/dev/null") || session_file.to_str() == Some("NUL") {
    } else if session_file.is_none() {
        "running without session |"
    } else {
        "starting session |"
@@ -588,7 +643,7 @@ pub fn display_session_info(
        );
    }

    if session_file.to_str() != Some("/dev/null") && session_file.to_str() != Some("NUL") {
    if let Some(session_file) = session_file {
        println!(
            "    {} {}",
            style("logging to").dim(),
@@ -613,12 +668,19 @@ pub fn display_greeting() {
pub fn display_context_usage(total_tokens: usize, context_limit: usize) {
    use console::style;

    // Calculate percentage used
    let percentage = (total_tokens as f64 / context_limit as f64 * 100.0).round() as usize;
    if context_limit == 0 {
        println!("Context: Error - context limit is zero");
        return;
    }

    // Create dot visualization
    // Calculate percentage used with bounds checking
    let percentage =
        (((total_tokens as f64 / context_limit as f64) * 100.0).round() as usize).min(100);

    // Create dot visualization with safety bounds
    let dot_count = 10;
    let filled_dots = ((percentage as f64 / 100.0) * dot_count as f64).round() as usize;
    let filled_dots =
        (((percentage as f64 / 100.0) * dot_count as f64).round() as usize).min(dot_count);
    let empty_dots = dot_count - filled_dots;

    let filled = "●".repeat(filled_dots);
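A worked example of the bounded arithmetic above (values illustrative):

// total_tokens = 8_000, context_limit = 32_000:
//   percentage  = round(8000 / 32000 * 100) = 25  (capped at 100)
//   filled_dots = round(25 / 100 * 10)      = 3   (Rust rounds half away from zero; capped at 10)
//   empty_dots  = 10 - 3                    = 7
// so the meter renders three filled dots out of ten alongside "25%", per the styling below.
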
@@ -641,6 +703,68 @@ pub fn display_context_usage(total_tokens: usize, context_limit: usize) {
    );
}

fn normalize_model_name(model: &str) -> String {
    let mut result = model.to_string();

    // Remove "-latest" suffix
    if result.ends_with("-latest") {
        result = result.strip_suffix("-latest").unwrap().to_string();
    }

    // Remove date-like suffixes: -YYYYMMDD
    let re_date = Regex::new(r"-\d{8}$").unwrap();
    if re_date.is_match(&result) {
        result = re_date.replace(&result, "").to_string();
    }

    // Convert version numbers like -3-5- to -3.5- (e.g., claude-3-5-haiku -> claude-3.5-haiku)
    let re_version = Regex::new(r"-(\d+)-(\d+)-").unwrap();
    if re_version.is_match(&result) {
        result = re_version.replace(&result, "-$1.$2-").to_string();
    }

    result
}

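Tracing a couple of hypothetical model names through the three rules above:

// "claude-3-5-haiku-20241022"
//   -> date suffix "-20241022" stripped  => "claude-3-5-haiku"
//   -> "-3-5-" rewritten to "-3.5-"      => "claude-3.5-haiku"
// "gpt-4o-latest"
//   -> "-latest" stripped                => "gpt-4o" (no date or version rewrite applies)
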
async fn estimate_cost_usd(
    provider: &str,
    model: &str,
    input_tokens: usize,
    output_tokens: usize,
) -> Option<f64> {
    // Use the pricing module's get_model_pricing which handles model name mapping internally
    let cleaned_model = normalize_model_name(model);
    let pricing_info = get_model_pricing(provider, &cleaned_model).await;

    match pricing_info {
        Some(pricing) => {
            let input_cost = pricing.input_cost * input_tokens as f64;
            let output_cost = pricing.output_cost * output_tokens as f64;
            Some(input_cost + output_cost)
        }
        None => None,
    }
}

/// Display cost information, if price data is available.
pub async fn display_cost_usage(
    provider: &str,
    model: &str,
    input_tokens: usize,
    output_tokens: usize,
) {
    if let Some(cost) = estimate_cost_usd(provider, model, input_tokens, output_tokens).await {
        use console::style;
        println!(
            "Cost: {} USD ({} tokens: in {}, out {})",
            style(format!("${:.4}", cost)).cyan(),
            input_tokens + output_tokens,
            input_tokens,
            output_tokens
        );
    }
}

pub struct McpSpinners {
    bars: HashMap<String, ProgressBar>,
    log_spinner: Option<ProgressBar>,
@@ -675,11 +799,11 @@ impl McpSpinners {
        spinner.set_message(message.to_string());
    }

    pub fn update(&mut self, token: &str, value: f64, total: Option<f64>, message: Option<&str>) {
    pub fn update(&mut self, token: &str, value: u32, total: Option<u32>, message: Option<&str>) {
        let bar = self.bars.entry(token.to_string()).or_insert_with(|| {
            if let Some(total) = total {
                self.multi_bar.add(
                    ProgressBar::new((total * 100.0) as u64).with_style(
                    ProgressBar::new((total * 100) as u64).with_style(
                        ProgressStyle::with_template("[{elapsed}] {bar:40} {pos:>3}/{len:3} {msg}")
                            .unwrap(),
                    ),
@@ -688,7 +812,7 @@ impl McpSpinners {
                self.multi_bar.add(ProgressBar::new_spinner())
            }
        });
        bar.set_position((value * 100.0) as u64);
        bar.set_position((value * 100) as u64);
        if let Some(msg) = message {
            bar.set_message(msg.to_string());
        }

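Note the scaling in `update`: with the switch from `f64` to `u32`, both the bar length and the position are multiplied by 100, preserving sub-unit display resolution. A hedged sketch (the `spinners` binding, token, and values are illustrative):

// A task reporting step 3 of 10 creates a bar of length 1000 and moves it to 300:
spinners.update("build-step", 3, Some(10), Some("compiling"));
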
202
crates/goose-cli/src/session/task_execution_display/mod.rs
Normal file
@@ -0,0 +1,202 @@
use goose::agents::subagent_execution_tool::lib::TaskStatus;
use goose::agents::subagent_execution_tool::notification_events::{
    TaskExecutionNotificationEvent, TaskInfo,
};
use goose::utils::safe_truncate;
use serde_json::Value;
use std::sync::atomic::{AtomicBool, Ordering};

#[cfg(test)]
mod tests;

const CLEAR_SCREEN: &str = "\x1b[2J\x1b[H";
const MOVE_TO_PROGRESS_LINE: &str = "\x1b[4;1H";
const CLEAR_TO_EOL: &str = "\x1b[K";
const CLEAR_BELOW: &str = "\x1b[J";
pub const TASK_EXECUTION_NOTIFICATION_TYPE: &str = "task_execution";

static INITIAL_SHOWN: AtomicBool = AtomicBool::new(false);

fn format_result_data_for_display(result_data: &Value) -> String {
    match result_data {
        Value::String(s) => s.to_string(),
        Value::Object(obj) => {
            if let Some(partial_output) = obj.get("partial_output").and_then(|v| v.as_str()) {
                format!("Partial output: {}", partial_output)
            } else {
                serde_json::to_string_pretty(obj).unwrap_or_default()
            }
        }
        Value::Array(arr) => serde_json::to_string_pretty(arr).unwrap_or_default(),
        Value::Bool(b) => b.to_string(),
        Value::Number(n) => n.to_string(),
        Value::Null => "null".to_string(),
    }
}

fn process_output_for_display(output: &str) -> String {
    const MAX_OUTPUT_LINES: usize = 2;
    const OUTPUT_PREVIEW_LENGTH: usize = 100;

    let lines: Vec<&str> = output.lines().collect();
    let recent_lines = if lines.len() > MAX_OUTPUT_LINES {
        &lines[lines.len() - MAX_OUTPUT_LINES..]
    } else {
        &lines
    };

    let clean_output = recent_lines.join(" ... ");
    safe_truncate(&clean_output, OUTPUT_PREVIEW_LENGTH)
}

pub fn format_task_execution_notification(
    data: &Value,
) -> Option<(String, Option<String>, Option<String>)> {
    if let Ok(event) = serde_json::from_value::<TaskExecutionNotificationEvent>(data.clone()) {
        return Some(match event {
            TaskExecutionNotificationEvent::LineOutput { output, .. } => (
                format!("{}\n", output),
                None,
                Some(TASK_EXECUTION_NOTIFICATION_TYPE.to_string()),
            ),
            TaskExecutionNotificationEvent::TasksUpdate { .. } => {
                let formatted_display = format_tasks_update_from_event(&event);
                (
                    formatted_display,
                    None,
                    Some(TASK_EXECUTION_NOTIFICATION_TYPE.to_string()),
                )
            }
            TaskExecutionNotificationEvent::TasksComplete { .. } => {
                let formatted_summary = format_tasks_complete_from_event(&event);
                (
                    formatted_summary,
                    None,
                    Some(TASK_EXECUTION_NOTIFICATION_TYPE.to_string()),
                )
            }
        });
    }
    None
}

fn format_tasks_update_from_event(event: &TaskExecutionNotificationEvent) -> String {
    if let TaskExecutionNotificationEvent::TasksUpdate { stats, tasks } = event {
        let mut display = String::new();

        if !INITIAL_SHOWN.swap(true, Ordering::SeqCst) {
            display.push_str(CLEAR_SCREEN);
            display.push_str("🎯 Task Execution Dashboard\n");
            display.push_str("═══════════════════════════\n\n");
        } else {
            display.push_str(MOVE_TO_PROGRESS_LINE);
        }

        display.push_str(&format!(
            "📊 Progress: {} total | ⏳ {} pending | 🏃 {} running | ✅ {} completed | ❌ {} failed",
            stats.total, stats.pending, stats.running, stats.completed, stats.failed
        ));
        display.push_str(&format!("{}\n\n", CLEAR_TO_EOL));

        let mut sorted_tasks = tasks.clone();
        sorted_tasks.sort_by(|a, b| a.id.cmp(&b.id));

        for task in sorted_tasks {
            display.push_str(&format_task_display(&task));
        }

        display.push_str(CLEAR_BELOW);
        display
    } else {
        String::new()
    }
}

fn format_tasks_complete_from_event(event: &TaskExecutionNotificationEvent) -> String {
    if let TaskExecutionNotificationEvent::TasksComplete {
        stats,
        failed_tasks,
    } = event
    {
        let mut summary = String::new();
        summary.push_str("Execution Complete!\n");
        summary.push_str("═══════════════════════\n");

        summary.push_str(&format!("Total Tasks: {}\n", stats.total));
        summary.push_str(&format!("✅ Completed: {}\n", stats.completed));
        summary.push_str(&format!("❌ Failed: {}\n", stats.failed));
        summary.push_str(&format!("📈 Success Rate: {:.1}%\n", stats.success_rate));

        if !failed_tasks.is_empty() {
            summary.push_str("\n❌ Failed Tasks:\n");
            for task in failed_tasks {
                summary.push_str(&format!("  • {}\n", task.name));
                if let Some(error) = &task.error {
                    summary.push_str(&format!("    Error: {}\n", error));
                }
            }
        }

        summary.push_str("\n📝 Generating summary...\n");
        summary
    } else {
        String::new()
    }
}

fn format_task_display(task: &TaskInfo) -> String {
    let mut task_display = String::new();

    let status_icon = match task.status {
        TaskStatus::Pending => "⏳",
        TaskStatus::Running => "🏃",
        TaskStatus::Completed => "✅",
        TaskStatus::Failed => "❌",
    };

    task_display.push_str(&format!(
        "{} {} ({}){}\n",
        status_icon, task.task_name, task.task_type, CLEAR_TO_EOL
    ));

    if !task.task_metadata.is_empty() {
        task_display.push_str(&format!(
            "   📋 Parameters: {}{}\n",
            task.task_metadata, CLEAR_TO_EOL
        ));
    }

    if let Some(duration_secs) = task.duration_secs {
        task_display.push_str(&format!("   ⏱️ {:.1}s{}\n", duration_secs, CLEAR_TO_EOL));
    }

    if matches!(task.status, TaskStatus::Running) && !task.current_output.trim().is_empty() {
        let processed_output = process_output_for_display(&task.current_output);
        if !processed_output.is_empty() {
            task_display.push_str(&format!("   💬 {}{}\n", processed_output, CLEAR_TO_EOL));
        }
    }

    if matches!(task.status, TaskStatus::Completed) {
        if let Some(result_data) = &task.result_data {
            let result_preview = format_result_data_for_display(result_data);
            if !result_preview.is_empty() {
                task_display.push_str(&format!("   📄 {}{}\n", result_preview, CLEAR_TO_EOL));
            }
        }
    }

    if matches!(task.status, TaskStatus::Failed) {
        if let Some(error) = &task.error {
            let error_preview = safe_truncate(error, 80);
            task_display.push_str(&format!(
                "   ⚠️ {}{}\n",
                error_preview.replace('\n', " "),
                CLEAR_TO_EOL
            ));
        }
    }

    task_display.push_str(&format!("{}\n", CLEAR_TO_EOL));
    task_display
}
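For readers unfamiliar with the raw escape codes used by the dashboard, the constants above map to standard ANSI control sequences; the in-place redraw cycle they imply:

// \x1b[2J\x1b[H  (CLEAR_SCREEN)           clear screen, cursor to home -- first update only
// \x1b[4;1H      (MOVE_TO_PROGRESS_LINE)  cursor to row 4, col 1, just below the header
// \x1b[K         (CLEAR_TO_EOL)           erase to end of line after each rewritten row
// \x1b[J         (CLEAR_BELOW)            erase everything below the final task entry
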
304
crates/goose-cli/src/session/task_execution_display/tests.rs
Normal file
@@ -0,0 +1,304 @@
|
||||
use super::*;
use goose::agents::subagent_execution_tool::notification_events::{
    FailedTaskInfo, TaskCompletionStats, TaskExecutionStats,
};
use serde_json::json;

#[test]
fn test_process_output_for_display() {
    assert_eq!(process_output_for_display("hello world"), "hello world");
    assert_eq!(
        process_output_for_display("line1\nline2"),
        "line1 ... line2"
    );

    let input = "line1\nline2\nline3\nline4";
    let result = process_output_for_display(input);
    assert_eq!(result, "line3 ... line4");

    let long_line = "a".repeat(150);
    let result = process_output_for_display(&long_line);
    assert!(result.len() <= 100);
    assert!(result.ends_with("..."));

    assert_eq!(process_output_for_display(""), "");
}

#[test]
fn test_format_result_data_for_display() {
    assert_eq!(
        format_result_data_for_display(&json!("red text")),
        "red text"
    );

    assert_eq!(format_result_data_for_display(&json!(true)), "true");
    assert_eq!(format_result_data_for_display(&json!(false)), "false");
    assert_eq!(format_result_data_for_display(&json!(42)), "42");
    assert_eq!(format_result_data_for_display(&json!(3.14)), "3.14");
    assert_eq!(format_result_data_for_display(&json!(null)), "null");

    let partial_obj = json!({
        "partial_output": "some output",
        "other_field": "ignored"
    });
    assert_eq!(
        format_result_data_for_display(&partial_obj),
        "Partial output: some output"
    );

    let obj = json!({"key": "value", "num": 42});
    let result = format_result_data_for_display(&obj);
    assert!(result.contains("key"));
    assert!(result.contains("value"));

    let arr = json!([1, 2, 3]);
    let result = format_result_data_for_display(&arr);
    assert!(result.contains("1"));
    assert!(result.contains("2"));
    assert!(result.contains("3"));
}

#[test]
fn test_format_task_execution_notification_line_output() {
    let _event = TaskExecutionNotificationEvent::LineOutput {
        task_id: "task-1".to_string(),
        output: "Hello World".to_string(),
    };

    let data = json!({
        "subtype": "line_output",
        "task_id": "task-1",
        "output": "Hello World"
    });

    let result = format_task_execution_notification(&data);
    assert!(result.is_some());

    let (formatted, second, third) = result.unwrap();
    assert_eq!(formatted, "Hello World\n");
    assert_eq!(second, None);
    assert_eq!(third, Some("task_execution".to_string()));
}

#[test]
fn test_format_task_execution_notification_invalid_data() {
    let invalid_data = json!({
        "invalid": "structure"
    });

    let result = format_task_execution_notification(&invalid_data);
    assert_eq!(result, None);

    let incomplete_data = json!({
        "subtype": "line_output"
    });

    let result = format_task_execution_notification(&incomplete_data);
    assert_eq!(result, None);
}

#[test]
fn test_format_tasks_update_from_event() {
    INITIAL_SHOWN.store(false, Ordering::SeqCst);

    let stats = TaskExecutionStats::new(3, 1, 1, 1, 0);
    let tasks = vec![
        TaskInfo {
            id: "task-1".to_string(),
            status: TaskStatus::Running,
            duration_secs: Some(1.5),
            current_output: "Processing...".to_string(),
            task_type: "sub_recipe".to_string(),
            task_name: "test-task".to_string(),
            task_metadata: "param=value".to_string(),
            error: None,
            result_data: None,
        },
        TaskInfo {
            id: "task-2".to_string(),
            status: TaskStatus::Completed,
            duration_secs: Some(2.3),
            current_output: "".to_string(),
            task_type: "text_instruction".to_string(),
            task_name: "another-task".to_string(),
            task_metadata: "".to_string(),
            error: None,
            result_data: Some(json!({"result": "success"})),
        },
    ];

    let event = TaskExecutionNotificationEvent::TasksUpdate { stats, tasks };
    let result = format_tasks_update_from_event(&event);

    assert!(result.contains("🎯 Task Execution Dashboard"));
    assert!(result.contains("═══════════════════════════"));
    assert!(result.contains("📊 Progress: 3 total"));
    assert!(result.contains("⏳ 1 pending"));
    assert!(result.contains("🏃 1 running"));
    assert!(result.contains("✅ 1 completed"));
    assert!(result.contains("❌ 0 failed"));
    assert!(result.contains("🏃 test-task"));
    assert!(result.contains("✅ another-task"));
    assert!(result.contains("📋 Parameters: param=value"));
    assert!(result.contains("⏱️ 1.5s"));
    assert!(result.contains("💬 Processing..."));

    let result2 = format_tasks_update_from_event(&event);
    assert!(!result2.contains("🎯 Task Execution Dashboard"));
    assert!(result2.contains(MOVE_TO_PROGRESS_LINE));
}

#[test]
fn test_format_tasks_complete_from_event() {
    let stats = TaskCompletionStats::new(5, 4, 1);
    let failed_tasks = vec![FailedTaskInfo {
        id: "task-3".to_string(),
        name: "failed-task".to_string(),
        error: Some("Connection timeout".to_string()),
    }];

    let event = TaskExecutionNotificationEvent::TasksComplete {
        stats,
        failed_tasks,
    };
    let result = format_tasks_complete_from_event(&event);

    assert!(result.contains("Execution Complete!"));
    assert!(result.contains("═══════════════════════"));
    assert!(result.contains("Total Tasks: 5"));
    assert!(result.contains("✅ Completed: 4"));
    assert!(result.contains("❌ Failed: 1"));
    assert!(result.contains("📈 Success Rate: 80.0%"));
    assert!(result.contains("❌ Failed Tasks:"));
    assert!(result.contains("• failed-task"));
    assert!(result.contains("Error: Connection timeout"));
    assert!(result.contains("📝 Generating summary..."));
}

#[test]
fn test_format_tasks_complete_from_event_no_failures() {
    let stats = TaskCompletionStats::new(3, 3, 0);
    let failed_tasks = vec![];

    let event = TaskExecutionNotificationEvent::TasksComplete {
        stats,
        failed_tasks,
    };
    let result = format_tasks_complete_from_event(&event);

    assert!(!result.contains("❌ Failed Tasks:"));
    assert!(result.contains("📈 Success Rate: 100.0%"));
    assert!(result.contains("❌ Failed: 0"));
}

#[test]
fn test_format_task_display_running() {
    let task = TaskInfo {
        id: "task-1".to_string(),
        status: TaskStatus::Running,
        duration_secs: Some(1.5),
        current_output: "Processing data...\nAlmost done...".to_string(),
        task_type: "sub_recipe".to_string(),
        task_name: "data-processor".to_string(),
        task_metadata: "input=file.txt,output=result.json".to_string(),
        error: None,
        result_data: None,
    };

    let result = format_task_display(&task);

    assert!(result.contains("🏃 data-processor (sub_recipe)"));
    assert!(result.contains("📋 Parameters: input=file.txt,output=result.json"));
    assert!(result.contains("⏱️ 1.5s"));
    assert!(result.contains("💬 Processing data... ... Almost done..."));
}

#[test]
fn test_format_task_display_completed() {
    let task = TaskInfo {
        id: "task-2".to_string(),
        status: TaskStatus::Completed,
        duration_secs: Some(3.2),
        current_output: "".to_string(),
        task_type: "text_instruction".to_string(),
        task_name: "analyzer".to_string(),
        task_metadata: "".to_string(),
        error: None,
        result_data: Some(json!({"status": "success", "count": 42})),
    };

    let result = format_task_display(&task);

    assert!(result.contains("✅ analyzer (text_instruction)"));
    assert!(result.contains("⏱️ 3.2s"));
    assert!(!result.contains("📋 Parameters"));
    assert!(result.contains("📄"));
}

#[test]
fn test_format_task_display_failed() {
    let task = TaskInfo {
        id: "task-3".to_string(),
        status: TaskStatus::Failed,
        duration_secs: None,
        current_output: "".to_string(),
        task_type: "sub_recipe".to_string(),
        task_name: "failing-task".to_string(),
        task_metadata: "".to_string(),
        error: Some(
            "Network connection failed after multiple retries. The server is unreachable."
                .to_string(),
        ),
        result_data: None,
    };

    let result = format_task_display(&task);

    assert!(result.contains("❌ failing-task (sub_recipe)"));
    assert!(!result.contains("⏱️"));
    assert!(result.contains("⚠️"));
    assert!(result.contains("Network connection failed after multiple retries"));
}

#[test]
fn test_format_task_display_pending() {
    let task = TaskInfo {
        id: "task-4".to_string(),
        status: TaskStatus::Pending,
        duration_secs: None,
        current_output: "".to_string(),
        task_type: "sub_recipe".to_string(),
        task_name: "waiting-task".to_string(),
        task_metadata: "priority=high".to_string(),
        error: None,
        result_data: None,
    };

    let result = format_task_display(&task);

    assert!(result.contains("⏳ waiting-task (sub_recipe)"));
    assert!(result.contains("📋 Parameters: priority=high"));
    assert!(!result.contains("⏱️"));
    assert!(!result.contains("💬"));
    assert!(!result.contains("📄"));
    assert!(!result.contains("⚠️"));
}

#[test]
fn test_format_task_display_empty_current_output() {
    let task = TaskInfo {
        id: "task-5".to_string(),
        status: TaskStatus::Running,
        duration_secs: Some(0.5),
        current_output: "  \n\t  \n  ".to_string(),
        task_type: "sub_recipe".to_string(),
        task_name: "quiet-task".to_string(),
        task_metadata: "".to_string(),
        error: None,
        result_data: None,
    };

    let result = format_task_display(&task);

    assert!(!result.contains("💬"));
}
@@ -12,13 +12,13 @@
        <h1 id="session-title">Goose Chat</h1>
        <div class="status" id="connection-status">Connecting...</div>
    </header>

    <div class="chat-container">
        <div class="messages" id="messages">
            <div class="welcome-message">
                <h2>Welcome to Goose!</h2>
                <p>I'm your AI assistant. How can I help you today?</p>

                <div class="suggestion-pills">
                    <div class="suggestion-pill" onclick="sendSuggestion('What can you do?')">What can you do?</div>
                    <div class="suggestion-pill" onclick="sendSuggestion('Demo writing and reading files')">Demo writing and reading files</div>
@@ -28,10 +28,10 @@
                </div>
            </div>
        </div>

        <div class="input-container">
            <textarea
                id="message-input"
            <textarea
                id="message-input"
                placeholder="Type your message here..."
                rows="3"
                autofocus
@@ -40,7 +40,7 @@
            </div>
        </div>
    </div>

    <script src="/static/script.js"></script>
</body>
</html>
@@ -50,6 +50,10 @@
        --tool-bg: #f8f9fa;
        --code-bg: #f5f5f5;
    }

    header h1::before {
        background-image: url('/static/img/logo_light.png');
    }
}

* {
@@ -94,8 +98,14 @@ header h1 {
}

header h1::before {
    content: "🪿";
    font-size: 1.5rem;
    content: "";
    width: 32px;
    height: 32px;
    background-image: url('/static/img/logo_dark.png');
    background-size: contain;
    background-repeat: no-repeat;
    background-position: center;
    display: inline-block;
}

.status {
@@ -460,20 +470,20 @@ header h1::before {
        padding: 1rem;
        gap: 1rem;
    }

    .message {
        max-width: 90%;
        padding: 0.875rem 1rem;
    }

    .input-container {
        padding: 1rem;
    }

    header {
        padding: 0.75rem 1rem;
    }

    .welcome-message {
        padding: 2rem 1rem;
    }
@@ -8,6 +8,9 @@ license.workspace = true
repository.workspace = true
description.workspace = true

[lints]
workspace = true

[lib]
name = "goose_ffi"
crate-type = ["cdylib"]
@@ -247,7 +247,7 @@ pub unsafe extern "C" fn goose_agent_send_message(

    // Block on the async call using our global runtime
    let response = get_runtime().block_on(async {
        let mut stream = match agent.reply(&messages, None).await {
        let mut stream = match agent.reply(&messages, None, None).await {
            Ok(stream) => stream,
            Err(e) => return format!("Error getting reply from agent: {}", e),
        };
@@ -266,6 +266,10 @@ pub unsafe extern "C" fn goose_agent_send_message(
                Ok(AgentEvent::McpNotification(_)) => {
                    // TODO: Handle MCP notifications.
                }
                Ok(AgentEvent::ModelChange { .. }) => {
                    // Model change events are informational, just continue
                }

                Err(e) => {
                    full_response.push_str(&format!("\nError in message stream: {}", e));
                }
@@ -7,11 +7,15 @@ license.workspace = true
repository.workspace = true
description.workspace = true

[lints]
workspace = true

[lib]
crate-type = ["lib", "cdylib"]
name = "goose_llm"

[dependencies]
goose = { path = "../goose" }
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
anyhow = "1.0"
@@ -46,7 +50,7 @@ tokio = { version = "1.43", features = ["time", "sync"] }
[dev-dependencies]
criterion = "0.5"
tempfile = "3.15.0"
dotenv = "0.15"
dotenvy = "0.15.7"
lazy_static = "1.5"
ctor = "0.2.7"
tokio = { version = "1.43", features = ["full"] }
@@ -59,3 +63,11 @@ path = "uniffi-bindgen.rs"
[[example]]
name = "simple"
path = "examples/simple.rs"

[[example]]
name = "image"
path = "examples/image.rs"

[[example]]
name = "prompt_override"
path = "examples/prompt_override.rs"
@@ -49,7 +49,7 @@ curl -O https://repo1.maven.org/maven2/net/java/dev/jna/jna/5.13.0/jna-5.13.0.ja
popd
```

To just create the Kotlin bindings:
To just create the Kotlin bindings (for MacOS):

```bash
# run from project root directory
@@ -58,6 +58,18 @@ cargo build -p goose-llm
cargo run --features=uniffi/cli --bin uniffi-bindgen generate --library ./target/debug/libgoose_llm.dylib --language kotlin --out-dir bindings/kotlin
```

Creating `libgoose_llm.so` for Linux distribution:

Use `cross` to build for the specific target and then create the bindings:
```
# x86-64 GNU/Linux (kGoose uses this)
rustup target add x86_64-unknown-linux-gnu
cross build --release --target x86_64-unknown-linux-gnu -p goose-llm

# The goose_llm.kt bindings produced should be the same whether we use 'libgoose_llm.dylib' or 'libgoose_llm.so'
cross run --features=uniffi/cli --bin uniffi-bindgen generate --library ./target/x86_64-unknown-linux-gnu/release/libgoose_llm.so --language kotlin --out-dir bindings/kotlin
```

#### Python -> Rust: generate bindings, run example

53
crates/goose-llm/examples/image.rs
Normal file
@@ -0,0 +1,53 @@
use anyhow::Result;
use base64::{engine::general_purpose::STANDARD as BASE64, Engine as _};
use goose_llm::{
    completion,
    message::MessageContent,
    types::completion::{CompletionRequest, CompletionResponse},
    Message, ModelConfig,
};
use serde_json::json;
use std::{fs, vec};

#[tokio::main]
async fn main() -> Result<()> {
    let provider = "databricks";
    let provider_config = json!({
        "host": std::env::var("DATABRICKS_HOST").expect("Missing DATABRICKS_HOST"),
        "token": std::env::var("DATABRICKS_TOKEN").expect("Missing DATABRICKS_TOKEN"),
    });
    let model_name = "goose-claude-4-sonnet"; // "gpt-4o";
    let model_config = ModelConfig::new(model_name.to_string());

    let system_preamble = "You are a helpful assistant.";

    // Read and encode test image
    let image_data = fs::read("examples/test_assets/test_image.png")?;
    let base64_image = BASE64.encode(image_data);

    let user_msg = Message::user()
        .with_text("What do you see in this image?")
        .with_content(MessageContent::image(base64_image, "image/png"));

    let messages = vec![user_msg];

    let completion_response: CompletionResponse = completion(
        CompletionRequest::new(
            provider.to_string(),
            provider_config.clone(),
            model_config.clone(),
            Some(system_preamble.to_string()),
            None,
            messages,
            vec![],
        )
        .with_request_id("test-image-1".to_string()),
    )
    .await?;

    // Print the response
    println!("\nCompletion Response:");
    println!("{}", serde_json::to_string_pretty(&completion_response)?);

    Ok(())
}
48
crates/goose-llm/examples/prompt_override.rs
Normal file
@@ -0,0 +1,48 @@
use std::vec;

use anyhow::Result;
use goose_llm::{
    completion,
    types::completion::{CompletionRequest, CompletionResponse},
    Message, ModelConfig,
};
use serde_json::json;

#[tokio::main]
async fn main() -> Result<()> {
    let provider = "databricks";
    let provider_config = json!({
        "host": std::env::var("DATABRICKS_HOST").expect("Missing DATABRICKS_HOST"),
        "token": std::env::var("DATABRICKS_TOKEN").expect("Missing DATABRICKS_TOKEN"),
    });
    // let model_name = "goose-gpt-4-1"; // parallel tool calls
    let model_name = "claude-3-5-haiku";
    let model_config = ModelConfig::new(model_name.to_string());

    let system_prompt_override = "You are a helpful assistant. Talk in the style of pirates.";

    for text in ["How was your day?"] {
        println!("\n---------------\n");
        println!("User Input: {text}");
        let messages = vec![
            Message::user().with_text("Hi there!"),
            Message::assistant().with_text("How can I help?"),
            Message::user().with_text(text),
        ];
        let completion_response: CompletionResponse = completion(CompletionRequest::new(
            provider.to_string(),
            provider_config.clone(),
            model_config.clone(),
            None,
            Some(system_prompt_override.to_string()),
            messages.clone(),
            vec![],
        ))
        .await?;
        // Print the response
        println!("\nCompletion Response:");
        println!("{}", serde_json::to_string_pretty(&completion_response)?);
    }

    Ok(())
}
@@ -106,7 +106,8 @@ async fn main() -> Result<()> {
            provider.to_string(),
            provider_config.clone(),
            model_config.clone(),
            system_preamble.to_string(),
            Some(system_preamble.to_string()),
            None,
            messages.clone(),
            extensions.clone(),
        ))
@@ -115,7 +116,7 @@ async fn main() -> Result<()> {
        println!("\nCompletion Response:");
        println!("{}", serde_json::to_string_pretty(&completion_response)?);

        let tooltip = generate_tooltip(provider, provider_config.clone().into(), &messages).await?;
        let tooltip = generate_tooltip(provider, provider_config.clone(), &messages, None).await?;
        println!("\nTooltip: {}", tooltip);
    }

BIN
crates/goose-llm/examples/test_assets/test_image.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 4.2 KiB
@@ -36,13 +36,22 @@ pub async fn completion(req: CompletionRequest) -> Result<CompletionResponse, Co
    )
    .map_err(|_| CompletionError::UnknownProvider(req.provider_name.to_string()))?;

    let system_prompt = construct_system_prompt(&req.system_preamble, &req.extensions)?;
    let system_prompt = construct_system_prompt(
        &req.system_preamble,
        &req.system_prompt_override,
        &req.extensions,
    )?;
    let tools = collect_prefixed_tools(&req.extensions);

    // Call the LLM provider
    let start_provider = Instant::now();
    let mut response = provider
        .complete(&system_prompt, &req.messages, &tools)
        .complete(
            &system_prompt,
            &req.messages,
            &tools,
            req.request_id.as_deref(),
        )
        .await?;
    let provider_elapsed_sec = start_provider.elapsed().as_secs_f32();
    let usage_tokens = response.usage.total_tokens;
@@ -60,9 +69,24 @@ pub async fn completion(req: CompletionRequest) -> Result<CompletionResponse, Co

/// Render the global `system.md` template with the provided context.
fn construct_system_prompt(
    system_preamble: &str,
    preamble: &Option<String>,
    prompt_override: &Option<String>,
    extensions: &[ExtensionConfig],
) -> Result<String, CompletionError> {
    // If both system_preamble and system_prompt_override are provided, then prompt_override takes precedence
    // and we don't render the template using preamble and extensions. Just return the prompt_override as is.
    if prompt_override.is_some() {
        return Ok(prompt_override.clone().unwrap());
    }

    let system_preamble = {
        if let Some(p) = preamble {
            p
        } else {
            "You are a helpful assistant."
        }
    };

    let mut context: HashMap<&str, Value> = HashMap::new();
    context.insert("system_preamble", Value::String(system_preamble.to_owned()));
    context.insert("extensions", serde_json::to_value(extensions)?);
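The precedence rule in `construct_system_prompt` above is easiest to see in isolation. Below is a minimal sketch of the same resolution order (illustration only; `resolve_system_prompt` is a hypothetical stand-in, not part of the crate): an explicit override wins outright and skips template rendering, otherwise the preamble (or a default) is what the template would render from.

```rust
/// Hypothetical stand-in mirroring the precedence shown in the hunk above.
fn resolve_system_prompt(preamble: Option<&str>, prompt_override: Option<&str>) -> String {
    // An override short-circuits: no template rendering at all.
    if let Some(p) = prompt_override {
        return p.to_string();
    }
    // Otherwise fall back to the preamble, or a default one.
    preamble.unwrap_or("You are a helpful assistant.").to_string()
}

fn main() {
    assert_eq!(resolve_system_prompt(None, None), "You are a helpful assistant.");
    assert_eq!(resolve_system_prompt(Some("Be terse."), None), "Be terse.");
    // The override wins even when a preamble is present.
    assert_eq!(
        resolve_system_prompt(Some("Be terse."), Some("Talk like a pirate.")),
        "Talk like a pirate."
    );
}
```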
@@ -3,6 +3,7 @@ use crate::providers::errors::ProviderError;
use crate::types::core::Role;
use crate::{message::Message, types::json_value_ffi::JsonValueFfi};
use anyhow::Result;
use goose::utils::safe_truncate;
use indoc::indoc;
use serde_json::{json, Value};

@@ -38,7 +39,7 @@ fn build_system_prompt() -> String {
        You are an assistant that crafts a concise session title.
        Given the first couple user messages in the conversation so far,
        reply with only a short name (up to 4 words) that best describes
        this session’s goal.
        this session's goal.

        Examples:
    "#}
@@ -47,11 +48,12 @@ fn build_system_prompt() -> String {
}

/// Generates a short (≤4 words) session name
#[uniffi::export(async_runtime = "tokio")]
#[uniffi::export(async_runtime = "tokio", default(request_id = None))]
pub async fn generate_session_name(
    provider_name: &str,
    provider_config: JsonValueFfi,
    messages: &[Message],
    request_id: Option<String>,
) -> Result<String, ProviderError> {
    // Collect up to the first 3 user messages (truncated to 300 chars each)
    let context: Vec<String> = messages
@@ -60,11 +62,7 @@ pub async fn generate_session_name(
        .take(3)
        .map(|m| {
            let text = m.content.concat_text_str();
            if text.len() > 300 {
                text.chars().take(300).collect()
            } else {
                text
            }
            safe_truncate(&text, 300)
        })
        .collect();

@@ -93,6 +91,7 @@ pub async fn generate_session_name(
            &system_prompt,
            &[Message::user().with_text(&user_msg_text)],
            schema,
            request_id,
        )
        .await?;

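One detail worth noting in the hunk above: the old branch compared the *byte* length (`text.len() > 300`) but truncated by *characters*, which is inconsistent and easy to get wrong with multi-byte UTF-8. A minimal sketch of a char-boundary-safe truncation (illustration only; the crate's `safe_truncate` may differ in details, e.g. by appending an ellipsis):

```rust
/// Truncate to at most `max_chars` characters, never splitting a code point.
fn truncate_chars(s: &str, max_chars: usize) -> String {
    s.chars().take(max_chars).collect()
}

fn main() {
    let s = "héllo wörld"; // multi-byte UTF-8: byte length exceeds char count
    assert!(s.len() > s.chars().count());
    // Byte-indexed slicing such as &s[..2] would panic mid-character here;
    // truncating by chars is always safe.
    assert_eq!(truncate_chars(s, 4), "héll");
}
```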
@@ -52,11 +52,12 @@ fn build_system_prompt() -> String {

/// Generates a tooltip summarizing the last two messages in the session,
/// including any tool calls or results.
#[uniffi::export(async_runtime = "tokio")]
#[uniffi::export(async_runtime = "tokio", default(request_id = None))]
pub async fn generate_tooltip(
    provider_name: &str,
    provider_config: JsonValueFfi,
    messages: &[Message],
    request_id: Option<String>,
) -> Result<String, ProviderError> {
    // Need at least two messages to generate a tooltip
    if messages.len() < 2 {
@@ -148,6 +149,7 @@ pub async fn generate_tooltip(
            &system_prompt,
            &[Message::user().with_text(&user_msg_text)],
            schema,
            request_id,
        )
        .await?;

@@ -82,3 +82,105 @@ uniffi::custom_type!(Contents, Vec<MessageContent>, {
        Ok(Contents::from(contents))
    },
});

#[cfg(test)]
mod tests {
    use super::*;
    use crate::types::core::{Content, TextContent, ToolCall, ToolError};
    use serde_json::json;

    // ------------------------------------------------------------
    // Helpers
    // ------------------------------------------------------------
    fn make_tool_req_ok(id: &str) -> MessageContent {
        let call = ToolCall::new("echo", json!({"text": "hi"}));
        MessageContent::tool_request(id, Ok(call).into())
    }

    fn make_tool_resp_ok(id: &str) -> MessageContent {
        let body = vec![Content::Text(TextContent {
            text: "done".into(),
        })];
        MessageContent::tool_response(id, Ok(body).into())
    }

    fn make_tool_req_err(id: &str) -> MessageContent {
        let err = ToolError::NotFound(format!(
            "The provided function name '{}' had invalid characters",
            "bad$name"
        ));
        MessageContent::tool_request(id, Err(err).into())
    }

    fn make_tool_resp_err(id: &str) -> MessageContent {
        let err = ToolError::InvalidParameters("Could not interpret tool use parameters".into());
        MessageContent::tool_response(id, Err(err).into())
    }

    // ------------------------------------------------------------
    // Round-trip: success
    // ------------------------------------------------------------
    #[test]
    fn contents_roundtrip_ok() {
        let items: Contents = vec![make_tool_req_ok("req-1"), make_tool_resp_ok("resp-1")].into();

        // ---- serialise
        let json_str = serde_json::to_string(&items).expect("serialise OK");
        println!("JSON: {:?}", json_str);

        assert!(
            json_str.contains(r#""type":"toolReq""#)
                && json_str.contains(r#""type":"toolResp""#)
                && json_str.contains(r#""status":"success""#),
            "JSON should contain both variants and success-status"
        );

        // ---- deserialise
        let parsed: Contents = serde_json::from_str(&json_str).expect("deserialise OK");

        assert_eq!(parsed, items, "full round-trip equality");
    }

    // ------------------------------------------------------------
    // Round-trip: error (all variants collapse to ExecutionError)
    // ------------------------------------------------------------
    #[test]
    fn contents_roundtrip_err() {
        let original_items: Contents =
            vec![make_tool_req_err("req-e"), make_tool_resp_err("resp-e")].into();

        // ---- serialise
        let json_str = serde_json::to_string(&original_items).expect("serialise OK");
        println!("JSON: {:?}", json_str);

        assert!(json_str.contains(r#""status":"error""#));

        // ---- deserialise
        let parsed: Contents = serde_json::from_str(&json_str).expect("deserialise OK");

        // ─── validate structure ───────────────────────────────────
        assert_eq!(parsed.len(), 2);

        // ToolReq error
        match &parsed[0] {
            MessageContent::ToolReq(req) => match &*req.tool_call {
                Err(ToolError::ExecutionError(msg)) => {
                    assert!(msg.contains("invalid characters"))
                }
                other => panic!("expected ExecutionError, got {:?}", other),
            },
            other => panic!("expected ToolReq, got {:?}", other),
        }

        // ToolResp error
        match &parsed[1] {
            MessageContent::ToolResp(resp) => match &*resp.tool_result {
                Err(ToolError::ExecutionError(msg)) => {
                    assert!(msg.contains("interpret tool use parameters"))
                }
                other => panic!("expected ExecutionError, got {:?}", other),
            },
            other => panic!("expected ToolResp, got {:?}", other),
        }
    }
}
@@ -1,5 +1,5 @@
use serde::{Deserialize, Serialize};
use serde_json;
use serde_json::{self, Deserializer, Serializer};

use crate::message::tool_result_serde;
use crate::types::core::{Content, ImageContent, TextContent, ToolCall, ToolResult};
@@ -52,22 +52,43 @@ impl From<Result<Vec<Content>, crate::types::core::ToolError>> for ToolResponseT
// — Register the newtypes with UniFFI, converting via JSON strings —
// UniFFI’s FFI layer supports only primitive buffers (here String), so we JSON-serialize
// through our `tool_result_serde` to preserve the same success/error schema on both sides.
// see https://github.com/mozilla/uniffi-rs/issues/2533

uniffi::custom_type!(ToolRequestToolCall, String, {
    lower: |obj| {
        serde_json::to_string(&obj.0).unwrap()
    lower: |wrapper: &ToolRequestToolCall| {
        let mut buf = Vec::new();
        {
            let mut ser = Serializer::new(&mut buf);
            // note the borrow on wrapper.0
            tool_result_serde::serialize(&wrapper.0, &mut ser)
                .expect("ToolRequestToolCall serialization failed");
        }
        String::from_utf8(buf).expect("ToolRequestToolCall produced invalid UTF-8")
    },
    try_lift: |val| {
        Ok(serde_json::from_str(&val).unwrap() )
    try_lift: |s: String| {
        let mut de = Deserializer::from_str(&s);
        let result = tool_result_serde::deserialize(&mut de)
            .map_err(anyhow::Error::new)?;
        Ok(ToolRequestToolCall(result))
    },
});

uniffi::custom_type!(ToolResponseToolResult, String, {
    lower: |obj| {
        serde_json::to_string(&obj.0).unwrap()
    lower: |wrapper: &ToolResponseToolResult| {
        let mut buf = Vec::new();
        {
            let mut ser = Serializer::new(&mut buf);
            // note the borrow on wrapper.0
            tool_result_serde::serialize(&wrapper.0, &mut ser)
                .expect("ToolResponseToolResult serialization failed");
        }
        String::from_utf8(buf).expect("ToolResponseToolResult produced invalid UTF-8")
    },
    try_lift: |val| {
        Ok(serde_json::from_str(&val).unwrap() )
    try_lift: |s: String| {
        let mut de = Deserializer::from_str(&s);
        let result = tool_result_serde::deserialize(&mut de)
            .map_err(anyhow::Error::new)?;
        Ok(ToolResponseToolResult(result))
    },
});
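The hand-rolled `lower` closures above use a standard serde_json pattern: drive a custom `serialize` function through a `serde_json::Serializer` writing into a byte buffer, then hand the resulting string across the FFI boundary. A standalone sketch of just that pattern, with plain `Serialize` in place of the crate's `tool_result_serde` (the `to_json_string` helper is illustrative, not part of the crate):

```rust
use serde::Serialize;
use serde_json::Serializer;

// Serialize into a Vec<u8> buffer, then convert to String for the FFI boundary.
fn to_json_string<T: Serialize>(value: &T) -> String {
    let mut buf = Vec::new();
    {
        // The serializer borrows the buffer only for this inner scope.
        let mut ser = Serializer::new(&mut buf);
        value.serialize(&mut ser).expect("serialization failed");
    }
    String::from_utf8(buf).expect("serde_json output is always valid UTF-8")
}

fn main() {
    assert_eq!(to_json_string(&vec![1, 2, 3]), "[1,2,3]");
}
```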
@@ -238,3 +259,207 @@ impl From<Content> for MessageContent {
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::types::core::{ToolCall, ToolError};
    use crate::UniFfiTag;
    use serde_json::json;
    use uniffi::{FfiConverter, RustBuffer};

    // ---------- ToolRequestToolCall ----------------------------------------------------------

    #[test]
    fn tool_request_tool_call_roundtrip_ok() {
        // Build a valid ToolCall
        let call = ToolCall::new("my_function", json!({"a": 1, "b": 2}));

        // Wrap it in the new-type
        let wrapper = ToolRequestToolCall::from(Ok(call.clone()));

        // Serialize → JSON
        let json_str = serde_json::to_string(&wrapper).expect("serialize OK");
        assert!(
            json_str.contains(r#""status":"success""#),
            "must mark success"
        );

        // Deserialize ← JSON
        let parsed: ToolRequestToolCall = serde_json::from_str(&json_str).expect("deserialize OK");

        // Round-trip equality
        assert_eq!(*parsed, Ok(call));
    }

    #[test]
    fn tool_request_tool_call_roundtrip_err() {
        // Typical failure variant that could come from `is_valid_function_name`
        let err = ToolError::NotFound(
            "The provided function name 'bad$name' had invalid characters".into(),
        );

        let wrapper = ToolRequestToolCall::from(Err(err.clone()));

        let json_str = serde_json::to_string(&wrapper).expect("serialize OK");
        assert!(
            json_str.contains(r#""status":"error""#) && json_str.contains("invalid characters"),
            "must mark error and carry message"
        );

        let parsed: ToolRequestToolCall = serde_json::from_str(&json_str).expect("deserialize OK");

        match &*parsed {
            Err(ToolError::ExecutionError(msg)) => {
                assert!(msg.contains("invalid characters"))
            }
            other => panic!("expected ExecutionError, got {:?}", other),
        }
    }

    // ---------- ToolResponseToolResult -------------------------------------------------------

    #[test]
    fn tool_response_tool_result_roundtrip_ok() {
        // Minimal content vector (one text item)
        let content_vec = vec![Content::Text(TextContent {
            text: "hello".into(),
        })];

        let wrapper = ToolResponseToolResult::from(Ok(content_vec.clone()));

        let json_str = serde_json::to_string(&wrapper).expect("serialize OK");
        assert!(json_str.contains(r#""status":"success""#));

        let parsed: ToolResponseToolResult =
            serde_json::from_str(&json_str).expect("deserialize OK");

        assert_eq!(*parsed, Ok(content_vec));
    }

    #[test]
    fn tool_response_tool_result_roundtrip_err() {
        let err = ToolError::InvalidParameters("Could not interpret tool use parameters".into());

        let wrapper = ToolResponseToolResult::from(Err(err.clone()));

        let json_str = serde_json::to_string(&wrapper).expect("serialize OK");
        assert!(json_str.contains(r#""status":"error""#));

        let parsed: ToolResponseToolResult =
            serde_json::from_str(&json_str).expect("deserialize OK");

        match &*parsed {
            Err(ToolError::ExecutionError(msg)) => {
                assert!(msg.contains("interpret tool use"))
            }
            other => panic!("expected ExecutionError, got {:?}", other),
        }
    }

    // ---------- FFI (lower / lift) round-trips ----------------------------------------------
    // https://mozilla.github.io/uniffi-rs/latest/internals/lifting_and_lowering.html

    #[test]
    fn ffi_roundtrip_tool_request_ok_and_err() {
        // ---------- status: success ----------
        let ok_call = ToolCall::new("echo", json!({"text": "hi"}));
        let ok_wrapper = ToolRequestToolCall::from(Ok(ok_call.clone()));

        // First lower → inspect JSON
        let buf1: RustBuffer =
            <ToolRequestToolCall as FfiConverter<UniFfiTag>>::lower(ok_wrapper.clone());

        let json_ok: String =
            <String as FfiConverter<UniFfiTag>>::try_lift(buf1).expect("lift String OK");
        println!("ToolReq - Lowered JSON (status: success): {:?}", json_ok);
        assert!(json_ok.contains(r#""status":"success""#));

        // Second lower → round-trip wrapper
        let buf2: RustBuffer =
            <ToolRequestToolCall as FfiConverter<UniFfiTag>>::lower(ok_wrapper.clone());

        let lifted_ok = <ToolRequestToolCall as FfiConverter<UniFfiTag>>::try_lift(buf2)
            .expect("lift wrapper OK");
        println!(
            "ToolReq - Lifted wrapper (status: success): {:?}",
            lifted_ok
        );
        assert_eq!(lifted_ok, ok_wrapper);

        // ---------- status: error ----------
        let err_call = ToolError::NotFound("no such function".into());
        let err_wrapper = ToolRequestToolCall::from(Err(err_call.clone()));

        let buf1: RustBuffer =
            <ToolRequestToolCall as FfiConverter<UniFfiTag>>::lower(err_wrapper.clone());
        let json_err: String =
            <String as FfiConverter<UniFfiTag>>::try_lift(buf1).expect("lift String ERR");
        println!("ToolReq - Lowered JSON (status: error): {:?}", json_err);
        assert!(json_err.contains(r#""status":"error""#));

        let buf2: RustBuffer =
            <ToolRequestToolCall as FfiConverter<UniFfiTag>>::lower(err_wrapper.clone());
        let lifted_err = <ToolRequestToolCall as FfiConverter<UniFfiTag>>::try_lift(buf2)
            .expect("lift wrapper ERR");
        println!("ToolReq - Lifted wrapper (status: error): {:?}", lifted_err);

        match &*lifted_err {
            Err(ToolError::ExecutionError(msg)) => {
                assert!(msg.contains("no such function"))
            }
            other => panic!("expected ExecutionError, got {:?}", other),
        }
    }

    #[test]
    fn ffi_roundtrip_tool_response_ok_and_err() {
        // ---------- status: success ----------
        let body = vec![Content::Text(TextContent {
            text: "done".into(),
        })];
        let ok_wrapper = ToolResponseToolResult::from(Ok(body.clone()));

        let buf1: RustBuffer =
            <ToolResponseToolResult as FfiConverter<UniFfiTag>>::lower(ok_wrapper.clone());
        let json_ok: String = <String as FfiConverter<UniFfiTag>>::try_lift(buf1).unwrap();
        println!("ToolResp - Lowered JSON (status: success): {:?}", json_ok);
        assert!(json_ok.contains(r#""status":"success""#));

        let buf2: RustBuffer =
            <ToolResponseToolResult as FfiConverter<UniFfiTag>>::lower(ok_wrapper.clone());
        let lifted_ok =
            <ToolResponseToolResult as FfiConverter<UniFfiTag>>::try_lift(buf2).unwrap();
        println!(
            "ToolResp - Lifted wrapper (status: success): {:?}",
            lifted_ok
        );
        assert_eq!(lifted_ok, ok_wrapper);

        // ---------- status: error ----------
        let err_call = ToolError::InvalidParameters("bad params".into());
        let err_wrapper = ToolResponseToolResult::from(Err(err_call.clone()));

        let buf1: RustBuffer =
            <ToolResponseToolResult as FfiConverter<UniFfiTag>>::lower(err_wrapper.clone());
        let json_err: String = <String as FfiConverter<UniFfiTag>>::try_lift(buf1).unwrap();
        println!("ToolResp - Lowered JSON (status: error): {:?}", json_err);
        assert!(json_err.contains(r#""status":"error""#));

        let buf2: RustBuffer =
            <ToolResponseToolResult as FfiConverter<UniFfiTag>>::lower(err_wrapper.clone());
        let lifted_err =
            <ToolResponseToolResult as FfiConverter<UniFfiTag>>::try_lift(buf2).unwrap();
        println!(
            "ToolResp - Lifted wrapper (status: error): {:?}",
            lifted_err
        );

        match &*lifted_err {
            Err(ToolError::ExecutionError(msg)) => {
                assert!(msg.contains("bad params"))
            }
            other => panic!("expected ExecutionError, got {:?}", other),
        }
    }
}
@@ -151,7 +151,7 @@ mod tests {
            .with_text("Hello, I'll help you with that.")
            .with_tool_request(
                "tool123",
                Ok(ToolCall::new("test_tool", json!({"param": "value"})).into()),
                Ok(ToolCall::new("test_tool", json!({"param": "value"}))),
            );

        let json_str = serde_json::to_string_pretty(&message).unwrap();

@@ -69,6 +69,7 @@ pub trait Provider: Send + Sync {
    /// * `system` - The system prompt that guides the model's behavior
    /// * `messages` - The conversation history as a sequence of messages
    /// * `tools` - Optional list of tools the model can use
    /// * `request_id` - Optional request ID (only used by some providers like Databricks)
    ///
    /// # Returns
    /// A tuple containing the model's response message and provider usage statistics
@@ -81,6 +82,7 @@ pub trait Provider: Send + Sync {
        system: &str,
        messages: &[Message],
        tools: &[Tool],
        request_id: Option<&str>,
    ) -> Result<ProviderCompleteResponse, ProviderError>;

    /// Structured extraction: always JSON‐Schema
@@ -90,6 +92,7 @@ pub trait Provider: Send + Sync {
    /// * `messages` – conversation history
    /// * `schema` – a JSON‐Schema for the expected output.
    ///   Will set strict=true for OpenAI & Databricks.
    /// * `request_id` - Optional request ID (only used by some providers like Databricks)
    ///
    /// # Returns
    /// A `ProviderExtractResponse` whose `data` is a JSON object matching `schema`.
@@ -102,6 +105,7 @@ pub trait Provider: Send + Sync {
        system: &str,
        messages: &[Message],
        schema: &serde_json::Value,
        request_id: Option<&str>,
    ) -> Result<ProviderExtractResponse, ProviderError>;
}

@@ -210,6 +210,7 @@ impl Provider for DatabricksProvider {
        system: &str,
        messages: &[Message],
        tools: &[Tool],
        request_id: Option<&str>,
    ) -> Result<ProviderCompleteResponse, ProviderError> {
        let mut payload = create_request(
            &self.model,
@@ -224,6 +225,17 @@ impl Provider for DatabricksProvider {
            .expect("payload should have model key")
            .remove("model");

        // Add client_request_id if provided
        if let Some(req_id) = request_id {
            payload
                .as_object_mut()
                .expect("payload should be an object")
                .insert(
                    "client_request_id".to_string(),
                    serde_json::Value::String(req_id.to_string()),
                );
        }

        let response = self.post(payload.clone()).await?;

        // Parse response
@@ -247,6 +259,7 @@ impl Provider for DatabricksProvider {
        system: &str,
        messages: &[Message],
        schema: &Value,
        request_id: Option<&str>,
    ) -> Result<ProviderExtractResponse, ProviderError> {
        // 1. Build base payload (no tools)
        let mut payload = create_request(&self.model, system, messages, &[], &ImageFormat::OpenAi)?;
@@ -267,6 +280,17 @@ impl Provider for DatabricksProvider {
            }),
        );

        // Add client_request_id if provided
        if let Some(req_id) = request_id {
            payload
                .as_object_mut()
                .expect("payload should be an object")
                .insert(
                    "client_request_id".to_string(),
                    serde_json::Value::String(req_id.to_string()),
                );
        }

        // 3. Call OpenAI
        let response = self.post(payload.clone()).await?;
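The same `client_request_id` insertion appears twice in the hunks above. As a standalone illustration of the pattern (a sketch; `add_request_id` is a hypothetical helper, not part of the crate), mutating a `serde_json::Value` payload in place:

```rust
use serde_json::{json, Value};

/// Insert `client_request_id` into a JSON object payload when an ID is present.
fn add_request_id(payload: &mut Value, request_id: Option<&str>) {
    if let Some(req_id) = request_id {
        payload
            .as_object_mut()
            .expect("payload should be an object")
            .insert(
                "client_request_id".to_string(),
                Value::String(req_id.to_string()),
            );
    }
}

fn main() {
    let mut payload = json!({ "messages": [] });
    add_request_id(&mut payload, Some("req-123"));
    assert_eq!(payload["client_request_id"], "req-123");
    // With None, the payload is left untouched.
    add_request_id(&mut payload, None);
}
```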
@@ -7,10 +7,7 @@ use crate::{
    providers::{
        base::Usage,
        errors::ProviderError,
        utils::{
            convert_image, detect_image_path, is_valid_function_name, load_image_file,
            sanitize_function_name, ImageFormat,
        },
        utils::{convert_image, is_valid_function_name, sanitize_function_name, ImageFormat},
    },
    types::core::{Content, Role, Tool, ToolCall, ToolError},
};
@@ -34,30 +31,17 @@ pub fn format_messages(messages: &[Message], image_format: &ImageFormat) -> Vec<
        match content {
            MessageContent::Text(text) => {
                if !text.text.is_empty() {
                    // Check for image paths in the text
                    if let Some(image_path) = detect_image_path(&text.text) {
                        has_multiple_content = true;
                        // Try to load and convert the image
                        if let Ok(image) = load_image_file(image_path) {
                            content_array.push(json!({
                                "type": "text",
                                "text": text.text
                            }));
                            content_array.push(convert_image(&image, image_format));
                        } else {
                            content_array.push(json!({
                                "type": "text",
                                "text": text.text
                            }));
                        }
                    } else {
                        content_array.push(json!({
                            "type": "text",
                            "text": text.text
                        }));
                    }
                    content_array.push(json!({
                        "type": "text",
                        "text": text.text
                    }));
                }
            }
            MessageContent::Image(image) => {
                // Handle direct image content
                let converted_image = convert_image(image, image_format);
                content_array.push(converted_image);
            }
            MessageContent::Thinking(content) => {
                has_multiple_content = true;
                content_array.push(json!({
@@ -166,15 +150,6 @@ pub fn format_messages(messages: &[Message], image_format: &ImageFormat) -> Vec<
                    }
                }
            }
            MessageContent::Image(image) => {
                // Handle direct image content
                content_array.push(json!({
                    "type": "image_url",
                    "image_url": {
                        "url": convert_image(image, image_format)
                    }
                }));
            }
        }
    }

@@ -681,7 +656,7 @@ mod tests {
        Message::user().with_text("How are you?"),
        Message::assistant().with_tool_request(
            "tool1",
            Ok(ToolCall::new("example", json!({"param1": "value1"})).into()),
            Ok(ToolCall::new("example", json!({"param1": "value1"}))),
        ),
    ];

@@ -791,40 +766,6 @@ mod tests {
        Ok(())
    }

    #[test]
    fn test_format_messages_with_image_path() -> anyhow::Result<()> {
        // Create a temporary PNG file with valid PNG magic numbers
        let temp_dir = tempfile::tempdir()?;
        let png_path = temp_dir.path().join("test.png");
        let png_data = [
            0x89, 0x50, 0x4E, 0x47, // PNG magic number
            0x0D, 0x0A, 0x1A, 0x0A, // PNG header
            0x00, 0x00, 0x00, 0x0D, // Rest of fake PNG data
        ];
        std::fs::write(&png_path, &png_data)?;
        let png_path_str = png_path.to_str().unwrap();

        // Create message with image path
        let message = Message::user().with_text(format!("Here is an image: {}", png_path_str));
        let spec = format_messages(&[message], &ImageFormat::OpenAi);

        assert_eq!(spec.len(), 1);
        assert_eq!(spec[0]["role"], "user");

        // Content should be an array with text and image
        let content = spec[0]["content"].as_array().unwrap();
        assert_eq!(content.len(), 2);
        assert_eq!(content[0]["type"], "text");
        assert!(content[0]["text"].as_str().unwrap().contains(png_path_str));
        assert_eq!(content[1]["type"], "image_url");
        assert!(content[1]["image_url"]["url"]
            .as_str()
            .unwrap()
            .starts_with("data:image/png;base64,"));

        Ok(())
    }

    #[test]
    fn test_response_to_message_text() -> anyhow::Result<()> {
        let response = json!({
@@ -7,10 +7,7 @@ use crate::{
    providers::{
        base::Usage,
        errors::ProviderError,
        utils::{
            convert_image, detect_image_path, is_valid_function_name, load_image_file,
            sanitize_function_name, ImageFormat,
        },
        utils::{convert_image, is_valid_function_name, sanitize_function_name, ImageFormat},
    },
    types::core::{Content, Role, Tool, ToolCall, ToolError},
};
@@ -31,23 +28,13 @@ pub fn format_messages(messages: &[Message], image_format: &ImageFormat) -> Vec<
        match content {
            MessageContent::Text(text) => {
                if !text.text.is_empty() {
                    // Check for image paths in the text
                    if let Some(image_path) = detect_image_path(&text.text) {
                        // Try to load and convert the image
                        if let Ok(image) = load_image_file(image_path) {
                            converted["content"] = json!([
                                {"type": "text", "text": text.text},
                                convert_image(&image, image_format)
                            ]);
                        } else {
                            // If image loading fails, just use the text
                            converted["content"] = json!(text.text);
                        }
                    } else {
                        converted["content"] = json!(text.text);
                    }
                    converted["content"] = json!(text.text);
                }
            }
            MessageContent::Image(image) => {
                // Handle direct image content
                converted["content"] = json!([convert_image(image, image_format)]);
            }
            MessageContent::Thinking(_) => {
                // Thinking blocks are not directly used in OpenAI format
                continue;
@@ -134,10 +121,6 @@ pub fn format_messages(messages: &[Message], image_format: &ImageFormat) -> Vec<
                    }
                }
            }
            MessageContent::Image(image) => {
                // Handle direct image content
                converted["content"] = json!([convert_image(image, image_format)]);
            }
        }
    }

@@ -664,40 +647,6 @@ mod tests {
        Ok(())
    }

    #[test]
    fn test_format_messages_with_image_path() -> anyhow::Result<()> {
        // Create a temporary PNG file with valid PNG magic numbers
        let temp_dir = tempfile::tempdir()?;
        let png_path = temp_dir.path().join("test.png");
        let png_data = [
            0x89, 0x50, 0x4E, 0x47, // PNG magic number
            0x0D, 0x0A, 0x1A, 0x0A, // PNG header
            0x00, 0x00, 0x00, 0x0D, // Rest of fake PNG data
        ];
        std::fs::write(&png_path, &png_data)?;
        let png_path_str = png_path.to_str().unwrap();

        // Create message with image path
        let message = Message::user().with_text(format!("Here is an image: {}", png_path_str));
        let spec = format_messages(&[message], &ImageFormat::OpenAi);

        assert_eq!(spec.len(), 1);
        assert_eq!(spec[0]["role"], "user");

        // Content should be an array with text and image
        let content = spec[0]["content"].as_array().unwrap();
        assert_eq!(content.len(), 2);
        assert_eq!(content[0]["type"], "text");
        assert!(content[0]["text"].as_str().unwrap().contains(png_path_str));
        assert_eq!(content[1]["type"], "image_url");
        assert!(content[1]["image_url"]["url"]
            .as_str()
            .unwrap()
            .starts_with("data:image/png;base64,"));

        Ok(())
    }

    #[test]
    fn test_response_to_message_text() -> anyhow::Result<()> {
        let response = json!({
@@ -149,6 +149,7 @@ impl Provider for OpenAiProvider {
        system: &str,
        messages: &[Message],
        tools: &[Tool],
        _request_id: Option<&str>, // OpenAI doesn't use request_id, so we ignore it
    ) -> Result<ProviderCompleteResponse, ProviderError> {
        let payload = create_request(&self.model, system, messages, tools, &ImageFormat::OpenAi)?;

@@ -175,6 +176,7 @@ impl Provider for OpenAiProvider {
        system: &str,
        messages: &[Message],
        schema: &Value,
        _request_id: Option<&str>, // OpenAI doesn't use request_id, so we ignore it
    ) -> Result<ProviderExtractResponse, ProviderError> {
        // 1. Build base payload (no tools)
        let mut payload = create_request(&self.model, system, messages, &[], &ImageFormat::OpenAi)?;
@@ -181,30 +181,6 @@ fn is_image_file(path: &Path) -> bool {
    false
}

/// Detect if a string contains a path to an image file
pub fn detect_image_path(text: &str) -> Option<&str> {
    // Basic image file extension check
    let extensions = [".png", ".jpg", ".jpeg"];

    // Find any word that ends with an image extension
    for word in text.split_whitespace() {
        if extensions
            .iter()
            .any(|ext| word.to_lowercase().ends_with(ext))
        {
            let path = Path::new(word);
            // Check if it's an absolute path and file exists
            if path.is_absolute() && path.is_file() {
                // Verify it's actually an image file
                if is_image_file(path) {
                    return Some(word);
                }
            }
        }
    }
    None
}

/// Convert a local image file to base64 encoded ImageContent
pub fn load_image_file(path: &str) -> Result<ImageContent, ProviderError> {
    let path = Path::new(path);
@@ -267,81 +243,6 @@ pub fn emit_debug_trace(
mod tests {
    use super::*;

    #[test]
    fn test_detect_image_path() {
        // Create a temporary PNG file with valid PNG magic numbers
        let temp_dir = tempfile::tempdir().unwrap();
        let png_path = temp_dir.path().join("test.png");
        let png_data = [
            0x89, 0x50, 0x4E, 0x47, // PNG magic number
            0x0D, 0x0A, 0x1A, 0x0A, // PNG header
            0x00, 0x00, 0x00, 0x0D, // Rest of fake PNG data
        ];
        std::fs::write(&png_path, &png_data).unwrap();
        let png_path_str = png_path.to_str().unwrap();

        // Create a fake PNG (wrong magic numbers)
        let fake_png_path = temp_dir.path().join("fake.png");
        std::fs::write(&fake_png_path, b"not a real png").unwrap();

        // Test with valid PNG file using absolute path
        let text = format!("Here is an image {}", png_path_str);
        assert_eq!(detect_image_path(&text), Some(png_path_str));

        // Test with non-image file that has .png extension
        let text = format!("Here is a fake image {}", fake_png_path.to_str().unwrap());
        assert_eq!(detect_image_path(&text), None);

        // Test with non-existent file
        let text = "Here is a fake.png that doesn't exist";
        assert_eq!(detect_image_path(text), None);

        // Test with non-image file
        let text = "Here is a file.txt";
        assert_eq!(detect_image_path(text), None);

        // Test with relative path (should not match)
        let text = "Here is a relative/path/image.png";
        assert_eq!(detect_image_path(text), None);
    }

    #[test]
    fn test_load_image_file() {
        // Create a temporary PNG file with valid PNG magic numbers
        let temp_dir = tempfile::tempdir().unwrap();
        let png_path = temp_dir.path().join("test.png");
        let png_data = [
            0x89, 0x50, 0x4E, 0x47, // PNG magic number
            0x0D, 0x0A, 0x1A, 0x0A, // PNG header
            0x00, 0x00, 0x00, 0x0D, // Rest of fake PNG data
        ];
        std::fs::write(&png_path, &png_data).unwrap();
        let png_path_str = png_path.to_str().unwrap();

        // Create a fake PNG (wrong magic numbers)
        let fake_png_path = temp_dir.path().join("fake.png");
        std::fs::write(&fake_png_path, b"not a real png").unwrap();
        let fake_png_path_str = fake_png_path.to_str().unwrap();

        // Test loading valid PNG file
        let result = load_image_file(png_path_str);
        assert!(result.is_ok());
        let image = result.unwrap();
        assert_eq!(image.mime_type, "image/png");

        // Test loading fake PNG file
        let result = load_image_file(fake_png_path_str);
        assert!(result.is_err());
        assert!(result
            .unwrap_err()
            .to_string()
            .contains("not a valid image"));

        // Test non-existent file
        let result = load_image_file("nonexistent.png");
        assert!(result.is_err());
    }

    #[test]
    fn test_sanitize_function_name() {
        assert_eq!(sanitize_function_name("hello-world"), "hello-world");
@@ -6,13 +6,14 @@ use crate::{

/// Generates a structured output based on the provided schema,
/// system prompt and user messages.
#[uniffi::export(async_runtime = "tokio")]
#[uniffi::export(async_runtime = "tokio", default(request_id = None))]
pub async fn generate_structured_outputs(
    provider_name: &str,
    provider_config: JsonValueFfi,
    system_prompt: &str,
    messages: &[Message],
    schema: JsonValueFfi,
    request_id: Option<String>,
) -> Result<ProviderExtractResponse, ProviderError> {
    // Use OpenAI models specifically for this task
    let model_name = if provider_name == "databricks" {
@@ -23,7 +24,9 @@ pub async fn generate_structured_outputs(
    let model_cfg = ModelConfig::new(model_name.to_string()).with_temperature(Some(0.0));
    let provider = create(provider_name, provider_config, model_cfg)?;

    let resp = provider.extract(system_prompt, messages, &schema).await?;
    let resp = provider
        .extract(system_prompt, messages, &schema, request_id.as_deref())
        .await?;

    Ok(resp)
}
@@ -16,9 +16,11 @@ pub struct CompletionRequest {
    pub provider_name: String,
    pub provider_config: serde_json::Value,
    pub model_config: ModelConfig,
    pub system_preamble: String,
    pub system_preamble: Option<String>,
    pub system_prompt_override: Option<String>,
    pub messages: Vec<Message>,
    pub extensions: Vec<ExtensionConfig>,
    pub request_id: Option<String>,
}

impl CompletionRequest {
@@ -26,7 +28,8 @@ impl CompletionRequest {
        provider_name: String,
        provider_config: serde_json::Value,
        model_config: ModelConfig,
        system_preamble: String,
        system_preamble: Option<String>,
        system_prompt_override: Option<String>,
        messages: Vec<Message>,
        extensions: Vec<ExtensionConfig>,
    ) -> Self {
@@ -34,30 +37,47 @@ impl CompletionRequest {
            provider_name,
            provider_config,
            model_config,
            system_prompt_override,
            system_preamble,
            messages,
            extensions,
            request_id: None,
        }
    }

    pub fn with_request_id(mut self, request_id: String) -> Self {
        self.request_id = Some(request_id);
        self
    }
}

#[uniffi::export]
#[allow(clippy::too_many_arguments)]
#[uniffi::export(default(system_preamble = None, system_prompt_override = None))]
pub fn create_completion_request(
    provider_name: &str,
    provider_config: JsonValueFfi,
    model_config: ModelConfig,
    system_preamble: &str,
    system_preamble: Option<String>,
    system_prompt_override: Option<String>,
    messages: Vec<Message>,
    extensions: Vec<ExtensionConfig>,
    request_id: Option<String>,
) -> CompletionRequest {
    CompletionRequest::new(
    let mut request = CompletionRequest::new(
        provider_name.to_string(),
        provider_config,
        model_config,
        system_preamble.to_string(),
        system_preamble,
        system_prompt_override,
        messages,
        extensions,
    )
    );

    if let Some(req_id) = request_id {
        request = request.with_request_id(req_id);
    }

    request
}

uniffi::custom_type!(CompletionRequest, String, {
Some files were not shown because too many files have changed in this diff.