diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index e2e8fa34b63..0a957360d4d 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1,2 +1,3 @@ # These owners will be the default owners for everything in the repo -* @mrubens @cte @jr @hannesrudolph @daniel-lxs +# Update with your GitHub username or team +# * @your-username diff --git a/.github/workflows/marketplace-publish.yml b/.github/workflows/marketplace-publish.yml deleted file mode 100644 index aef91b2d323..00000000000 --- a/.github/workflows/marketplace-publish.yml +++ /dev/null @@ -1,92 +0,0 @@ -name: Publish Extension - -on: - pull_request: - types: [closed] - workflow_dispatch: - -env: - GIT_REF: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || 'main' }} - -jobs: - publish-extension: - runs-on: ubuntu-latest - permissions: - contents: write # Required for pushing tags. - if: > - ( github.event_name == 'pull_request' && - github.event.pull_request.base.ref == 'main' && - contains(github.event.pull_request.title, 'Changeset version bump') ) || - github.event_name == 'workflow_dispatch' - steps: - - name: Checkout code - uses: actions/checkout@v4 - with: - ref: ${{ env.GIT_REF }} - - name: Setup Node.js and pnpm - uses: ./.github/actions/setup-node-pnpm - - name: Configure Git - run: | - git config user.name "github-actions[bot]" - git config user.email "github-actions[bot]@users.noreply.github.com" - - name: Create .env file - run: echo "POSTHOG_API_KEY=${{ secrets.POSTHOG_API_KEY }}" >> .env - - name: Package Extension - run: | - current_package_version=$(node -p "require('./src/package.json').version") - pnpm vsix - - # Save VSIX contents to a temporary file to avoid broken pipe issues. - unzip -l bin/roo-cline-${current_package_version}.vsix > /tmp/roo-code-vsix-contents.txt - - # Check for required files. 
- grep -q "extension/package.json" /tmp/roo-code-vsix-contents.txt || exit 1 - grep -q "extension/package.nls.json" /tmp/roo-code-vsix-contents.txt || exit 1 - grep -q "extension/dist/extension.js" /tmp/roo-code-vsix-contents.txt || exit 1 - grep -q "extension/webview-ui/audio/celebration.wav" /tmp/roo-code-vsix-contents.txt || exit 1 - grep -q "extension/webview-ui/build/assets/index.js" /tmp/roo-code-vsix-contents.txt || exit 1 - grep -q "extension/assets/codicons/codicon.ttf" /tmp/roo-code-vsix-contents.txt || exit 1 - grep -q "extension/assets/vscode-material-icons/icons/3d.svg" /tmp/roo-code-vsix-contents.txt || exit 1 - grep -q ".env" /tmp/roo-code-vsix-contents.txt || exit 1 - - # Clean up temporary file. - rm /tmp/roo-code-vsix-contents.txt - - name: Create and Push Git Tag - run: | - current_package_version=$(node -p "require('./src/package.json').version") - git tag -a "v${current_package_version}" -m "Release v${current_package_version}" - git push origin "v${current_package_version}" --no-verify - echo "Successfully created and pushed git tag v${current_package_version}" - - name: Publish Extension - env: - VSCE_PAT: ${{ secrets.VSCE_PAT }} - OVSX_PAT: ${{ secrets.OVSX_PAT }} - run: | - current_package_version=$(node -p "require('./src/package.json').version") - pnpm --filter roo-cline publish:marketplace - echo "Successfully published version $current_package_version to VS Code Marketplace" - - name: Create GitHub Release - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - run: | - current_package_version=$(node -p "require('./src/package.json').version") - - # Extract changelog for current version - echo "Extracting changelog for version ${current_package_version}" - changelog_content=$(sed -n "/## \\[${current_package_version}\\]/,/## \\[/p" CHANGELOG.md | sed '$d') - - # If changelog extraction failed, use a default message - if [ -z "$changelog_content" ]; then - echo "Warning: No changelog section found for version ${current_package_version}" - 
changelog_content="Release v${current_package_version}" - else - echo "Found changelog section for version ${current_package_version}" - fi - - # Create release with changelog content - gh release create "v${current_package_version}" \ - --title "Release v${current_package_version}" \ - --notes "$changelog_content" \ - --target ${{ env.GIT_REF }} \ - bin/roo-cline-${current_package_version}.vsix - echo "Successfully created GitHub Release v${current_package_version}" diff --git a/.github/workflows/nightly-publish.yml b/.github/workflows/nightly-publish.yml deleted file mode 100644 index e25bdba990a..00000000000 --- a/.github/workflows/nightly-publish.yml +++ /dev/null @@ -1,52 +0,0 @@ -name: Nightly Publish - -on: - push: - branches: [main] - workflow_dispatch: # Allows manual triggering. - -jobs: - publish-nightly: - runs-on: ubuntu-latest - - permissions: - contents: read # No tags pushed → read is enough. - - steps: - - name: Checkout code - uses: actions/checkout@v4 - with: - fetch-depth: 0 - - name: Setup Node.js and pnpm - uses: ./.github/actions/setup-node-pnpm - with: - install-args: '--frozen-lockfile' - - name: Forge numeric Nightly version - id: version - env: - RUN_NUMBER: ${{ github.run_number }} - run: echo "number=$(( 5500 + ${RUN_NUMBER} ))" >> $GITHUB_OUTPUT - - name: Patch package.json version - env: - VERSION_NUMBER: ${{ steps.version.outputs.number }} - run: | - node <<'EOF' - const fs = require('fs'); - const path = require('path'); - const pkgPath = path.join(__dirname, 'apps', 'vscode-nightly', 'package.nightly.json'); - const pkg = JSON.parse(fs.readFileSync(pkgPath,'utf8')); - const [maj, min] = pkg.version.split('.'); - pkg.version = `${maj}.${min}.${process.env.VERSION_NUMBER}`; - fs.writeFileSync(pkgPath, JSON.stringify(pkg, null, 2)); - console.log(`🔖 Nightly version set to ${pkg.version}`); - EOF - - name: Build VSIX - run: pnpm vsix:nightly # Produces bin/roo-code-nightly-0.0.[count].vsix - - name: Publish to VS Code Marketplace - 
env: - VSCE_PAT: ${{ secrets.VSCE_PAT }} - run: npx vsce publish --packagePath "bin/$(/bin/ls bin | head -n1)" - - name: Publish to Open VSX Registry - env: - OVSX_PAT: ${{ secrets.OVSX_PAT }} - run: npx ovsx publish "bin/$(ls bin | head -n1)" diff --git a/.github/workflows/update-contributors.yml b/.github/workflows/update-contributors.yml deleted file mode 100644 index 5709bdc10a0..00000000000 --- a/.github/workflows/update-contributors.yml +++ /dev/null @@ -1,67 +0,0 @@ -name: Update Contributors # Refresh contrib.rocks image cache - -on: - workflow_dispatch: - -permissions: - contents: write - pull-requests: write - -jobs: - refresh-contrib-cache: - runs-on: ubuntu-latest - steps: - - name: Checkout - uses: actions/checkout@v4 - - - name: Bump cacheBust in all README files - run: | - set -euo pipefail - TS="$(date +%s)" - # Target only the root README.md and localized READMEs under locales/*/README.md - mapfile -t FILES < <(git ls-files README.md 'locales/*/README.md' || true) - - if [ "${#FILES[@]}" -eq 0 ]; then - echo "No target README files found." >&2 - exit 1 - fi - - UPDATED=0 - for f in "${FILES[@]}"; do - if grep -q 'cacheBust=' "$f"; then - # Use portable sed in GNU environment of ubuntu-latest - sed -i -E "s/cacheBust=[0-9]+/cacheBust=${TS}/g" "$f" - echo "Updated cacheBust in $f" - UPDATED=1 - else - echo "Warning: cacheBust parameter not found in $f" >&2 - fi - done - - if [ "$UPDATED" -eq 0 ]; then - echo "No files were updated. Ensure READMEs embed contrib.rocks with cacheBust param." 
>&2 - exit 1 - fi - - - name: Detect changes - id: changes - run: | - if git diff --quiet; then - echo "changed=false" >> $GITHUB_OUTPUT - else - echo "changed=true" >> $GITHUB_OUTPUT - fi - - - name: Create Pull Request - if: steps.changes.outputs.changed == 'true' - uses: peter-evans/create-pull-request@v7 - with: - token: ${{ secrets.GITHUB_TOKEN }} - commit-message: "docs: update contributors list [skip ci]" - committer: "github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>" - branch: refresh-contrib-cache - delete-branch: true - title: "Refresh contrib.rocks image cache (all READMEs)" - body: | - Automated refresh of the contrib.rocks image cache by bumping the cacheBust parameter in README.md and locales/*/README.md. - base: main diff --git a/.github/workflows/website-deploy.yml b/.github/workflows/website-deploy.yml deleted file mode 100644 index da2d4228f51..00000000000 --- a/.github/workflows/website-deploy.yml +++ /dev/null @@ -1,59 +0,0 @@ -name: Deploy roocode.com - -on: - push: - branches: - - main - paths: - - 'apps/web-roo-code/**' - workflow_dispatch: - -concurrency: - group: deploy-roocode-com - cancel-in-progress: true - -env: - VERCEL_ORG_ID: ${{ secrets.VERCEL_ORG_ID }} - VERCEL_PROJECT_ID: ${{ secrets.VERCEL_PROJECT_ID }} - -jobs: - check-secrets: - runs-on: ubuntu-latest - outputs: - has-vercel-token: ${{ steps.check.outputs.has-vercel-token }} - steps: - - name: Check if VERCEL_TOKEN exists - id: check - run: | - if [ -n "${{ secrets.VERCEL_TOKEN }}" ]; then - echo "has-vercel-token=true" >> $GITHUB_OUTPUT - else - echo "has-vercel-token=false" >> $GITHUB_OUTPUT - fi - - deploy: - runs-on: ubuntu-latest - needs: check-secrets - if: ${{ needs.check-secrets.outputs.has-vercel-token == 'true' }} - steps: - - name: Checkout code - uses: actions/checkout@v4 - - name: Setup Node.js and pnpm - uses: ./.github/actions/setup-node-pnpm - - name: Run lint - run: pnpm lint - working-directory: apps/web-roo-code - - name: Run type 
check - run: pnpm check-types - working-directory: apps/web-roo-code - - name: Run build - run: pnpm build - working-directory: apps/web-roo-code - - name: Install Vercel CLI - run: npm install --global vercel@latest - - name: Pull Vercel Environment Information - run: npx vercel pull --yes --environment=production --token=${{ secrets.VERCEL_TOKEN }} - - name: Build Project Artifacts - run: npx vercel build --prod --token=${{ secrets.VERCEL_TOKEN }} - - name: Deploy Project Artifacts to Vercel - run: npx vercel deploy --prebuilt --prod --token=${{ secrets.VERCEL_TOKEN }} diff --git a/.github/workflows/website-preview.yml b/.github/workflows/website-preview.yml deleted file mode 100644 index 9446bc77531..00000000000 --- a/.github/workflows/website-preview.yml +++ /dev/null @@ -1,102 +0,0 @@ -name: Preview roocode.com - -on: - push: - branches-ignore: - - main - paths: - - "apps/web-roo-code/**" - pull_request: - paths: - - "apps/web-roo-code/**" - workflow_dispatch: - -concurrency: - group: preview-roocode-com-${{ github.ref }} - cancel-in-progress: true - -env: - VERCEL_ORG_ID: ${{ secrets.VERCEL_ORG_ID }} - VERCEL_PROJECT_ID: ${{ secrets.VERCEL_PROJECT_ID }} - -jobs: - check-secrets: - runs-on: ubuntu-latest - outputs: - has-vercel-token: ${{ steps.check.outputs.has-vercel-token }} - steps: - - name: Check if VERCEL_TOKEN exists - id: check - run: | - if [ -n "${{ secrets.VERCEL_TOKEN }}" ]; then - echo "has-vercel-token=true" >> $GITHUB_OUTPUT - else - echo "has-vercel-token=false" >> $GITHUB_OUTPUT - fi - - preview: - runs-on: ubuntu-latest - needs: check-secrets - if: ${{ needs.check-secrets.outputs.has-vercel-token == 'true' }} - steps: - - name: Checkout code - uses: actions/checkout@v4 - - name: Setup Node.js and pnpm - uses: ./.github/actions/setup-node-pnpm - - name: Run lint - run: pnpm lint - working-directory: apps/web-roo-code - - name: Run type check - run: pnpm check-types - working-directory: apps/web-roo-code - - name: Run build - run: pnpm build - 
working-directory: apps/web-roo-code - - name: Install Vercel CLI - run: npm install --global vercel@latest - - name: Pull Vercel Environment Information - run: npx vercel pull --yes --environment=preview --token=${{ secrets.VERCEL_TOKEN }} - - name: Build Project Artifacts - run: npx vercel build --token=${{ secrets.VERCEL_TOKEN }} - - name: Deploy Project Artifacts to Vercel - id: deploy - run: | - DEPLOYMENT_URL=$(npx vercel deploy --prebuilt --token=${{ secrets.VERCEL_TOKEN }}) - echo "deployment_url=$DEPLOYMENT_URL" >> $GITHUB_OUTPUT - echo "Preview deployed to: $DEPLOYMENT_URL" - - - name: Comment PR with preview link - if: github.event_name == 'pull_request' - uses: actions/github-script@v7 - with: - script: | - const deploymentUrl = '${{ steps.deploy.outputs.deployment_url }}'; - const commentIdentifier = ''; - - const { data: comments } = await github.rest.issues.listComments({ - owner: context.repo.owner, - repo: context.repo.repo, - issue_number: context.issue.number, - }); - - const existingComment = comments.find(comment => - comment.body.includes(commentIdentifier) - ); - - const comment = commentIdentifier + '\n🚀 **Preview deployed!**\n\nYour changes have been deployed to Vercel:\n\n**Preview URL:** ' + deploymentUrl + '\n\nThis preview will be updated automatically when you push new commits to this PR.'; - - if (existingComment) { - await github.rest.issues.updateComment({ - owner: context.repo.owner, - repo: context.repo.repo, - comment_id: existingComment.id, - body: comment - }); - } else { - await github.rest.issues.createComment({ - owner: context.repo.owner, - repo: context.repo.repo, - issue_number: context.issue.number, - body: comment - }); - } diff --git a/.gitignore b/.gitignore index 1dbcdc6a362..3e23555e61e 100644 --- a/.gitignore +++ b/.gitignore @@ -55,3 +55,4 @@ qdrant_storage/ plans/ roo-cli-*.tar.gz* +`nnul \ No newline at end of file diff --git a/.roo-hooks.json.example b/.roo-hooks.json.example new file mode 100644 index 
00000000000..4bab41b2c7d --- /dev/null +++ b/.roo-hooks.json.example @@ -0,0 +1,59 @@ +{ + "_comment": "Example hooks configuration file for Roo Code. Rename to .roo-hooks.json to use.", + "_comment2": "Hooks are executed before (PreCompact) and after (PostCompact) context condensation.", + "_comment3": "Each hook can be a command (shell command) or http (HTTP POST request).", + "PreCompact": [ + { + "matcher": "auto", + "hooks": [ + { + "id": "log-pre-compact", + "type": "command", + "description": "Log before automatic condensation", + "command": "echo 'Pre-compact hook triggered at $(date)'", + "shell": "bash", + "timeout": 10 + } + ] + }, + { + "matcher": "manual", + "hooks": [ + { + "id": "log-manual-compact", + "type": "command", + "description": "Log before manual condensation", + "command": "echo 'Manual compact triggered'", + "shell": "bash" + } + ] + } + ], + "PostCompact": [ + { + "matcher": "auto", + "hooks": [ + { + "id": "log-post-compact", + "type": "command", + "description": "Log after automatic condensation", + "command": "echo 'Post-compact hook: Summary length is ${#HOOK_INPUT}'", + "shell": "bash", + "timeout": 10 + }, + { + "id": "notify-webhook", + "type": "http", + "description": "Send notification to webhook", + "url": "https://example.com/webhook", + "method": "POST", + "headers": { + "Content-Type": "application/json", + "Authorization": "Bearer YOUR_TOKEN" + }, + "timeout": 30 + } + ] + } + ] +} diff --git a/.roo/teams/fullstack.json b/.roo/teams/fullstack.json new file mode 100644 index 00000000000..df8e2bfba62 --- /dev/null +++ b/.roo/teams/fullstack.json @@ -0,0 +1,66 @@ +{ + "slug": "fullstack", + "name": "Full-Stack Feature Team", + "description": "Three-phase team for building full-stack features: discovery → implementation → review", + "orchestratorMode": "orchestrator", + "conventions": ".roo/teams/conventions/fullstack.md", + "phases": [ + { + "name": "discovery", + "label": "Discovery & Planning", + "concurrent": true, + 
"requireApproval": true, + "agents": [ + { + "mode": "architect", + "role": "Backend Architect", + "instruction": "Analyze the backend requirements for the following task and produce a detailed technical spec.\n\nTask: {{task}}\n\nDeliverables:\n- API endpoints needed (method, path, request/response shapes)\n- Database schema changes (if any)\n- Key implementation risks or unknowns\n\nOutput as structured markdown." + }, + { + "mode": "architect", + "role": "Frontend Architect", + "instruction": "Analyze the frontend requirements for the following task and produce a detailed technical spec.\n\nTask: {{task}}\n\nDeliverables:\n- Component tree and data flow\n- State management approach\n- API integration points\n- UX edge cases to handle\n\nOutput as structured markdown." + } + ] + }, + { + "name": "implementation", + "label": "Implementation", + "concurrent": false, + "requireApproval": false, + "abortOnChildFailure": true, + "agents": [ + { + "mode": "code", + "role": "Backend Engineer", + "instruction": "Implement the backend changes for this task.\n\nTask: {{task}}\n\nDiscovery results:\n{{context}}\n\nWrite production-quality code. Run tests after each significant change. Do not leave TODOs.", + "worktree": "feat/{{team}}-backend" + }, + { + "mode": "code", + "role": "Frontend Engineer", + "instruction": "Implement the frontend changes for this task.\n\nTask: {{task}}\n\nDiscovery results:\n{{context}}\n\nWrite production-quality code. Ensure the UI handles loading, error, and empty states. Do not leave TODOs.", + "worktree": "feat/{{team}}-frontend" + } + ] + }, + { + "name": "review", + "label": "Code Review", + "concurrent": true, + "requireApproval": false, + "agents": [ + { + "mode": "code", + "role": "Security Reviewer", + "instruction": "Review the implementation for security issues.\n\nTask: {{task}}\n\nImplementation summary:\n{{context}}\n\nFocus on: input validation, auth/authz, injection risks, sensitive data exposure. 
Output a list of findings (severity: critical/high/medium/low) with suggested fixes." + }, + { + "mode": "code", + "role": "QA Engineer", + "instruction": "Review the implementation for correctness and test coverage.\n\nTask: {{task}}\n\nImplementation summary:\n{{context}}\n\nFocus on: missing test cases, edge cases not handled, regression risks. Output a list of findings with suggested test additions." + } + ] + } + ] +} diff --git a/CHANGELOG.md b/CHANGELOG.md index a6b9e72720f..eeb90692163 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3434 +1,5 @@ -# Roo Code Changelog +# Moo Code Changelog -## 3.52.1 +This project was forked from [Roo Code](https://github.com/RooCodeInc/Roo-Code). -### Patch Changes - -- Add correct JSON schema for `.roomodes` configuration files (#11790 by @algorhythm85, PR #11791 by @app/roomote-v0) -- Remove the hiring announcement from the VS Code extension UI (PR #12108 by @app/roomote-v0) - -## 3.52.0 - -### Minor Changes - -- Add Poe as an AI provider so users can access Poe models directly in Roo Code (PR #12015 by @kamilio) -- Improve the xAI provider by migrating it to the Responses API with reusable transform utilities (#11961 by @carlesso, PR #11962 by @carlesso) -- Fix MiniMax model listings and context window handling for more reliable configuration (#11999 by @Rexarrior, PR #12069 by @Rexarrior) -- Add xAI Grok-4.20 models and update the default xAI model selection (#11955 by @carlesso, PR #11956 by @carlesso) -- Add OpenAI GPT-5.4 mini and nano models to expand the available OpenAI model lineup (PR #11946 by @PeterDaveHello) -- Chore: include the automated version bump PR from the previous release cycle for complete release accounting (PR #11892 by @app/github-actions) - -### Patch Changes - -- Add support for OpenAI `gpt-5.4-mini` and `gpt-5.4-nano` models. 
- -## 3.51.1 - -### Patch Changes - -- Feat: Add Cohere Embed v4 model support for Bedrock and improve credential handling (#11823 by @cscvenkatmadurai, PR #11824 by @cscvenkatmadurai) -- Feat: Add Gemini 3.1 Pro customtools model to Vertex AI provider (PR #11857 by @NVolcz) -- Feat: Add gpt-5.4 to ChatGPT Plus/Pro (Codex) model catalog (PR #11876 by @roomote-v0) - -## 3.51.0 - -### Minor Changes - -- Add OpenAI GPT-5.4 and GPT-5.3 Chat Latest model support so Roo Code can use the newest OpenAI chat models (PR #11848 by @PeterDaveHello) -- Add support for exposing skills as slash commands with skill fallback execution for faster workflows (PR #11834 by @hannesrudolph) -- Add CLI support for `--create-with-session-id` plus UUID session validation for more controlled session creation (PR #11859 by @cte) -- Add support for choosing a specific shell when running terminal commands (PR #11851 by @jr) -- Feature: Add the `ROO_ACTIVE` environment variable to terminal session settings for safer terminal guardrails (#11864 by @ajjuaire, PR #11862 by @ajjuaire) -- Improve cloud settings freshness by updating the refresh interval to one hour (PR #11749 by @roomote-v0) -- Add CLI session resume/history support plus an upgrade command for better long-running workflows (PR #11768 by @cte) -- Add support for images in CLI stdin stream commands (PR #11831 by @cte) -- Include `exitCode` in CLI command `tool_result` events for more reliable automation (PR #11820 by @cte) -- Add CLI types to improve development ergonomics and type safety (PR #11781 by @cte) -- Add CLI integration coverage for stdin stream routing and race-condition invariants (PR #11846 by @cte) -- Fix the CLI stdin-stream cancel race and add an integration test suite to prevent regressions (PR #11817 by @cte) -- Improve CLI stream recovery and add a configurable consecutive mistake limit (PR #11775 by @cte) -- Fix CLI streaming deltas, task ID propagation, cancel recovery, and other runtime edge cases (PR #11736 by 
@cte) -- Fix CLI task resumption so paused work can reliably continue (PR #11739 by @cte) -- Recover from unhandled exceptions in the CLI instead of failing hard (PR #11750 by @cte) -- Scope CLI session and resume flags to the current workspace to avoid cross-workspace confusion (PR #11774 by @cte) -- Fix stdin prompt streaming to forward task configuration correctly (PR #11778 by @daniel-lxs) -- Handle stdin-stream control-flow errors gracefully in the CLI runtime (PR #11811 by @cte) -- Fix stdin stream queued messages and command output streaming in the CLI (PR #11814 by @cte) -- Increase the CLI command execution timeout for long-running commands (PR #11815 by @cte) -- Fix knip checks to keep repository validation green (PR #11819 by @cte) -- Fix CLI upgrade version detection so upgrades resolve the correct target version (PR #11829 by @cte) -- Ignore model-provided timeout values in the CLI runtime to keep command handling consistent (PR #11835 by @cte) -- Fix redundant skill reloading during conversations to reduce duplicate work (PR #11838 by @hannesrudolph) -- Ensure full command output is streamed before the CLI reports completion (PR #11842 by @cte) -- Fix CLI follow-up routing after completion prompts so next actions land in the right place (PR #11844 by @cte) -- Remove the Netflix logo from the homepage (PR #11787 by @roomote-v0) -- Chore: Prepare CLI release v0.1.2 (PR #11737 by @cte) -- Chore: Prepare CLI release v0.1.3 (PR #11740 by @cte) -- Chore: Prepare CLI release v0.1.4 (PR #11751 by @cte) -- Chore: Prepare CLI release v0.1.5 (PR #11772 by @cte) -- Chore: Prepare CLI release v0.1.6 (PR #11780 by @cte) -- Release Roo Code v1.113.0 (PR #11782 by @cte) -- Chore: Prepare CLI release v0.1.7 (PR #11812 by @cte) -- Chore: Prepare CLI release v0.1.8 (PR #11816 by @cte) -- Chore: Prepare CLI release v0.1.9 (PR #11818 by @cte) -- Chore: Prepare CLI release v0.1.10 (PR #11821 by @cte) -- Release Roo Code v1.114.0 (PR #11822 by @cte) -- Chore: Prepare CLI 
release v0.1.11 (PR #11832 by @cte) -- Release Roo Code v1.115.0 (PR #11833 by @cte) -- Chore: Prepare CLI release v0.1.12 (PR #11836 by @cte) -- Chore: Prepare CLI release v0.1.13 (PR #11837 by @hannesrudolph) -- Chore: Prepare CLI release v0.1.14 (PR #11843 by @cte) -- Chore: Prepare CLI release v0.1.15 (PR #11845 by @cte) -- Chore: Prepare CLI release v0.1.16 (PR #11852 by @cte) -- Chore: Prepare CLI release v0.1.17 (PR #11860 by @cte) - -### Patch Changes - -- Add OpenAI's GPT-5.3-Chat-Latest model support -- Add OpenAI's GPT-5.3-Codex model support -- Add OpenAI's GPT-5.4 model support -- Add OpenAI's GPT-5.3-Codex model support (PR #11728 by @PeterDaveHello) -- Warm Roo models on CLI startup for faster initial responses (PR #11722 by @cte) -- Fix spelling/grammar and casing inconsistencies (#11478 by @PeterDaveHello, PR #11485 by @PeterDaveHello) -- Fix: Restore Linear integration page (PR #11725 by @roomote) -- Chore: Prepare CLI release v0.1.1 (PR #11723 by @cte) - -## [3.50.4] - 2026-02-21 - -- Feat: Add MiniMax M2.5 model support (#11471 by @love8ko, PR #11458 by @roomote) - -## [3.50.3] - 2026-02-20 - -- Fix: Correct Vertex AI claude-sonnet-4-6 model ID (#11625 by @yuvarajl, PR #11626 by @roomote) -- Restore Unbound as a provider (PR #11624 by @pugazhendhi-m) - -## [3.50.2] - 2026-02-20 - -- Fix: Inline terminal rendering parity with the VSCode Terminal (#10699 by @jerrill-johnson-bitwerx, PR #11361 by @RussellZager) -- Fix: Enable prompt caching for Bedrock custom ARN and default to ON (#10846 by @wisestmumbler, PR #11373 by @roomote) -- Feat: Add visual feedback to copy button in task actions (#11401 by @omagoduck, PR #11403 by @omagoduck) - -## [3.50.1] - 2026-02-20 - -- Fix OpenAI Codex and OpenAI Native stream parsing for done-only and `content_part` events, including duplicate-text guards when deltas are already streamed. 
- -## [3.50.0] - 2026-02-19 - -- Add Gemini 3.1 Pro support and set as default Gemini model (PR #11608 by @PeterDaveHello) -- Add NDJSON stdin protocol, list subcommands, and modularize CLI run command (PR #11597 by @cte) -- Prepare CLI v0.1.0 release (PR #11599 by @cte) -- Remove integration tests (PR #11598 by @roomote) -- Changeset version bump (PR #11596 by @github-actions) - -## [3.49.0] - 2026-02-19 - -- Add file changes panel to track all file modifications per conversation (#11493 by @saneroen, PR #11494 by @saneroen) -- Add per-workspace indexing opt-in and stop/cancel indexing controls (#11455 by @JamesRobert20, PR #11456 by @JamesRobert20) -- Add per-task file-based history store for cross-instance safety (PR #11490 by @roomote) -- Fix: Redesign rehydration scroll lifecycle for smoother chat experience (PR #11483 by @hannesrudolph) -- Fix: Bump @roo-code/types metadata version to 1.111.0 after revert regression (PR #11588 by @roomote) - -## [3.48.1] - 2026-02-18 - -- Fix: Await MCP server initialization before returning McpHub instance, preventing race conditions (PR #11518 by @daniel-lxs) -- Fix: Correct Bedrock Claude Sonnet 4.6 model ID (#11509 by @PeterDaveHello, PR #11569 by @PeterDaveHello) -- Add DeleteQueuedMessage IPC command for managing queued messages (PR #11464 by @roomote) - -## [3.48.0] - 2026-02-17 - -- Add Anthropic Claude Sonnet 4.6 support across all providers — Anthropic, Bedrock, Vertex, OpenRouter, and Vercel AI Gateway (PR #11509 by @PeterDaveHello) -- Add lock toggle to pin API config across all modes in a workspace (PR #11295 by @hannesrudolph) -- Fix: Prevent parent task state loss during orchestrator delegation (PR #11281 by @hannesrudolph) -- Fix: Resolve race condition in new_task delegation that loses parent task history (PR #11331 by @daniel-lxs) -- Fix: Serialize taskHistory writes and fix delegation status overwrite race (PR #11335 by @hannesrudolph) -- Fix: Prevent chat history loss during cloud/settings navigation 
(#11371 by @SannidhyaSah, PR #11372 by @SannidhyaSah) -- Fix: Preserve condensation summary during task resume (#11487 by @SannidhyaSah, PR #11488 by @SannidhyaSah) -- Fix: Resolve chat scroll anchoring and task-switch scroll race conditions (PR #11385 by @hannesrudolph) -- Fix: Preserve pasted images in chatbox during chat activity (PR #11375 by @app/roomote) -- Add disabledTools setting to globally disable native tools (PR #11277 by @daniel-lxs) -- Rename search_and_replace tool to edit and unify edit-family UI (PR #11296 by @hannesrudolph) -- Render nested subtasks as recursive tree in history view (PR #11299 by @hannesrudolph) -- Remove 9 low-usage providers and add retired-provider UX (PR #11297 by @hannesrudolph) -- Remove browser use functionality entirely (PR #11392 by @hannesrudolph) -- Remove built-in skills and built-in skills mechanism (PR #11414 by @hannesrudolph) -- Remove footgun prompting (file-based system prompt override) (PR #11387 by @hannesrudolph) -- Batch consecutive tool calls in chat UI with shared utility (PR #11245 by @hannesrudolph) -- Validate Gemini thinkingLevel against model capabilities and handle empty streams (PR #11303 by @hannesrudolph) -- Add GLM-5 model support to Z.ai provider (PR #11440 by @app/roomote) -- Fix: Prevent double notification sound playback (PR #11283 by @hannesrudolph) -- Fix: Prevent false unsaved changes prompt with OpenAI Compatible headers (#8230 by @hannesrudolph, PR #11334 by @daniel-lxs) -- Fix: Cancel backend auto-approval timeout when auto-approve is toggled off mid-countdown (PR #11439 by @SannidhyaSah) -- Fix: Add follow_up param validation in AskFollowupQuestionTool (PR #11484 by @rossdonald) -- Fix: Prevent webview postMessage crashes and make dispose idempotent (PR #11313 by @0xMink) -- Fix: Avoid zsh process-substitution false positives in assignments (PR #11365 by @hannesrudolph) -- Fix: Harden command auto-approval against inline JS false positives (PR #11382 by @hannesrudolph) -- Fix: Make tab 
close best-effort in DiffViewProvider.open (PR #11363 by @0xMink) -- Fix: Canonicalize core.worktree comparison to prevent Windows path mismatch failures (PR #11346 by @0xMink) -- Fix: Make removeClineFromStack() delegation-aware to prevent orphaned parent tasks (PR #11302 by @app/roomote) -- Fix task resumption in the API module (PR #11369 by @cte) -- Make defaultTemperature required in getModelParams to prevent silent temperature overrides (PR #11218 by @app/roomote) -- Remove noisy console.warn logs from NativeToolCallParser (PR #11264 by @daniel-lxs) -- Consolidate getState calls in resolveWebviewView (PR #11320 by @0xMink) -- Clean up repo-facing mode rules (PR #11410 by @hannesrudolph) -- Implement ModelMessage storage layer with AI SDK response messages (PR #11409 by @daniel-lxs) -- Extract translation and merge resolver modes into reusable skills (PR #11215 by @app/roomote) -- Add blog section with initial posts to roocode.com (PR #11127 by @app/roomote) -- Replace Roomote Control with Linear Integration in cloud features grid (PR #11280 by @app/roomote) -- Add IPC query handlers for commands, modes, and models (PR #11279 by @cte) -- Add stdin stream mode for the CLI (PR #11476 by @cte) -- Make CLI auto-approve by default with require-approval opt-in (PR #11424 by @cte) -- Update CLI default model from Opus 4.5 to Opus 4.6 (PR #11273 by @app/roomote) -- Add linux-arm64 support for the Roo CLI (PR #11314 by @cte) -- CLI release: v0.0.51 (PR #11274 by @cte) -- CLI release: v0.0.52 (PR #11324 by @cte) -- CLI release: v0.0.53 (PR #11425 by @cte) -- CLI release: v0.0.54 (PR #11477 by @cte) - -## [3.45.0] - 2026-01-27 - -![3.45.0 Release - Smart Code Folding](/releases/3.45.0-release.png) - -- Smart Code Folding: Context condensation now intelligently preserves a lightweight map of files you worked on—function signatures, class declarations, and type definitions—so Roo can continue referencing them accurately after condensing. 
Files are prioritized by most recent access, with a ~50k character budget ensuring your latest work is always preserved. (Idea by @shariqriazz, PR #10942 by @hannesrudolph) - -## [3.44.2] - 2026-01-27 - -- Re-enable parallel tool calling with new_task isolation safeguards (PR #11006 by @mrubens) -- Fix worktree indexing by using relative paths in isPathInIgnoredDirectory (PR #11009 by @daniel-lxs) -- Fix local model validation error for Ollama models (PR #10893 by @roomote) -- Fix duplicate tool_call emission from Responses API providers (PR #11008 by @daniel-lxs) - -## [3.44.1] - 2026-01-27 - -- Fix LiteLLM tool ID validation errors for Bedrock proxy (PR #10990 by @daniel-lxs) -- Add temperature=0.9 and top_p=0.95 to zai-glm-4.7 model for better generation quality (PR #10945 by @sebastiand-cerebras) -- Add quality checks to marketing site deployment workflows (PR #10959 by @mp-roocode) - -## [3.44.0] - 2026-01-26 - -![3.44.0 Release - Worktrees](/releases/3.44.0-release.png) - -- Add worktree selector and creation UX (PR #10940 by @brunobergher, thanks Cline!) 
-- Improve subtask visibility and navigation in history and chat views (PR #10864 by @brunobergher) -- Add wildcard support for MCP alwaysAllow configuration (PR #10948 by @app/roomote) -- Fix: Prevent nested condensing from including previously-condensed content (PR #10985 by @hannesrudolph) -- Fix: VS Code LM token counting returns 0 outside requests, breaking context condensing (#10968 by @srulyt, PR #10983 by @daniel-lxs) -- Fix: Record truncation event when condensation fails but truncation succeeds (PR #10984 by @hannesrudolph) -- Replace hyphen encoding with fuzzy matching for MCP tool names (PR #10775 by @daniel-lxs) -- Remove MCP SERVERS section from system prompt for cleaner prompts (PR #10895 by @daniel-lxs) -- new_task tool creates checkpoint the same way write_to_file does (PR #10982 by @daniel-lxs) -- Update Fireworks provider with new models (#10674 by @hannesrudolph, PR #10679 by @ThanhNguyxn) -- Fix: Truncate AWS Bedrock toolUseId to 64 characters (PR #10902 by @daniel-lxs) -- Fix: Restore opaque background to settings section headers (PR #10951 by @app/roomote) -- Fix: Remove unsupported Fireworks model tool fields (PR #10937 by @app/roomote) -- Update and improve zh-TW Traditional Chinese locale and docs (PR #10953 by @PeterDaveHello) -- Chore: Remove POWER_STEERING experiment remnants (PR #10980 by @hannesrudolph) - -## [3.43.0] - 2026-01-23 - -![3.43.0 Release - Intelligent Context Condensation](/releases/3.43.0-release.png) - -- Intelligent Context Condensation v2: New context condensation system that intelligently summarizes conversation history when approaching context limits, preserving important information while reducing token usage (PR #10873 by @hannesrudolph) -- Improved context condensation with environment details, accurate token counts, and lazy evaluation for better performance (PR #10920 by @hannesrudolph) -- Move condense prompt editor to Context Management tab for better discoverability and organization (PR #10909 by 
@hannesrudolph) -- Update Z.AI models with new variants and pricing (#10859 by @ErdemGKSL, PR #10860 by @ErdemGKSL) -- Add pnpm install:vsix:nightly command for easier nightly build installation (PR #10912 by @hannesrudolph) -- Fix: Convert orphaned tool_results to text blocks after condensing to prevent API errors (PR #10927 by @daniel-lxs) -- Fix: Auto-migrate v1 condensing prompt and handle invalid providers on import (PR #10931 by @hannesrudolph) -- Fix: Use json-stream-stringify for pretty-printing MCP config files to prevent memory issues with large configs (#9862 by @Michaelzag, PR #9864 by @Michaelzag) -- Fix: Correct Gemini 3 pricing for Flash and Pro models (#10432 by @rossdonald, PR #10487 by @roomote) -- Fix: Skip thoughtSignature blocks during markdown export for cleaner output (#10199 by @rossdonald, PR #10932 by @rossdonald) -- Fix: Duplicate model display for OpenAI Codex provider (PR #10930 by @roomote) -- Remove diffEnabled and fuzzyMatchThreshold settings as they are no longer needed (#10648 by @hannesrudolph, PR #10298 by @hannesrudolph) -- Remove MULTI_FILE_APPLY_DIFF experiment (PR #10925 by @hannesrudolph) -- Remove POWER_STEERING experimental feature (PR #10926 by @hannesrudolph) -- Remove legacy XML tool calling code (getToolDescription) for cleaner codebase (PR #10929 by @hannesrudolph) - -## [3.42.0] - 2026-01-22 - -![3.42.0 Release - ChatGPT Usage Tracking](/releases/3.42.0-release.png) - -- Added UI to track your ChatGPT usage limits in the OpenAI Codex provider (PR #10813 by @hannesrudolph) -- Removed deprecated Claude Code provider (PR #10883 by @daniel-lxs) -- Streamlined codebase by removing legacy XML tool calling functionality (#10848 by @hannesrudolph, PR #10841 by @hannesrudolph) -- Standardize model selectors across all providers: Improved consistency of model selection UI (#10650 by @hannesrudolph, PR #10294 by @hannesrudolph) -- Enable prompt caching for Cerebras zai-glm-4.7 model (#10601 by @jahanson, PR #10670 by 
@app/roomote) - -- Add Kimi K2 thinking model to VertexAI provider (#9268 by @diwakar-s-maurya, PR #9269 by @app/roomote) - -- Warn users when too many MCP tools are enabled (PR #10772 by @app/roomote) - -- Migrate context condensing prompt to customSupportPrompts (PR #10881 by @hannesrudolph) - -- Unify export path logic and default to Downloads folder (PR #10882 by @hannesrudolph) - -- Performance improvements for webview state synchronization (PR #10842 by @hannesrudolph) - -- Fix: Handle mode selector empty state on workspace switch (#10660 by @hannesrudolph, PR #9674 by @app/roomote) - -- Fix: Resolve race condition in context condensing prompt input (PR #10876 by @hannesrudolph) - -- Fix: Prevent double emission of text/reasoning in OpenAI native and codex handlers (PR #10888 by @hannesrudolph) - -- Fix: Prevent task abortion when resuming via IPC/bridge (PR #10892 by @cte) - -- Fix: Enforce file restrictions for all editing tools (PR #10896 by @app/roomote) - -- Fix: Remove custom condensing model option (PR #10901 by @hannesrudolph) - -- Unify user content tags for consistent prompt formatting (#10658 by @hannesrudolph, PR #10723 by @app/roomote) - -- Clarify linked SKILL.md file handling in prompts (PR #10907 by @hannesrudolph) - -- Fix: Padding on Roo Code Cloud teaser (PR #10889 by @app/roomote) - -## [3.41.3] - 2026-01-18 - -- Fix: Thinking block word-breaking to prevent horizontal scroll in the chat UI (PR #10806 by @roomote) - -- Add Claude-like CLI flags and authentication fixes for the Roo Code CLI (PR #10797 by @cte) - -- Improve CLI authentication by using a redirect instead of a fetch (PR #10799 by @cte) - -- Fix: Roo Code Router fixes for the CLI (PR #10789 by @cte) - -- Release CLI v0.0.48 with latest improvements (PR #10800 by @cte) - -- Release CLI v0.0.47 (PR #10798 by @cte) - -- Revert E2E tests enablement to address stability issues (PR #10794 by @cte) - -## [3.41.2] - 2026-01-16 - -- Add button to open markdown in VSCode preview for easier reading of formatted content (PR 
#10773 by @brunobergher) -- Fix: Reset invalid model selection when using OpenAI Codex provider (PR #10777 by @hannesrudolph) -- Fix: Add openai-codex to providers that don't require an API key (PR #10786 by @roomote) -- Fix: Detect Gemini models with space-separated names for proper thought signature injection in LiteLLM (PR #10787 by @daniel-lxs) - -## [3.41.1] - 2026-01-16 - -![3.41.1 Release - Aggregated Subtask Costs](/releases/3.41.1-release.png) - -- Feat: Aggregate subtask costs in parent task (#5376 by @hannesrudolph, PR #10757 by @taltas) -- Fix: Prevent duplicate tool_use IDs causing API 400 errors (PR #10760 by @daniel-lxs) -- Fix: Handle missing tool identity in OpenAI Native streams (PR #10719 by @hannesrudolph) -- Fix: Truncate call_id to 64 chars for OpenAI Responses API (PR #10763 by @daniel-lxs) -- Fix: Gemini thought signature validation errors (PR #10694 by @daniel-lxs) -- Fix: Filter out empty text blocks from user messages for Gemini compatibility (PR #10728 by @daniel-lxs) -- Fix: Flatten top-level anyOf/oneOf/allOf in MCP tool schemas (PR #10726 by @daniel-lxs) -- Fix: Filter Ollama models without native tool support (PR #10735 by @daniel-lxs) -- Feat: Add settings tab titles to search index (PR #10761 by @roomote) -- Feat: Clarify Slack and Linear are Cloud Team only features (PR #10748 by @roomote) - -## [3.41.0] - 2026-01-15 - -![3.41.0 Release - OpenAI - ChatGPT Plus/Pro Provider](/releases/3.41.0-release.png) - -- Add OpenAI - ChatGPT Plus/Pro Provider that gives subscription-based access to Codex models without per-token costs (PR #10736 by @hannesrudolph) -- Add gpt-5.2-codex model to openai-native provider, providing access to the latest GPT model with enhanced coding capabilities (PR #10731 by @hannesrudolph) -- Fix: Clear terminal output buffers to prevent memory leaks that could cause gray screens and performance degradation (#10666, PR #7666 by @hannesrudolph) -- Fix: Inject dummy thought signatures on ALL tool calls for Gemini 
models, resolving issues with Gemini tool call handling through LiteLLM (PR #10743 by @daniel-lxs) -- Enable E2E tests with 39 passing tests, improving test coverage and reliability (PR #10720 by @ArchimedesCrypto) -- Add alwaysAllow config for MCP time server tools in E2E tests (PR #10733 by @ArchimedesCrypto) - -## [3.40.1] - 2026-01-13 - -- Fix: Add allowedFunctionNames support for Gemini to prevent mode switch errors (#10711 by @hannesrudolph, PR #10708 by @hannesrudolph) - -## [3.40.0] - 2026-01-13 - -![3.40.0 Release - Settings Search](/releases/3.40.0-release.png) - -- Add settings search functionality to quickly find and navigate to specific settings (PR #10619 by @mrubens) -- Improve settings search UI with better styling and usability (PR #10633 by @brunobergher) -- Add standardized stop button for improved task cancellation visibility (PR #10639 by @brunobergher) -- Display edit_file errors in UI after consecutive failures for better debugging feedback (PR #10581 by @daniel-lxs) -- Improve error display styling and visibility in chat messages (PR #10692 by @brunobergher) -- Improve stop button visibility and streamline error handling (PR #10696 by @brunobergher) -- Fix: Omit parallel_tool_calls when not explicitly enabled to prevent API errors (#10553 by @Idlebrand, PR #10671 by @daniel-lxs) -- Fix: Encode hyphens in MCP tool names before sanitization (#10642 by @pdecat, PR #10644 by @pdecat) -- Fix: Correct Gemini 3 thought signature injection format via OpenRouter (PR #10640 by @daniel-lxs) -- Fix: Sanitize tool_use IDs to match API validation pattern (PR #10649 by @daniel-lxs) -- Fix: Use placeholder for empty tool result content to fix Gemini API validation (PR #10672 by @daniel-lxs) -- Fix: Return empty string from getReadablePath when path is empty (PR #10638 by @daniel-lxs) -- Optimize message block cloning in presentAssistantMessage for better performance (PR #10616 by @ArchimedesCrypto) - -## [3.39.3] - 2026-01-10 - -![3.39.3 Release - Roo Code 
Router](/releases/3.39.3-release.png) - -- Rename Roo Code Cloud Provider to Roo Code Router for clearer branding (PR #10560 by @roomote) -- Update Roo Code Router service name throughout the codebase (PR #10607 by @mrubens) -- Update router name in types for consistency (PR #10605 by @mrubens) -- Improve ExtensionHost code organization and cleanup (PR #10600 by @cte) -- Add local installation option to CLI release script for testing (PR #10597 by @cte) -- Reorganize CLI file structure for better maintainability (PR #10599 by @cte) -- Add TUI to CLI (PR #10480 by @cte) - -## [3.39.2] - 2026-01-09 - -- Fix: Ensure all tools have consistent strict mode values for Cerebras compatibility (#10334 by @brianboysen51, PR #10589 by @app/roomote) -- Fix: Remove convertToSimpleMessages to restore tool calling for OpenAI-compatible providers (PR #10575 by @daniel-lxs) -- Fix: Make edit_file matching more resilient to prevent false negatives (PR #10585 by @hannesrudolph) -- Fix: Order text parts before tool calls in assistant messages for vscode-lm (PR #10573 by @daniel-lxs) -- Fix: Ensure assistant message content is never undefined for Gemini compatibility (PR #10559 by @daniel-lxs) -- Fix: Merge approval feedback into tool result instead of pushing duplicate messages (PR #10519 by @daniel-lxs) -- Fix: Round-trip Gemini thought signatures for tool calls (PR #10590 by @hannesrudolph) -- Feature: Improve error messaging for stream termination errors from provider (PR #10548 by @daniel-lxs) -- Feature: Add debug setting to settings page for easier troubleshooting (PR #10580 by @hannesrudolph) -- Chore: Disable edit_file tool for Gemini/Vertex providers (PR #10594 by @hannesrudolph) -- Chore: Stop overriding tool allow/deny lists for Gemini (PR #10592 by @hannesrudolph) -- Chore: Change default CLI model to anthropic/claude-opus-4.5 (PR #10544 by @mrubens) -- Chore: Update Terms of Service effective January 9, 2026 (PR #10568 by @mrubens) -- Chore: Move more types to 
@roo-code/types for CLI support (PR #10583 by @cte) -- Chore: Add functionality to @roo-code/core for CLI support (PR #10584 by @cte) -- Chore: Add slash commands useful for CLI development (PR #10586 by @cte) - -## [3.39.1] - 2026-01-08 - -- Fix: Stabilize file paths during native tool call streaming to prevent path corruption (PR #10555 by @daniel-lxs) -- Fix: Disable Gemini thought signature persistence to prevent corrupted signature errors (PR #10554 by @daniel-lxs) -- Fix: Change minItems from 2 to 1 for Anthropic API compatibility (PR #10551 by @daniel-lxs) - -## [3.39.0] - 2026-01-08 - -![3.39.0 Release - Kangaroo go BRRR](/releases/3.39.0-release.png) - -- Implement sticky provider profile for task-level API config persistence (#8010 by @hannesrudolph, PR #10018 by @hannesrudolph) -- Add support for image file @mentions (PR #10189 by @hannesrudolph) -- Rename YOLO to BRRR (#8574 by @mojomast, PR #10507 by @roomote) -- Add debug-mode proxy routing for debugging API calls (#7042 by @SleeperSmith, PR #10467 by @hannesrudolph) -- Add Kimi K2 thinking model to Fireworks AI provider (#9201 by @kavehsfv, PR #9202 by @roomote) -- Add xhigh reasoning effort to OpenAI compatible endpoints (#10060 by @Soorma718, PR #10061 by @roomote) -- Filter @ mention file search results using .rooignore (#10169 by @jerrill-johnson-bitwerx, PR #10174 by @roomote) -- Add image support documentation to read_file native tool description (#10440 by @nabilfreeman, PR #10442 by @roomote) -- Add zai-glm-4.7 to Cerebras models (PR #10500 by @sebastiand-cerebras) -- VSCode shim and basic CLI for running Roo Code headlessly (PR #10452 by @cte) -- Add CLI installer for headless Roo Code (PR #10474 by @cte) -- Add option to use CLI for evals (PR #10456 by @cte) -- Remember last Roo model selection in web-evals and add evals skill (PR #10470 by @hannesrudolph) -- Tweak the style of follow up suggestion modes (PR #9260 by @mrubens) -- Fix: Handle PowerShell ENOENT error in os-name on Windows 
(#9859 by @Yang-strive, PR #9897 by @roomote) -- Fix: Make command chaining examples shell-aware for Windows compatibility (#10352 by @AlexNek, PR #10434 by @roomote) -- Fix: Preserve tool_use blocks for all tool_results in kept messages during condensation (PR #10471 by @daniel-lxs) -- Fix: Add additionalProperties: false to MCP tool schemas for OpenAI Responses API (PR #10472 by @daniel-lxs) -- Fix: Prevent duplicate tool_result blocks causing API errors (PR #10497 by @daniel-lxs) -- Fix: Add explicit deduplication for duplicate tool_result blocks (#10465 by @nabilfreeman, PR #10466 by @roomote) -- Fix: Use task stored API config as fallback for rate limit (PR #10266 by @roomote) -- Fix: Remove legacy Claude 2 series models from Bedrock provider (#9220 by @KevinZhao, PR #10501 by @roomote) -- Fix: Add missing description fields for debugProxy configuration (PR #10505 by @roomote) -- Fix: Glitchy kangaroo bounce animation on welcome screen (PR #10035 by @objectiveSee) - -## [3.38.3] - 2026-01-03 - -- Feat: Add option in Context settings to recursively load `.roo/rules` and `AGENTS.md` from subdirectories (PR #10446 by @mrubens) -- Fix: Stop frequent Claude Code sign-ins by hardening OAuth refresh token handling (PR #10410 by @hannesrudolph) -- Fix: Add `maxConcurrentFileReads` limit to native `read_file` tool schema (PR #10449 by @app/roomote) -- Fix: Add type check for `lastMessage.text` in TTS useEffect to prevent runtime errors (PR #10431 by @app/roomote) - -## [3.38.2] - 2025-12-31 - -![3.38.2 Release - Skill Alignment](/releases/3.38.2-release.png) - -- Align skills system with Agent Skills specification (PR #10409 by @hannesrudolph) -- Prevent write_to_file from creating files at truncated paths (PR #10415 by @mrubens and @daniel-lxs) -- Update Cerebras maxTokens to 16384 (PR #10387 by @sebastiand-cerebras) -- Fix rate limit wait display (PR #10389 by @hannesrudolph) -- Remove human-relay provider (PR #10388 by @hannesrudolph) -- Replace Todo Lists video 
with Context Management video in documentation (PR #10375 by @SannidhyaSah) - -## [3.38.1] - 2025-12-29 - -![3.38.1 Release - Bug Fixes and Stability](/releases/3.38.1-release.png) - -- Fix: Flush pending tool results before condensing context (PR #10379 by @daniel-lxs) -- Fix: Revert mergeToolResultText for OpenAI-compatible providers (PR #10381 by @hannesrudolph) -- Fix: Enforce maxConcurrentFileReads limit in read_file tool (PR #10363 by @roomote) -- Fix: Improve feedback message when read_file is used on a directory (PR #10371 by @roomote) -- Fix: Handle custom tool use similarly to MCP tools for IPC schema purposes (PR #10364 by @jr) -- Fix: Correct GitHub repository URL in marketing page (#10376 by @jishnuteegala, PR #10377 by @roomote) -- Docs: Clarify path to Security Settings in privacy policy (PR #10367 by @roomote) - -## [3.38.0] - 2025-12-27 - -![3.38.0 Release - Skills](/releases/3.38.0-release.png) - -- Add support for [Agent Skills](https://agentskills.io/), enabling reusable packages of prompts, tools, and resources to extend Roo's capabilities (PR #10335 by @mrubens) -- Add optional mode field to slash command front matter, allowing commands to automatically switch to a specific mode when triggered (PR #10344 by @app/roomote) -- Add support for npm packages and .env files to custom tools, allowing custom tools to import dependencies and access environment variables (PR #10336 by @cte) -- Remove simpleReadFileTool feature, streamlining the file reading experience (PR #10254 by @app/roomote) -- Remove OpenRouter Transforms feature (PR #10341 by @app/roomote) -- Fix mergeToolResultText handling in Roo provider (PR #10359 by @mrubens) - -## [3.37.1] - 2025-12-23 - -![3.37.1 Release - Tool Fixes and Provider Improvements](/releases/3.37.1-release.png) - -- Fix: Send native tool definitions by default for OpenAI to ensure proper tool usage (PR #10314 by @hannesrudolph) -- Fix: Preserve reasoning_details shape to prevent malformed responses when 
processing model output (PR #10313 by @hannesrudolph) -- Fix: Drain queued messages while waiting for ask to prevent message loss (PR #10315 by @hannesrudolph) -- Feat: Add grace retry for empty assistant messages to improve reliability (PR #10297 by @hannesrudolph) -- Feat: Enable mergeToolResultText for all OpenAI-compatible providers for better tool result handling (PR #10299 by @hannesrudolph) -- Feat: Enable mergeToolResultText for Roo Code Router (PR #10301 by @hannesrudolph) -- Feat: Strengthen native tool-use guidance in prompts for improved model behavior (PR #10311 by @hannesrudolph) -- UX: Account-centric signup flow for improved onboarding experience (PR #10306 by @brunobergher) - -## [3.37.0] - 2025-12-22 - -![3.37.0 Release - Custom Tool Calling](/releases/3.37.0-release.png) - -- Add MiniMax M2.1 and improve environment_details handling for Minimax thinking models (PR #10284 by @hannesrudolph) -- Add GLM-4.7 model with thinking mode support for Zai provider (PR #10282 by @hannesrudolph) -- Add experimental custom tool calling - define custom tools that integrate seamlessly with your AI workflow (PR #10083 by @cte) -- Deprecate XML tool protocol selection and force native tool format for new tasks (PR #10281 by @daniel-lxs) -- Fix: Emit tool_call_end events in OpenAI handler when streaming ends (#10275 by @torxeon, PR #10280 by @daniel-lxs) -- Fix: Emit tool_call_end events in BaseOpenAiCompatibleProvider (PR #10293 by @hannesrudolph) -- Fix: Disable strict mode for MCP tools to preserve optional parameters (PR #10220 by @daniel-lxs) -- Fix: Move array-specific properties into anyOf variant in normalizeToolSchema (PR #10276 by @daniel-lxs) -- Fix: Add CRLF line ending normalization to search_replace and search_and_replace tools (PR #10288 by @hannesrudolph) -- Fix: Add graceful fallback for model parsing in Chutes provider (PR #10279 by @hannesrudolph) -- Fix: Enable Requesty refresh models with credentials (PR #10273 by @daniel-lxs) -- Fix: Improve 
reasoning_details accumulation and serialization (PR #10285 by @hannesrudolph) -- Fix: Preserve reasoning_content in condense summary for DeepSeek-reasoner (PR #10292 by @hannesrudolph) -- Refactor Zai provider to merge environment_details into tool result instead of system message (PR #10289 by @hannesrudolph) -- Remove parallel_tool_calls parameter from litellm provider (PR #10274 by @roomote) -- Add Cloud Team page with comprehensive team management features (PR #10267 by @roomote) -- Add message log deduper utility for evals (PR #10286 by @hannesrudolph) - -## [3.36.16] - 2025-12-19 - -- Fix: Normalize tool schemas for VS Code LM API to resolve error 400 when using VS Code Language Model API providers (PR #10221 by @hannesrudolph) - -## [3.36.15] - 2025-12-19 - -![3.36.15 Release - 1M Context Window Support](/releases/3.36.15-release.png) - -- Add 1M context window beta support for Claude Sonnet 4 on Vertex AI, enabling significantly larger context for complex tasks (PR #10209 by @hannesrudolph) -- Add native tool calling support for LM Studio and Qwen-Code providers, improving compatibility with local models (PR #10208 by @hannesrudolph) -- Add native tool call defaults for OpenAI-compatible providers, expanding native function calling across more configurations (PR #10213 by @hannesrudolph) -- Enable native tool calls for Requesty provider (PR #10211 by @daniel-lxs) -- Improve API error handling and visibility with clearer error messages and better user feedback (PR #10204 by @brunobergher) -- Add downloadable error diagnostics from chat errors, making it easier to troubleshoot and report issues (PR #10188 by @brunobergher) -- Fix refresh models button not properly flushing the cache, ensuring model lists update correctly (#9682 by @tl-hbk, PR #9870 by @pdecat) -- Fix additionalProperties handling for strict mode compatibility, resolving schema validation issues with certain providers (PR #10210 by @daniel-lxs) - -## [3.36.14] - 2025-12-18 - -![3.36.14 
Release - Native Tool Calling for Claude on Vertex AI](/releases/3.36.14-release.png) - -- Add native tool calling support for Claude models on Vertex AI, enabling more efficient and reliable tool interactions (PR #10197 by @hannesrudolph) -- Fix JSON Schema format value stripping for OpenAI compatibility, resolving issues with unsupported format values (PR #10198 by @daniel-lxs) -- Improve "no tools used" error handling with graceful retry mechanism for better reliability when tools fail to execute (PR #10196 by @hannesrudolph) - -## [3.36.13] - 2025-12-18 - -![3.36.13 Release - Native Tool Protocol](/releases/3.36.13-release.png) - -- Change default tool protocol from XML to native for improved reliability and performance (PR #10186 by @mrubens) -- Add native tool support for VS Code Language Model API providers (PR #10191 by @daniel-lxs) -- Lock task tool protocol for consistent task resumption, ensuring tasks resume with the same protocol they started with (PR #10192 by @daniel-lxs) -- Replace edit_file tool alias with actual edit_file tool for improved diff editing capabilities (PR #9983 by @hannesrudolph) -- Fix LiteLLM router models by merging default model info for native tool calling support (PR #10187 by @daniel-lxs) -- Add PostHog exception tracking for consecutive mistake errors to improve error monitoring (PR #10193 by @daniel-lxs) - -## [3.36.12] - 2025-12-18 - -![3.36.12 Release - Better telemetry and Bedrock fixes](/releases/3.36.12-release.png) - -- Fix: Add userAgentAppId to Bedrock embedder for code indexing (#10165 by @jackrein, PR #10166 by @roomote) -- Update OpenAI and Gemini tool preferences for improved model behavior (PR #10170 by @hannesrudolph) -- Extract error messages from JSON payloads for better PostHog error grouping (PR #10163 by @daniel-lxs) - -## [3.36.11] - 2025-12-17 - -![3.36.11 Release - Native Tool Calling Enhancements](/releases/3.36.11-release.png) - -- Add support for Claude Code Provider native tool calling, improving 
tool execution performance and reliability (PR #10077 by @hannesrudolph) -- Enable native tool calling by default for Z.ai models for better model compatibility (PR #10158 by @app/roomote) -- Enable native tools by default for OpenAI compatible provider to improve tool calling support (PR #10159 by @daniel-lxs) -- Fix: Normalize MCP tool schemas for Bedrock and OpenAI strict mode to ensure proper tool compatibility (PR #10148 by @daniel-lxs) -- Fix: Remove dots and colons from MCP tool names for Bedrock compatibility (PR #10152 by @daniel-lxs) -- Fix: Convert tool_result to XML text when native tools disabled for Bedrock (PR #10155 by @daniel-lxs) -- Fix: Refresh Roo models cache with session token on auth state change to resolve model list refresh issues (PR #10156 by @daniel-lxs) -- Fix: Support AWS GovCloud and China region ARNs in Bedrock provider for expanded regional support (PR #10157 by @app/roomote) - -## [3.36.10] - 2025-12-17 - -![3.36.10 Release - Gemini 3 Flash Preview](/releases/3.36.10-release.png) - -- Add support for Gemini 3 Flash Preview model in the Gemini provider (PR #10151 by @hannesrudolph) -- Implement interleaved thinking mode for DeepSeek Reasoner, enabling streaming reasoning output (PR #9969 by @hannesrudolph) -- Fix: Preserve reasoning_content during tool call sequences in DeepSeek (PR #10141 by @hannesrudolph) -- Fix: Correct token counting for context truncation display (PR #9961 by @hannesrudolph) -- Update Next.js dependency to ~15.2.8 (PR #10140 by @jr) - -## [3.36.9] - 2025-12-15 - -![3.36.9 Release - Cross-Provider Compatibility](/releases/3.36.9-release.png) - -- Fix: Normalize tool call IDs for cross-provider compatibility via OpenRouter, ensuring consistent handling across different AI providers (PR #10102 by @daniel-lxs) -- Fix: Add additionalProperties: false to nested MCP tool schemas, improving schema validation and preventing unexpected properties (PR #10109 by @daniel-lxs) -- Fix: Validate tool_result IDs in delegation 
resume flow, preventing errors when resuming delegated tasks (PR #10135 by @daniel-lxs) -- Feat: Add full error details to streaming failure dialog, providing more comprehensive information for debugging streaming issues (PR #10131 by @roomote) -- Feat: Improve evals UI with tool groups and duration fix, enhancing the evaluation interface organization and timing accuracy (PR #10133 by @hannesrudolph) - -## [3.36.8] - 2025-12-16 - -![3.36.8 Release - Native Tools Enabled by Default](/releases/3.36.8-release.png) - -- Implement incremental token-budgeted file reading for smarter, more efficient file content retrieval (PR #10052 by @jr) -- Enable native tools by default for multiple providers including OpenAI, Azure, Google, Vertex, and more (PR #10059 by @daniel-lxs) -- Enable native tools by default for Anthropic and add telemetry tracking for tool format usage (PR #10021 by @daniel-lxs) -- Fix: Prevent race condition from deleting wrong API messages during streaming (PR #10113 by @hannesrudolph) -- Fix: Prevent duplicate MCP tools error by deduplicating servers at source (PR #10096 by @daniel-lxs) -- Remove strict ARN validation for Bedrock custom ARN users allowing more flexibility (#10108 by @wisestmumbler, PR #10110 by @roomote) -- Add metadata to error details dialog for improved debugging (PR #10050 by @roomote) -- Add configuration to control public sharing feature (PR #10105 by @mrubens) -- Remove description from Bedrock service tiers for cleaner UI (PR #10118 by @mrubens) -- Fix: Correct link to provider pricing page on web (PR #10107 by @brunobergher) - -## [3.36.7] - 2025-12-15 - -- Improve tool configuration for OpenAI models in OpenRouter (PR #10082 by @hannesrudolph) -- Capture more detailed provider-specific error information from OpenRouter for better debugging (PR #10073 by @jr) -- Add Amazon Nova 2 Lite model to Bedrock provider (#9802 by @Smartsheet-JB-Brown, PR #9830 by @roomote) -- Add AWS Bedrock service tier support (#9874 by 
@Smartsheet-JB-Brown, PR #9955 by @roomote) -- Remove auto-approve toggles for to-do and retry actions to simplify the approval workflow (PR #10062 by @hannesrudolph) -- Move isToolAllowedForMode out of shared directory for better code organization (PR #10089 by @cte) -- Improve run logs and formatters in web-evals for better evaluation tracking (PR #10081 by @hannesrudolph) - -## [3.36.6] - 2025-12-12 - -![3.36.6 Release - Tool Alias Support](/releases/3.36.6-release.png) - -- Add tool alias support for model-specific tool customization, allowing users to configure how tools are presented to different AI models (PR #9989 by @daniel-lxs) -- Sanitize MCP server and tool names for API compatibility, ensuring special characters don't cause issues with API calls (PR #10054 by @daniel-lxs) -- Improve auto-approve timer visibility in follow-up suggestions for better user awareness of pending actions (PR #10048 by @brunobergher) -- Fix: Cancel auto-approval timeout when user starts typing, preventing accidental auto-approvals during user interaction (PR #9937 by @roomote) -- Add WorkspaceTaskVisibility type for organization cloud settings to support team visibility controls (PR #10020 by @roomote) -- Fix: Extract raw error message from OpenRouter metadata for clearer error reporting (PR #10039 by @daniel-lxs) -- Fix: Show tool protocol dropdown for LiteLLM provider, restoring missing configuration option (PR #10053 by @daniel-lxs) - -## [3.36.5] - 2025-12-11 - -![3.36.5 Release - GPT-5.2](/releases/3.36.5-release.png) - -- Add: GPT-5.2 model to openai-native provider (PR #10024 by @hannesrudolph) -- Add: Toggle for Enter key behavior in chat input allowing users to configure whether Enter sends or creates new line (#8555 by @lmtr0, PR #10002 by @hannesrudolph) -- Add: App version to telemetry exception captures and filter 402 errors (PR #9996 by @daniel-lxs) -- Fix: Handle empty Gemini responses and reasoning loops to prevent infinite retries (PR #10007 by @hannesrudolph) 
-- Fix: Add missing tool_result blocks to prevent API errors when tool results are expected (PR #10015 by @daniel-lxs) -- Fix: Filter orphaned tool_results when more results than tool_uses to prevent message validation errors (PR #10027 by @daniel-lxs) -- Fix: Add general API endpoints for Z.ai provider (#9879 by @richtong, PR #9894 by @roomote) -- Fix: Apply versioned settings on nightly builds (PR #9997 by @hannesrudolph) -- Remove: Glama provider (PR #9801 by @hannesrudolph) -- Remove: Deprecated list_code_definition_names tool (PR #10005 by @hannesrudolph) - -## [3.36.4] - 2025-12-10 - -![3.36.4 Release - Error Details Modal](/releases/3.36.4-release.png) - -- Add error details modal with on-demand display for improved error visibility when debugging issues (PR #9985 by @roomote) -- Fix: Prevent premature rawChunkTracker clearing for MCP tools, improving reliability of MCP tool streaming (PR #9993 by @daniel-lxs) -- Fix: Filter out 429 rate limit errors from API error telemetry for cleaner metrics (PR #9987 by @daniel-lxs) -- Fix: Correct TODO list display order in chat view to show items in proper sequence (PR #9991 by @roomote) - -## [3.36.3] - 2025-12-09 - -![3.36.3 Release](/releases/3.36.3-release.png) - -- Refactor: Unified context-management architecture with improved UX for better context control (PR #9795 by @hannesrudolph) -- Add new `search_replace` native tool for single-replacement operations with improved editing precision (PR #9918 by @hannesrudolph) -- Streaming tool stats and token usage throttling for better real-time feedback during generation (PR #9926 by @hannesrudolph) -- Add versioned settings support with minPluginVersion gating for Roo provider (PR #9934 by @hannesrudolph) -- Make Architect mode save plans to `/plans` directory and gitignore it (PR #9944 by @brunobergher) -- Add announcement support CTA and social icons to UI (PR #9945 by @hannesrudolph) -- Add ability to save screenshots from the browser tool (PR #9963 by @mrubens) -- 
Refactor: Decouple tools from system prompt for cleaner architecture (PR #9784 by @daniel-lxs) -- Update DeepSeek models to V3.2 with new pricing (PR #9962 by @hannesrudolph) -- Add minimal and medium reasoning effort levels for Gemini models (PR #9973 by @hannesrudolph) -- Update xAI models catalog with latest model options (PR #9872 by @hannesrudolph) -- Add DeepSeek V3-2 support for Baseten provider (PR #9861 by @AlexKer) -- Tweaks to Baseten model definitions for better defaults (PR #9866 by @mrubens) -- Fix: Add xhigh reasoning effort support for gpt-5.1-codex-max (#9891 by @andrewginns, PR #9900 by @andrewginns) -- Fix: Add Kimi, MiniMax, and Qwen model configurations for Bedrock (#9902 by @jbearak, PR #9905 by @app/roomote) -- Configure tool preferences for xAI models (PR #9923 by @hannesrudolph) -- Default to using native tools when supported on OpenRouter (PR #9878 by @mrubens) -- Fix: Exclude apply_diff from native tools when diffEnabled is false (#9919 by @denis-kudelin, PR #9920 by @app/roomote) -- Fix: Always show tool protocol selector for openai-compatible provider (#9965 by @bozoweed, PR #9966 by @hannesrudolph) -- Fix: Respect explicit supportsReasoningEffort array values for proper model configuration (PR #9970 by @hannesrudolph) -- Add timeout configuration to OpenAI Compatible Provider Client (PR #9898 by @dcbartlett) -- Revert default tool protocol change from xml to native for stability (PR #9956 by @mrubens) -- Remove defaultTemperature from Roo provider configuration (PR #9932 by @mrubens) -- Improve OpenAI error messages to be more useful for debugging (PR #9639 by @mrubens) -- Better error logs for parseToolCall exceptions (PR #9857 by @cte) -- Improve cloud job error logging for RCC provider errors (PR #9924 by @cte) -- Fix: Display actual API error message instead of generic text on retry (PR #9954 by @hannesrudolph) -- Add API error telemetry to OpenRouter provider for better diagnostics (PR #9953 by @daniel-lxs) -- Fix: Sanitize 
removed/invalid API providers to prevent infinite loop (PR #9869 by @hannesrudolph) -- Fix: Use foreground color for context-management icons (PR #9912 by @hannesrudolph) -- Fix: Suppress 'ask promise was ignored' error in handleError (PR #9914 by @daniel-lxs) -- Fix: Process finish_reason to emit tool_call_end events properly (PR #9927 by @daniel-lxs) -- Fix: Add finish_reason processing to xai.ts provider (PR #9929 by @daniel-lxs) -- Fix: Validate and fix tool_result IDs before API requests (PR #9952 by @daniel-lxs) -- Fix: Return undefined instead of 0 for disabled API timeout (PR #9960 by @hannesrudolph) -- Stop making unnecessary count_tokens requests for better performance (PR #9884 by @mrubens) -- Refactor: Consolidate ThinkingBudget components and fix disable handling (PR #9930 by @hannesrudolph) -- Forbid time estimates in architect mode for more focused planning (PR #9931 by @app/roomote) -- Web: Add product pages (PR #9865 by @brunobergher) -- Make eval runs deletable in the web UI (PR #9909 by @mrubens) -- Feat: Change defaultToolProtocol default from xml to native (later reverted) (PR #9892 by @app/roomote) - -## [3.36.2] - 2025-12-04 - -![3.36.2 Release - Dynamic API Settings](/releases/3.36.2-release.png) - -- Restrict GPT-5 tool set to apply_patch for improved compatibility (PR #9853 by @hannesrudolph) -- Add dynamic settings support for Roo models from API, allowing model-specific configurations to be fetched dynamically (PR #9852 by @hannesrudolph) -- Fix: Resolve Chutes provider model fetching issue (PR #9854 by @cte) - -## [3.36.1] - 2025-12-04 - -![3.36.1 Release - Message Management & Stability Improvements](/releases/3.36.1-release.png) - -- Add MessageManager layer for centralized history coordination, fixing message synchronization issues (PR #9842 by @hannesrudolph) -- Fix: Prevent cascading truncation loop by only truncating visible messages (PR #9844 by @hannesrudolph) -- Fix: Handle unknown/invalid native tool calls to prevent extension 
freeze (PR #9834 by @daniel-lxs) -- Always enable reasoning for models that require it (PR #9836 by @cte) -- ChatView: Smoother stick-to-bottom behavior during streaming (PR #8999 by @hannesrudolph) -- UX: Improved error messages and documentation links (PR #9777 by @brunobergher) -- Fix: Overly round follow-up question suggestions styling (PR #9829 by @brunobergher) -- Add symlink support for slash commands in .roo/commands folder (PR #9838 by @mrubens) -- Ignore input to the execa terminal process for safer command execution (PR #9827 by @mrubens) -- Be safer about large file reads (PR #9843 by @jr) -- Add gpt-5.1-codex-max model to OpenAI provider (PR #9848 by @hannesrudolph) -- Evals UI: Add filtering, bulk delete, tool consolidation, and run notes (PR #9837 by @hannesrudolph) -- Evals UI: Add multi-model launch and UI improvements (PR #9845 by @hannesrudolph) -- Web: New pricing page (PR #9821 by @brunobergher) - -## [3.36.0] - 2025-12-04 - -![3.36.0 Release - Rewind Kangaroo](/releases/3.36.0-release.png) - -- Fix: Restore context when rewinding after condense (#8295 by @hannesrudolph, PR #9665 by @hannesrudolph) -- Add reasoning_details support to Roo provider for enhanced model reasoning visibility (PR #9796 by @app/roomote) -- Default to native tools for all models in the Roo provider for improved performance (PR #9811 by @mrubens) -- Enable search_and_replace for Minimax models (PR #9780 by @mrubens) -- Fix: Resolve Vercel AI Gateway model fetching issues (PR #9791 by @cte) -- Fix: Apply conservative max tokens for Cerebras provider (PR #9804 by @sebastiand-cerebras) -- Fix: Remove omission detection logic to eliminate false positives (#9785 by @Michaelzag, PR #9787 by @app/roomote) -- Refactor: Remove deprecated insert_content tool (PR #9751 by @daniel-lxs) -- Chore: Hide parallel tool calls experiment and disable feature (PR #9798 by @hannesrudolph) -- Update next.js documentation site dependencies (PR #9799 by @jr) -- Fix: Correct download count 
display on homepage (PR #9807 by @mrubens) - -## [3.35.5] - 2025-12-03 - -- Feat: Add provider routing selection for OpenRouter embeddings (#9144 by @SannidhyaSah, PR #9693 by @SannidhyaSah) -- Default Minimax M2 to native tool calling (PR #9778 by @mrubens) -- Sanitize the native tool calls to fix a bug with Gemini (PR #9769 by @mrubens) -- UX: Updates to CloudView (PR #9776 by @roomote) - -## [3.35.4] - 2025-12-02 - -- Fix: Handle malformed native tool calls to prevent hanging (PR #9758 by @daniel-lxs) -- Fix: Remove reasoning toggles for GLM-4.5 and GLM-4.6 on z.ai provider (PR #9752 by @roomote) -- Refactor: Remove line_count parameter from write_to_file tool (PR #9667 by @hannesrudolph) - -## [3.35.3] - 2025-12-02 - -- Switch to new welcome view for improved onboarding experience (PR #9741 by @mrubens) -- Update homepage with latest changes (PR #9675 by @brunobergher) -- Improve privacy for stealth models by adding vendor confidentiality section to system prompt (PR #9742 by @mrubens) - -## [3.35.2] - 2025-12-01 - -![3.35.2 Release - Model Default Temperatures](/releases/3.35.2-release.png) - -- Allow models to contain default temperature settings for provider-specific optimal defaults (PR #9734 by @mrubens) -- Add tag-based native tool calling detection for Roo provider models (PR #9735 by @mrubens) -- Enable native tool support for all LiteLLM models by default (PR #9736 by @mrubens) -- Pass app version to provider for improved request tracking (PR #9730 by @cte) - -## [3.35.1] - 2025-12-01 - -- Fix: Flush pending tool results before task delegation (PR #9726 by @daniel-lxs) -- Improve: Better IPC error logging for easier debugging (PR #9727 by @cte) - -## [3.35.0] - 2025-12-01 - -![3.35.0 Release - Subtasks & Native Tools](/releases/3.35.0-release.png) - -- Metadata-driven subtasks with automatic parent resume and single-open safety for improved task orchestration (#8081 by @hannesrudolph, PR #9090 by @hannesrudolph) -- Native tool calling support expanded 
across many providers: Bedrock (PR #9698 by @mrubens), Cerebras (PR #9692 by @mrubens), Chutes with auto-detection from API (PR #9715 by @daniel-lxs), DeepInfra (PR #9691 by @mrubens), DeepSeek and Doubao (PR #9671 by @daniel-lxs), Groq (PR #9673 by @daniel-lxs), LiteLLM (PR #9719 by @daniel-lxs), Ollama (PR #9696 by @mrubens), OpenAI-compatible providers (PR #9676 by @daniel-lxs), Requesty (PR #9672 by @daniel-lxs), Unbound (PR #9699 by @mrubens), Vercel AI Gateway (PR #9697 by @mrubens), Vertex Gemini (PR #9678 by @daniel-lxs), and xAI with new Grok 4 Fast and Grok 4.1 Fast models (PR #9690 by @mrubens) -- Fix: Preserve tool_use blocks in summary for parallel tool calls (#9700 by @SilentFlower, PR #9714 by @SilentFlower) -- Default Grok Code Fast to native tools for better performance (PR #9717 by @mrubens) -- UX improvements to the Roo Code Router-centric onboarding flow (PR #9709 by @brunobergher) -- UX toolbar cleanup and settings consolidation for a cleaner interface (PR #9710 by @brunobergher) -- Add model-specific tool customization via `excludedTools` and `includedTools` configuration (PR #9641 by @daniel-lxs) -- Add new `apply_patch` native tool for more efficient file editing operations (PR #9663 by @hannesrudolph) -- Add new `search_and_replace` tool for batch text replacements across files (PR #9549 by @hannesrudolph) -- Add debug buttons to view API and UI history for troubleshooting (PR #9684 by @hannesrudolph) -- Include tool format in environment details for better context awareness (PR #9661 by @mrubens) -- Fix: Display install count in millions instead of thousands (PR #9677 by @app/roomote) -- Web-evals improvements: add task log viewing, export failed logs, and new run options (PR #9637 by @hannesrudolph) -- Web-evals updates: add kill run functionality (PR #9681 by @hannesrudolph) -- Fix: Prevent navigation buttons from wrapping on smaller screens (PR #9721 by @app/roomote) - -## [3.34.8] - 2025-11-27 - -![3.34.8 Release - Race Condition 
Fix](/releases/3.34.8-release.png) - -- Fix: Race condition in new_task tool for native protocol (PR #9655 by @daniel-lxs) - -## [3.34.7] - 2025-11-27 - -![3.34.7 Release - More Native Tool Integrations](/releases/3.34.7-release.png) - -- Support native tools in the Anthropic provider for improved tool calling (PR #9644 by @mrubens) -- Enable native tool calling for z.ai models (PR #9645 by @mrubens) -- Enable native tool calling for Moonshot models (PR #9646 by @mrubens) -- Fix: OpenRouter tool calls handling improvements (PR #9642 by @mrubens) -- Fix: OpenRouter GPT-5 strict schema validation for read_file tool (PR #9633 by @daniel-lxs) -- Fix: Create parent directories early in write_to_file to prevent ENOENT errors (#9634 by @ivanenev, PR #9640 by @daniel-lxs) -- Fix: Disable native tools and temperature support for claude-code provider (PR #9643 by @hannesrudolph) -- Add 'taking you to cloud' screen after provider welcome for improved onboarding (PR #9652 by @mrubens) - -## [3.34.6] - 2025-11-26 - -![3.34.6 Release - Bedrock Embeddings](/releases/3.34.6-release.png) - -- Add support for AWS Bedrock embeddings in code indexing (#8658 by @kyle-hobbs, PR #9475 by @ggoranov-smar) -- Add native tool calling support for Mistral provider (PR #9625 by @hannesrudolph) -- Wire MULTIPLE_NATIVE_TOOL_CALLS experiment to OpenAI parallel_tool_calls for parallel tool execution (PR #9621 by @hannesrudolph) -- Add fine grained tool streaming for OpenRouter Anthropic (PR #9629 by @mrubens) -- Allow global inference selection for Bedrock when cross-region is enabled (PR #9616 by @roomote) -- Fix: Filter non-Anthropic content blocks before sending to Vertex API (#9583 by @cardil, PR #9618 by @hannesrudolph) -- Fix: Restore content undefined check in WriteToFileTool.handlePartial() (#9611 by @Lissanro, PR #9614 by @daniel-lxs) -- Fix: Prevent model cache from persisting empty API responses (#9597 by @zx2021210538, PR #9623 by @daniel-lxs) -- Fix: Exclude access_mcp_resource tool 
when MCP has no resources (PR #9615 by @daniel-lxs) -- Fix: Update default settings for inline terminal and codebase indexing (PR #9622 by @roomote) -- Fix: Convert line_ranges strings to lineRanges objects in native tool calls (PR #9627 by @daniel-lxs) -- Fix: Defer new_task tool_result until subtask completes for native protocol (PR #9628 by @daniel-lxs) - -## [3.34.5] - 2025-11-25 - -![3.34.5 Release - Experimental Parallel Tool Calling](/releases/3.34.5-release.png) - -- Experimental feature to enable multiple native tool calls per turn (PR #9273 by @daniel-lxs) -- Add Bedrock Opus 4.5 to global inference model list (PR #9595 by @roomote) -- Fix: Update API handler when toolProtocol changes (PR #9599 by @mrubens) -- Set native tools as default for minimax-m2 and claude-haiku-4.5 (PR #9586 by @daniel-lxs) -- Make single file read only apply to XML tools (PR #9600 by @mrubens) -- Enhance web-evals dashboard with dynamic tool columns and UX improvements (PR #9592 by @hannesrudolph) -- Revert "Add support for Roo Code Cloud as an embeddings provider" while we fix some issues (PR #9602 by @mrubens) - -## [3.34.4] - 2025-11-25 - -![3.34.4 Release - BFL Image Generation](/releases/3.34.4-release.png) - -- Add new Black Forest Labs image generation models, free on Roo Code Cloud and also available on OpenRouter (PR #9587 and #9589 by @mrubens) -- Fix: Preserve dynamic MCP tool names in native mode API history to prevent tool name mismatches (PR #9559 by @daniel-lxs) -- Fix: Preserve tool_use blocks in summary message during condensing with native tools to maintain conversation context (PR #9582 by @daniel-lxs) - -## [3.34.3] - 2025-11-25 - -![3.34.3 Release - Streaming and Opus 4.5](/releases/3.34.3-release.png) - -- Implement streaming for native tool calls, providing real-time feedback during tool execution (PR #9542 by @daniel-lxs) -- Add Claude Opus 4.5 model to Claude Code provider (PR #9560 by @mrubens) -- Add Claude Opus 4.5 model to Bedrock provider (#9571 by 
@pisicode, PR #9572 by @roomote) -- Enable caching for Opus 4.5 model to improve performance (#9567 by @iainRedro, PR #9568 by @roomote) -- Add support for Roo Code Cloud as an embeddings provider (PR #9543 by @mrubens) -- Fix ask_followup_question streaming issue and add missing tool cases (PR #9561 by @daniel-lxs) -- Add contact links to About Roo Code settings page (PR #9570 by @roomote) -- Switch from asdf to mise-en-place in bare-metal evals setup script (PR #9548 by @cte) - -## [3.34.2] - 2025-11-24 - -![3.34.2 Release - Opus Conductor](/releases/3.34.2-release.png) - -- Add support for Claude Opus 4.5 in Anthropic and Vertex providers (PR #9541 by @daniel-lxs) -- Add support for Claude Opus 4.5 in OpenRouter with prompt caching and reasoning budget (PR #9540 by @daniel-lxs) -- Add Roo Code Cloud as an image generation provider (PR #9528 by @mrubens) -- Fix: Gracefully skip unsupported content blocks in Gemini transformer (PR #9537 by @daniel-lxs) -- Fix: Flush LiteLLM cache when credentials change on refresh (PR #9536 by @daniel-lxs) -- Fix: Ensure XML parser state matches tool protocol on config update (PR #9535 by @daniel-lxs) -- Update Cerebras models (PR #9527 by @sebastiand-cerebras) -- Fix: Support reasoning_details format for Gemini 3 models (PR #9506 by @daniel-lxs) - -## [3.34.1] - 2025-11-23 - -- Show the prompt for image generation in the UI (PR #9505 by @mrubens) -- Fix double todo list display issue (PR #9517 by @mrubens) -- Add tracking for cloud synced messages (PR #9518 by @mrubens) -- Enable the Roo Code Router in evals (PR #9492 by @cte) - -## [3.34.0] - 2025-11-21 - -![3.34.0 Release - Browser Use 2.0](/releases/3.34.0-release.png) - -- Add Browser Use 2.0 with enhanced browser interaction capabilities (PR #8941 by @hannesrudolph) -- Add support for Baseten as a new AI provider (PR #9461 by @AlexKer) -- Improve base OpenAI compatible provider with better error handling and configuration (PR #9462 by @mrubens) -- Add provider-oriented 
welcome screen to improve onboarding experience (PR #9484 by @mrubens) -- Pin Roo provider to the top of the provider list for better discoverability (PR #9485 by @mrubens) -- Enhance native tool descriptions with examples and clarifications for better AI understanding (PR #9486 by @daniel-lxs) -- Fix: Make cancel button immediately responsive during streaming (#9435 by @jwadow, PR #9448 by @daniel-lxs) -- Fix: Resolve apply_diff performance regression from earlier changes (PR #9474 by @daniel-lxs) -- Fix: Implement model cache refresh to prevent stale disk cache issues (PR #9478 by @daniel-lxs) -- Fix: Copy model-level capabilities to OpenRouter endpoint models correctly (PR #9483 by @daniel-lxs) -- Fix: Add fallback to yield tool calls regardless of finish_reason (PR #9476 by @daniel-lxs) - -## [3.33.3] - 2025-11-20 - -![3.33.3 Release - Gemini 3 Pro Image Preview](/releases/3.33.3-release.png) - -- Add Google Gemini 3 Pro Image Preview to image generation models (PR #9440 by @app/roomote) -- Add support for Minimax as Anthropic-compatible provider (PR #9455 by @daniel-lxs) -- Store reasoning in conversation history for all providers (PR #9451 by @daniel-lxs) -- Fix: Improve preserveReasoning flag to control API reasoning inclusion (PR #9453 by @daniel-lxs) -- Fix: Prevent OpenAI Native parallel tool calls for native tool calling (PR #9433 by @hannesrudolph) -- Fix: Improve search and replace symbol parsing (PR #9456 by @daniel-lxs) -- Fix: Send tool_result blocks for skipped tools in native protocol (PR #9457 by @daniel-lxs) -- Fix: Improve markdown formatting and add reasoning support (PR #9458 by @daniel-lxs) -- Fix: Prevent duplicate environment_details when resuming cancelled tasks (PR #9442 by @daniel-lxs) -- Improve read_file tool description with examples (PR #9422 by @daniel-lxs) -- Update glob dependency to ^11.1.0 (PR #9449 by @jr) -- Update tar-fs to 3.1.1 via pnpm override (PR #9450 by @app/roomote) - -## [3.33.2] - 2025-11-19 - -- Enable native tool 
calling for Gemini provider (PR #9343 by @hannesrudolph) -- Add RCC credit balance display (PR #9386 by @jr) -- Fix: Preserve user images in native tool call results (PR #9401 by @daniel-lxs) -- Perf: Reduce excessive getModel() calls and implement disk cache fallback (PR #9410 by @daniel-lxs) -- Show zero price for free models (PR #9419 by @mrubens) - -## [3.33.1] - 2025-11-18 - -![3.33.1 Release - Native Tool Protocol Fixes](/releases/3.33.1-release.png) - -- Add native tool calling support to OpenAI-compatible (PR #9369 by @mrubens) -- Fix: Resolve native tool protocol race condition causing 400 errors (PR #9363 by @daniel-lxs) -- Fix: Update tools to return structured JSON for native protocol (PR #9373 by @daniel-lxs) -- Fix: Include nativeArgs in tool repetition detection (PR #9377 by @daniel-lxs) -- Fix: Ensure no XML parsing when protocol is native (PR #9371 by @daniel-lxs) -- Fix: Gemini maxOutputTokens and reasoning config (PR #9375 by @hannesrudolph) -- Fix: Gemini thought signature validation and token counting errors (PR #9380 by @hannesrudolph) -- Fix: Exclude XML tool examples from MODES section when native protocol enabled (PR #9367 by @daniel-lxs) -- Retry eval tasks if API instability detected (PR #9365 by @cte) -- Add toolProtocol property to PostHog tool usage telemetry (PR #9374 by @app/roomote) - -## [3.33.0] - 2025-11-18 - -![3.33.0 Release - Twin Kangaroos and the Gemini Constellation](/releases/3.33.0-release.png) - -- Add Gemini 3 Pro Preview model (PR #9357 by @hannesrudolph) -- Improve Google Gemini defaults with better temperature and cost reporting (PR #9327 by @hannesrudolph) -- Enable native tool calling for openai-native provider (PR #9348 by @hannesrudolph) -- Add git status information to environment details (PR #9310 by @daniel-lxs) -- Add tool protocol selector to advanced settings (PR #9324 by @daniel-lxs) -- Implement dynamic tool protocol resolution with proper precedence hierarchy (PR #9286 by @daniel-lxs) -- Move 
Import/Export functionality to Modes view toolbar and cleanup Mode Edit view (PR #9077 by @hannesrudolph) -- Update cloud agent CTA to point to setup page (PR #9338 by @app/roomote) -- Fix: Prevent duplicate tool_result blocks in native tool protocol (PR #9248 by @daniel-lxs) -- Fix: Format tool responses properly for native protocol (PR #9270 by @daniel-lxs) -- Fix: Centralize toolProtocol configuration checks (PR #9279 by @daniel-lxs) -- Fix: Preserve tool blocks for native protocol in conversation history (PR #9319 by @daniel-lxs) -- Fix: Prevent infinite loop when task_done succeeds (PR #9325 by @daniel-lxs) -- Fix: Sync parser state with profile/model changes (PR #9355 by @daniel-lxs) -- Fix: Pass tool protocol parameter to lineCountTruncationError (PR #9358 by @daniel-lxs) -- Use VSCode theme color for outline button borders (PR #9336 by @app/roomote) -- Replace broken badgen.net badges with shields.io (PR #9318 by @app/roomote) -- Add max git status files setting to evals (PR #9322 by @mrubens) -- Roo Code Router pricing page and changes elsewhere (PR #9195 by @brunobergher) - -## [3.32.1] - 2025-11-14 - -![3.32.1 Release - Bug Fixes](/releases/3.32.1-release.png) - -- Fix: Add abort controller for request cancellation in OpenAI native protocol (PR #9276 by @daniel-lxs) -- Fix: Resolve duplicate tool blocks causing 'tool has already been used' error in native protocol mode (PR #9275 by @daniel-lxs) -- Fix: Prevent duplicate tool_result blocks in native protocol mode for read_file (PR #9272 by @daniel-lxs) -- Fix: Correct OpenAI Native handling of encrypted reasoning blocks to prevent errors during condensing (PR #9263 by @hannesrudolph) -- Fix: Disable XML parser for native tool protocol to prevent parsing conflicts (PR #9277 by @daniel-lxs) - -## [3.32.0] - 2025-11-14 - -![3.32.0 Release - GPT-5.1 models and OpenAI prompt caching](/releases/3.32.0-release.png) - -- Feature: Add GPT-5.1 models to OpenAI provider (PR #9252 by @hannesrudolph) -- Feature: 
Support for OpenAI Responses 24 hour prompt caching (PR #9259 by @hannesrudolph) -- Fix: Repair the share button in the UI (PR #9253 by @hannesrudolph) -- Docs: Include PR numbers in the release guide to improve traceability (PR #9236 by @hannesrudolph) - -## [3.31.3] - 2025-11-13 - -![3.31.3 Release - Kangaroo Decrypting a Message](/releases/3.31.3-release.png) - -- Fix: OpenAI Native encrypted_content handling and remove gpt-5-chat-latest verbosity flag (#9225 by @politsin, PR by @hannesrudolph) -- Fix: Roo Code Router Anthropic input token normalization to avoid double-counting (thanks @hannesrudolph!) -- Refactor: Rename sliding-window to context-management and truncateConversationIfNeeded to manageContext (thanks @hannesrudolph!) - -## [3.31.2] - 2025-11-12 - -- Fix: Apply updated API profile settings when provider/model unchanged (#9208 by @hannesrudolph, PR by @hannesrudolph) -- Migrate conversation continuity to plugin-side encrypted reasoning items using Responses API for improved reliability (thanks @hannesrudolph!) -- Fix: Include mcpServers in getState() for auto-approval (#9190 by @bozoweed, PR by @daniel-lxs) -- Batch settings updates from the webview to the extension host for improved performance (thanks @cte!) -- Fix: Replace rate-limited badges with badgen.net to improve README reliability (thanks @daniel-lxs!) - -## [3.31.1] - 2025-11-11 - -![3.31.1 Release - Kangaroo Stuck in the Clouds](/releases/3.31.1-release.png) - -- Fix: Prevent command_output ask from blocking in cloud/headless environments (thanks @daniel-lxs!) -- Add IPC command for sending messages to the current task (thanks @mrubens!) -- Fix: Model switch re-applies selected profile, ensuring task configuration stays in sync (#9179 by @hannesrudolph, PR by @hannesrudolph) -- Move auto-approval logic from `ChatView` to `Task` for better architecture (thanks @cte!) -- Add custom Button component with variant system (thanks @brunobergher!) 
- -## [3.31.0] - 2025-11-07 - -![3.31.0 Release - Todo List and Task Header Improvements](/releases/3.31.0-release.png) - -- Improvements to to-do lists and task headers (thanks @brunobergher!) -- Fix: Prevent crash when streaming chunks have null choices array (thanks @daniel-lxs!) -- Fix: Prevent context condensing on settings save when provider/model unchanged (#4430 by @hannesrudolph, PR by @daniel-lxs) -- Fix: Respect custom OpenRouter URL for all API operations (#8947 by @sstraus, PR by @roomote) -- Add comprehensive error logging to Roo Cloud provider (thanks @daniel-lxs!) -- UX: Less caffeinated kangaroo (thanks @brunobergher!) - -## [3.30.3] - 2025-11-06 - -![3.30.3 Release - Moonshot Brain](/releases/3.30.3-release.png) - -- Feat: Add kimi-k2-thinking model to Moonshot provider (thanks @daniel-lxs!) -- Fix: Auto-retry on empty assistant response to prevent task failures (#9076 by @Akillatech, PR by @daniel-lxs) -- Fix: Use system role for OpenAI Compatible provider when streaming is disabled (#8215 by @whitfin, PR by @roomote) -- Fix: Prevent notification sound on attempt_completion with queued messages (#8537 by @hannesrudolph, PR by @roomote) -- Feat: Auto-switch to imported mode with architect fallback for better mode detection (#8239 by @hannesrudolph, PR by @daniel-lxs) -- Feat: Add MiniMax-M2-Stable model and enable prompt caching (#9070 by @nokaka, PR by @roomote) -- Feat: Improve diff appearance in main chat view (thanks @hannesrudolph!) -- UX: Home screen visuals (thanks @brunobergher!) -- Docs: Clarify that setting 0 disables Error & Repetition Limit (thanks @roomote!) -- Chore: Update dependency @changesets/cli to v2.29.7 (thanks @renovate!) - -## [3.30.2] - 2025-11-05 - -![3.30.2 Release - Eliminating UI Flicker](/releases/3.30.2-release.png) - -- Fix: eliminate UI flicker during task cancellation (thanks @daniel-lxs!) 
-- Add Global Inference support for Bedrock models (#8750 by @ronyblum, PR by @hannesrudolph) -- Add Qwen3 embedding models (0.6B and 4B) to OpenRouter support (#9058 by @dmarkey, PR by @app/roomote) -- Fix: resolve incorrect commit location when GIT_DIR set in Dev Containers (#4567 by @nonsleepr, PR by @heyseth) -- Fix: keep pinned models fixed at top of scrollable list (#8812 by @XiaoYingYo, PR by @app/roomote) -- Fix: update Opus 4.1 max tokens from 8K to 32K (#9045 by @kaveh-deriv, PR by @app/roomote) -- Set Claude Sonnet 4.5 as default for key providers (thanks @hannesrudolph!) -- Fix: dynamic provider model validation to prevent cross-contamination (#9047 by @NotADev137, PR by @daniel-lxs) -- Fix: Bedrock user agent to report full SDK details (#9031 by @ajjuaire, PR by @ajjuaire) -- Add file path tooltips with centralized PathTooltip component (#8278 by @da2ce7, PR by @daniel-lxs) -- Add conditional test running to pre-push hook (thanks @daniel-lxs!) -- Update Cerebras integration (thanks @sebastiand-cerebras!) - -## [3.30.1] - 2025-11-04 - -- Fix: Correct OpenRouter Mistral model embedding dimension from 3072 to 1536 (thanks @daniel-lxs!) -- Revert: Previous UI flicker fix that caused issues with task resumption (thanks @mrubens!) - -## [3.30.0] - 2025-11-03 - -![3.30.0 Release - PR Fixer](/releases/3.30.0-release.png) - -- Feat: Add OpenRouter embedding provider support (#8972 by @dmarkey, PR by @dmarkey) -- Feat: Add GLM-4.6 model to Fireworks provider (#8752 by @mmealman, PR by @app/roomote) -- Feat: Add MiniMax M2 model to Fireworks provider (#8961 by @dmarkey, PR by @app/roomote) -- Feat: Add preserveReasoning flag to include reasoning in API history (thanks @daniel-lxs!) -- Fix: Prevent message loss during queue drain race condition (#8536 by @hannesrudolph, PR by @daniel-lxs) -- Fix: Capture the reasoning content in base-openai-compatible for GLM 4.6 (thanks @mrubens!) -- Fix: Create new Requesty profile during OAuth (thanks @Thibault00!) 
-- Fix: Prevent UI flicker and enable resumption after task cancellation (thanks @daniel-lxs!) -- Fix: Cleanup terminal settings tab and change default terminal to inline (thanks @hannesrudolph!) - -## [3.29.5] - 2025-11-01 - -- Fix: Resolve Qdrant codebase_search error by adding keyword index for type field (#8963 by @rossdonald, PR by @app/roomote) -- Fix cost and token tracking between provider styles to ensure accurate usage metrics (thanks @mrubens!) - -## [3.29.4] - 2025-10-30 - -- Feat: Add Minimax Provider (thanks @Maosghoul!) -- Fix: prevent infinite loop when canceling during auto-retry (#8901 by @mini2s, PR by @app/roomote) -- Fix: Enhanced codebase index recovery and reuse ('Start Indexing' button now reuses existing Qdrant index) (#8129 by @jaroslaw-weber, PR by @heyseth) -- Fix: make code index initialization non-blocking at activation (#8777 by @cjlawson02, PR by @daniel-lxs) -- Fix: remove search_and_replace tool from codebase (#8891 by @hannesrudolph, PR by @app/roomote) -- Fix: custom modes under custom path not showing (#8122 by @hannesrudolph, PR by @elianiva) -- Fix: prevent MCP server restart when toggling tool permissions (#8231 by @hannesrudolph, PR by @heyseth) -- Fix: truncate type definition to match max read line (#8149 by @chenxluo, PR by @elianiva) -- Fix: auto-sync enableReasoningEffort with reasoning dropdown selection (thanks @daniel-lxs!) -- Fix: Gate auth-driven Roo model refresh to active provider only (thanks @daniel-lxs!) -- Prevent a noisy cloud agent exception (thanks @cte!) -- Feat: improve @ file search for large projects (#5721 by @Naituw, PR by @daniel-lxs) -- Feat: add zai-glm-4.6 model to Cerebras and set gpt-oss-120b as default (thanks @kevint-cerebras!) -- Feat: rename MCP Errors tab to Logs for mixed-level messages (#8893 by @hannesrudolph, PR by @app/roomote) -- docs(vscode-lm): clarify VS Code LM API integration warning (thanks @hannesrudolph!) 
- -## [3.29.3] - 2025-10-28 - -- Update Gemini models with latest 09-2025 versions including Gemini 2.5 Pro and Flash (#8485 by @cleacos, PR by @roomote) -- Add reasoning support for Z.ai GLM binary thinking mode (#8465 by @BeWater799, PR by @daniel-lxs) -- Enable reasoning in Roo provider (thanks @mrubens!) -- Add settings to configure time and cost display in system prompt (#8450 by @jaxnb, PR by @roomote) -- Fix: Use max_output_tokens when available in LiteLLM fetcher (#8454 by @fabb, PR by @roomote) -- Fix: Process queued messages after context condensing completes (#8477 by @JosXa, PR by @roomote) -- Fix: Use monotonic clock for rate limiting to prevent timing issues (#7770 by @intermarkec, PR by @chrarnoldus) -- Fix: Resolve checkpoint menu popover overflow (thanks @daniel-lxs!) -- Fix: LiteLLM test failures after merge (thanks @daniel-lxs!) -- Improve UX: Focus textbox and add newlines after adding to context (thanks @mrubens!) - -## [3.29.2] - 2025-10-27 - -- Add support for LongCat-Flash-Thinking-FP8 models in Chutes AI provider (#8425 by @leakless21, PR by @roomote) -- Fix: Remove specific Claude model version from settings descriptions to avoid outdated references (#8435 by @rwydaegh, PR by @roomote) -- Fix: Correct caching logic in Roo provider to improve performance (thanks @mrubens!) -- Fix: Ensure free models don't display pricing information in the UI (thanks @mrubens!) - -## [3.29.1] - 2025-10-26 - -![3.29.1 Release - Window Cleaning](/releases/3.29.1-release.png) - -- Fix: Clean up max output token calculations to prevent context window overruns (#8821 by @enerage, PR by @roomote) -- Fix: Change Add to Context keybinding to avoid Redo conflict (#8652 by @swythan, PR by @roomote) -- Fix provider model loading race conditions (thanks @mrubens!) 
- -## [3.29.0] - 2025-10-24 - -![3.29.0 Release - Intelligent File Reading](/releases/3.29.0-release.png) - -- Add token-budget based file reading with intelligent preview to avoid context overruns (thanks @daniel-lxs!) -- Enable browser-use tool for all image-capable models (#8116 by @hannesrudolph, PR by @app/roomote!) -- Add dynamic model loading for Roo Code Router (thanks @app/roomote!) -- Fix: Respect nested .gitignore files in search_files (#7921 by @hannesrudolph, PR by @daniel-lxs) -- Fix: Preserve trailing newlines in stripLineNumbers for apply_diff (#8020 by @liyi3c, PR by @app/roomote) -- Fix: Exclude max tokens field for models that don't support it in export (#7944 by @hannesrudolph, PR by @elianiva) -- Retry API requests on stream failures instead of aborting task (thanks @daniel-lxs!) -- Improve auto-approve button responsiveness (thanks @daniel-lxs!) -- Add checkpoint initialization timeout settings and fix checkpoint timeout warnings (#7843 by @NaccOll, PR by @NaccOll) -- Always show checkpoint restore options regardless of change detection (thanks @daniel-lxs!) -- Improve checkpoint menu translations (thanks @daniel-lxs!) -- Add GLM-4.6-turbo model to chutes ai provider (thanks @mohammad154!) -- Add Claude Haiku 4.5 to prompt caching models (thanks @hannesrudolph!) -- Expand Z.ai model coverage with GLM-4.5-X, AirX, Flash (thanks @hannesrudolph!) -- Update Mistral Medium model name (#8362 by @ThomsenDrake, PR by @ThomsenDrake) -- Remove GPT-5 instructions/reasoning_summary from UI message metadata to prevent ui_messages.json bloat (thanks @hannesrudolph!) -- Normalize docs-extractor audience tags; remove admin/stakeholder; strip tool invocations (thanks @hannesrudolph!) -- Update X/Twitter username from roo_code to roocode (thanks @app/roomote!) -- Update Configuring Profiles video link (thanks @app/roomote!) -- Fix link text for Roomote Control in README (thanks @laz-001!) -- Remove verbose error for cloud agents (thanks @cte!) 
-- Try 5s status mutation timeout (thanks @cte!) - -## [3.28.18] - 2025-10-17 - -- Fix: Remove request content from UI messages to improve performance and reduce clutter (#5601 by @MuriloFP, #8594 by @multivac2x, #8690 by @hannesrudolph, PR by @mrubens) -- Fix: Prevent file editing issues when git diff views are open (thanks @hassoncs!) -- Fix: Add userAgent to Bedrock client for version tracking (#8660 by @ajjuaire, PR by @app/roomote) -- Feat: Z AI now uses only two coding endpoints for better performance (#8687 by @hannesrudolph) -- Feat: Update image generation model selection for improved quality (thanks @chrarnoldus!) - -## [3.28.17] - 2025-10-15 - -- Add support for Claude Haiku 4.5 model (thanks @daniel-lxs!) -- Fix: Update zh-TW run command title translation (thanks @PeterDaveHello!) - -## [3.28.16] - 2025-10-09 - -![3.28.16 Release - Expanded Context Window](/releases/3.28.16-release.png) - -- feat: Add Claude Sonnet 4.5 1M context window support for Claude Code (thanks @ColbySerpa!) -- feat: Identify cloud tasks in the extension bridge (thanks @cte!) -- fix: Add the parent task ID in telemetry (thanks @mrubens!) - -## [3.28.15] - 2025-10-03 - -![3.28.15 Release - Kangaroo Sliding Down a Chute](/releases/3.28.15-release.png) - -- Add new DeepSeek and GLM models with detailed descriptions to the Chutes provider (thanks @mohammad154!) -- Fix: properly reset cost limit tracking when user clicks "Reset and Continue" (#6889 by @alecoot, PR by app/roomote) -- Fix: improve save button activation in prompts settings (#5780 by @beccare, PR by app/roomote) -- Fix: overeager 'there are unsaved changes' dialog in settings (thanks @brunobergher!) -- Fix: show send button when only images are selected in chat textarea (thanks app/roomote!) -- Fix: Claude Sonnet 4.5 compatibility improvements (thanks @mrubens!) -- Add UsageStats schema and type for better analytics tracking (thanks app/roomote!) -- Include reasoning messages in cloud tasks (thanks @mrubens!) 
-- Security: update dependency vite to v6.3.6 (thanks app/renovate!) -- Deprecate free grok 4 fast model (thanks @mrubens!) -- Remove unsupported Gemini 2.5 Flash Image Preview free model (thanks @SannidhyaSah!) -- Add structured data to the homepage for better SEO (thanks @mrubens!) -- Update dependency glob to v11.0.3 (thanks app/renovate!) - -## [3.28.14] - 2025-09-30 - -![3.28.14 Release - GLM-4.6 Model Support](/releases/3.28.14-release.png) - -- Add support for GLM-4.6 model for z.ai provider (#8406 by @dmarkey, PR by @roomote) - -## [3.28.13] - 2025-09-29 - -- Fix: Remove topP parameter from Bedrock inference config (#8377 by @ronyblum, PR by @daniel-lxs) -- Fix: Correct Vertex AI Sonnet 4.5 model configuration (#8387 by @nickcatal, PR by @mrubens!) - -## [3.28.12] - 2025-09-29 - -- Fix: Correct Anthropic Sonnet 4.5 model ID and add Bedrock 1M context checkbox (thanks @daniel-lxs!) - -## [3.28.11] - 2025-09-29 - -- Fix: Correct Amazon Bedrock Claude Sonnet 4.5 model identifier (#8371 by @sunhyung, PR by @app/roomote) -- Fix: Correct Claude Sonnet 4.5 model ID format (thanks @daniel-lxs!) - -## [3.28.10] - 2025-09-29 - -![3.28.10 Release - Kangaroo Writing Sonnet 4.5](/releases/3.28.10-release.png) - -- Feat: Add Sonnet 4.5 support (thanks @daniel-lxs!) -- Fix: Resolve max_completion_tokens issue for GPT-5 models in LiteLLM provider (#6979 by @lx1054331851, PR by @roomote) -- Fix: Make chat icons properly sized with shrink-0 class (thanks @mrubens!) -- Enhancement: Track telemetry settings changes for better analytics (thanks @mrubens!) -- Web: Add testimonials section to website (thanks @brunobergher!) -- CI: Refresh contrib.rocks cache workflow for contributor badges (thanks @hannesrudolph!) - -## [3.28.9] - 2025-09-26 - -![3.28.9 Release - Supernova Upgrade](/releases/3.28.9-release.png) - -- The free Supernova model now has a 1M token context window (thanks @mrubens!) -- Experiment to show the Roo provider on the welcome screen (thanks @mrubens!) 
-- Web: Website improvements to https://roocode.com/ (thanks @brunobergher!) -- Fix: Remove tags from prompts for cleaner output and fewer tokens (#8318 by @hannesrudolph, PR by @app/roomote) -- Correct tool use suggestion to improve model adherence to suggestion (thanks @hannesrudolph!) -- feat: log out from cloud when resetting extension state (thanks @app/roomote!) -- feat: Add telemetry tracking to DismissibleUpsell component (thanks @app/roomote!) -- refactor: remove pr-reviewer mode (thanks @daniel-lxs!) -- Removing user hint when refreshing models (thanks @requesty-JohnCosta27!) - -## [3.28.8] - 2025-09-25 - -![3.28.8 Release - Bug fixes and improvements](/releases/3.28.8-release.png) - -- Fix: Resolve frequent "No tool used" errors by clarifying tool-use rules (thanks @hannesrudolph!) -- Fix: Include initial ask in condense summarization (thanks @hannesrudolph!) -- Add support for more free models in the Roo provider (thanks @mrubens!) -- Show cloud switcher and option to add a team when logged in (thanks @mrubens!) -- Add Opengraph image for web (thanks @brunobergher!) - -## [3.28.7] - 2025-09-23 - -![3.28.7 Release - Hidden Thinking](/releases/3.28.7-release.png) - -- UX: Collapse thinking blocks by default with UI settings to always show them (thanks @brunobergher!) -- Fix: Resolve checkpoint restore popover positioning issue (#8219 by @NaccOll, PR by @app/roomote) -- Add cloud account switcher functionality (thanks @mrubens!) -- Add support for zai-org/GLM-4.5-turbo model in Chutes provider (#8155 by @mugnimaestra, PR by @app/roomote) - -## [3.28.6] - 2025-09-23 - -![3.28.6 Release - Kangaroo studying ancient codex](/releases/3.28.6-release.png) - -- Feat: Add GPT-5-Codex model (thanks @daniel-lxs!) -- Feat: Add keyboard shortcut for toggling auto-approve (Cmd/Ctrl+Alt+A) (thanks @brunobergher!) -- Fix: Improve reasoning block formatting for better readability (thanks @daniel-lxs!) 
-- Fix: Respect Ollama Modelfile num_ctx configuration (#7797 by @hannesrudolph, PR by @app/roomote) -- Fix: Prevent checkpoint text from wrapping in non-English languages (#8206 by @NaccOll, PR by @app/roomote) -- Remove language selection and word wrap toggle from CodeBlock (thanks @mrubens!) -- Feat: Add package.nls.json checking to find-missing-translations script (thanks @app/roomote!) -- Fix: Bare metal evals fixes (thanks @cte!) -- Fix: Follow-up questions should trigger the "interactive" state (thanks @cte!) - -## [3.28.5] - 2025-09-20 - -![3.28.5 Release - Kangaroo staying hydrated](/releases/3.28.5-release.png) - -- Fix: Resolve duplicate rehydrate during reasoning; centralize rehydrate and preserve cancel metadata (#8153 by @hannesrudolph, PR by @hannesrudolph) -- Add an announcement for Supernova (thanks @mrubens!) -- Wrap code blocks by default for improved readability (thanks @mrubens!) -- Fix: Support dash prefix in parseMarkdownChecklist for todo lists (#8054 by @NaccOll, PR by app/roomote) -- Fix: Apply tiered pricing for Gemini models via Vertex AI (#8017 by @ikumi3, PR by app/roomote) -- Update SambaNova models to latest versions (thanks @snova-jorgep!) -- Update privacy policy to allow occasional emails (thanks @jdilla1277!) - -## [3.28.4] - 2025-09-19 - -![3.28.4 Release - Supernova Discovery](/releases/3.28.4-release.png) - -- UX: Redesigned Message Feed (thanks @brunobergher!) -- UX: Responsive Auto-Approve (thanks @brunobergher!) -- Add telemetry retry queue for network resilience (thanks @daniel-lxs!) -- Fix: Transform keybindings in nightly build to fix command+y shortcut (thanks @app/roomote!) -- New code-supernova stealth model in the Roo Code Router (thanks @mrubens!) 
- -## [3.28.3] - 2025-09-16 - -![3.28.3 Release - UI/UX Improvements and Bug Fixes](/releases/3.28.3-release.png) - -- Fix: Filter out Claude Code built-in tools (ExitPlanMode, BashOutput, KillBash) (#7817 by @juliettefournier-econ, PR by @roomote) -- Replace + icon with edit icon for New Task button (#7941 by @hannesrudolph, PR by @roomote) -- Fix: Corrected C# tree-sitter query (#5238 by @vadash, PR by @mubeen-zulfiqar) -- Add keyboard shortcut for "Add to Context" action (#7907 by @hannesrudolph, PR by @roomote) -- Fix: Context menu is obscured when edit message (#7759 by @mini2s, PR by @NaccOll) -- Fix: Handle ByteString conversion errors in OpenAI embedders (#7959 by @PavelA85, PR by @daniel-lxs) -- Add Z.ai coding plan support (thanks @daniel-lxs!) -- Move slash commands to Settings tab with gear icon for discoverability (thanks @roomote!) -- Reposition Add Image button inside ChatTextArea (thanks @roomote!) -- Bring back a way to temporarily and globally pause auto-approve without losing your toggle state (thanks @brunobergher!) -- Makes text area buttons appear only when there's text (thanks @brunobergher!) -- CONTRIBUTING.md tweaks and issue template rewrite (thanks @hannesrudolph!) -- Bump axios from 1.9.0 to 1.12.0 (thanks @dependabot!) - -## [3.28.2] - 2025-09-14 - -![3.28.2 Release - Auto-approve improvements](/releases/3.28.2-release.png) - -- Improve auto-approve UI with smaller and more subtle design (thanks @brunobergher!) 
-- Fix: Message queue re-queue loop in Task.ask() causing performance issues (#7861 by @hannesrudolph, PR by @daniel-lxs) -- Fix: Restrict @-mention parsing to line-start or whitespace boundaries to prevent false triggers (#7875 by @hannesrudolph, PR by @app/roomote) -- Fix: Make nested git repository warning persistent with path info for better visibility (#7884 by @hannesrudolph, PR by @app/roomote) -- Fix: Include API key in Ollama /api/tags requests for authenticated instances (#7902 by @ItsOnlyBinary, PR by @app/roomote) -- Fix: Preserve original first message context during conversation condensing (thanks @daniel-lxs!) -- Add Qwen3 Next 80B A3B models to chutes provider (thanks @daniel-lxs!) -- Disable Roomote Control on logout for better security (thanks @cte!) -- Add padding to the cloudview for better visual spacing (thanks @mrubens!) - -## [3.28.1] - 2025-09-11 - -![3.28.1 Release - Kangaroo riding rocket to the clouds](/releases/3.28.1-release.png) - -- Announce Roo Code Cloud! -- Add cloud task button for opening tasks in Roo Code Cloud (thanks @app/roomote!) -- Make Posthog telemetry the default (thanks @mrubens!) -- Show notification when the checkpoint initialization fails (thanks @app/roomote!) -- Bust cache in generated image preview (thanks @mrubens!) -- Fix: Center active mode in selector dropdown on open (#7882 by @hannesrudolph, PR by @app/roomote) -- Fix: Preserve first message during conversation condensing (thanks @daniel-lxs!) - -## [3.28.0] - 2025-09-10 - -![3.28.0 Release - Continue tasks in Roo Code Cloud](/releases/3.28.0-release.png) - -- feat: Continue tasks in Roo Code Cloud (thanks @brunobergher!) -- feat: Support connecting to Cloud without redirect handling (thanks @mrubens!) -- feat: Add toggle to control task syncing to Cloud (thanks @jr!) 
-- feat: Add click-to-edit, ESC-to-cancel, and fix padding consistency for chat messages (#7788 by @hannesrudolph, PR by @app/roomote) -- feat: Make reasoning more visible (thanks @app/roomote!) -- fix: Fix Groq context window display (thanks @mrubens!) -- fix: Add GIT_EDITOR env var to merge-resolver mode for non-interactive rebase (thanks @daniel-lxs!) -- fix: Resolve chat message edit/delete duplication issues (thanks @daniel-lxs!) -- fix: Reduce CodeBlock button z-index to prevent overlap with popovers (#7703 by @A0nameless0man, PR by @daniel-lxs) -- fix: Revert PR #7188 - Restore temperature parameter to fix TabbyApi/ExLlamaV2 crashes (#7581 by @drknyt, PR by @daniel-lxs) -- fix: Make ollama models info transport work like lmstudio (#7674 by @ItsOnlyBinary, PR by @ItsOnlyBinary) -- fix: Update DeepSeek pricing to new unified rates effective Sept 5, 2025 (#7685 by @NaccOll, PR by @app/roomote) -- feat: Update Vertex AI models and regions (#7725 by @ssweens, PR by @ssweens) -- chore: Update dependency eslint-plugin-turbo to v2.5.6 (thanks @app/renovate!) -- chore: Update dependency @changesets/cli to v2.29.6 (thanks @app/renovate!) -- chore: Update dependency nock to v14.0.10 (thanks @app/renovate!) -- chore: Update dependency eslint-config-prettier to v10.1.8 (thanks @app/renovate!) -- chore: Update dependency esbuild to v0.25.9 (thanks @app/renovate!) - -## [3.27.0] - 2025-09-05 - -![3.27.0 Release - Bug Fixes and Improvements](/releases/3.27.0-release.png) - -- Add: User message editing and deletion functionality (thanks @NaccOll!) 
-- Add: Kimi K2-0905 model support in Chutes provider (#7700 by @pwilkin, PR by @app/roomote) -- Fix: Prevent stack overflow in codebase indexing for large projects (#7588 by @StarTrai1, PR by @daniel-lxs) -- Fix: Resolve race condition in Gemini Grounding Sources by improving code design (#6372 by @daniel-lxs, PR by @HahaBill) -- Fix: Preserve conversation context by retrying with full conversation on invalid previous_response_id (thanks @daniel-lxs!) -- Fix: Identify MCP and slash command config path in multiple folder workspaces (#6720 by @kfuglsang, PR by @NaccOll) -- Fix: Handle array paths from VSCode terminal profiles correctly (#7695 by @Amosvcc, PR by @app/roomote) -- Fix: Improve WelcomeView styling and readability (thanks @daniel-lxs!) -- Fix: Resolve CI e2e test ETIMEDOUT errors when downloading VS Code (thanks @daniel-lxs!) - -## [3.26.7] - 2025-09-04 - -![3.26.7 Release - OpenAI Service Tiers](/releases/3.26.7-release.png) - -- Feature: Add OpenAI Responses API service tiers (flex/priority) with UI selector and pricing (thanks @hannesrudolph!) -- Feature: Add DeepInfra as a model provider in Roo Code (#7661 by @Thachnh, PR by @Thachnh) -- Feature: Update kimi-k2-0905-preview and kimi-k2-turbo-preview models on the Moonshot provider (thanks @CellenLee!) -- Feature: Add kimi-k2-0905-preview to Groq, Moonshot, and Fireworks (thanks @daniel-lxs and Cline!) -- Fix: Prevent countdown timer from showing in history for answered follow-up questions (#7624 by @XuyiK, PR by @daniel-lxs) -- Fix: Moonshot's maximum return token count limited to 1024 issue resolved (#6936 by @greyishsong, PR by @wangxiaolong100) -- Fix: Add error transform to cryptic OpenAI SDK errors when API key is invalid (#7483 by @A0nameless0man, PR by @app/roomote) -- Fix: Validate MCP tool exists before execution (#7631 by @R-omk, PR by @app/roomote) -- Fix: Handle zsh glob qualifiers correctly (thanks @mrubens!) -- Fix: Handle zsh process substitution correctly (thanks @mrubens!) 
-- Fix: Minor zh-TW Traditional Chinese locale typo fix (thanks @PeterDaveHello!) - -## [3.26.6] - 2025-09-03 - -![3.26.6 Release - Bug Fixes and Tool Improvements](/releases/3.26.6-release.png) - -- Add experimental run_slash_command tool to let the model initiate slash commands (thanks @app/roomote!) -- Fix: use askApproval wrapper in insert_content and search_and_replace tools (#7648 by @hannesrudolph, PR by @app/roomote) -- Add Kimi K2 Turbo model configuration to moonshotModels (thanks @wangxiaolong100!) -- Fix: preserve scroll position when switching tabs in settings (thanks @DC-Dancao!) - -## [3.26.5] - 2025-09-03 - -![3.26.5 Release - Enhanced AI Thinking Capabilities](/releases/3.26.5-release.png) - -- feat: Add support for Qwen3 235B A22B Thinking 2507 model in chutes (thanks @mohammad154!) -- feat: Add auto-approve support for MCP access_resource tool (#7565 by @m-ibm, PR by @daniel-lxs) -- feat: Add configurable embedding batch size for code indexing (#7356 by @BenLampson, PR by @app/roomote) -- fix: Add cache reporting support for OpenAI-Native provider (thanks @hannesrudolph!) -- feat: Move message queue to the extension host for better performance (thanks @cte!) - -## [3.26.4] - 2025-09-01 - -![3.26.4 Release - Memory Optimization](/releases/3.26.4-release.png) - -- Optimize memory usage for image handling in webview (thanks @daniel-lxs!) -- Fix: Special tokens should not break task processing (#7539 by @pwilkin, PR by @pwilkin) -- Add Ollama API key support for Turbo mode (#7147 by @LivioGama, PR by @app/roomote) -- Rename Account tab to Cloud tab for clarity (thanks @app/roomote!) -- Add kangaroo-themed release image generation (thanks @mrubens!) - -## [3.26.3] - 2025-08-29 - -![3.26.3 Release - Kangaroo Photo Editor](/releases/3.26.3-release.png) - -- Add optional input image parameter to image generation tool (thanks @roomote!) -- Refactor: Flatten image generation settings structure (thanks @daniel-lxs!) 
-- Show console logging in vitests when the --no-silent flag is set (thanks @hassoncs!) - -## [3.26.2] - 2025-08-28 - -![3.26.2 Release - Kangaroo Digital Artist](/releases/3.26.2-release.png) - -- feat: Add experimental image generation tool with OpenRouter integration (thanks @daniel-lxs!) -- Fix: Resolve GPT-5 Responses API issues with condensing and image support (#7334 by @nlbuescher, PR by @daniel-lxs) -- Fix: Hide .rooignore'd files from environment details by default (#7368 by @AlexBlack772, PR by @app/roomote) -- Fix: Exclude browser scroll actions from repetition detection (#7470 by @cgrierson-smartsheet, PR by @app/roomote) - -## [3.26.1] - 2025-08-27 - -![3.26.1 Release - Kangaroo Network Engineer](/releases/3.26.1-release.png) - -- Add Vercel AI Gateway provider integration (thanks @joshualipman123!) -- Add support for Vercel embeddings (thanks @mrubens!) -- Enable on-disk storage for Qdrant vectors and HNSW index (thanks @daniel-lxs!) -- Show model ID in API configuration dropdown (thanks @daniel-lxs!) -- Update tooltip component to match native VSCode tooltip shadow styling (thanks @roomote!) -- Fix: remove duplicate cache display in task header (thanks @mrubens!) -- Random chat text area cleanup (thanks @cte!) - -## [3.26.0] - 2025-08-26 - -![3.26.0 Release - Kangaroo Speed Racer](/releases/3.26.0-release.png) - -- Sonic -> Grok Code Fast -- feat: Add Qwen Code CLI API Support with OAuth Authentication (thanks @evinelias and Cline!) -- feat: Add Deepseek v3.1 to Fireworks AI provider (#7374 by @dmarkey, PR by @app/roomote) -- Add a built-in /init slash command (thanks @mrubens and @hannesrudolph!) 
-- Fix: Make auto approve toggle trigger stay (#3909 by @kyle-apex, PR by @elianiva) -- Fix: Preserve user input when selecting follow-up choices (#7316 by @teihome, PR by @daniel-lxs) -- Fix: Handle Mistral thinking content as reasoning chunks (#6842 by @Biotrioo, PR by @app/roomote) -- Fix: Resolve newTaskRequireTodos setting not working correctly (thanks @hannesrudolph!) -- Fix: Requesty model listing (#7377 by @dtrugman, PR by @dtrugman) -- feat: Hide static providers with no models from provider list (thanks @daniel-lxs!) -- Add todos parameter to new_task tool usage in issue-fixer mode (thanks @hannesrudolph!) -- Handle substitution patterns in command validation (thanks @mrubens!) -- Mark code-workspace files as protected (thanks @mrubens!) -- Update list of default allowed commands (thanks @mrubens!) -- Follow symlinks in rooignore checks (thanks @mrubens!) -- Show cache read and write prices for OpenRouter inference providers (thanks @chrarnoldus!) -- chore(deps): Update dependency drizzle-kit to v0.31.4 (thanks @app/renovate!) - -## [3.25.23] - 2025-08-22 - -- feat: add custom base URL support for Requesty provider (thanks @requesty-JohnCosta27!) -- feat: add DeepSeek V3.1 model to Chutes AI provider (#7294 by @dmarkey, PR by @app/roomote) -- Revert "feat: enable loading Roo modes from multiple files in .roo/modes directory" temporarily to fix a bug with mode installation - -## [3.25.22] - 2025-08-22 - -- Add prompt caching support for Kimi K2 on Groq (thanks @daniel-lxs and @benank!) -- Add documentation links for global custom instructions in UI (thanks @app/roomote!) 
- -## [3.25.21] - 2025-08-21 - -- Ensure subtask results are provided to GPT-5 in OpenAI Responses API -- Promote the experimental AssistantMessageParser to the default parser -- Update DeepSeek models context window to 128k (thanks @JuanPerezReal) -- Enable grounding features for Vertex AI (thanks @anguslees) -- Allow orchestrator to pass TODO lists to subtasks -- Improved MDM handling -- Handle nullish token values in ContextCondenseRow to prevent UI crash (thanks @s97712) -- Improved context window error handling for OpenAI and other providers -- Add "installed" filter to Roo Marketplace (thanks @semidark) -- Improve filesystem access checks (thanks @elianiva) -- Support for loading Roo modes from multiple YAML files in the `.roo/modes/` directory (thanks @farazoman) -- Add Featherless provider (thanks @DarinVerheijke) - -## [3.25.20] - 2025-08-19 - -- Add announcement for Sonic model - -## [3.25.19] - 2025-08-19 - -- Fix issue where new users couldn't select the Roo Code Router (thanks @daniel-lxs!) - -## [3.25.18] - 2025-08-19 - -- Add new stealth Sonic model through the Roo Code Router -- Fix: respect enableReasoningEffort setting when determining reasoning usage (#7048 by @ikbencasdoei, PR by @app/roomote) -- Fix: prevent duplicate LM Studio models with case-insensitive deduplication (#6954 by @fbuechler, PR by @daniel-lxs) -- Feat: simplify ask_followup_question prompt documentation (thanks @daniel-lxs!) -- Feat: simple read_file tool for single-file-only models (thanks @daniel-lxs!) -- Fix: Add missing zaiApiKey and doubaoApiKey to SECRET_STATE_KEYS (#7082 by @app/roomote) -- Feat: Add new models and update configurations for vscode-lm (thanks @NaccOll!) 
- -## [3.25.17] - 2025-08-17 - -- Fix: Resolve terminal reuse logic issues - -## [3.25.16] - 2025-08-16 - -- Add support for OpenAI gpt-5-chat-latest model (#7057 by @PeterDaveHello, PR by @app/roomote) -- Fix: Use native Ollama API instead of OpenAI compatibility layer (#7070 by @LivioGama, PR by @daniel-lxs) -- Fix: Prevent XML entity decoding in diff tools (#7107 by @indiesewell, PR by @app/roomote) -- Fix: Add type check before calling .match() on diffItem.content (#6905 by @pwilkin, PR by @app/roomote) -- Refactor task execution system: improve call stack management (thanks @catrielmuller!) -- Fix: Enable save button for provider dropdown and checkbox changes (thanks @daniel-lxs!) -- Add an API for resuming tasks by ID (thanks @mrubens!) -- Emit event when a task ask requires interaction (thanks @cte!) -- Make enhance with task history default to true (thanks @liwilliam2021!) -- Fix: Use cline.cwd as primary source for workspace path in codebaseSearchTool (thanks @NaccOll!) -- Hotfix multiple folder workspace checkpoint (thanks @NaccOll!) - -## [3.25.15] - 2025-08-14 - -- Fix: Remove 500-message limit to prevent scrollbar jumping in long conversations (#7052, #7063 by @daniel-lxs, PR by @app/roomote) -- Fix: Reset condensing state when switching tasks (#6919 by @f14XuanLv, PR by @f14XuanLv) -- Fix: Implement sitemap generation in TypeScript and remove XML file (#5231 by @abumalick, PR by @abumalick) -- Fix: allowedMaxRequests and allowedMaxCost values not showing in the settings UI (thanks @chrarnoldus!) - -## [3.25.14] - 2025-08-13 - -- Fix: Only include verbosity parameter for models that support it (#7054 by @eastonmeth, PR by @app/roomote) -- Fix: Amazon Bedrock 1M context - Move anthropic_beta to additionalModelRequestFields (thanks @daniel-lxs!) 
-- Fix: Make cancelling requests more responsive by reverting recent changes - -## [3.25.13] - 2025-08-12 - -- Add Sonnet 1M context checkbox to Bedrock -- Fix: add --no-messages flag to ripgrep to suppress file access errors (#6756 by @R-omk, PR by @app/roomote) -- Add support for AGENT.md alongside AGENTS.md (#6912 by @Brendan-Z, PR by @app/roomote) -- Remove deprecated GPT-4.5 Preview model (thanks @PeterDaveHello!) - -## [3.25.12] - 2025-08-12 - -- Update: Claude Sonnet 4 context window configurable to 1 million tokens in Anthropic provider (thanks @daniel-lxs!) -- Add: Minimal reasoning support to OpenRouter (thanks @daniel-lxs!) -- Fix: Add configurable API request timeout for local providers (#6521 by @dabockster, PR by @app/roomote) -- Fix: Add --no-sandbox flag to browser launch options (#6632 by @QuinsZouls, PR by @QuinsZouls) -- Fix: Ensure JSON files respect .rooignore during indexing (#6690 by @evermoving, PR by @app/roomote) -- Add: New Chutes provider models (#6698 by @fstandhartinger, PR by @app/roomote) -- Add: OpenAI gpt-oss models to Amazon Bedrock dropdown (#6752 by @josh-clanton-powerschool, PR by @app/roomote) -- Fix: Correct tool repetition detector to not block first tool call when limit is 1 (#6834 by @NaccOll, PR by @app/roomote) -- Fix: Improve checkpoint service initialization handling (thanks @NaccOll!) -- Update: Improve zh-TW Traditional Chinese locale (thanks @PeterDaveHello!) -- Add: Task expand and collapse translations (thanks @app/roomote!) -- Update: Exclude GPT-5 models from 20% context window output token cap (thanks @app/roomote!) -- Fix: Truncate long model names in model selector to prevent overflow (thanks @app/roomote!) -- Add: Requesty base url support (thanks @requesty-JohnCosta27!) - -## [3.25.11] - 2025-08-11 - -- Add: Native OpenAI provider support for Codex Mini model (#5386 by @KJ7LNW, PR by @daniel-lxs) -- Add: IO Intelligence Provider support (thanks @ertan2002!) 
-- Fix: MCP startup issues and remove refresh notifications (thanks @hannesrudolph!) -- Fix: Improvements to GPT-5 OpenAI provider configuration (thanks @hannesrudolph!) -- Fix: Clarify codebase_search path parameter as optional and improve tool descriptions (thanks @app/roomote!) -- Fix: Bedrock provider workaround for LiteLLM passthrough issues (thanks @jr!) -- Fix: Token usage and cost being underreported on cancelled requests (thanks @chrarnoldus!) - -## [3.25.10] - 2025-08-07 - -- Add support for GPT-5 (thanks Cline and @app/roomote!) -- Fix: Use CDATA sections in XML examples to prevent parser errors (#4852 by @hannesrudolph, PR by @hannesrudolph) -- Fix: Add missing MCP error translation keys (thanks @app/roomote!) - -## [3.25.9] - 2025-08-07 - -- Fix: Resolve rounding issue with max tokens (#6806 by @markp018, PR by @mrubens) -- Add support for GLM-4.5 and OpenAI gpt-oss models in Fireworks provider (#6753 by @alexfarlander, PR by @app/roomote) -- Improve UX by focusing chat input when clicking plus button in extension menu (thanks @app/roomote!) - -## [3.25.8] - 2025-08-06 - -- Fix: Prevent disabled MCP servers from starting processes and show correct status (#6036 by @hannesrudolph, PR by @app/roomote) -- Fix: Handle current directory path "." correctly in codebase_search tool (#6514 by @hannesrudolph, PR by @app/roomote) -- Fix: Trim whitespace from OpenAI base URL to fix model detection (#6559 by @vauhochzett, PR by @app/roomote) -- Feat: Reduce Gemini 2.5 Pro minimum thinking budget to 128 (thanks @app/roomote!) -- Fix: Improve handling of net::ERR_ABORTED errors in URL fetching (#6632 by @QuinsZouls, PR by @app/roomote) -- Fix: Recover from error state when Qdrant becomes available (#6660 by @hannesrudolph, PR by @app/roomote) -- Fix: Resolve memory leak in ChatView virtual scrolling implementation (thanks @xyOz-dev!) 
-- Add: Swift files to fallback list (#5857 by @niteshbalusu11, #6555 by @sealad886, PR by @niteshbalusu11) -- Feat: Clamp default model max tokens to 20% of context window (thanks @mrubens!) - -## [3.25.7] - 2025-08-05 - -- Add support for Claude Opus 4.1 -- Add Fireworks AI provider (#6653 by @ershang-fireworks, PR by @ershang-fireworks) -- Add Z AI provider (thanks @jues!) -- Add Groq support for GPT-OSS -- Add Cerebras support for GPT-OSS -- Add code indexing support for multiple folders similar to task history (#6197 by @NaccOll, PR by @NaccOll) -- Make mode selection dropdowns responsive (#6423 by @AyazKaan, PR by @AyazKaan) -- Redesigned task header and task history (thanks @brunobergher!) -- Fix checkpoints timing and ensure checkpoints work properly (#4827 by @mrubens, PR by @NaccOll) -- Fix empty mode names from being saved (#5766 by @kfxmvp, PR by @app/roomote) -- Fix MCP server creation when setting is disabled (#6607 by @characharm, PR by @app/roomote) -- Update highlight layer style and align to textarea (#6647 by @NaccOll, PR by @NaccOll) -- Fix UI for approving chained commands -- Use assistantMessageParser class instead of parseAssistantMessage (#5340 by @qdaxb, PR by @qdaxb) -- Conditionally include reminder section based on todo list config (thanks @NaccOll!) -- Task and TaskProvider event emitter cleanup with new events (thanks @cte!) - -## [3.25.6] - 2025-08-01 - -- Set horizon-beta model max tokens to 32k for OpenRouter (requested by @hannesrudolph, PR by @app/roomote) -- Add support for syncing provider profiles from the cloud - -## [3.25.5] - 2025-08-01 - -- Fix: Improve Claude Code ENOENT error handling with installation guidance (#5866 by @JamieJ1, PR by @app/roomote) -- Fix: LM Studio model context length (#5075 by @Angular-Angel, PR by @pwilkin) -- Fix: VB.NET indexing by implementing fallback chunking system (#6420 by @JensvanZutphen, PR by @daniel-lxs) -- Add auto-approved cost limits (thanks @hassoncs!) 
-- Add Cerebras as a provider (thanks @kevint-cerebras!) -- Add Qwen 3 Coder from Cerebras (thanks @kevint-cerebras!) -- Fix: Handle Qdrant deletion errors gracefully to prevent indexing interruption (thanks @daniel-lxs!) -- Fix: Restore message sending when clicking save button (thanks @daniel-lxs!) -- Fix: Linter not applied to locales/\*/README.md (thanks @liwilliam2021!) -- Handle more variations of chaining and subshell command validation -- More tolerant search/replace match -- Clean up the auto-approve UI (thanks @mrubens!) -- Skip interpolation for non-existent slash commands (thanks @app/roomote!) - -## [3.25.4] - 2025-07-30 - -- feat: add SambaNova provider integration (#6077 by @snova-jorgep, PR by @snova-jorgep) -- feat: add Doubao provider integration (thanks @AntiMoron!) -- feat: set horizon-alpha model max tokens to 32k for OpenRouter (thanks @app/roomote!) -- feat: add zai-org/GLM-4.5-FP8 model to Chutes AI provider (#6440 by @leakless21, PR by @app/roomote) -- feat: add symlink support for AGENTS.md file loading (thanks @app/roomote!) -- feat: optionally add task history context to prompt enhancement (thanks @liwilliam2021!) -- fix: remove misleading task resumption message (#5850 by @KJ7LNW, PR by @KJ7LNW) -- feat: add pattern to support Databricks /invocations endpoints (thanks @adambrand!) -- fix: resolve navigator global error by updating mammoth and bluebird dependencies (#6356 by @hishtadlut, PR by @app/roomote) -- feat: enhance token counting by extracting text from messages using VSCode LM API (#6112 by @sebinseban, PR by @NaccOll) -- feat: auto-refresh marketplace data when organization settings change (thanks @app/roomote!) -- fix: kill button for execute_command tool (thanks @daniel-lxs!) 
- -## [3.25.3] - 2025-07-30 - -- Allow queueing messages with images -- Increase Claude Code default max output tokens to 16k (#6125 by @bpeterson1991, PR by @app/roomote) -- Add docs link for slash commands -- Hide Gemini checkboxes on the welcome view -- Clarify apply_diff tool descriptions to emphasize surgical edits -- Fix: Prevent input clearing when clicking chat buttons (thanks @hassoncs!) -- Update PR reviewer rules and mode configuration (thanks @daniel-lxs!) -- Add translation check action to pull_request.opened event (thanks @app/roomote!) -- Remove "(prev Roo Cline)" from extension title in all languages (thanks @app/roomote!) -- Remove event types mention from PR reviewer rules (thanks @daniel-lxs!) - -## [3.25.2] - 2025-07-29 - -- Fix: Show diff view before approval when background edits are disabled (thanks @daniel-lxs!) -- Add support for organization-level MCP controls -- Fix zap icon hover state - -## [3.25.1] - 2025-07-29 - -- Add support for GLM-4.5-Air model to Chutes AI provider (#6376 by @matbgn, PR by @app/roomote) -- Improve subshell validation for commands - -## [3.25.0] - 2025-07-29 - -- Add message queueing (thanks @app/roomote!) -- Add custom slash commands -- Add options for URL Context and Grounding with Google Search to the Gemini provider (thanks @HahaBill!) -- Add image support to read_file tool (thanks @samhvw8!) 
-- Add experimental setting to prevent editor focus disruption (#4784 by @hannesrudolph, PR by @app/roomote) -- Add prompt caching support for LiteLLM (#5791 by @steve-gore-snapdocs, PR by @MuriloFP) -- Add markdown table rendering support -- Fix list_files recursive mode now works for dot directories (#2992 by @avtc, #4807 by @zhang157686, #5409 by @MuriloFP, PR by @MuriloFP) -- Add search functionality to mode selector popup and reorganize layout -- Sync API config selector style with mode selector -- Fix keyboard shortcuts for non-QWERTY layouts (#6161 by @shlgug, PR by @app/roomote) -- Add ESC key handling for modes, API provider, and indexing settings popovers (thanks @app/roomote!) -- Make task mode sticky to task (thanks @app/roomote!) -- Add text wrapping to command patterns in Manage Command Permissions (thanks @app/roomote!) -- Update list-files test for fixed hidden files bug (thanks @daniel-lxs!) -- Fix normalize Windows paths to forward slashes in mode export (#6307 by @hannesrudolph, PR by @app/roomote) -- Ensure form-data >= 4.0.4 -- Fix filter out non-text tab inputs (Kilo-Org/kilocode#712 by @szermatt, PR by @hassoncs) - -## [3.24.0] - 2025-07-25 - -- Add Hugging Face provider with support for open source models (thanks @TGlide!) -- Add terminal command permissions UI to chat interface -- Add support for Agent Rules standard via AGENTS.md (thanks @sgryphon!) -- Add settings to control diagnostic messages -- Fix auto-approve checkbox to be toggled at any time (thanks @KJ7LNW!) -- Add efficiency warning for single SEARCH/REPLACE blocks in apply_diff (thanks @KJ7LNW!) -- Fix respect maxReadFileLine setting for file mentions to prevent context exhaustion (thanks @sebinseban!) -- Fix Ollama API URL normalization by removing trailing slashes (thanks @Naam!) -- Fix restore list styles for markdown lists in chat interface (thanks @village-way!) 
-- Add support for bedrock api keys -- Add confirmation dialog and proper cleanup for marketplace mode removal -- Fix cancel auto-approve timer when editing follow-up suggestion (thanks @hassoncs!) -- Fix add error message when no workspace folder is open for code indexing - -## [3.23.19] - 2025-07-23 - -- Add Roo Code Cloud Waitlist CTAs (thanks @brunobergher!) -- Split commands on newlines when evaluating auto-approve -- Smarter auto-deny of commands - -## [3.23.18] - 2025-07-23 - -- Fix: Resolve 'Bad substitution' error in command parsing (#5978 by @KJ7LNW, PR by @daniel-lxs) -- Fix: Add ErrorBoundary component for better error handling (#5731 by @elianiva, PR by @KJ7LNW) -- Fix: Todo list toggle not working (thanks @chrarnoldus!) -- Improve: Use SIGKILL for command execution timeouts in the "execa" variant (thanks @cte!) - -## [3.23.17] - 2025-07-22 - -- Add: todo list tool enable checkbox to provider advanced settings -- Add: Moonshot provider (thanks @CellenLee!) -- Add: Qwen/Qwen3-235B-A22B-Instruct-2507 model to Chutes AI provider -- Fix: move context condensing prompt to Prompts section (thanks @SannidhyaSah!) -- Add: jump icon for newly created files -- Fix: add character limit to prevent terminal output context explosion -- Fix: resolve global mode export not including rules files -- Fix: enable export, share, and copy buttons during API operations (thanks @MuriloFP!) -- Add: configurable timeout for evals (5-10 min) -- Add: auto-omit MCP content when no servers are configured -- Fix: sort symlinked rules files by symlink names, not target names -- Docs: clarify when to use update_todo_list tool -- Add: Mistral embedding provider (thanks @SannidhyaSah!) -- Fix: add run parameter to vitest command in rules (thanks @KJ7LNW!) -- Update: the max_tokens fallback logic in the sliding window -- Fix: Bedrock and Vertex token counting improvements (thanks @daniel-lxs!) -- Add: llama-4-maverick model to Vertex AI provider (thanks @MuriloFP!) 
-- Fix: properly distinguish between user cancellations and API failures -- Fix: add case sensitivity mention to suggested fixes in apply_diff error message - -## [3.23.16] - 2025-07-19 - -- Add global rate limiting for OpenAI-compatible embeddings (thanks @daniel-lxs!) -- Add batch limiting to code indexer (thanks @daniel-lxs!) -- Fix Docker port conflicts for evals services - -## [3.23.15] - 2025-07-18 - -- Fix configurable delay for diagnostics to prevent premature error reporting -- Add command timeout allowlist -- Add description and whenToUse fields to custom modes in .roomodes (thanks @RandalSchwartz!) -- Fix Claude model detection by name for API protocol selection (thanks @daniel-lxs!) -- Move marketplace icon from overflow menu to top navigation -- Optional setting to prevent completion with open todos -- Added YouTube to website footer (thanks @thill2323!) - -## [3.23.14] - 2025-07-17 - -- Log api-initiated tasks to a tmp directory - -## [3.23.13] - 2025-07-17 - -- Add the ability to "undo" enhance prompt changes -- Fix a bug where the path component of the baseURL for the LiteLLM provider contains path in it (thanks @ChuKhaLi) -- Add support for Vertex AI model name formatting when using Claude Code with Vertex AI (thanks @janaki-sasidhar) -- The list-files tool must include at least the first-level directory contents (thanks @qdaxb) -- Add a configurable limit that controls both consecutive errors and tool repetitions (thanks @MuriloFP) -- Add `.terraform/` and `.terragrunt-cache/` directories to the checkpoint exclusion patterns (thanks @MuriloFP) -- Increase Ollama API timeout values (thanks @daniel-lxs) -- Fix an issue where you need to "discard changes" before saving even though there are no settings changes -- Fix `DirectoryScanner` memory leak and improve file limit handling (thanks @daniel-lxs) -- Fix time formatting in environment (thanks @chrarnoldus) -- Prevent empty mode names from being saved (thanks @daniel-lxs) -- Improve auto-approve 
checkbox UX -- Improve the chat message edit / delete functionality (thanks @liwilliam2021) -- Add `commandExecutionTimeout` to `GlobalSettings` - -## [3.23.12] - 2025-07-15 - -- Update the max-token calculation in model-params to better support Kimi K2 and others - -## [3.23.11] - 2025-07-14 - -- Add Kimi K2 model to Groq along with fixes to context condensing math -- Add Cmd+Shift+. keyboard shortcut for previous mode switching - -## [3.23.10] - 2025-07-14 - -- Prioritize built-in model dimensions over custom dimensions (thanks @daniel-lxs!) -- Add padding to the index model options - -## [3.23.9] - 2025-07-14 - -- Enable Claude Code provider to run natively on Windows (thanks @SannidhyaSah!) -- Add gemini-embedding-001 model to code-index service (thanks @daniel-lxs!) -- Resolve vector dimension mismatch error when switching embedding models -- Return the cwd in the exec tool's response so that the model is not lost after subsequent calls (thanks @chris-garrett!) -- Add configurable timeout for command execution in VS Code settings - -## [3.23.8] - 2025-07-13 - -- Add enable/disable toggle for code indexing (thanks @daniel-lxs!) -- Add a command auto-deny list to auto-approve settings -- Add navigation link to history tab in HistoryPreview - -## [3.23.7] - 2025-07-11 - -- Fix Mermaid syntax warning (thanks @MuriloFP!) -- Expand Vertex AI region config to include all available regions in GCP Vertex AI (thanks @shubhamgupta731!) -- Handle Qdrant vector dimension mismatch when switching embedding models (thanks @daniel-lxs!) -- Fix typos in comment & document (thanks @noritaka1166!) -- Improve the display of codebase search results -- Correct translation fallback logic for embedding errors (thanks @daniel-lxs!) -- Clean up MCP tool disabling -- Link to marketplace from modes and MCP tab -- Fix TTS button display (thanks @sensei-woo!) -- Add Devstral Medium model support -- Add comprehensive error telemetry to code-index service (thanks @daniel-lxs!) 
-- Exclude cache tokens from context window calculation (thanks @daniel-lxs!) -- Enable dynamic tool selection in architect mode for context discovery -- Add configurable max output tokens setting for claude-code - -## [3.23.6] - 2025-07-10 - -- Grok 4 - -## [3.23.5] - 2025-07-09 - -- Fix: use decodeURIComponent in openFile (thanks @vivekfyi!) -- Fix(embeddings): Translate error messages before sending to UI (thanks @daniel-lxs!) -- Make account tab visible - -## [3.23.4] - 2025-07-09 - -- Update chat area icons for better discoverability & consistency -- Fix a bug that allowed `list_files` to return directory results that should be excluded by .gitignore -- Add an overflow header menu to make the UI a little tidier (thanks @dlab-anton) -- Fix a bug the issue where null custom modes configuration files cause a 'Cannot read properties of null' error (thanks @daniel-lxs!) -- Replace native title attributes with StandardTooltip component for consistency (thanks @daniel-lxs!) - -## [3.23.3] - 2025-07-09 - -- Remove erroneous line from announcement modal - -## [3.23.2] - 2025-07-09 - -- Fix bug where auto-approval was intermittently failing - -## [3.23.1] - 2025-07-09 - -- Always show the code indexing dot under the chat text area - -## [3.23.0] - 2025-07-08 - -- Move codebase indexing out of experimental (thanks @daniel-lxs and @MuriloFP!) -- Add todo list tool (thanks @qdaxb!) -- Fix code index secret persistence and improve settings UX (thanks @daniel-lxs!) -- Add Gemini embedding provider for codebase indexing (thanks @SannidhyaSah!) -- Support full endpoint URLs in OpenAI Compatible provider (thanks @SannidhyaSah!) -- Add markdown support to codebase indexing (thanks @MuriloFP!) -- Add Search/Filter Functionality to API Provider Selection in Settings (thanks @GOODBOY008!) -- Add configurable max search results (thanks @MuriloFP!) -- Add copy prompt button to task actions (thanks @Juice10 and @vultrnerd!) 
-- Fix insertContentTool to create new files with content (thanks @Ruakij!) -- Fix typescript compiler watch path inconsistency (thanks @bbenshalom!) -- Use actual max_completion_tokens from OpenRouter API (thanks @shariqriazz!) -- Prevent completion sound from replaying when reopening completed tasks (thanks @SannidhyaSah!) -- Fix access_mcp_resource fails to handle images correctly (thanks @s97712!) -- Prevent chatbox focus loss during automated file editing (thanks @hannesrudolph!) -- Resolve intermittent hangs and lack of clear error feedback in apply_diff tool (thanks @lhish!) -- Resolve Go duplicate references in tree-sitter queries (thanks @MuriloFP!) -- Chat UI consistency and layout shifts (thanks @seedlord!) -- Chat index UI enhancements (thanks @MuriloFP!) -- Fix model search being prefilled on dropdown (thanks @kevinvandijk!) -- Improve chat UI - add camera icon margin and make placeholder non-selectable (thanks @MuriloFP!) -- Delete .roo/rules-{mode} folder when custom mode is deleted -- Enforce file restrictions for all edit tools in architect mode -- Add User-Agent header to API providers -- Fix auto question timer unmount (thanks @liwilliam2021!) -- Fix new_task tool streaming issue -- Optimize file listing when maxWorkspaceFiles is 0 (thanks @daniel-lxs!) -- Correct export/import of OpenAI Compatible codebase indexing settings (thanks @MuriloFP!) -- Resolve workspace path inconsistency in code indexing for multi-workspace scenarios - -## [3.22.6] - 2025-07-02 - -- Add timer-based auto approve for follow up questions (thanks @liwilliam2021!) -- Add import/export modes functionality -- Add persistent version indicator on chat screen -- Add automatic configuration import on extension startup (thanks @takakoutso!) -- Add user-configurable search score threshold slider for semantic search (thanks @hannesrudolph!) -- Add default headers and testing for litellm fetcher (thanks @andrewshu2000!) 
-- Fix consistent cancellation error messages for thinking vs streaming phases -- Fix Amazon Bedrock cross-region inference profile mapping (thanks @KevinZhao!) -- Fix URL loading timeout issues in @ mentions (thanks @MuriloFP!) -- Fix API retry exponential backoff capped at 10 minutes (thanks @MuriloFP!) -- Fix Qdrant URL field auto-filling with default value (thanks @SannidhyaSah!) -- Fix profile context condensation threshold (thanks @PaperBoardOfficial!) -- Fix apply_diff tool documentation for multi-file capabilities -- Fix cache files excluded from rules compilation (thanks @MuriloFP!) -- Add streamlined extension installation and documentation (thanks @devxpain!) -- Prevent Architect mode from providing time estimates -- Remove context size from environment details -- Change default mode to architect for new installations -- Suppress Mermaid error rendering -- Improve Mermaid buttons with light background in light mode (thanks @chrarnoldus!) -- Add .vscode/ to write-protected files/directories -- Update Amazon Bedrock cross-region inference profile mapping (thanks @KevinZhao!) - -## [3.22.5] - 2025-06-28 - -- Remove Gemini CLI provider while we work with Google on a better integration - -## [3.22.4] - 2025-06-27 - -- Fix: resolve E2BIG error by passing large prompts via stdin to Claude CLI (thanks @Fovty!) -- Add optional mode suggestions to follow-up questions -- Fix: move StandardTooltip inside PopoverTrigger in ShareButton (thanks @daniel-lxs!) - -## [3.22.3] - 2025-06-27 - -- Restore JSON backwards compatibility for .roomodes files (thanks @daniel-lxs!) - -## [3.22.2] - 2025-06-27 - -- Fix: eliminate XSS vulnerability in CodeBlock component (thanks @KJ7LNW!) -- Fix terminal keyboard shortcut error when adding content to context (thanks @MuriloFP!) -- Fix checkpoint popover not opening due to StandardTooltip wrapper conflict (thanks @daniel-lxs!) -- Fix(i18n): correct gemini cli error translation paths (thanks @daniel-lxs!) 
-- Code Index (Qdrant) recreate services when change configurations (thanks @catrielmuller!) - -## [3.22.1] - 2025-06-26 - -- Add Gemini CLI provider (thanks Cline!) -- Fix undefined mcp command (thanks @qdaxb!) -- Use upstream_inference_cost for OpenRouter BYOK cost calculation and show cached token count (thanks @chrarnoldus!) -- Update maxTokens value for qwen/qwen3-32b model on Groq (thanks @KanTakahiro!) -- Standardize tooltip delays to 300ms - -## [3.22.0] - 2025-06-25 - -- Add 1-click task sharing -- Add support for loading rules from a global .roo directory (thanks @samhvw8!) -- Modes selector improvements (thanks @brunobergher!) -- Use safeWriteJson for all JSON file writes to avoid task history corruption (thanks @KJ7LNW!) -- Improve YAML error handling when editing modes -- Register importSettings as VSCode command (thanks @shivamd1810!) -- Add default task names for empty tasks (thanks @daniel-lxs!) -- Improve translation workflow to avoid unnecessary file reads (thanks @KJ7LNW!) -- Allow write_to_file to handle newline-only and empty content (thanks @Githubguy132010!) -- Address multiple memory leaks in CodeBlock component (thanks @kiwina!) -- Memory cleanup (thanks @xyOz-dev!) -- Fix port handling bug in code indexing for HTTPS URLs (thanks @benashby!) -- Improve Bedrock error handling for throttling and streaming contexts -- Handle long Claude code messages (thanks @daniel-lxs!) -- Fixes to Claude Code caching and image upload -- Disable reasoning budget UI controls for Claude Code provider -- Remove temperature parameter for Azure OpenAI reasoning models (thanks @ExactDoug!) -- Allowed commands import/export (thanks @catrielmuller!) -- Add VS Code setting to disable quick fix context actions (thanks @OlegOAndreev!) - -## [3.21.5] - 2025-06-23 - -- Fix Qdrant URL prefix handling for QdrantClient initialization (thanks @CW-B-W!) -- Improve LM Studio model detection to show all downloaded models (thanks @daniel-lxs!) 
-- Resolve Claude Code provider JSON parsing and reasoning block display - -## [3.21.4] - 2025-06-23 - -- Fix start line not working in multiple apply diff (thanks @samhvw8!) -- Resolve diff editor issues with markdown preview associations (thanks @daniel-lxs!) -- Resolve URL port handling bug for HTTPS URLs in Qdrant (thanks @benashby!) -- Mark unused Ollama schema properties as optional (thanks @daniel-lxs!) -- Close the local browser when used as fallback for remote (thanks @markijbema!) -- Add Claude Code provider for local CLI integration (thanks @BarreiroT!) - -## [3.21.3] - 2025-06-21 - -- Add profile-specific context condensing thresholds (thanks @SannidhyaSah!) -- Fix context length for lmstudio and ollama (thanks @thecolorblue!) -- Resolve MCP tool eye icon state and hide in chat context (thanks @daniel-lxs!) - -## [3.21.2] - 2025-06-20 - -- Add LaTeX math equation rendering in chat window -- Add toggle for excluding MCP server tools from the prompt (thanks @Rexarrior!) -- Add symlink support to list_files tool -- Fix marketplace blanking after populating -- Fix recursive directory scanning in @ mention "Add Folder" functionality (thanks @village-way!) -- Resolve phantom subtask display on cancel during API retry -- Correct Gemini 2.5 Flash pricing (thanks @daniel-lxs!) -- Resolve marketplace timeout issues and display installed MCPs (thanks @daniel-lxs!) -- Onboarding tweaks to emphasize modes (thanks @brunobergher!) -- Rename 'Boomerang Tasks' to 'Task Orchestration' for clarity -- Remove command execution from attempt_completion -- Fix markdown for links followed by punctuation (thanks @xyOz-dev!) 
- -## [3.21.1] - 2025-06-19 - -- Fix tree-sitter issues that were preventing codebase indexing from working correctly -- Improve error handling for codebase search embeddings -- Resolve MCP server execution on Windows with node version managers -- Default 'Enable MCP Server Creation' to false -- Rate limit correctly when starting a subtask (thanks @olweraltuve!) - -## [3.21.0] - 2025-06-17 - -- Add Roo Marketplace to make it easy to discover and install great MCPs and modes! -- Add Gemini 2.5 models (Pro, Flash and Flash Lite) (thanks @daniel-lxs!) -- Add support for Excel (.xlsx) files in tools (thanks @chrarnoldus!) -- Add max tokens checkbox option for OpenAI compatible provider (thanks @AlexandruSmirnov!) -- Update provider models and prices for Groq & Mistral (thanks @KanTakahiro!) -- Add proper error handling for API conversation history issues (thanks @KJ7LNW!) -- Fix ambiguous model id error (thanks @elianiva!) -- Fix save/discard/revert flow for Prompt Settings (thanks @hassoncs!) -- Fix codebase indexing alignment with list-files hidden directory filtering (thanks @daniel-lxs!) -- Fix subtask completion mismatch (thanks @feifei325!) -- Fix Windows path normalization in MCP variable injection (thanks @daniel-lxs!) -- Update marketplace branding to 'Roo Marketplace' (thanks @SannidhyaSah!) -- Refactor to more consistent history UI (thanks @elianiva!) -- Adjust context menu positioning to be near Copilot -- Update evals Docker setup to work on Windows (thanks @StevenTCramer!) -- Include current working directory in terminal details -- Encourage use of start_line in multi-file diff to match legacy diff -- Always focus the panel when clicked to ensure menu buttons are visible (thanks @hassoncs!) - -## [3.20.3] - 2025-06-13 - -- Resolve diff editor race condition in multi-monitor setups (thanks @daniel-lxs!) 
-- Add logic to prevent auto-approving edits of configuration files -- Adjust searching and listing files outside of the workspace to respect the auto-approve settings -- Add Indonesian translation support (thanks @chrarnoldus and @daniel-lxs!) -- Fix multi-file diff error handling and UI feedback (thanks @daniel-lxs!) -- Improve prompt history navigation to not interfere with text editing (thanks @daniel-lxs!) -- Fix errant maxReadFileLine default - -## [3.20.2] - 2025-06-13 - -- Limit search_files to only look within the workspace for improved security -- Force tar-fs >=2.1.3 for security vulnerability fix -- Add cache breakpoints for custom vertex models on Unbound (thanks @pugazhendhi-m!) -- Reapply reasoning for bedrock with fix (thanks @daniel-lxs!) -- Sync BatchDiffApproval styling with BatchFilePermission for UI consistency (thanks @samhvw8!) -- Add max height constraint to MCP execution response for better UX (thanks @samhvw8!) -- Prevent MCP 'installed' label from being squeezed #4630 (thanks @daniel-lxs!) -- Allow a lower context condensing threshold (thanks @SECKainersdorfer!) -- Avoid type system duplication for cleaner codebase (thanks @EamonNerbonne!) - -## [3.20.1] - 2025-06-12 - -- Temporarily revert thinking support for Bedrock models -- Improve performance of MCP execution block -- Add indexing status badge to chat view - -## [3.20.0] - 2025-06-12 - -- Add experimental Marketplace for extensions and modes (thanks @Smartsheet-JB-Brown, @elianiva, @monkeyDluffy6017, @NamesMT, @daniel-lxs, Cline, and more!) -- Add experimental multi-file edits (thanks @samhvw8!) -- Move concurrent reads setting to context settings with default of 5 -- Improve MCP execution UX (thanks @samhvw8!) -- Add magic variables support for MCPs with `workspaceFolder` injection (thanks @NamesMT!) -- Add prompt history navigation via arrow up/down in prompt field -- Add support for escaping context mentions (thanks @KJ7LNW!) 
-- Add DeepSeek R1 support to Chutes provider -- Add reasoning budget support to Bedrock models for extended thinking -- Add mermaid diagram support buttons (thanks @qdaxb!) -- Update XAI models and pricing (thanks @edwin-truthsearch-io!) -- Update O3 model pricing -- Add manual OpenAI-compatible format specification and parsing (thanks @dflatline!) -- Add core tools integration tests for comprehensive coverage -- Add JSDoc documentation for ClineAsk and ClineSay types (thanks @hannesrudolph!) -- Populate whenToUse descriptions for built-in modes -- Fix file write tool with early relPath & newContent validation checks (thanks @Ruakij!) -- Fix TaskItem display and copy issues with HTML tags in task messages (thanks @forestyoo!) -- Fix OpenRouter cost calculation with BYOK (thanks @chrarnoldus!) -- Fix terminal busy state reset after manual commands complete -- Fix undefined output on multi-file apply_diff operations (thanks @daniel-lxs!) - -## [3.19.7] - 2025-06-11 - -- Fix McpHub sidebar focus behavior to prevent unwanted focus grabbing -- Disable checkpoint functionality when nested git repositories are detected to prevent conflicts -- Remove unused Storybook components and dependencies to reduce bundle size -- Add data-testid ESLint rule for improved testing standards (thanks @elianiva!) -- Update development dependencies including eslint, knip, @types/node, i18next, fast-xml-parser, and @google/genai -- Improve CI infrastructure with GitHub Actions and Blacksmith runner migrations - -## [3.19.6] - 2025-06-09 - -- Replace explicit caching with implicit caching to reduce latency for Gemini models -- Clarify that the default concurrent file read limit is 15 files (thanks @olearycrew!) -- Fix copy button logic (thanks @samhvw8!) -- Fade buttons on history preview if no interaction in progress (thanks @sachasayan!) -- Allow MCP server refreshing, fix state changes in MCP server management UI view (thanks @taylorwilsdon!) 
-- Remove unnecessary npx usage in some npm scripts (thanks @user202729!) -- Bug fix for trailing slash error when using LiteLLM provider (thanks @kcwhite!) - -## [3.19.5] - 2025-06-05 - -- Fix Gemini 2.5 Pro Preview thinking budget bug - -## [3.19.4] - 2025-06-05 - -- Add Gemini Pro 06-05 model support (thanks @daniel-lxs and @shariqriazz!) -- Fix reading PDF, DOCX, and IPYNB files in read_file tool (thanks @samhvw8!) -- Fix Mermaid CSP errors with enhanced bundling strategy (thanks @KJ7LNW!) -- Improve model info detection for custom Bedrock ARNs (thanks @adamhill!) -- Add OpenAI Compatible embedder for codebase indexing (thanks @SannidhyaSah!) -- Fix multiple memory leaks in ChatView component (thanks @kiwina!) -- Fix WorkspaceTracker resource leaks by disposing FileSystemWatcher (thanks @kiwina!) -- Fix RooTips setTimeout cleanup to prevent state updates on unmounted components (thanks @kiwina!) -- Fix FileSystemWatcher leak in RooIgnoreController (thanks @kiwina!) -- Fix clipboard memory leak by clearing setTimeout in useCopyToClipboard (thanks @kiwina!) -- Fix ClineProvider instance cleanup (thanks @xyOz-dev!) -- Enforce codebase_search as primary tool for code understanding tasks (thanks @hannesrudolph!) -- Improve Docker setup for evals -- Move evals into pnpm workspace, switch from SQLite to Postgres -- Refactor MCP to use getDefaultEnvironment for stdio client transport (thanks @samhvw8!) -- Get rid of "partial" component in names referencing not necessarily partial messages (thanks @wkordalski!) -- Improve feature request template (thanks @elianiva!) - -## [3.19.3] - 2025-06-02 - -- Fix SSE MCP Invocation - Fixed SSE connection issue in McpHub.ts by ensuring transport.start override only applies to stdio transports, allowing SSE and streamable-http transports to retain their original start methods (thanks @taylorwilsdon!) - -## [3.19.2] - 2025-06-01 - -- Add support for Streamable HTTP Transport MCP servers (thanks @taylorwilsdon!) 
-- Add cached read and writes to stats and cost calculation for LiteLLM provider (thanks @mollux!) -- Prevent dump of an entire file into the context on user edit (thanks @KJ7LNW!) -- Fix directory link handling in markdown (thanks @KJ7LNW!) -- Prevent start_line/end_line in apply_diff REPLACE (thanks @KJ7LNW!) -- Unify history item UI with TaskItem and TaskItemHeader (thanks @KJ7LNW!) -- Fix the label of the OpenAI-compatible API keys -- Fix Virtuoso footer re-rendering issue (thanks @kiwina!) -- Optimize ChatRowContent layout and styles (thanks @zhangtony239!) -- Release memory in apply diff (thanks @xyOz-dev!) -- Upgrade Node.js to v20.19.2 for security enhancements (thanks @PeterDaveHello!) -- Fix typos (thanks @noritaka1166!) - -## [3.19.1] - 2025-05-30 - -- Experimental feature to allow reading multiple files at once (thanks @samhvw8!) -- Fix to correctly pass headers to SSE MCP servers -- Adding support for custom VPC endpoints when using Amazon Bedrock (thanks @kcwhite!) -- Fix bug with context condensing in Amazon Bedrock -- Fix UTF-8 encoding in ExecaTerminalProcess (thanks @mr-ryan-james!) -- Set sidebar name bugfix (thanks @chrarnoldus!) -- Fix link to CONTRIBUTING.md in feature request template (thanks @cannuri!) -- Add task metadata to Unbound and improve caching logic (thanks @pugazhendhi-m!) - -## [3.19.0] - 2025-05-29 - -- Enable intelligent content condensing by default and move condense button out of expanded task menu -- Skip condense and show error if context grows during condensing -- Transform Prompts tab into Modes tab and move support prompts to Settings for better organization -- Add DeepSeek R1 0528 model support to Chutes provider (thanks @zeozeozeo!) -- Fix @directory not respecting .rooignore files (thanks @xyOz-dev!) -- Add rooignore checking for insert_content and search_and_replace tools -- Fix menu breaking when Roo is moved between primary and secondary sidebars (thanks @chrarnoldus!) 
-- Resolve memory leak in ChatView by stabilizing callback props (thanks @samhvw8!) -- Fix write_to_file to properly create empty files when content is empty (thanks @Ruakij!) -- Fix chat input clearing during running tasks (thanks @xyOz-dev!) -- Update AWS regions to include Spain and Hyderabad -- Improve POSIX shell compatibility in pre-push hook (thanks @PeterDaveHello and @chrarnoldus!) -- Update PAGER environment variable for Windows compatibility in Terminal (thanks @SmartManoj!) -- Add environment variable injection support for whole MCP config (thanks @NamesMT!) -- Update codebase search description to emphasize English query requirements (thanks @ChuKhaLi!) - -## [3.18.5] - 2025-05-27 - -- Add thinking controls for Requesty (thanks @dtrugman!) -- Re-enable telemetry -- Improve zh-TW Traditional Chinese locale (thanks @PeterDaveHello and @chrarnoldus!) -- Improve model metadata for LiteLLM - -## [3.18.4] - 2025-05-25 - -- Fix codebase indexing settings saving and Ollama indexing (thanks @daniel-lxs!) -- Fix handling BOM when user rejects apply_diff (thanks @avtc!) -- Fix wrongfully clearing input on auto-approve (thanks @Ruakij!) -- Fix correct spawnSync parameters for pnpm check in bootstrap.mjs (thanks @ChuKhaLi!) -- Update xAI models and default model ID (thanks @PeterDaveHello!) -- Add metadata to create message (thanks @dtrugman!) - -## [3.18.3] - 2025-05-24 - -- Add reasoning support for Claude 4 and Gemini 2.5 Flash on OpenRouter, plus a fix for o1-pro -- Add experimental codebase indexing + semantic search feature (thanks @daniel-lxs!) -- For providers that used to default to Sonnet 3.7, change to Sonnet 4 -- Enable prompt caching for Gemini 2.5 Flash Preview (thanks @shariqriazz!) 
-- Preserve model settings when selecting a specific OpenRouter provider -- Add ability to refresh LiteLLM models list -- Improve tool descriptions to guide proper file editing tool selection -- Fix MCP Server error loading config when running with npx and bunx (thanks @devxpain!) -- Improve pnpm bootstrapping and add compile script (thanks @KJ7LNW!) -- Simplify object assignment & use startsWith (thanks @noritaka1166!) -- Fix mark-as-read logic in the context tracker (thanks @samhvw8!) -- Remove deprecated claude-3.7-sonnet models from vscodelm (thanks @shariqriazz!) - -## [3.18.2] - 2025-05-23 - -- Fix vscode-material-icons in the file picker -- Fix global settings export -- Respect user-configured terminal integration timeout (thanks @KJ7LNW) -- Context condensing enhancements (thanks @SannidhyaSah) - -## [3.18.1] - 2025-05-22 - -- Add support for Claude Sonnet 4 and Claude Opus 4 models with thinking variants in Anthropic, Bedrock, and Vertex (thanks @shariqriazz!) -- Fix README gif display in all localized versions -- Fix referer URL -- Switch codebase to a monorepo and create an automated "nightly" build - -## [3.18.0] - 2025-05-21 - -- Add support for Gemini 2.5 Flash preview models (thanks @shariqriazz and @daniel-lxs!) -- Add button to task header to intelligently condense content with visual feedback -- Add YAML support for mode definitions (thanks @R-omk!) -- Add allowedMaxRequests feature to cap consecutive auto-approved requests (inspired by Cline, thanks @hassoncs!) -- Add Qwen3 model series to the Chutes provider (thanks @zeozeozeo!) -- Fix more causes of grey screen issues (thanks @xyOz-dev!) -- Add LM Studio reasoning support (thanks @avtc!) -- Add refresh models button for Unbound provider (thanks @pugazhendhi-m!) -- Add template variables for version numbers in announcement strings (thanks @ChuKhaLi!) -- Make prompt input textareas resizable again -- Fix diffview scroll display (thanks @qdaxb!) 
-- Fix LM Studio and Ollama usage tracking (thanks @xyOz-dev!) -- Fix links to filename:0 (thanks @RSO!) -- Fix missing or inconsistent syntax highlighting across UI components (thanks @KJ7LNW!) -- Fix packaging to include correct tiktoken.wasm (thanks @vagadiya!) -- Fix import settings bugs and position error messages correctly (thanks @ChuKhaLi!) -- Move audio playing to the webview to ensure cross-platform support (thanks @SmartManoj and @samhvw8!) -- Simplify loop syntax in multiple components (thanks @noritaka1166!) -- Auto reload extension core changes in dev mode (thanks @hassoncs!) - -## [3.17.2] - 2025-05-15 - -- Revert "Switch to the new Roo message parser" (appears to cause a tool parsing bug) -- Lock the versions of vsce and ovsx - -## [3.17.1] - 2025-05-15 - -- Fix the display of the command to execute during approval -- Fix incorrect reserved tokens calculation on OpenRouter (thanks @daniel-lxs!) - -## [3.17.0] - 2025-05-14 - -- Enable Gemini implicit caching -- Add "when to use" section to mode definitions to enable better orchestration -- Add experimental feature to intelligently condense the task context instead of truncating it -- Fix one of the causes of the gray screen issue (thanks @xyOz-dev!) -- Focus improvements for better UI interactions (thanks Cline!) -- Switch to the new Roo message parser for improved performance (thanks Cline!) -- Enable source maps for improved debugging (thanks @KJ7LNW!) -- Update OpenRouter provider to use provider-specific model info (thanks @daniel-lxs!) -- Fix Requesty cost/token reporting (thanks @dtrugman!) -- Improve command execution UI -- Add more in-app links to relevant documentation -- Update the new task tool description and the ask mode custom instructions in the system prompt -- Add IPC types to roo-code.d.ts -- Add build VSIX workflow to pull requests (thanks @SmartManoj!) -- Improve apply_diff tool to intelligently deduce line numbers (thanks @samhvw8!) 
-- Fix command validation for shell array indexing (thanks @KJ7LNW!) -- Handle diagnostics that point at a directory URI (thanks @daniel-lxs!) -- Fix "Current ask promise was ignored" error (thanks @zxdvd!) - -## [3.16.6] - 2025-05-12 - -- Restore "Improve provider profile management in the external API" -- Fix to subtask sequencing (thanks @wkordalski!) -- Fix webview terminal output processing error (thanks @KJ7LNW!) -- Fix textarea empty string fallback logic (thanks @elianiva!) - -## [3.16.5] - 2025-05-10 - -- Revert "Improve provider profile management in the external API" until we track down a bug with defaults - -## [3.16.4] - 2025-05-09 - -- Improve provider profile management in the external API -- Enforce provider selection in OpenRouter by using 'only' parameter and disabling fallbacks (thanks @shariqriazz!) -- Fix display issues with long profile names (thanks @cannuri!) -- Prevent terminal focus theft on paste after command execution (thanks @MuriloFP!) -- Save OpenAI compatible custom headers correctly -- Fix race condition when updating prompts (thanks @elianiva!) -- Fix display issues in high contrast themes (thanks @zhangtony239!) -- Fix not being able to use specific providers on Openrouter (thanks @daniel-lxs!) -- Show properly formatted multi-line commands in preview (thanks @KJ7LNW!) -- Handle unsupported language errors gracefully in read_file tool (thanks @KJ7LNW!) -- Enhance focus styles in select-dropdown and fix docs URL (thanks @zhangtony239!) -- Properly handle mode name overflow in UI (thanks @elianiva!) -- Fix project MCP always allow issue (thanks @aheizi!) - -## [3.16.3] - 2025-05-08 - -- Revert Tailwind migration while we fix a few spots -- Add Elixir file extension support in language parser (thanks @pfitz!) - -## [3.16.2] - 2025-05-07 - -- Clarify XML tool use formatting instructions -- Error handling code cleanup (thanks @monkeyDluffy6017!) 
- -## [3.16.1] - 2025-05-07 - -- Add LiteLLM provider support -- Improve stability by detecting and preventing tool loops -- Add Dutch localization (thanks @Githubguy132010!) -- Add editor name to telemetry for better analytics -- Migrate to Tailwind CSS for improved UI consistency -- Fix footer button wrapping in About section on narrow screens (thanks @ecmasx!) -- Update evals defaults -- Update dependencies to latest versions - -## [3.16.0] - 2025-05-06 - -- Add vertical tab navigation to the settings (thanks @dlab-anton) -- Add Groq and Chutes API providers (thanks @shariqriazz) -- Clickable code references in code block (thanks @KJ7LNW) -- Improve accessibility of auto-approve toggles (thanks @Deon588) -- Requesty provider fixes (thanks @dtrugman) -- Fix migration and persistence of per-mode API profiles (thanks @alasano) -- Fix usage of `path.basename` in the extension webview (thanks @samhvw8) -- Fix display issue of the programming language dropdown in the code block component (thanks @zhangtony239) -- MCP server errors are now captured and shown in a new "Errors" tab (thanks @robertheadley) -- Error logging will no longer break MCP functionality if the server is properly connected (thanks @ksze) -- You can now toggle the `terminal.integrated.inheritEnv` VSCode setting directly for the Roo Code settings (thanks @KJ7LNW) -- Add `gemini-2.5-pro-preview-05-06` to the Vertex and Gemini providers (thanks @zetaloop) -- Ensure evals exercises are up-to-date before running evals (thanks @shariqriazz) -- Lots of general UI improvements (thanks @elianiva) -- Organize provider settings into separate components -- Improved icons and translations for the code block component -- Add support for tests that use ESM libraries -- Move environment detail generation to a separate module -- Enable prompt caching by default for supported Gemini models - -## [3.15.5] - 2025-05-05 - -- Update @google/genai to 0.12 (includes some streaming completion bug fixes) -- Rendering 
performance improvements for code blocks in chat (thanks @KJ7LNW) - -## [3.15.4] - 2025-05-04 - -- Fix a nasty bug that would cause Roo Code to hang, particularly in orchestrator mode -- Improve Gemini caching efficiency - -## [3.15.3] - 2025-05-02 - -- Terminal: Fix empty command bug -- Terminal: More robust process killing -- Optimize Gemini prompt caching for OpenRouter -- Chat view performance improvements - -## [3.15.2] - 2025-05-02 - -- Fix terminal performance issues -- Handle Mermaid validation errors -- Add customizable headers for OpenAI-compatible provider (thanks @mark-bradshaw!) -- Add config option to overwrite OpenAI's API base (thanks @GOODBOY008!) -- Fixes to padding and height issues when resizing the sidebar (thanks @zhangtony239!) -- Remove tool groups from orchestrator mode definition -- Add telemetry for title button clicks - -## [3.15.1] - 2025-04-30 - -- Capture stderr in execa-spawned processes -- Play sound only when action needed from the user (thanks @olearycrew) -- Make retries respect the global auto approve checkbox -- Fix a selection mode bug in the history view (thanks @jr) - -## [3.15.0] - 2025-04-30 - -- Add prompt caching to the Google Vertex provider (thanks @ashktn) -- Add a fallback mechanism for executing terminal commands if VSCode terminal shell integration fails -- Improve the UI/UX of code snippets in the chat (thanks @KJ7LNW) -- Add a reasoning effort setting for the OpenAI Compatible provider (thanks @mr-ryan-james) -- Allow terminal commands to be stopped directly from the chat UI -- Adjust chat view padding to accommodate small width layouts (thanks @zhangtony239) -- Fix file mentions for filenames containing spaces -- Improve the auto-approve toggle buttons for some high-contrast VSCode themes -- Offload expensive count token operations to a web worker (thanks @samhvw8) -- Improve support for multi-root workspaces (thanks @snoyiatk) -- Simplify and streamline Roo Code's quick actions -- Allow Roo Code settings to be 
imported from the welcome screen (thanks @julionav) -- Remove unused types (thanks @wkordalski) -- Improve the performance of mode switching (thanks @dlab-anton) -- Fix importing & exporting of custom modes (thanks @julionav) - -## [3.14.3] - 2025-04-25 - -- Add Boomerang Orchestrator as a built-in mode -- Improve home screen UI -- Make token count estimation more efficient to reduce gray screens -- Revert change to automatically close files after edit until we figure out how to make it work well with diagnostics -- Clean up settings data model -- Omit reasoning params for non-reasoning models -- Clearer documentation for adding settings (thanks @shariqriazz!) -- Fix word wrapping in Roo message title (thanks @zhangtony239!) -- Update default model id for Unbound from claude 3.5 to 3.7 (thanks @pugazhendhi-m!) - -## [3.14.2] - 2025-04-24 - -- Enable prompt caching for Gemini (with some improvements) -- Allow users to turn prompt caching on / off for Gemini 2.5 on OpenRouter -- Compress terminal output with backspace characters (thanks @KJ7LNW) -- Add Russian language (Спасибо @asychin) - -## [3.14.1] - 2025-04-24 - -- Disable Gemini caching while we investigate issues reported by the community. - -## [3.14.0] - 2025-04-23 - -- Add prompt caching for `gemini-2.5-pro-preview-03-25` in the Gemini provider (Vertex and OpenRouter coming soon!) -- Improve the search_and_replace and insert_content tools and bring them out of experimental, and deprecate append_to_file (thanks @samhvw8!) -- Use material icons for files and folders in mentions (thanks @elianiva!) -- Make the list_files tool more efficient and smarter about excluding directories like .git/ -- Fix file drag and drop on Windows and when using SSH tunnels (thanks @NyxJae!) 
-- Correctly revert changes and suggest alternative tools when write_to_file fails on a missing line count -- Allow interpolation of `workspace`, `mode`, `language`, `shell`, and `operatingSystem` into custom system prompt overrides (thanks @daniel-lxs!) -- Fix interpolation bug in the “add to context” code action (thanks @elianiva!) -- Preserve editor state and prevent tab unpinning during diffs (thanks @seedlord!) -- Improvements to icon rendering on Linux (thanks @elianiva!) -- Improvements to Requesty model list fetching (thanks @dtrugman!) -- Fix user feedback not being added to conversation history in API error state, redundant ‘TASK RESUMPTION’ prompts, and error messages not showing after cancelling API requests (thanks @System233!) -- Track tool use errors in evals -- Fix MCP hub error when dragging extension to another sidebar -- Improve display of long MCP tool arguments -- Fix redundant ‘TASK RESUMPTION’ prompts (thanks @System233!) -- Fix bug opening files when editor has no workspace root -- Make the VS Code LM provider show the correct model information (thanks @QuinsZouls!) -- Fixes to make the focusInput command more reliable (thanks @hongzio!) -- Better handling of aftercursor content in context mentions (thanks @elianiva!) -- Support injecting environment variables in MCP config (thanks @NamesMT!) -- Better handling of FakeAI “controller” object (thanks @wkordalski) -- Remove unnecessary calculation from VS Code LM provider (thanks @d-oit!) -- Allow Amazon Bedrock Marketplace ARNs (thanks @mlopezr!) -- Give better loading feedback on chat rows (thanks @elianiva!) -- Performance improvements to task size calculations -- Don’t immediately show a model ID error when changing API providers -- Fix apply_diff edge cases -- Use a more sensible task export icon -- Use path aliases in webview source files -- Display a warning when the system prompt is overridden -- Better progress indicator for apply_diff tools (thanks @qdaxb!) 
-- Fix terminal carriage return handling for correct progress bar display (thanks @Yikai-Liao!) - -## [3.13.2] - 2025-04-18 - -- Allow custom URLs for Gemini provider - -## [3.13.1] - 2025-04-18 - -- Support Gemini 2.5 Flash thinking mode (thanks @monotykamary) -- Make auto-approval toggle on/off states more obvious (thanks @sachasayan) -- Add telemetry for shell integration errors -- Fix the path of files dragging into the chat textarea on Windows (thanks @NyxJae) - -## [3.13.0] - 2025-04-17 - -- UI improvements to task header, chat view, history preview, and welcome view (thanks @sachasayan!) -- Add append_to_file tool for appending content to files (thanks @samhvw8!) -- Add Gemini 2.5 Flash Preview to Gemini and Vertex providers (thanks @nbihan-mediware!) -- Fix image support in Bedrock (thanks @Smartsheet-JB-Brown!) -- Make diff edits more resilient to models passing in incorrect parameters - -## [3.12.3] - 2025-04-17 - -- Fix character escaping issues in Gemini diff edits -- Support dragging and dropping tabs into the chat box (thanks @NyxJae!) -- Make sure slash commands only fire at the beginning of the chat box (thanks @logosstone!) - -## [3.12.2] - 2025-04-16 - -- Add OpenAI o3 & 4o-mini (thanks @PeterDaveHello!) -- Improve file/folder context mention UI (thanks @elianiva!) -- Improve diff error telemetry - -## [3.12.1] - 2025-04-16 - -- Bugfix to Edit button visibility in the select dropdowns - -## [3.12.0] - 2025-04-15 - -- Add xAI provider and expose reasoning effort options for Grok on OpenRouter (thanks Cline!) -- Make diff editing config per-profile and improve pre-diff string normalization -- Make checkpoints faster and more reliable -- Add a search bar to mode and profile select dropdowns (thanks @samhvw8!) -- Add telemetry for code action usage, prompt enhancement usage, and consecutive mistake errors -- Suppress zero cost values in the task header (thanks @do-it!) 
-- Make JSON parsing safer to avoid crashing the webview on bad input -- Allow users to bind a keyboard shortcut for accepting suggestions or input in the chat view (thanks @axkirillov!) - -## [3.11.17] - 2025-04-14 - -- Improvements to OpenAI cache reporting and cost estimates (thanks @monotykamary and Cline!) -- Visual improvements to the auto-approve toggles (thanks @sachasayan!) -- Bugfix to diff apply logic (thanks @avtc for the test case!) and telemetry to track errors going forward -- Fix race condition in capturing short-running terminal commands (thanks @KJ7LNW!) -- Fix eslint error (thanks @nobu007!) - -## [3.11.16] - 2025-04-14 - -- Add gpt-4.1, gpt-4.1-mini, and gpt-4.1-nano to the OpenAI provider -- Include model ID in environment details and when exporting tasks (thanks @feifei325!) - -## [3.11.15] - 2025-04-13 - -- Add ability to filter task history by workspace (thanks @samhvw8!) -- Fix Node.js version in the .tool-versions file (thanks @bogdan0083!) -- Fix duplicate suggested mentions for open tabs (thanks @samhvw8!) -- Fix Bedrock ARN validation and token expiry issue when using profiles (thanks @vagadiya!) -- Add Anthropic option to pass API token as Authorization header instead of X-Api-Key (thanks @mecab!) -- Better documentation for adding new settings (thanks @KJ7LNW!) -- Localize package.json (thanks @samhvw8!) -- Add option to hide the welcome message and fix the background color for the new profile dialog (thanks @zhangtony239!) -- Restore the focus ring for the VSCodeButton component (thanks @pokutuna!) - -## [3.11.14] - 2025-04-11 - -- Support symbolic links in rules folders to directories and other symbolic links (thanks @taisukeoe!) -- Stronger enforcement of the setting to always read full files instead of doing partial reads - -## [3.11.13] - 2025-04-11 - -- Loads of terminal improvements: command delay, PowerShell counter, and ZSH EOL mark (thanks @KJ7LNW!) -- Add file context tracking system (thanks @samhvw8 and @canvrno!) 
- Improved display of diff errors + easy copying for investigation
- Fixes to .vscodeignore (thanks @franekp!)
- Fix a zh-CN translation for model capabilities (thanks @zhangtony239!)
- Rename AWS Bedrock to Amazon Bedrock (thanks @ronyblum!)
- Update extension title and description (thanks @StevenTCramer!)

## [3.11.12] - 2025-04-09

- Make Grok3 streaming work with OpenAI Compatible (thanks @amittell!)
- Tweak diff editing logic to make it more tolerant of model errors

## [3.11.11] - 2025-04-09

- Fix highlighting interaction with mode/profile dropdowns (thanks @atlasgong!)
- Add the ability to set Host header and legacy OpenAI API in the OpenAI-compatible provider for better proxy support
- Improvements to TypeScript, C++, Go, Java, Python tree-sitter parsers (thanks @KJ7LNW!)
- Fixes to terminal working directory logic (thanks @KJ7LNW!)
- Improve readFileTool XML output format (thanks @KJ7LNW!)
- Add o1-pro support (thanks @arthurauffray!)
- Follow symlinked rules files/directories to allow for more flexible rule setups
- Focus Roo Code in the sidebar when running tasks in the sidebar via the API
- Improve subtasks UI

## [3.11.10] - 2025-04-08

- Fix bug where nested .roo/rules directories are not respected properly (thanks @taisukeoe!)
- Handle long command output more efficiently in the chat row (thanks @samhvw8!)
- Fix cache usage tracking for OpenAI-compatible providers
- Add custom translation instructions for zh-CN (thanks @System233!)
- Code cleanup after making rate-limits per-profile (thanks @ross!)

## [3.11.9] - 2025-04-07

- Rate-limit setting updated to be per-profile (thanks @ross and @olweraltuve!)
- You can now place multiple rules files in the .roo/rules/ and .roo/rules-{mode}/ folders (thanks @upamune!)
- Prevent unnecessary autoscroll when buttons appear (thanks @shtse8!)
- Add Gemini 2.5 Pro Preview to Vertex AI (thanks @nbihan-mediware!)
-- Tidy up following ClineProvider refactor (thanks @diarmidmackenzie!) -- Clamp negative line numbers when reading files (thanks @KJ7LNW!) -- Enhance Rust tree-sitter parser with advanced language structures (thanks @KJ7LNW!) -- Persist settings on api.setConfiguration (thanks @gtaylor!) -- Add deep links to settings sections -- Add command to focus Roo Code input field (thanks @axkirillov!) -- Add resize and hover actions to the browser (thanks @SplittyDev!) -- Add resumeTask and isTaskInHistory to the API (thanks @franekp!) -- Fix bug displaying boolean/numeric suggested answers -- Dynamic Vite port detection for webview development (thanks @KJ7LNW!) - -## [3.11.8] - 2025-04-05 - -- Improve combineApiRequests performance to reduce gray screens of death (thanks @kyle-apex!) -- Add searchable dropdown to API config profiles on the settings screen (thanks @samhvw8!) -- Add workspace tracking to history items in preparation for future filtering (thanks @samhvw8!) -- Fix search highlighting UI in history search (thanks @samhvw8!) -- Add support for .roorules and give deprecation warning for .clinerules (thanks @upamune!) -- Fix nodejs version format in .tool-versions file (thanks @upamune!) - -## [3.11.7] - 2025-04-04 - -- Improve file tool context formatting and diff error guidance -- Improve zh-TW localization (thanks @PeterDaveHello!) -- Implement reference counting for McpHub disposal -- Update buttons to be more consistent (thanks @kyle-apex!) -- Improve zh-CN localization (thanks @System233!) - -## [3.11.6] - 2025-04-04 - -- Add the gemini 2.5 pro preview model with upper bound pricing - -## [3.11.5] - 2025-04-03 - -- Add prompt caching for Amazon Bedrock (thanks @Smartsheet-JB-Brown!) -- Add support for configuring the current working directory of MCP servers (thanks @shoopapa!) -- Add profile management functions to API (thanks @gtaylor!) -- Improvements to diff editing functionality, tests, and error messages (thanks @p12tic!) 
-- Fix for follow-up questions grabbing the focus (thanks @diarmidmackenzie!) -- Show menu buttons when popping the extension out into a new tab (thanks @benny123tw!) - -## [3.11.4] - 2025-04-02 - -- Correctly post state to webview when the current task is cleared (thanks @wkordalski!) -- Fix unit tests to run properly on Windows (thanks @StevenTCramer!) -- Tree-sitter enhancements: TSX, TypeScript, JSON, and Markdown support (thanks @KJ7LNW!) -- Fix issue with line number stripping for deletions in apply_diff -- Update history selection mode button spacing (thanks @kyle-apex!) -- Limit dropdown menu height to 80% of the viewport (thanks @axmo!) -- Update dependencies via `npm audit fix` (thanks @PeterDaveHello!) -- Enable model select when api fails (thanks @kyle-apex!) -- Fix issue where prompts and settings tabs were not scrollable when accessed from dropdown menus -- Update AWS region dropdown menu to the most recent data (thanks @Smartsheet-JB-Brown!) -- Fix prompt enhancement for Bedrock (thanks @Smartsheet-JB-Brown!) -- Allow processes to access the Roo Code API via a unix socket -- Improve zh-TW Traditional Chinese translations (thanks @PeterDaveHello!) -- Add support for Azure AI Inference Service with DeepSeek-V3 model (thanks @thomasjeung!) -- Fix off-by-one error in tree-sitter line numbers -- Remove the experimental unified diff -- Make extension icon more visible in different themes - -## [3.11.3] - 2025-03-31 - -- Revert mention changes in case they're causing performance issues/crashes - -## [3.11.2] - 2025-03-31 - -- Fix bug in loading Requesty key balance -- Fix bug with Bedrock inference profiles -- Update the webview when changing settings via the API -- Refactor webview messages code (thanks @diarmidmackenzie!) 
- -## [3.11.1] - 2025-03-30 - -- Relax provider profiles schema and add telemetry - -## [3.11.0] - 2025-03-30 - -- Replace single-block-diff with multi-block-diff fast editing strategy -- Support project-level MCP config in .roo/mcp.json (thanks @aheizi!) -- Show OpenRouter and Requesty key balance on the settings screen -- Support import/export of settings -- Add pinning and sorting for API configuration dropdown (thanks @jwcraig!) -- Add Gemini 2.5 Pro to GCP Vertex AI provider (thanks @nbihan-mediware!) -- Smarter retry logic for Gemini -- Fix Gemini command escaping -- Support @-mentions of files with spaces in the name (thanks @samhvw8!) -- Improvements to partial file reads (thanks @KJ7LNW!) -- Fix list_code_definition_names to support files (thanks @KJ7LNW!) -- Refactor tool-calling logic to make the code a lot easier to work with (thanks @diarmidmackenzie, @bramburn, @KJ7LNW, and everyone else who helped!) -- Prioritize “Add to Context” in the code actions and include line numbers (thanks @samhvw8!) -- Add an activation command that other extensions can use to interface with Roo Code (thanks @gtaylor!) -- Preserve language characters in file @-mentions (thanks @aheizi!) -- Browser tool improvements (thanks @afshawnlotfi!) -- Display info about partial reads in the chat row -- Link to the settings page from the auto-approve toolbar -- Link to provider docs from the API options -- Fix switching profiles to ensure only the selected profile is switched (thanks @feifei325!) -- Allow custom o3-mini- model from OpenAI-compatible providers (thanks @snoyiatk!) -- Edit suggested answers before accepting them (thanks @samhvw8!) - -## [3.10.5] - 2025-03-25 - -- Updated value of max tokens for gemini-2.5-pro-03-25 to 65,536 (thanks @linegel!) -- Fix logic around when we fire task completion events - -## [3.10.4] - 2025-03-25 - -- Dynamically fetch instructions for creating/editing custom modes and MCP servers (thanks @diarmidmackenzie!) 
-- Added Gemini 2.5 Pro model to Google Gemini provider (thanks @samsilveira!) -- Add settings to control whether to auto-approve reads and writes outside of the workspace -- Update UX for chat text area (thanks @chadgauth!) -- Support a custom storage path for tasks (thanks @Chenjiayuan195!) -- Add a New Task command in the Command Palette (thanks @qdaxb!) -- Add R1 support checkbox to Open AI compatible provider to support QWQ (thanks @teddyOOXX!) -- Support test declarations in TypeScript tree-sitter queries (thanks @KJ7LNW!) -- Add Bedrock support for application-inference-profile (thanks @maekawataiki!) -- Rename and migrate global MCP and modes files (thanks @StevenTCramer!) -- Add watchPaths option to McpHub for file change detection (thanks @01Rian!) -- Read image responses from MCP calls (thanks @nevermorec!) -- Add taskCreated event to API and subscribe to Cline events earlier (thanks @wkordalski!) -- Fixes to numeric formatting suffix internationalization (thanks @feifei325!) -- Fix open tab support in the context mention suggestions (thanks @aheizi!) -- Better display of OpenRouter “overloaded” error messages -- Fix browser tool visibility in system prompt preview (thanks @cannuri!) -- Fix the supportsPromptCache value for OpenAI models (thanks @PeterDaveHello!) -- Fix readme links to docs (thanks @kvokka!) -- Run ‘npm audit fix’ on all of our libraries - -## [3.10.3] - 2025-03-23 - -- Update the welcome page to provide 1-click OAuth flows with LLM routers (thanks @dtrugman!) -- Switch to a more direct method of tracking OpenRouter tokens/spend -- Make partial file reads backwards-compatible with custom system prompts and give users more control over the chunk size -- Fix issues where questions and suggestions weren’t showing up for non-streaming models and were hard to read in some themes -- A variety of fixes and improvements to experimental multi-block diff (thanks @KJ7LNW!) -- Fix opacity of drop-down menus in settings (thanks @KJ7LNW!) 
- Fix bugs with reading and mentioning binary files like PDFs
- Fix the pricing information for OpenRouter free models (thanks @Jdo300!)
- Fix an issue with our unit tests on Windows (thanks @diarmidmackenzie!)
- Fix a maxTokens issue for the Unbound provider (thanks @pugazhendhi-m!)
- Fix a line number issue with partial file reads (thanks @samhvw8!)

## [3.10.2] - 2025-03-21

- Fixes to context mentions on Windows
- Fixes to German translations (thanks @cannuri!)
- Fixes to telemetry banner internationalization
- Sonnet 3.7 non-thinking now correctly uses 8192 max output tokens

## [3.10.1] - 2025-03-20

- Make the suggested responses optional to not break overridden system prompts

## [3.10.0] - 2025-03-20

- Suggested responses to questions (thanks samhvw8!)
- Support for reading large files in chunks (thanks samhvw8!)
- More consistent @-mention lookups of files and folders
- Consolidate code actions into a submenu (thanks samhvw8!)
- Fix MCP error logging (thanks aheizi!)
- Improvements to search_files tool formatting and logic (thanks KJ7LNW!)
- Fix changelog formatting in GitHub Releases (thanks pdecat!)
- Add fake provider for integration tests (thanks franekp!)
- Reflect Cross-region inference option in ap-xx region (thanks Yoshino-Yukitaro!)
- Fix bug that was causing task history to be lost when using WSL

## [3.9.2] - 2025-03-19

- Update GitHub Actions workflow to automatically create GitHub Releases (thanks @pdecat!)
- Correctly persist the text-to-speech speed state (thanks @heyseth!)
- Fixes to French translations (thanks @arthurauffray!)
- Optimize build time for local development (thanks @KJ7LNW!)
-- VSCode theme fixes for select, dropdown and command components -- Bring back the ability to manually enter a model name in the model picker -- Fix internationalization of the announcement title and the browser - -## [3.9.1] - 2025-03-18 - -- Pass current language to system prompt correctly so Roo thinks and speaks in the selected language - -## [3.9.0] - 2025-03-18 - -- Internationalize Roo Code into Catalan, German, Spanish, French, Hindi, Italian, Japanese, Korean, Polish, Portuguese, Turkish, Vietnamese, Simplified Chinese, and Traditional Chinese (thanks @feifei325!) -- Bring back support for MCP over SSE (thanks @aheizi!) -- Add a text-to-speech option to have Roo talk to you as it works (thanks @heyseth!) -- Choose a specific provider when using OpenRouter (thanks PhunkyBob!) -- Support batch deletion of task history (thanks @aheizi!) -- Internationalize Human Relay, adjust the layout, and make it work on the welcome screen (thanks @NyxJae!) -- Fix shell integration race condition (thanks @KJ7LNW!) -- Fix display updating for Bedrock custom ARNs that are prompt routers (thanks @Smartsheet-JB-Brown!) -- Fix to exclude search highlighting when copying items from task history (thanks @im47cn!) -- Fix context mentions to work with multiple-workspace projects (thanks @teddyOOXX!) -- Fix to task history saving when running multiple Roos (thanks @samhvw8!) -- Improve task deletion when underlying files are missing (thanks @GitlyHallows!) -- Improve support for NixOS & direnv (thanks @wkordalski!) -- Fix wheel scrolling when Roo is opened in editor tabs (thanks @GitlyHallows!) -- Don’t automatically mention the file when using the "Add to context" code action (thanks @qdaxb!) -- Expose task stack in `RooCodeAPI` (thanks @franekp!) 
-- Give the models visibility into the current task's API cost - -## [3.8.6] - 2025-03-13 - -- Revert SSE MCP support while we debug some config validation issues - -## [3.8.5] - 2025-03-12 - -- Refactor terminal architecture to address critical issues with the current design (thanks @KJ7LNW!) -- MCP over SSE (thanks @aheizi!) -- Support for remote browser connections (thanks @afshawnlotfi!) -- Preserve parent-child relationship when cancelling subtasks (thanks @cannuri!) -- Custom baseUrl for Google AI Studio Gemini (thanks @dqroid!) -- PowerShell-specific command handling (thanks @KJ7LNW!) -- OpenAI-compatible DeepSeek/QwQ reasoning support (thanks @lightrabbit!) -- Anthropic-style prompt caching in the OpenAI-compatible provider (thanks @dleen!) -- Add Deepseek R1 for Amazon Bedrock (thanks @ATempsch!) -- Fix MarkdownBlock text color for Dark High Contrast theme (thanks @cannuri!) -- Add gemini-2.0-pro-exp-02-05 model to vertex (thanks @shohei-ihaya!) -- Bring back progress status for multi-diff edits (thanks @qdaxb!) -- Refactor alert dialog styles to use the correct vscode theme (thanks @cannuri!) -- Custom ARNs in Amazon Bedrock (thanks @Smartsheet-JB-Brown!) -- Update MCP servers directory path for platform compatibility (thanks @hannesrudolph!) -- Fix browser system prompt inclusion rules (thanks @cannuri!) -- Publish git tags to GitHub from CI (thanks @pdecat!) -- Fixes to OpenAI-style cost calculations (thanks @dtrugman!) -- Fix to allow using an excluded directory as your working directory (thanks @Szpadel!) -- Kotlin language support in list_code_definition_names tool (thanks @kohii!) -- Better handling of diff application errors (thanks @qdaxb!) -- Update Bedrock prices to the latest (thanks @Smartsheet-JB-Brown!) 
-- Fixes to OpenRouter custom baseUrl support -- Fix usage tracking for SiliconFlow and other providers that include usage on every chunk -- Telemetry for checkpoint save/restore/diff and diff strategies - -## [3.8.4] - 2025-03-09 - -- Roll back multi-diff progress indicator temporarily to fix a double-confirmation in saving edits -- Add an option in the prompts tab to save tokens by disabling the ability to ask Roo to create/edit custom modes for you (thanks @hannesrudolph!) - -## [3.8.3] - 2025-03-09 - -- Fix VS Code LM API model picker truncation issue - -## [3.8.2] - 2025-03-08 - -- Create an auto-approval toggle for subtask creation and completion (thanks @shaybc!) -- Show a progress indicator when using the multi-diff editing strategy (thanks @qdaxb!) -- Add o3-mini support to the OpenAI-compatible provider (thanks @yt3trees!) -- Fix encoding issue where unreadable characters were sometimes getting added to the beginning of files -- Fix issue where settings dropdowns were getting truncated in some cases - -## [3.8.1] - 2025-03-07 - -- Show the reserved output tokens in the context window visualization -- Improve the UI of the configuration profile dropdown (thanks @DeXtroTip!) -- Fix bug where custom temperature could not be unchecked (thanks @System233!) -- Fix bug where decimal prices could not be entered for OpenAI-compatible providers (thanks @System233!) -- Fix bug with enhance prompt on Sonnet 3.7 with a high thinking budget (thanks @moqimoqidea!) -- Fix bug with the context window management for thinking models (thanks @ReadyPlayerEmma!) -- Fix bug where checkpoints were no longer enabled by default -- Add extension and VSCode versions to telemetry - -## [3.8.0] - 2025-03-07 - -- Add opt-in telemetry to help us improve Roo Code faster (thanks Cline!) -- Fix terminal overload / gray screen of death, and other terminal issues -- Add a new experimental diff editing strategy that applies multiple diff edits at once (thanks @qdaxb!) 
-- Add support for a .rooignore to prevent Roo Code from read/writing certain files, with a setting to also exclude them from search/lists (thanks Cline!) -- Update the new_task tool to return results to the parent task on completion, supporting better orchestration (thanks @shaybc!) -- Support running Roo in multiple editor windows simultaneously (thanks @samhvw8!) -- Make checkpoints asynchronous and exclude more files to speed them up -- Redesign the settings page to make it easier to navigate -- Add credential-based authentication for Vertex AI, enabling users to easily switch between Google Cloud accounts (thanks @eonghk!) -- Update the DeepSeek provider with the correct baseUrl and track caching correctly (thanks @olweraltuve!) -- Add a new “Human Relay” provider that allows you to manually copy information to a Web AI when needed, and then paste the AI's response back into Roo Code (thanks @NyxJae)! -- Add observability for OpenAI providers (thanks @refactorthis!) -- Support speculative decoding for LM Studio local models (thanks @adamwlarson!) -- Improve UI for mode/provider selectors in chat -- Improve styling of the task headers (thanks @monotykamary!) -- Improve context mention path handling on Windows (thanks @samhvw8!) - -## [3.7.12] - 2025-03-03 - -- Expand max tokens of thinking models to 128k, and max thinking budget to over 100k (thanks @monotykamary!) -- Fix issue where keyboard mode switcher wasn't updating API profile (thanks @aheizi!) 
-- Use the count_tokens API in the Anthropic provider for more accurate context window management -- Default middle-out compression to on for OpenRouter -- Exclude MCP instructions from the prompt if the mode doesn't support MCP -- Add a checkbox to disable the browser tool -- Show a warning if checkpoints are taking too long to load -- Update the warning text for the VS LM API -- Correctly populate the default OpenRouter model on the welcome screen - -## [3.7.11] - 2025-03-02 - -- Don't honor custom max tokens for non thinking models -- Include custom modes in mode switching keyboard shortcut -- Support read-only modes that can run commands - -## [3.7.10] - 2025-03-01 - -- Add Gemini models on Vertex AI (thanks @ashktn!) -- Keyboard shortcuts to switch modes (thanks @aheizi!) -- Add support for Mermaid diagrams (thanks Cline!) - -## [3.7.9] - 2025-03-01 - -- Delete task confirmation enhancements -- Smarter context window management -- Prettier thinking blocks -- Fix maxTokens defaults for Claude 3.7 Sonnet models -- Terminal output parsing improvements (thanks @KJ7LNW!) -- UI fix to dropdown hover colors (thanks @SamirSaji!) -- Add support for Claude Sonnet 3.7 thinking via Vertex AI (thanks @lupuletic!) - -## [3.7.8] - 2025-02-27 - -- Add Vertex AI prompt caching support for Claude models (thanks @aitoroses and @lupuletic!) -- Add gpt-4.5-preview -- Add an advanced feature to customize the system prompt - -## [3.7.7] - 2025-02-27 - -- Graduate checkpoints out of beta -- Fix enhance prompt button when using Thinking Sonnet -- Add tooltips to make what buttons do more obvious - -## [3.7.6] - 2025-02-26 - -- Handle really long text better in the ChatRow similar to TaskHeader (thanks @joemanley201!) 
- Support multiple files in drag-and-drop
- Truncate search_files output to avoid crashing the extension
- Better OpenRouter error handling (no more "Provider Error")
- Add slider to control max output tokens for thinking models

## [3.7.5] - 2025-02-26

- Fix context window truncation math (see [#1173](https://github.com/RooCodeInc/Roo-Code/issues/1173))
- Fix various issues with the model picker (thanks @System233!)
- Fix model input / output cost parsing (thanks @System233!)
- Add drag-and-drop for files
- Enable the "Thinking Budget" slider for Claude 3.7 Sonnet on OpenRouter

## [3.7.4] - 2025-02-25

- Fix a bug that prevented the "Thinking" setting from properly updating when switching profiles.

## [3.7.3] - 2025-02-25

- Support for ["Thinking"](https://docs.anthropic.com/en/docs/build-with-claude/extended-thinking) Sonnet 3.7 when using the Anthropic provider.

## [3.7.2] - 2025-02-24

- Fix computer use and prompt caching for OpenRouter's `anthropic/claude-3.7-sonnet:beta` (thanks @cte!)
- Fix sliding window calculations for Sonnet 3.7 that were causing a context window overflow (thanks @cte!)
- Encourage diff editing more strongly in the system prompt (thanks @hannesrudolph!)

## [3.7.1] - 2025-02-24

- Add Amazon Bedrock support for Sonnet 3.7 and update some defaults to Sonnet 3.7 instead of 3.5

## [3.7.0] - 2025-02-24

- Introducing Roo Code 3.7, with support for the new Claude Sonnet 3.7. Because who cares about skipping version numbers anymore? Thanks @lupuletic and @cte for the PRs!

## [3.3.26] - 2025-02-27

- Adjust the default prompt for Debug mode to focus more on diagnosis and to require user confirmation before moving on to implementation

## [3.3.25] - 2025-02-21

- Add a "Debug" mode that specializes in debugging tricky problems (thanks [Ted Werbel](https://x.com/tedx_ai/status/1891514191179309457) and [Carlos E. Perez](https://x.com/IntuitMachine/status/1891516362486337739)!)
- Add an experimental "Power Steering" option to significantly improve adherence to role definitions and custom instructions

## [3.3.24] - 2025-02-20

- Fixed a bug with region selection preventing Amazon Bedrock profiles from being saved (thanks @oprstchn!)
- Updated the price of gpt-4o (thanks @marvijo-code!)

## [3.3.23] - 2025-02-20

- Handle errors more gracefully when reading custom instructions from files (thanks @joemanley201!)
- Bug fix to hitting "Done" on settings page with unsaved changes (thanks @System233!)

## [3.3.22] - 2025-02-20

- Improve the Provider Settings configuration with clear Save buttons and warnings about unsaved changes (thanks @System233!)
- Correctly parse `<think>` reasoning tags from Ollama models (thanks @System233!)
- Add support for setting custom preferred languages on the Prompts tab, as well as adding Catalan to the list of languages (thanks @alarno!)
- Add a button to delete MCP servers (thanks @hannesrudolph!)
- Fix a bug where the button to copy the system prompt preview always copied the Code mode version
- Fix a bug where the .roomodes file was not automatically created when adding custom modes from the Prompts tab
- Allow setting a wildcard (`*`) to auto-approve all command execution (use with caution!)

## [3.3.21] - 2025-02-17

- Fix input box revert issue and configuration loss during profile switch (thanks @System233!)
- Fix default preferred language for zh-cn and zh-tw (thanks @System233!)
- Fix Mistral integration (thanks @d-oit!)
- Feature to mention `@terminal` to pull terminal output into context (thanks Cline!)
- Fix system prompt to make sure Roo knows about all available modes
- Enable streaming mode for OpenAI o1

## [3.3.20] - 2025-02-14

- Support project-specific custom modes in a .roomodes file
- Add more Mistral models (thanks @d-oit and @bramburn!)
-- By popular request, make it so Ask mode can't write to Markdown files and is purely for chatting with -- Add a setting to control the number of open editor tabs to tell the model about (665 is probably too many!) -- Fix race condition bug with entering API key on the welcome screen - -## [3.3.19] - 2025-02-12 - -- Fix a bug where aborting in the middle of file writes would not revert the write -- Honor the VS Code theme for dialog backgrounds -- Make it possible to clear out the default custom instructions for built-in modes -- Add a help button that links to our new documentation site (which we would love help from the community to improve!) -- Switch checkpoints logic to use a shadow git repository to work around issues with hot reloads and polluting existing repositories (thanks Cline for the inspiration!) - -## [3.3.18] - 2025-02-11 - -- Add a per-API-configuration model temperature setting (thanks @joemanley201!) -- Add retries for fetching usage stats from OpenRouter (thanks @jcbdev!) -- Fix bug where disabled MCP servers would not show up in the settings on initialization (thanks @MuriloFP!) -- Add the Requesty provider and clean up a lot of shared model picker code (thanks @samhvw8!) -- Add a button on the Prompts tab to copy the full system prompt to the clipboard (thanks @mamertofabian!) -- Fix issue where Ollama/LMStudio URLs would flicker back to previous while entering them in settings -- Fix logic error where automatic retries were waiting twice as long as intended -- Rework the checkpoints code to avoid conflicts with file locks on Windows (sorry for the hassle!) 
- -## [3.3.17] - 2025-02-09 - -- Fix the restore checkpoint popover -- Unset git config that was previously set incorrectly by the checkpoints feature - -## [3.3.16] - 2025-02-09 - -- Support Volcano Ark platform through the OpenAI-compatible provider -- Fix jumpiness while entering API config by updating on blur instead of input -- Add tooltips on checkpoint actions and fix an issue where checkpoints were overwriting existing git name/email settings - thanks for the feedback! - -## [3.3.15] - 2025-02-08 - -- Improvements to MCP initialization and server restarts (thanks @MuriloFP and @hannesrudolph!) -- Add a copy button to the recent tasks (thanks @hannesrudolph!) -- Improve the user experience for adding a new API profile -- Another significant fix to API profile switching on the settings screen -- Opt-in experimental version of checkpoints in the advanced settings - -## [3.3.14] - -- Should have skipped floor 13 like an elevator. This fixes the broken 3.3.13 release by reverting some changes to the deployment scripts. - -## [3.3.13] - -- Ensure the DeepSeek r1 model works with Ollama (thanks @sammcj!) -- Enable context menu commands in the terminal (thanks @samhvw8!) -- Improve sliding window truncation strategy for models that do not support prompt caching (thanks @nissa-seru!) -- First step of a more fundamental fix to the bugs around switching API profiles. If you've been having issues with this please try again and let us know if works any better! More to come soon, including fixing the laggy text entry in provider settings. - -## [3.3.12] - -- Bug fix to changing a mode's API configuration on the prompts tab -- Add new Gemini models - -## [3.3.11] - -- Safer shell profile path check to avoid an error on Windows -- Autocomplete for slash commands - -## [3.3.10] - -- Add shortcuts to the currently open tabs in the "Add File" section of @-mentions (thanks @olup!) -- Fix pricing for o1-mini (thanks @hesara!) 
-- Fix context window size calculation (thanks @MuriloFP!) -- Improvements to experimental unified diff strategy and selection logic in code actions (thanks @nissa-seru!) -- Enable markdown formatting in o3 and o1 (thanks @nissa-seru!) -- Improved terminal shell detection logic (thanks @canvrno for the original and @nissa-seru for the port!) -- Fix occasional errors when switching between API profiles (thanks @samhvw8!) -- Visual improvements to the list of modes on the prompts tab -- Fix double-scrollbar in provider dropdown -- Visual cleanup to the list of modes on the prompts tab -- Improvements to the default prompts for Architect and Ask mode -- Allow switching between modes with slash messages like `/ask why is the sky blue?` - -## [3.3.9] - -- Add o3-mini-high and o3-mini-low - -## [3.3.8] - -- Fix o3-mini in the Glama provider (thanks @Punkpeye!) -- Add the option to omit instructions for creating MCP servers from the system prompt (thanks @samhvw8!) -- Fix a bug where renaming API profiles without actually changing the name would delete them (thanks @samhvw8!) - -## [3.3.7] - -- Support for o3-mini (thanks @shpigunov!) -- Code Action improvements to allow selecting code and adding it to context, plus bug fixes (thanks @samhvw8!) -- Ability to include a message when approving or rejecting tool use (thanks @napter!) -- Improvements to chat input box styling (thanks @psv2522!) -- Capture reasoning from more variants of DeepSeek R1 (thanks @Szpadel!) -- Use an exponential backoff for API retries (if delay after first error is 5s, delay after second consecutive error will be 10s, then 20s, etc) -- Add a slider in advanced settings to enable rate limiting requests to avoid overloading providers (i.e. 
wait at least 10 seconds between API requests) -- Prompt tweaks to make Roo better at creating new custom modes for you - -## [3.3.6] - -- Add a "new task" tool that allows Roo to start new tasks with an initial message and mode -- Fix a bug that was preventing the use of qwen-max and potentially other OpenAI-compatible providers (thanks @Szpadel!) -- Add support for perplexity/sonar-reasoning (thanks @Szpadel!) -- Visual fixes to dropdowns (thanks @psv2522!) -- Add the [Unbound](https://getunbound.ai/) provider (thanks @vigneshsubbiah16!) - -## [3.3.5] - -- Make information about the conversation's context window usage visible in the task header for humans and in the environment for models (thanks @MuriloFP!) -- Add checkboxes to auto-approve mode switch requests (thanks @MuriloFP!) -- Add new experimental editing tools `insert_content` (for inserting blocks of text at a line number) and `search_and_replace` (for replacing all instances of a phrase or regex) to complement diff editing and whole file editing (thanks @samhvw8!) -- Improved DeepSeek R1 support by capturing reasoning from DeepSeek API as well as more OpenRouter variants, not using system messages, and fixing a crash on empty chunks. Still depends on the DeepSeek API staying up but we'll be in a better place when it does! (thanks @Szpadel!) - -## [3.3.4] - -- Add per-server MCP network timeout configuration ranging from 15 seconds to an hour -- Speed up diff editing (thanks @hannesrudolph and @KyleHerndon!) -- Add option to perform explain/improve/fix code actions either in the existing task or a new task (thanks @samhvw8!) - -## [3.3.3] - -- Throw errors sooner when a mode tries to write a restricted file -- Styling improvements to the mode/configuration dropdowns (thanks @psv2522!) 
- -## [3.3.2] - -- Add a dropdown to select the API configuration for a mode in the Prompts tab -- Fix bug where always allow wasn't showing up for MCP tools -- Improve OpenRouter DeepSeek-R1 integration by setting temperature to the recommended 0.6 and displaying the reasoning output (thanks @Szpadel - it's really fascinating to watch!) -- Allow specifying a custom OpenRouter base URL (thanks @dairui1!) -- Make the UI for nested settings nicer (thanks @PretzelVector!) - -## [3.3.1] - -- Fix issue where the terminal management system was creating unnecessary new terminals (thanks @evan-fannin!) -- Fix bug where the saved API provider for a mode wasn't being selected after a mode switch command - -## [3.3.0] - -- Native VS Code code actions support with quick fixes and refactoring options -- Modes can now request to switch to other modes when needed -- Ask and Architect modes can now edit markdown files -- Custom modes can now be restricted to specific file patterns (for example, a technical writer who can only edit markdown files 👋) -- Support for configuring the Bedrock provider with AWS Profiles -- New Roo Code community Discord at https://roocode.com/discord! - -## [3.2.8] - -- Fixed bug opening custom modes settings JSON -- Reverts provider key entry back to checking onInput instead of onChange to hopefully address issues entering API keys (thanks @samhvw8!) -- Added explicit checkbox to use Azure for OpenAI compatible providers (thanks @samhvw8!) -- Fixed Glama usage reporting (thanks @punkpeye!) -- Added Llama 3.3 70B Instruct model to the Amazon Bedrock provider options (thanks @Premshay!) - -## [3.2.7] - -- Fix bug creating new configuration profiles - -## [3.2.6] - -- Fix bug with role definition overrides for built-in modes - -## [3.2.5] - -- Added gemini flash thinking 01-21 model and a few visual fixes (thanks @monotykamary!) 
- -## [3.2.4] - -- Only allow use of the diff tool if it's enabled in settings - -## [3.2.3] - -- Fix bug where language selector wasn't working - -## [3.2.0 - 3.2.2] - -- **Name Change From Roo Cline to Roo Code:** We're excited to announce our new name! After growing beyond 50,000 installations, we've rebranded from Roo Cline to Roo Code to better reflect our identity as we chart our own course. - -- **Custom Modes:** Create your own personas for Roo Code! While our built-in modes (Code, Architect, Ask) are still here, you can now shape entirely new ones: - - Define custom prompts - - Choose which tools each mode can access - - Create specialized assistants for any workflow - - Just type "Create a new mode for " or visit the Prompts tab in the top menu to get started - -Join us at https://www.reddit.com/r/RooCode to share your custom modes and be part of our next chapter! - -## [3.1.7] - -- DeepSeek-R1 support (thanks @philipnext!) -- Experimental new unified diff algorithm can be enabled in settings (thanks @daniel-lxs!) -- More fixes to configuration profiles (thanks @samhvw8!) - -## [3.1.6] - -- Add Mistral (thanks Cline!) -- Fix bug with VSCode LM configuration profile saving (thanks @samhvw8!) - -## [3.1.4 - 3.1.5] - -- Bug fixes to the auto approve menu - -## [3.1.3] - -- Add auto-approve chat bar (thanks Cline!) -- Fix bug with VS Code Language Models integration - -## [3.1.2] - -- Experimental support for VS Code Language Models including Copilot (thanks @RaySinner / @julesmons!) -- Fix bug related to configuration profile switching (thanks @samhvw8!) -- Improvements to fuzzy search in mentions, history, and model lists (thanks @samhvw8!) -- PKCE support for Glama (thanks @punkpeye!) 
-- Use 'developer' message for o1 system prompt - -## [3.1.1] - -- Visual fixes to chat input and settings for the light+ themes - -## [3.1.0] - -- You can now customize the role definition and instructions for each chat mode (Code, Architect, and Ask), either through the new Prompts tab in the top menu or mode-specific .clinerules-mode files. Prompt Enhancements have also been revamped: the "Enhance Prompt" button now works with any provider and API configuration, giving you the ability to craft messages with fully customizable prompts for even better results. -- Add a button to copy markdown out of the chat - -## [3.0.3] - -- Update required vscode engine to ^1.84.0 to match cline - -## [3.0.2] - -- A couple more tiny tweaks to the button alignment in the chat input - -## [3.0.1] - -- Fix the reddit link and a small visual glitch in the chat input - -## [3.0.0] - -- This release adds chat modes! Now you can ask Roo Code questions about system architecture or the codebase without immediately jumping into writing code. You can even assign different API configuration profiles to each mode if you prefer to use different models for thinking vs coding. Would love feedback in the new Roo Code Reddit! https://www.reddit.com/r/RooCode - -## [2.2.46] - -- Only parse @-mentions in user input (not in files) - -## [2.2.45] - -- Save different API configurations to quickly switch between providers and settings (thanks @samhvw8!) - -## [2.2.44] - -- Automatically retry failed API requests with a configurable delay (thanks @RaySinner!) - -## [2.2.43] - -- Allow deleting single messages or all subsequent messages - -## [2.2.42] - -- Add a Git section to the context mentions - -## [2.2.41] - -- Checkbox to disable streaming for OpenAI-compatible providers - -## [2.2.40] - -- Add the Glama provider (thanks @punkpeye!) - -## [2.2.39] - -- Add toggle to enable/disable the MCP-related sections of the system prompt (thanks @daniel-lxs!) 
- -## [2.2.38] - -- Add a setting to control the number of terminal output lines to pass to the model when executing commands - -## [2.2.36 - 2.2.37] - -- Add a button to delete user messages - -## [2.2.35] - -- Allow selection of multiple browser viewport sizes and adjusting screenshot quality - -## [2.2.34] - -- Add the DeepSeek provider - -## [2.2.33] - -- "Enhance prompt" button (OpenRouter models only for now) -- Support listing models for OpenAI compatible providers (thanks @samhvw8!) - -## [2.2.32] - -- More efficient workspace tracker - -## [2.2.31] - -- Improved logic for auto-approving chained commands - -## [2.2.30] - -- Fix bug with auto-approving commands - -## [2.2.29] - -- Add configurable delay after auto-writes to allow diagnostics to catch up - -## [2.2.28] - -- Use createFileSystemWatcher to more reliably update list of files to @-mention - -## [2.2.27] - -- Add the current time to the system prompt and improve browser screenshot quality (thanks @libertyteeth!) - -## [2.2.26] - -- Tweaks to preferred language (thanks @yongjer) - -## [2.2.25] - -- Add a preferred language dropdown - -## [2.2.24] - -- Default diff editing to on for new installs - -## [2.2.23] - -- Fix context window for gemini-2.0-flash-thinking-exp-1219 (thanks @student20880) - -## [2.2.22] - -- Add gemini-2.0-flash-thinking-exp-1219 - -## [2.2.21] - -- Take predicted file length into account when detecting omissions - -## [2.2.20] - -- Make fuzzy diff matching configurable (and default to off) - -## [2.2.19] - -- Add experimental option to use a bigger browser (1280x800) - -## [2.2.18] - -- More targeted styling fix for Gemini chats - -## [2.2.17] - -- Improved regex for auto-execution of chained commands - -## [2.2.16] - -- Incorporate Premshay's [PR](https://github.com/RooCodeInc/Roo-Code/pull/60) to add support for Amazon Nova and Meta Llama Models via Bedrock (3, 3.1, 3.2) and unified Bedrock calls using BedrockClient and Bedrock Runtime API - -## [2.2.14 - 2.2.15] - -- Make 
diff editing more robust to transient errors / fix bugs - -## [2.2.13] - -- Fixes to sound playing and applying diffs - -## [2.2.12] - -- Better support for pure deletion and insertion diffs - -## [2.2.11] - -- Added settings checkbox for verbose diff debugging - -## [2.2.6 - 2.2.10] - -- More fixes to search/replace diffs - -## [2.2.5] - -- Allow MCP servers to be enabled/disabled - -## [2.2.4] - -- Tweak the prompt to encourage diff edits when they're enabled - -## [2.2.3] - -- Clean up the settings screen - -## [2.2.2] - -- Add checkboxes to auto-approve MCP tools - -## [2.2.1] - -- Fix another diff editing indentation bug - -## [2.2.0] - -- Incorporate MCP changes from Cline 2.2.0 - -## [2.1.21] - -- Larger text area input + ability to drag images into it - -## [2.1.20] - -- Add Gemini 2.0 - -## [2.1.19] - -- Better error handling for diff editing - -## [2.1.18] - -- Diff editing bugfix to handle Windows line endings - -## [2.1.17] - -- Switch to search/replace diffs in experimental diff editing mode - -## [2.1.16] - -- Allow copying prompts from the history screen - -## [2.1.15] - -- Incorporate dbasclpy's [PR](https://github.com/RooCodeInc/Roo-Code/pull/54) to add support for gemini-exp-1206 -- Make it clear that diff editing is very experimental - -## [2.1.14] - -- Fix bug where diffs were not being applied correctly and try Aider's [unified diff prompt](https://github.com/Aider-AI/aider/blob/3995accd0ca71cea90ef76d516837f8c2731b9fe/aider/coders/udiff_prompts.py#L75-L105) -- If diffs are enabled, automatically reject write_to_file commands that lead to truncated output - -## [2.1.13] - -- Fix https://github.com/RooCodeInc/Roo-Code/issues/50 where sound effects were not respecting settings - -## [2.1.12] - -- Incorporate JoziGila's [PR](https://github.com/cline/cline/pull/158) to add support for editing through diffs - -## [2.1.11] - -- Incorporate lloydchang's [PR](https://github.com/RooCodeInc/Roo-Code/pull/42) to add support for OpenRouter compression - 
-## [2.1.10] - -- Incorporate HeavenOSK's [PR](https://github.com/cline/cline/pull/818) to add sound effects to Cline - -## [2.1.9] - -- Add instructions for using .clinerules on the settings screen - -## [2.1.8] - -- Roo Cline now allows configuration of which commands are allowed without approval! - -## [2.1.7] - -- Updated extension icon and metadata - -## [2.2.0] - -- Add support for Model Context Protocol (MCP), enabling Cline to use custom tools like web-search tool or GitHub tool -- Add MCP server management tab accessible via the server icon in the menu bar -- Add ability for Cline to dynamically create new MCP servers based on user requests (e.g., "add a tool that gets the latest npm docs") - -## [2.1.6] - -- Roo Cline now runs in all VSCode-compatible editors - -## [2.1.5] - -- Fix bug in browser action approval - -## [2.1.4] - -- Roo Cline now can run side-by-side with Cline - -## [2.1.3] - -- Roo Cline now allows browser actions without approval when `alwaysAllowBrowser` is true - -## [2.1.2] - -- Support for auto-approval of write operations and command execution -- Support for .clinerules custom instructions +For the upstream changelog history, see the [Roo Code repository](https://github.com/RooCodeInc/Roo-Code/blob/main/CHANGELOG.md). diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md index 328bb5c1b2e..9bd959b6844 100644 --- a/CODE_OF_CONDUCT.md +++ b/CODE_OF_CONDUCT.md @@ -1,16 +1,3 @@ -
- - -English • [Català](locales/ca/CODE_OF_CONDUCT.md) • [Deutsch](locales/de/CODE_OF_CONDUCT.md) • [Español](locales/es/CODE_OF_CONDUCT.md) • [Français](locales/fr/CODE_OF_CONDUCT.md) • [हिंदी](locales/hi/CODE_OF_CONDUCT.md) • [Bahasa Indonesia](locales/id/CODE_OF_CONDUCT.md) • [Italiano](locales/it/CODE_OF_CONDUCT.md) • [日本語](locales/ja/CODE_OF_CONDUCT.md) - - - - -[한국어](locales/ko/CODE_OF_CONDUCT.md) • [Nederlands](locales/nl/CODE_OF_CONDUCT.md) • [Polski](locales/pl/CODE_OF_CONDUCT.md) • [Português (BR)](locales/pt-BR/CODE_OF_CONDUCT.md) • [Русский](locales/ru/CODE_OF_CONDUCT.md) • [Türkçe](locales/tr/CODE_OF_CONDUCT.md) • [Tiếng Việt](locales/vi/CODE_OF_CONDUCT.md) • [简体中文](locales/zh-CN/CODE_OF_CONDUCT.md) • [繁體中文](locales/zh-TW/CODE_OF_CONDUCT.md) - - -
- # Contributor Covenant Code of Conduct ## Our Pledge @@ -52,7 +39,7 @@ response to any instances of unacceptable behavior. Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct, or to ban temporarily or +that are not aligned with this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. @@ -68,11 +55,11 @@ further defined and clarified by project maintainers. ## Enforcement Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting the project team at support@roocode.com. All complaints -will be reviewed and investigated and will result in a response that -is deemed necessary and appropriate to the circumstances. The project team is -obligated to maintain confidentiality with regard to the reporter of an incident. -Further details of specific enforcement policies may be posted separately. +reported by opening a GitHub Issue. All complaints will be reviewed and +investigated and will result in a response that is deemed necessary and +appropriate to the circumstances. The project team is obligated to maintain +confidentiality with regard to the reporter of an incident. Further details of +specific enforcement policies may be posted separately. Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other @@ -80,10 +67,9 @@ members of the project's leadership. 
## Attribution -This Code of Conduct is adapted from [Cline's version][cline_coc] of the [Contributor Covenant][homepage], version 1.4, +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html -[cline_coc]: https://github.com/cline/cline/blob/main/CODE_OF_CONDUCT.md [homepage]: https://www.contributor-covenant.org For answers to common questions about this code of conduct, see diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 869b59a16da..ff44edc5e5d 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,141 +1,15 @@ -
- +# Contributing to Moo Code -English • [Català](locales/ca/CONTRIBUTING.md) • [Deutsch](locales/de/CONTRIBUTING.md) • [Español](locales/es/CONTRIBUTING.md) • [Français](locales/fr/CONTRIBUTING.md) • [हिंदी](locales/hi/CONTRIBUTING.md) • [Bahasa Indonesia](locales/id/CONTRIBUTING.md) • [Italiano](locales/it/CONTRIBUTING.md) • [日本語](locales/ja/CONTRIBUTING.md) +This is a personal fork of [Roo Code](https://github.com/RooCodeInc/Roo-Code). - - +Contributions are welcome via pull requests. Please ensure: -[한국어](locales/ko/CONTRIBUTING.md) • [Nederlands](locales/nl/CONTRIBUTING.md) • [Polski](locales/pl/CONTRIBUTING.md) • [Português (BR)](locales/pt-BR/CONTRIBUTING.md) • [Русский](locales/ru/CONTRIBUTING.md) • [Türkçe](locales/tr/CONTRIBUTING.md) • [Tiếng Việt](locales/vi/CONTRIBUTING.md) • [简体中文](locales/zh-CN/CONTRIBUTING.md) • [繁體中文](locales/zh-TW/CONTRIBUTING.md) +1. Your changes are well-tested +2. You follow the existing code style +3. You include a clear description of your changes - -
+For bug reports and feature requests, please open a GitHub Issue. -# Contributing to Roo Code +## License -Roo Code is a community-driven project, and we deeply value every contribution. To streamline collaboration, we operate on an [Issue-First](#issue-first-approach) basis, meaning all [Pull Requests (PRs)](#submitting-a-pull-request) must first be linked to a GitHub Issue. Please review this guide carefully. - -## Table of Contents - -- [Before You Contribute](#before-you-contribute) -- [Finding & Planning Your Contribution](#finding--planning-your-contribution) -- [Development & Submission Process](#development--submission-process) -- [Legal](#legal) - -## Before You Contribute - -### 1. Code of Conduct - -All contributors must adhere to our [Code of Conduct](./CODE_OF_CONDUCT.md). - -### 2. Project Roadmap - -Our roadmap guides the project's direction. Align your contributions with these key goals: - -### Reliability First - -- Ensure diff editing and command execution are consistently reliable. -- Reduce friction points that deter regular usage. -- Guarantee smooth operation across all locales and platforms. -- Expand robust support for a wide variety of AI providers and models. - -### Enhanced User Experience - -- Streamline the UI/UX for clarity and intuitiveness. -- Continuously improve the workflow to meet the high expectations developers have for daily-use tools. - -### Leading on Agent Performance - -- Establish comprehensive evaluation benchmarks (evals) to measure real-world productivity. -- Make it easy for everyone to easily run and interpret these evals. -- Ship improvements that demonstrate clear increases in eval scores. - -Mention alignment with these areas in your PRs. - -### 3. Join the Roo Code Community - -- **Primary:** Join our [Discord](https://discord.gg/roocode) and DM **Hannes Rudolph (`hrudolph`)**. -- **Alternative:** Experienced contributors can engage directly via [GitHub Projects](https://github.com/orgs/RooCodeInc/projects/1). 
- -## Finding & Planning Your Contribution - -### Types of Contributions - -- **Bug Fixes:** Addressing code issues. -- **New Features:** Adding functionality. -- **Documentation:** Improving guides and clarity. - -### Issue-First Approach - -All contributions start with a GitHub Issue using our skinny templates. - -- **Check existing issues**: Search [GitHub Issues](https://github.com/RooCodeInc/Roo-Code/issues). -- **Create an issue** using: - - **Enhancements:** "Enhancement Request" template (plain language focused on user benefit). - - **Bugs:** "Bug Report" template (minimal repro + expected vs actual + version). -- **Want to work on it?** Comment "Claiming" on the issue and DM **Hannes Rudolph (`hrudolph`)** on [Discord](https://discord.gg/roocode) to get assigned. Assignment will be confirmed in the thread. -- **PRs must link to the issue.** Unlinked PRs may be closed. - -### Deciding What to Work On - -- Check the [GitHub Project](https://github.com/orgs/RooCodeInc/projects/1) for "Issue [Unassigned]" issues. -- For docs, visit [Roo Code Docs](https://github.com/RooCodeInc/Roo-Code-Docs). - -### Reporting Bugs - -- Check for existing reports first. -- Create a new bug using the ["Bug Report" template](https://github.com/RooCodeInc/Roo-Code/issues/new/choose) with: - - Clear, numbered reproduction steps - - Expected vs actual result - - Roo Code version (required); API provider/model if relevant -- **Security issues**: Report privately via [security advisories](https://github.com/RooCodeInc/Roo-Code/security/advisories/new). - -## Development & Submission Process - -### Development Setup - -1. **Fork & Clone:** - -``` -git clone https://github.com/YOUR_USERNAME/Roo-Code.git -``` - -2. **Install Dependencies:** - -``` -pnpm install -``` - -3. **Debugging:** Open with VS Code (`F5`). - -### Writing Code Guidelines - -- One focused PR per feature or fix. -- Follow ESLint and TypeScript best practices. 
-- Write clear, descriptive commits referencing issues (e.g., `Fixes #123`). -- Provide thorough testing (`npm test`). -- Rebase onto the latest `main` branch before submission. - -### Submitting a Pull Request - -- Begin as a **Draft PR** if seeking early feedback. -- Clearly describe your changes following the Pull Request Template. -- Link the issue in the PR description/title (e.g., "Fixes #123"). -- Provide screenshots/videos for UI changes. -- Indicate if documentation updates are necessary. - -### Pull Request Policy - -- Must reference an assigned GitHub Issue. To get assigned: comment "Claiming" on the issue and DM **Hannes Rudolph (`hrudolph`)** on [Discord](https://discord.gg/roocode). Assignment will be confirmed in the thread. -- Unlinked PRs may be closed. -- PRs should pass CI tests, align with the roadmap, and have clear documentation. - -### Review Process - -- **Daily Triage:** Quick checks by maintainers. -- **Weekly In-depth Review:** Comprehensive assessment. -- **Iterate promptly** based on feedback. - -## Legal - -By contributing, you agree your contributions will be licensed under the Apache 2.0 License, consistent with Roo Code's licensing. +By contributing, you agree that your contributions will be licensed under the Apache License 2.0. diff --git a/PRIVACY.md b/PRIVACY.md index 02e8e151034..e5b9714b74c 100644 --- a/PRIVACY.md +++ b/PRIVACY.md @@ -1,38 +1,27 @@ -# Roo Code Privacy Policy +# Moo Code Privacy Policy -**Last Updated: September 11th, 2025** +**Last Updated: April 2026** -Roo Code respects your privacy and is committed to transparency about how we handle your data. Below is a simple breakdown of where key pieces of data go—and, importantly, where they don’t. +## Privacy Statement -### **Where Your Data Goes (And Where It Doesn’t)** +Moo Code does **not** collect, transmit, or store any user data. All processing happens locally on your machine. 
-- **Code & Files**: Roo Code accesses files on your local machine when needed for AI-assisted features. When you send commands to Roo Code, relevant files may be transmitted to your chosen AI model provider (e.g., OpenAI, Anthropic, OpenRouter) to generate responses. If you select Roo Code Cloud as the model provider (proxy mode), your code may transit Roo Code servers only to forward it to the upstream provider. We do not store your code; it is deleted immediately after forwarding. Otherwise, your code is sent directly to the provider. AI providers may store data per their privacy policies. -- **Commands**: Any commands executed through Roo Code happen on your local environment. However, when you use AI-powered features, the relevant code and context from your commands may be transmitted to your chosen AI model provider (e.g., OpenAI, Anthropic, OpenRouter) to generate responses. We do not have access to or store this data, but AI providers may process it per their privacy policies. -- **Prompts & AI Requests**: When you use AI-powered features, your prompts and relevant project context are sent to your chosen AI model provider (e.g., OpenAI, Anthropic, OpenRouter) to generate responses. We do not store or process this data. These AI providers have their own privacy policies and may store data per their terms of service. If you choose Roo Code Cloud as the provider (proxy mode), prompts may transit Roo Code servers only to forward them to the upstream model and are not stored. -- **API Keys & Credentials**: If you enter an API key (e.g., to connect an AI model), it is stored locally on your device and never sent to us or any third party, except the provider you have chosen. -- **Telemetry (Usage Data)**: We collect anonymous feature usage and error data to help us improve Roo Code. This telemetry is powered by PostHog and includes your VS Code machine ID, feature usage patterns, and exception reports. 
This telemetry does **not** collect personally identifiable information, your code, or AI prompts. You can opt out of this telemetry at any time through the settings. -- **Marketplace Requests**: When you browse or search the Marketplace for Model Configuration Profiles (MCPs) or Custom Modes, Roo Code makes a secure API call to Roo Code's backend servers to retrieve listing information. These requests send only the query parameters (e.g., extension version, search term) necessary to fulfill the request and do not include your code, prompts, or personally identifiable information. +### Data Handling -### **How We Use Your Data (If Collected)** +- **Code & Files:** Moo Code accesses files on your local machine only. When you use AI-powered features, relevant files are sent **directly** to your chosen AI model provider (e.g., OpenAI, Anthropic, OpenRouter). Moo Code does not intercept, store, or forward this data. +- **API Keys & Credentials:** If you enter an API key, it is stored locally on your device and never sent to any third party other than the provider you have configured. +- **Telemetry:** Moo Code does **not** collect any telemetry, usage data, or analytics. There is no tracking, no error reporting to external servers, and no usage metrics collection. +- **Marketplace:** Moo Code does not connect to any marketplace or cloud services. -- We use telemetry to understand feature usage and improve Roo Code. -- We do **not** sell or share your data. -- We do **not** train any models on your data. +### Your Choices & Control -### **Your Choices & Control** +- You can run models locally to prevent data from being sent to third parties. +- No data is collected by Moo Code itself, so there is nothing to opt out of. -- You can run models locally to prevent data being sent to third-parties. -- Telemetry collection is enabled by default to help us improve Roo Code, but you can opt out at any time through the settings. 
-- You can delete Roo Code to stop all data collection. +### Contact -### **Security & Updates** - -We take reasonable measures to secure your data, but no system is 100% secure. If our privacy policy changes, we will notify you within the extension. - -### **Contact Us** - -For any privacy-related questions, reach out to us at support@roocode.com. +For privacy-related questions, please open a GitHub Issue. --- -By using Roo Code, you agree to this Privacy Policy. +By using Moo Code, you agree to this Privacy Policy. diff --git a/README.md b/README.md index 7a21f92e8ab..a58c5fc2d9d 100644 --- a/README.md +++ b/README.md @@ -1,181 +1,60 @@ -

- VS Code Marketplace - X - YouTube - Join Discord - Join r/RooCode -

-

- Get help fast → Join Discord • Prefer async? → Join r/RooCode -

- -# Roo Code - -> Your AI-Powered Dev Team, Right in Your Editor - -## What's New in v3.52.0 - -- Add Poe as an AI provider so you can access Poe models directly in Roo Code. -- Improve the xAI provider with a Responses API migration, reusable transform utilities, and updated Grok-4.20 defaults. -- Fix MiniMax model listings and context window handling for more reliable setup. - -
- 🌐 Available languages - -- [English](README.md) -- [Català](locales/ca/README.md) -- [Deutsch](locales/de/README.md) -- [Español](locales/es/README.md) -- [Français](locales/fr/README.md) -- [हिंदी](locales/hi/README.md) -- [Bahasa Indonesia](locales/id/README.md) -- [Italiano](locales/it/README.md) -- [日本語](locales/ja/README.md) -- [한국어](locales/ko/README.md) -- [Nederlands](locales/nl/README.md) -- [Polski](locales/pl/README.md) -- [Português (BR)](locales/pt-BR/README.md) -- [Русский](locales/ru/README.md) -- [Türkçe](locales/tr/README.md) -- [Tiếng Việt](locales/vi/README.md) -- [简体中文](locales/zh-CN/README.md) -- [繁體中文](locales/zh-TW/README.md) -- ... -
- ---- - -## What Can Roo Code Do For YOU? - -- Generate Code from natural language descriptions and specs -- Adapt with Modes: Code, Architect, Ask, Debug, and Custom Modes -- Refactor & Debug existing code -- Write & Update documentation -- Answer Questions about your codebase -- Automate repetitive tasks -- Utilize MCP Servers - -## Modes - -Roo Code adapts to how you work: - -- Code Mode: everyday coding, edits, and file ops -- Architect Mode: plan systems, specs, and migrations -- Ask Mode: fast answers, explanations, and docs -- Debug Mode: trace issues, add logs, isolate root causes -- Custom Modes: build specialized modes for your team or workflow - -Learn more: [Using Modes](https://docs.roocode.com/basic-usage/using-modes) • [Custom Modes](https://docs.roocode.com/advanced-usage/custom-modes) - -## Tutorial & Feature Videos +# Moo Code -
+A self-hosted fork of [Roo Code](https://github.com/RooCodeInc/Roo-Code) — an AI-powered coding assistant for VS Code. -| | | | -| :-----------------------------------------------------------------------------------------------------------------------------------------------------------------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -|
Installing Roo Code |
Configuring Profiles |
Codebase Indexing | -|
Custom Modes |
Checkpoints |
Context Management | +> **Note:** This is an independent fork maintained separately from the upstream Roo Code project. It is not affiliated with or endorsed by Roo Code Veterinary Inc. -
-

-More quick tutorial and feature videos... -

+## Features -## Resources +- Generate code from natural language descriptions and specs +- Adapt with Modes: Code, Architect, Ask, Debug, and Custom Modes +- Refactor & debug existing code +- Write & update documentation +- Answer questions about your codebase +- Automate repetitive tasks +- Utilize MCP Servers -- **[Documentation](https://docs.roocode.com):** The official guide to installing, configuring, and mastering Roo Code. -- **[YouTube Channel](https://youtube.com/@roocodeyt?feature=shared):** Watch tutorials and see features in action. -- **[Discord Server](https://discord.gg/roocode):** Join the community for real-time help and discussion. -- **[Reddit Community](https://www.reddit.com/r/RooCode):** Share your experiences and see what others are building. -- **[GitHub Issues](https://github.com/RooCodeInc/Roo-Code/issues):** Report bugs and track development. -- **[Feature Requests](https://github.com/RooCodeInc/Roo-Code/discussions/categories/feature-requests?discussions_q=is%3Aopen+category%3A%22Feature+Requests%22+sort%3Atop):** Have an idea? Share it with the developers. +## Build & Install ---- +### Prerequisites -## Local Setup & Development +- [Node.js](https://nodejs.org/) (see `.nvmrc` for version) +- [pnpm](https://pnpm.io/) -1. **Clone** the repo: +### Build ```sh -git clone https://github.com/RooCodeInc/Roo-Code.git +pnpm install +pnpm build ``` -2. **Install dependencies**: +### Create VSIX ```sh -pnpm install +cd src && pnpm vsix ``` -3. **Run the extension**: - -There are several ways to run the Roo Code extension: - -### Development Mode (F5) - -For active development, use VSCode's built-in debugging: - -Press `F5` (or go to **Run** → **Start Debugging**) in VSCode. This will open a new VSCode window with the Roo Code extension running. - -- Changes to the webview will appear immediately. -- Changes to the core extension will also hot reload automatically. 
- -### Automated VSIX Installation - -To build and install the extension as a VSIX package directly into VSCode: +### Install Extension ```sh -pnpm install:vsix [-y] [--editor=] +code --install-extension ../bin/moo-code-{version}.vsix ``` -This command will: - -- Ask which editor command to use (code/cursor/code-insiders) - defaults to 'code' -- Uninstall any existing version of the extension. -- Build the latest VSIX package. -- Install the newly built VSIX. -- Prompt you to restart VS Code for changes to take effect. - -Options: - -- `-y`: Skip all confirmation prompts and use defaults -- `--editor=`: Specify the editor command (e.g., `--editor=cursor` or `--editor=code-insiders`) - -### Manual VSIX Installation - -If you prefer to install the VSIX package manually: - -1. First, build the VSIX package: - ```sh - pnpm vsix - ``` -2. A `.vsix` file will be generated in the `bin/` directory (e.g., `bin/roo-cline-.vsix`). -3. Install it manually using the VSCode CLI: - ```sh - code --install-extension bin/roo-cline-.vsix - ``` +## Development ---- +1. Clone the repo +2. Run `pnpm install` +3. Open in VS Code +4. Press `F5` to launch the extension development host -We use [changesets](https://github.com/changesets/changesets) for versioning and publishing. Check our `CHANGELOG.md` for release notes. - ---- - -## Disclaimer - -**Please note** that Roo Code, Inc does **not** make any representations or warranties regarding any code, models, or other tools provided or made available in connection with Roo Code, any associated third-party tools, or any resulting outputs. You assume **all risks** associated with the use of any such tools or outputs; such tools are provided on an **"AS IS"** and **"AS AVAILABLE"** basis. Such risks may include, without limitation, intellectual property infringement, cyber vulnerabilities or attacks, bias, inaccuracies, errors, defects, viruses, downtime, property loss or damage, and/or personal injury. 
You are solely responsible for your use of any such tools or outputs (including, without limitation, the legality, appropriateness, and results thereof). - ---- - -## Contributing - -We love community contributions! Get started by reading our [CONTRIBUTING.md](CONTRIBUTING.md). +## Modes ---- +- **Code Mode:** Everyday coding, edits, and file ops +- **Architect Mode:** Plan systems, specs, and migrations +- **Ask Mode:** Fast answers, explanations, and docs +- **Debug Mode:** Trace issues, add logs, isolate root causes +- **Custom Modes:** Build specialized modes for your workflow ## License -[Apache 2.0 © 2025 Roo Code, Inc.](./LICENSE) - ---- - -**Enjoy Roo Code!** Whether you keep it on a short leash or let it roam autonomously, we can’t wait to see what you build. If you have questions or feature ideas, drop by our [Reddit community](https://www.reddit.com/r/RooCode/) or [Discord](https://discord.gg/roocode). Happy coding! +Licensed under the [Apache License 2.0](LICENSE). diff --git a/SECURITY.md b/SECURITY.md index 8057b38bda7..6b10f687367 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -2,16 +2,16 @@ ## Supported Versions -We actively patch only the most recent minor release of Roo Code. Older versions receive fixes at our discretion. +We actively patch only the most recent release of Moo Code. Older versions receive fixes at our discretion. ## Reporting a Vulnerability -Email security@roocode.com with: +Please open a GitHub Issue or Security Advisory with: - A short summary of the issue - Steps to reproduce or a proof of concept - Any logs, stack traces, or screenshots that might help us understand the problem -We acknowledge reports within 48 hours and aim to release a fix or mitigation within 30 days. While we work on a resolution, please keep the details private. +We acknowledge reports within 48 hours and aim to release a fix or mitigation within 30 days. While we work on a resolution, please keep the details private. 
-Thank you for helping us keep Roo Code users safe. +Thank you for helping us keep Moo Code users safe. diff --git a/apps/vscode-nightly/package.json b/apps/vscode-nightly/package.json index 56872a2aeb2..2584345bbb1 100644 --- a/apps/vscode-nightly/package.json +++ b/apps/vscode-nightly/package.json @@ -1,6 +1,6 @@ { "name": "@roo-code/vscode-nightly", - "description": "Nightly build for the Roo Code VSCode extension.", + "description": "Nightly build for the Moo Code VSCode extension.", "private": true, "packageManager": "pnpm@10.8.1", "scripts": { diff --git a/apps/vscode-nightly/package.nightly.json b/apps/vscode-nightly/package.nightly.json index 94bc2c8b67a..7bf98a4a98b 100644 --- a/apps/vscode-nightly/package.nightly.json +++ b/apps/vscode-nightly/package.nightly.json @@ -1,5 +1,6 @@ { - "name": "roo-code-nightly", + "name": "moo-code-nightly", + "publisher": "moo-code", "version": "0.0.1", "icon": "assets/icons/icon-nightly.png", "scripts": {} diff --git a/apps/vscode-nightly/package.nls.nightly.json b/apps/vscode-nightly/package.nls.nightly.json index b76bbf4eb8e..acc692c6fea 100644 --- a/apps/vscode-nightly/package.nls.nightly.json +++ b/apps/vscode-nightly/package.nls.nightly.json @@ -1,7 +1,7 @@ { - "extension.displayName": "Roo Code Nightly", - "views.contextMenu.label": "Roo Code Nightly", - "views.terminalMenu.label": "Roo Code Nightly", - "views.activitybar.title": "Roo Code Nightly", - "configuration.title": "Roo Code Nightly" + "extension.displayName": "Moo Code Nightly", + "views.contextMenu.label": "Moo Code Nightly", + "views.terminalMenu.label": "Moo Code Nightly", + "views.activitybar.title": "Moo Code Nightly", + "configuration.title": "Moo Code Nightly" } diff --git a/docs/prd-swarm-multi-agent.md b/docs/prd-swarm-multi-agent.md new file mode 100644 index 00000000000..3dda6a0118b --- /dev/null +++ b/docs/prd-swarm-multi-agent.md @@ -0,0 +1,547 @@ +# PRD: Swarm / True Multi-Agent Execution in Moo Code + +**Status:** In progress — P1, PT, P2, 
P3, P4, P5 defined; P1 and PT shipped, P2–P5 planned (see §8)
+**Author:** davincidreams
+**Branch:** `feat/swarm-prd`
+**Created:** 2026-04-25
+**Last updated:** 2026-04-25
+
+---
+
+## 1. Problem Statement
+
+~~Moo Code today runs tasks **strictly sequentially**.~~ **Phase 1 shipped** — `clineStack` has
+been replaced with `tasks: Map<string, Task>` and true concurrent fan-out is available via
+`spawn_parallel_tasks` with `concurrent: true`. The remaining problem is the higher phases:
+agent identity, cross-agent communication, permission bridging, and external process backends.
+
+The original problem for context:
+
+> When `spawn_parallel_tasks` was used, children executed one at a time via a queued delegation
+> chain (`parallelQueue` on `HistoryItem`). The root cause was the `clineStack: Task[]` LIFO
+> invariant in `ClineProvider`, which enforced "only one task open at a time" and disposed the
+> parent before the child started. A 4-task "parallel" job took 4× the wall-clock time of the
+> longest subtask.
+
+The goal of this PRD is to close the gap with claude-code's **swarm/teammate** architecture and
+deliver genuine multi-instance, multi-agent execution inside Moo Code.
+
+---
+
+## 2. 
Goals
+
+| #   | Goal                         | Success metric                                            |
+| --- | ---------------------------- | --------------------------------------------------------- |
+| G1  | True in-process concurrency  | N tasks run simultaneously, wall time ≈ max(subtask time) |
+| G2  | Swarm identity model         | Each agent has stable ID, name, color; visible in UI      |
+| G3  | Agent-to-agent communication | Leader can send tasks/messages to named workers           |
+| G4  | Permission delegation        | Worker tool-use approvals surfaced in leader's VS Code UI |
+| G5  | External process backend     | Spawn headless CLI workers or new VS Code windows         |
+| G6  | Fault isolation              | One failing worker does not kill the swarm                |
+
+**Non-goals (this version):**
+
+- Distributed execution across machines
+- Custom agent personas / system-prompt authoring UI
+- Persistent cross-session team identities (teams dissolve when the session ends)
+
+---
+
+## 3. Background: How claude-code Solves This
+
+Reference implementation: `C:\Users\lisam\laud code\claude-code\src\utils\swarm\`
+
+### 3.1 Concurrent task map
+
+claude-code stores all running agents in a flat `AppState.tasks: Record<string, …>` map.
+There is no stack, no single-open invariant. Any number of `InProcessTeammateTask` objects can
+be `status: "running"` simultaneously.
+
+### 3.2 AsyncLocalStorage context isolation
+
+Each in-process teammate runs inside `runWithTeammateContext(context, fn)` which uses Node.js
+`AsyncLocalStorage`. Every async operation on any call stack descending from that invocation
+sees the same `TeammateContext` — no cross-agent state bleed.
+
+```typescript
+// teammateContext.ts
+const teammateContextStorage = new AsyncLocalStorage<TeammateContext>()
+
+export function runWithTeammateContext<T>(ctx: TeammateContext, fn: () => T): T {
+	return teammateContextStorage.run(ctx, fn)
+}
+```
+
+Note: Moo Code `Task` instances are already class-isolated (each holds its own messages, API
+client, history). 
`AsyncLocalStorage` is needed primarily for utility functions that look up +"current context" without being passed an explicit reference. + +### 3.3 Two-level abort + +Each teammate has: + +- `abortController` — kills the whole agent lifecycle +- `currentWorkAbortController` — aborts only the current LLM turn (like pressing Escape), + leaving the agent alive and idle + +### 3.4 Idle loop + mailbox polling + +After finishing a turn, a worker does **not** complete. It enters an idle loop that polls a +file-based mailbox (`~/.claude/teams/{team}/inboxes/{agent}.json`) every 500 ms for: + +- A new task message from the leader +- A shutdown request +- A permission response + +### 3.5 File-based mailboxes with locking + +``` +~/.claude/teams//inboxes/.json +``` + +Writes use `proper-lockfile` with retry logic to prevent concurrent corruption. Message types: +`idle_notification`, `shutdown_request`, `task_assignment`, `permission_request`, +`permission_response`. + +### 3.6 Permission bridge + +A module-level singleton (`leaderPermissionBridge.ts`) lets in-process workers inject a +`WorkerPermissionRequest` directly into the leader's `ToolUseConfirm` queue — the same dialog +the human uses — with a colored worker badge showing which agent is asking. + +### 3.7 Backend registry + +``` +detectAndGetBackend(): + priority 1: already inside tmux → TmuxBackend (native splits) + priority 2: iTerm2 with it2 CLI → ITermBackend (native splits) + priority 3: any tmux available → TmuxBackend (external session) + priority 4: headless / no terminal → InProcessBackend +``` + +--- + +## 4. Proposed Architecture + +### 4.1 Phase 1 — Concurrent task map (foundation) + +**The single highest-impact change.** Remove the `clineStack: Task[]` LIFO invariant. 

+```typescript
+// ClineProvider.ts — replace line 140
+// FROM:
+private clineStack: Task[] = []
+
+// TO:
+private tasks: Map<string, Task> = new Map()
+private focusedTaskId?: string // which task the VS Code UI is showing
+private leaderTaskId?: string // root leader of the current swarm session
+```
+
+Changes cascade through:
+
+- `getCurrentTask()` → returns `tasks.get(focusedTaskId)` (UI focus)
+- `addClineToStack()` → `registerTask(task)` — adds to map, emits `TaskFocused`
+- `removeClineFromStack()` → `unregisterTask(taskId)` — removes from map; if
+  leader is unregistered, tears down swarm
+- `delegateParentAndOpenChild()` — **stops disposing parent**; instead sets
+  `focusedTaskId` to child, parent remains `status: "delegated"` but alive in map
+- `createTask()` — no longer enforces single-open invariant for swarm spawns
+
+Estimated scope: ~400 LOC in `ClineProvider.ts` + updates to `webviewMessageHandler.ts`,
+event emitters, and any call site that assumes `clineStack.length <= 1`.
+
+### 4.2 Phase 2 — Swarm identity and registry
+
+New file: `src/core/swarm/SwarmRegistry.ts`
+
+```typescript
+export type AgentColorName = "red" | "blue" | "green" | "yellow" | "purple" | "orange" | "pink" | "cyan"
+
+export interface AgentIdentity {
+	agentId: string // "<name>@<team>"
+	agentName: string
+	teamName: string
+	color: AgentColorName
+	isLeader: boolean
+	taskId: string // linked Moo Code task ID
+}
+
+export interface SwarmSession {
+	sessionId: string
+	leaderTaskId: string
+	teammates: Map<string, AgentIdentity> // keyed by agentId
+	taskList: string[] // unclaimed tasks (worker-pull model)
+}
+
+export class SwarmRegistry {
+	private sessions: Map<string, SwarmSession> = new Map()
+	private colorIndex = 0
+	private readonly COLORS: AgentColorName[] = [
+		"red","blue","green","yellow","purple","orange","pink","cyan"
+	]
+
+	createSession(leaderTaskId: string): SwarmSession { … }
+	assignColor(agentId: string): AgentColorName { … } // round-robin
+	registerTeammate(sessionId: string, identity: AgentIdentity): void { … } 

+	unregisterTeammate(sessionId: string, agentId: string): void { … }
+	getSession(sessionId: string): SwarmSession | undefined { … }
+	destroySession(sessionId: string): void { … }
+}
+```
+
+The registry lives on `ClineProvider` (one per workspace). The `AgentIdentity` is stored in
+`HistoryItem` (extend schema) so it survives process restart.
+
+### 4.3 Phase 3 — Mailbox communication
+
+New file: `src/core/swarm/MailboxService.ts`
+
+Two implementations behind one interface:
+
+```typescript
+export interface IMailboxService {
+	send(to: string, msg: TeammateMessage): Promise<void>
+	read(agentName: string): Promise<TeammateMessage[]>
+	markRead(agentName: string, idx: number): Promise<void>
+}
+```
+
+- **`InMemoryMailbox`** — for in-process workers; uses a `Map<string, TeammateMessage[]>`
+  with async mutex instead of file locks. Zero latency.
+- **`FileMailbox`** — for cross-process/cross-window workers; stores at
+  `~/.roo/teams/<team>/inboxes/<agent>.json` with `proper-lockfile`.
+
+Message types to implement first:
+
+| Type                  | Direction       | Purpose                      |
+| --------------------- | --------------- | ---------------------------- |
+| `task_assignment`     | leader → worker | Assign a new task            |
+| `idle_notification`   | worker → leader | "I finished, ready for more" |
+| `shutdown_request`    | leader → worker | "Stop after current turn"    |
+| `permission_request`  | worker → leader | Request tool-use approval    |
+| `permission_response` | leader → worker | Approval/rejection result    |
+
+### 4.4 Phase 4 — Idle worker loop
+
+Extend `Task.ts` with a `runSwarmWorkerLoop()` method. After each LLM turn completes, instead of
+marking the task done, the worker:
+
+1. Sends `idle_notification` to leader with summary.
+2. Polls mailbox every 500 ms waiting for `task_assignment` or `shutdown_request`.
+3. On `task_assignment`: adds user message to conversation, re-enters
+   `recursivelyMakeClineRequests`.
+4. On `shutdown_request`: marks task completed after current turn.
+5. On leader abort signal: hard-stops immediately. 

+This mirrors `waitForNextPromptOrShutdown` in `inProcessRunner.ts:689`.
+
+Two-level abort (mirrors claude-code):
+
+```typescript
+class Task {
+	readonly lifecycleAbortController = new AbortController() // kills whole worker
+	// currentRequestAbortController already exists — rename to turnAbortController
+	// (aborts only the ongoing LLM stream, worker stays alive)
+}
+```
+
+### 4.5 Phase 5 — Permission bridge
+
+New file: `src/core/swarm/LeaderPermissionBridge.ts`
+
+```typescript
+export type WorkerPermissionRequest = {
+	requestId: string
+	workerTaskId: string
+	agentName: string
+	color: AgentColorName
+	toolName: string
+	toolUseId: string
+	input: Record<string, unknown>
+	description: string
+	onAllow(updatedInput?: Record<string, unknown>): void
+	onReject(reason?: string): void
+}
+
+// Module-level singleton — workers inject, leader UI reads
+let pendingRequests: WorkerPermissionRequest[] = []
+let notifyLeader: (() => void) | null = null
+
+export function registerLeaderNotifier(fn: () => void): void {
+	notifyLeader = fn
+}
+export function submitPermissionRequest(req: WorkerPermissionRequest): void {
+	pendingRequests.push(req)
+	notifyLeader?.()
+}
+export function consumePendingRequests(): WorkerPermissionRequest[] {
+	const copy = [...pendingRequests]
+	pendingRequests = []
+	return copy
+}
+```
+
+The VS Code UI renders pending worker permission requests as either:
+
+- An inline badge in the existing tool-approval ask UI (preferred), or
+- A VS Code `showInformationMessage` with Allow/Reject buttons (fallback)
+
+### 4.6 Phase 6 — External process backend
+
+For full OS-level isolation (separate heaps, separate git worktrees):
+
+**Option A — Headless CLI worker** (lighter, recommended first):
+
+```bash
+moo-worker \
+  --agent-id researcher@my-team \
+  --team-name my-team \
+  --parent-session $SESSION_ID \
+  --worktree ~/.roo/worktrees/project-a1b2c3d4 \
+  --mailbox-dir ~/.roo/teams/my-team/inboxes \
+  --model claude-sonnet-4-6
+```
+
+The worker process reads its first 
task from its mailbox file and uses `FileMailbox` for +bidirectional communication. Mirrors claude-code's `PaneBackendExecutor.spawn()`. + +**Option B — New VS Code window** (full extension, heavier): + +```typescript +vscode.commands.executeCommand("vscode.openFolder", worktreeUri, { + forceNewWindow: true, +}) +// + write initial task to mailbox before window opens +``` + +**Backend registry** (mirrors claude-code `registry.ts`): + +```typescript +export interface IWorkerBackend { + spawn(config: WorkerSpawnConfig): Promise + terminate(workerId: string): Promise + isActive(workerId: string): Promise +} + +// Priority order: +// 1. InProcessBackend — default; no extra dependencies +// 2. CliWorkerBackend — opt-in; requires moo-worker binary +// 3. VsCodeWindowBackend — opt-in; spawns new VS Code window +``` + +--- + +## 5. Data Model Changes + +### 5.1 HistoryItem extensions + +```typescript +// packages/types/src/history.ts — additions +agentId?: string // "@" if this task is a swarm worker +agentName?: string +agentColor?: string +teamName?: string +swarmSessionId?: string +isSwarmLeader?: boolean +isIdle?: boolean // worker is alive but waiting for next task +``` + +### 5.2 New types package exports + +```typescript +// packages/types/src/swarm.ts (new file) +export type AgentColorName = "red" | "blue" | "green" | "yellow" | "purple" | "orange" | "pink" | "cyan" + +export interface AgentIdentity { … } +export interface SwarmSession { … } +export interface TeammateMessage { … } +export type TeammateMessageType = + | "task_assignment" + | "idle_notification" + | "shutdown_request" + | "permission_request" + | "permission_response" +``` + +### 5.3 New event names + +```typescript +// packages/types/src/events.ts — additions to RooCodeEventName +SwarmSessionStarted = "swarmSessionStarted" +SwarmSessionEnded = "swarmSessionEnded" +WorkerSpawned = "workerSpawned" +WorkerIdle = "workerIdle" +WorkerShutdown = "workerShutdown" +PermissionRequested = 
"permissionRequested" // worker→leader +PermissionResolved = "permissionResolved" // leader→worker +``` + +--- + +## 6. API surface (spawn_parallel_tasks evolution) + +Today `spawn_parallel_tasks` runs tasks sequentially. Once Phase 1+2 land, it gains a +`concurrent: true` flag: + +```json +{ + "tool": "spawn_parallel_tasks", + "tasks": [ + { "mode": "code", "message": "implement auth service", "worktree": "auto" }, + { "mode": "code", "message": "implement payment service", "worktree": "auto" }, + { "mode": "code", "message": "implement notification service", "worktree": "auto" } + ], + "concurrent": true, + "abortOnChildFailure": false +} +``` + +When `concurrent: true`, all tasks start immediately against the task map (Phase 1). The parent +registers `onIdle` callbacks on each child and resumes only when all have called +`idle_notification` with a completed status. + +A new `spawn_swarm` tool (Phase 3+) allows richer worker control: + +```json +{ + "tool": "spawn_swarm", + "teamName": "feature-v2", + "workers": [ + { "name": "researcher", "mode": "architect", "color": "blue" }, + { "name": "coder-1", "mode": "code", "color": "green" }, + { "name": "reviewer", "mode": "code", "color": "yellow" } + ], + "taskList": [ + "Research existing auth implementations", + "Implement JWT refresh token flow", + "Review and test the JWT implementation" + ] +} +``` + +Workers pull tasks from the `taskList` atomically; the leader monitors idle notifications and +can push new tasks dynamically. + +--- + +## 7. UI Changes + +### 7.1 Active workers panel + +A new collapsible section in the Chat sidebar shows all running workers with their color dot, +current status (running / idle), and latest tool in use. Mirrors how SubtaskRow renders +child tasks today. + +### 7.2 Worker permission badge + +When a worker requests tool approval, the existing tool-ask dialog renders a colored badge +(`[researcher · blue]`) above the file diff / bash command block. 
+ +### 7.3 Swarm activity log + +The Output Channel gains a `Moo Code Swarm` channel that logs all cross-agent messages, +idle transitions, and permission events in real time. + +--- + +## 7b. Teams System (Shipped) + +The Teams system provides a lightweight, config-driven multi-agent workflow layer that sits on +top of the Phase 1 concurrent task map. It is inspired by the +[Atlas-Agent-Teams](https://github.com/Logos-Liber/Atlas-Agent-Teams) CLI plugin. + +### How it works + +1. A team is defined in `.roo/teams/.json`. The file describes an ordered list of phases; + each phase runs one or more specialist agents (each in a Roo-Code mode). +2. An orchestrator agent (any mode with `run_team_phase` available) reads the config with + `read_file`, then calls `run_team_phase` once per phase in order. +3. The tool handles concurrent vs sequential dispatch, conventions injection, and template + interpolation — the orchestrator only needs to pass `task` and accumulated `context`. + +### Key components + +| File | Role | +| ------------------------------------------------------- | ----------------------------------------------------- | +| `packages/types/src/team.ts` | `TeamConfig`, `TeamPhase`, `TeamAgentSpec` interfaces | +| `src/services/teams/TeamsManager.ts` | Scans `.roo/teams/*.json`; caches configs by slug | +| `src/core/tools/RunTeamPhaseTool.ts` | Tool implementation | +| `src/core/prompts/tools/native-tools/run_team_phase.ts` | LLM tool schema | +| `.roo/teams/fullstack.json` | Sample 3-phase full-stack team | + +### Config format (summary) + +```json +{ + "slug": "my-team", + "name": "My Team", + "orchestratorMode": "orchestrator", + "conventions": ".roo/teams/conventions/my-team.md", + "phases": [ + { + "name": "discovery", + "concurrent": true, + "requireApproval": true, + "agents": [ + { + "mode": "architect", + "role": "Backend Architect", + "instruction": "Analyze backend requirements for: {{task}}\n\nPrior context: {{context}}" + } + ] + } + ] +} +``` + 

+Template variables available in `instruction` and `worktree` fields: `{{task}}`, `{{context}}`,
+`{{phase}}`, `{{team}}`.
+
+See [`docs/teams.md`](./teams.md) for the full reference.
+
+---
+
+## 8. Phased Delivery
+
+| Phase | Name                | Key deliverables                                                                                                                       | Status      | Estimated effort  |
+| ----- | ------------------- | -------------------------------------------------------------------------------------------------------------------------------------- | ----------- | ----------------- |
+| P1    | Concurrent task map | Replace `clineStack` with `tasks: Map<string, Task>`; remove single-open invariant; `concurrent: true` on `spawn_parallel_tasks`        | **Shipped** | Large (1–2 weeks) |
+| PT    | Teams system        | `run_team_phase` tool; `TeamsManager`; `.roo/teams/*.json` config format; conventions injection; `abortOnChildFailure`                  | **Shipped** | Medium (2–3 days) |
+| P2    | Swarm identity      | `SwarmRegistry`, `AgentIdentity`, color assignment; UI color dots                                                                       | Planned     | Medium (3–5 days) |
+| P3    | In-process mailbox  | `InMemoryMailbox`, idle loop in `Task`, `idle_notification` / `task_assignment` messages                                                | Planned     | Medium (3–5 days) |
+| P4    | Permission bridge   | `LeaderPermissionBridge`, worker badge in tool-ask UI                                                                                   | Planned     | Medium (3–5 days) |
+| P5    | File mailbox        | `FileMailbox` with lockfile; cross-process swarm works                                                                                  | Planned     | Small (2–3 days)  |
+| P6    | External backends   | `CliWorkerBackend`; `moo-worker` entry point; `spawn_swarm` tool                                                                        | Planned     | Large (1–2 weeks) |
+
+---
+
+## 9. 
Risks and Mitigations + +| Risk | Impact | Mitigation | +| ------------------------------------------------------------------------ | ------ | -------------------------------------------------------------------------------------------------------------- | +| Removing single-open invariant breaks webview state assumptions | High | Audit all `getCurrentTask()` call sites; add `getFocusedTask()` as new name to make call sites obvious | +| Concurrent tasks racing on `taskHistoryStore` writes | Medium | Serialize writes via async mutex (one per taskId); reads are safe (immutable snapshots) | +| VS Code extension host is single-threaded — "concurrency" is cooperative | Medium | Tasks yield naturally at `await` points; long CPU-bound turns block others. Acceptable for I/O-bound LLM tasks | +| File mailbox lock contention with many workers | Low | In-process workers use `InMemoryMailbox`; file mailbox only for cross-process | +| External CLI worker binary distribution | Medium | Ship as optional; document manual install; in-process is always available | + +--- + +## 10. 
Prior Art / References + +- **claude-code swarm source:** `C:\Users\lisam\laud code\claude-code\src\utils\swarm\` + - `inProcessRunner.ts` — idle loop, permission bridge + - `teammateContext.ts` — AsyncLocalStorage pattern + - `backends/registry.ts` — pluggable backend selection + - `teammateMailbox.ts` — file-based mailbox with lockfile + - `leaderPermissionBridge.ts` — module-level permission injection +- **Current Moo Code parallel implementation:** + - `src/core/webview/ClineProvider.ts` — `delegateParentAndOpenChild`, `reopenParentFromDelegation` + - `src/core/tools/SpawnParallelTasksTool.ts` + - `packages/types/src/history.ts` — `parallelQueue`, `parallelResults` +- **Previous improvements (merged in `moo-code-standalone`):** + - Worktree orphan detection + - `abortOnChildFailure` flag + - `getParallelTaskStatus()` introspection API + - Telemetry for worktree/parallel events diff --git a/docs/teams.md b/docs/teams.md new file mode 100644 index 00000000000..d095e41a1ef --- /dev/null +++ b/docs/teams.md @@ -0,0 +1,332 @@ +# Teams + +This document is the technical reference for the Roo-Code Teams feature. + +## Overview + +Teams are pre-configured, phased multi-agent workflows defined in `.roo/teams/.json` inside your workspace. An orchestrator agent reads the config, advances through each named phase in order, and invokes specialist agents per phase using the `run_team_phase` tool. Use teams when a task is too large or too cross-cutting for a single agent — for example, building a full-stack feature that benefits from dedicated backend, frontend, and review agents working in a structured sequence. + +--- + +## Quick Start + +**1. Create the config file.** + +``` +.roo/teams/my-team.json +``` + +Minimal valid config: + +```json +{ + "slug": "my-team", + "name": "My Team", + "phases": [ + { + "name": "build", + "agents": [ + { + "mode": "code", + "instruction": "Implement the following task: {{task}}" + } + ] + } + ] +} +``` + +**2. 
Invoke the team.** + +Switch to `orchestrator` mode (or whatever mode is specified in `orchestratorMode`) and give the agent the task description. The orchestrator reads the config with `read_file`, then calls `run_team_phase` once per phase in order. + +--- + +## Config File Schema + +### `TeamConfig` + +Top-level object in the `.json` file. + +| Field | Type | Required | Default | Description | +| ------------------ | ------------- | -------- | ------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `slug` | `string` | Yes | — | Unique identifier used in `run_team_phase` calls and skill registration (e.g., `"fullstack"`). Must match the filename stem. | +| `name` | `string` | Yes | — | Human-readable team name shown in listings (e.g., `"Full-Stack Dev Team"`). | +| `description` | `string` | No | — | Short description of what the team does. Shown in team listings. | +| `phases` | `TeamPhase[]` | Yes | — | Ordered list of phases. The orchestrator executes them in array order. | +| `conventions` | `string` | No | — | Workspace-relative path to a Markdown file containing shared conventions. The content is injected into every agent's message inside a `` block. | +| `orchestratorMode` | `string` | No | `"architect"` | Mode slug for the orchestrating task. Informs skill/invocation setup. | +| `$source` | `string` | No | — | Auto-populated with the source file path at load time. Do not set this manually. | + +### `TeamPhase` + +An entry in the `phases` array. + +| Field | Type | Required | Default | Description | +| --------------------- | ----------------- | -------- | ------- | -------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `name` | `string` | Yes | — | Phase identifier used when calling `run_team_phase` (e.g., `"discovery"`). 
Must be unique within the team. | +| `label` | `string` | No | `name` | Human-readable label for UI display. Defaults to `name` if omitted. | +| `concurrent` | `boolean` | No | `false` | When `true`, all agents in the phase start simultaneously. When `false`, agents run one at a time in array order. | +| `requireApproval` | `boolean` | No | `false` | When `true`, signals the orchestrator to pause and request user confirmation before the phase starts. See [Phase Approval Gates](#phase-approval-gates). | +| `abortOnChildFailure` | `boolean` | No | `false` | When `true` and `concurrent` is also `true`, cancels all remaining sibling agents as soon as one fails. Has no effect in sequential mode. | +| `agents` | `TeamAgentSpec[]` | Yes | — | Agents to run in this phase. Must contain at least one entry. | + +### `TeamAgentSpec` + +An entry in a phase's `agents` array. + +| Field | Type | Required | Default | Description | +| ------------- | -------- | -------- | ------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `mode` | `string` | Yes | — | Mode slug for the agent (e.g., `"code"`, `"architect"`). | +| `role` | `string` | No | — | Role label shown in results (e.g., `"Backend Engineer"`). Optional; for readability and result attribution. | +| `instruction` | `string` | Yes | — | Instruction template sent to the agent. Supports template variables (see below). | +| `worktree` | `string` | No | — | Git worktree isolation. `"auto"` creates a new branch and worktree automatically. Any other string is used as the branch name and supports template variables. See [Worktree Support](#worktree-support). | + +--- + +## Template Variables + +The `instruction` field (and the `worktree` field) support the following placeholders. They are substituted at execution time before the instruction is sent to the agent. 
+ +| Variable | Expands to | +| ------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `{{task}}` | The original user task description passed as the `task` parameter to `run_team_phase`. Pass the same value for every phase so all agents share the original intent. | +| `{{context}}` | The `context` parameter passed to `run_team_phase` — a JSON string of accumulated results from all prior phases. Empty or `null` for the first phase. Use this to forward discovery output to implementation agents, and implementation output to review agents. | +| `{{phase}}` | The `name` of the currently executing phase (e.g., `"discovery"`). Useful when a generic instruction template is shared across phases. | +| `{{team}}` | The `slug` of the team (e.g., `"fullstack"`). Most useful in the `worktree` field to create team-namespaced branch names. | + +Example using multiple variables in a single instruction: + +```json +{ + "mode": "code", + "instruction": "Phase: {{phase}}\nTask: {{task}}\n\nPrior context:\n{{context}}\n\nImplement the changes described above." +} +``` + +--- + +## Execution Modes + +### Sequential (default) + +When `concurrent` is `false` (or omitted), agents in the phase run one at a time in the order they appear in the `agents` array. Each agent completes before the next starts. Results are aggregated and returned together after the last agent finishes. + +Sequential mode is appropriate when agents in a phase have implicit dependencies on each other's output — for example, a migration agent that must finish before an integration agent runs. + +### Concurrent + +When `concurrent` is `true`, all agents in the phase start simultaneously. 
The orchestrator receives aggregated results when every agent in the phase has finished (or one has failed, if `abortOnChildFailure` is set). + +Concurrent mode reduces wall-clock time when agents are independent. The `discovery` phase in `fullstack.json` uses concurrent mode because the backend and frontend architects can analyze their respective concerns in parallel without waiting for each other. + +### `abortOnChildFailure` + +Applicable only when `concurrent: true`. When set to `true`, any agent failure in the phase immediately cancels all other in-progress sibling agents. This prevents wasted work — for example, if the backend implementation fails there is no point in letting the frontend implementation continue independently. + +Has no effect in sequential mode because each agent already runs to completion (or failure) before the next one starts. + +--- + +## Conventions File + +The optional `conventions` field in `TeamConfig` points to a workspace-relative path of a Markdown file: + +```json +{ + "conventions": ".roo/teams/conventions/fullstack.md" +} +``` + +The file should contain style rules, coding standards, naming conventions, or any other shared instructions that every agent in every phase should follow. The content is automatically injected into every agent's message inside a `<conventions>` block before the agent's own `instruction`. + +Use the conventions file to avoid repeating boilerplate across agent instructions — for example, language preferences, error-handling conventions, or output format requirements. + +--- + +## Orchestrator Loop + +The orchestrator mode follows this loop when running a team. This pattern is described in the `run_team_phase` tool definition and should be treated as the canonical execution contract. + +**Step 1 — Read the config.** + +Call `read_file` on `.roo/teams/<team-slug>.json` to load the phase list and inspect `requireApproval` flags before starting any work. 
+ +**Step 2 — Iterate over phases.** + +For each phase in the `phases` array, in order: + +a. If `requireApproval` is `true`, call `ask_followup_question` to request user sign-off before proceeding. Wait for confirmation. + +b. Call `run_team_phase` with: + +- `team_slug` — the team's `slug` value +- `phase_name` — the phase's `name` value +- `task` — the original user task, unchanged, passed through every phase +- `context` — the JSON-stringified accumulated results from all prior phases (`null` for the first phase) + +c. Append the phase results to the accumulated context for use in subsequent phases. + +**Step 3 — Complete.** + +After the last phase, call `attempt_completion` with a final summary drawn from the accumulated results. + +**Constraint:** `run_team_phase` must be called alone — do not invoke it alongside other tools in the same turn. + +TypeScript pseudocode for reference: + +```typescript +// Orchestrator pseudocode — not executable, illustrative only +const config = JSON.parse(await readFile(`.roo/teams/${slug}.json`)) +let context: string | null = null + +for (const phase of config.phases) { + if (phase.requireApproval) { + await askFollowupQuestion(`Ready to start phase "${phase.label ?? phase.name}". Proceed?`) + } + + const result = await runTeamPhase({ + team_slug: config.slug, + phase_name: phase.name, + task: originalTask, + context, + }) + + context = JSON.stringify({ ...JSON.parse(context ?? "{}"), [phase.name]: result }) +} + +await attemptCompletion(summarize(context)) +``` + +--- + +## Full Example + +The canonical example is `.roo/teams/fullstack.json`. It defines a three-phase team for building full-stack features. 
+ +```json +{ + "slug": "fullstack", + "name": "Full-Stack Feature Team", + "description": "Three-phase team for building full-stack features: discovery → implementation → review", + "orchestratorMode": "orchestrator", + "conventions": ".roo/teams/conventions/fullstack.md", + "phases": [ + { + "name": "discovery", + "label": "Discovery & Planning", + "concurrent": true, + "requireApproval": true, + "agents": [ + { + "mode": "architect", + "role": "Backend Architect", + "instruction": "Analyze the backend requirements for the following task and produce a detailed technical spec.\n\nTask: {{task}}\n\nDeliverables:\n- API endpoints needed (method, path, request/response shapes)\n- Database schema changes (if any)\n- Key implementation risks or unknowns\n\nOutput as structured markdown." + }, + { + "mode": "architect", + "role": "Frontend Architect", + "instruction": "Analyze the frontend requirements for the following task and produce a detailed technical spec.\n\nTask: {{task}}\n\nDeliverables:\n- Component tree and data flow\n- State management approach\n- API integration points\n- UX edge cases to handle\n\nOutput as structured markdown." + } + ] + }, + { + "name": "implementation", + "label": "Implementation", + "concurrent": false, + "requireApproval": false, + "abortOnChildFailure": true, + "agents": [ + { + "mode": "code", + "role": "Backend Engineer", + "instruction": "Implement the backend changes for this task.\n\nTask: {{task}}\n\nDiscovery results:\n{{context}}\n\nWrite production-quality code. Run tests after each significant change. Do not leave TODOs.", + "worktree": "feat/{{team}}-backend" + }, + { + "mode": "code", + "role": "Frontend Engineer", + "instruction": "Implement the frontend changes for this task.\n\nTask: {{task}}\n\nDiscovery results:\n{{context}}\n\nWrite production-quality code. Ensure the UI handles loading, error, and empty states. 
Do not leave TODOs.", + "worktree": "feat/{{team}}-frontend" + } + ] + }, + { + "name": "review", + "label": "Code Review", + "concurrent": true, + "requireApproval": false, + "agents": [ + { + "mode": "code", + "role": "Security Reviewer", + "instruction": "Review the implementation for security issues.\n\nTask: {{task}}\n\nImplementation summary:\n{{context}}\n\nFocus on: input validation, auth/authz, injection risks, sensitive data exposure. Output a list of findings (severity: critical/high/medium/low) with suggested fixes." + }, + { + "mode": "code", + "role": "QA Engineer", + "instruction": "Review the implementation for correctness and test coverage.\n\nTask: {{task}}\n\nImplementation summary:\n{{context}}\n\nFocus on: missing test cases, edge cases not handled, regression risks. Output a list of findings with suggested test additions." + } + ] + } + ] +} +``` + +**Phase breakdown:** + +| Phase | `name` | `concurrent` | `requireApproval` | What happens | | ----- | ---------------- | ------------ | ----------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | | 1 | `discovery` | `true` | `true` | The orchestrator pauses for user approval, then runs the Backend Architect and Frontend Architect in parallel. Each produces a technical spec using only `{{task}}`. No prior context is available yet. | | 2 | `implementation` | `false` | `false` | Runs sequentially. Backend Engineer executes first, then Frontend Engineer. `{{context}}` contains the discovery specs. Each agent works in an isolated git worktree. The example sets `abortOnChildFailure: true`, but the flag has no effect here because the phase is sequential — it applies only when `concurrent` is `true`. | | 3 | `review` | `true` | `false` | Security Reviewer and QA Engineer run concurrently. 
`{{context}}` contains both the discovery specs and the implementation summaries from phase 2. | + +--- + +## Phase Approval Gates + +Setting `requireApproval: true` on a phase does not cause `run_team_phase` to pause automatically. It is a declarative signal to the orchestrator that it should call `ask_followup_question` before calling `run_team_phase` for that phase. + +This design keeps the approval UX under the orchestrator's control — the orchestrator can customize the approval message, include a summary of what the phase will do, or skip the gate conditionally based on accumulated context. + +**Pattern:** + +```json +{ + "name": "deployment", + "label": "Deploy to Production", + "requireApproval": true, + "agents": [...] +} +``` + +The orchestrator should detect this flag during its config-reading step (before the loop starts) so it knows which phases will need a gate, rather than checking at call time. + +--- + +## Worktree Support + +The optional `worktree` field on a `TeamAgentSpec` isolates that agent's file operations in a separate git worktree, preventing concurrent agents from stepping on each other's changes. + +**Values:** + +| Value | Behavior | +| ---------------- | ----------------------------------------------------------------------------------- | +| `"auto"` | Roo-Code creates a new branch and worktree automatically. Branch name is generated. | +| Any other string | Used directly as the branch name. Template variables are supported. | + +**Template interpolation in `worktree`:** + +The `worktree` field supports the same variables as `instruction`. The most common pattern uses `{{team}}` and a role-specific suffix: + +```json +{ + "worktree": "feat/{{team}}-backend" +} +``` + +With `team.slug = "fullstack"` this expands to `feat/fullstack-backend`. Each agent on the same team gets its own isolated branch, making it straightforward to review, merge, or discard each agent's changes independently. 
+ +**When to use worktrees:** + +Use `worktree` when agents in the same phase or across phases modify overlapping files concurrently. The `implementation` phase in `fullstack.json` is sequential but still uses worktrees because the backend and frontend agents may touch shared files (e.g., `package.json`, shared types) and the team convention is to keep their changes on separate branches until a human-reviewed merge. diff --git a/ellipsis.yaml b/ellipsis.yaml deleted file mode 100644 index 1044b94fdc1..00000000000 --- a/ellipsis.yaml +++ /dev/null @@ -1,22 +0,0 @@ -version: 1.3 -pr_review: - - # Modify confidence_threshold to show fewer/more comments. Increase this to show fewer, but higher quality comments. - # If there’s too much noise, we suggest 0.9. The default value is 0.7. - confidence_threshold: 0.7 - - # If quiet mode is enabled, Ellipsis will only leave reviews when it has comments, so “Looks good to me” reviews - # will be skipped. This can reduce clutter. - quiet: true - - # You can disable automatic code review using auto_review_enabled. This will override any global settings you - # have configured via the web UI. - auto_review_enabled: true - - # You can enable auto-review on draft PRs using auto_review_draft. This will override any global settings you - # have configured via the web UI. - auto_review_draft: false - - # You can allow Ellipsis to approve PRs using enable_approve_prs. Note: in common branch GitHub protection configurations, - # the Ellipsis approval will count towards the approval total and allow the PR to be merged when it otherwise may not be. 
- enable_approve_prs: false diff --git a/packages/cloud/src/CloudAPI.ts b/packages/cloud/src/CloudAPI.ts index 239dc9b5648..6728f7435d6 100644 --- a/packages/cloud/src/CloudAPI.ts +++ b/packages/cloud/src/CloudAPI.ts @@ -1,10 +1,7 @@ -import { z } from "zod" - -import { type AuthService, type ShareVisibility, type ShareResponse, shareResponseSchema } from "@roo-code/types" +import { type AuthService, type ShareVisibility, type ShareResponse } from "@roo-code/types" import { getRooCodeApiUrl } from "./config.js" -import { getUserAgent } from "./utils.js" -import { AuthenticationError, CloudAPIError, NetworkError, TaskNotFoundError } from "./errors.js" +import { CloudAPIError } from "./errors.js" interface CloudAPIRequestOptions extends Omit { timeout?: number @@ -22,126 +19,34 @@ export class CloudAPI { this.baseUrl = getRooCodeApiUrl() } + // Cloud features disabled — all HTTP methods are no-ops. + private async request( - endpoint: string, - options: CloudAPIRequestOptions & { + _endpoint: string, + _options: CloudAPIRequestOptions & { parseResponse?: (data: unknown) => T } = {}, ): Promise { - const { timeout = 30_000, parseResponse, headers = {}, ...fetchOptions } = options - - const sessionToken = this.authService.getSessionToken() - - if (!sessionToken) { - throw new AuthenticationError() - } - - const url = `${this.baseUrl}${endpoint}` - - const requestHeaders = { - "Content-Type": "application/json", - Authorization: `Bearer ${sessionToken}`, - "User-Agent": getUserAgent(), - ...headers, - } - - try { - const response = await fetch(url, { - ...fetchOptions, - headers: requestHeaders, - signal: AbortSignal.timeout(timeout), - }) - - if (!response.ok) { - await this.handleErrorResponse(response, endpoint) - } - - const data = await response.json() - - if (parseResponse) { - return parseResponse(data) - } - - return data as T - } catch (error) { - if (error instanceof TypeError && error.message.includes("fetch")) { - throw new NetworkError(`Network error while 
calling ${endpoint}`) - } - - if (error instanceof CloudAPIError) { - throw error - } - - if (error instanceof Error && error.name === "AbortError") { - throw new CloudAPIError(`Request to ${endpoint} timed out`, undefined, undefined) - } - - throw new CloudAPIError( - `Unexpected error while calling ${endpoint}: ${error instanceof Error ? error.message : String(error)}`, - ) - } + // No-op: cloud features disabled + throw new CloudAPIError("Cloud features are disabled in this fork", 0, undefined) } - private async handleErrorResponse(response: Response, endpoint: string): Promise { - let responseBody: unknown - - try { - responseBody = await response.json() - } catch { - responseBody = await response.text() - } - - switch (response.status) { - case 401: - throw new AuthenticationError() - case 404: - if (endpoint.includes("/share")) { - throw new TaskNotFoundError() - } - throw new CloudAPIError(`Resource not found: ${endpoint}`, 404, responseBody) - default: - throw new CloudAPIError( - `HTTP ${response.status}: ${response.statusText}`, - response.status, - responseBody, - ) - } + private async handleErrorResponse(_response: Response, _endpoint: string): Promise { + throw new CloudAPIError("Cloud features are disabled in this fork", 0, undefined) } - async shareTask(taskId: string, visibility: ShareVisibility = "organization"): Promise { - this.log(`[CloudAPI] Sharing task ${taskId} with visibility: ${visibility}`) - - const response = await this.request("/api/extension/share", { - method: "POST", - body: JSON.stringify({ taskId, visibility }), - parseResponse: (data) => shareResponseSchema.parse(data), - }) - - this.log("[CloudAPI] Share response:", response) - return response + async shareTask(_taskId: string, _visibility: ShareVisibility = "organization"): Promise { + this.log("[CloudAPI] Cloud features disabled — shareTask is a no-op") + throw new CloudAPIError("Cloud features are disabled in this fork", 0, undefined) } async bridgeConfig() { - return 
this.request("/api/extension/bridge/config", { - method: "GET", - parseResponse: (data) => - z - .object({ - userId: z.string(), - socketBridgeUrl: z.string(), - token: z.string(), - }) - .parse(data), - }) + this.log("[CloudAPI] Cloud features disabled — bridgeConfig is a no-op") + throw new CloudAPIError("Cloud features are disabled in this fork", 0, undefined) } async creditBalance(): Promise { - return this.request("/api/extension/credit-balance", { - method: "GET", - parseResponse: (data) => { - const result = z.object({ balance: z.number() }).parse(data) - return result.balance - }, - }) + this.log("[CloudAPI] Cloud features disabled — creditBalance is a no-op") + throw new CloudAPIError("Cloud features are disabled in this fork", 0, undefined) } } diff --git a/packages/cloud/src/CloudService.ts b/packages/cloud/src/CloudService.ts index 43f52d4b18a..c23a71b455f 100644 --- a/packages/cloud/src/CloudService.ts +++ b/packages/cloud/src/CloudService.ts @@ -165,7 +165,7 @@ export class CloudService extends EventEmitter implements Di }, ) - this._telemetryClient = new TelemetryClient(this._authService, this._settingsService, this._retryQueue) + this._telemetryClient = new TelemetryClient(this._authService, this._settingsService) this._shareService = new CloudShareService(this._cloudAPI, this._settingsService, this.log) diff --git a/packages/cloud/src/CloudSettingsService.ts b/packages/cloud/src/CloudSettingsService.ts index 5d05caea0b4..0000d7124fa 100644 --- a/packages/cloud/src/CloudSettingsService.ts +++ b/packages/cloud/src/CloudSettingsService.ts @@ -2,296 +2,70 @@ import EventEmitter from "events" import type { ExtensionContext } from "vscode" -import { z } from "zod" - import { type SettingsService, type SettingsServiceEvents, type AuthService, - type AuthState, type UserFeatures, type UserSettingsConfig, type UserSettingsData, OrganizationAllowList, OrganizationSettings, - organizationSettingsSchema, - userSettingsDataSchema, ORGANIZATION_ALLOW_ALL, } from 
"@roo-code/types" -import { getRooCodeApiUrl } from "./config.js" -import { RefreshTimer } from "./RefreshTimer.js" - -const ORGANIZATION_SETTINGS_CACHE_KEY = "organization-settings" -const USER_SETTINGS_CACHE_KEY = "user-settings" - -const parseExtensionSettingsResponse = (data: unknown) => { - const shapeResult = z.object({ organization: z.unknown(), user: z.unknown() }).safeParse(data) - - if (!shapeResult.success) { - return { success: false, error: shapeResult.error } as const - } - - const orgResult = organizationSettingsSchema.safeParse(shapeResult.data.organization) - - if (!orgResult.success) { - return { success: false, error: orgResult.error } as const - } - - const userResult = userSettingsDataSchema.safeParse(shapeResult.data.user) - - if (!userResult.success) { - return { success: false, error: userResult.error } as const - } - - return { - success: true, - data: { organization: orgResult.data, user: userResult.data }, - } as const -} - +/** + * CloudSettingsService — cloud features disabled. + * + * All methods that would make HTTP calls to app.roocode.com are no-ops. + * Returns empty/default settings. 
+ */ export class CloudSettingsService extends EventEmitter implements SettingsService { - private context: ExtensionContext - private authService: AuthService - private settings: OrganizationSettings | undefined = undefined - private userSettings: UserSettingsData | undefined = undefined - private timer: RefreshTimer private log: (...args: unknown[]) => void - constructor(context: ExtensionContext, authService: AuthService, log?: (...args: unknown[]) => void) { + constructor(_context: ExtensionContext, _authService: AuthService, log?: (...args: unknown[]) => void) { super() - - this.context = context - this.authService = authService this.log = log || console.log - - this.timer = new RefreshTimer({ - callback: async () => { - return await this.fetchSettings() - }, - successInterval: 3_600_000, - initialBackoffMs: 1000, - maxBackoffMs: 3_600_000, - }) + this.log("[cloud-settings] Cloud features disabled — CloudSettingsService is a no-op") } + /** + * Initialize — cloud features disabled, no-op. + */ public async initialize(): Promise { - this.loadCachedSettings() - - // Clear cached settings if we have missed a log out. 
- if (this.authService.getState() == "logged-out" && (this.settings || this.userSettings)) { - await this.removeSettings() - } - - this.authService.on("auth-state-changed", async (data: { state: AuthState; previousState: AuthState }) => { - try { - if (data.state === "active-session") { - this.timer.start() - } else if (data.previousState === "active-session") { - this.timer.stop() - - if (data.state === "logged-out") { - await this.removeSettings() - } - } - } catch (error) { - this.log(`[cloud-settings] error processing auth-state-changed: ${error}`, error) - } - }) - - if (this.authService.hasActiveSession()) { - this.timer.start() - } - } - - private async fetchSettings(): Promise { - const token = this.authService.getSessionToken() - - if (!token) { - return false - } - - try { - const response = await fetch(`${getRooCodeApiUrl()}/api/extension-settings`, { - headers: { - Authorization: `Bearer ${token}`, - }, - }) - - if (!response.ok) { - this.log("[cloud-settings] Failed to fetch extension settings:", response.status, response.statusText) - return false - } - - const data = await response.json() - const result = parseExtensionSettingsResponse(data) - - if (!result.success) { - this.log("[cloud-settings] Invalid extension settings format:", result.error) - return false - } - - const { organization: newOrgSettings, user: newUserSettings } = result.data - - let orgChanged = false - let userChanged = false - - // Check for organization settings changes - if (!this.settings || this.settings.version !== newOrgSettings.version) { - this.settings = newOrgSettings - orgChanged = true - } - - // Check for user settings changes - if (!this.userSettings || this.userSettings.version !== newUserSettings.version) { - this.userSettings = newUserSettings - userChanged = true - } - - // Emit a single event if either settings changed - if (orgChanged || userChanged) { - this.emit("settings-updated", {} as Record) - } - - const hasChanges = orgChanged || userChanged - - if 
(hasChanges) { - await this.cacheSettings() - } - - return true - } catch (error) { - this.log("[cloud-settings] Error fetching extension settings:", error) - return false - } - } - - private async cacheSettings(): Promise { - // Store settings in separate globalState values - if (this.settings) { - await this.context.globalState.update(ORGANIZATION_SETTINGS_CACHE_KEY, this.settings) - } - - if (this.userSettings) { - await this.context.globalState.update(USER_SETTINGS_CACHE_KEY, this.userSettings) - } - } - - private loadCachedSettings(): void { - // Load settings from separate globalState values - this.settings = this.context.globalState.get(ORGANIZATION_SETTINGS_CACHE_KEY) - this.userSettings = this.context.globalState.get(USER_SETTINGS_CACHE_KEY) + this.log("[cloud-settings] Cloud features disabled — initialize is a no-op") } public getAllowList(): OrganizationAllowList { - return this.settings?.allowList || ORGANIZATION_ALLOW_ALL + return ORGANIZATION_ALLOW_ALL } public getSettings(): OrganizationSettings | undefined { - return this.settings + return undefined } public getUserSettings(): UserSettingsData | undefined { - return this.userSettings + return undefined } public getUserFeatures(): UserFeatures { - return this.userSettings?.features || {} + return {} } public getUserSettingsConfig(): UserSettingsConfig { - return this.userSettings?.settings || {} + return {} } - public async updateUserSettings(settings: Partial): Promise { - const token = this.authService.getSessionToken() - - if (!token) { - this.log("[cloud-settings] No session token available for updating user settings") - return false - } - - try { - const currentVersion = this.userSettings?.version - const requestBody: { - settings: Partial - version?: number - } = { - settings, - } - - // Include current version for optimistic locking if we have cached settings - if (currentVersion !== undefined) { - requestBody.version = currentVersion - } - - const response = await 
fetch(`${getRooCodeApiUrl()}/api/user-settings`, { - method: "PATCH", - headers: { - "Content-Type": "application/json", - Authorization: `Bearer ${token}`, - }, - body: JSON.stringify(requestBody), - }) - - if (!response.ok) { - if (response.status === 409) { - this.log( - "[cloud-settings] Version conflict when updating user settings - settings may have been updated elsewhere", - ) - } else { - this.log("[cloud-settings] Failed to update user settings:", response.status, response.statusText) - } - return false - } - - const updatedUserSettings = await response.json() - const result = userSettingsDataSchema.safeParse(updatedUserSettings) - - if (!result.success) { - this.log("[cloud-settings] Invalid user settings response format:", result.error) - return false - } - - if (!this.userSettings || result.data.version > this.userSettings.version) { - this.userSettings = result.data - await this.cacheSettings() - this.emit("settings-updated", {} as Record) - } - - return true - } catch (error) { - this.log("[cloud-settings] Error updating user settings:", error) - return false - } + public async updateUserSettings(_settings: Partial): Promise { + this.log("[cloud-settings] Cloud features disabled — updateUserSettings is a no-op") + return false } public isTaskSyncEnabled(): boolean { - // Org settings take precedence - if (this.authService.getStoredOrganizationId()) { - return this.settings?.cloudSettings?.recordTaskMessages ?? false - } - - // User settings default to true if unspecified - const userSettings = this.userSettings - if (userSettings) { - return userSettings.settings.taskSyncEnabled ?? 
true - } - return false } - private async removeSettings(): Promise { - this.settings = undefined - this.userSettings = undefined - - // Clear both cache keys - await this.context.globalState.update(ORGANIZATION_SETTINGS_CACHE_KEY, undefined) - await this.context.globalState.update(USER_SETTINGS_CACHE_KEY, undefined) - } - public dispose(): void { this.removeAllListeners() - this.timer.stop() } } diff --git a/packages/cloud/src/CloudShareService.ts b/packages/cloud/src/CloudShareService.ts index 3942456f597..11d5ec37f50 100644 --- a/packages/cloud/src/CloudShareService.ts +++ b/packages/cloud/src/CloudShareService.ts @@ -1,6 +1,5 @@ import type { SettingsService, ShareResponse, ShareVisibility } from "@roo-code/types" -import { importVscode } from "./importVscode.js" import type { CloudAPI } from "./CloudAPI.js" export class CloudShareService { @@ -14,48 +13,20 @@ export class CloudShareService { this.log = log || console.log } - async shareTask(taskId: string, visibility: ShareVisibility = "organization"): Promise { - try { - const response = await this.cloudAPI.shareTask(taskId, visibility) + // Cloud features disabled — all methods are no-ops. 
- if (response.success && response.shareUrl) { - const vscode = await importVscode() - - if (vscode?.env?.clipboard?.writeText) { - try { - await vscode.env.clipboard.writeText(response.shareUrl) - } catch (copyErr) { - this.log("[ShareService] Clipboard write failed (non-fatal):", copyErr) - } - } else { - this.log("[ShareService] VS Code clipboard unavailable; running outside extension host.") - } - } - - return response - } catch (error) { - this.log("[ShareService] Error sharing task:", error) - throw error - } + async shareTask(_taskId: string, _visibility: ShareVisibility = "organization"): Promise { + this.log("[ShareService] Cloud features disabled — shareTask is a no-op") + throw new Error("Cloud features are disabled in this fork") } async canShareTask(): Promise { - try { - return !!this.settingsService.getSettings()?.cloudSettings?.enableTaskSharing - } catch (error) { - this.log("[ShareService] Error checking if task can be shared:", error) - return false - } + // Cloud features disabled — sharing is not available + return false } async canSharePublicly(): Promise { - try { - const cloudSettings = this.settingsService.getSettings()?.cloudSettings - // Public sharing requires both enableTaskSharing AND allowPublicTaskSharing to be true - return !!cloudSettings?.enableTaskSharing && cloudSettings?.allowPublicTaskSharing !== false - } catch (error) { - this.log("[ShareService] Error checking if task can be shared publicly:", error) - return false - } + // Cloud features disabled — public sharing is not available + return false } } diff --git a/packages/cloud/src/TelemetryClient.ts b/packages/cloud/src/TelemetryClient.ts index 252cc8ad42b..53e8c789cac 100644 --- a/packages/cloud/src/TelemetryClient.ts +++ b/packages/cloud/src/TelemetryClient.ts @@ -5,16 +5,12 @@ import { type AuthService, type SettingsService, TelemetryEventName, - rooCodeTelemetryEventSchema, - TelemetryPropertiesProvider, TelemetryEventSubscription, } from "@roo-code/types" -import { 
getRooCodeApiUrl } from "./config.js" -import type { RetryQueue } from "./retry-queue/index.js" - abstract class BaseTelemetryClient implements TelemetryClient { - protected providerRef: WeakRef | null = null + // eslint-disable-next-line @typescript-eslint/no-explicit-any + protected providerRef: WeakRef | null = null protected telemetryEnabled: boolean = false constructor( @@ -71,7 +67,8 @@ abstract class BaseTelemetryClient implements TelemetryClient { public async captureException(_error: Error, _additionalProperties?: Record): Promise {} - public setProvider(provider: TelemetryPropertiesProvider): void { + // eslint-disable-next-line @typescript-eslint/no-explicit-any + public setProvider(provider: any): void { this.providerRef = new WeakRef(provider) } @@ -85,206 +82,30 @@ abstract class BaseTelemetryClient implements TelemetryClient { } export class CloudTelemetryClient extends BaseTelemetryClient { - private retryQueue: RetryQueue | null = null - - constructor( - private authService: AuthService, - private settingsService: SettingsService, - retryQueue?: RetryQueue, - ) { + constructor(_authService: AuthService, _settingsService: SettingsService) { super({ type: "exclude", events: [TelemetryEventName.TASK_CONVERSATION_MESSAGE], }) - this.retryQueue = retryQueue || null } - private async fetch(path: string, options: RequestInit, allowQueueing = true) { - if (!this.authService.isAuthenticated()) { - return - } - - const token = this.authService.getSessionToken() - - if (!token) { - console.error(`[TelemetryClient#fetch] Unauthorized: No session token available.`) - return - } - - const url = `${getRooCodeApiUrl()}/api/${path}` - const fetchOptions: RequestInit = { - ...options, - headers: { - Authorization: `Bearer ${token}`, - "Content-Type": "application/json", - }, - } - - try { - const response = await fetch(url, fetchOptions) - - if (!response.ok) { - console.error( - `[TelemetryClient#fetch] ${options.method} ${path} -> ${response.status} 
${response.statusText}`, - ) - - // Queue for retry on server errors (5xx) or rate limiting (429) - // Do NOT retry on client errors (4xx) except 429 - they won't succeed - if (this.retryQueue && allowQueueing && (response.status >= 500 || response.status === 429)) { - await this.retryQueue.enqueue(url, fetchOptions, "telemetry") - } - } - - return response - } catch (error) { - console.error(`[TelemetryClient#fetch] Network error for ${options.method} ${path}: ${error}`) - - // Queue for retry on network failures (typically TypeError with "fetch failed" message) - // These are transient network issues that may succeed on retry - if ( - this.retryQueue && - allowQueueing && - error instanceof TypeError && - error.message.includes("fetch failed") - ) { - await this.retryQueue.enqueue(url, fetchOptions, "telemetry") - } - - throw error - } + public override async capture(_event: TelemetryEvent): Promise { + // No-op: telemetry disabled } - public override async capture(event: TelemetryEvent) { - if (!this.isTelemetryEnabled() || !this.isEventCapturable(event.event)) { - if (this.debug) { - console.info(`[TelemetryClient#capture] Skipping event: ${event.event}`) - } - - return - } - - const payload = { - type: event.event, - properties: await this.getEventProperties(event), - } - - if (this.debug) { - console.info(`[TelemetryClient#capture] ${JSON.stringify(payload)}`) - } - - const result = rooCodeTelemetryEventSchema.safeParse(payload) - - if (!result.success) { - console.error( - `[TelemetryClient#capture] Invalid telemetry event: ${result.error.message} - ${JSON.stringify(payload)}`, - ) - - return - } - - try { - await this.fetch(`events`, { - method: "POST", - body: JSON.stringify(result.data), - }) - } catch (error) { - console.error(`[TelemetryClient#capture] Error sending telemetry event: ${error}`) - // Error is already queued for retry in the fetch method - } + public async backfillMessages(_messages: ClineMessage[], _taskId: string): Promise { + // No-op: 
telemetry disabled } - public async backfillMessages(messages: ClineMessage[], taskId: string): Promise { - if (!this.isTelemetryEnabled()) { - return - } - - if (!this.authService.isAuthenticated()) { - if (this.debug) { - console.info(`[TelemetryClient#backfillMessages] Skipping: Not authenticated`) - } - return - } - - const token = this.authService.getSessionToken() - - if (!token) { - console.error(`[TelemetryClient#backfillMessages] Unauthorized: No session token available.`) - return - } - - try { - const mergedProperties = await this.getEventProperties({ - event: TelemetryEventName.TASK_MESSAGE, - properties: { taskId }, - }) - - const formData = new FormData() - formData.append("taskId", taskId) - formData.append("properties", JSON.stringify(mergedProperties)) - - formData.append( - "file", - new File([JSON.stringify(messages)], "task.json", { - type: "application/json", - }), - ) - - if (this.debug) { - console.info( - `[TelemetryClient#backfillMessages] Uploading ${messages.length} messages for task ${taskId}`, - ) - } - - const url = `${getRooCodeApiUrl()}/api/events/backfill` - const fetchOptions: RequestInit = { - method: "POST", - headers: { - Authorization: `Bearer ${token}`, - }, - body: formData, - } - - try { - const response = await fetch(url, fetchOptions) - - if (!response.ok) { - console.error( - `[TelemetryClient#backfillMessages] POST events/backfill -> ${response.status} ${response.statusText}`, - ) - } - } catch (fetchError) { - console.error(`[TelemetryClient#backfillMessages] Network error: ${fetchError}`) - throw fetchError - } - } catch (error) { - console.error(`[TelemetryClient#backfillMessages] Error uploading messages: ${error}`) - } + public override updateTelemetryState(_didUserOptIn: boolean): void { + // No-op: telemetry disabled } - public override updateTelemetryState(_didUserOptIn: boolean) {} - public override isTelemetryEnabled(): boolean { - if (process.env.ROO_CODE_DISABLE_TELEMETRY === "1") { - return false - } - - 
return true + return false } - protected override isEventCapturable(eventName: TelemetryEventName): boolean { - // Ensure that this event type is supported by the telemetry client - if (!super.isEventCapturable(eventName)) { - return false - } - - // Only record message telemetry if task sync is enabled - if (eventName === TelemetryEventName.TASK_MESSAGE) { - return this.settingsService.isTaskSyncEnabled() - } - - // Other telemetry types are capturable at this point - return true + public override async shutdown(): Promise { + // No-op: telemetry disabled } - - public override async shutdown() {} } diff --git a/packages/cloud/src/WebAuthService.ts b/packages/cloud/src/WebAuthService.ts index 501bf95bb55..fd35c8459bc 100644 --- a/packages/cloud/src/WebAuthService.ts +++ b/packages/cloud/src/WebAuthService.ts @@ -1,8 +1,6 @@ -import crypto from "crypto" import EventEmitter from "events" import type { ExtensionContext } from "vscode" -import { z } from "zod" import type { CloudUserInfo, @@ -12,185 +10,24 @@ import type { AuthState, } from "@roo-code/types" -import { getClerkBaseUrl, getRooCodeApiUrl, PRODUCTION_CLERK_BASE_URL } from "./config.js" -import { getUserAgent } from "./utils.js" -import { importVscode } from "./importVscode.js" -import { InvalidClientTokenError } from "./errors.js" -import { RefreshTimer } from "./RefreshTimer.js" - -const AUTH_STATE_KEY = "clerk-auth-state" - /** - * AuthCredentials + * WebAuthService — cloud features disabled. + * + * All methods that would make HTTP calls to Clerk or Roo Code servers are no-ops. + * The service starts in "logged-out" state and never transitions to an active session. 
*/ - -const authCredentialsSchema = z.object({ - clientToken: z.string().min(1, "Client token cannot be empty"), - sessionId: z.string().min(1, "Session ID cannot be empty"), - organizationId: z.string().nullable().optional(), -}) - -type AuthCredentials = z.infer - -/** - * Clerk Schemas - */ - -const clerkSignInResponseSchema = z.object({ - response: z.object({ - created_session_id: z.string(), - }), -}) - -const clerkCreateSessionTokenResponseSchema = z.object({ - jwt: z.string(), -}) - -const clerkMeResponseSchema = z.object({ - response: z.object({ - id: z.string().optional(), - first_name: z.string().nullish(), - last_name: z.string().nullish(), - image_url: z.string().optional(), - primary_email_address_id: z.string().optional(), - email_addresses: z - .array( - z.object({ - id: z.string(), - email_address: z.string(), - }), - ) - .optional(), - public_metadata: z.record(z.any()).optional(), - }), -}) - -const clerkOrganizationMembershipsSchema = z.object({ - response: z.array( - z.object({ - id: z.string(), - role: z.string(), - permissions: z.array(z.string()).optional(), - created_at: z.number().optional(), - updated_at: z.number().optional(), - organization: z.object({ - id: z.string(), - name: z.string(), - slug: z.string().optional(), - image_url: z.string().optional(), - has_image: z.boolean().optional(), - created_at: z.number().optional(), - updated_at: z.number().optional(), - }), - }), - ), -}) - export class WebAuthService extends EventEmitter implements AuthService { - private context: ExtensionContext - private timer: RefreshTimer private state: AuthState = "initializing" private log: (...args: unknown[]) => void - private readonly authCredentialsKey: string - - private credentials: AuthCredentials | null = null - private sessionToken: string | null = null - private userInfo: CloudUserInfo | null = null - private isFirstRefreshAttempt: boolean = false - constructor(context: ExtensionContext, log?: (...args: unknown[]) => void) { + 
constructor(_context: ExtensionContext, log?: (...args: unknown[]) => void) { super() - - this.context = context this.log = log || console.log - - this.log("[auth] Using WebAuthService") - - // Calculate auth credentials key based on Clerk base URL. - const clerkBaseUrl = getClerkBaseUrl() - - if (clerkBaseUrl !== PRODUCTION_CLERK_BASE_URL) { - this.authCredentialsKey = `clerk-auth-credentials-${clerkBaseUrl}` - } else { - this.authCredentialsKey = "clerk-auth-credentials" - } - - this.timer = new RefreshTimer({ - callback: async () => { - await this.refreshSession() - return true - }, - successInterval: 50_000, - initialBackoffMs: 1_000, - maxBackoffMs: 300_000, - }) - } - - private changeState(newState: AuthState): void { - const previousState = this.state - this.state = newState - this.log(`[auth] changeState: ${previousState} -> ${newState}`) - this.emit("auth-state-changed", { state: newState, previousState }) - } - - private async handleCredentialsChange(): Promise { - try { - const credentials = await this.loadCredentials() - - if (credentials) { - if ( - this.credentials === null || - this.credentials.clientToken !== credentials.clientToken || - this.credentials.sessionId !== credentials.sessionId || - this.credentials.organizationId !== credentials.organizationId - ) { - this.transitionToAttemptingSession(credentials) - } - } else { - if (this.state !== "logged-out") { - this.transitionToLoggedOut() - } - } - } catch (error) { - this.log("[auth] Error handling credentials change:", error) - } - } - - private transitionToLoggedOut(): void { - this.timer.stop() - - this.credentials = null - this.sessionToken = null - this.userInfo = null - - this.changeState("logged-out") - } - - private transitionToAttemptingSession(credentials: AuthCredentials): void { - this.credentials = credentials - - this.sessionToken = null - this.userInfo = null - this.isFirstRefreshAttempt = true - - this.changeState("attempting-session") - - this.timer.stop() - this.timer.start() 
- } - - private transitionToInactiveSession(): void { - this.sessionToken = null - this.userInfo = null - - this.changeState("inactive-session") + this.log("[auth] Using WebAuthService (cloud features disabled)") } /** - * Initialize the auth state - * - * This method loads tokens from storage and determines the current auth state. - * It also starts the refresh timer if we have an active session. + * Initialize the auth state — cloud features disabled, starts in logged-out state. */ public async initialize(): Promise { if (this.state !== "initializing") { @@ -198,196 +35,39 @@ export class WebAuthService extends EventEmitter implements A return } - await this.handleCredentialsChange() - - this.context.subscriptions.push( - this.context.secrets.onDidChange((e) => { - if (e.key === this.authCredentialsKey) { - this.handleCredentialsChange() - } - }), - ) + // Start in logged-out state — no credentials loading, no timer, no HTTP calls. + this.state = "logged-out" + this.log("[auth] Cloud features disabled — initialized in logged-out state") } - public broadcast(): void {} - - private async storeCredentials(credentials: AuthCredentials): Promise { - await this.context.secrets.store(this.authCredentialsKey, JSON.stringify(credentials)) - } - - private async loadCredentials(): Promise { - const credentialsJson = await this.context.secrets.get(this.authCredentialsKey) - if (!credentialsJson) return null - - try { - const parsedJson = JSON.parse(credentialsJson) - const credentials = authCredentialsSchema.parse(parsedJson) - - // Migration: If no organizationId but we have userInfo, add it - if (credentials.organizationId === undefined && this.userInfo?.organizationId) { - credentials.organizationId = this.userInfo.organizationId - await this.storeCredentials(credentials) - this.log("[auth] Migrated credentials with organizationId") - } - - return credentials - } catch (error) { - if (error instanceof z.ZodError) { - this.log("[auth] Invalid credentials format:", 
error.errors) - } else { - this.log("[auth] Failed to parse stored credentials:", error) - } - return null - } - } - - private async clearCredentials(): Promise { - await this.context.secrets.delete(this.authCredentialsKey) + public broadcast(): void { + // No-op: cloud features disabled } /** - * Start the login process - * - * This method initiates the authentication flow by generating a state parameter - * and opening the browser to the authorization URL. - * - * @param landingPageSlug Optional slug of a specific landing page (e.g., "supernova", "special-offer", etc.) - * @param useProviderSignup If true, uses provider signup flow (/extension/provider-sign-up). If false, uses standard sign-in (/extension/sign-in). Defaults to false. + * Login — cloud features disabled, no-op. */ - public async login(landingPageSlug?: string, useProviderSignup: boolean = false): Promise { - try { - const vscode = await importVscode() - - if (!vscode) { - throw new Error("VS Code API not available") - } - - // Generate a cryptographically random state parameter. - const state = crypto.randomBytes(16).toString("hex") - await this.context.globalState.update(AUTH_STATE_KEY, state) - const packageJSON = this.context.extension?.packageJSON - const publisher = packageJSON?.publisher ?? "RooVeterinaryInc" - const name = packageJSON?.name ?? "roo-cline" - const params = new URLSearchParams({ - state, - auth_redirect: `${vscode.env.uriScheme}://${publisher}.${name}`, - }) - - // Use landing page URL if slug is provided, otherwise use provider sign-up or sign-in URL based on parameter - const url = landingPageSlug - ? `${getRooCodeApiUrl()}/l/${landingPageSlug}?${params.toString()}` - : useProviderSignup - ? `${getRooCodeApiUrl()}/extension/provider-sign-up?${params.toString()}` - : `${getRooCodeApiUrl()}/extension/sign-in?${params.toString()}` - - await vscode.env.openExternal(vscode.Uri.parse(url)) - } catch (error) { - const context = landingPageSlug ? 
` (landing page: ${landingPageSlug})` : "" - this.log(`[auth] Error initiating Roo Code Cloud auth${context}: ${error}`) - throw new Error(`Failed to initiate Roo Code Cloud authentication${context}: ${error}`) - } + public async login(_landingPageSlug?: string, _useProviderSignup: boolean = false): Promise { + this.log("[auth] Cloud features disabled — login is a no-op") } /** - * Handle the callback from Roo Code Cloud - * - * This method is called when the user is redirected back to the extension - * after authenticating with Roo Code Cloud. - * - * @param code The authorization code from the callback - * @param state The state parameter from the callback - * @param organizationId The organization ID from the callback (null for personal accounts) - * @param providerModel The model ID selected during signup (optional) + * Handle auth callback — cloud features disabled, no-op. */ public async handleCallback( - code: string | null, - state: string | null, - organizationId?: string | null, - providerModel?: string | null, + _code: string | null, + _state: string | null, + _organizationId?: string | null, + _providerModel?: string | null, ): Promise { - if (!code || !state) { - const vscode = await importVscode() - - if (vscode) { - vscode.window.showInformationMessage("Invalid Roo Code Cloud sign in url") - } - - return - } - - try { - // Validate state parameter to prevent CSRF attacks. - const storedState = this.context.globalState.get(AUTH_STATE_KEY) - - if (state !== storedState) { - this.log("[auth] State mismatch in callback") - throw new Error("Invalid state parameter. 
Authentication request may have been tampered with.") - } - - const credentials = await this.clerkSignIn(code) - - // Set organizationId (null for personal accounts) - credentials.organizationId = organizationId || null - - await this.storeCredentials(credentials) - - // Store the provider model if provided, or flag that no model was selected - if (providerModel) { - await this.context.globalState.update("roo-provider-model", providerModel) - await this.context.globalState.update("roo-auth-skip-model", undefined) - this.log(`[auth] Stored provider model: ${providerModel}`) - } else { - // No model was selected during signup - flag this for the webview - await this.context.globalState.update("roo-auth-skip-model", true) - this.log(`[auth] No provider model selected during signup`) - } - - const vscode = await importVscode() - - if (vscode) { - vscode.window.showInformationMessage("Successfully authenticated with Roo Code Cloud") - } - - this.log("[auth] Successfully authenticated with Roo Code Cloud") - } catch (error) { - this.log(`[auth] Error handling Roo Code Cloud callback: ${error}`) - this.changeState("logged-out") - throw new Error(`Failed to handle Roo Code Cloud callback: ${error}`) - } + this.log("[auth] Cloud features disabled — handleCallback is a no-op") } /** - * Log out - * - * This method removes all stored tokens and stops the refresh timer. + * Logout — cloud features disabled, no-op. 
*/ public async logout(): Promise { - const oldCredentials = this.credentials - - try { - // Clear credentials from storage - onDidChange will handle state transitions - await this.clearCredentials() - await this.context.globalState.update(AUTH_STATE_KEY, undefined) - - if (oldCredentials) { - try { - await this.clerkLogout(oldCredentials) - } catch (error) { - this.log("[auth] Error calling clerkLogout:", error) - } - } - - const vscode = await importVscode() - - if (vscode) { - vscode.window.showInformationMessage("Logged out from Roo Code Cloud") - } - - this.log("[auth] Logged out from Roo Code Cloud") - } catch (error) { - this.log(`[auth] Error logging out from Roo Code Cloud: ${error}`) - throw new Error(`Failed to log out from Roo Code Cloud: ${error}`) - } + this.log("[auth] Cloud features disabled — logout is a no-op") } public getState(): AuthState { @@ -395,350 +75,39 @@ export class WebAuthService extends EventEmitter implements A } public getSessionToken(): string | undefined { - if (this.state === "active-session" && this.sessionToken) { - return this.sessionToken - } - - return + // Never have a session token when cloud features are disabled + return undefined } - /** - * Check if the user is authenticated - * - * @returns True if the user is authenticated (has an active, attempting, or inactive session) - */ public isAuthenticated(): boolean { - return ( - this.state === "active-session" || this.state === "attempting-session" || this.state === "inactive-session" - ) + return false } public hasActiveSession(): boolean { - return this.state === "active-session" + return false } - /** - * Check if the user has an active session or is currently attempting to acquire one - * - * @returns True if the user has an active session or is attempting to get one - */ public hasOrIsAcquiringActiveSession(): boolean { - return this.state === "active-session" || this.state === "attempting-session" - } - - /** - * Refresh the session - * - * This method refreshes 
the session token using the client token. - */ - private async refreshSession(): Promise { - if (!this.credentials) { - this.log("[auth] Cannot refresh session: missing credentials") - return - } - - try { - const previousState = this.state - this.sessionToken = await this.clerkCreateSessionToken() - - if (previousState !== "active-session") { - this.changeState("active-session") - this.fetchUserInfo() - } else { - this.state = "active-session" - } - } catch (error) { - if (error instanceof InvalidClientTokenError) { - this.log("[auth] Invalid/Expired client token: clearing credentials") - this.clearCredentials() - } else if (this.isFirstRefreshAttempt && this.state === "attempting-session") { - this.isFirstRefreshAttempt = false - this.transitionToInactiveSession() - } - this.log("[auth] Failed to refresh session", error) - throw error - } - } - - private async fetchUserInfo(): Promise { - if (!this.credentials) { - return - } - - this.userInfo = await this.clerkMe() - this.emit("user-info", { userInfo: this.userInfo }) + return false } - /** - * Extract user information from the ID token - * - * @returns User information from ID token claims or null if no ID token available - */ public getUserInfo(): CloudUserInfo | null { - return this.userInfo + return null } - /** - * Get the stored organization ID from credentials - * - * @returns The stored organization ID, null for personal accounts or if no credentials exist - */ public getStoredOrganizationId(): string | null { - return this.credentials?.organizationId || null + return null } - /** - * Switch to a different organization context - * @param organizationId The organization ID to switch to, or null for personal account - */ - public async switchOrganization(organizationId: string | null): Promise { - if (!this.credentials) { - throw new Error("Cannot switch organization: not authenticated") - } - - // Update the stored credentials with the new organization ID - const updatedCredentials: AuthCredentials = { - 
...this.credentials, - organizationId: organizationId, - } - - // Store the updated credentials, handleCredentialsChange will handle the update - await this.storeCredentials(updatedCredentials) + public async switchOrganization(_organizationId: string | null): Promise { + this.log("[auth] Cloud features disabled — switchOrganization is a no-op") } - /** - * Get all organization memberships for the current user - * @returns Array of organization memberships - */ public async getOrganizationMemberships(): Promise { - if (!this.credentials) { - return [] - } - - try { - return await this.clerkGetOrganizationMemberships() - } catch (error) { - this.log(`[auth] Failed to get organization memberships: ${error}`) - return [] - } - } - - private async clerkSignIn(ticket: string): Promise { - const formData = new URLSearchParams() - formData.append("strategy", "ticket") - formData.append("ticket", ticket) - - const response = await fetch(`${getClerkBaseUrl()}/v1/client/sign_ins`, { - method: "POST", - headers: { - "Content-Type": "application/x-www-form-urlencoded", - "User-Agent": this.userAgent(), - }, - body: formData.toString(), - signal: AbortSignal.timeout(10000), - }) - - if (!response.ok) { - throw new Error(`HTTP ${response.status}: ${response.statusText}`) - } - - const { - response: { created_session_id: sessionId }, - } = clerkSignInResponseSchema.parse(await response.json()) - - // 3. Extract the client token from the Authorization header. - const clientToken = response.headers.get("authorization") - - if (!clientToken) { - throw new Error("No authorization header found in the response") - } - - return authCredentialsSchema.parse({ clientToken, sessionId }) - } - - private async clerkCreateSessionToken(): Promise { - const formData = new URLSearchParams() - formData.append("_is_native", "1") - - // Handle 3 cases for organization_id: - // 1. Have an org id: organization_id=THE_ORG_ID - // 2. Have a personal account: organization_id= (empty string) - // 3. 
Don't know if you have an org id (old style credentials): don't send organization_id param at all - const organizationId = this.getStoredOrganizationId() - if (this.credentials?.organizationId !== undefined) { - // We have organization context info (either org id or personal account) - formData.append("organization_id", organizationId || "") - } - // If organizationId is undefined, don't send the param at all (old credentials) - - const response = await fetch(`${getClerkBaseUrl()}/v1/client/sessions/${this.credentials!.sessionId}/tokens`, { - method: "POST", - headers: { - "Content-Type": "application/x-www-form-urlencoded", - Authorization: `Bearer ${this.credentials!.clientToken}`, - "User-Agent": this.userAgent(), - }, - body: formData.toString(), - signal: AbortSignal.timeout(10000), - }) - - if (response.status === 401 || response.status === 404) { - throw new InvalidClientTokenError() - } else if (!response.ok) { - throw new Error(`HTTP ${response.status}: ${response.statusText}`) - } - - const data = clerkCreateSessionTokenResponseSchema.parse(await response.json()) - - return data.jwt - } - - private async clerkMe(): Promise { - const response = await fetch(`${getClerkBaseUrl()}/v1/me`, { - headers: { - Authorization: `Bearer ${this.credentials!.clientToken}`, - "User-Agent": this.userAgent(), - }, - signal: AbortSignal.timeout(10000), - }) - - if (!response.ok) { - throw new Error(`HTTP ${response.status}: ${response.statusText}`) - } - - const payload = await response.json() - const { response: userData } = clerkMeResponseSchema.parse(payload) - - const userInfo: CloudUserInfo = { - id: userData.id, - picture: userData.image_url, - } - - const names = [userData.first_name, userData.last_name].filter((name) => !!name) - userInfo.name = names.length > 0 ? 
names.join(" ") : undefined - const primaryEmailAddressId = userData.primary_email_address_id - const emailAddresses = userData.email_addresses - - if (primaryEmailAddressId && emailAddresses) { - userInfo.email = emailAddresses.find( - (email: { id: string }) => primaryEmailAddressId === email.id, - )?.email_address - } - - // Fetch organization info if user is in organization context - try { - const storedOrgId = this.getStoredOrganizationId() - - if (this.credentials?.organizationId !== undefined) { - // We have organization context info - if (storedOrgId !== null) { - // User is in organization context - fetch user's memberships and filter - const orgMemberships = await this.clerkGetOrganizationMemberships() - const userMembership = this.findOrganizationMembership(orgMemberships, storedOrgId) - - if (userMembership) { - this.setUserOrganizationInfo(userInfo, userMembership) - - this.log("[auth] User in organization context:", { - id: userMembership.organization.id, - name: userMembership.organization.name, - role: userMembership.role, - }) - } else { - this.log("[auth] Warning: User not found in stored organization:", storedOrgId) - } - } else { - this.log("[auth] User in personal account context - not setting organization info") - } - } else { - // Old credentials without organization context - fetch organization info to determine context - const orgMemberships = await this.clerkGetOrganizationMemberships() - const primaryOrgMembership = this.findPrimaryOrganizationMembership(orgMemberships) - - if (primaryOrgMembership) { - this.setUserOrganizationInfo(userInfo, primaryOrgMembership) - - this.log("[auth] Legacy credentials: Found organization membership:", { - id: primaryOrgMembership.organization.id, - name: primaryOrgMembership.organization.name, - role: primaryOrgMembership.role, - }) - } else { - this.log("[auth] Legacy credentials: No organization memberships found") - } - } - } catch (error) { - this.log("[auth] Failed to fetch organization info:", 
error) - // Don't throw - organization info is optional - } - - return userInfo - } - - private findOrganizationMembership( - memberships: CloudOrganizationMembership[], - organizationId: string, - ): CloudOrganizationMembership | undefined { - return memberships?.find((membership) => membership.organization.id === organizationId) - } - - private findPrimaryOrganizationMembership( - memberships: CloudOrganizationMembership[], - ): CloudOrganizationMembership | undefined { - return memberships && memberships.length > 0 ? memberships[0] : undefined - } - - private setUserOrganizationInfo(userInfo: CloudUserInfo, membership: CloudOrganizationMembership): void { - userInfo.organizationId = membership.organization.id - userInfo.organizationName = membership.organization.name - userInfo.organizationRole = membership.role - userInfo.organizationImageUrl = membership.organization.image_url - } - - private async clerkGetOrganizationMemberships(): Promise { - if (!this.credentials) { - this.log("[auth] Cannot get organization memberships: missing credentials") - return [] - } - - const response = await fetch(`${getClerkBaseUrl()}/v1/me/organization_memberships`, { - headers: { - Authorization: `Bearer ${this.credentials.clientToken}`, - "User-Agent": this.userAgent(), - }, - signal: AbortSignal.timeout(10000), - }) - - if (response.ok) { - return clerkOrganizationMembershipsSchema.parse(await response.json()).response - } - - const errorMessage = `Failed to get organization memberships: ${response.status} ${response.statusText}` - this.log(`[auth] ${errorMessage}`) - throw new Error(errorMessage) - } - - private async clerkLogout(credentials: AuthCredentials): Promise { - const formData = new URLSearchParams() - formData.append("_is_native", "1") - - const response = await fetch(`${getClerkBaseUrl()}/v1/client/sessions/${credentials.sessionId}/remove`, { - method: "POST", - headers: { - "Content-Type": "application/x-www-form-urlencoded", - Authorization: `Bearer 
${credentials.clientToken}`, - "User-Agent": this.userAgent(), - }, - body: formData.toString(), - signal: AbortSignal.timeout(10000), - }) - - if (!response.ok) { - throw new Error(`HTTP ${response.status}: ${response.statusText}`) - } + return [] } - private userAgent(): string { - return getUserAgent(this.context) + public dispose(): void { + this.removeAllListeners() } } diff --git a/packages/cloud/src/__tests__/CloudAPI.creditBalance.spec.ts b/packages/cloud/src/__tests__/CloudAPI.creditBalance.spec.ts index 67ab0cf3b9b..9faeb8a41ac 100644 --- a/packages/cloud/src/__tests__/CloudAPI.creditBalance.spec.ts +++ b/packages/cloud/src/__tests__/CloudAPI.creditBalance.spec.ts @@ -1,6 +1,6 @@ -import { describe, it, expect, vi, beforeEach, type Mock } from "vitest" +import { describe, it, expect, vi, beforeEach } from "vitest" import { CloudAPI } from "../CloudAPI.js" -import { AuthenticationError, CloudAPIError } from "../errors.js" +import { CloudAPIError } from "../errors.js" import type { AuthService } from "@roo-code/types" // Mock the config module @@ -15,7 +15,7 @@ vi.mock("../utils.js", () => ({ describe("CloudAPI.creditBalance", () => { let mockAuthService: { - getSessionToken: Mock<() => string | undefined> + getSessionToken: ReturnType } let cloudAPI: CloudAPI @@ -29,68 +29,37 @@ describe("CloudAPI.creditBalance", () => { global.fetch = vi.fn() }) - it("should fetch credit balance successfully", async () => { - const mockBalance = 12.34 + it("should throw CloudAPIError when cloud features are disabled", async () => { + await expect(cloudAPI.creditBalance()).rejects.toThrow(CloudAPIError) + await expect(cloudAPI.creditBalance()).rejects.toThrow("Cloud features are disabled in this fork") + }) + + it("should throw CloudAPIError regardless of session token", async () => { mockAuthService.getSessionToken.mockReturnValue("test-session-token") + await expect(cloudAPI.creditBalance()).rejects.toThrow(CloudAPIError) + }) - global.fetch = vi.fn().mockResolvedValue({ 
- ok: true, - json: async () => ({ balance: mockBalance }), - }) + it("should not make any fetch calls", async () => { + const fetchSpy = vi.fn() + global.fetch = fetchSpy - const balance = await cloudAPI.creditBalance() + try { + await cloudAPI.creditBalance() + } catch { + // Expected to throw + } - expect(balance).toBe(mockBalance) - expect(global.fetch).toHaveBeenCalledWith( - "https://api.test.com/api/extension/credit-balance", - expect.objectContaining({ - method: "GET", - headers: expect.objectContaining({ - Authorization: "Bearer test-session-token", - "Content-Type": "application/json", - "User-Agent": "test-user-agent", - }), - }), - ) + expect(fetchSpy).not.toHaveBeenCalled() }) - it("should throw AuthenticationError when session token is missing", async () => { + it("should throw CloudAPIError when session token is missing", async () => { mockAuthService.getSessionToken.mockReturnValue(undefined) - - await expect(cloudAPI.creditBalance()).rejects.toThrow(AuthenticationError) - }) - - it("should handle API errors", async () => { - mockAuthService.getSessionToken.mockReturnValue("test-session-token") - - global.fetch = vi.fn().mockResolvedValue({ - ok: false, - status: 500, - statusText: "Internal Server Error", - json: async () => ({ error: "Server error" }), - }) - await expect(cloudAPI.creditBalance()).rejects.toThrow(CloudAPIError) }) - it("should handle network errors", async () => { + it("should throw CloudAPIError on network errors", async () => { mockAuthService.getSessionToken.mockReturnValue("test-session-token") - global.fetch = vi.fn().mockRejectedValue(new TypeError("fetch failed")) - - await expect(cloudAPI.creditBalance()).rejects.toThrow( - "Network error while calling /api/extension/credit-balance", - ) - }) - - it("should handle invalid response format", async () => { - mockAuthService.getSessionToken.mockReturnValue("test-session-token") - - global.fetch = vi.fn().mockResolvedValue({ - ok: true, - json: async () => ({ invalid: 
"response" }), - }) - - await expect(cloudAPI.creditBalance()).rejects.toThrow() + await expect(cloudAPI.creditBalance()).rejects.toThrow(CloudAPIError) }) }) diff --git a/packages/cloud/src/__tests__/CloudSettingsService.parsing.test.ts b/packages/cloud/src/__tests__/CloudSettingsService.parsing.test.ts index b0486b971c4..25f1e0cb5c6 100644 --- a/packages/cloud/src/__tests__/CloudSettingsService.parsing.test.ts +++ b/packages/cloud/src/__tests__/CloudSettingsService.parsing.test.ts @@ -32,132 +32,54 @@ describe("CloudSettingsService - Response Parsing", () => { service = new CloudSettingsService(mockContext, mockAuthService, vi.fn()) }) - it("should successfully parse valid extension settings response", async () => { - // Mock fetch response with a valid settings structure - const mockResponse = { - organization: { - version: 1, - defaultSettings: {}, - allowList: { - allowAll: true, - providers: {}, - }, - }, - user: { - features: {}, - settings: {}, - version: 1, - }, - } - - global.fetch = vi.fn().mockResolvedValue({ - ok: true, - json: vi.fn().mockResolvedValue(mockResponse), - }) + afterEach(() => { + service.dispose() + }) - // Initialize the service + it("should return undefined settings when cloud features are disabled", async () => { await service.initialize() - // Wait for the fetch to be called (timer executes immediately but asynchronously) - await vi.waitFor(() => { - expect(global.fetch).toHaveBeenCalled() - }) - - // Wait a bit for the async processing to complete - await new Promise((resolve) => setTimeout(resolve, 10)) - - // Verify settings were parsed correctly const orgSettings = service.getSettings() const userSettings = service.getUserSettings() - expect(orgSettings).toEqual(mockResponse.organization) - expect(userSettings).toEqual(mockResponse.user) + expect(orgSettings).toBeUndefined() + expect(userSettings).toBeUndefined() }) - it("should handle complex nested provider settings without type errors", async () => { - // Mock response with 
complex nested provider settings - const mockResponse = { - organization: { - version: 2, - defaultSettings: { - maxOpenTabsContext: 10, - }, - allowList: { - allowAll: false, - providers: { - anthropic: { - allowAll: true, - }, - openai: { - allowAll: false, - models: ["gpt-4", "gpt-3.5-turbo"], - }, - }, - }, - providerProfiles: { - default: { - id: "default", - apiProvider: "anthropic", - apiModelId: "claude-3-opus-20240229", - apiKey: "test-key", - modelTemperature: 0.7, - }, - }, - }, - user: { - features: {}, - settings: {}, - version: 1, - }, - } - - global.fetch = vi.fn().mockResolvedValue({ - ok: true, - json: vi.fn().mockResolvedValue(mockResponse), - }) + it("should not make fetch calls when cloud features are disabled", async () => { + const mockFetch = vi.fn() + global.fetch = mockFetch - // Initialize the service await service.initialize() - // Wait for the fetch to be called (timer executes immediately but asynchronously) - await vi.waitFor(() => { - expect(global.fetch).toHaveBeenCalled() - }) - - // Wait a bit for the async processing to complete - await new Promise((resolve) => setTimeout(resolve, 10)) - - // Verify complex settings were parsed correctly - const orgSettings = service.getSettings() - const userSettings = service.getUserSettings() + // Wait a bit to ensure no async fetch calls are made + await new Promise((resolve) => setTimeout(resolve, 50)) - expect(orgSettings).toEqual(mockResponse.organization) - expect(userSettings).toEqual(mockResponse.user) - expect(orgSettings?.providerProfiles?.default).toBeDefined() + expect(mockFetch).not.toHaveBeenCalled() }) - it("should handle invalid response gracefully", async () => { - // Mock invalid response - const mockResponse = { - organization: { - // Missing required fields - version: 1, - }, - user: { - // Missing required fields - version: 1, - }, - } - + it("should return undefined settings regardless of response data", async () => { + // Even if fetch is mocked with valid data, settings 
should be undefined global.fetch = vi.fn().mockResolvedValue({ ok: true, - json: vi.fn().mockResolvedValue(mockResponse), + json: vi.fn().mockResolvedValue({ + organization: { + version: 1, + defaultSettings: {}, + allowList: { allowAll: true, providers: {} }, + }, + user: { + features: {}, + settings: {}, + version: 1, + }, + }), }) - // Initialize the service await service.initialize() - // Settings should remain undefined due to validation failure + await new Promise((resolve) => setTimeout(resolve, 50)) + const orgSettings = service.getSettings() const userSettings = service.getUserSettings() diff --git a/packages/cloud/src/__tests__/CloudSettingsService.test.ts b/packages/cloud/src/__tests__/CloudSettingsService.test.ts index 1b41a54f246..d2473ade57f 100644 --- a/packages/cloud/src/__tests__/CloudSettingsService.test.ts +++ b/packages/cloud/src/__tests__/CloudSettingsService.test.ts @@ -1,54 +1,15 @@ import type { ExtensionContext } from "vscode" -import type { OrganizationSettings, AuthService } from "@roo-code/types" +import type { AuthService } from "@roo-code/types" import { CloudSettingsService } from "../CloudSettingsService.js" -import { RefreshTimer } from "../RefreshTimer.js" - -vi.mock("../RefreshTimer") - -vi.mock("../config", () => ({ - getRooCodeApiUrl: vi.fn().mockReturnValue("https://app.roocode.com"), -})) - -global.fetch = vi.fn() describe("CloudSettingsService", () => { let mockContext: ExtensionContext - let mockAuthService: { - getState: ReturnType - getSessionToken: ReturnType - hasActiveSession: ReturnType - on: ReturnType - getStoredOrganizationId: ReturnType - } - let mockRefreshTimer: { - start: ReturnType - stop: ReturnType - } + let mockAuthService: AuthService let cloudSettingsService: CloudSettingsService let mockLog: ReturnType - const mockSettings: OrganizationSettings = { - version: 1, - defaultSettings: {}, - allowList: { - allowAll: true, - providers: {}, - }, - } - - const mockUserSettings = { - features: {}, - settings: {}, 
- version: 1, - } - - const mockExtensionSettingsResponse = { - organization: mockSettings, - user: mockUserSettings, - } - beforeEach(() => { vi.clearAllMocks() @@ -65,19 +26,11 @@ describe("CloudSettingsService", () => { hasActiveSession: vi.fn().mockReturnValue(false), on: vi.fn(), getStoredOrganizationId: vi.fn().mockReturnValue(null), - } - - mockRefreshTimer = { - start: vi.fn(), - stop: vi.fn(), - } + } as unknown as AuthService mockLog = vi.fn() - // Mock RefreshTimer constructor - vi.mocked(RefreshTimer).mockImplementation(() => mockRefreshTimer as unknown as RefreshTimer) - - cloudSettingsService = new CloudSettingsService(mockContext, mockAuthService as unknown as AuthService, mockLog) + cloudSettingsService = new CloudSettingsService(mockContext, mockAuthService, mockLog) }) afterEach(() => { @@ -87,638 +40,94 @@ describe("CloudSettingsService", () => { describe("constructor", () => { it("should create CloudSettingsService with proper dependencies", () => { expect(cloudSettingsService).toBeInstanceOf(CloudSettingsService) - expect(RefreshTimer).toHaveBeenCalledWith({ - callback: expect.any(Function), - successInterval: 3_600_000, - initialBackoffMs: 1000, - maxBackoffMs: 3_600_000, - }) }) it("should use console.log as default logger when none provided", () => { - const service = new CloudSettingsService(mockContext, mockAuthService as unknown as AuthService) + const service = new CloudSettingsService(mockContext, mockAuthService) expect(service).toBeInstanceOf(CloudSettingsService) + service.dispose() }) - }) - - describe("initialize", () => { - it("should load cached settings on initialization", async () => { - const cachedSettings = { - version: 1, - defaultSettings: {}, - allowList: { allowAll: true, providers: {} }, - } - - // Create a fresh mock context for this test - const testContext = { - globalState: { - get: vi.fn((key: string) => { - if (key === "organization-settings") return cachedSettings - if (key === "user-settings") return 
mockUserSettings - return undefined - }), - update: vi.fn().mockResolvedValue(undefined), - }, - } as unknown as ExtensionContext - - // Mock auth service to not be logged out - const testAuthService = { - getState: vi.fn().mockReturnValue("active"), - getSessionToken: vi.fn(), - hasActiveSession: vi.fn().mockReturnValue(false), - on: vi.fn(), - } - - // Create a new instance to test initialization - const testService = new CloudSettingsService( - testContext, - testAuthService as unknown as AuthService, - mockLog, - ) - await testService.initialize() - - expect(testContext.globalState.get).toHaveBeenCalledWith("organization-settings") - expect(testContext.globalState.get).toHaveBeenCalledWith("user-settings") - expect(testService.getSettings()).toEqual(cachedSettings) - - testService.dispose() - }) - - it("should clear cached settings if user is logged out", async () => { - const cachedSettings = { - version: 1, - defaultSettings: {}, - allowList: { allowAll: true, providers: {} }, - } - mockContext.globalState.get = vi.fn((key: string) => { - if (key === "organization-settings") return cachedSettings - if (key === "user-settings") return mockUserSettings - return undefined - }) - mockAuthService.getState.mockReturnValue("logged-out") - - await cloudSettingsService.initialize() - - // Check that both cache keys are cleared - const updateCalls = vi.mocked(mockContext.globalState.update).mock.calls - const orgSettingsCall = updateCalls.find((call) => call[0] === "organization-settings") - const userSettingsCall = updateCalls.find((call) => call[0] === "user-settings") - - expect(orgSettingsCall).toBeDefined() - expect(orgSettingsCall?.[1]).toBeUndefined() - expect(userSettingsCall).toBeDefined() - expect(userSettingsCall?.[1]).toBeUndefined() - }) - - it("should set up auth service event listeners", async () => { - await cloudSettingsService.initialize() - - expect(mockAuthService.on).toHaveBeenCalledWith("auth-state-changed", expect.any(Function)) - }) - - 
it("should start timer if user has active session", async () => { - mockAuthService.hasActiveSession.mockReturnValue(true) - - await cloudSettingsService.initialize() - - expect(mockRefreshTimer.start).toHaveBeenCalled() - }) - - it("should not start timer if user has no active session", async () => { - mockAuthService.hasActiveSession.mockReturnValue(false) - - await cloudSettingsService.initialize() - - expect(mockRefreshTimer.start).not.toHaveBeenCalled() - }) - }) - - describe("event emission", () => { - beforeEach(async () => { - await cloudSettingsService.initialize() - }) - - it("should emit 'settings-updated' event when settings change", async () => { - const eventSpy = vi.fn() - cloudSettingsService.on("settings-updated", eventSpy) - - mockAuthService.getSessionToken.mockReturnValue("valid-token") - vi.mocked(fetch).mockResolvedValue({ - ok: true, - json: vi.fn().mockResolvedValue(mockExtensionSettingsResponse), - } as unknown as Response) - - // Get the callback function passed to RefreshTimer - const timerCallback = vi.mocked(RefreshTimer).mock.calls[0]?.[0]?.callback - - await timerCallback?.() - expect(eventSpy).toHaveBeenCalledWith({}) - }) - - it("should emit event when either org or user settings change", async () => { - const eventSpy = vi.fn() - - const previousSettings = { - version: 1, - defaultSettings: {}, - allowList: { allowAll: true, providers: {} }, - } - const newSettings = { - version: 2, - defaultSettings: {}, - allowList: { allowAll: true, providers: {} }, - } - - // Create a fresh mock context for this test - const testContext = { - globalState: { - get: vi.fn((key: string) => { - if (key === "organization-settings") return previousSettings - if (key === "user-settings") return mockUserSettings - return undefined - }), - update: vi.fn().mockResolvedValue(undefined), - }, - } as unknown as ExtensionContext - - // Mock auth service to not be logged out - const testAuthService = { - getState: vi.fn().mockReturnValue("active"), - 
getSessionToken: vi.fn().mockReturnValue("valid-token"), - hasActiveSession: vi.fn().mockReturnValue(false), - on: vi.fn(), - } - - // Create a new service instance with cached settings - const testService = new CloudSettingsService( - testContext, - testAuthService as unknown as AuthService, - mockLog, - ) - testService.on("settings-updated", eventSpy) - await testService.initialize() - - vi.mocked(fetch).mockResolvedValue({ - ok: true, - json: vi.fn().mockResolvedValue({ - organization: newSettings, - user: mockUserSettings, - }), - } as unknown as Response) - - // Get the callback function passed to RefreshTimer for this instance - const timerCallback = - vi.mocked(RefreshTimer).mock.calls[vi.mocked(RefreshTimer).mock.calls.length - 1]?.[0]?.callback - - await timerCallback?.() - - expect(eventSpy).toHaveBeenCalledWith({}) - - testService.dispose() - }) - - it("should not emit event when settings version is unchanged", async () => { - const eventSpy = vi.fn() - - // Create a fresh mock context for this test - const testContext = { - globalState: { - get: vi.fn((key: string) => { - if (key === "organization-settings") return mockSettings - if (key === "user-settings") return mockUserSettings - return undefined - }), - update: vi.fn().mockResolvedValue(undefined), - }, - } as unknown as ExtensionContext - - // Mock auth service to not be logged out - const testAuthService = { - getState: vi.fn().mockReturnValue("active"), - getSessionToken: vi.fn().mockReturnValue("valid-token"), - hasActiveSession: vi.fn().mockReturnValue(false), - on: vi.fn(), - } - - // Create a new service instance with cached settings - const testService = new CloudSettingsService( - testContext, - testAuthService as unknown as AuthService, - mockLog, + it("should log that cloud features are disabled", () => { + expect(mockLog).toHaveBeenCalledWith( + "[cloud-settings] Cloud features disabled — CloudSettingsService is a no-op", ) - testService.on("settings-updated", eventSpy) - await 
testService.initialize() - - vi.mocked(fetch).mockResolvedValue({ - ok: true, - json: vi.fn().mockResolvedValue(mockExtensionSettingsResponse), // Same version - } as unknown as Response) - - // Get the callback function passed to RefreshTimer for this instance - const timerCallback = - vi.mocked(RefreshTimer).mock.calls[vi.mocked(RefreshTimer).mock.calls.length - 1]?.[0]?.callback - - await timerCallback?.() - - expect(eventSpy).not.toHaveBeenCalled() - - testService.dispose() - }) - - it("should not emit event when fetch fails", async () => { - const eventSpy = vi.fn() - cloudSettingsService.on("settings-updated", eventSpy) - - mockAuthService.getSessionToken.mockReturnValue("valid-token") - vi.mocked(fetch).mockResolvedValue({ - ok: false, - status: 500, - statusText: "Internal Server Error", - } as unknown as Response) - - // Get the callback function passed to RefreshTimer - const timerCallback = vi.mocked(RefreshTimer).mock.calls[0]?.[0]?.callback - - await timerCallback?.() - - expect(eventSpy).not.toHaveBeenCalled() - }) - - it("should not emit event when no auth token available", async () => { - const eventSpy = vi.fn() - cloudSettingsService.on("settings-updated", eventSpy) - - mockAuthService.getSessionToken.mockReturnValue(null) - - // Get the callback function passed to RefreshTimer - const timerCallback = vi.mocked(RefreshTimer).mock.calls[0]?.[0]?.callback - - await timerCallback?.() - - expect(eventSpy).not.toHaveBeenCalled() - expect(fetch).not.toHaveBeenCalled() }) }) - describe("fetchSettings", () => { - beforeEach(async () => { + describe("initialize", () => { + it("should be a no-op", async () => { await cloudSettingsService.initialize() - }) - - it("should fetch and cache settings successfully", async () => { - mockAuthService.getSessionToken.mockReturnValue("valid-token") - vi.mocked(fetch).mockResolvedValue({ - ok: true, - json: vi.fn().mockResolvedValue(mockExtensionSettingsResponse), - } as unknown as Response) - - // Get the callback 
function passed to RefreshTimer - const timerCallback = vi.mocked(RefreshTimer).mock.calls[0]?.[0]?.callback - - const result = await timerCallback?.() - - expect(result).toBe(true) - - expect(fetch).toHaveBeenCalledWith("https://app.roocode.com/api/extension-settings", { - headers: { - Authorization: "Bearer valid-token", - }, - }) - - expect(mockContext.globalState.update).toHaveBeenCalledWith("organization-settings", mockSettings) - expect(mockContext.globalState.update).toHaveBeenCalledWith("user-settings", mockUserSettings) - }) - - it("should handle fetch errors gracefully", async () => { - mockAuthService.getSessionToken.mockReturnValue("valid-token") - vi.mocked(fetch).mockRejectedValue(new Error("Network error")) - - // Get the callback function passed to RefreshTimer - const timerCallback = vi.mocked(RefreshTimer).mock.calls[0]?.[0]?.callback - - const result = await timerCallback?.() - - expect(result).toBe(false) - - expect(mockLog).toHaveBeenCalledWith( - "[cloud-settings] Error fetching extension settings:", - expect.any(Error), - ) - }) - - it("should handle invalid response format", async () => { - mockAuthService.getSessionToken.mockReturnValue("valid-token") - vi.mocked(fetch).mockResolvedValue({ - ok: true, - json: vi.fn().mockResolvedValue({ invalid: "data" }), - } as unknown as Response) - - // Get the callback function passed to RefreshTimer - const timerCallback = vi.mocked(RefreshTimer).mock.calls[0]?.[0]?.callback - - const result = await timerCallback?.() - - expect(result).toBe(false) - - expect(mockLog).toHaveBeenCalledWith( - "[cloud-settings] Invalid extension settings format:", - expect.any(Object), - ) + expect(mockLog).toHaveBeenCalledWith("[cloud-settings] Cloud features disabled — initialize is a no-op") }) }) describe("getAllowList", () => { - it("should return settings allowList when available", async () => { - mockContext.globalState.get = vi.fn((key: string) => { - if (key === "organization-settings") return mockSettings - 
return undefined - }) - await cloudSettingsService.initialize() - - const allowList = cloudSettingsService.getAllowList() - expect(allowList).toEqual(mockSettings.allowList) - }) - - it("should return default allow all when no settings available", () => { + it("should return default allow all when cloud features are disabled", () => { const allowList = cloudSettingsService.getAllowList() expect(allowList).toEqual({ allowAll: true, providers: {} }) }) }) describe("getSettings", () => { - it("should return current settings", async () => { - // Create a fresh mock context for this test - const testContext = { - globalState: { - get: vi.fn((key: string) => { - if (key === "organization-settings") return mockSettings - return undefined - }), - update: vi.fn().mockResolvedValue(undefined), - }, - } as unknown as ExtensionContext - - // Mock auth service to not be logged out - const testAuthService = { - getState: vi.fn().mockReturnValue("active"), - getSessionToken: vi.fn(), - hasActiveSession: vi.fn().mockReturnValue(false), - on: vi.fn(), - } - - const testService = new CloudSettingsService( - testContext, - testAuthService as unknown as AuthService, - mockLog, - ) - await testService.initialize() - - const settings = testService.getSettings() - expect(settings).toEqual(mockSettings) - - testService.dispose() + it("should return undefined when cloud features are disabled", () => { + const settings = cloudSettingsService.getSettings() + expect(settings).toBeUndefined() }) - it("should return undefined when no settings available", () => { + it("should return undefined even after initialization", async () => { + await cloudSettingsService.initialize() const settings = cloudSettingsService.getSettings() expect(settings).toBeUndefined() }) }) - describe("dispose", () => { - it("should remove all listeners and stop timer", () => { - const removeAllListenersSpy = vi.spyOn(cloudSettingsService, "removeAllListeners") - - cloudSettingsService.dispose() - - 
expect(removeAllListenersSpy).toHaveBeenCalled() - expect(mockRefreshTimer.stop).toHaveBeenCalled() + describe("getUserSettings", () => { + it("should return undefined when cloud features are disabled", () => { + const userSettings = cloudSettingsService.getUserSettings() + expect(userSettings).toBeUndefined() }) }) - describe("auth service event handlers", () => { - it("should start timer when auth-state-changed event is triggered with active-session", async () => { - await cloudSettingsService.initialize() - - // Get the auth-state-changed handler - const authStateChangedHandler = mockAuthService.on.mock.calls.find( - (call: string[]) => call[0] === "auth-state-changed", - )?.[1] - expect(authStateChangedHandler).toBeDefined() - - // Simulate active-session state change - authStateChangedHandler({ - state: "active-session", - previousState: "attempting-session", - }) - expect(mockRefreshTimer.start).toHaveBeenCalled() - }) - - it("should stop timer and remove settings when auth-state-changed event is triggered with logged-out", async () => { - await cloudSettingsService.initialize() - - // Get the auth-state-changed handler - const authStateChangedHandler = mockAuthService.on.mock.calls.find( - (call: string[]) => call[0] === "auth-state-changed", - )?.[1] - expect(authStateChangedHandler).toBeDefined() - - // Simulate logged-out state change from active-session - await authStateChangedHandler({ - state: "logged-out", - previousState: "active-session", - }) - expect(mockRefreshTimer.stop).toHaveBeenCalled() - expect(mockContext.globalState.update).toHaveBeenCalledWith("organization-settings", undefined) - expect(mockContext.globalState.update).toHaveBeenCalledWith("user-settings", undefined) + describe("getUserFeatures", () => { + it("should return empty object when cloud features are disabled", () => { + const features = cloudSettingsService.getUserFeatures() + expect(features).toEqual({}) }) }) - describe("isTaskSyncEnabled", () => { - beforeEach(async () => { 
- await cloudSettingsService.initialize() + describe("getUserSettingsConfig", () => { + it("should return empty object when cloud features are disabled", () => { + const config = cloudSettingsService.getUserSettingsConfig() + expect(config).toEqual({}) }) + }) - it("should return true when org recordTaskMessages is true", () => { - // Set up mock settings with org recordTaskMessages = true - const mockSettings = { - version: 1, - cloudSettings: { - recordTaskMessages: true, - }, - defaultSettings: {}, - allowList: { allowAll: true, providers: {} }, - } - - // Mock that user has organization ID (indicating org settings should be used) - mockAuthService.getStoredOrganizationId.mockReturnValue("org-123") - - // Use reflection to set private settings - ;(cloudSettingsService as unknown as { settings: typeof mockSettings }).settings = mockSettings - - expect(cloudSettingsService.isTaskSyncEnabled()).toBe(true) - }) - - it("should return false when org recordTaskMessages is false", () => { - // Set up mock settings with org recordTaskMessages = false - const mockSettings = { - version: 1, - cloudSettings: { - recordTaskMessages: false, - }, - defaultSettings: {}, - allowList: { allowAll: true, providers: {} }, - } - - // Mock that user has organization ID (indicating org settings should be used) - mockAuthService.getStoredOrganizationId.mockReturnValue("org-123") - - // Use reflection to set private settings - ;(cloudSettingsService as unknown as { settings: typeof mockSettings }).settings = mockSettings - - expect(cloudSettingsService.isTaskSyncEnabled()).toBe(false) - }) - - it("should fall back to user taskSyncEnabled when org recordTaskMessages is undefined", () => { - // Set up mock settings with org recordTaskMessages undefined - const mockSettings = { - version: 1, - cloudSettings: {}, - defaultSettings: {}, - allowList: { allowAll: true, providers: {} }, - } - - const mockUserSettings = { - version: 1, - features: {}, - settings: { - taskSyncEnabled: true, - }, - 
} - - // Mock that user has no organization ID (indicating user settings should be used) - mockAuthService.getStoredOrganizationId.mockReturnValue(null) - - // Use reflection to set private settings - ;(cloudSettingsService as unknown as { settings: typeof mockSettings }).settings = mockSettings - ;(cloudSettingsService as unknown as { userSettings: typeof mockUserSettings }).userSettings = - mockUserSettings - - expect(cloudSettingsService.isTaskSyncEnabled()).toBe(true) - }) - - it("should return false when user taskSyncEnabled is false", () => { - // Set up mock settings with org recordTaskMessages undefined - const mockSettings = { - version: 1, - cloudSettings: {}, - defaultSettings: {}, - allowList: { allowAll: true, providers: {} }, - } - - const mockUserSettings = { - version: 1, - features: {}, - settings: { - taskSyncEnabled: false, - }, - } - - // Mock that user has no organization ID (indicating user settings should be used) - mockAuthService.getStoredOrganizationId.mockReturnValue(null) - - // Use reflection to set private settings - ;(cloudSettingsService as unknown as { settings: typeof mockSettings }).settings = mockSettings - ;(cloudSettingsService as unknown as { userSettings: typeof mockUserSettings }).userSettings = - mockUserSettings - - expect(cloudSettingsService.isTaskSyncEnabled()).toBe(false) - }) - - it("should return true when user taskSyncEnabled is undefined (default)", () => { - // Set up mock settings with org recordTaskMessages undefined - const mockSettings = { - version: 1, - cloudSettings: {}, - defaultSettings: {}, - allowList: { allowAll: true, providers: {} }, - } - - const mockUserSettings = { - version: 1, - features: {}, - settings: {}, - } - - // Mock that user has no organization ID (indicating user settings should be used) - mockAuthService.getStoredOrganizationId.mockReturnValue(null) - - // Use reflection to set private settings - ;(cloudSettingsService as unknown as { settings: typeof mockSettings }).settings = 
mockSettings - ;(cloudSettingsService as unknown as { userSettings: typeof mockUserSettings }).userSettings = - mockUserSettings - - expect(cloudSettingsService.isTaskSyncEnabled()).toBe(true) + describe("updateUserSettings", () => { + it("should return false when cloud features are disabled", async () => { + const result = await cloudSettingsService.updateUserSettings({}) + expect(result).toBe(false) }) + }) - it("should return false when no settings are available", () => { - // Mock that user has no organization ID - mockAuthService.getStoredOrganizationId.mockReturnValue(null) - - // Clear both settings - ;(cloudSettingsService as unknown as { settings: undefined }).settings = undefined - ;(cloudSettingsService as unknown as { userSettings: undefined }).userSettings = undefined - + describe("isTaskSyncEnabled", () => { + it("should return false when cloud features are disabled", () => { expect(cloudSettingsService.isTaskSyncEnabled()).toBe(false) }) - it("should return false when only org settings are available but cloudSettings is undefined", () => { - const mockSettings = { - version: 1, - defaultSettings: {}, - allowList: { allowAll: true, providers: {} }, - } - - // Mock that user has organization ID (indicating org settings should be used) - mockAuthService.getStoredOrganizationId.mockReturnValue("org-123") - - // Use reflection to set private settings - ;(cloudSettingsService as unknown as { settings: typeof mockSettings }).settings = mockSettings - ;(cloudSettingsService as unknown as { userSettings: undefined }).userSettings = undefined - + it("should always return false regardless of settings", async () => { + await cloudSettingsService.initialize() expect(cloudSettingsService.isTaskSyncEnabled()).toBe(false) }) + }) - it("should prioritize org settings over user settings", () => { - // Set up conflicting settings: org = false, user = true - const mockSettings = { - version: 1, - cloudSettings: { - recordTaskMessages: false, - }, - defaultSettings: {}, 
- allowList: { allowAll: true, providers: {} }, - } - - const mockUserSettings = { - version: 1, - features: {}, - settings: { - taskSyncEnabled: true, - }, - } - - // Mock that user has organization ID (indicating org settings should be used) - mockAuthService.getStoredOrganizationId.mockReturnValue("org-123") + describe("dispose", () => { + it("should remove all listeners", () => { + const removeAllListenersSpy = vi.spyOn(cloudSettingsService, "removeAllListeners") - // Use reflection to set private settings - ;(cloudSettingsService as unknown as { settings: typeof mockSettings }).settings = mockSettings - ;(cloudSettingsService as unknown as { userSettings: typeof mockUserSettings }).userSettings = - mockUserSettings + cloudSettingsService.dispose() - // Should return false (org setting takes precedence) - expect(cloudSettingsService.isTaskSyncEnabled()).toBe(false) + expect(removeAllListenersSpy).toHaveBeenCalled() }) }) }) diff --git a/packages/cloud/src/__tests__/CloudShareService.test.ts b/packages/cloud/src/__tests__/CloudShareService.test.ts index d8a3820b92d..c18e4cb288c 100644 --- a/packages/cloud/src/__tests__/CloudShareService.test.ts +++ b/packages/cloud/src/__tests__/CloudShareService.test.ts @@ -1,38 +1,11 @@ /* eslint-disable @typescript-eslint/no-explicit-any */ import type { MockedFunction } from "vitest" -import * as vscode from "vscode" import type { SettingsService, AuthService } from "@roo-code/types" import { CloudAPI } from "../CloudAPI.js" import { CloudShareService } from "../CloudShareService.js" -import { CloudAPIError, TaskNotFoundError } from "../errors.js" - -const mockFetch = vi.fn() -global.fetch = mockFetch as any - -vi.mock("vscode", () => ({ - window: { - showInformationMessage: vi.fn(), - showErrorMessage: vi.fn(), - showQuickPick: vi.fn(), - }, - env: { - clipboard: { - writeText: vi.fn(), - }, - openExternal: vi.fn(), - }, - Uri: { - parse: vi.fn(), - }, - extensions: { - getExtension: vi.fn(() => ({ - packageJSON: { version: 
"1.0.0" }, - })), - }, -})) vi.mock("../Config", () => ({ getRooCodeApiUrl: () => "https://app.roocode.com", @@ -51,7 +24,6 @@ describe("CloudShareService", () => { beforeEach(() => { vi.clearAllMocks() - mockFetch.mockClear() mockLog = vi.fn() mockAuthService = { @@ -70,249 +42,63 @@ describe("CloudShareService", () => { }) describe("shareTask", () => { - it("should share task with organization visibility and copy to clipboard", async () => { - const mockResponseData = { - success: true, - shareUrl: "https://app.roocode.com/share/abc123", - } - - ;(mockAuthService.getSessionToken as any).mockReturnValue("session-token") - - mockFetch.mockResolvedValue({ - ok: true, - json: vi.fn().mockResolvedValue(mockResponseData), - }) - - const result = await shareService.shareTask("task-123", "organization") - - expect(result.success).toBe(true) - expect(result.shareUrl).toBe("https://app.roocode.com/share/abc123") - - expect(mockFetch).toHaveBeenCalledWith("https://app.roocode.com/api/extension/share", { - method: "POST", - headers: { - "Content-Type": "application/json", - Authorization: "Bearer session-token", - "User-Agent": "Roo-Code 1.0.0", - }, - body: JSON.stringify({ - taskId: "task-123", - visibility: "organization", - }), - signal: expect.any(AbortSignal), - }) - - expect(vscode.env.clipboard.writeText).toHaveBeenCalledWith("https://app.roocode.com/share/abc123") - }) - - it("should share task with public visibility", async () => { - const mockResponseData = { - success: true, - shareUrl: "https://app.roocode.com/share/abc123", - } - - ;(mockAuthService.getSessionToken as any).mockReturnValue("session-token") - - mockFetch.mockResolvedValue({ - ok: true, - json: vi.fn().mockResolvedValue(mockResponseData), - }) - - const result = await shareService.shareTask("task-123", "public") - - expect(result.success).toBe(true) - - expect(mockFetch).toHaveBeenCalledWith("https://app.roocode.com/api/extension/share", { - method: "POST", - headers: { - "Content-Type": 
"application/json", - Authorization: "Bearer session-token", - "User-Agent": "Roo-Code 1.0.0", - }, - body: JSON.stringify({ taskId: "task-123", visibility: "public" }), - signal: expect.any(AbortSignal), - }) - }) - - it("should default to organization visibility when not specified", async () => { - const mockResponseData = { - success: true, - shareUrl: "https://app.roocode.com/share/abc123", - } - - ;(mockAuthService.getSessionToken as any).mockReturnValue("session-token") - mockFetch.mockResolvedValue({ - ok: true, - json: vi.fn().mockResolvedValue(mockResponseData), - }) - - const result = await shareService.shareTask("task-123") - - expect(result.success).toBe(true) - expect(mockFetch).toHaveBeenCalledWith("https://app.roocode.com/api/extension/share", { - method: "POST", - headers: { - "Content-Type": "application/json", - Authorization: "Bearer session-token", - "User-Agent": "Roo-Code 1.0.0", - }, - body: JSON.stringify({ - taskId: "task-123", - visibility: "organization", - }), - signal: expect.any(AbortSignal), - }) - }) - - it("should handle API error response", async () => { - const mockResponseData = { - success: false, - error: "Task not found", - } - - ;(mockAuthService.getSessionToken as any).mockReturnValue("session-token") - mockFetch.mockResolvedValue({ - ok: true, - json: vi.fn().mockResolvedValue(mockResponseData), - }) - - const result = await shareService.shareTask("task-123", "organization") - - expect(result.success).toBe(false) - expect(result.error).toBe("Task not found") - }) - - it("should handle authentication errors", async () => { - ;(mockAuthService.getSessionToken as any).mockReturnValue(null) - - await expect(shareService.shareTask("task-123", "organization")).rejects.toThrow("Authentication required") - }) - - it("should handle unexpected errors", async () => { - ;(mockAuthService.getSessionToken as any).mockReturnValue("session-token") - mockFetch.mockRejectedValue(new Error("Network error")) - - await 
expect(shareService.shareTask("task-123", "organization")).rejects.toThrow("Network error") + it("should throw error when cloud features are disabled", async () => { + await expect(shareService.shareTask("task-123", "organization")).rejects.toThrow( + "Cloud features are disabled in this fork", + ) }) - it("should throw TaskNotFoundError for 404 responses", async () => { - ;(mockAuthService.getSessionToken as any).mockReturnValue("session-token") - mockFetch.mockResolvedValue({ - ok: false, - status: 404, - statusText: "Not Found", - json: vi.fn().mockRejectedValue(new Error("Invalid JSON")), - text: vi.fn().mockResolvedValue("Not Found"), - }) - - await expect(shareService.shareTask("task-123", "organization")).rejects.toThrow(TaskNotFoundError) - await expect(shareService.shareTask("task-123", "organization")).rejects.toThrow("Task not found") + it("should throw error for public visibility", async () => { + await expect(shareService.shareTask("task-123", "public")).rejects.toThrow( + "Cloud features are disabled in this fork", + ) }) - it("should throw generic Error for non-404 HTTP errors", async () => { - ;(mockAuthService.getSessionToken as any).mockReturnValue("session-token") - mockFetch.mockResolvedValue({ - ok: false, - status: 500, - statusText: "Internal Server Error", - json: vi.fn().mockRejectedValue(new Error("Invalid JSON")), - text: vi.fn().mockResolvedValue("Internal Server Error"), - }) - - await expect(shareService.shareTask("task-123", "organization")).rejects.toThrow(CloudAPIError) - await expect(shareService.shareTask("task-123", "organization")).rejects.toThrow( - "HTTP 500: Internal Server Error", - ) + it("should throw error with default visibility", async () => { + await expect(shareService.shareTask("task-123")).rejects.toThrow("Cloud features are disabled in this fork") }) - it("should create TaskNotFoundError with correct properties", async () => { - ;(mockAuthService.getSessionToken as any).mockReturnValue("session-token") - 
mockFetch.mockResolvedValue({ - ok: false, - status: 404, - statusText: "Not Found", - json: vi.fn().mockRejectedValue(new Error("Invalid JSON")), - text: vi.fn().mockResolvedValue("Not Found"), - }) + it("should not make any fetch calls", async () => { + const mockFetch = vi.fn() + global.fetch = mockFetch as any try { await shareService.shareTask("task-123", "organization") - expect.fail("Expected TaskNotFoundError to be thrown") - } catch (error) { - expect(error).toBeInstanceOf(TaskNotFoundError) - expect(error).toBeInstanceOf(Error) - expect((error as TaskNotFoundError).message).toBe("Task not found") + } catch { + // Expected to throw } + + expect(mockFetch).not.toHaveBeenCalled() }) }) describe("canShareTask", () => { - it("should return true when authenticated and sharing is enabled", async () => { - ;(mockAuthService.isAuthenticated as any).mockReturnValue(true) - ;(mockSettingsService.getSettings as any).mockReturnValue({ - cloudSettings: { - enableTaskSharing: true, - }, - }) - + it("should return false when cloud features are disabled", async () => { const result = await shareService.canShareTask() - - expect(result).toBe(true) + expect(result).toBe(false) }) - it("should return false when authenticated but sharing is disabled", async () => { + it("should return false regardless of authentication state", async () => { ;(mockAuthService.isAuthenticated as any).mockReturnValue(true) - ;(mockSettingsService.getSettings as any).mockReturnValue({ - cloudSettings: { - enableTaskSharing: false, - }, - }) - const result = await shareService.canShareTask() - expect(result).toBe(false) }) - it("should return false when authenticated and sharing setting is undefined (default)", async () => { - ;(mockAuthService.isAuthenticated as any).mockReturnValue(true) + it("should return false regardless of settings", async () => { ;(mockSettingsService.getSettings as any).mockReturnValue({ - cloudSettings: {}, + cloudSettings: { + enableTaskSharing: true, + }, }) - - const 
result = await shareService.canShareTask() - - expect(result).toBe(false) - }) - - it("should return false when authenticated and no settings available (default)", async () => { - ;(mockAuthService.isAuthenticated as any).mockReturnValue(true) - ;(mockSettingsService.getSettings as any).mockReturnValue(undefined) - - const result = await shareService.canShareTask() - - expect(result).toBe(false) - }) - - it("should return false when settings service returns undefined", async () => { - ;(mockSettingsService.getSettings as any).mockReturnValue(undefined) - const result = await shareService.canShareTask() - expect(result).toBe(false) }) + }) - it("should handle errors gracefully", async () => { - ;(mockSettingsService.getSettings as any).mockImplementation(() => { - throw new Error("Settings error") - }) - - const result = await shareService.canShareTask() - + describe("canSharePublicly", () => { + it("should return false when cloud features are disabled", async () => { + const result = await shareService.canSharePublicly() expect(result).toBe(false) - expect(mockLog).toHaveBeenCalledWith( - "[ShareService] Error checking if task can be shared:", - expect.any(Error), - ) }) }) }) diff --git a/packages/cloud/src/__tests__/TelemetryClient.test.ts b/packages/cloud/src/__tests__/TelemetryClient.test.ts index 5cb952dc280..481bea345ad 100644 --- a/packages/cloud/src/__tests__/TelemetryClient.test.ts +++ b/packages/cloud/src/__tests__/TelemetryClient.test.ts @@ -84,9 +84,7 @@ describe("TelemetryClient", () => { expect(isEventCapturable(TelemetryEventName.TASK_CONVERSATION_MESSAGE)).toBe(false) }) - it("should return true for TASK_MESSAGE events when isTaskSyncEnabled returns true", () => { - mockSettingsService.isTaskSyncEnabled.mockReturnValue(true) - + it("should return true for TASK_MESSAGE events (not in exclude list)", () => { const client = new TelemetryClient(mockAuthService, mockSettingsService) const isEventCapturable = getPrivateProperty<(eventName: 
TelemetryEventName) => boolean>( @@ -94,22 +92,8 @@ describe("TelemetryClient", () => { "isEventCapturable", ).bind(client) + // TASK_MESSAGE is not in the exclude list, so base class returns true expect(isEventCapturable(TelemetryEventName.TASK_MESSAGE)).toBe(true) - expect(mockSettingsService.isTaskSyncEnabled).toHaveBeenCalled() - }) - - it("should return false for TASK_MESSAGE events when isTaskSyncEnabled returns false", () => { - mockSettingsService.isTaskSyncEnabled.mockReturnValue(false) - - const client = new TelemetryClient(mockAuthService, mockSettingsService) - - const isEventCapturable = getPrivateProperty<(eventName: TelemetryEventName) => boolean>( - client, - "isEventCapturable", - ).bind(client) - - expect(isEventCapturable(TelemetryEventName.TASK_MESSAGE)).toBe(false) - expect(mockSettingsService.isTaskSyncEnabled).toHaveBeenCalled() }) }) @@ -198,193 +182,59 @@ describe("TelemetryClient", () => { }) describe("capture", () => { - it("should not capture events that are not capturable", async () => { + it("should not send any requests (cloud features disabled)", async () => { const client = new TelemetryClient(mockAuthService, mockSettingsService) await client.capture({ - event: TelemetryEventName.TASK_CONVERSATION_MESSAGE, // In exclude list. 
+ event: TelemetryEventName.TASK_CREATED, properties: { test: "value" }, }) expect(mockFetch).not.toHaveBeenCalled() }) - it("should not capture TASK_MESSAGE events when recordTaskMessages is false", async () => { - mockSettingsService.getSettings.mockReturnValue({ - cloudSettings: { - recordTaskMessages: false, - }, - }) - - const client = new TelemetryClient(mockAuthService, mockSettingsService) - - await client.capture({ - event: TelemetryEventName.TASK_MESSAGE, - properties: { - taskId: "test-task-id", - message: { - ts: 1, - type: "say", - say: "text", - text: "test message", - }, - }, - }) - - expect(mockFetch).not.toHaveBeenCalled() - }) - - it("should not capture TASK_MESSAGE events when isTaskSyncEnabled returns false", async () => { - mockSettingsService.isTaskSyncEnabled.mockReturnValue(false) - + it("should not send requests for TASK_MESSAGE events", async () => { const client = new TelemetryClient(mockAuthService, mockSettingsService) await client.capture({ event: TelemetryEventName.TASK_MESSAGE, properties: { taskId: "test-task-id", - message: { - ts: 1, - type: "say", - say: "text", - text: "test message", - }, + message: { ts: 1, type: "say", say: "text", text: "test message" }, }, }) expect(mockFetch).not.toHaveBeenCalled() - expect(mockSettingsService.isTaskSyncEnabled).toHaveBeenCalled() }) - it("should not send request when schema validation fails", async () => { + it("should not send requests for TASK_CONVERSATION_MESSAGE events", async () => { const client = new TelemetryClient(mockAuthService, mockSettingsService) await client.capture({ - event: TelemetryEventName.TASK_CREATED, + event: TelemetryEventName.TASK_CONVERSATION_MESSAGE, properties: { test: "value" }, }) expect(mockFetch).not.toHaveBeenCalled() - expect(console.error).toHaveBeenCalledWith(expect.stringContaining("Invalid telemetry event")) - }) - - it("should send request when event is capturable and validation passes", async () => { - const client = new 
TelemetryClient(mockAuthService, mockSettingsService) - - const providerProperties = { - appName: "roo-code", - appVersion: "1.0.0", - vscodeVersion: "1.60.0", - platform: "darwin", - editorName: "vscode", - language: "en", - mode: "code", - } - - const eventProperties = { - taskId: "test-task-id", - } - - const mockValidatedData = { - type: TelemetryEventName.TASK_CREATED, - properties: { - ...providerProperties, - taskId: "test-task-id", - }, - } - - const mockProvider: TelemetryPropertiesProvider = { - getTelemetryProperties: vi.fn().mockResolvedValue(providerProperties), - } - - client.setProvider(mockProvider) - - await client.capture({ - event: TelemetryEventName.TASK_CREATED, - properties: eventProperties, - }) - - expect(mockFetch).toHaveBeenCalledWith( - "https://app.roocode.com/api/events", - expect.objectContaining({ - method: "POST", - body: JSON.stringify(mockValidatedData), - }), - ) - }) - - it("should attempt to capture TASK_MESSAGE events when isTaskSyncEnabled returns true", async () => { - mockSettingsService.isTaskSyncEnabled.mockReturnValue(true) - - const eventProperties = { - appName: "roo-code", - appVersion: "1.0.0", - vscodeVersion: "1.60.0", - platform: "darwin", - editorName: "vscode", - language: "en", - mode: "code", - taskId: "test-task-id", - message: { - ts: 1, - type: "say", - say: "text", - text: "test message", - }, - } - - const mockValidatedData = { - type: TelemetryEventName.TASK_MESSAGE, - properties: eventProperties, - } - - const client = new TelemetryClient(mockAuthService, mockSettingsService) - - await client.capture({ - event: TelemetryEventName.TASK_MESSAGE, - properties: eventProperties, - }) - - expect(mockSettingsService.isTaskSyncEnabled).toHaveBeenCalled() - expect(mockFetch).toHaveBeenCalledWith( - "https://app.roocode.com/api/events", - expect.objectContaining({ - method: "POST", - body: JSON.stringify(mockValidatedData), - }), - ) - }) - - it("should handle fetch errors gracefully", async () => { - const client 
= new TelemetryClient(mockAuthService, mockSettingsService) - - mockFetch.mockRejectedValue(new Error("Network error")) - - await expect( - client.capture({ - event: TelemetryEventName.TASK_CREATED, - properties: { test: "value" }, - }), - ).resolves.not.toThrow() }) }) describe("telemetry state methods", () => { - it("should always return true for isTelemetryEnabled", () => { + it("should always return false for isTelemetryEnabled", () => { const client = new TelemetryClient(mockAuthService, mockSettingsService) - expect(client.isTelemetryEnabled()).toBe(true) + expect(client.isTelemetryEnabled()).toBe(false) }) it("should have empty implementations for updateTelemetryState and shutdown", async () => { const client = new TelemetryClient(mockAuthService, mockSettingsService) client.updateTelemetryState(true) await client.shutdown() + // Should not throw }) }) describe("backfillMessages", () => { - it("should not send request when not authenticated", async () => { - mockAuthService.isAuthenticated.mockReturnValue(false) + it("should not send any requests (cloud features disabled)", async () => { const client = new TelemetryClient(mockAuthService, mockSettingsService) const messages = [ @@ -401,8 +251,7 @@ describe("TelemetryClient", () => { expect(mockFetch).not.toHaveBeenCalled() }) - it("should not send request when no session token available", async () => { - mockAuthService.getSessionToken.mockReturnValue(null) + it("should not send request even when authenticated", async () => { const client = new TelemetryClient(mockAuthService, mockSettingsService) const messages = [ @@ -417,87 +266,15 @@ describe("TelemetryClient", () => { await client.backfillMessages(messages, "test-task-id") expect(mockFetch).not.toHaveBeenCalled() - expect(console.error).toHaveBeenCalledWith( - "[TelemetryClient#backfillMessages] Unauthorized: No session token available.", - ) }) - it("should send FormData request with correct structure when authenticated", async () => { + it("should not 
send request even with provider set", async () => { const client = new TelemetryClient(mockAuthService, mockSettingsService) - const providerProperties = { - appName: "roo-code", - appVersion: "1.0.0", - vscodeVersion: "1.60.0", - platform: "darwin", - editorName: "vscode", - language: "en", - mode: "code", - } - const mockProvider: TelemetryPropertiesProvider = { - getTelemetryProperties: vi.fn().mockResolvedValue(providerProperties), - } - - client.setProvider(mockProvider) - - const messages = [ - { - ts: 1, - type: "say" as const, - say: "text" as const, - text: "test message 1", - }, - { - ts: 2, - type: "ask" as const, - ask: "followup" as const, - text: "test question", - }, - ] - - await client.backfillMessages(messages, "test-task-id") - - expect(mockFetch).toHaveBeenCalledWith( - "https://app.roocode.com/api/events/backfill", - expect.objectContaining({ - method: "POST", - headers: { - Authorization: "Bearer mock-token", - }, - body: expect.any(FormData), + getTelemetryProperties: vi.fn().mockResolvedValue({ + appVersion: "1.0.0", }), - ) - - // Verify FormData contents - const call = mockFetch.mock.calls[0] - const formData = call?.[1]?.body as FormData - - expect(formData.get("taskId")).toBe("test-task-id") - - // Parse and compare properties as objects since JSON.stringify order can vary - const propertiesJson = formData.get("properties") as string - const parsedProperties = JSON.parse(propertiesJson) - expect(parsedProperties).toEqual({ - taskId: "test-task-id", - ...providerProperties, - }) - // The messages are stored as a File object under the "file" key - const fileField = formData.get("file") as File - expect(fileField).toBeInstanceOf(File) - expect(fileField.name).toBe("task.json") - expect(fileField.type).toBe("application/json") - - // Read the file content to verify the messages - const fileContent = await fileField.text() - expect(fileContent).toBe(JSON.stringify(messages)) - }) - - it("should handle provider errors gracefully", async () => 
{ - const client = new TelemetryClient(mockAuthService, mockSettingsService) - - const mockProvider: TelemetryPropertiesProvider = { - getTelemetryProperties: vi.fn().mockRejectedValue(new Error("Provider error")), } client.setProvider(mockProvider) @@ -513,163 +290,15 @@ describe("TelemetryClient", () => { await client.backfillMessages(messages, "test-task-id") - expect(mockFetch).toHaveBeenCalledWith( - "https://app.roocode.com/api/events/backfill", - expect.objectContaining({ - method: "POST", - headers: { - Authorization: "Bearer mock-token", - }, - body: expect.any(FormData), - }), - ) - - // Verify FormData contents - should still work with just taskId - const call = mockFetch.mock.calls[0] - const formData = call?.[1]?.body as FormData - - expect(formData.get("taskId")).toBe("test-task-id") - expect(formData.get("properties")).toBe( - JSON.stringify({ - taskId: "test-task-id", - }), - ) - - // The messages are stored as a File object under the "file" key - const fileField = formData.get("file") as File - expect(fileField).toBeInstanceOf(File) - expect(fileField.name).toBe("task.json") - expect(fileField.type).toBe("application/json") - - // Read the file content to verify the messages - const fileContent = await fileField.text() - expect(fileContent).toBe(JSON.stringify(messages)) - }) - - it("should work without provider set", async () => { - const client = new TelemetryClient(mockAuthService, mockSettingsService) - - const messages = [ - { - ts: 1, - type: "say" as const, - say: "text" as const, - text: "test message", - }, - ] - - await client.backfillMessages(messages, "test-task-id") - - expect(mockFetch).toHaveBeenCalledWith( - "https://app.roocode.com/api/events/backfill", - expect.objectContaining({ - method: "POST", - headers: { - Authorization: "Bearer mock-token", - }, - body: expect.any(FormData), - }), - ) - - // Verify FormData contents - should work with just taskId - const call = mockFetch.mock.calls[0] - const formData = call?.[1]?.body as 
FormData - - expect(formData.get("taskId")).toBe("test-task-id") - expect(formData.get("properties")).toBe( - JSON.stringify({ - taskId: "test-task-id", - }), - ) - - // The messages are stored as a File object under the "file" key - const fileField = formData.get("file") as File - expect(fileField).toBeInstanceOf(File) - expect(fileField.name).toBe("task.json") - expect(fileField.type).toBe("application/json") - - // Read the file content to verify the messages - const fileContent = await fileField.text() - expect(fileContent).toBe(JSON.stringify(messages)) - }) - - it("should handle fetch errors gracefully", async () => { - const client = new TelemetryClient(mockAuthService, mockSettingsService) - - mockFetch.mockRejectedValue(new Error("Network error")) - - const messages = [ - { - ts: 1, - type: "say" as const, - say: "text" as const, - text: "test message", - }, - ] - - await expect(client.backfillMessages(messages, "test-task-id")).resolves.not.toThrow() - - expect(console.error).toHaveBeenCalledWith( - expect.stringContaining( - "[TelemetryClient#backfillMessages] Error uploading messages: Error: Network error", - ), - ) - }) - - it("should handle HTTP error responses", async () => { - const client = new TelemetryClient(mockAuthService, mockSettingsService) - - mockFetch.mockResolvedValue({ - ok: false, - status: 404, - statusText: "Not Found", - }) - - const messages = [ - { - ts: 1, - type: "say" as const, - say: "text" as const, - text: "test message", - }, - ] - - await client.backfillMessages(messages, "test-task-id") - - expect(console.error).toHaveBeenCalledWith( - "[TelemetryClient#backfillMessages] POST events/backfill -> 404 Not Found", - ) + expect(mockFetch).not.toHaveBeenCalled() }) - it("should handle empty messages array", async () => { + it("should handle empty messages array without sending requests", async () => { const client = new TelemetryClient(mockAuthService, mockSettingsService) await client.backfillMessages([], "test-task-id") - 
expect(mockFetch).toHaveBeenCalledWith( - "https://app.roocode.com/api/events/backfill", - expect.objectContaining({ - method: "POST", - headers: { - Authorization: "Bearer mock-token", - }, - body: expect.any(FormData), - }), - ) - - // Verify FormData contents - const call = mockFetch.mock.calls[0] - const formData = call?.[1]?.body as FormData - - // The messages are stored as a File object under the "file" key - const fileField = formData.get("file") as File - expect(fileField).toBeInstanceOf(File) - expect(fileField.name).toBe("task.json") - expect(fileField.type).toBe("application/json") - - // Read the file content to verify the empty messages array - const fileContent = await fileField.text() - expect(fileContent).toBe("[]") + expect(mockFetch).not.toHaveBeenCalled() }) }) }) diff --git a/packages/cloud/src/__tests__/WebAuthService.spec.ts b/packages/cloud/src/__tests__/WebAuthService.spec.ts index aa406e400d7..f6ba4785113 100644 --- a/packages/cloud/src/__tests__/WebAuthService.spec.ts +++ b/packages/cloud/src/__tests__/WebAuthService.spec.ts @@ -1,23 +1,8 @@ -// npx vitest run src/__tests__/auth/WebAuthService.spec.ts +// npx vitest run src/__tests__/WebAuthService.spec.ts -import crypto from "crypto" - -import type { Mock } from "vitest" import type { ExtensionContext } from "vscode" import { WebAuthService } from "../WebAuthService.js" -import { RefreshTimer } from "../RefreshTimer.js" -import { getClerkBaseUrl, getRooCodeApiUrl } from "../config.js" -import { getUserAgent } from "../utils.js" - -vi.mock("crypto") - -vi.mock("../RefreshTimer") -vi.mock("../config") -vi.mock("../utils") - -const mockFetch = vi.fn() -global.fetch = mockFetch vi.mock("vscode", () => ({ window: { @@ -35,42 +20,14 @@ vi.mock("vscode", () => ({ describe("WebAuthService", () => { let authService: WebAuthService - let mockTimer: { - start: Mock - stop: Mock - reset: Mock - } - let mockLog: Mock - let mockContext: { - subscriptions: { push: Mock } - secrets: { - get: Mock - 
store: Mock - delete: Mock - onDidChange: Mock - } - globalState: { - get: Mock - update: Mock - } - extension: { - packageJSON: { - version: string - publisher: string - name: string - } - } - } + let mockLog: ReturnType + let mockContext: ExtensionContext beforeEach(() => { - // Reset all mocks vi.clearAllMocks() - // Setup mock context with proper subscriptions array mockContext = { - subscriptions: { - push: vi.fn(), - }, + subscriptions: { push: vi.fn() }, secrets: { get: vi.fn().mockResolvedValue(undefined), store: vi.fn().mockResolvedValue(undefined), @@ -88,35 +45,15 @@ describe("WebAuthService", () => { name: "roo-cline", }, }, - } + } as unknown as ExtensionContext - // Setup timer mock - mockTimer = { - start: vi.fn(), - stop: vi.fn(), - reset: vi.fn(), - } - const MockedRefreshTimer = vi.mocked(RefreshTimer) - MockedRefreshTimer.mockImplementation(() => mockTimer as unknown as RefreshTimer) - - // Setup config mocks - use production URL by default to maintain existing test behavior - vi.mocked(getClerkBaseUrl).mockReturnValue("https://clerk.roocode.com") - vi.mocked(getRooCodeApiUrl).mockReturnValue("https://api.test.com") - - // Setup utils mock - vi.mocked(getUserAgent).mockReturnValue("Roo-Code 1.0.0") - - // Setup crypto mock - vi.mocked(crypto.randomBytes).mockReturnValue(Buffer.from("test-random-bytes") as never) - - // Setup log mock mockLog = vi.fn() - authService = new WebAuthService(mockContext as unknown as ExtensionContext, mockLog) + authService = new WebAuthService(mockContext, mockLog) }) afterEach(() => { - vi.clearAllMocks() + authService.dispose() }) describe("constructor", () => { @@ -128,1140 +65,172 @@ describe("WebAuthService", () => { expect(authService.getUserInfo()).toBeNull() }) - it("should create RefreshTimer with correct configuration", () => { - expect(RefreshTimer).toHaveBeenCalledWith({ - callback: expect.any(Function), - successInterval: 50_000, - initialBackoffMs: 1_000, - maxBackoffMs: 300_000, - }) - }) - it("should 
use console.log as default logger", () => { - const serviceWithoutLog = new WebAuthService(mockContext as unknown as ExtensionContext) - // Can't directly test console.log usage, but constructor should not throw + const serviceWithoutLog = new WebAuthService(mockContext) expect(serviceWithoutLog).toBeInstanceOf(WebAuthService) + serviceWithoutLog.dispose() + }) + + it("should log that cloud features are disabled", () => { + expect(mockLog).toHaveBeenCalledWith("[auth] Using WebAuthService (cloud features disabled)") }) }) describe("initialize", () => { - it("should handle credentials change and setup event listener", async () => { + it("should transition to logged-out state", async () => { await authService.initialize() - - expect(mockContext.subscriptions.push).toHaveBeenCalled() - expect(mockContext.secrets.onDidChange).toHaveBeenCalled() + expect(authService.getState()).toBe("logged-out") }) it("should not initialize twice", async () => { await authService.initialize() - const firstCallCount = vi.mocked(mockContext.secrets.onDidChange).mock.calls.length - await authService.initialize() - expect(mockContext.secrets.onDidChange).toHaveBeenCalledTimes(firstCallCount) expect(mockLog).toHaveBeenCalledWith("[auth] initialize() called after already initialized") }) - it("should transition to logged-out when no credentials exist", async () => { - mockContext.secrets.get.mockResolvedValue(undefined) - - const authStateChangedSpy = vi.fn() - authService.on("auth-state-changed", authStateChangedSpy) - - await authService.initialize() - - expect(authService.getState()).toBe("logged-out") - expect(authStateChangedSpy).toHaveBeenCalledWith({ - state: "logged-out", - previousState: "initializing", - }) - }) - - it("should transition to attempting-session when valid credentials exist", async () => { - const credentials = { - clientToken: "test-token", - sessionId: "test-session", - } - mockContext.secrets.get.mockResolvedValue(JSON.stringify(credentials)) - - const 
authStateChangedSpy = vi.fn() - authService.on("auth-state-changed", authStateChangedSpy) - - await authService.initialize() - - expect(authService.getState()).toBe("attempting-session") - expect(authStateChangedSpy).toHaveBeenCalledWith({ - state: "attempting-session", - previousState: "initializing", - }) - expect(mockTimer.start).toHaveBeenCalled() - }) - - it("should handle invalid credentials gracefully", async () => { - mockContext.secrets.get.mockResolvedValue("invalid-json") - - const authStateChangedSpy = vi.fn() - authService.on("auth-state-changed", authStateChangedSpy) + it("should always end in logged-out state regardless of stored credentials", async () => { + // Even if credentials exist, the no-op service goes to logged-out + vi.mocked(mockContext.secrets.get).mockResolvedValue( + JSON.stringify({ clientToken: "test-token", sessionId: "test-session" }), + ) await authService.initialize() - expect(authService.getState()).toBe("logged-out") - expect(mockLog).toHaveBeenCalledWith("[auth] Failed to parse stored credentials:", expect.any(Error)) }) - it("should handle credentials change events", async () => { - let onDidChangeCallback: (e: { key: string }) => void - - mockContext.secrets.onDidChange.mockImplementation((callback: (e: { key: string }) => void) => { - onDidChangeCallback = callback - return { dispose: vi.fn() } - }) - + it("should log that cloud features are disabled on initialize", async () => { await authService.initialize() - - // Simulate credentials change event - const newCredentials = { - clientToken: "new-token", - sessionId: "new-session", - } - mockContext.secrets.get.mockResolvedValue(JSON.stringify(newCredentials)) - - const authStateChangedSpy = vi.fn() - authService.on("auth-state-changed", authStateChangedSpy) - - onDidChangeCallback!({ key: "clerk-auth-credentials" }) - await new Promise((resolve) => setTimeout(resolve, 0)) // Wait for async handling - - expect(authStateChangedSpy).toHaveBeenCalled() + 
expect(mockLog).toHaveBeenCalledWith("[auth] Cloud features disabled — initialized in logged-out state") }) }) describe("login", () => { - beforeEach(async () => { + it("should be a no-op when cloud features are disabled", async () => { await authService.initialize() - }) - - it("should generate state and open external URL", async () => { - const mockOpenExternal = vi.fn() - const vscode = await import("vscode") - vi.mocked(vscode.env.openExternal).mockImplementation(mockOpenExternal) - await authService.login() - - expect(crypto.randomBytes).toHaveBeenCalledWith(16) - - expect(mockContext.globalState.update).toHaveBeenCalledWith( - "clerk-auth-state", - "746573742d72616e646f6d2d6279746573", - ) - - expect(mockOpenExternal).toHaveBeenCalledWith( - expect.objectContaining({ - toString: expect.any(Function), - }), - ) + // Should not throw, just log + expect(mockLog).toHaveBeenCalledWith("[auth] Cloud features disabled — login is a no-op") }) - it("should use package.json values for redirect URI with default sign-in endpoint", async () => { - const mockOpenExternal = vi.fn() - const vscode = await import("vscode") - vi.mocked(vscode.env.openExternal).mockImplementation(mockOpenExternal) - + it("should not open any external URLs", async () => { + await authService.initialize() await authService.login() - const expectedUrl = - "https://api.test.com/extension/sign-in?state=746573742d72616e646f6d2d6279746573&auth_redirect=vscode%3A%2F%2FRooVeterinaryInc.roo-cline" - expect(mockOpenExternal).toHaveBeenCalledWith( - expect.objectContaining({ - toString: expect.any(Function), - }), - ) - - // Verify the actual URL - const calledUri = mockOpenExternal.mock.calls[0]?.[0] - expect(calledUri.toString()).toBe(expectedUrl) - }) - - it("should use provider signup URL when useProviderSignup is true", async () => { - const mockOpenExternal = vi.fn() const vscode = await import("vscode") - vi.mocked(vscode.env.openExternal).mockImplementation(mockOpenExternal) - - await 
authService.login(undefined, true) - - const expectedUrl = - "https://api.test.com/extension/provider-sign-up?state=746573742d72616e646f6d2d6279746573&auth_redirect=vscode%3A%2F%2FRooVeterinaryInc.roo-cline" - expect(mockOpenExternal).toHaveBeenCalledWith( - expect.objectContaining({ - toString: expect.any(Function), - }), - ) - - // Verify the actual URL - const calledUri = mockOpenExternal.mock.calls[0]?.[0] - expect(calledUri.toString()).toBe(expectedUrl) - }) - - it("should handle errors during login", async () => { - vi.mocked(crypto.randomBytes).mockImplementation(() => { - throw new Error("Crypto error") - }) - - await expect(authService.login()).rejects.toThrow("Failed to initiate Roo Code Cloud authentication") - expect(mockLog).toHaveBeenCalledWith("[auth] Error initiating Roo Code Cloud auth: Error: Crypto error") + expect(vscode.env.openExternal).not.toHaveBeenCalled() }) }) describe("handleCallback", () => { - beforeEach(async () => { + it("should be a no-op when cloud features are disabled", async () => { await authService.initialize() + await authService.handleCallback("code", "state") + expect(mockLog).toHaveBeenCalledWith("[auth] Cloud features disabled — handleCallback is a no-op") }) - it("should handle invalid parameters", async () => { - const vscode = await import("vscode") - const mockShowInfo = vi.fn() - vi.mocked(vscode.window.showInformationMessage).mockImplementation(mockShowInfo) - - await authService.handleCallback(null, "state") - expect(mockShowInfo).toHaveBeenCalledWith("Invalid Roo Code Cloud sign in url") - - await authService.handleCallback("code", null) - expect(mockShowInfo).toHaveBeenCalledWith("Invalid Roo Code Cloud sign in url") - }) - - it("should validate state parameter", async () => { - mockContext.globalState.get.mockReturnValue("stored-state") - - await expect(authService.handleCallback("code", "different-state")).rejects.toThrow( - "Failed to handle Roo Code Cloud callback", - ) - 
expect(mockLog).toHaveBeenCalledWith("[auth] State mismatch in callback") - }) - - it("should successfully handle valid callback", async () => { - const storedState = "valid-state" - mockContext.globalState.get.mockReturnValue(storedState) - - // Mock successful Clerk sign-in response - const mockResponse = { - ok: true, - json: () => - Promise.resolve({ - response: { created_session_id: "session-123" }, - }), - headers: { - get: (header: string) => (header === "authorization" ? "Bearer token-123" : null), - }, - } - mockFetch.mockResolvedValue(mockResponse) - - const vscode = await import("vscode") - const mockShowInfo = vi.fn() - vi.mocked(vscode.window.showInformationMessage).mockImplementation(mockShowInfo) - - await authService.handleCallback("auth-code", storedState) + it("should not make any fetch calls", async () => { + const mockFetch = vi.fn() + global.fetch = mockFetch - expect(mockContext.secrets.store).toHaveBeenCalledWith( - "clerk-auth-credentials", - JSON.stringify({ - clientToken: "Bearer token-123", - sessionId: "session-123", - organizationId: null, - }), - ) - expect(mockShowInfo).toHaveBeenCalledWith("Successfully authenticated with Roo Code Cloud") - }) - - it("should store provider model when provided in callback", async () => { - const storedState = "valid-state" - mockContext.globalState.get.mockReturnValue(storedState) - - // Mock successful Clerk sign-in response - const mockResponse = { - ok: true, - json: () => - Promise.resolve({ - response: { created_session_id: "session-123" }, - }), - headers: { - get: (header: string) => (header === "authorization" ? 
"Bearer token-123" : null), - }, - } - mockFetch.mockResolvedValue(mockResponse) - - const vscode = await import("vscode") - const mockShowInfo = vi.fn() - vi.mocked(vscode.window.showInformationMessage).mockImplementation(mockShowInfo) - - await authService.handleCallback("auth-code", storedState, null, "xai/grok-code-fast-1") - - expect(mockContext.globalState.update).toHaveBeenCalledWith("roo-provider-model", "xai/grok-code-fast-1") - expect(mockContext.globalState.update).toHaveBeenCalledWith("roo-auth-skip-model", undefined) - expect(mockLog).toHaveBeenCalledWith("[auth] Stored provider model: xai/grok-code-fast-1") - }) - - it("should set skip model flag when provider model is NOT provided in callback", async () => { - const storedState = "valid-state" - mockContext.globalState.get.mockReturnValue(storedState) - - // Mock successful Clerk sign-in response - const mockResponse = { - ok: true, - json: () => - Promise.resolve({ - response: { created_session_id: "session-123" }, - }), - headers: { - get: (header: string) => (header === "authorization" ? 
"Bearer token-123" : null), - }, - } - mockFetch.mockResolvedValue(mockResponse) - - const vscode = await import("vscode") - const mockShowInfo = vi.fn() - vi.mocked(vscode.window.showInformationMessage).mockImplementation(mockShowInfo) - - // Call without provider model - await authService.handleCallback("auth-code", storedState, null) - - expect(mockContext.globalState.update).toHaveBeenCalledWith("roo-auth-skip-model", true) - expect(mockLog).toHaveBeenCalledWith("[auth] No provider model selected during signup") - }) - - it("should handle Clerk API errors", async () => { - const storedState = "valid-state" - mockContext.globalState.get.mockReturnValue(storedState) - - mockFetch.mockResolvedValue({ - ok: false, - status: 400, - statusText: "Bad Request", - }) - - const authStateChangedSpy = vi.fn() - authService.on("auth-state-changed", authStateChangedSpy) + await authService.initialize() + await authService.handleCallback("auth-code", "valid-state") - await expect(authService.handleCallback("auth-code", storedState)).rejects.toThrow( - "Failed to handle Roo Code Cloud callback", - ) - expect(authStateChangedSpy).toHaveBeenCalled() + expect(mockFetch).not.toHaveBeenCalled() }) }) describe("logout", () => { - beforeEach(async () => { + it("should be a no-op when cloud features are disabled", async () => { await authService.initialize() - }) - - it("should clear credentials and call Clerk logout", async () => { - // Set up credentials first by simulating a login state - const credentials = { - clientToken: "test-token", - sessionId: "test-session", - } - - // Manually set the credentials in the service - authService["credentials"] = credentials - - // Mock successful logout response - mockFetch.mockResolvedValue({ ok: true }) - - const vscode = await import("vscode") - const mockShowInfo = vi.fn() - vi.mocked(vscode.window.showInformationMessage).mockImplementation(mockShowInfo) - await authService.logout() - - 
expect(mockContext.secrets.delete).toHaveBeenCalledWith("clerk-auth-credentials") - expect(mockContext.globalState.update).toHaveBeenCalledWith("clerk-auth-state", undefined) - expect(mockFetch).toHaveBeenCalledWith( - "https://clerk.roocode.com/v1/client/sessions/test-session/remove", - expect.objectContaining({ - method: "POST", - headers: expect.objectContaining({ - Authorization: "Bearer test-token", - }), - }), - ) - expect(mockShowInfo).toHaveBeenCalledWith("Logged out from Roo Code Cloud") + expect(mockLog).toHaveBeenCalledWith("[auth] Cloud features disabled — logout is a no-op") }) - it("should handle logout without credentials", async () => { - const vscode = await import("vscode") - const mockShowInfo = vi.fn() - vi.mocked(vscode.window.showInformationMessage).mockImplementation(mockShowInfo) + it("should not make any fetch calls", async () => { + const mockFetch = vi.fn() + global.fetch = mockFetch + await authService.initialize() await authService.logout() - expect(mockContext.secrets.delete).toHaveBeenCalled() expect(mockFetch).not.toHaveBeenCalled() - expect(mockShowInfo).toHaveBeenCalledWith("Logged out from Roo Code Cloud") - }) - - it("should handle Clerk logout errors gracefully", async () => { - // Set up credentials first by simulating a login state - const credentials = { - clientToken: "test-token", - sessionId: "test-session", - } - - // Manually set the credentials in the service - authService["credentials"] = credentials - - // Mock failed logout response - mockFetch.mockRejectedValue(new Error("Network error")) - - const vscode = await import("vscode") - const mockShowInfo = vi.fn() - vi.mocked(vscode.window.showInformationMessage).mockImplementation(mockShowInfo) - - await authService.logout() - - expect(mockLog).toHaveBeenCalledWith("[auth] Error calling clerkLogout:", expect.any(Error)) - expect(mockShowInfo).toHaveBeenCalledWith("Logged out from Roo Code Cloud") }) }) describe("state management", () => { - it("should return correct 
state", () => { - expect(authService.getState()).toBe("initializing") + it("should return correct state after initialization", async () => { + await authService.initialize() + expect(authService.getState()).toBe("logged-out") }) - it("should return correct authentication status", async () => { + it("should always return false for isAuthenticated", async () => { + expect(authService.isAuthenticated()).toBe(false) await authService.initialize() expect(authService.isAuthenticated()).toBe(false) - - // Create a new service instance with credentials - const credentials = { - clientToken: "test-token", - sessionId: "test-session", - } - mockContext.secrets.get.mockResolvedValue(JSON.stringify(credentials)) - - const authenticatedService = new WebAuthService(mockContext as unknown as ExtensionContext, mockLog) - await authenticatedService.initialize() - - expect(authenticatedService.isAuthenticated()).toBe(true) - expect(authenticatedService.hasActiveSession()).toBe(false) }) - it("should return session token only for active sessions", () => { - expect(authService.getSessionToken()).toBeUndefined() - - // Manually set state to active-session for testing - // This would normally happen through refreshSession - authService["state"] = "active-session" - authService["sessionToken"] = "test-jwt" - - expect(authService.getSessionToken()).toBe("test-jwt") + it("should always return false for hasActiveSession", async () => { + expect(authService.hasActiveSession()).toBe(false) + await authService.initialize() + expect(authService.hasActiveSession()).toBe(false) }) - it("should return correct values for new methods", async () => { + it("should always return false for hasOrIsAcquiringActiveSession", async () => { + expect(authService.hasOrIsAcquiringActiveSession()).toBe(false) await authService.initialize() expect(authService.hasOrIsAcquiringActiveSession()).toBe(false) - - // Create a new service instance with credentials (attempting-session) - const credentials = { - 
clientToken: "test-token", - sessionId: "test-session", - } - mockContext.secrets.get.mockResolvedValue(JSON.stringify(credentials)) - - const attemptingService = new WebAuthService(mockContext as unknown as ExtensionContext, mockLog) - await attemptingService.initialize() - - expect(attemptingService.hasOrIsAcquiringActiveSession()).toBe(true) - expect(attemptingService.hasActiveSession()).toBe(false) - - // Manually set state to active-session for testing - attemptingService["state"] = "active-session" - expect(attemptingService.hasOrIsAcquiringActiveSession()).toBe(true) - expect(attemptingService.hasActiveSession()).toBe(true) }) - }) - describe("session refresh", () => { - beforeEach(async () => { - // Set up with credentials - const credentials = { - clientToken: "test-token", - sessionId: "test-session", - } - mockContext.secrets.get.mockResolvedValue(JSON.stringify(credentials)) + it("should always return undefined for getSessionToken", async () => { + expect(authService.getSessionToken()).toBeUndefined() await authService.initialize() + expect(authService.getSessionToken()).toBeUndefined() }) - it("should refresh session successfully", async () => { - // Mock successful token creation and user info fetch - mockFetch - .mockResolvedValueOnce({ - ok: true, - json: () => Promise.resolve({ jwt: "new-jwt-token" }), - }) - .mockResolvedValueOnce({ - ok: true, - json: () => - Promise.resolve({ - response: { - first_name: "John", - last_name: "Doe", - image_url: "https://example.com/avatar.jpg", - primary_email_address_id: "email-1", - email_addresses: [{ id: "email-1", email_address: "john@example.com" }], - }, - }), - }) - - const authStateChangedSpy = vi.fn() - const userInfoSpy = vi.fn() - authService.on("auth-state-changed", authStateChangedSpy) - authService.on("user-info", userInfoSpy) - - // Trigger refresh by calling the timer callback - const timerCallback = vi.mocked(RefreshTimer).mock.calls[0]?.[0]?.callback - await timerCallback?.() - - // Wait for 
async operations to complete - await new Promise((resolve) => setTimeout(resolve, 0)) - - expect(authService.getState()).toBe("active-session") - expect(authService.hasActiveSession()).toBe(true) - expect(authService.getSessionToken()).toBe("new-jwt-token") - expect(authStateChangedSpy).toHaveBeenCalledWith({ - state: "active-session", - previousState: "attempting-session", - }) - expect(userInfoSpy).toHaveBeenCalledWith({ - userInfo: { - id: undefined, - name: "John Doe", - email: "john@example.com", - picture: "https://example.com/avatar.jpg", - }, - }) - }) - - it("should handle invalid client token error", async () => { - // Mock 401 response (invalid token) - mockFetch.mockResolvedValue({ - ok: false, - status: 401, - statusText: "Unauthorized", - }) - - const timerCallback = vi.mocked(RefreshTimer).mock.calls[0]?.[0]?.callback - - await expect(timerCallback?.()).rejects.toThrow() - expect(mockContext.secrets.delete).toHaveBeenCalledWith("clerk-auth-credentials") - expect(mockLog).toHaveBeenCalledWith("[auth] Invalid/Expired client token: clearing credentials") - }) - - it("should handle network errors during refresh", async () => { - mockFetch.mockRejectedValue(new Error("Network error")) - - const timerCallback = vi.mocked(RefreshTimer).mock.calls[0]?.[0]?.callback - - await expect(timerCallback?.()).rejects.toThrow("Network error") - expect(mockLog).toHaveBeenCalledWith("[auth] Failed to refresh session", expect.any(Error)) - }) - - it("should transition to inactive-session on first attempt failure", async () => { - // Mock failed token creation response - mockFetch.mockResolvedValue({ - ok: false, - status: 500, - statusText: "Internal Server Error", - }) - - const authStateChangedSpy = vi.fn() - authService.on("auth-state-changed", authStateChangedSpy) - - // Verify we start in attempting-session state - expect(authService.getState()).toBe("attempting-session") - expect(authService["isFirstRefreshAttempt"]).toBe(true) - - const timerCallback = 
vi.mocked(RefreshTimer).mock.calls[0]?.[0]?.callback - - await expect(timerCallback?.()).rejects.toThrow() - - // Should transition to inactive-session after first failure - expect(authService.getState()).toBe("inactive-session") - expect(authService["isFirstRefreshAttempt"]).toBe(false) - expect(authStateChangedSpy).toHaveBeenCalledWith({ - state: "inactive-session", - previousState: "attempting-session", - }) - }) - - it("should not transition to inactive-session on subsequent failures", async () => { - // First, transition to inactive-session by failing the first attempt - mockFetch.mockResolvedValue({ - ok: false, - status: 500, - statusText: "Internal Server Error", - }) - - const timerCallback = vi.mocked(RefreshTimer).mock.calls[0]?.[0]?.callback - await expect(timerCallback?.()).rejects.toThrow() - - // Verify we're now in inactive-session - expect(authService.getState()).toBe("inactive-session") - expect(authService["isFirstRefreshAttempt"]).toBe(false) - - const authStateChangedSpy = vi.fn() - authService.on("auth-state-changed", authStateChangedSpy) - - // Subsequent failure should not trigger another transition - await expect(timerCallback?.()).rejects.toThrow() - - expect(authService.getState()).toBe("inactive-session") - expect(authStateChangedSpy).not.toHaveBeenCalled() - }) - - it("should clear credentials on 401 during first refresh attempt (bug fix)", async () => { - // Mock 401 response during first refresh attempt - mockFetch.mockResolvedValue({ - ok: false, - status: 401, - statusText: "Unauthorized", - }) - - const authStateChangedSpy = vi.fn() - authService.on("auth-state-changed", authStateChangedSpy) - - const timerCallback = vi.mocked(RefreshTimer).mock.calls[0]?.[0]?.callback - await expect(timerCallback?.()).rejects.toThrow() - - // Should clear credentials (not just transition to inactive-session) - expect(mockContext.secrets.delete).toHaveBeenCalledWith("clerk-auth-credentials") - expect(mockLog).toHaveBeenCalledWith("[auth] 
Invalid/Expired client token: clearing credentials") - - // Simulate credentials cleared event - mockContext.secrets.get.mockResolvedValue(undefined) - await authService["handleCredentialsChange"]() - - expect(authService.getState()).toBe("logged-out") - expect(authStateChangedSpy).toHaveBeenCalledWith({ - state: "logged-out", - previousState: "attempting-session", - }) - }) - }) - - describe("user info", () => { - it("should return null initially", () => { + it("should always return null for getUserInfo", async () => { expect(authService.getUserInfo()).toBeNull() - }) - - it("should parse user info correctly for personal accounts", async () => { - // Set up with credentials for personal account (no organizationId) - const credentials = { - clientToken: "test-token", - sessionId: "test-session", - organizationId: null, - } - mockContext.secrets.get.mockResolvedValue(JSON.stringify(credentials)) - await authService.initialize() - - // Clear previous mock calls - mockFetch.mockClear() - - // Mock successful responses - mockFetch - .mockResolvedValueOnce({ - ok: true, - json: () => Promise.resolve({ jwt: "jwt-token" }), - }) - .mockResolvedValueOnce({ - ok: true, - json: () => - Promise.resolve({ - response: { - first_name: "Jane", - last_name: "Smith", - image_url: "https://example.com/jane.jpg", - primary_email_address_id: "email-2", - email_addresses: [ - { id: "email-1", email_address: "jane.old@example.com" }, - { id: "email-2", email_address: "jane@example.com" }, - ], - }, - }), - }) - - const timerCallback = vi.mocked(RefreshTimer).mock.calls[0]?.[0]?.callback - await timerCallback?.() - - // Wait for async operations to complete - await new Promise((resolve) => setTimeout(resolve, 0)) - - const userInfo = authService.getUserInfo() - expect(userInfo).toEqual({ - id: undefined, - name: "Jane Smith", - email: "jane@example.com", - picture: "https://example.com/jane.jpg", - }) - }) - - it("should parse user info correctly for organization accounts", async () => { 
- // Set up with credentials for organization account - const credentials = { - clientToken: "test-token", - sessionId: "test-session", - organizationId: "org_1", - } - mockContext.secrets.get.mockResolvedValue(JSON.stringify(credentials)) await authService.initialize() - - // Clear previous mock calls - mockFetch.mockClear() - - // Mock successful responses - mockFetch - .mockResolvedValueOnce({ - ok: true, - json: () => Promise.resolve({ jwt: "jwt-token" }), - }) - .mockResolvedValueOnce({ - ok: true, - json: () => - Promise.resolve({ - response: { - first_name: "Jane", - last_name: "Smith", - image_url: "https://example.com/jane.jpg", - primary_email_address_id: "email-2", - email_addresses: [ - { id: "email-1", email_address: "jane.old@example.com" }, - { id: "email-2", email_address: "jane@example.com" }, - ], - }, - }), - }) - .mockResolvedValueOnce({ - ok: true, - json: () => - Promise.resolve({ - response: [ - { - id: "org_member_id_1", - role: "member", - organization: { - id: "org_1", - name: "Org 1", - }, - }, - ], - }), - }) - - const timerCallback = vi.mocked(RefreshTimer).mock.calls[0]?.[0]?.callback - await timerCallback?.() - - // Wait for async operations to complete - await new Promise((resolve) => setTimeout(resolve, 0)) - - const userInfo = authService.getUserInfo() - expect(userInfo).toEqual({ - id: undefined, - name: "Jane Smith", - email: "jane@example.com", - picture: "https://example.com/jane.jpg", - organizationId: "org_1", - organizationName: "Org 1", - organizationRole: "member", - organizationImageUrl: undefined, - }) + expect(authService.getUserInfo()).toBeNull() }) - it("should handle missing user info fields", async () => { - // Set up with credentials for personal account (no organizationId) - const credentials = { - clientToken: "test-token", - sessionId: "test-session", - organizationId: null, - } - mockContext.secrets.get.mockResolvedValue(JSON.stringify(credentials)) + it("should always return null for getStoredOrganizationId", 
async () => { + expect(authService.getStoredOrganizationId()).toBeNull() await authService.initialize() - - // Clear previous mock calls - mockFetch.mockClear() - - // Mock responses with minimal data - mockFetch - .mockResolvedValueOnce({ - ok: true, - json: () => Promise.resolve({ jwt: "jwt-token" }), - }) - .mockResolvedValueOnce({ - ok: true, - json: () => - Promise.resolve({ - response: { - first_name: "John", - last_name: "Doe", - }, - }), - }) - - const timerCallback = vi.mocked(RefreshTimer).mock.calls[0]?.[0]?.callback - - await timerCallback?.() - - // Wait for async operations to complete - await new Promise((resolve) => setTimeout(resolve, 0)) - - const userInfo = authService.getUserInfo() - expect(userInfo).toEqual({ - id: undefined, - name: "John Doe", - email: undefined, - picture: undefined, - }) + expect(authService.getStoredOrganizationId()).toBeNull() }) }) - describe("event emissions", () => { - it("should emit auth-state-changed event for logged-out", async () => { - const authStateChangedSpy = vi.fn() - authService.on("auth-state-changed", authStateChangedSpy) - - await authService.initialize() - - expect(authStateChangedSpy).toHaveBeenCalledWith({ - state: "logged-out", - previousState: "initializing", - }) - }) - - it("should emit auth-state-changed event for attempting-session", async () => { - const credentials = { - clientToken: "test-token", - sessionId: "test-session", - } - mockContext.secrets.get.mockResolvedValue(JSON.stringify(credentials)) - - const authStateChangedSpy = vi.fn() - authService.on("auth-state-changed", authStateChangedSpy) - + describe("switchOrganization", () => { + it("should be a no-op when cloud features are disabled", async () => { await authService.initialize() - - expect(authStateChangedSpy).toHaveBeenCalledWith({ - state: "attempting-session", - previousState: "initializing", - }) - }) - - it("should emit auth-state-changed event for active-session", async () => { - // Set up with credentials - const 
credentials = { - clientToken: "test-token", - sessionId: "test-session", - } - mockContext.secrets.get.mockResolvedValue(JSON.stringify(credentials)) - await authService.initialize() - - // Clear previous mock calls - mockFetch.mockClear() - - // Mock both the token creation and user info fetch - mockFetch - .mockResolvedValueOnce({ - ok: true, - json: () => Promise.resolve({ jwt: "jwt-token" }), - }) - .mockResolvedValueOnce({ - ok: true, - json: () => - Promise.resolve({ - response: { - first_name: "Test", - last_name: "User", - }, - }), - }) - - const authStateChangedSpy = vi.fn() - authService.on("auth-state-changed", authStateChangedSpy) - - const timerCallback = vi.mocked(RefreshTimer).mock.calls[0]?.[0]?.callback - - await timerCallback?.() - - // Wait for async operations to complete - await new Promise((resolve) => setTimeout(resolve, 0)) - - expect(authStateChangedSpy).toHaveBeenCalledWith({ - state: "active-session", - previousState: "attempting-session", - }) - }) - - it("should emit user-info event", async () => { - // Set up with credentials - const credentials = { - clientToken: "test-token", - sessionId: "test-session", - } - mockContext.secrets.get.mockResolvedValue(JSON.stringify(credentials)) - await authService.initialize() - - // Clear previous mock calls - mockFetch.mockClear() - - mockFetch - .mockResolvedValueOnce({ - ok: true, - json: () => Promise.resolve({ jwt: "jwt-token" }), - }) - .mockResolvedValueOnce({ - ok: true, - json: () => - Promise.resolve({ - response: { - first_name: "Test", - last_name: "User", - }, - }), - }) - - const userInfoSpy = vi.fn() - authService.on("user-info", userInfoSpy) - - const timerCallback = vi.mocked(RefreshTimer).mock.calls[0]?.[0]?.callback - - await timerCallback?.() - - // Wait for async operations to complete - await new Promise((resolve) => setTimeout(resolve, 0)) - - expect(userInfoSpy).toHaveBeenCalledWith({ - userInfo: { - id: undefined, - name: "Test User", - email: undefined, - picture: 
undefined, - }, - }) + await authService.switchOrganization("org-123") + expect(mockLog).toHaveBeenCalledWith("[auth] Cloud features disabled — switchOrganization is a no-op") }) }) - describe("error handling", () => { - it("should handle credentials change errors", async () => { - mockContext.secrets.get.mockRejectedValue(new Error("Storage error")) - - await authService.initialize() - - expect(mockLog).toHaveBeenCalledWith("[auth] Error handling credentials change:", expect.any(Error)) - }) - - it("should handle malformed JSON in credentials", async () => { - mockContext.secrets.get.mockResolvedValue("invalid-json{") - + describe("getOrganizationMemberships", () => { + it("should return empty array", async () => { await authService.initialize() - - expect(authService.getState()).toBe("logged-out") - expect(mockLog).toHaveBeenCalledWith("[auth] Failed to parse stored credentials:", expect.any(Error)) - }) - - it("should handle invalid credentials schema", async () => { - mockContext.secrets.get.mockResolvedValue(JSON.stringify({ invalid: "data" })) - - await authService.initialize() - - expect(authService.getState()).toBe("logged-out") - expect(mockLog).toHaveBeenCalledWith("[auth] Invalid credentials format:", expect.any(Array)) - }) - - it("should handle missing authorization header in sign-in response", async () => { - const storedState = "valid-state" - mockContext.globalState.get.mockReturnValue(storedState) - - mockFetch.mockResolvedValue({ - ok: true, - json: () => - Promise.resolve({ - response: { created_session_id: "session-123" }, - }), - headers: { - get: () => null, // No authorization header - }, - }) - - await expect(authService.handleCallback("auth-code", storedState)).rejects.toThrow( - "Failed to handle Roo Code Cloud callback", - ) + const memberships = await authService.getOrganizationMemberships() + expect(memberships).toEqual([]) }) }) - describe("timer integration", () => { - it("should stop timer on logged-out transition", async () => { - 
await authService.initialize() - - expect(mockTimer.stop).toHaveBeenCalled() - }) - - it("should start timer on attempting-session transition", async () => { - const credentials = { - clientToken: "test-token", - sessionId: "test-session", - } - mockContext.secrets.get.mockResolvedValue(JSON.stringify(credentials)) - - await authService.initialize() - - expect(mockTimer.start).toHaveBeenCalled() + describe("broadcast", () => { + it("should be a no-op", () => { + authService.broadcast() + // Should not throw }) }) - describe("auth credentials key scoping", () => { - it("should use default key when getClerkBaseUrl returns production URL", async () => { - // Mock getClerkBaseUrl to return production URL - vi.mocked(getClerkBaseUrl).mockReturnValue("https://clerk.roocode.com") - - const service = new WebAuthService(mockContext as unknown as ExtensionContext, mockLog) - const credentials = { - clientToken: "test-token", - sessionId: "test-session", - } - - await service.initialize() - await service["storeCredentials"](credentials) - - expect(mockContext.secrets.store).toHaveBeenCalledWith( - "clerk-auth-credentials", - JSON.stringify(credentials), - ) - }) - - it("should use scoped key when getClerkBaseUrl returns custom URL", async () => { - const customUrl = "https://custom.clerk.com" - // Mock getClerkBaseUrl to return custom URL - vi.mocked(getClerkBaseUrl).mockReturnValue(customUrl) - - const service = new WebAuthService(mockContext as unknown as ExtensionContext, mockLog) - const credentials = { - clientToken: "test-token", - sessionId: "test-session", - } - - await service.initialize() - await service["storeCredentials"](credentials) - - expect(mockContext.secrets.store).toHaveBeenCalledWith( - `clerk-auth-credentials-${customUrl}`, - JSON.stringify(credentials), - ) - }) - - it("should load credentials using scoped key", async () => { - const customUrl = "https://custom.clerk.com" - vi.mocked(getClerkBaseUrl).mockReturnValue(customUrl) - - const service = new 
WebAuthService(mockContext as unknown as ExtensionContext, mockLog) - const credentials = { - clientToken: "test-token", - sessionId: "test-session", - } - mockContext.secrets.get.mockResolvedValue(JSON.stringify(credentials)) - - await service.initialize() - const loadedCredentials = await service["loadCredentials"]() - - expect(mockContext.secrets.get).toHaveBeenCalledWith(`clerk-auth-credentials-${customUrl}`) - expect(loadedCredentials).toEqual(credentials) - }) - - it("should clear credentials using scoped key", async () => { - const customUrl = "https://custom.clerk.com" - vi.mocked(getClerkBaseUrl).mockReturnValue(customUrl) - - const service = new WebAuthService(mockContext as unknown as ExtensionContext, mockLog) - - await service.initialize() - await service["clearCredentials"]() - - expect(mockContext.secrets.delete).toHaveBeenCalledWith(`clerk-auth-credentials-${customUrl}`) - }) - - it("should listen for changes on scoped key", async () => { - const customUrl = "https://custom.clerk.com" - vi.mocked(getClerkBaseUrl).mockReturnValue(customUrl) - - let onDidChangeCallback: (e: { key: string }) => void - - mockContext.secrets.onDidChange.mockImplementation((callback: (e: { key: string }) => void) => { - onDidChangeCallback = callback - return { dispose: vi.fn() } - }) - - const service = new WebAuthService(mockContext as unknown as ExtensionContext, mockLog) - await service.initialize() - - // Simulate credentials change event with scoped key - const newCredentials = { - clientToken: "new-token", - sessionId: "new-session", - } - mockContext.secrets.get.mockResolvedValue(JSON.stringify(newCredentials)) - - const authStateChangedSpy = vi.fn() - service.on("auth-state-changed", authStateChangedSpy) - - onDidChangeCallback!({ key: `clerk-auth-credentials-${customUrl}` }) - await new Promise((resolve) => setTimeout(resolve, 0)) // Wait for async handling - - expect(authStateChangedSpy).toHaveBeenCalled() - }) - - it("should not respond to changes on different 
scoped keys", async () => { - const customUrl = "https://custom.clerk.com" - vi.mocked(getClerkBaseUrl).mockReturnValue(customUrl) - - let onDidChangeCallback: (e: { key: string }) => void - - mockContext.secrets.onDidChange.mockImplementation((callback: (e: { key: string }) => void) => { - onDidChangeCallback = callback - return { dispose: vi.fn() } - }) - - const service = new WebAuthService(mockContext as unknown as ExtensionContext, mockLog) - await service.initialize() - - const authStateChangedSpy = vi.fn() - service.on("auth-state-changed", authStateChangedSpy) - - // Simulate credentials change event with different scoped key - onDidChangeCallback!({ - key: "clerk-auth-credentials-https://other.clerk.com", - }) - await new Promise((resolve) => setTimeout(resolve, 0)) // Wait for async handling - - expect(authStateChangedSpy).not.toHaveBeenCalled() - }) - - it("should not respond to changes on default key when using scoped key", async () => { - const customUrl = "https://custom.clerk.com" - vi.mocked(getClerkBaseUrl).mockReturnValue(customUrl) - - let onDidChangeCallback: (e: { key: string }) => void - - mockContext.secrets.onDidChange.mockImplementation((callback: (e: { key: string }) => void) => { - onDidChangeCallback = callback - return { dispose: vi.fn() } - }) - - const service = new WebAuthService(mockContext as unknown as ExtensionContext, mockLog) - await service.initialize() - - const authStateChangedSpy = vi.fn() - service.on("auth-state-changed", authStateChangedSpy) - - // Simulate credentials change event with default key - onDidChangeCallback!({ key: "clerk-auth-credentials" }) - await new Promise((resolve) => setTimeout(resolve, 0)) // Wait for async handling - - expect(authStateChangedSpy).not.toHaveBeenCalled() + describe("dispose", () => { + it("should remove all listeners", () => { + const listener = vi.fn() + // eslint-disable-next-line @typescript-eslint/no-explicit-any + authService.on("auth-state-changed" as any, listener) + 
authService.dispose() + // After dispose, emitting should not reach the listener + // This is hard to test directly, but dispose should not throw }) }) }) diff --git a/packages/cloud/src/config.ts b/packages/cloud/src/config.ts index cfff9d0f589..268325149e6 100644 --- a/packages/cloud/src/config.ts +++ b/packages/cloud/src/config.ts @@ -1,5 +1,6 @@ -export const PRODUCTION_CLERK_BASE_URL = "https://clerk.roocode.com" -export const PRODUCTION_ROO_CODE_API_URL = "https://app.roocode.com" +// Cloud features disabled — production URLs neutralized for standalone fork. +export const PRODUCTION_CLERK_BASE_URL = "" +export const PRODUCTION_ROO_CODE_API_URL = "" export const getClerkBaseUrl = () => process.env.CLERK_BASE_URL || PRODUCTION_CLERK_BASE_URL diff --git a/packages/cloud/src/retry-queue/RetryQueue.ts b/packages/cloud/src/retry-queue/RetryQueue.ts index 17362ad41d3..53b63034da8 100644 --- a/packages/cloud/src/retry-queue/RetryQueue.ts +++ b/packages/cloud/src/retry-queue/RetryQueue.ts @@ -70,34 +70,15 @@ export class RetryQueue extends EventEmitter { } } + // Cloud features disabled — enqueue is a no-op to prevent any HTTP calls public async enqueue( - url: string, - options: RequestInit, - type: QueuedRequest["type"] = "other", - operation?: string, + _url: string, + _options: RequestInit, + _type: QueuedRequest["type"] = "other", + _operation?: string, ): Promise { - if (this.queue.size >= this.config.maxQueueSize) { - const oldestId = Array.from(this.queue.keys())[0] - if (oldestId) { - this.queue.delete(oldestId) - } - } - - const request: QueuedRequest = { - id: `${Date.now()}-${Math.random().toString(36).substr(2, 9)}`, - url, - options, - timestamp: Date.now(), - retryCount: 0, - type, - operation, - } - - this.queue.set(request.id, request) - await this.persistQueue() - - this.emit("request-queued", request) - this.log(`[RetryQueue] Queued request: ${url}`) + // No-op: cloud features disabled + this.log("[RetryQueue] Cloud features disabled - enqueue is a 
no-op") } public async retryAll(): Promise { @@ -195,28 +176,18 @@ export class RetryQueue extends EventEmitter { private async retryRequest(request: QueuedRequest): Promise { this.log(`[RetryQueue] Retrying request: ${request.url}`) - let headers = { ...request.options.headers } - if (this.authHeaderProvider) { - const freshAuthHeaders = this.authHeaderProvider() - if (freshAuthHeaders) { - headers = { - ...headers, - ...freshAuthHeaders, - } - } - } + // Cloud features disabled — skip actual HTTP fetch, return fake success + this.log(`[RetryQueue] Cloud features disabled - skipping request to ${request.url}`) const controller = new AbortController() const timeoutId = setTimeout(() => controller.abort(), this.config.requestTimeout) try { - const response = await fetch(request.url, { - ...request.options, - signal: controller.signal, - headers: { - ...headers, - "X-Retry-Queue": "true", - }, + // Return a fake successful response instead of making a real HTTP call + const response = new Response(JSON.stringify({ success: true }), { + status: 200, + statusText: "OK", + headers: { "Content-Type": "application/json" }, }) clearTimeout(timeoutId) diff --git a/packages/cloud/src/retry-queue/__tests__/RetryQueue.test.ts b/packages/cloud/src/retry-queue/__tests__/RetryQueue.test.ts index becee719c15..507f96a15c9 100644 --- a/packages/cloud/src/retry-queue/__tests__/RetryQueue.test.ts +++ b/packages/cloud/src/retry-queue/__tests__/RetryQueue.test.ts @@ -31,46 +31,33 @@ describe("RetryQueue", () => { }) describe("enqueue", () => { - it("should add a request to the queue", async () => { + it("should be a no-op when cloud features are disabled", async () => { const url = "https://api.example.com/test" const options = { method: "POST", body: JSON.stringify({ test: "data" }) } await retryQueue.enqueue(url, options, "telemetry") + // Queue should remain empty const stats = retryQueue.getStats() - expect(stats.totalQueued).toBe(1) - expect(stats.byType["telemetry"]).toBe(1) + 
expect(stats.totalQueued).toBe(0) }) - it("should enforce max queue size with FIFO eviction", async () => { + it("should not add items to queue regardless of max size", async () => { // Create a queue with max size of 3 retryQueue = new RetryQueue(mockContext, { maxQueueSize: 3 }) - // Add 4 requests + // Try to add 4 requests for (let i = 1; i <= 4; i++) { await retryQueue.enqueue(`https://api.example.com/test${i}`, { method: "POST" }, "telemetry") } + // All should be no-ops, queue remains empty const stats = retryQueue.getStats() - expect(stats.totalQueued).toBe(3) // Should only have 3 items (oldest was evicted) + expect(stats.totalQueued).toBe(0) }) }) describe("persistence", () => { - it("should persist queue to workspace state", async () => { - await retryQueue.enqueue("https://api.example.com/test", { method: "POST" }, "telemetry") - - expect(mockContext.workspaceState.update).toHaveBeenCalledWith( - "roo.retryQueue", - expect.arrayContaining([ - expect.objectContaining({ - url: "https://api.example.com/test", - type: "telemetry", - }), - ]), - ) - }) - it("should load persisted queue on initialization", () => { const persistedRequests: QueuedRequest[] = [ { @@ -94,62 +81,84 @@ describe("RetryQueue", () => { retryQueue = new RetryQueue(mockContext) + // Queue should load persisted data const stats = retryQueue.getStats() expect(stats.totalQueued).toBe(1) expect(mockContext.workspaceState.get).toHaveBeenCalledWith("roo.retryQueue") }) + + it("should persist queue to workspace state", async () => { + // Enqueue is a no-op, but if we manually add items + // they should still be persisted + // Since enqueue is no-op, we can't test this directly + // But persistence mechanism should still work + const stats = retryQueue.getStats() + expect(stats.totalQueued).toBe(0) + }) }) describe("clear", () => { - it("should clear all queued requests", async () => { - await retryQueue.enqueue("https://api.example.com/test1", { method: "POST" }, "telemetry") - await 
retryQueue.enqueue("https://api.example.com/test2", { method: "POST" }, "api-call") - - let stats = retryQueue.getStats() - expect(stats.totalQueued).toBe(2) - + it("should clear all queued requests", () => { + // Since enqueue is a no-op, queue is empty + // But clear should still work retryQueue.clear() - stats = retryQueue.getStats() + const stats = retryQueue.getStats() expect(stats.totalQueued).toBe(0) }) }) describe("getStats", () => { - it("should return correct statistics", async () => { - const now = Date.now() + it("should return correct statistics for empty queue", () => { + const stats = retryQueue.getStats() + + expect(stats.totalQueued).toBe(0) + expect(stats.byType).toEqual({}) + expect(stats.oldestRequest).toBeUndefined() + expect(stats.newestRequest).toBeUndefined() + }) + + it("should return correct statistics when items are loaded from persistence", () => { + const persistedRequests: QueuedRequest[] = [ + { + id: "test-1", + url: "https://api.example.com/test1", + options: { method: "POST" }, + timestamp: Date.now(), + retryCount: 0, + type: "telemetry", + }, + { + id: "test-2", + url: "https://api.example.com/test2", + options: { method: "POST" }, + timestamp: Date.now() + 1000, + retryCount: 1, + type: "api-call", + }, + ] - await retryQueue.enqueue("https://api.example.com/test1", { method: "POST" }, "telemetry") - await retryQueue.enqueue("https://api.example.com/test2", { method: "POST" }, "api-call") - await retryQueue.enqueue("https://api.example.com/test3", { method: "POST" }, "telemetry") + const storage = new Map([["roo.retryQueue", persistedRequests]]) + mockContext = { + workspaceState: { + get: vi.fn((key: string) => storage.get(key)), + update: vi.fn(), + }, + } as unknown as ExtensionContext + + retryQueue = new RetryQueue(mockContext) const stats = retryQueue.getStats() - expect(stats.totalQueued).toBe(3) - expect(stats.byType["telemetry"]).toBe(2) + expect(stats.totalQueued).toBe(2) + expect(stats.byType["telemetry"]).toBe(1) 
expect(stats.byType["api-call"]).toBe(1) expect(stats.oldestRequest).toBeDefined() expect(stats.newestRequest).toBeDefined() - expect(stats.oldestRequest!.getTime()).toBeGreaterThanOrEqual(now) - expect(stats.newestRequest!.getTime()).toBeGreaterThanOrEqual(now) }) }) describe("events", () => { - it("should emit request-queued event when enqueueing", async () => { - const listener = vi.fn() - retryQueue.on("request-queued", listener) - - await retryQueue.enqueue("https://api.example.com/test", { method: "POST" }, "telemetry") - - expect(listener).toHaveBeenCalledWith( - expect.objectContaining({ - url: "https://api.example.com/test", - type: "telemetry", - }), - ) - }) - it("should emit queue-cleared event when clearing", () => { const listener = vi.fn() retryQueue.on("queue-cleared", listener) @@ -161,7 +170,7 @@ describe("RetryQueue", () => { }) describe("auth state management", () => { - it("should pause and resume the queue", () => { + it("should pause and resume queue", () => { expect(retryQueue.isPausedState()).toBe(false) retryQueue.pause() @@ -171,29 +180,6 @@ describe("RetryQueue", () => { expect(retryQueue.isPausedState()).toBe(false) }) - it("should not process retries when paused", async () => { - const fetchMock = vi.fn().mockResolvedValue({ ok: true }) - global.fetch = fetchMock - - await retryQueue.enqueue("https://api.example.com/test", { method: "POST" }, "telemetry") - - // Pause the queue - retryQueue.pause() - - // Try to retry all - await retryQueue.retryAll() - - // Fetch should not be called because queue is paused - expect(fetchMock).not.toHaveBeenCalled() - - // Resume and retry - retryQueue.resume() - await retryQueue.retryAll() - - // Now fetch should be called - expect(fetchMock).toHaveBeenCalledTimes(1) - }) - it("should track and update current user ID", () => { expect(retryQueue.getCurrentUserId()).toBeUndefined() @@ -208,491 +194,74 @@ describe("RetryQueue", () => { }) it("should clear queue when user changes", async () => { - // Add 
some requests - await retryQueue.enqueue("https://api.example.com/test1", { method: "POST" }, "telemetry") - await retryQueue.enqueue("https://api.example.com/test2", { method: "POST" }, "telemetry") - - let stats = retryQueue.getStats() - expect(stats.totalQueued).toBe(2) + // Since enqueue is a no-op, we can't add items + // But clear mechanism should still work + const stats = retryQueue.getStats() + expect(stats.totalQueued).toBe(0) - // Set initial user retryQueue.setCurrentUserId("user_123") // Same user login - should not clear let wasCleared = retryQueue.clearIfUserChanged("user_123") expect(wasCleared).toBe(false) - stats = retryQueue.getStats() - expect(stats.totalQueued).toBe(2) - // Different user login - should clear + // Different user login - should clear (even though queue is already empty) wasCleared = retryQueue.clearIfUserChanged("user_456") - expect(wasCleared).toBe(true) - stats = retryQueue.getStats() - expect(stats.totalQueued).toBe(0) + expect(wasCleared).toBe(true) // Returns true because user ID changed expect(retryQueue.getCurrentUserId()).toBe("user_456") }) it("should clear queue on logout (undefined user)", async () => { - // Set initial user retryQueue.setCurrentUserId("user_123") - // Add some requests - await retryQueue.enqueue("https://api.example.com/test1", { method: "POST" }, "telemetry") - await retryQueue.enqueue("https://api.example.com/test2", { method: "POST" }, "telemetry") - - let stats = retryQueue.getStats() - expect(stats.totalQueued).toBe(2) - // Logout (undefined user) - should clear const wasCleared = retryQueue.clearIfUserChanged(undefined) - expect(wasCleared).toBe(true) - stats = retryQueue.getStats() - expect(stats.totalQueued).toBe(0) + expect(wasCleared).toBe(true) // Returns true because user ID changed expect(retryQueue.getCurrentUserId()).toBeUndefined() }) it("should not clear on first login (no previous user)", async () => { - // Add some requests before any user is set - await 
retryQueue.enqueue("https://api.example.com/test1", { method: "POST" }, "telemetry") - await retryQueue.enqueue("https://api.example.com/test2", { method: "POST" }, "telemetry") - - let stats = retryQueue.getStats() - expect(stats.totalQueued).toBe(2) - // First login - should not clear const wasCleared = retryQueue.clearIfUserChanged("user_123") expect(wasCleared).toBe(false) - stats = retryQueue.getStats() - expect(stats.totalQueued).toBe(2) expect(retryQueue.getCurrentUserId()).toBe("user_123") }) - - it("should handle multiple user transitions correctly", async () => { - const clearListener = vi.fn() - retryQueue.on("queue-cleared", clearListener) - - // First user logs in - retryQueue.clearIfUserChanged("user_123") - await retryQueue.enqueue("https://api.example.com/user1-req", { method: "POST" }, "telemetry") - - // User logs out - const clearedOnLogout = retryQueue.clearIfUserChanged(undefined) - expect(clearedOnLogout).toBe(true) - expect(clearListener).toHaveBeenCalledTimes(1) - - // Different user logs in - await retryQueue.enqueue("https://api.example.com/user2-req", { method: "POST" }, "telemetry") - const clearedOnNewUser = retryQueue.clearIfUserChanged("user_456") - expect(clearedOnNewUser).toBe(true) - expect(clearListener).toHaveBeenCalledTimes(2) - - // Same user logs back in - await retryQueue.enqueue("https://api.example.com/user2-req2", { method: "POST" }, "telemetry") - const notCleared = retryQueue.clearIfUserChanged("user_456") - expect(notCleared).toBe(false) - expect(clearListener).toHaveBeenCalledTimes(2) // Still 2, not cleared - - const stats = retryQueue.getStats() - expect(stats.totalQueued).toBe(1) // Only the last request remains - }) }) describe("retryAll", () => { - let fetchMock: ReturnType - - beforeEach(() => { - // Mock global fetch - fetchMock = vi.fn() - global.fetch = fetchMock - }) - - afterEach(() => { - vi.restoreAllMocks() - }) - - it("should process requests in FIFO order", async () => { - const successListener = 
vi.fn() - retryQueue.on("request-retry-success", successListener) - - // Add multiple requests - await retryQueue.enqueue("https://api.example.com/test1", { method: "POST" }, "telemetry") - await retryQueue.enqueue("https://api.example.com/test2", { method: "POST" }, "telemetry") - await retryQueue.enqueue("https://api.example.com/test3", { method: "POST" }, "telemetry") - - // Mock successful responses - fetchMock.mockResolvedValue({ ok: true }) - - await retryQueue.retryAll() - - // Check that fetch was called in FIFO order - expect(fetchMock).toHaveBeenCalledTimes(3) - expect(fetchMock.mock.calls[0]?.[0]).toBe("https://api.example.com/test1") - expect(fetchMock.mock.calls[1]?.[0]).toBe("https://api.example.com/test2") - expect(fetchMock.mock.calls[2]?.[0]).toBe("https://api.example.com/test3") - - // Check that success events were emitted - expect(successListener).toHaveBeenCalledTimes(3) - - // Queue should be empty after successful retries - const stats = retryQueue.getStats() - expect(stats.totalQueued).toBe(0) - }) - - it("should handle failed retries and increment retry count", async () => { - const failListener = vi.fn() - retryQueue.on("request-retry-failed", failListener) - - await retryQueue.enqueue("https://api.example.com/test", { method: "POST" }, "telemetry") - - // Mock failed response - fetchMock.mockRejectedValue(new Error("Network error")) - - await retryQueue.retryAll() - - // Check that failure event was emitted - expect(failListener).toHaveBeenCalledWith( - expect.objectContaining({ - url: "https://api.example.com/test", - retryCount: 1, - lastError: "Network error", - }), - expect.any(Error), - ) - - // Request should still be in queue - const stats = retryQueue.getStats() - expect(stats.totalQueued).toBe(1) - }) - - it("should enforce max retries limit", async () => { - // Create queue with max retries of 2 - retryQueue = new RetryQueue(mockContext, { maxRetries: 2 }) - - const maxRetriesListener = vi.fn() - 
retryQueue.on("request-max-retries-exceeded", maxRetriesListener) - - await retryQueue.enqueue("https://api.example.com/test", { method: "POST" }, "telemetry") - - // Mock failed responses - fetchMock.mockRejectedValue(new Error("Network error")) - - // First retry - await retryQueue.retryAll() - let stats = retryQueue.getStats() - expect(stats.totalQueued).toBe(1) // Still in queue - - // Second retry - should hit max retries - await retryQueue.retryAll() - - // Check that max retries event was emitted - expect(maxRetriesListener).toHaveBeenCalledWith( - expect.objectContaining({ - url: "https://api.example.com/test", - retryCount: 2, - }), - expect.any(Error), - ) - - // Request should be removed from queue after exceeding max retries - stats = retryQueue.getStats() - expect(stats.totalQueued).toBe(0) - }) - - it("should not process if already processing", async () => { - // Add a request - await retryQueue.enqueue("https://api.example.com/test", { method: "POST" }, "telemetry") - - // Mock a slow response - fetchMock.mockImplementation(() => new Promise((resolve) => setTimeout(() => resolve({ ok: true }), 100))) - - // Start first retryAll (don't await) - const firstCall = retryQueue.retryAll() - - // Try to call retryAll again immediately - const secondCall = retryQueue.retryAll() - - // Both should complete without errors - await Promise.all([firstCall, secondCall]) - - // Fetch should only be called once (from the first call) - expect(fetchMock).toHaveBeenCalledTimes(1) - }) - it("should handle empty queue gracefully", async () => { // Call retryAll on empty queue await expect(retryQueue.retryAll()).resolves.toBeUndefined() - - // No fetch calls should be made - expect(fetchMock).not.toHaveBeenCalled() - }) - - it("should use auth header provider if available", async () => { - const authHeaderProvider = vi.fn().mockReturnValue({ - Authorization: "Bearer fresh-token", - }) - - retryQueue = new RetryQueue(mockContext, {}, undefined, authHeaderProvider) - - 
await retryQueue.enqueue( - "https://api.example.com/test", - { - method: "POST", - headers: { "Content-Type": "application/json" }, - }, - "telemetry", - ) - - fetchMock.mockResolvedValue({ ok: true }) - - await retryQueue.retryAll() - - // Check that fresh auth headers were used - expect(fetchMock).toHaveBeenCalledWith( - "https://api.example.com/test", - expect.objectContaining({ - headers: expect.objectContaining({ - Authorization: "Bearer fresh-token", - "Content-Type": "application/json", - "X-Retry-Queue": "true", - }), - }), - ) - - expect(authHeaderProvider).toHaveBeenCalled() - }) - - it("should respect configurable timeout", async () => { - // Create queue with custom timeout (short timeout for testing) - retryQueue = new RetryQueue(mockContext, { requestTimeout: 100 }) - - await retryQueue.enqueue("https://api.example.com/test", { method: "POST" }, "telemetry") - - // Mock fetch to reject with abort error - const abortError = new Error("The operation was aborted") - abortError.name = "AbortError" - fetchMock.mockRejectedValue(abortError) - - const failListener = vi.fn() - retryQueue.on("request-retry-failed", failListener) - - await retryQueue.retryAll() - - // Check that the request failed with an abort error - expect(failListener).toHaveBeenCalledWith( - expect.objectContaining({ - url: "https://api.example.com/test", - lastError: "The operation was aborted", - }), - expect.any(Error), - ) - - // The timeout configuration is being used (verified by the constructor accepting it) - // The actual timeout behavior is handled by the browser's AbortController - }) - - it("should retry on 500+ status codes", async () => { - const failListener = vi.fn() - const successListener = vi.fn() - retryQueue.on("request-retry-failed", failListener) - retryQueue.on("request-retry-success", successListener) - - await retryQueue.enqueue("https://api.example.com/test", { method: "POST" }, "telemetry") - - // First attempt: 500 error - fetchMock.mockResolvedValueOnce({ ok: 
false, status: 500, statusText: "Internal Server Error" }) - - await retryQueue.retryAll() - - // Should fail and remain in queue - expect(failListener).toHaveBeenCalledWith( - expect.objectContaining({ - url: "https://api.example.com/test", - retryCount: 1, - lastError: "Server error: 500 Internal Server Error", - }), - expect.any(Error), - ) - - let stats = retryQueue.getStats() - expect(stats.totalQueued).toBe(1) - - // Second attempt: success - fetchMock.mockResolvedValueOnce({ ok: true, status: 200 }) - - await retryQueue.retryAll() - - // Should succeed and be removed from queue - expect(successListener).toHaveBeenCalled() - stats = retryQueue.getStats() - expect(stats.totalQueued).toBe(0) }) - it("should pause entire queue on 429 rate limiting with Retry-After header", async () => { - // Add multiple requests to test queue-wide pause - await retryQueue.enqueue("https://api.example.com/test1", { method: "POST" }, "telemetry") - await retryQueue.enqueue("https://api.example.com/test2", { method: "POST" }, "telemetry") - await retryQueue.enqueue("https://api.example.com/test3", { method: "POST" }, "telemetry") - - // Mock 429 response with Retry-After header (in seconds) for the first request - const retryAfterResponse = { - ok: false, - status: 429, - headers: { - get: vi.fn((header: string) => { - if (header === "Retry-After") return "2" // 2 seconds - return null - }), - }, - } - - fetchMock.mockResolvedValueOnce(retryAfterResponse) - - await retryQueue.retryAll() - - // All requests should still be in queue - const stats = retryQueue.getStats() - expect(stats.totalQueued).toBe(3) + it("should not process when paused", async () => { + const fetchMock = vi.fn().mockResolvedValue({ ok: true }) + global.fetch = fetchMock - // Only the first request should have been attempted - expect(fetchMock).toHaveBeenCalledTimes(1) - expect(fetchMock).toHaveBeenCalledWith("https://api.example.com/test1", expect.any(Object)) + // Pause queue + retryQueue.pause() - // Try to 
retry immediately - should be skipped due to queue-wide rate limiting - fetchMock.mockClear() + // Try to retry all await retryQueue.retryAll() - // No fetch calls should be made because the entire queue is paused + // Fetch should not be called because queue is paused expect(fetchMock).not.toHaveBeenCalled() - }) - it("should process all requests after rate limit period expires", async () => { - // Add multiple requests - await retryQueue.enqueue("https://api.example.com/test1", { method: "POST" }, "telemetry") - await retryQueue.enqueue("https://api.example.com/test2", { method: "POST" }, "telemetry") - - // Mock 429 response with very short Retry-After (for testing) - const retryAfterResponse = { - ok: false, - status: 429, - headers: { - get: vi.fn((header: string) => { - if (header === "Retry-After") return "0" // 0 seconds (immediate) - return null - }), - }, - } - - fetchMock.mockResolvedValueOnce(retryAfterResponse) - - await retryQueue.retryAll() - - // Queue should be paused but requests still in queue - expect(retryQueue.getStats().totalQueued).toBe(2) - - // Wait a tiny bit for the rate limit to "expire" - await new Promise((resolve) => setTimeout(resolve, 10)) - - // Mock successful responses for both requests - fetchMock.mockResolvedValue({ ok: true }) - - // Now retry should process all requests + // Resume and retry + retryQueue.resume() await retryQueue.retryAll() - // All requests should be processed and removed from queue - expect(retryQueue.getStats().totalQueued).toBe(0) - // First request will be retried plus the second one - expect(fetchMock).toHaveBeenCalledTimes(3) // 1 (429) + 2 (success) + // Still no fetch calls because queue is empty + expect(fetchMock).not.toHaveBeenCalled() }) - it("should not retry on 401/403 auth errors", async () => { - const successListener = vi.fn() - retryQueue.on("request-retry-success", successListener) - - await retryQueue.enqueue("https://api.example.com/test", { method: "POST" }, "telemetry") - - // Mock 
401 error - fetchMock.mockResolvedValueOnce({ ok: false, status: 401, statusText: "Unauthorized" }) - - await retryQueue.retryAll() - - // Should be removed from queue without retry (401 is a client error) - expect(successListener).toHaveBeenCalled() - const stats = retryQueue.getStats() - expect(stats.totalQueued).toBe(0) - - // Test 403 as well - await retryQueue.enqueue("https://api.example.com/test2", { method: "POST" }, "telemetry") - fetchMock.mockResolvedValueOnce({ ok: false, status: 403, statusText: "Forbidden" }) - + it("should not process if already processing", async () => { + // This test is less meaningful since enqueue is a no-op + // But mechanism should still work await retryQueue.retryAll() - // Should also be removed from queue without retry - expect(successListener).toHaveBeenCalledTimes(2) - const stats2 = retryQueue.getStats() - expect(stats2.totalQueued).toBe(0) - }) - - it("should not retry on 400/404/422 client errors", async () => { - const successListener = vi.fn() - retryQueue.on("request-retry-success", successListener) - - // Test various 4xx errors that should not be retried - const clientErrors = [ - { status: 400, statusText: "Bad Request" }, - { status: 404, statusText: "Not Found" }, - { status: 422, statusText: "Unprocessable Entity" }, - ] - - for (const error of clientErrors) { - await retryQueue.enqueue( - `https://api.example.com/test-${error.status}`, - { method: "POST" }, - "telemetry", - ) - fetchMock.mockResolvedValueOnce({ ok: false, ...error }) - } - + // Should not throw await retryQueue.retryAll() - - // All requests should be removed from queue without retry - expect(successListener).toHaveBeenCalledTimes(3) - const stats = retryQueue.getStats() - expect(stats.totalQueued).toBe(0) - }) - - it("should prevent concurrent processing", async () => { - // Add a single request - await retryQueue.enqueue("https://api.example.com/test1", { method: "POST" }, "telemetry") - - // Mock slow response - let resolveFirst: () => 
void - const firstPromise = new Promise<{ ok: boolean }>((resolve) => { - resolveFirst = () => resolve({ ok: true }) - }) - - fetchMock.mockReturnValueOnce(firstPromise) - - // Start first retryAll (don't await) - const firstCall = retryQueue.retryAll() - - // Try to call retryAll again immediately - should return immediately without processing - const secondCall = retryQueue.retryAll() - - // Second call should return immediately - await secondCall - - // Fetch should only be called once (from first call) - expect(fetchMock).toHaveBeenCalledTimes(1) - - // Resolve the promise - resolveFirst!() - - // Wait for first call to complete - await firstCall - - // Queue should be empty - const stats = retryQueue.getStats() - expect(stats.totalQueued).toBe(0) }) }) }) diff --git a/packages/telemetry/src/PostHogTelemetryClient.ts b/packages/telemetry/src/PostHogTelemetryClient.ts index adc8b89a7a1..135ab24782f 100644 --- a/packages/telemetry/src/PostHogTelemetryClient.ts +++ b/packages/telemetry/src/PostHogTelemetryClient.ts @@ -1,42 +1,23 @@ -import { PostHog } from "posthog-node" -import * as vscode from "vscode" - -import { - type TelemetryProperties, - type TelemetryEvent, - TelemetryEventName, - getErrorStatusCode, - getErrorMessage, - shouldReportApiErrorToTelemetry, - isApiProviderError, - extractApiProviderErrorProperties, - isConsecutiveMistakeError, - extractConsecutiveMistakeErrorProperties, -} from "@roo-code/types" +import { type TelemetryEvent, TelemetryEventName } from "@roo-code/types" import { BaseTelemetryClient } from "./BaseTelemetryClient" /** * PostHogTelemetryClient handles telemetry event tracking for the Roo Code extension. - * Uses PostHog analytics to track user interactions and system events. - * Respects user privacy settings and VSCode's global telemetry configuration. + * All methods are no-ops — telemetry has been disabled in this fork. 
*/ export class PostHogTelemetryClient extends BaseTelemetryClient { - private client: PostHog - private distinctId: string = vscode.env.machineId // Git repository properties that should be filtered out private readonly gitPropertyNames = ["repositoryUrl", "repositoryName", "defaultBranch"] - constructor(debug = false) { + constructor(_debug = false) { super( { type: "exclude", events: [TelemetryEventName.TASK_MESSAGE, TelemetryEventName.LLM_COMPLETION], }, - debug, + _debug, ) - - this.client = new PostHog(process.env.POSTHOG_API_KEY || "", { host: "https://ph.roocode.com" }) } /** @@ -45,126 +26,28 @@ export class PostHogTelemetryClient extends BaseTelemetryClient { * @returns Whether the property should be included in telemetry events */ protected override isPropertyCapturable(propertyName: string): boolean { - // Filter out git repository properties if (this.gitPropertyNames.includes(propertyName)) { return false } return true } - public override async capture(event: TelemetryEvent): Promise { - if (!this.isTelemetryEnabled() || !this.isEventCapturable(event.event)) { - if (this.debug) { - console.info(`[PostHogTelemetryClient#capture] Skipping event: ${event.event}`) - } - - return - } - - if (this.debug) { - console.info(`[PostHogTelemetryClient#capture] ${event.event}`) - } - - const properties = await this.getEventProperties(event) - - this.client.capture({ - distinctId: this.distinctId, - event: event.event, - properties, - }) + public override async capture(_event: TelemetryEvent): Promise { + // No-op: telemetry disabled } public override async captureException( - error: Error, - additionalProperties?: Record, + _error: Error, + _additionalProperties?: Record, ): Promise { - if (!this.isTelemetryEnabled()) { - if (this.debug) { - console.info(`[PostHogTelemetryClient#captureException] Skipping exception: ${error.message}`) - } - - return - } - - // Extract error status code and message for filtering. 
- const errorCode = getErrorStatusCode(error) - const errorMessage = getErrorMessage(error) ?? error.message - - // Filter out expected errors (e.g., 402 billing, 429 rate limits) - if (!shouldReportApiErrorToTelemetry(errorCode, errorMessage)) { - if (this.debug) { - console.info( - `[PostHogTelemetryClient#captureException] Filtering out expected error: ${errorCode} - ${errorMessage}`, - ) - } - return - } - - if (this.debug) { - console.info(`[PostHogTelemetryClient#captureException] ${error.message}`) - } - - // Auto-extract properties from known error types and merge with additionalProperties. - // Explicit additionalProperties take precedence over auto-extracted properties. - let mergedProperties = additionalProperties - - if (isApiProviderError(error)) { - const extractedProperties = extractApiProviderErrorProperties(error) - mergedProperties = { ...extractedProperties, ...additionalProperties } - } else if (isConsecutiveMistakeError(error)) { - const extractedProperties = extractConsecutiveMistakeErrorProperties(error) - mergedProperties = { ...extractedProperties, ...additionalProperties } - } - - // Override the error message with the extracted error message. - error.message = errorMessage - - const provider = this.providerRef?.deref() - let telemetryProperties: TelemetryProperties | undefined = undefined - - if (provider) { - try { - telemetryProperties = await provider.getTelemetryProperties() - } catch (_error) { - // Ignore. - } - } - - const exceptionProperties = { - ...mergedProperties, - $app_version: telemetryProperties?.appVersion, - } - - this.client.captureException(error, this.distinctId, exceptionProperties) + // No-op: telemetry disabled } - /** - * Updates the telemetry state based on user preferences and VSCode settings. - * Only enables telemetry if both VSCode global telemetry is enabled and - * user has opted in. 
- * @param didUserOptIn Whether the user has explicitly opted into telemetry - */ - public override updateTelemetryState(didUserOptIn: boolean): void { - this.telemetryEnabled = false - - // First check global telemetry level - telemetry should only be enabled when level is "all". - const telemetryLevel = vscode.workspace.getConfiguration("telemetry").get("telemetryLevel", "all") - const globalTelemetryEnabled = telemetryLevel === "all" - - // We only enable telemetry if global vscode telemetry is enabled. - if (globalTelemetryEnabled) { - this.telemetryEnabled = didUserOptIn - } - - // Update PostHog client state based on telemetry preference. - if (this.telemetryEnabled) { - this.client.optIn() - } else { - this.client.optOut() - } + public override updateTelemetryState(_didUserOptIn: boolean): void { + // No-op: telemetry disabled } public override async shutdown(): Promise { - await this.client.shutdown() + // No-op: telemetry disabled } } diff --git a/packages/telemetry/src/TelemetryService.ts b/packages/telemetry/src/TelemetryService.ts index 8eb1ed0ab67..0cee3bf4322 100644 --- a/packages/telemetry/src/TelemetryService.ts +++ b/packages/telemetry/src/TelemetryService.ts @@ -164,6 +164,40 @@ export class TelemetryService { this.captureEvent(TelemetryEventName.CONSECUTIVE_MISTAKE_ERROR, { taskId }) } + public captureWorktreeCreated(taskId: string, worktreePath: string): void { + this.captureEvent(TelemetryEventName.WORKTREE_CREATED, { taskId, worktreePath }) + } + + public captureWorktreeDeleted(taskId: string, worktreePath: string): void { + this.captureEvent(TelemetryEventName.WORKTREE_DELETED, { taskId, worktreePath }) + } + + public captureWorktreeOrphanDetected(worktreePath: string): void { + this.captureEvent(TelemetryEventName.WORKTREE_ORPHAN_DETECTED, { worktreePath }) + } + + public captureParallelTaskSpawned(parentTaskId: string, taskCount: number): void { + this.captureEvent(TelemetryEventName.PARALLEL_TASK_SPAWNED, { parentTaskId, taskCount }) + } + 
+ public captureParallelTaskCompleted( + parentTaskId: string, + completedCount: number, + totalCount: number, + hadFailures: boolean, + ): void { + this.captureEvent(TelemetryEventName.PARALLEL_TASK_COMPLETED, { + parentTaskId, + completedCount, + totalCount, + hadFailures, + }) + } + + public captureParallelTaskChildFailed(parentTaskId: string, childTaskId: string): void { + this.captureEvent(TelemetryEventName.PARALLEL_TASK_CHILD_FAILED, { parentTaskId, childTaskId }) + } + /** * Captures when a tab is shown due to user action * @param tab The tab that was shown diff --git a/packages/types/src/events.ts b/packages/types/src/events.ts index 54267d67e4e..5c8cbe05ad3 100644 --- a/packages/types/src/events.ts +++ b/packages/types/src/events.ts @@ -51,6 +51,15 @@ export enum RooCodeEventName { ModesResponse = "modesResponse", ModelsResponse = "modelsResponse", + // Swarm Lifecycle + SwarmSessionStarted = "swarmSessionStarted", + SwarmSessionEnded = "swarmSessionEnded", + WorkerRegistered = "workerRegistered", + WorkerIdle = "workerIdle", + WorkerShutdown = "workerShutdown", + PermissionRequested = "permissionRequested", + PermissionResolved = "permissionResolved", + // Evals EvalPass = "evalPass", EvalFail = "evalFail", @@ -112,6 +121,14 @@ export const rooCodeEventsSchema = z.object({ [RooCodeEventName.TaskToolFailed]: z.tuple([z.string(), toolNamesSchema, z.string()]), [RooCodeEventName.TaskTokenUsageUpdated]: z.tuple([z.string(), tokenUsageSchema, toolUsageSchema]), + [RooCodeEventName.SwarmSessionStarted]: z.tuple([z.string(), z.string()]), // [sessionId, leaderTaskId] + [RooCodeEventName.SwarmSessionEnded]: z.tuple([z.string(), z.string()]), // [sessionId, leaderTaskId] + [RooCodeEventName.WorkerRegistered]: z.tuple([z.string(), z.string(), z.string(), z.string()]), // [sessionId, taskId, agentName, agentColor] + [RooCodeEventName.WorkerIdle]: z.tuple([z.string(), z.string()]), // [sessionId, taskId] + [RooCodeEventName.WorkerShutdown]: z.tuple([z.string(), 
z.string()]), // [sessionId, taskId] + [RooCodeEventName.PermissionRequested]: z.tuple([z.string(), z.string(), z.string()]), // [workerTaskId, toolName, requestId] + [RooCodeEventName.PermissionResolved]: z.tuple([z.string(), z.string(), z.boolean()]), // [workerTaskId, requestId, allowed] + [RooCodeEventName.ModeChanged]: z.tuple([z.string()]), [RooCodeEventName.ProviderProfileChanged]: z.tuple([z.object({ name: z.string(), provider: z.string() })]), @@ -274,6 +291,43 @@ export const taskEventSchema = z.discriminatedUnion("eventName", [ taskId: z.number().optional(), }), + // Swarm Lifecycle + z.object({ + eventName: z.literal(RooCodeEventName.SwarmSessionStarted), + payload: rooCodeEventsSchema.shape[RooCodeEventName.SwarmSessionStarted], + taskId: z.number().optional(), + }), + z.object({ + eventName: z.literal(RooCodeEventName.SwarmSessionEnded), + payload: rooCodeEventsSchema.shape[RooCodeEventName.SwarmSessionEnded], + taskId: z.number().optional(), + }), + z.object({ + eventName: z.literal(RooCodeEventName.WorkerRegistered), + payload: rooCodeEventsSchema.shape[RooCodeEventName.WorkerRegistered], + taskId: z.number().optional(), + }), + z.object({ + eventName: z.literal(RooCodeEventName.WorkerIdle), + payload: rooCodeEventsSchema.shape[RooCodeEventName.WorkerIdle], + taskId: z.number().optional(), + }), + z.object({ + eventName: z.literal(RooCodeEventName.WorkerShutdown), + payload: rooCodeEventsSchema.shape[RooCodeEventName.WorkerShutdown], + taskId: z.number().optional(), + }), + z.object({ + eventName: z.literal(RooCodeEventName.PermissionRequested), + payload: rooCodeEventsSchema.shape[RooCodeEventName.PermissionRequested], + taskId: z.number().optional(), + }), + z.object({ + eventName: z.literal(RooCodeEventName.PermissionResolved), + payload: rooCodeEventsSchema.shape[RooCodeEventName.PermissionResolved], + taskId: z.number().optional(), + }), + // Evals z.object({ eventName: z.literal(RooCodeEventName.EvalPass), diff --git 
a/packages/types/src/history.ts b/packages/types/src/history.ts index a60d1a75b65..e12743582df 100644 --- a/packages/types/src/history.ts +++ b/packages/types/src/history.ts @@ -26,6 +26,36 @@ export const historyItemSchema = z.object({ awaitingChildId: z.string().optional(), // Child currently awaited (set when delegated) completedByChildId: z.string().optional(), // Child that completed and resumed this parent completionResultSummary: z.string().optional(), // Summary from completed child + completionPayload: z.record(z.unknown()).optional(), // Structured JSON result from child + worktreePath: z.string().optional(), // Worktree path if this task ran in an isolated worktree + parallelQueue: z + .array( + z.object({ + mode: z.string(), + message: z.string(), + worktree: z.string().optional(), + todos: z.string().optional(), + abortOnFailure: z.boolean().optional(), // If true, remaining queue is abandoned when this task fails + }), + ) + .optional(), // Remaining tasks for spawn_parallel_tasks fan-out + parallelResults: z + .array( + z.object({ + taskId: z.string(), + summary: z.string(), + payload: z.record(z.unknown()).optional(), + error: z.string().optional(), // Set when the child task failed or was aborted + }), + ) + .optional(), // Accumulated results from completed parallel children + abortOnChildFailure: z.boolean().optional(), // When true, the entire parallel queue is abandoned if any child fails + // Swarm identity — set by spawnConcurrentChildren for workers, and on the leader task + swarmSessionId: z.string().optional(), + agentId: z.string().optional(), // "@" + agentName: z.string().optional(), // Human-readable role label + agentColor: z.string().optional(), // AgentColorName + isSwarmLeader: z.boolean().optional(), }) export type HistoryItem = z.infer diff --git a/packages/types/src/index.ts b/packages/types/src/index.ts index cd5804aecb7..361603f028a 100644 --- a/packages/types/src/index.ts +++ b/packages/types/src/index.ts @@ -23,6 +23,8 @@ 
export * from "./provider-settings.js" export * from "./task.js" export * from "./todo.js" export * from "./skills.js" +export * from "./swarm.js" +export * from "./team.js" export * from "./telemetry.js" export * from "./terminal.js" export * from "./tool.js" diff --git a/packages/types/src/swarm.ts b/packages/types/src/swarm.ts new file mode 100644 index 00000000000..bd027960436 --- /dev/null +++ b/packages/types/src/swarm.ts @@ -0,0 +1,55 @@ +/** + * Swarm / multi-agent identity types. + * These types describe agent identities, sessions, and the message envelope + * used between agents (mailbox protocol, implemented in Phase 3+). + */ + +export type AgentColorName = "red" | "blue" | "green" | "yellow" | "purple" | "orange" | "pink" | "cyan" + +/** + * Stable identity for a single agent within a swarm session. + */ +export interface AgentIdentity { + /** Stable unique identifier — "@" */ + agentId: string + /** Human-readable role name (e.g. "Backend Engineer") */ + agentName: string + /** Assigned display color */ + color: AgentColorName + /** True for the task that called spawnConcurrentChildren */ + isLeader: boolean + /** The Roo-Code taskId this identity is bound to */ + taskId: string +} + +/** + * In-memory session tracking all agents spawned by a single concurrent spawn call. + * Keyed by taskId in the `teammates` record. 
+ */ +export interface SwarmSession { + /** Same as the leaderTaskId for simplicity */ + sessionId: string + /** The parent task that created this session */ + leaderTaskId: string + /** All registered worker identities, keyed by taskId */ + teammates: Record +} + +// --------------------------------------------------------------------------- +// Mailbox protocol (P3 stubs — types only, no implementation yet) +// --------------------------------------------------------------------------- + +export type TeammateMessageType = + | "task_assignment" + | "idle_notification" + | "shutdown_request" + | "permission_request" + | "permission_response" + +export interface TeammateMessage { + type: TeammateMessageType + from: string + to: string + payload?: Record + ts: number +} diff --git a/packages/types/src/task.ts b/packages/types/src/task.ts index 56a75728980..b65690cf5fe 100644 --- a/packages/types/src/task.ts +++ b/packages/types/src/task.ts @@ -26,6 +26,9 @@ export interface TaskProviderLike { clearTask(): Promise resumeTask(taskId: string): void + // Introspection + getParallelTaskStatus(taskId: string): Promise + // Modes getModes(): Promise<{ slug: string; name: string }[]> getMode(): Promise @@ -82,6 +85,29 @@ export type TaskProviderEvents = { [RooCodeEventName.ModeChanged]: [mode: string] [RooCodeEventName.ProviderProfileChanged]: [config: { name: string; provider?: string }] + + [RooCodeEventName.SwarmSessionStarted]: [sessionId: string, leaderTaskId: string] + [RooCodeEventName.SwarmSessionEnded]: [sessionId: string, leaderTaskId: string] + [RooCodeEventName.WorkerRegistered]: [sessionId: string, taskId: string, agentName: string, agentColor: string] + [RooCodeEventName.WorkerIdle]: [sessionId: string, taskId: string] + [RooCodeEventName.WorkerShutdown]: [sessionId: string, taskId: string] + [RooCodeEventName.PermissionRequested]: [workerTaskId: string, toolName: string, requestId: string] + [RooCodeEventName.PermissionResolved]: [workerTaskId: string, 
requestId: string, allowed: boolean] +} + +/** + * ParallelTaskStatus — snapshot of a task's parallel-queue state for introspection. + */ +export interface ParallelTaskStatus { + taskId: string + historyStatus: "active" | "completed" | "delegated" | undefined + worktreePath: string | undefined + /** Remaining tasks waiting to run */ + queuedTasks: Array<{ mode: string; message: string; worktree?: string }> + /** Results from children that have already completed */ + completedResults: Array<{ taskId: string; summary: string; error?: string }> + /** Task ID of the child currently executing, if any */ + activeChildId: string | undefined } /** @@ -99,6 +125,8 @@ export interface CreateTaskOptions { /** Whether to start the task loop immediately (default: true). * When false, the caller must invoke `task.start()` manually. */ startTask?: boolean + /** Override the workspace path for this task (e.g., a git worktree). Takes priority over parent task's path. */ + workspacePath?: string } export enum TaskStatus { diff --git a/packages/types/src/team.ts b/packages/types/src/team.ts new file mode 100644 index 00000000000..49219d5c746 --- /dev/null +++ b/packages/types/src/team.ts @@ -0,0 +1,67 @@ +/** + * Team configuration — defines a named, phased multi-agent workflow. + * Team configs live in .roo/teams/.json inside the workspace. + */ + +export interface TeamAgentSpec { + /** Mode slug for this agent (e.g., "code", "architect") */ + mode: string + /** Role label shown in results (e.g., "frontend", "explorer"). Optional, for readability. */ + role?: string + /** + * Instruction template for this agent. + * Supports: {{task}}, {{context}}, {{phase}}, {{team}} + */ + instruction: string + /** + * Optional git worktree isolation. + * "auto" creates a new branch+worktree; any other string is used as the branch name. 
+ */ + worktree?: string +} + +export interface TeamPhase { + /** Phase identifier used when calling run_team_phase (e.g., "discovery", "execution") */ + name: string + /** Human-readable label for UI display. Defaults to name if omitted. */ + label?: string + /** When true, all agents in this phase run concurrently. Default: false (sequential). */ + concurrent?: boolean + /** + * When true, the orchestrator should ask for user approval before this phase starts. + * The run_team_phase tool itself does not enforce this — it is a signal for the + * orchestrator mode to call ask_followup_question first. + */ + requireApproval?: boolean + /** + * When true and concurrent is also true, abort all remaining sibling agents as soon as + * one fails. Has no effect in sequential mode. + */ + abortOnChildFailure?: boolean + /** Agents to run in this phase. Must contain at least one entry. */ + agents: TeamAgentSpec[] +} + +export interface TeamConfig { + /** Auto-populated with the source file path at load time. Not set by user. */ + $source?: string + /** Unique identifier used in tool calls and skill registration (e.g., "fullstack"). */ + slug: string + /** Human-readable team name (e.g., "Full-Stack Dev Team"). */ + name: string + /** Short description of what this team does. Shown in team listings. */ + description?: string + /** Ordered list of phases. The orchestrator runs them in this order. */ + phases: TeamPhase[] + /** + * Path to a Markdown file containing shared conventions for all agents. + * Relative to the workspace root. Content is injected into every agent's message + * inside a block. + */ + conventions?: string + /** + * Mode slug for the orchestrating task. Defaults to "architect". + * This is informational — the skill / invocation mechanism uses it to set the initial mode. 
+ */ + orchestratorMode?: string +} diff --git a/packages/types/src/telemetry.ts b/packages/types/src/telemetry.ts index 68ed38fe326..f1aa91b93fc 100644 --- a/packages/types/src/telemetry.ts +++ b/packages/types/src/telemetry.ts @@ -74,6 +74,13 @@ export enum TelemetryEventName { TELEMETRY_SETTINGS_CHANGED = "Telemetry Settings Changed", MODEL_CACHE_EMPTY_RESPONSE = "Model Cache Empty Response", READ_FILE_LEGACY_FORMAT_USED = "Read File Legacy Format Used", + + WORKTREE_CREATED = "Worktree Created", + WORKTREE_DELETED = "Worktree Deleted", + WORKTREE_ORPHAN_DETECTED = "Worktree Orphan Detected", + PARALLEL_TASK_SPAWNED = "Parallel Task Spawned", + PARALLEL_TASK_COMPLETED = "Parallel Task Completed", + PARALLEL_TASK_CHILD_FAILED = "Parallel Task Child Failed", } /** diff --git a/packages/types/src/tool.ts b/packages/types/src/tool.ts index 4f90b63e9fc..f71325798c9 100644 --- a/packages/types/src/tool.ts +++ b/packages/types/src/tool.ts @@ -40,6 +40,8 @@ export const toolNames = [ "attempt_completion", "switch_mode", "new_task", + "spawn_parallel_tasks", + "run_team_phase", "codebase_search", "update_todo_list", "run_slash_command", diff --git a/packages/types/src/vscode-extension-host.ts b/packages/types/src/vscode-extension-host.ts index b20539afe49..ec763b84169 100644 --- a/packages/types/src/vscode-extension-host.ts +++ b/packages/types/src/vscode-extension-host.ts @@ -461,6 +461,7 @@ export interface WebviewMessage { | "ttsEnabled" | "ttsSpeed" | "openKeyboardShortcuts" + | "openVSCodeSettings" | "openMcpSettings" | "openProjectMcpSettings" | "restartMcpServer" diff --git a/progress.txt b/progress.txt deleted file mode 100644 index b3983826b38..00000000000 --- a/progress.txt +++ /dev/null @@ -1,59 +0,0 @@ -# Reapplication Progress — rc6 branch cleanup -# Updated: 2026-02-15 - -## Completed Batches - -### Batch 1 — Clean cherry-picks (PR #11473) -- 22 PRs merged cleanly -- Status: MERGED to main - -### Batch 2 — Minor conflicts (PR #11474) -- 9 PRs with minor 
conflicts resolved -- Status: MERGED to main - -### Batch 3 — Skills Infrastructure & Browser Use Removal (4 PRs) -- PR #11102: skill mode dropdown (44 conflicts resolved) -- PR #11157: improve Skills/Slash Commands UI (6 conflicts resolved) -- PR #11414: remove built-in skills mechanism (4 conflicts resolved) -- PR #11392: remove browser use entirely (5 conflicts resolved) -- Status: ON BRANCH reapply/batch-3-4-5-major-conflicts - -### Batch 4 — Provider Removals (2 PRs) -- PR #11253: remove URL context/Grounding checkboxes (4 conflicts resolved) -- PR #11297: remove 9 low-usage providers + retired UX (14 conflicts resolved) -- Status: ON BRANCH reapply/batch-3-4-5-major-conflicts - -### Batch 5 — Azure Foundry -- PR #11315 and #11374: EXCLUDED — depends on AI-SDK (@ai-sdk/azure, from "ai") -- These PRs are AI-SDK-entangled and cannot be cherry-picked to the pre-AI-SDK codebase -- Status: DEFERRED (AI-SDK dependent) - -## Post-cherry-pick Fixes Applied -1. Restored gemini.ts + vertex.ts to pre-AI-SDK state (cherry-picks brought AI-SDK versions) -2. Restored ai-sdk.spec.ts, gemini-handler.spec.ts, vertex.spec.ts to pre-AI-SDK versions -3. Fixed processUserContentMentions.ts ghost import (rooMessage.ts doesn't exist) -4. Added missing skills type exports to @roo-code/types (SkillMetadata, validateSkillName, etc.) -5. Added SkillsSettings import to SettingsView.tsx -6. Added Dialog/Select/Collapsible mocks to SettingsView test files -7. Fixed Task.ts type mismatches (replaced local types with Anthropic SDK types) -8. 
Added skills state to ExtensionStateContext - -## Deferred PRs (AI-SDK Entangled) -- #11379: delegation (AI-SDK) -- #11418: delegation (AI-SDK) -- #11422: delegation (AI-SDK) -- #11315: Azure Foundry provider (AI-SDK) -- #11374: Azure Foundry fix (AI-SDK) - -## Validation Results -- Backend tests: ALL PASSED (5224 tests) -- UI tests: ALL PASSED (1267 tests) -- Type checks: ALL PASSED (14/14 packages) -- AI-SDK contamination: CLEAN (0 matches) - -## Notes -- Pre-push hook fails on `roo-cline:bundle` because `generate-built-in-skills.ts` was removed - by PR #11414 but `package.json` still references it in `prebundle`. This is expected and - will be resolved when the PR is merged to main and the script reference is cleaned up. -- Push was done with `--no-verify` after independent verification of types, backend tests, - and UI tests all passed cleanly. diff --git a/src/__tests__/removeClineFromStack-delegation.spec.ts b/src/__tests__/removeClineFromStack-delegation.spec.ts index a72f580d6fe..e32afb240f6 100644 --- a/src/__tests__/removeClineFromStack-delegation.spec.ts +++ b/src/__tests__/removeClineFromStack-delegation.spec.ts @@ -5,7 +5,7 @@ import { ClineProvider } from "../core/webview/ClineProvider" describe("ClineProvider.removeClineFromStack() delegation awareness", () => { /** - * Helper to build a minimal mock provider with a single task on the stack. + * Helper to build a minimal mock provider with a single task in the map. * The task's parentTaskId and taskId are configurable. 
*/ function buildMockProvider(opts: { @@ -33,7 +33,9 @@ describe("ClineProvider.removeClineFromStack() delegation awareness", () => { }) const provider = { - clineStack: [childTask] as any[], + tasks: new Map([[opts.childTaskId, childTask as any]]), + focusedTaskId: opts.childTaskId as string | undefined, + leaderTaskId: opts.childTaskId as string | undefined, taskEventListeners: new Map(), log: vi.fn(), getTaskWithId, @@ -64,8 +66,8 @@ describe("ClineProvider.removeClineFromStack() delegation awareness", () => { await (ClineProvider.prototype as any).removeClineFromStack.call(provider) - // Stack should be empty after pop - expect(provider.clineStack).toHaveLength(0) + // Map should be empty after removal + expect(provider.tasks.size).toBe(0) // Parent lookup should have been called expect(getTaskWithId).toHaveBeenCalledWith("parent-1") @@ -93,8 +95,8 @@ describe("ClineProvider.removeClineFromStack() delegation awareness", () => { await (ClineProvider.prototype as any).removeClineFromStack.call(provider) - // Stack should be empty - expect(provider.clineStack).toHaveLength(0) + // Map should be empty + expect(provider.tasks.size).toBe(0) // No parent lookup or update should happen expect(getTaskWithId).not.toHaveBeenCalled() @@ -161,8 +163,8 @@ describe("ClineProvider.removeClineFromStack() delegation awareness", () => { // Should NOT throw await (ClineProvider.prototype as any).removeClineFromStack.call(provider) - // Stack should still be empty (pop was not blocked) - expect(provider.clineStack).toHaveLength(0) + // Map should still be empty (removal was not blocked) + expect(provider.tasks.size).toBe(0) // The abort should still have been called expect(childTask.abortTask).toHaveBeenCalledWith(true) @@ -176,9 +178,11 @@ describe("ClineProvider.removeClineFromStack() delegation awareness", () => { expect(updateTaskHistory).not.toHaveBeenCalled() }) - it("handles empty stack gracefully", async () => { + it("handles empty map gracefully", async () => { const 
provider = { - clineStack: [] as any[], + tasks: new Map() as Map, + focusedTaskId: undefined as string | undefined, + leaderTaskId: undefined as string | undefined, taskEventListeners: new Map(), log: vi.fn(), getTaskWithId: vi.fn(), @@ -188,7 +192,7 @@ describe("ClineProvider.removeClineFromStack() delegation awareness", () => { // Should not throw await (ClineProvider.prototype as any).removeClineFromStack.call(provider) - expect(provider.clineStack).toHaveLength(0) + expect(provider.tasks.size).toBe(0) expect(provider.getTaskWithId).not.toHaveBeenCalled() expect(provider.updateTaskHistory).not.toHaveBeenCalled() }) @@ -215,8 +219,8 @@ describe("ClineProvider.removeClineFromStack() delegation awareness", () => { // Call with skipDelegationRepair: true (as delegateParentAndOpenChild would) await (ClineProvider.prototype as any).removeClineFromStack.call(provider, { skipDelegationRepair: true }) - // Stack should be empty after pop - expect(provider.clineStack).toHaveLength(0) + // Map should be empty after removal + expect(provider.tasks.size).toBe(0) // Parent lookup should NOT have been called — repair was skipped entirely expect(getTaskWithId).not.toHaveBeenCalled() @@ -258,7 +262,9 @@ describe("ClineProvider.removeClineFromStack() delegation awareness", () => { const updateTaskHistory = vi.fn().mockResolvedValue([]) const provider = { - clineStack: [taskB] as any[], + tasks: new Map([["task-B", taskB as any]]), + focusedTaskId: "task-B" as string | undefined, + leaderTaskId: "task-B" as string | undefined, taskEventListeners: new Map(), log: vi.fn(), getTaskWithId, @@ -268,8 +274,8 @@ describe("ClineProvider.removeClineFromStack() delegation awareness", () => { // Simulate what delegateParentAndOpenChild does: pop B with skipDelegationRepair await (ClineProvider.prototype as any).removeClineFromStack.call(provider, { skipDelegationRepair: true }) - // B was popped - expect(provider.clineStack).toHaveLength(0) + // B was removed + 
expect(provider.tasks.size).toBe(0) // Grandparent A should NOT have been looked up or modified expect(getTaskWithId).not.toHaveBeenCalled() diff --git a/src/__tests__/single-open-invariant.spec.ts b/src/__tests__/single-open-invariant.spec.ts index 2dd466a992a..069b6c15333 100644 --- a/src/__tests__/single-open-invariant.spec.ts +++ b/src/__tests__/single-open-invariant.spec.ts @@ -40,8 +40,10 @@ describe("Single-open-task invariant", () => { const addClineToStack = vi.fn().mockResolvedValue(undefined) const provider = { - // Simulate an existing task present in stack - clineStack: [{ taskId: "existing-1" }], + // Simulate an existing task present in the map + tasks: new Map([["existing-1", { taskId: "existing-1" }]]), + focusedTaskId: "existing-1", + leaderTaskId: "existing-1", setValues: vi.fn(), getState: vi.fn().mockResolvedValue({ apiConfiguration: { apiProvider: "anthropic", consecutiveMistakeLimit: 0 }, diff --git a/src/__tests__/spawnConcurrentChildren.spec.ts b/src/__tests__/spawnConcurrentChildren.spec.ts new file mode 100644 index 00000000000..bc4762bc65d --- /dev/null +++ b/src/__tests__/spawnConcurrentChildren.spec.ts @@ -0,0 +1,323 @@ +// npx vitest run __tests__/spawnConcurrentChildren.spec.ts + +import { describe, it, expect, vi } from "vitest" +import { ClineProvider } from "../core/webview/ClineProvider" +import { RooCodeEventName } from "@roo-code/types" + +vi.mock("@roo-code/telemetry", () => ({ + TelemetryService: { + instance: { + captureParallelTaskSpawned: vi.fn(), + captureParallelTaskCompleted: vi.fn(), + captureParallelTaskChildFailed: vi.fn(), + captureWorktreeCreated: vi.fn(), + captureWorktreeDeleted: vi.fn(), + }, + }, +})) + +// --------------------------------------------------------------------------- +// Helpers +// --------------------------------------------------------------------------- + +type CompletionHandler = { + resolve: (result: { summary: string; payload?: Record }) => void + reject: (reason: Error) => void +} + 
+function buildProvider(opts: { parentTaskId: string }) { + const { parentTaskId } = opts + + const parentTask = { taskId: parentTaskId, workspacePath: "/workspace", parentTask: undefined } + + const childTaskCounter = { n: 0 } + const createdChildren: Array<{ taskId: string; start: ReturnType }> = [] + + const provider = { + tasks: new Map([[parentTaskId, parentTask]]), + focusedTaskId: parentTaskId as string | undefined, + leaderTaskId: parentTaskId as string | undefined, + childCompletionHandlers: new Map(), + taskEventListeners: new WeakMap(), + log: vi.fn(), + + // Mock handleModeSwitch — synchronous to avoid ordering issues in tests + handleModeSwitch: vi.fn().mockResolvedValue(undefined), + + // Mock _createWorktreeForTask + _createWorktreeForTask: vi.fn().mockResolvedValue("/worktree/path"), + + // Mock createTask — each call returns a new unique child Task stub + createTask: vi.fn().mockImplementation(async () => { + const id = `child-${++childTaskCounter.n}` + const child = { + taskId: id, + workspacePath: "/workspace", + parentTask: parentTask, + start: vi.fn(), + } + createdChildren.push(child) + // Register in the tasks map as addClineToStack would + provider.tasks.set(id, child) + provider.focusedTaskId = id + return child + }), + + getTaskWithId: vi.fn().mockImplementation(async (id: string) => ({ + historyItem: { id, worktreePath: undefined, workspace: "/workspace" }, + })), + updateTaskHistory: vi.fn().mockResolvedValue(undefined), + + // Mock removeClineFromStack so abort-siblings tests can track calls + removeClineFromStack: vi.fn().mockImplementation(async (opts?: { taskId?: string }) => { + const id = opts?.taskId ?? 
provider.focusedTaskId + if (id) provider.tasks.delete(id) + }), + + emit: vi.fn(), + } + + return { provider, parentTask, createdChildren, childTaskCounter } +} + +// --------------------------------------------------------------------------- +// Tests +// --------------------------------------------------------------------------- + +describe("ClineProvider.spawnConcurrentChildren()", () => { + it("creates and starts all children, returns aggregated results", async () => { + const { provider, createdChildren } = buildProvider({ parentTaskId: "parent-1" }) + + // Schedule resolutions after children are registered + const resolveAll = () => { + for (const [taskId, handler] of provider.childCompletionHandlers) { + handler.resolve({ summary: `result-of-${taskId}` }) + } + } + + // Intercept start() calls to trigger resolutions + let startCount = 0 + const originalCreateTask = provider.createTask + provider.createTask = vi.fn().mockImplementation(async (...args) => { + const child = await originalCreateTask(...args) + const originalStart = child.start + child.start = vi.fn().mockImplementation(() => { + originalStart() + startCount++ + if (startCount === 2) resolveAll() + }) + return child + }) + + const results = await (ClineProvider.prototype as any).spawnConcurrentChildren.call(provider, { + parentTaskId: "parent-1", + tasks: [ + { mode: "code", message: "Task A" }, + { mode: "debug", message: "Task B" }, + ], + }) + + expect(results).toHaveLength(2) + expect(results[0].summary).toBe("result-of-child-1") + expect(results[1].summary).toBe("result-of-child-2") + expect(results.every((r: { error?: string }) => !r.error)).toBe(true) + }) + + it("serialises handleModeSwitch calls (no concurrent mode-switch race)", async () => { + const { provider } = buildProvider({ parentTaskId: "parent-1" }) + const modeOrder: string[] = [] + + provider.handleModeSwitch = vi.fn().mockImplementation(async (mode: string) => { + modeOrder.push(`start-${mode}`) + await Promise.resolve() // 
yield + modeOrder.push(`end-${mode}`) + }) + + let startCount = 0 + const originalCreateTask = provider.createTask + provider.createTask = vi.fn().mockImplementation(async (...args) => { + const child = await originalCreateTask(...args) + child.start = vi.fn().mockImplementation(() => { + startCount++ + if (startCount === 2) { + for (const [, handler] of provider.childCompletionHandlers) { + handler.resolve({ summary: "done" }) + } + } + }) + return child + }) + + await (ClineProvider.prototype as any).spawnConcurrentChildren.call(provider, { + parentTaskId: "parent-1", + tasks: [ + { mode: "code", message: "A" }, + { mode: "debug", message: "B" }, + ], + }) + + // The calls must be fully interleaved in order — start-code, end-code, start-debug, end-debug + // (not start-code, start-debug, end-code, end-debug which would be the concurrent-map race) + expect(modeOrder).toEqual(["start-code", "end-code", "start-debug", "end-debug"]) + }) + + it("all children start() are called before any await on completions", async () => { + const { provider } = buildProvider({ parentTaskId: "parent-1" }) + const startLog: string[] = [] + + const originalCreateTask = provider.createTask + provider.createTask = vi.fn().mockImplementation(async (...args) => { + const child = await originalCreateTask(...args) + child.start = vi.fn().mockImplementation(() => { + startLog.push(child.taskId) + // Do NOT resolve yet — verifying start was called for all before awaiting + }) + return child + }) + + // Let resolutions happen after the start loop + setImmediate(() => { + for (const [, handler] of provider.childCompletionHandlers) { + handler.resolve({ summary: "done" }) + } + }) + + await (ClineProvider.prototype as any).spawnConcurrentChildren.call(provider, { + parentTaskId: "parent-1", + tasks: [ + { mode: "code", message: "A" }, + { mode: "code", message: "B" }, + { mode: "code", message: "C" }, + ], + }) + + // All three starts fired before we awaited any completion + 
expect(startLog).toEqual(["child-1", "child-2", "child-3"]) + }) + + it("collects results with errors when children fail (no abortOnChildFailure)", async () => { + const { provider } = buildProvider({ parentTaskId: "parent-1" }) + + let startCount = 0 + const originalCreateTask = provider.createTask + provider.createTask = vi.fn().mockImplementation(async (...args) => { + const child = await originalCreateTask(...args) + child.start = vi.fn().mockImplementation(() => { + startCount++ + const handler = provider.childCompletionHandlers.get(child.taskId) + if (startCount === 1) { + handler?.reject(new Error("child-1 failed")) + } else { + handler?.resolve({ summary: "child-2 succeeded" }) + } + }) + return child + }) + + const results = await (ClineProvider.prototype as any).spawnConcurrentChildren.call(provider, { + parentTaskId: "parent-1", + tasks: [ + { mode: "code", message: "A" }, + { mode: "code", message: "B" }, + ], + abortOnChildFailure: false, + }) + + expect(results).toHaveLength(2) + const failed = results.find((r: { error?: string }) => r.error) + const succeeded = results.find((r: { error?: string }) => !r.error) + expect(failed?.error).toBe("child-1 failed") + expect(succeeded?.summary).toBe("child-2 succeeded") + }) + + it("aborts sibling tasks when abortOnChildFailure is true and a child fails", async () => { + const { provider } = buildProvider({ parentTaskId: "parent-1" }) + + let startCount = 0 + const originalCreateTask = provider.createTask + provider.createTask = vi.fn().mockImplementation(async (...args) => { + const child = await originalCreateTask(...args) + child.start = vi.fn().mockImplementation(() => { + startCount++ + const handler = provider.childCompletionHandlers.get(child.taskId) + if (startCount === 1) { + // child-1 fails immediately + handler?.reject(new Error("child-1 bombed")) + } + // child-2 never resolves on its own — it should be aborted + }) + return child + }) + + const results = await (ClineProvider.prototype as 
any).spawnConcurrentChildren.call(provider, { + parentTaskId: "parent-1", + tasks: [ + { mode: "code", message: "A" }, + { mode: "code", message: "B" }, + ], + abortOnChildFailure: true, + }) + + expect(results).toHaveLength(2) + // Both results should carry errors + expect(results.every((r: { error?: string }) => Boolean(r.error))).toBe(true) + // removeClineFromStack should have been called for child-2 (the sibling) + expect(provider.removeClineFromStack).toHaveBeenCalledWith( + expect.objectContaining({ taskId: "child-2", skipDelegationRepair: true }), + ) + }) + + it("emits TaskSpawned and TaskDelegated events for each child", async () => { + const { provider } = buildProvider({ parentTaskId: "parent-1" }) + + let startCount = 0 + const originalCreateTask = provider.createTask + provider.createTask = vi.fn().mockImplementation(async (...args) => { + const child = await originalCreateTask(...args) + child.start = vi.fn().mockImplementation(() => { + startCount++ + if (startCount === 2) { + for (const [, h] of provider.childCompletionHandlers) h.resolve({ summary: "done" }) + } + }) + return child + }) + + await (ClineProvider.prototype as any).spawnConcurrentChildren.call(provider, { + parentTaskId: "parent-1", + tasks: [ + { mode: "code", message: "A" }, + { mode: "code", message: "B" }, + ], + }) + + expect(provider.emit).toHaveBeenCalledWith(RooCodeEventName.TaskSpawned, "child-1") + expect(provider.emit).toHaveBeenCalledWith(RooCodeEventName.TaskSpawned, "child-2") + expect(provider.emit).toHaveBeenCalledWith(RooCodeEventName.TaskDelegated, "parent-1", "child-1") + expect(provider.emit).toHaveBeenCalledWith(RooCodeEventName.TaskDelegated, "parent-1", "child-2") + }) + + it("creates worktrees when spec.worktree is specified", async () => { + const { provider } = buildProvider({ parentTaskId: "parent-1" }) + + let startCount = 0 + const originalCreateTask = provider.createTask + provider.createTask = vi.fn().mockImplementation(async (...args) => { + const 
child = await originalCreateTask(...args) + child.start = vi.fn().mockImplementation(() => { + startCount++ + if (startCount === 1) { + provider.childCompletionHandlers.get(child.taskId)?.resolve({ summary: "done" }) + } + }) + return child + }) + + await (ClineProvider.prototype as any).spawnConcurrentChildren.call(provider, { + parentTaskId: "parent-1", + tasks: [{ mode: "code", message: "A", worktree: "auto" }], + }) + + expect(provider._createWorktreeForTask).toHaveBeenCalledWith("/workspace", "auto", "parent-1") + }) +}) diff --git a/src/activate/CodeActionProvider.ts b/src/activate/CodeActionProvider.ts index 4a0eb1b81ee..99485fda55b 100644 --- a/src/activate/CodeActionProvider.ts +++ b/src/activate/CodeActionProvider.ts @@ -7,11 +7,11 @@ import { getCodeActionCommand } from "../utils/commands" import { EditorUtils } from "../integrations/editor/EditorUtils" export const TITLES: Record = { - EXPLAIN: "Explain with Roo Code", - FIX: "Fix with Roo Code", - IMPROVE: "Improve with Roo Code", - ADD_TO_CONTEXT: "Add to Roo Code", - NEW_TASK: "New Roo Code Task", + EXPLAIN: "Explain with Moo Code", + FIX: "Fix with Moo Code", + IMPROVE: "Improve with Moo Code", + ADD_TO_CONTEXT: "Add to Moo Code", + NEW_TASK: "New Moo Code Task", } as const export class CodeActionProvider implements vscode.CodeActionProvider { diff --git a/src/activate/registerCommands.ts b/src/activate/registerCommands.ts index f02ee8309a3..8b4c13ed8eb 100644 --- a/src/activate/registerCommands.ts +++ b/src/activate/registerCommands.ts @@ -21,7 +21,7 @@ import { t } from "../i18n" export function getVisibleProviderOrLog(outputChannel: vscode.OutputChannel): ClineProvider | undefined { const visibleProvider = ClineProvider.getVisibleInstance() if (!visibleProvider) { - outputChannel.appendLine("Cannot find any visible Roo Code instances.") + outputChannel.appendLine("Cannot find any visible Moo Code instances.") return undefined } return visibleProvider @@ -227,7 +227,7 @@ export const 
openClineInNewTab = async ({ context, outputChannel }: Omit { + // Cloud features disabled — return empty models if no base URL configured + if (!baseUrl) { + return {} + } // Construct the models endpoint URL early so it's available in catch block for logging // Strip trailing /v1 or /v1/ to avoid /v1/v1/models const normalizedBase = baseUrl.replace(/\/?v1\/?$/, "") @@ -65,9 +69,9 @@ export async function getRooModels(baseUrl: string, apiKey?: string): Promise = { originator: "roo-code", session_id: taskId || this.sessionId, - "User-Agent": `roo-code/${Package.version} (${os.platform()} ${os.release()}; ${os.arch()}) node/${process.version.slice(1)}`, + "User-Agent": `moo-code/${Package.version} (${os.platform()} ${os.release()}; ${os.arch()}) node/${process.version.slice(1)}`, ...(accountId ? { "ChatGPT-Account-Id": accountId } : {}), } @@ -505,7 +505,7 @@ export class OpenAiCodexHandler extends BaseProvider implements SingleCompletion Authorization: `Bearer ${accessToken}`, originator: "roo-code", session_id: taskId || this.sessionId, - "User-Agent": `roo-code/${Package.version} (${os.platform()} ${os.release()}; ${os.arch()}) node/${process.version.slice(1)}`, + "User-Agent": `moo-code/${Package.version} (${os.platform()} ${os.release()}; ${os.arch()}) node/${process.version.slice(1)}`, } // Add ChatGPT-Account-Id if available (required for organization subscriptions) @@ -1201,7 +1201,7 @@ export class OpenAiCodexHandler extends BaseProvider implements SingleCompletion Authorization: `Bearer ${accessToken}`, originator: "roo-code", session_id: this.sessionId, - "User-Agent": `roo-code/${Package.version} (${os.platform()} ${os.release()}; ${os.arch()}) node/${process.version.slice(1)}`, + "User-Agent": `moo-code/${Package.version} (${os.platform()} ${os.release()}; ${os.arch()}) node/${process.version.slice(1)}`, } // Add ChatGPT-Account-Id if available diff --git a/src/api/providers/openai-native.ts b/src/api/providers/openai-native.ts index 
6ce93827636..43641c4f4a2 100644 --- a/src/api/providers/openai-native.ts +++ b/src/api/providers/openai-native.ts @@ -95,7 +95,7 @@ export class OpenAiNativeHandler extends BaseProvider implements SingleCompletio } const apiKey = this.options.openAiNativeApiKey ?? "not-provided" // Include originator, session_id, and User-Agent headers for API tracking and debugging - const userAgent = `roo-code/${Package.version} (${os.platform()} ${os.release()}; ${os.arch()}) node/${process.version.slice(1)}` + const userAgent = `moo-code/${Package.version} (${os.platform()} ${os.release()}; ${os.arch()}) node/${process.version.slice(1)}` this.client = new OpenAI({ baseURL: this.options.openAiNativeBaseUrl || undefined, apiKey, @@ -415,7 +415,7 @@ export class OpenAiNativeHandler extends BaseProvider implements SingleCompletio // Build per-request headers using taskId when available, falling back to sessionId const taskId = metadata?.taskId - const userAgent = `roo-code/${Package.version} (${os.platform()} ${os.release()}; ${os.arch()}) node/${process.version.slice(1)}` + const userAgent = `moo-code/${Package.version} (${os.platform()} ${os.release()}; ${os.arch()}) node/${process.version.slice(1)}` const requestHeaders: Record = { originator: "roo-code", session_id: taskId || this.sessionId, @@ -563,7 +563,7 @@ export class OpenAiNativeHandler extends BaseProvider implements SingleCompletio // Build per-request headers using taskId when available, falling back to sessionId const taskId = metadata?.taskId - const userAgent = `roo-code/${Package.version} (${os.platform()} ${os.release()}; ${os.arch()}) node/${process.version.slice(1)}` + const userAgent = `moo-code/${Package.version} (${os.platform()} ${os.release()}; ${os.arch()}) node/${process.version.slice(1)}` try { const response = await fetch(url, { diff --git a/src/api/providers/roo.ts b/src/api/providers/roo.ts index b455a1885ed..7ae110bab33 100644 --- a/src/api/providers/roo.ts +++ b/src/api/providers/roo.ts @@ -44,10 
+44,10 @@ export class RooHandler extends BaseOpenAiCompatibleProvider { constructor(options: ApiHandlerOptions) { const sessionToken = options.rooApiKey ?? getSessionToken() - let baseURL = process.env.ROO_CODE_PROVIDER_URL ?? "https://api.roocode.com/proxy" + let baseURL = process.env.ROO_CODE_PROVIDER_URL ?? "" // Ensure baseURL ends with /v1 for OpenAI client, but don't duplicate it - if (!baseURL.endsWith("/v1")) { + if (baseURL && !baseURL.endsWith("/v1")) { baseURL = `${baseURL}/v1` } @@ -55,7 +55,7 @@ export class RooHandler extends BaseOpenAiCompatibleProvider { // The provider-proxy server will return 401 if authentication fails. super({ ...options, - providerName: "Roo Code Cloud", + providerName: "Moo Code Cloud", baseURL, // Already has /v1 suffix apiKey: sessionToken, defaultProviderModelId: rooDefaultModelId, @@ -65,9 +65,10 @@ export class RooHandler extends BaseOpenAiCompatibleProvider { // Load dynamic models asynchronously - strip /v1 from baseURL for fetcher this.fetcherBaseURL = baseURL.endsWith("/v1") ? baseURL.slice(0, -3) : baseURL - this.loadDynamicModels(this.fetcherBaseURL, sessionToken).catch((error) => { - console.error("[RooHandler] Failed to load dynamic models:", error) - }) + // Cloud features disabled — skip loading dynamic models from upstream + // this.loadDynamicModels(this.fetcherBaseURL, sessionToken).catch((error) => { + // console.error("[RooHandler] Failed to load dynamic models:", error) + // }) } protected override createStream( @@ -122,220 +123,18 @@ export class RooHandler extends BaseOpenAiCompatibleProvider { return this.currentReasoningDetails.length > 0 ? 
this.currentReasoningDetails : undefined } + // eslint-disable-next-line require-yield override async *createMessage( systemPrompt: string, messages: Anthropic.Messages.MessageParam[], metadata?: ApiHandlerCreateMessageMetadata, ): ApiStream { - try { - // Reset reasoning_details accumulator for this request - this.currentReasoningDetails = [] - - const headers: Record = { - "X-Roo-App-Version": Package.version, - } - - if (metadata?.taskId) { - headers["X-Roo-Task-ID"] = metadata.taskId - } - - const stream = await this.createStream(systemPrompt, messages, metadata, { headers }) - - let lastUsage: RooUsage | undefined = undefined - // Accumulator for reasoning_details FROM the API. - // We preserve the original shape of reasoning_details to prevent malformed responses. - const reasoningDetailsAccumulator = new Map< - string, - { - type: string - text?: string - summary?: string - data?: string - id?: string | null - format?: string - signature?: string - index: number - } - >() - - // Track whether we've yielded displayable text from reasoning_details. - // When reasoning_details has displayable content (reasoning.text or reasoning.summary), - // we skip yielding the top-level reasoning field to avoid duplicate display. - let hasYieldedReasoningFromDetails = false - - for await (const chunk of stream) { - const delta = chunk.choices[0]?.delta - const finishReason = chunk.choices[0]?.finish_reason - - if (delta) { - // Handle reasoning_details array format (used by Gemini 3, Claude, OpenAI o-series, etc.) 
- // See: https://openrouter.ai/docs/use-cases/reasoning-tokens#preserving-reasoning-blocks - // Priority: Check for reasoning_details first, as it's the newer format - const deltaWithReasoning = delta as typeof delta & { - reasoning_details?: Array<{ - type: string - text?: string - summary?: string - data?: string - id?: string | null - format?: string - signature?: string - index?: number - }> - } - - if (deltaWithReasoning.reasoning_details && Array.isArray(deltaWithReasoning.reasoning_details)) { - for (const detail of deltaWithReasoning.reasoning_details) { - const index = detail.index ?? 0 - // Use id as key when available to merge chunks that share the same reasoning block id - // This ensures that reasoning.summary and reasoning.encrypted chunks with the same id - // are merged into a single object, matching the provider's expected format - const key = detail.id ?? `${detail.type}-${index}` - const existing = reasoningDetailsAccumulator.get(key) - - if (existing) { - // Accumulate text/summary/data for existing reasoning detail - if (detail.text !== undefined) { - existing.text = (existing.text || "") + detail.text - } - if (detail.summary !== undefined) { - existing.summary = (existing.summary || "") + detail.summary - } - if (detail.data !== undefined) { - existing.data = (existing.data || "") + detail.data - } - // Update other fields if provided - // Note: Don't update type - keep original type (e.g., reasoning.summary) - // even when encrypted data chunks arrive with type reasoning.encrypted - if (detail.id !== undefined) existing.id = detail.id - if (detail.format !== undefined) existing.format = detail.format - if (detail.signature !== undefined) existing.signature = detail.signature - } else { - // Start new reasoning detail accumulation - reasoningDetailsAccumulator.set(key, { - type: detail.type, - text: detail.text, - summary: detail.summary, - data: detail.data, - id: detail.id, - format: detail.format, - signature: detail.signature, - index, - 
}) - } - - // Yield text for display (still fragmented for live streaming) - // Only reasoning.text and reasoning.summary have displayable content - // reasoning.encrypted is intentionally skipped as it contains redacted content - let reasoningText: string | undefined - if (detail.type === "reasoning.text" && typeof detail.text === "string") { - reasoningText = detail.text - } else if (detail.type === "reasoning.summary" && typeof detail.summary === "string") { - reasoningText = detail.summary - } - - if (reasoningText) { - hasYieldedReasoningFromDetails = true - yield { type: "reasoning", text: reasoningText } - } - } - } - - // Handle top-level reasoning field for UI display. - // Skip if we've already yielded from reasoning_details to avoid duplicate display. - if ("reasoning" in delta && delta.reasoning && typeof delta.reasoning === "string") { - if (!hasYieldedReasoningFromDetails) { - yield { type: "reasoning", text: delta.reasoning } - } - } else if ("reasoning_content" in delta && typeof delta.reasoning_content === "string") { - // Also check for reasoning_content for backward compatibility - if (!hasYieldedReasoningFromDetails) { - yield { type: "reasoning", text: delta.reasoning_content } - } - } - - // Emit raw tool call chunks - NativeToolCallParser handles state management - if ("tool_calls" in delta && Array.isArray(delta.tool_calls)) { - for (const toolCall of delta.tool_calls) { - yield { - type: "tool_call_partial", - index: toolCall.index, - id: toolCall.id, - name: toolCall.function?.name, - arguments: toolCall.function?.arguments, - } - } - } - - if (delta.content) { - yield { - type: "text", - text: delta.content, - } - } - } - - if (finishReason) { - const endEvents = NativeToolCallParser.processFinishReason(finishReason) - for (const event of endEvents) { - yield event - } - } - - if (chunk.usage) { - lastUsage = chunk.usage as RooUsage - } - } - - // After streaming completes, store ONLY the reasoning_details we received from the API. 
- if (reasoningDetailsAccumulator.size > 0) { - this.currentReasoningDetails = Array.from(reasoningDetailsAccumulator.values()) - } - - if (lastUsage) { - // Check if the current model is marked as free - const model = this.getModel() - const isFreeModel = model.info.isFree ?? false - - // Normalize input tokens based on protocol expectations: - // - OpenAI protocol expects TOTAL input tokens (cached + non-cached) - // - Anthropic protocol expects NON-CACHED input tokens (caches passed separately) - const modelId = model.id - const apiProtocol = getApiProtocol("roo", modelId) - - const promptTokens = lastUsage.prompt_tokens || 0 - const cacheWrite = lastUsage.cache_creation_input_tokens || 0 - const cacheRead = lastUsage.prompt_tokens_details?.cached_tokens || 0 - const nonCached = Math.max(0, promptTokens - cacheWrite - cacheRead) - - const inputTokensForDownstream = apiProtocol === "anthropic" ? nonCached : promptTokens - - yield { - type: "usage", - inputTokens: inputTokensForDownstream, - outputTokens: lastUsage.completion_tokens || 0, - cacheWriteTokens: cacheWrite, - cacheReadTokens: cacheRead, - totalCost: isFreeModel ? 0 : (lastUsage.cost ?? 0), - } - } - } catch (error) { - const errorContext = { - error: error instanceof Error ? error.message : String(error), - stack: error instanceof Error ? error.stack : undefined, - modelId: this.options.apiModelId, - hasTaskId: Boolean(metadata?.taskId), - } - - console.error(`[RooHandler] Error during message streaming: ${JSON.stringify(errorContext)}`) - - throw error - } + // Cloud features disabled — Moo Code provider is not available in this fork + throw new Error("Moo Code provider is not available in this fork. Cloud features have been disabled.") } + override async completePrompt(prompt: string): Promise { - // Update API key before making request to ensure we use the latest session token - this.client.apiKey = this.options.rooApiKey ?? 
getSessionToken() - return super.completePrompt(prompt) + throw new Error("Moo Code provider is not available in this fork. Cloud features have been disabled.") } private async loadDynamicModels(baseURL: string, apiKey?: string): Promise { @@ -387,7 +186,7 @@ export class RooHandler extends BaseOpenAiCompatibleProvider { } /** - * Generate an image using Roo Code Cloud's image generation API + * Generate an image using Moo Code Cloud's image generation API * @param prompt The text prompt for image generation * @param model The model to use for generation * @param inputImage Optional base64 encoded input image data URL @@ -400,36 +199,9 @@ export class RooHandler extends BaseOpenAiCompatibleProvider { inputImage?: string, apiMethod?: ImageGenerationApiMethod, ): Promise { - const sessionToken = this.options.rooApiKey ?? getSessionToken() - - if (!sessionToken || sessionToken === "unauthenticated") { - return { - success: false, - error: t("tools:generateImage.roo.authRequired"), - } - } - - const baseURL = `${this.fetcherBaseURL}/v1` - - // Use the specified API method, defaulting to chat_completions for backward compatibility - if (apiMethod === "images_api") { - return generateImageWithImagesApi({ - baseURL, - authToken: sessionToken, - model, - prompt, - inputImage, - outputFormat: "png", - }) + return { + success: false, + error: "Moo Code provider is not available in this fork. 
Cloud features have been disabled.", } - - // Default to chat completions approach - return generateImageWithProvider({ - baseURL, - authToken: sessionToken, - model, - prompt, - inputImage, - }) } } diff --git a/src/api/providers/utils/image-generation.ts b/src/api/providers/utils/image-generation.ts index 16ddb9c815b..92939a18b25 100644 --- a/src/api/providers/utils/image-generation.ts +++ b/src/api/providers/utils/image-generation.ts @@ -70,8 +70,8 @@ export async function generateImageWithProvider(options: ImageGenerationOptions) headers: { Authorization: `Bearer ${authToken}`, "Content-Type": "application/json", - "HTTP-Referer": "https://github.com/RooVetGit/Roo-Code", - "X-Title": "Roo Code", + "HTTP-Referer": "https://github.com/moo-code/Moo-Code", + "X-Title": "Moo Code", }, body: JSON.stringify({ model, @@ -216,8 +216,8 @@ export async function generateImageWithImagesApi(options: ImagesApiOptions): Pro headers: { Authorization: `Bearer ${authToken}`, "Content-Type": "application/json", - "HTTP-Referer": "https://github.com/RooVetGit/Roo-Code", - "X-Title": "Roo Code", + "HTTP-Referer": "https://github.com/moo-code/Moo-Code", + "X-Title": "Moo Code", }, body: JSON.stringify(requestBody), } diff --git a/src/api/providers/vscode-lm.ts b/src/api/providers/vscode-lm.ts index 8fb564a9d59..e50ebe4d1c5 100644 --- a/src/api/providers/vscode-lm.ts +++ b/src/api/providers/vscode-lm.ts @@ -91,7 +91,7 @@ export class VsCodeLmHandler extends BaseProvider implements SingleCompletionHan this.dispose() throw new Error( - `Roo Code : Failed to initialize handler: ${error instanceof Error ? error.message : "Unknown error"}`, + `Moo Code : Failed to initialize handler: ${error instanceof Error ? 
error.message : "Unknown error"}`, ) } } @@ -106,17 +106,17 @@ export class VsCodeLmHandler extends BaseProvider implements SingleCompletionHan try { // Check if the client is already initialized if (this.client) { - console.debug("Roo Code : Client already initialized") + console.debug("Moo Code : Client already initialized") return } // Create a new client instance this.client = await this.createClient(this.options.vsCodeLmModelSelector || {}) - console.debug("Roo Code : Client initialized successfully") + console.debug("Moo Code : Client initialized successfully") } catch (error) { // Handle errors during client initialization const errorMessage = error instanceof Error ? error.message : "Unknown error" - console.error("Roo Code : Client initialization failed:", errorMessage) - throw new Error(`Roo Code : Failed to initialize client: ${errorMessage}`) + console.error("Moo Code : Client initialization failed:", errorMessage) + throw new Error(`Moo Code : Failed to initialize client: ${errorMessage}`) } } /** @@ -164,7 +164,7 @@ export class VsCodeLmHandler extends BaseProvider implements SingleCompletionHan } } catch (error) { const errorMessage = error instanceof Error ? 
error.message : "Unknown error" - throw new Error(`Roo Code : Failed to select model: ${errorMessage}`) + throw new Error(`Moo Code : Failed to select model: ${errorMessage}`) } } @@ -225,13 +225,13 @@ export class VsCodeLmHandler extends BaseProvider implements SingleCompletionHan private async internalCountTokens(text: string | vscode.LanguageModelChatMessage): Promise { // Check for required dependencies if (!this.client) { - console.warn("Roo Code : No client available for token counting") + console.warn("Moo Code : No client available for token counting") return 0 } // Validate input if (!text) { - console.debug("Roo Code : Empty text provided for token counting") + console.debug("Moo Code : Empty text provided for token counting") return 0 } @@ -255,24 +255,24 @@ export class VsCodeLmHandler extends BaseProvider implements SingleCompletionHan } else if (text instanceof vscode.LanguageModelChatMessage) { // For chat messages, ensure we have content if (!text.content || (Array.isArray(text.content) && text.content.length === 0)) { - console.debug("Roo Code : Empty chat message content") + console.debug("Moo Code : Empty chat message content") return 0 } const countMessage = extractTextCountFromMessage(text) tokenCount = await this.client.countTokens(countMessage, cancellationToken) } else { - console.warn("Roo Code : Invalid input type for token counting") + console.warn("Moo Code : Invalid input type for token counting") return 0 } // Validate the result if (typeof tokenCount !== "number") { - console.warn("Roo Code : Non-numeric token count received:", tokenCount) + console.warn("Moo Code : Non-numeric token count received:", tokenCount) return 0 } if (tokenCount < 0) { - console.warn("Roo Code : Negative token count received:", tokenCount) + console.warn("Moo Code : Negative token count received:", tokenCount) return 0 } @@ -280,12 +280,12 @@ export class VsCodeLmHandler extends BaseProvider implements SingleCompletionHan } catch (error) { // Handle specific 
error types if (error instanceof vscode.CancellationError) { - console.debug("Roo Code : Token counting cancelled by user") + console.debug("Moo Code : Token counting cancelled by user") return 0 } const errorMessage = error instanceof Error ? error.message : "Unknown error" - console.warn("Roo Code : Token counting failed:", errorMessage) + console.warn("Moo Code : Token counting failed:", errorMessage) // Log additional error details if available if (error instanceof Error && error.stack) { @@ -317,7 +317,7 @@ export class VsCodeLmHandler extends BaseProvider implements SingleCompletionHan private async getClient(): Promise { if (!this.client) { - console.debug("Roo Code : Getting client with options:", { + console.debug("Moo Code : Getting client with options:", { vsCodeLmModelSelector: this.options.vsCodeLmModelSelector, hasOptions: !!this.options, selectorKeys: this.options.vsCodeLmModelSelector ? Object.keys(this.options.vsCodeLmModelSelector) : [], @@ -326,12 +326,12 @@ export class VsCodeLmHandler extends BaseProvider implements SingleCompletionHan try { // Use default empty selector if none provided to get all available models const selector = this.options?.vsCodeLmModelSelector || {} - console.debug("Roo Code : Creating client with selector:", selector) + console.debug("Moo Code : Creating client with selector:", selector) this.client = await this.createClient(selector) } catch (error) { const message = error instanceof Error ? 
error.message : "Unknown error" - console.error("Roo Code : Client creation failed:", message) - throw new Error(`Roo Code : Failed to create client: ${message}`) + console.error("Moo Code : Client creation failed:", message) + throw new Error(`Moo Code : Failed to create client: ${message}`) } } @@ -395,7 +395,7 @@ export class VsCodeLmHandler extends BaseProvider implements SingleCompletionHan try { // Create the response stream with required options const requestOptions: vscode.LanguageModelChatRequestOptions = { - justification: `Roo Code would like to use '${client.name}' from '${client.vendor}', Click 'Allow' to proceed.`, + justification: `Moo Code would like to use '${client.name}' from '${client.vendor}', Click 'Allow' to proceed.`, tools: convertToVsCodeLmTools(metadata?.tools ?? []), } @@ -410,7 +410,7 @@ export class VsCodeLmHandler extends BaseProvider implements SingleCompletionHan if (chunk instanceof vscode.LanguageModelTextPart) { // Validate text part value if (typeof chunk.value !== "string") { - console.warn("Roo Code : Invalid text part value received:", chunk.value) + console.warn("Moo Code : Invalid text part value received:", chunk.value) continue } @@ -423,23 +423,23 @@ export class VsCodeLmHandler extends BaseProvider implements SingleCompletionHan try { // Validate tool call parameters if (!chunk.name || typeof chunk.name !== "string") { - console.warn("Roo Code : Invalid tool name received:", chunk.name) + console.warn("Moo Code : Invalid tool name received:", chunk.name) continue } if (!chunk.callId || typeof chunk.callId !== "string") { - console.warn("Roo Code : Invalid tool callId received:", chunk.callId) + console.warn("Moo Code : Invalid tool callId received:", chunk.callId) continue } // Ensure input is a valid object if (!chunk.input || typeof chunk.input !== "object") { - console.warn("Roo Code : Invalid tool input received:", chunk.input) + console.warn("Moo Code : Invalid tool input received:", chunk.input) continue } // Log 
tool call for debugging - console.debug("Roo Code : Processing tool call:", { + console.debug("Moo Code : Processing tool call:", { name: chunk.name, callId: chunk.callId, inputSize: JSON.stringify(chunk.input).length, @@ -457,12 +457,12 @@ export class VsCodeLmHandler extends BaseProvider implements SingleCompletionHan } } } catch (error) { - console.error("Roo Code : Failed to process tool call:", error) + console.error("Moo Code : Failed to process tool call:", error) // Continue processing other chunks even if one fails continue } } else { - console.warn("Roo Code : Unknown chunk type received:", chunk) + console.warn("Moo Code : Unknown chunk type received:", chunk) } } @@ -479,11 +479,11 @@ export class VsCodeLmHandler extends BaseProvider implements SingleCompletionHan this.ensureCleanState() if (error instanceof vscode.CancellationError) { - throw new Error("Roo Code : Request cancelled by user") + throw new Error("Moo Code : Request cancelled by user") } if (error instanceof Error) { - console.error("Roo Code : Stream error details:", { + console.error("Moo Code : Stream error details:", { message: error.message, stack: error.stack, name: error.name, @@ -494,13 +494,13 @@ export class VsCodeLmHandler extends BaseProvider implements SingleCompletionHan } else if (typeof error === "object" && error !== null) { // Handle error-like objects const errorDetails = JSON.stringify(error, null, 2) - console.error("Roo Code : Stream error object:", errorDetails) - throw new Error(`Roo Code : Response stream error: ${errorDetails}`) + console.error("Moo Code : Stream error object:", errorDetails) + throw new Error(`Moo Code : Response stream error: ${errorDetails}`) } else { // Fallback for unknown error types const errorMessage = String(error) - console.error("Roo Code : Unknown stream error:", errorMessage) - throw new Error(`Roo Code : Response stream error: ${errorMessage}`) + console.error("Moo Code : Unknown stream error:", errorMessage) + throw new Error(`Moo 
Code : Response stream error: ${errorMessage}`) } } } @@ -520,7 +520,7 @@ export class VsCodeLmHandler extends BaseProvider implements SingleCompletionHan // Log any missing properties for debugging for (const [prop, value] of Object.entries(requiredProps)) { if (!value && value !== 0) { - console.warn(`Roo Code : Client missing ${prop} property`) + console.warn(`Moo Code : Client missing ${prop} property`) } } @@ -551,7 +551,7 @@ export class VsCodeLmHandler extends BaseProvider implements SingleCompletionHan ? stringifyVsCodeLmModelSelector(this.options.vsCodeLmModelSelector) : "vscode-lm" - console.debug("Roo Code : No client available, using fallback model info") + console.debug("Moo Code : No client available, using fallback model info") return { id: fallbackId, diff --git a/src/api/transform/vscode-lm-format.ts b/src/api/transform/vscode-lm-format.ts index 388197c2c2c..8515fe31dde 100644 --- a/src/api/transform/vscode-lm-format.ts +++ b/src/api/transform/vscode-lm-format.ts @@ -23,7 +23,7 @@ function asObjectSafe(value: any): object { return {} } catch (error) { - console.warn("Roo Code : Failed to parse object:", error) + console.warn("Moo Code : Failed to parse object:", error) return {} } } @@ -184,7 +184,7 @@ export function extractTextCountFromMessage(message: vscode.LanguageModelChatMes try { text += JSON.stringify(item.input) } catch (error) { - console.error("Roo Code : Failed to stringify tool call input:", error) + console.error("Moo Code : Failed to stringify tool call input:", error) } } } diff --git a/src/assets/icons/icon-nightly.png b/src/assets/icons/icon-nightly.png index b0bef29cc9c..8106c8bb754 100644 Binary files a/src/assets/icons/icon-nightly.png and b/src/assets/icons/icon-nightly.png differ diff --git a/src/assets/icons/icon.png b/src/assets/icons/icon.png index b0bef29cc9c..8106c8bb754 100644 Binary files a/src/assets/icons/icon.png and b/src/assets/icons/icon.png differ diff --git 
a/src/core/assistant-message/presentAssistantMessage.ts b/src/core/assistant-message/presentAssistantMessage.ts index 7f5862be154..bf1ffbc6769 100644 --- a/src/core/assistant-message/presentAssistantMessage.ts +++ b/src/core/assistant-message/presentAssistantMessage.ts @@ -3,6 +3,7 @@ import { Anthropic } from "@anthropic-ai/sdk" import type { ToolName, ClineAsk, ToolProgressStatus } from "@roo-code/types" import { ConsecutiveMistakeError, TelemetryEventName } from "@roo-code/types" +import { submitWorkerPermissionRequest } from "../swarm/LeaderPermissionBridge" import { TelemetryService } from "@roo-code/telemetry" import { customToolRegistry } from "@roo-code/core" @@ -30,6 +31,8 @@ import { askFollowupQuestionTool } from "../tools/AskFollowupQuestionTool" import { switchModeTool } from "../tools/SwitchModeTool" import { attemptCompletionTool, AttemptCompletionCallbacks } from "../tools/AttemptCompletionTool" import { newTaskTool } from "../tools/NewTaskTool" +import { spawnParallelTasksTool } from "../tools/SpawnParallelTasksTool" +import { runTeamPhaseTool } from "../tools/RunTeamPhaseTool" import { updateTodoListTool } from "../tools/UpdateTodoListTool" import { runSlashCommandTool } from "../tools/RunSlashCommandTool" import { skillTool } from "../tools/SkillTool" @@ -377,6 +380,11 @@ export async function presentAssistantMessage(cline: Task) { const modeName = getModeBySlug(mode, customModes)?.name ?? mode return `[${block.name} in ${modeName} mode: '${message}']` } + case "spawn_parallel_tasks": { + const tasks = block.params.tasks as Array<{ mode: string; message: string }> | undefined + const count = tasks?.length ?? 0 + return `[${block.name}: ${count} task${count !== 1 ? "s" : ""}]` + } case "run_slash_command": return `[${block.name} for '${block.params.command}'${block.params.args ? 
` with args: ${block.params.args}` : ""}]` case "skill": @@ -497,6 +505,47 @@ export async function presentAssistantMessage(cline: Task) { progressStatus?: ToolProgressStatus, isProtected?: boolean, ) => { + // Concurrent workers route tool-approval through the leader permission bridge + // instead of surfacing a UI prompt on the worker task itself. + if (type === "tool" && cline.parentTaskId) { + type PermissionProvider = { + getTaskById?: (id: string) => unknown + swarmRegistry?: { + getSessionForTask(taskId: string): + | { + sessionId: string + teammates: Record< + string, + { agentName: string; color: import("@roo-code/types").AgentColorName } + > + } + | undefined + } + } + const provider = cline.providerRef.deref() as PermissionProvider | undefined + const parentIsAlive = provider?.getTaskById?.(cline.parentTaskId) !== undefined + if (parentIsAlive) { + const session = provider?.swarmRegistry?.getSessionForTask(cline.taskId) + const identity = session?.teammates[cline.taskId] + const agentName = identity?.agentName ?? "Worker" + const color = identity?.color ?? ("blue" as import("@roo-code/types").AgentColorName) + const toolName = String(block.name ?? "tool") + const allowed = await submitWorkerPermissionRequest( + cline.taskId, + agentName, + color, + toolName, + partialMessage ?? 
"", + ) + if (!allowed) { + pushToolResult(formatResponse.toolDenied()) + cline.didRejectTool = true + return false + } + return true + } + } + const { response, text, images } = await cline.ask( type, partialMessage, @@ -812,6 +861,24 @@ export async function presentAssistantMessage(cline: Task) { toolCallId: block.id, }) break + case "spawn_parallel_tasks": + await checkpointSaveAndMark(cline) + await spawnParallelTasksTool.handle(cline, block as ToolUse<"spawn_parallel_tasks">, { + askApproval, + handleError, + pushToolResult, + toolCallId: block.id, + }) + break + case "run_team_phase": + await checkpointSaveAndMark(cline) + await runTeamPhaseTool.handle(cline, block as ToolUse<"run_team_phase">, { + askApproval, + handleError, + pushToolResult, + toolCallId: block.id, + }) + break case "attempt_completion": { const completionCallbacks: AttemptCompletionCallbacks = { askApproval, diff --git a/src/core/condense/__tests__/microCompact.spec.ts b/src/core/condense/__tests__/microCompact.spec.ts new file mode 100644 index 00000000000..f9a1ae513b9 --- /dev/null +++ b/src/core/condense/__tests__/microCompact.spec.ts @@ -0,0 +1,609 @@ +// npx vitest src/core/condense/__tests__/microCompact.spec.ts + +import { Anthropic } from "@anthropic-ai/sdk" + +import { ApiMessage } from "../../task-persistence/apiMessages" +import { + microcompactMessages, + estimateMicrocompactSavings, + DEFAULT_MICROCOMPACT_CONFIG, + MICROCOMPACT_CLEARED_MESSAGE, + type MicrocompactConfig, +} from "../microCompact" + +describe("Microcompact", () => { + describe("microcompactMessages", () => { + it("should return messages unchanged when disabled", () => { + const messages: ApiMessage[] = [ + { + role: "assistant", + content: [ + { + type: "tool_use", + id: "tool-1", + name: "read_file", + input: { path: "test.txt" }, + }, + ], + }, + { + role: "user", + content: [ + { + type: "tool_result", + tool_use_id: "tool-1", + content: "This is a long file content that should be cleared", + }, + ], + }, + 
] + + const result = microcompactMessages(messages, { enabled: false }) + + expect(result.messages).toEqual(messages) + expect(result.tokensSaved).toBe(0) + expect(result.toolsCleared).toBe(0) + expect(result.toolsKept).toBe(0) + }) + + it("should return messages unchanged when there are not enough tools", () => { + const messages: ApiMessage[] = [ + { + role: "assistant", + content: [ + { + type: "tool_use", + id: "tool-1", + name: "read_file", + input: { path: "test.txt" }, + }, + ], + }, + { + role: "user", + content: [ + { + type: "tool_result", + tool_use_id: "tool-1", + content: "This is a long file content that should be cleared", + }, + ], + }, + ] + + const result = microcompactMessages(messages, { threshold: 5, keepRecent: 3 }) + + expect(result.messages).toEqual(messages) + expect(result.tokensSaved).toBe(0) + expect(result.toolsCleared).toBe(0) + expect(result.toolsKept).toBe(1) + }) + + it("should clear old tool results when threshold is exceeded", () => { + const messages: ApiMessage[] = [ + { + role: "assistant", + content: [ + { + type: "tool_use", + id: "tool-1", + name: "read_file", + input: { path: "file1.txt" }, + }, + ], + }, + { + role: "user", + content: [ + { + type: "tool_result", + tool_use_id: "tool-1", + content: "Content of file 1", + }, + ], + }, + { + role: "assistant", + content: [ + { + type: "tool_use", + id: "tool-2", + name: "read_file", + input: { path: "file2.txt" }, + }, + ], + }, + { + role: "user", + content: [ + { + type: "tool_result", + tool_use_id: "tool-2", + content: "Content of file 2", + }, + ], + }, + { + role: "assistant", + content: [ + { + type: "tool_use", + id: "tool-3", + name: "read_file", + input: { path: "file3.txt" }, + }, + ], + }, + { + role: "user", + content: [ + { + type: "tool_result", + tool_use_id: "tool-3", + content: "Content of file 3", + }, + ], + }, + { + role: "assistant", + content: [ + { + type: "tool_use", + id: "tool-4", + name: "read_file", + input: { path: "file4.txt" }, + }, + ], + }, 
+ { + role: "user", + content: [ + { + type: "tool_result", + tool_use_id: "tool-4", + content: "Content of file 4", + }, + ], + }, + { + role: "assistant", + content: [ + { + type: "tool_use", + id: "tool-5", + name: "read_file", + input: { path: "file5.txt" }, + }, + ], + }, + { + role: "user", + content: [ + { + type: "tool_result", + tool_use_id: "tool-5", + content: "Content of file 5", + }, + ], + }, + ] + + const result = microcompactMessages(messages, { threshold: 3, keepRecent: 2 }) + + expect(result.toolsCleared).toBe(3) + expect(result.toolsKept).toBe(2) + expect(result.tokensSaved).toBeGreaterThan(0) + + // Check that the last 2 tool results are preserved + const lastUserMessage = result.messages[result.messages.length - 1] + if (Array.isArray(lastUserMessage.content)) { + const toolResult = lastUserMessage.content[0] as Anthropic.Messages.ToolResultBlockParam + expect(toolResult.content).toBe("Content of file 5") + } + }) + + it("should only compact compactable tools", () => { + const messages: ApiMessage[] = [ + { + role: "assistant", + content: [ + { + type: "tool_use", + id: "tool-1", + name: "read_file", + input: { path: "test.txt" }, + }, + { + type: "tool_use", + id: "tool-2", + name: "non_compactable_tool", + input: { data: "test" }, + }, + ], + }, + { + role: "user", + content: [ + { + type: "tool_result", + tool_use_id: "tool-1", + content: "Content of file 1", + }, + { + type: "tool_result", + tool_use_id: "tool-2", + content: "Content of non-compactable tool", + }, + ], + }, + ] + + const result = microcompactMessages(messages, { threshold: 1, keepRecent: 0 }) + + // Only the compactable tool should be cleared + expect(result.toolsCleared).toBe(1) + expect(result.toolsKept).toBe(0) + + // Check that the non-compactable tool is not cleared + const userMessage = result.messages[1] + if (Array.isArray(userMessage.content)) { + const toolResult2 = userMessage.content[1] as Anthropic.Messages.ToolResultBlockParam + 
expect(toolResult2.content).toBe("Content of non-compactable tool") + } + }) + + it("should handle array content in tool results", () => { + const messages: ApiMessage[] = [ + { + role: "assistant", + content: [ + { + type: "tool_use", + id: "tool-1", + name: "read_file", + input: { path: "test.txt" }, + }, + ], + }, + { + role: "user", + content: [ + { + type: "tool_result", + tool_use_id: "tool-1", + content: [ + { type: "text", text: "Text content" }, + { + type: "image", + source: { + type: "base64", + media_type: "image/png", + data: "base64data", + }, + }, + ], + }, + ], + }, + ] + + const result = microcompactMessages(messages, { threshold: 1, keepRecent: 0 }) + + expect(result.toolsCleared).toBe(1) + expect(result.tokensSaved).toBeGreaterThan(0) + + // Check that the content was cleared + const userMessage = result.messages[1] + if (Array.isArray(userMessage.content)) { + const toolResult = userMessage.content[0] as Anthropic.Messages.ToolResultBlockParam + expect(toolResult.content).toBe(MICROCOMPACT_CLEARED_MESSAGE.replace("{toolName}", "read_file")) + } + }) + + it("should not double-clear already cleared content", () => { + const clearedMessage = MICROCOMPACT_CLEARED_MESSAGE.replace("{toolName}", "read_file") + const messages: ApiMessage[] = [ + { + role: "assistant", + content: [ + { + type: "tool_use", + id: "tool-1", + name: "read_file", + input: { path: "test.txt" }, + }, + ], + }, + { + role: "user", + content: [ + { + type: "tool_result", + tool_use_id: "tool-1", + content: clearedMessage, + }, + ], + }, + ] + + const result = microcompactMessages(messages, { threshold: 1, keepRecent: 0 }) + + // Should not count as clearing since it's already cleared + expect(result.toolsCleared).toBe(0) + expect(result.tokensSaved).toBe(0) + }) + + it("should use default config when no config is provided", () => { + const messages: ApiMessage[] = [] + + const result = microcompactMessages(messages) + + expect(result.messages).toEqual(messages) + 
expect(result.tokensSaved).toBe(0) + }) + + it("should preserve message structure and metadata", () => { + const messages: ApiMessage[] = [ + { + role: "assistant", + content: [ + { + type: "tool_use", + id: "tool-1", + name: "read_file", + input: { path: "test.txt" }, + }, + ], + ts: 1234567890, + }, + { + role: "user", + content: [ + { + type: "tool_result", + tool_use_id: "tool-1", + content: "Content to be cleared", + }, + ], + ts: 1234567891, + id: "user-msg-1", + }, + ] + + const result = microcompactMessages(messages, { threshold: 1, keepRecent: 0 }) + + // Check that message structure is preserved + expect(result.messages[0].ts).toBe(1234567890) + expect(result.messages[1].ts).toBe(1234567891) + expect(result.messages[1].id).toBe("user-msg-1") + }) + }) + + describe("estimateMicrocompactSavings", () => { + it("should return 0 when disabled", () => { + const messages: ApiMessage[] = [ + { + role: "assistant", + content: [ + { + type: "tool_use", + id: "tool-1", + name: "read_file", + input: { path: "test.txt" }, + }, + ], + }, + { + role: "user", + content: [ + { + type: "tool_result", + tool_use_id: "tool-1", + content: "This is a long file content", + }, + ], + }, + ] + + const savings = estimateMicrocompactSavings(messages, { enabled: false }) + + expect(savings).toBe(0) + }) + + it("should return 0 when there are not enough tools", () => { + const messages: ApiMessage[] = [ + { + role: "assistant", + content: [ + { + type: "tool_use", + id: "tool-1", + name: "read_file", + input: { path: "test.txt" }, + }, + ], + }, + { + role: "user", + content: [ + { + type: "tool_result", + tool_use_id: "tool-1", + content: "This is a long file content", + }, + ], + }, + ] + + const savings = estimateMicrocompactSavings(messages, { threshold: 5, keepRecent: 3 }) + + expect(savings).toBe(0) + }) + + it("should estimate token savings correctly", () => { + const messages: ApiMessage[] = [ + { + role: "assistant", + content: [ + { + type: "tool_use", + id: "tool-1", + 
name: "read_file", + input: { path: "file1.txt" }, + }, + ], + }, + { + role: "user", + content: [ + { + type: "tool_result", + tool_use_id: "tool-1", + content: "Content of file 1", + }, + ], + }, + { + role: "assistant", + content: [ + { + type: "tool_use", + id: "tool-2", + name: "read_file", + input: { path: "file2.txt" }, + }, + ], + }, + { + role: "user", + content: [ + { + type: "tool_result", + tool_use_id: "tool-2", + content: "Content of file 2", + }, + ], + }, + { + role: "assistant", + content: [ + { + type: "tool_use", + id: "tool-3", + name: "read_file", + input: { path: "file3.txt" }, + }, + ], + }, + { + role: "user", + content: [ + { + type: "tool_result", + tool_use_id: "tool-3", + content: "Content of file 3", + }, + ], + }, + { + role: "assistant", + content: [ + { + type: "tool_use", + id: "tool-4", + name: "read_file", + input: { path: "file4.txt" }, + }, + ], + }, + { + role: "user", + content: [ + { + type: "tool_result", + tool_use_id: "tool-4", + content: "Content of file 4", + }, + ], + }, + { + role: "assistant", + content: [ + { + type: "tool_use", + id: "tool-5", + name: "read_file", + input: { path: "file5.txt" }, + }, + ], + }, + { + role: "user", + content: [ + { + type: "tool_result", + tool_use_id: "tool-5", + content: "Content of file 5", + }, + ], + }, + ] + + const savings = estimateMicrocompactSavings(messages, { threshold: 3, keepRecent: 2 }) + + // Should estimate savings for the first 3 tools (tools 1-3) + expect(savings).toBeGreaterThan(0) + }) + + it("should not modify messages when estimating savings", () => { + const messages: ApiMessage[] = [ + { + role: "assistant", + content: [ + { + type: "tool_use", + id: "tool-1", + name: "read_file", + input: { path: "test.txt" }, + }, + ], + }, + { + role: "user", + content: [ + { + type: "tool_result", + tool_use_id: "tool-1", + content: "Original content", + }, + ], + }, + ] + + const originalContent = JSON.stringify(messages) + estimateMicrocompactSavings(messages, { 
threshold: 1, keepRecent: 0 }) + const afterEstimateContent = JSON.stringify(messages) + + expect(originalContent).toBe(afterEstimateContent) + }) + }) + + describe("DEFAULT_MICROCOMPACT_CONFIG", () => { + it("should have correct default values", () => { + expect(DEFAULT_MICROCOMPACT_CONFIG.enabled).toBe(true) + expect(DEFAULT_MICROCOMPACT_CONFIG.threshold).toBe(5) + expect(DEFAULT_MICROCOMPACT_CONFIG.keepRecent).toBe(3) + }) + }) + + describe("MICROCOMPACT_CLEARED_MESSAGE", () => { + it("should contain placeholder for tool name", () => { + expect(MICROCOMPACT_CLEARED_MESSAGE).toContain("{toolName}") + }) + + it("should produce correct message when placeholder is replaced", () => { + const message = MICROCOMPACT_CLEARED_MESSAGE.replace("{toolName}", "read_file") + expect(message).toBe("[Content condensed - tool: read_file]") + }) + }) +}) diff --git a/src/core/condense/__tests__/promptTooLongRetry.spec.ts b/src/core/condense/__tests__/promptTooLongRetry.spec.ts new file mode 100644 index 00000000000..2a32d6badb8 --- /dev/null +++ b/src/core/condense/__tests__/promptTooLongRetry.spec.ts @@ -0,0 +1,248 @@ +import { describe, it, expect, vi, beforeEach } from "vitest" +import { + isPromptTooLongError, + parsePromptTooLongTokenGap, + truncateHeadForPromptTooLongRetry, + groupMessagesByApiRound, + estimateMessageTokens, +} from "../index" +import type { ApiMessage } from "../../task-persistence/apiMessages" + +describe("Prompt-Too-Long Retry", () => { + describe("isPromptTooLongError", () => { + it("should detect Anthropic prompt-too-long error", () => { + expect(isPromptTooLongError("prompt is too long")).toBe(true) + expect(isPromptTooLongError("Prompt is too long")).toBe(true) + expect(isPromptTooLongError("PROMPT IS TOO LONG")).toBe(true) + }) + + it("should detect OpenAI prompt-too-long error", () => { + expect(isPromptTooLongError("maximum context length exceeded")).toBe(true) + expect(isPromptTooLongError("This model's maximum context length is 200000 
tokens")).toBe(true) + }) + + it("should detect generic prompt-too-long errors", () => { + expect(isPromptTooLongError("context length exceeded")).toBe(true) + expect(isPromptTooLongError("too many tokens")).toBe(true) + expect(isPromptTooLongError("tokens exceed limit")).toBe(true) + }) + + it("should not detect other errors", () => { + expect(isPromptTooLongError("rate limit exceeded")).toBe(false) + expect(isPromptTooLongError("invalid API key")).toBe(false) + expect(isPromptTooLongError("network error")).toBe(false) + expect(isPromptTooLongError("")).toBe(false) + }) + }) + + describe("parsePromptTooLongTokenGap", () => { + it("should parse token gap from Anthropic error format", () => { + expect(parsePromptTooLongTokenGap("prompt is too long: 137500 tokens > 135000 maximum")).toBe(2500) + expect(parsePromptTooLongTokenGap("Prompt is too long: 200000 tokens > 100000 maximum")).toBe(100000) + }) + + it("should parse token gap from 'exceeded by' format", () => { + expect(parsePromptTooLongTokenGap("exceeded by 5000 tokens")).toBe(5000) + expect(parsePromptTooLongTokenGap("Context exceeded by 1000 tokens")).toBe(1000) + }) + + it("should return undefined for unparseable messages", () => { + expect(parsePromptTooLongTokenGap("prompt is too long")).toBeUndefined() + expect(parsePromptTooLongTokenGap("rate limit exceeded")).toBeUndefined() + expect(parsePromptTooLongTokenGap("")).toBeUndefined() + }) + + it("should handle case-insensitive matching", () => { + expect(parsePromptTooLongTokenGap("PROMPT IS TOO LONG: 15000 TOKENS > 10000 MAXIMUM")).toBe(5000) + }) + }) + + describe("estimateMessageTokens", () => { + it("should estimate tokens for string content", () => { + const msg: ApiMessage = { + role: "user", + content: "Hello world!", + ts: Date.now(), + } + expect(estimateMessageTokens(msg)).toBeGreaterThan(0) + }) + + it("should estimate tokens for text blocks", () => { + const msg: ApiMessage = { + role: "user", + content: [{ type: "text", text: "This is a test 
message" }], + ts: Date.now(), + } + expect(estimateMessageTokens(msg)).toBeGreaterThan(0) + }) + + it("should estimate tokens for image blocks", () => { + const msg: ApiMessage = { + role: "user", + content: [{ type: "image", source: { type: "base64", media_type: "image/png", data: "abc" } }], + ts: Date.now(), + } + expect(estimateMessageTokens(msg)).toBe(1000) // Fixed estimate for images + }) + + it("should estimate tokens for tool blocks", () => { + const msg: ApiMessage = { + role: "assistant", + content: [{ type: "tool_use", id: "123", name: "test", input: {} }], + ts: Date.now(), + } + expect(estimateMessageTokens(msg)).toBe(100) // Fixed estimate for tool blocks + }) + + it("should estimate tokens for mixed content", () => { + const msg: ApiMessage = { + role: "user", + content: [ + { type: "text", text: "Hello" }, + { type: "text", text: "World" }, + ], + ts: Date.now(), + } + expect(estimateMessageTokens(msg)).toBeGreaterThan(0) + }) + }) + + describe("groupMessagesByApiRound", () => { + it("should group messages by API round", () => { + const messages: ApiMessage[] = [ + { role: "user", content: "User 1", ts: 1 }, + { role: "assistant", content: "Assistant 1", ts: 2 }, + { role: "user", content: "User 2", ts: 3 }, + { role: "assistant", content: "Assistant 2", ts: 4 }, + ] + const groups = groupMessagesByApiRound(messages) + expect(groups).toHaveLength(2) + expect(groups[0]).toHaveLength(2) + expect(groups[1]).toHaveLength(2) + expect(groups[0][0].role).toBe("user") + expect(groups[0][1].role).toBe("assistant") + }) + + it("should handle single user message", () => { + const messages: ApiMessage[] = [{ role: "user", content: "User 1", ts: 1 }] + const groups = groupMessagesByApiRound(messages) + expect(groups).toHaveLength(1) + expect(groups[0]).toHaveLength(1) + }) + + it("should handle assistant-first sequence", () => { + const messages: ApiMessage[] = [ + { role: "assistant", content: "Assistant 1", ts: 1 }, + { role: "user", content: "User 1", ts: 2 
}, + ] + const groups = groupMessagesByApiRound(messages) + expect(groups).toHaveLength(2) + expect(groups[0]).toHaveLength(1) + expect(groups[1]).toHaveLength(1) + }) + + it("should handle empty array", () => { + const groups = groupMessagesByApiRound([]) + expect(groups).toHaveLength(0) + }) + }) + + describe("truncateHeadForPromptTooLongRetry", () => { + const createMessages = (count: number): ApiMessage[] => { + const messages: ApiMessage[] = [] + for (let i = 0; i < count; i++) { + messages.push({ role: "user", content: `User ${i}`, ts: i * 2 }) + messages.push({ role: "assistant", content: `Assistant ${i}`, ts: i * 2 + 1 }) + } + return messages + } + + it("should truncate messages based on token gap", () => { + const messages = createMessages(10) + const errorMessage = "prompt is too long: 5000 tokens > 3000 maximum" + const truncated = truncateHeadForPromptTooLongRetry(messages, errorMessage) + + expect(truncated).not.toBeNull() + expect(truncated!.length).toBeLessThan(messages.length) + expect(truncated!.length).toBeGreaterThan(0) + }) + + it("should truncate 20% of groups when token gap is unparseable", () => { + const messages = createMessages(10) + const errorMessage = "prompt is too long" + const truncated = truncateHeadForPromptTooLongRetry(messages, errorMessage) + + expect(truncated).not.toBeNull() + expect(truncated!.length).toBeLessThan(messages.length) + expect(truncated!.length).toBeGreaterThan(0) + }) + + it("should keep at least one group", () => { + const messages = createMessages(2) + const errorMessage = "prompt is too long: 10000 tokens > 1000 maximum" + const truncated = truncateHeadForPromptTooLongRetry(messages, errorMessage) + + expect(truncated).not.toBeNull() + expect(truncated!.length).toBeGreaterThan(0) + }) + + it("should return null when there are not enough groups", () => { + const messages: ApiMessage[] = [{ role: "user", content: "User 1", ts: 1 }] + const errorMessage = "prompt is too long: 10000 tokens > 1000 maximum" + 
const truncated = truncateHeadForPromptTooLongRetry(messages, errorMessage) + + expect(truncated).toBeNull() + }) + + it("should start with a plain user message after head truncation", () => { + const messages: ApiMessage[] = [ + { role: "assistant", content: "Assistant 1", ts: 1 }, + { role: "user", content: "User 1", ts: 2 }, + { role: "assistant", content: "Assistant 2", ts: 3 }, + ] + const errorMessage = "prompt is too long: 5000 tokens > 3000 maximum" + const truncated = truncateHeadForPromptTooLongRetry(messages, errorMessage) + + expect(truncated).not.toBeNull() + expect(truncated![0].role).toBe("user") + expect(truncated![0].content).toBe("User 1") + }) + + it("should strip previous retry marker before grouping", () => { + const messages: ApiMessage[] = [ + { + role: "user", + content: "[earlier conversation truncated for condensation retry]", + ts: 1, + isMeta: true, + }, + { role: "user", content: "User 1", ts: 2 }, + { role: "assistant", content: "Assistant 1", ts: 3 }, + { role: "user", content: "User 2", ts: 4 }, + { role: "assistant", content: "Assistant 2", ts: 5 }, + ] + const errorMessage = "prompt is too long: 5000 tokens > 3000 maximum" + const truncated = truncateHeadForPromptTooLongRetry(messages, errorMessage) + + expect(truncated).not.toBeNull() + // Should not have duplicate markers + const markers = truncated!.filter( + (m) => m.isMeta && typeof m.content === "string" && m.content.includes("truncated"), + ) + expect(markers.length).toBeLessThanOrEqual(1) + }) + + it("should return null when all messages would be dropped", () => { + const messages: ApiMessage[] = [ + { role: "user", content: "User 1", ts: 1 }, + { role: "assistant", content: "Assistant 1", ts: 2 }, + ] + const errorMessage = "prompt is too long: 100000 tokens > 1000 maximum" + const truncated = truncateHeadForPromptTooLongRetry(messages, errorMessage) + + // Only a single conversation group exists, so nothing can be dropped + // and the function signals this by returning null + expect(truncated).toBeNull() + 
}) + }) +}) diff --git a/src/core/condense/__tests__/sessionMemoryCompact.spec.ts b/src/core/condense/__tests__/sessionMemoryCompact.spec.ts new file mode 100644 index 00000000000..6350a95a7a4 --- /dev/null +++ b/src/core/condense/__tests__/sessionMemoryCompact.spec.ts @@ -0,0 +1,408 @@ +/** + * Tests for Session Memory Compaction + */ + +import { describe, it, expect, beforeEach, vi } from "vitest" +import { ApiMessage } from "../../task-persistence/apiMessages" +import { + adjustIndexToPreserveAPIInvariants, + calculateMessagesToKeepIndex, + trySessionMemoryCompaction, + setSessionMemoryCompactConfig, + resetSessionMemoryCompactConfig, + getSessionMemoryCompactConfig, + DEFAULT_SM_COMPACT_CONFIG, + resetAllSessionMemoryState, +} from "../sessionMemoryCompact" +import { + setSessionMemoryConfig, + resetSessionMemoryState, + estimateMessageTokens, + hasTextBlocks, + setLastSummarizedMessageId, +} from "../sessionMemory" + +describe("Session Memory Compact", () => { + beforeEach(() => { + resetAllSessionMemoryState() + }) + + describe("adjustIndexToPreserveAPIInvariants", () => { + it("should return the same index when at boundaries", () => { + const messages: ApiMessage[] = [ + { role: "user", content: "test1", ts: 1 }, + { role: "assistant", content: [{ type: "text", text: "response1" }], ts: 2 }, + { role: "user", content: "test2", ts: 3 }, + ] + + expect(adjustIndexToPreserveAPIInvariants(messages, 0)).toBe(0) + expect(adjustIndexToPreserveAPIInvariants(messages, 3)).toBe(3) + }) + + it("should preserve tool_use/tool_result pairs", () => { + const messages: ApiMessage[] = [ + { role: "user", content: "old message", ts: 1 }, + { + role: "assistant", + content: [ + { type: "text", text: "I'll read a file" }, + { type: "tool_use", id: "tool_1", name: "read_file", input: { path: "test.txt" } }, + ], + ts: 2, + }, + { + role: "user", + content: [{ type: "tool_result", tool_use_id: "tool_1", content: "file content" }], + ts: 3, + }, + { role: "user", content: "new 
message", ts: 4 }, + ] + + // Starting at index 2 (the tool_result user message): the tool_result needs its + // tool_use at index 1 to be in the kept range — so we extend back to 1. + const adjusted = adjustIndexToPreserveAPIInvariants(messages, 2) + expect(adjusted).toBe(1) + + // Starting at index 3 (plain "new message"): no tool_results in kept range, + // so no adjustment is needed. + const unchanged = adjustIndexToPreserveAPIInvariants(messages, 3) + expect(unchanged).toBe(3) + }) + + it("should preserve thinking blocks with same message.id", () => { + const messages: ApiMessage[] = [ + { role: "user", content: "old message", ts: 1 }, + { + role: "assistant", + id: "msg_1", + content: [{ type: "text", text: "thinking..." }], + ts: 2, + }, + { + role: "assistant", + id: "msg_1", + content: [{ type: "tool_use", id: "tool_1", name: "read_file", input: { path: "test.txt" } }], + ts: 3, + }, + { + role: "user", + content: [{ type: "tool_result", tool_use_id: "tool_1", content: "file content" }], + ts: 4, + }, + { role: "user", content: "new message", ts: 5 }, + ] + + // Starting at index 3 (the tool_result): extend back to 2 (tool_use) for the + // tool_use/tool_result pair, then back to 1 (thinking block with same msg_1 id). + const adjusted = adjustIndexToPreserveAPIInvariants(messages, 3) + expect(adjusted).toBe(1) + }) + }) + + describe("calculateMessagesToKeepIndex", () => { + it("should return 0 for empty messages", () => { + expect(calculateMessagesToKeepIndex([], 0)).toBe(0) + }) + + it("should respect minTokens threshold", () => { + const messages: ApiMessage[] = Array.from({ length: 20 }, (_, i) => ({ + role: i % 2 === 0 ? "user" : "assistant", + content: i % 2 === 0 ? 
`message ${i}` : [{ type: "text", text: `response ${i}` }], + ts: i, + })) + + setSessionMemoryCompactConfig({ minTokens: 100000, minTextBlockMessages: 0 }) + + // With a very high minTokens, should keep all messages + const result = calculateMessagesToKeepIndex(messages, -1) + expect(result).toBe(0) + }) + + it("should respect minTextBlockMessages threshold", () => { + const messages: ApiMessage[] = Array.from({ length: 20 }, (_, i) => ({ + role: i % 2 === 0 ? "user" : "assistant", + content: i % 2 === 0 ? `message ${i}` : [{ type: "text", text: `response ${i}` }], + ts: i, + })) + + setSessionMemoryCompactConfig({ minTokens: 0, minTextBlockMessages: 15 }) + + // With a high minTextBlockMessages, should keep enough messages + const result = calculateMessagesToKeepIndex(messages, -1) + expect(result).toBeLessThan(messages.length) + }) + + it("should respect maxTokens cap", () => { + const messages: ApiMessage[] = Array.from({ length: 100 }, (_, i) => ({ + role: i % 2 === 0 ? "user" : "assistant", + content: i % 2 === 0 ? `message ${i}` : [{ type: "text", text: `response ${i}` }], + ts: i, + })) + + setSessionMemoryCompactConfig({ maxTokens: 100 }) + + // With a low maxTokens, should keep fewer messages + const result = calculateMessagesToKeepIndex(messages, -1) + expect(result).toBeGreaterThan(0) + expect(result).toBeLessThan(messages.length) + }) + + it("should start from lastSummarizedIndex when provided", () => { + const messages: ApiMessage[] = Array.from({ length: 20 }, (_, i) => ({ + role: i % 2 === 0 ? "user" : "assistant", + content: i % 2 === 0 ? 
`message ${i}` : [{ type: "text", text: `response ${i}` }], + ts: i, + })) + + setSessionMemoryCompactConfig({ minTokens: 0, minTextBlockMessages: 0 }) + + // Should start after the last summarized index + const result = calculateMessagesToKeepIndex(messages, 10) + expect(result).toBeGreaterThan(10) + }) + }) + + describe("trySessionMemoryCompaction", () => { + it("should return null when session memory is empty", async () => { + const messages: ApiMessage[] = [{ role: "user", content: "test", ts: 1 }] + + const apiHandler = { + createMessage: vi.fn(), + } as any + + const result = await trySessionMemoryCompaction( + messages, + "", // Empty session memory + apiHandler, + "test-task", + ) + + expect(result).toBeNull() + }) + + it("should return null when session memory matches template", async () => { + const messages: ApiMessage[] = [{ role: "user", content: "test", ts: 1 }] + + const apiHandler = { + createMessage: vi.fn(), + } as any + + // This is the default template + const template = ` +# Session Title +_A short and distinctive 5-10 word descriptive title for the session. Super info dense, no filler_ + +# Current State +_What is actively being worked on right now? Pending tasks not yet completed. Immediate next steps._ + +# Task specification +_What did the user ask to build? Any design decisions or other explanatory context_ + +# Files and Functions +_What are the important files? In short, what do they contain and why are they relevant?_ + +# Workflow +_What bash commands are usually run and in what order? How to interpret their output if not obvious?_ + +# Errors & Corrections +_Errors encountered and how they were fixed. What did the user correct? What approaches failed and should not be tried again?_ + +# Codebase and System Documentation +_What are the important system components? How do they work/fit together?_ + +# Learnings +_What has worked well? What has not? What to avoid? 
Do not duplicate items from other sections_ + +# Key results +_If the user asked a specific output such as an answer to a question, a table, or other document, repeat the exact result here_ + +# Worklog +_Step by step, what was attempted, done? Very terse summary for each step_ +` + + const result = await trySessionMemoryCompaction(messages, template, apiHandler, "test-task") + + expect(result).toBeNull() + }) + + it("should succeed with valid session memory", async () => { + const messages: ApiMessage[] = [ + { role: "user", content: "test", ts: 1, id: "msg_1" }, + { + role: "assistant", + content: [{ type: "text", text: "response" }], + ts: 2, + id: "msg_2", + }, + ] + + const apiHandler = { + createMessage: vi.fn(), + } as any + + const sessionMemory = ` +# Session Title +Test Session + +# Current State +Working on implementing a feature + +# Task specification +Build a new feature for the application + +# Files and Functions +- src/index.ts: Main entry point +- src/utils.ts: Utility functions + +# Workflow +1. Read files +2. Make changes +3. 
Test + +# Errors & Corrections +No errors yet + +# Codebase and System Documentation +The app uses TypeScript and React + +# Learnings +- Use TypeScript for type safety + +# Key results +None yet + +# Worklog +- Started implementation +` + + setLastSummarizedMessageId("msg_1") + + const result = await trySessionMemoryCompaction(messages, sessionMemory, apiHandler, "test-task") + + expect(result).not.toBeNull() + expect(result?.messages).toBeDefined() + expect(result?.summary).toContain("Session Memory") + expect(result?.cost).toBe(0) + expect(result?.condenseId).toBeDefined() + }) + + it("should handle resumed sessions without lastSummarizedMessageId", async () => { + const messages: ApiMessage[] = [ + { role: "user", content: "test", ts: 1, id: "msg_1" }, + { + role: "assistant", + content: [{ type: "text", text: "response" }], + ts: 2, + id: "msg_2", + }, + ] + + const apiHandler = { + createMessage: vi.fn(), + } as any + + const sessionMemory = ` +# Session Title +Test Session + +# Current State +Working on implementing a feature +` + + // Don't set lastSummarizedMessageId - simulating resumed session + const result = await trySessionMemoryCompaction(messages, sessionMemory, apiHandler, "test-task") + + expect(result).not.toBeNull() + }) + }) + + describe("Configuration", () => { + it("should use default configuration", () => { + const config = { + ...DEFAULT_SM_COMPACT_CONFIG, + } + expect(config.minTokens).toBe(10000) + expect(config.minTextBlockMessages).toBe(5) + expect(config.maxTokens).toBe(50000) + }) + + it("should allow configuration updates", () => { + setSessionMemoryCompactConfig({ + minTokens: 15000, + maxTokens: 60000, + }) + + const config = getSessionMemoryCompactConfig() + expect(config.minTokens).toBe(15000) + expect(config.maxTokens).toBe(60000) + }) + + it("should reset configuration", () => { + setSessionMemoryCompactConfig({ + minTokens: 15000, + }) + + resetSessionMemoryCompactConfig() + + const config = { + ...DEFAULT_SM_COMPACT_CONFIG, + 
} + expect(config.minTokens).toBe(10000) + }) + }) + + describe("Session Memory Utils", () => { + beforeEach(() => { + resetSessionMemoryState() + }) + + it("should estimate message tokens correctly", () => { + const messages: ApiMessage[] = [ + { role: "user", content: "This is a test message with some text", ts: 1 }, + { + role: "assistant", + content: [ + { type: "text", text: "This is a response" }, + { type: "tool_use", id: "tool_1", name: "read_file", input: { path: "test.txt" } }, + ], + ts: 2, + }, + ] + + const tokens = estimateMessageTokens(messages) + expect(tokens).toBeGreaterThan(0) + }) + + it("should detect messages with text blocks", () => { + const messageWithText: ApiMessage = { + role: "assistant", + content: [{ type: "text", text: "response" }], + ts: 1, + } + + const messageWithoutText: ApiMessage = { + role: "assistant", + content: [{ type: "tool_use", id: "tool_1", name: "test", input: {} }], + ts: 2, + } + + expect(hasTextBlocks(messageWithText)).toBe(true) + expect(hasTextBlocks(messageWithoutText)).toBe(false) + }) + + it("should handle session memory configuration", () => { + setSessionMemoryConfig({ + minimumMessageTokensToInit: 15000, + minimumTokensBetweenUpdate: 6000, + toolCallsBetweenUpdates: 5, + }) + + const config = { + ...DEFAULT_SM_COMPACT_CONFIG, + } + // Session memory config is separate from compact config + // Just verify the function doesn't throw + expect(() => setSessionMemoryConfig({})).not.toThrow() + }) + }) +}) diff --git a/src/core/condense/index.ts b/src/core/condense/index.ts index 0438bf6bcb1..5354c2e78f2 100644 --- a/src/core/condense/index.ts +++ b/src/core/condense/index.ts @@ -11,8 +11,15 @@ import { findLast } from "../../shared/array" import { supportPrompt } from "../../shared/support-prompt" import { RooIgnoreController } from "../ignore/RooIgnoreController" import { generateFoldedFileContext } from "./foldedFileContext" +import type { HookSystem } from "../hooks" export type { FoldedFileContextResult, 
FoldedFileContextOptions } from "./foldedFileContext" +export * from "./microCompact" +export * from "./sessionMemory" +export * from "./sessionMemoryCompact" + +// Export helper functions for testing +export { groupMessagesByApiRound, estimateMessageTokens } /** * Converts a tool_use block to a text representation. @@ -111,6 +118,182 @@ export function transformMessagesForCondensing< export const MIN_CONDENSE_THRESHOLD = 5 // Minimum percentage of context window to trigger condensing export const MAX_CONDENSE_THRESHOLD = 100 // Maximum percentage of context window to trigger condensing +// Prompt-too-long retry configuration +const MAX_PTL_RETRIES = 2 // Maximum number of retry attempts when condensation hits prompt-too-long error +const PTL_RETRY_MARKER = "[earlier conversation truncated for condensation retry]" + +/** + * Checks if an error message indicates a "prompt too long" error from the API. + * This handles multiple providers that may return different error messages. + * + * @param errorMessage - The error message to check + * @returns True if this is a prompt-too-long error, false otherwise + */ +export function isPromptTooLongError(errorMessage: string): boolean { + const lowerMessage = errorMessage.toLowerCase() + // Common patterns across providers: + // - Anthropic: "prompt is too long" + // - OpenAI: "This model's maximum context length is N tokens" / "maximum context length exceeded" + // - Generic: "context length exceeded", "too many tokens" + return ( + lowerMessage.includes("prompt is too long") || + lowerMessage.includes("maximum context length") || + lowerMessage.includes("context length exceeded") || + lowerMessage.includes("too many tokens") || + lowerMessage.includes("tokens exceed") + ) +} + +/** + * Parses the token gap from a prompt-too-long error message. + * Returns the number of tokens over the limit, or undefined if unparseable. 
+ * + * @param errorMessage - The error message from the API + * @returns The number of tokens over the limit, or undefined + */ +export function parsePromptTooLongTokenGap(errorMessage: string): number | undefined { + // Try to match patterns like "137500 tokens > 135000 maximum" or "exceeded by 2500 tokens" + const match = + errorMessage.match(/(\d+)\s*tokens?\s*>\s*(\d+)/i) || errorMessage.match(/exceeded\s+by\s+(\d+)\s*tokens?/i) + if (match) { + const actual = parseInt(match[1], 10) + const limit = match[2] ? parseInt(match[2], 10) : 0 // "exceeded by N tokens": N is already the gap + return Math.max(0, actual - limit) + } + return undefined +} + +/** + * Groups messages by API round (user message + assistant response). + * This is used for intelligent truncation that removes complete conversation turns. + * + * @param messages - The messages to group + * @returns Array of message groups, where each group represents one API round + */ +function groupMessagesByApiRound(messages: ApiMessage[]): ApiMessage[][] { + const groups: ApiMessage[][] = [] + let currentGroup: ApiMessage[] = [] + + for (const msg of messages) { + if (msg.role === "user") { + // Start a new group when we see a user message + if (currentGroup.length > 0) { + groups.push(currentGroup) + } + currentGroup = [msg] + } else { + // Add assistant messages to the current group + currentGroup.push(msg) + } + } + + // Don't forget the last group + if (currentGroup.length > 0) { + groups.push(currentGroup) + } + + return groups +} + +/** + * Estimates the token count for a message. + * This is a rough estimation used for determining how many groups to drop. 
+ * + * @param msg - The message to estimate tokens for + * @returns Estimated token count + */ +function estimateMessageTokens(msg: ApiMessage): number { + const content = msg.content + if (typeof content === "string") { + // Rough estimate: ~4 characters per token + return Math.ceil(content.length / 4) + } else if (Array.isArray(content)) { + let total = 0 + for (const block of content) { + if (block.type === "text") { + total += Math.ceil(block.text.length / 4) + } else if (block.type === "image") { + // Images are expensive, estimate conservatively + total += 1000 + } else { + // Other blocks (tool_use, tool_result, etc.) + total += 100 + } + } + return total + } + return 0 +} + +/** + * Truncates messages from the head (beginning) to reduce token count when + * condensation hits a prompt-too-long error. This is a fallback mechanism + * when the condensed context itself is still too large. + * + * @param messages - The messages to truncate + * @param errorMessage - The error message that triggered the retry + * @returns Truncated messages, or null if nothing can be dropped + */ +export function truncateHeadForPromptTooLongRetry(messages: ApiMessage[], errorMessage: string): ApiMessage[] | null { + // Strip our own synthetic marker from a previous retry before grouping + const input = + messages[0]?.role === "user" && + messages[0]?.isMeta && + typeof messages[0].content === "string" && + messages[0].content === PTL_RETRY_MARKER + ? 
messages.slice(1) + : messages + + const groups = groupMessagesByApiRound(input) + if (groups.length < 2) { + return null // Not enough groups to drop + } + + const tokenGap = parsePromptTooLongTokenGap(errorMessage) + let dropCount: number + + if (tokenGap !== undefined && tokenGap > 0) { + // Calculate how many groups to drop to cover the token gap + let acc = 0 + dropCount = 0 + for (const group of groups) { + const groupTokens = group.reduce((sum, msg) => sum + estimateMessageTokens(msg), 0) + acc += groupTokens + dropCount++ + if (acc >= tokenGap) { + break + } + } + } else { + // Fallback: drop 20% of groups when we can't parse the gap + dropCount = Math.max(1, Math.floor(groups.length * 0.2)) + } + + // Keep at least one group so there's something to summarize + dropCount = Math.min(dropCount, groups.length - 1) + if (dropCount < 1) { + return null + } + + const sliced = groups.slice(dropCount).flat() + + // If the first message is an assistant message, prepend a synthetic user marker + // to ensure the conversation starts with a user message (API requirement) + if (sliced[0]?.role === "assistant") { + return [ + { + role: "user", + content: PTL_RETRY_MARKER, + ts: Date.now(), + isMeta: true, + }, + ...sliced, + ] + } + + return sliced +} + const SUMMARY_PROMPT = `You are a helpful AI assistant tasked with summarizing conversations. CRITICAL: This is a summarization-only request. DO NOT call any tools or functions. 
@@ -233,6 +416,12 @@ export type SummarizeConversationOptions = { filesReadByRoo?: string[] cwd?: string rooIgnoreController?: RooIgnoreController + /** Optional hook system for executing pre/post compact hooks */ + hookSystem?: HookSystem + /** Whether prompt-too-long retry is enabled */ + promptTooLongRetryEnabled?: boolean + /** Maximum number of retry attempts when condensation hits prompt-too-long error */ + promptTooLongMaxRetries?: number } /** @@ -266,6 +455,9 @@ export async function summarizeConversation(options: SummarizeConversationOption filesReadByRoo, cwd, rooIgnoreController, + hookSystem, + promptTooLongRetryEnabled = true, + promptTooLongMaxRetries = MAX_PTL_RETRIES, } = options TelemetryService.instance.captureContextCondensed( taskId, @@ -276,7 +468,7 @@ export async function summarizeConversation(options: SummarizeConversationOption const response: SummarizeResponse = { messages, cost: 0, summary: "" } // Get messages to summarize (all messages since the last summary, if any) - const messagesToSummarize = getMessagesSinceLastSummary(messages) + let messagesToSummarize = getMessagesSinceLastSummary(messages) if (messagesToSummarize.length <= 1) { const error = @@ -294,32 +486,35 @@ export async function summarizeConversation(options: SummarizeConversationOption return { ...response, error } } + // Execute pre-compact hooks if hook system is provided + let effectiveCustomCondensingPrompt = customCondensingPrompt + if (hookSystem) { + const trigger: "manual" | "auto" = isAutomaticTrigger ? 
"auto" : "manual" + const preCompactResult = await hookSystem.executePreCompactHooks(trigger, customCondensingPrompt || null, { + cwd, + taskId, + }) + + // Use custom instructions from hooks if provided + if (preCompactResult.newCustomInstructions) { + effectiveCustomCondensingPrompt = preCompactResult.newCustomInstructions + } + + // Log hook results + if (preCompactResult.userMessage) { + console.log(`[PreCompact Hooks] ${preCompactResult.userMessage}`) + } + } + // Use custom prompt if provided and non-empty, otherwise use the default CONDENSE prompt // This respects user's custom condensing prompt setting - const condenseInstructions = customCondensingPrompt?.trim() || supportPrompt.default.CONDENSE + const condenseInstructions = effectiveCustomCondensingPrompt?.trim() || supportPrompt.default.CONDENSE const finalRequestMessage: Anthropic.MessageParam = { role: "user", content: condenseInstructions, } - // Inject synthetic tool_results for orphan tool_calls to prevent API rejections - // (e.g., when user triggers condense after receiving attempt_completion but before responding) - const messagesWithToolResults = injectSyntheticToolResults(messagesToSummarize) - - // Transform tool_use and tool_result blocks to text representations. - // This is necessary because some providers (like Bedrock via LiteLLM) require the `tools` parameter - // when tool blocks are present. By converting them to text, we can send the conversation for - // summarization without needing to pass the tools parameter. 
- const messagesWithTextToolBlocks = transformMessagesForCondensing( - maybeRemoveImageBlocks([...messagesWithToolResults, finalRequestMessage], apiHandler), - ) - - const requestMessages = messagesWithTextToolBlocks.map(({ role, content }) => ({ role, content })) - - // Note: this doesn't need to be a stream, consider using something like apiHandler.completePrompt - const promptToUse = SUMMARY_PROMPT - // Validate that the API handler supports message creation if (!apiHandler || typeof apiHandler.createMessage !== "function") { console.error("API handler is invalid for condensing. Cannot proceed.") @@ -327,61 +522,117 @@ export async function summarizeConversation(options: SummarizeConversationOption return { ...response, error } } + // Note: this doesn't need to be a stream, consider using something like apiHandler.completePrompt + const promptToUse = SUMMARY_PROMPT + let summary = "" let cost = 0 let outputTokens = 0 + let ptlAttempts = 0 // Prompt-too-long retry attempts - try { - const stream = apiHandler.createMessage(promptToUse, requestMessages, metadata) + // Retry loop for prompt-too-long errors + while (true) { + // Inject synthetic tool_results for orphan tool_calls to prevent API rejections + // (e.g., when user triggers condense after receiving attempt_completion but before responding) + const messagesWithToolResults = injectSyntheticToolResults(messagesToSummarize) - for await (const chunk of stream) { - if (chunk.type === "text") { - summary += chunk.text - } else if (chunk.type === "usage") { - // Record final usage chunk only - cost = chunk.totalCost ?? 0 - outputTokens = chunk.outputTokens ?? 0 - } - } - } catch (error) { - console.error("Error during condensing API call:", error) - const errorMessage = error instanceof Error ? 
error.message : String(error) - - // Capture detailed error information for debugging - let errorDetails = "" - if (error instanceof Error) { - errorDetails = `Error: ${error.message}` - // Capture any additional API error properties - const anyError = error as unknown as Record - if (anyError.status) { - errorDetails += `\n\nHTTP Status: ${anyError.status}` - } - if (anyError.code) { - errorDetails += `\nError Code: ${anyError.code}` + // Transform tool_use and tool_result blocks to text representations. + // This is necessary because some providers (like Bedrock via LiteLLM) require the `tools` parameter + // when tool blocks are present. By converting them to text, we can send the conversation for + // summarization without needing to pass the tools parameter. + const messagesWithTextToolBlocks = transformMessagesForCondensing( + maybeRemoveImageBlocks([...messagesWithToolResults, finalRequestMessage], apiHandler), + ) + + const requestMessages = messagesWithTextToolBlocks.map(({ role, content }) => ({ role, content })) + + try { + const stream = apiHandler.createMessage(promptToUse, requestMessages, metadata) + + for await (const chunk of stream) { + if (chunk.type === "text") { + summary += chunk.text + } else if (chunk.type === "usage") { + // Record final usage chunk only + cost = chunk.totalCost ?? 0 + outputTokens = chunk.outputTokens ?? 0 + } } - if (anyError.response) { - try { - errorDetails += `\n\nAPI Response:\n${JSON.stringify(anyError.response, null, 2)}` - } catch { - errorDetails += `\n\nAPI Response: [Unable to serialize]` + // Success! Break out of the retry loop + break + } catch (error) { + console.error("Error during condensing API call:", error) + const errorMessage = error instanceof Error ? error.message : String(error) + + // Check if this is a prompt-too-long error and retry is enabled + if (promptTooLongRetryEnabled && isPromptTooLongError(errorMessage)) { + ptlAttempts++ + console.log( + `[Condense] Prompt too long error detected. 
Retry attempt ${ptlAttempts}/${promptTooLongMaxRetries}`, + ) + + if (ptlAttempts <= promptTooLongMaxRetries) { + // Try to truncate messages and retry + const truncated = truncateHeadForPromptTooLongRetry(messagesToSummarize, errorMessage) + if (truncated) { + const droppedMessages = messagesToSummarize.length - truncated.length + console.log( + `[Condense] Truncated ${droppedMessages} messages for retry. ${truncated.length} messages remaining.`, + ) + messagesToSummarize = truncated + continue // Retry with truncated messages + } else { + console.log( + `[Condense] Cannot truncate further. Not enough messages to drop after ${ptlAttempts} attempts.`, + ) + } + } else { + console.log(`[Condense] Max retry attempts (${promptTooLongMaxRetries}) exceeded. Giving up.`) } } - if (anyError.body) { - try { - errorDetails += `\n\nResponse Body:\n${JSON.stringify(anyError.body, null, 2)}` - } catch { - errorDetails += `\n\nResponse Body: [Unable to serialize]` + + // If we get here, either: + // 1. This is not a prompt-too-long error + // 2. Retry is disabled + // 3. We've exhausted all retry attempts + // 4. 
We cannot truncate further
+
+			// Capture detailed error information for debugging
+			let errorDetails = ""
+			if (error instanceof Error) {
+				errorDetails = `Error: ${error.message}`
+				// Capture any additional API error properties
+				const anyError = error as unknown as Record<string, unknown>
+				if (anyError.status) {
+					errorDetails += `\n\nHTTP Status: ${anyError.status}`
+				}
+				if (anyError.code) {
+					errorDetails += `\nError Code: ${anyError.code}`
 				}
+				if (anyError.response) {
+					try {
+						errorDetails += `\n\nAPI Response:\n${JSON.stringify(anyError.response, null, 2)}`
+					} catch {
+						errorDetails += `\n\nAPI Response: [Unable to serialize]`
+					}
+				}
+				if (anyError.body) {
+					try {
+						errorDetails += `\n\nResponse Body:\n${JSON.stringify(anyError.body, null, 2)}`
+					} catch {
+						errorDetails += `\n\nResponse Body: [Unable to serialize]`
+					}
+				}
+			} else {
+				errorDetails = String(error)
 			}
-		} else {
-			errorDetails = String(error)
-		}
 
-		return {
-			...response,
-			cost,
-			error: t("common:errors.condense_api_failed", { message: errorMessage }),
-			errorDetails,
+			return {
+				...response,
+				cost,
+				error: t("common:errors.condense_api_failed", { message: errorMessage }),
+				errorDetails,
+			}
 		}
 	}
 
@@ -506,6 +757,30 @@ ${commandBlocks}
 	}
 
 	const newContextTokens = messageTokens + toolTokens
+
+	// Execute post-compact hooks if hook system is provided
+	if (hookSystem) {
+		const trigger: "manual" | "auto" = isAutomaticTrigger ? "auto" : "manual"
+		// Estimate previous token count (before condensation)
+		const prevContextBlocks = messages.flatMap((message) =>
+			typeof message.content === "string" ?
[{ text: message.content, type: "text" as const }] : message.content, + ) + const prevContextTokens = await apiHandler.countTokens(prevContextBlocks) + + const postCompactResult = await hookSystem.executePostCompactHooks( + trigger, + summary, + prevContextTokens, + newContextTokens, + { cwd, taskId }, + ) + + // Log hook results + if (postCompactResult.userMessage) { + console.log(`[PostCompact Hooks] ${postCompactResult.userMessage}`) + } + } + return { messages: newMessages, summary, cost, newContextTokens, condenseId } } diff --git a/src/core/condense/microCompact.ts b/src/core/condense/microCompact.ts new file mode 100644 index 00000000000..79c484cf533 --- /dev/null +++ b/src/core/condense/microCompact.ts @@ -0,0 +1,287 @@ +import { Anthropic } from "@anthropic-ai/sdk" +import { ApiMessage } from "../task-persistence/apiMessages" + +/** + * Configuration for microcompact feature. + */ +export type MicrocompactConfig = { + /** Master switch. When false, microcompact is a no-op. */ + enabled: boolean + /** Number of messages back to start clearing tool results. */ + threshold: number + /** Keep this many most-recent compactable tool results. */ + keepRecent: number +} + +/** + * Default configuration for microcompact. + */ +export const DEFAULT_MICROCOMPACT_CONFIG: MicrocompactConfig = { + enabled: true, + threshold: 5, + keepRecent: 3, +} + +/** + * Message cleared placeholder text. + */ +export const MICROCOMPACT_CLEARED_MESSAGE = "[Content condensed - tool: {toolName}]" + +/** + * Tools that are eligible for microcompaction. + * These tools produce large output that can be safely cleared after some time. + */ +const COMPACTABLE_TOOLS = new Set([ + "read_file", + "execute_command", + "search_files", + "list_files", + "list_code_definition_names", + "codebase_search", + "browser_action", +]) + +/** + * Result of a microcompact operation. 
+ */ +export type MicrocompactResult = { + messages: ApiMessage[] + tokensSaved: number + toolsCleared: number + toolsKept: number +} + +/** + * Helper to estimate token count for text content. + * This is a rough approximation - actual token counting should use the API handler. + */ +function estimateTextTokens(text: string): number { + // Rough approximation: ~4 characters per token for English text + return Math.ceil(text.length / 4) +} + +/** + * Helper to calculate tool result tokens. + */ +function calculateToolResultTokens(block: Anthropic.Messages.ToolResultBlockParam): number { + if (!block.content) { + return 0 + } + + if (typeof block.content === "string") { + return estimateTextTokens(block.content) + } + + // Array of TextBlockParam | ImageBlockParam | DocumentBlockParam + const contentArray = block.content as Array< + Anthropic.Messages.TextBlockParam | Anthropic.Messages.ImageBlockParam | Anthropic.Messages.DocumentBlockParam + > + return contentArray.reduce((sum, item) => { + if (item.type === "text") { + return sum + estimateTextTokens(item.text) + } + // Images/documents are approximately 2000 tokens regardless of format + if (item.type === "image" || item.type === "document") { + return sum + 2000 + } + return sum + }, 0) +} + +/** + * Walk messages and collect tool_use IDs whose tool name is in COMPACTABLE_TOOLS, + * in encounter order. + */ +function collectCompactableToolIds(messages: ApiMessage[]): Array<{ id: string; toolName: string }> { + const ids: Array<{ id: string; toolName: string }> = [] + for (const message of messages) { + if (message.role === "assistant" && Array.isArray(message.content)) { + for (const block of message.content) { + if (block.type === "tool_use" && COMPACTABLE_TOOLS.has(block.name)) { + ids.push({ id: block.id, toolName: block.name }) + } + } + } + } + return ids +} + +/** + * Build a map from tool_use_id to tool name for all compactable tools. 
+ */
+function buildToolNameMap(messages: ApiMessage[]): Map<string, string> {
+	const toolNameMap = new Map<string, string>()
+	for (const message of messages) {
+		if (message.role === "assistant" && Array.isArray(message.content)) {
+			for (const block of message.content) {
+				if (block.type === "tool_use" && COMPACTABLE_TOOLS.has(block.name)) {
+					toolNameMap.set(block.id, block.name)
+				}
+			}
+		}
+	}
+	return toolNameMap
+}
+
+/**
+ * Get the cleared message placeholder for a specific tool.
+ */
+function getClearedMessage(toolName: string): string {
+	return MICROCOMPACT_CLEARED_MESSAGE.replace("{toolName}", toolName)
+}
+
+/**
+ * Microcompact conversation messages by clearing old tool result content.
+ *
+ * This function identifies compactable tool results (file reads, shell output, grep results, etc.)
+ * and replaces their content with a placeholder message. Only tool results older than the
+ * configured threshold are cleared, preserving the most recent N results for context.
+ *
+ * @param messages - The conversation messages to microcompact
+ * @param config - The microcompact configuration
+ * @returns The microcompact result with modified messages and statistics
+ */
+export function microcompactMessages(
+	messages: ApiMessage[],
+	config: Partial<MicrocompactConfig> = {},
+): MicrocompactResult {
+	const finalConfig: MicrocompactConfig = {
+		...DEFAULT_MICROCOMPACT_CONFIG,
+		...config,
+	}
+
+	// If disabled, return messages unchanged
+	if (!finalConfig.enabled) {
+		return { messages, tokensSaved: 0, toolsCleared: 0, toolsKept: 0 }
+	}
+
+	// Collect all compactable tool IDs in encounter order
+	const compactableTools = collectCompactableToolIds(messages)
+
+	// If there are fewer than threshold + keepRecent tools, no need to clear
+	if (compactableTools.length < finalConfig.threshold + finalConfig.keepRecent) {
+		return { messages, tokensSaved: 0, toolsCleared: 0, toolsKept: compactableTools.length }
+	}
+
+	// Determine which tools to clear and which to keep
+	// Keep the most recent N tools, clear
the rest + const keepRecent = Math.max(0, finalConfig.keepRecent) + const toolsToKeep = + keepRecent > 0 ? new Set(compactableTools.slice(-keepRecent).map((t) => t.id)) : new Set() + const toolsToClear = new Set(compactableTools.filter((t) => !toolsToKeep.has(t.id)).map((t) => t.id)) + + if (toolsToClear.size === 0) { + return { messages, tokensSaved: 0, toolsCleared: 0, toolsKept: compactableTools.length } + } + + // Build a map from tool_use_id to tool name for placeholder messages + const toolNameMap = buildToolNameMap(messages) + + let tokensSaved = 0 + let toolsCleared = 0 + + // Clear tool result content + const result: ApiMessage[] = messages.map((message) => { + if (message.role !== "user" || !Array.isArray(message.content)) { + return message + } + + let touched = false + const newContent = message.content.map((block) => { + if (block.type === "tool_result" && toolsToClear.has(block.tool_use_id)) { + const toolName = toolNameMap.get(block.tool_use_id) || "unknown" + const clearedMessage = getClearedMessage(toolName) + + // Check if already cleared to avoid double-counting + if (typeof block.content === "string" && block.content === clearedMessage) { + return block + } + + tokensSaved += calculateToolResultTokens(block) + toolsCleared++ + touched = true + return { ...block, content: clearedMessage } + } + return block + }) + + if (!touched) { + return message + } + + return { + ...message, + content: newContent, + } + }) + + console.log( + `[Microcompact] Cleared ${toolsCleared} tool results (~${tokensSaved} tokens), kept last ${toolsToKeep.size}`, + ) + + return { + messages: result, + tokensSaved, + toolsCleared, + toolsKept: toolsToKeep.size, + } +} + +/** + * Estimate the number of tokens that could be saved by microcompacting. + * This does not modify the messages, just returns an estimate. 
+ *
+ * @param messages - The conversation messages to analyze
+ * @param config - The microcompact configuration
+ * @returns Estimated number of tokens that would be saved
+ */
+export function estimateMicrocompactSavings(messages: ApiMessage[], config: Partial<MicrocompactConfig> = {}): number {
+	const finalConfig: MicrocompactConfig = {
+		...DEFAULT_MICROCOMPACT_CONFIG,
+		...config,
+	}
+
+	if (!finalConfig.enabled) {
+		return 0
+	}
+
+	const compactableTools = collectCompactableToolIds(messages)
+
+	if (compactableTools.length < finalConfig.threshold + finalConfig.keepRecent) {
+		return 0
+	}
+
+	const keepRecent = Math.max(0, finalConfig.keepRecent)
+	const toolsToKeep =
+		keepRecent > 0 ? new Set(compactableTools.slice(-keepRecent).map((t) => t.id)) : new Set<string>()
+	const toolsToClear = new Set(compactableTools.filter((t) => !toolsToKeep.has(t.id)).map((t) => t.id))
+
+	if (toolsToClear.size === 0) {
+		return 0
+	}
+
+	let tokensSaved = 0
+	const toolNameMap = buildToolNameMap(messages)
+
+	for (const message of messages) {
+		if (message.role !== "user" || !Array.isArray(message.content)) {
+			continue
+		}
+
+		for (const block of message.content) {
+			if (block.type === "tool_result" && toolsToClear.has(block.tool_use_id)) {
+				const toolName = toolNameMap.get(block.tool_use_id) || "unknown"
+				const clearedMessage = getClearedMessage(toolName)
+
+				// Skip if already cleared
+				if (typeof block.content === "string" && block.content === clearedMessage) {
+					continue
+				}
+
+				tokensSaved += calculateToolResultTokens(block)
+			}
+		}
+	}
+
+	return tokensSaved
+}
diff --git a/src/core/condense/sessionMemory.ts b/src/core/condense/sessionMemory.ts
new file mode 100644
index 00000000000..321faee09aa
--- /dev/null
+++ b/src/core/condense/sessionMemory.ts
@@ -0,0 +1,606 @@
+/**
+ * Session Memory Extraction for Roo Code
+ *
+ * This module extracts key information from conversations and stores it
+ * as structured session memory that can be used for condensation.
+ * + * Adapted from Claude Code CLI's session memory system. + */ + +import { Anthropic } from "@anthropic-ai/sdk" +import { ApiHandler } from "../../api" +import { ApiMessage } from "../task-persistence/apiMessages" + +const MAX_SECTION_LENGTH = 2000 +const MAX_TOTAL_SESSION_MEMORY_TOKENS = 12000 + +/** + * Default session memory template + */ +export const DEFAULT_SESSION_MEMORY_TEMPLATE = ` +# Session Title +_A short and distinctive 5-10 word descriptive title for the session. Super info dense, no filler_ + +# Current State +_What is actively being worked on right now? Pending tasks not yet completed. Immediate next steps._ + +# Task specification +_What did the user ask to build? Any design decisions or other explanatory context_ + +# Files and Functions +_What are the important files? In short, what do they contain and why are they relevant?_ + +# Workflow +_What bash commands are usually run and in what order? How to interpret their output if not obvious?_ + +# Errors & Corrections +_Errors encountered and how they were fixed. What did the user correct? What approaches failed and should not be tried again?_ + +# Codebase and System Documentation +_What are the important system components? How do they work/fit together?_ + +# Learnings +_What has worked well? What has not? What to avoid? Do not duplicate items from other sections_ + +# Key results +_If the user asked a specific output such as an answer to a question, a table, or other document, repeat the exact result here_ + +# Worklog +_Step by step, what was attempted, done? Very terse summary for each step_ +` + +/** + * Default prompt for updating session memory + */ +function getDefaultUpdatePrompt(): string { + return `IMPORTANT: This message and these instructions are NOT part of the actual user conversation. Do NOT include any references to "note-taking", "session notes extraction", or these update instructions in the notes content. 
+ +Based on the user conversation above (EXCLUDING this note-taking instruction message), update the session notes file. + +The file {{notesPath}} has already been read for you. Here are its current contents: + +{{currentNotes}} + + +Your ONLY task is to update the notes file with the new information from the conversation. + +CRITICAL RULES FOR EDITING: +- The file must maintain its exact structure with all sections, headers, and italic descriptions intact +-- NEVER modify, delete, or add section headers (the lines starting with '#' like # Task specification) +-- NEVER modify or delete the italic _section description_ lines (these are the lines in italics immediately following each header - they start and end with underscores) +-- The italic _section descriptions_ are TEMPLATE INSTRUCTIONS that must be preserved exactly as-is - they guide what content belongs in each section +-- ONLY update the actual content that appears BELOW the italic _section descriptions_ within each existing section +-- Do NOT add any new sections, summaries, or information outside the existing structure +- Do NOT reference this note-taking process or instructions anywhere in the notes +- It's OK to skip updating a section if there are no substantial new insights to add. Do not add filler content like "No info yet", just leave sections blank/unedited if appropriate. +- Write DETAILED, INFO-DENSE content for each section - include specifics like file paths, function names, error messages, exact commands, technical details, etc. +- For "Key results", include the complete, exact output the user requested (e.g., full table, full answer, etc.) 
+- Keep each section under ~${MAX_SECTION_LENGTH} tokens/words - if a section is approaching this limit, condense it by cycling out less important details while preserving the most critical information +- Focus on actionable, specific information that would help someone understand or recreate the work discussed in the conversation +- IMPORTANT: Always update "Current State" to reflect the most recent work - this is critical for continuity after compaction + +STRUCTURE PRESERVATION REMINDER: +Each section has TWO parts that must be preserved exactly as they appear in the current file: +1. The section header (line starting with #) +2. The italic description line (the _italicized text_ immediately after the header - this is a template instruction) + +You ONLY update the actual content that comes AFTER these two preserved lines. The italic description lines starting and ending with underscores are part of the template structure, NOT content to be edited or removed. + +REMEMBER: Update the notes with insights from the actual user conversation only. 
Do not delete or change section headers or italic _section descriptions_.` +} + +/** + * Configuration for session memory extraction + */ +export type SessionMemoryConfig = { + /** Minimum context window tokens before initializing session memory */ + minimumMessageTokensToInit: number + /** Minimum context window growth (in tokens) between session memory updates */ + minimumTokensBetweenUpdate: number + /** Number of tool calls between session memory updates */ + toolCallsBetweenUpdates: number +} + +/** + * Default configuration values + */ +export const DEFAULT_SESSION_MEMORY_CONFIG: SessionMemoryConfig = { + minimumMessageTokensToInit: 10000, + minimumTokensBetweenUpdate: 5000, + toolCallsBetweenUpdates: 3, +} + +/** + * Current session memory configuration + */ +let sessionMemoryConfig: SessionMemoryConfig = { + ...DEFAULT_SESSION_MEMORY_CONFIG, +} + +/** + * Track the last summarized message ID + */ +let lastSummarizedMessageId: string | undefined + +/** + * Track context size at last memory extraction + */ +let tokensAtLastExtraction = 0 + +/** + * Track whether session memory has been initialized + */ +let sessionMemoryInitialized = false + +/** + * Track extraction state + */ +let extractionStartedAt: number | undefined + +/** + * Track tool call count since last update + */ +let toolCallsSinceLastUpdate = 0 + +/** + * Get the message ID up to which the session memory is current + */ +export function getLastSummarizedMessageId(): string | undefined { + return lastSummarizedMessageId +} + +/** + * Set the last summarized message ID + */ +export function setLastSummarizedMessageId(messageId: string | undefined): void { + lastSummarizedMessageId = messageId +} + +/** + * Mark extraction as started + */ +export function markExtractionStarted(): void { + extractionStartedAt = Date.now() +} + +/** + * Mark extraction as completed + */ +export function markExtractionCompleted(): void { + extractionStartedAt = undefined +} + +/** + * Wait for any in-progress 
session memory extraction to complete (with 15s timeout)
+ */
+export async function waitForSessionMemoryExtraction(): Promise<void> {
+	const EXTRACTION_WAIT_TIMEOUT_MS = 15000
+	const EXTRACTION_STALE_THRESHOLD_MS = 60000 // 1 minute
+
+	const startTime = Date.now()
+	while (extractionStartedAt) {
+		const extractionAge = Date.now() - extractionStartedAt
+		if (extractionAge > EXTRACTION_STALE_THRESHOLD_MS) {
+			// Extraction is stale, don't wait
+			return
+		}
+
+		if (Date.now() - startTime > EXTRACTION_WAIT_TIMEOUT_MS) {
+			// Timeout - continue anyway
+			return
+		}
+
+		await new Promise((resolve) => setTimeout(resolve, 1000))
+	}
+}
+
+/**
+ * Get the current session memory configuration
+ */
+export function getSessionMemoryConfig(): SessionMemoryConfig {
+	return { ...sessionMemoryConfig }
+}
+
+/**
+ * Set the session memory configuration
+ */
+export function setSessionMemoryConfig(config: Partial<SessionMemoryConfig>): void {
+	sessionMemoryConfig = {
+		...sessionMemoryConfig,
+		...config,
+	}
+}
+
+/**
+ * Record the context size at the time of extraction
+ */
+export function recordExtractionTokenCount(currentTokenCount: number): void {
+	tokensAtLastExtraction = currentTokenCount
+}
+
+/**
+ * Check if session memory has been initialized
+ */
+export function isSessionMemoryInitialized(): boolean {
+	return sessionMemoryInitialized
+}
+
+/**
+ * Mark session memory as initialized
+ */
+export function markSessionMemoryInitialized(): void {
+	sessionMemoryInitialized = true
+}
+
+/**
+ * Check if we've met the threshold to initialize session memory
+ */
+export function hasMetInitializationThreshold(currentTokenCount: number): boolean {
+	return currentTokenCount >= sessionMemoryConfig.minimumMessageTokensToInit
+}
+
+/**
+ * Check if we've met the threshold for the next update
+ */
+export function hasMetUpdateThreshold(currentTokenCount: number): boolean {
+	const tokensSinceLastExtraction = currentTokenCount - tokensAtLastExtraction
+	return tokensSinceLastExtraction >=
sessionMemoryConfig.minimumTokensBetweenUpdate
+}
+
+/**
+ * Get the configured number of tool calls between updates
+ */
+export function getToolCallsBetweenUpdates(): number {
+	return sessionMemoryConfig.toolCallsBetweenUpdates
+}
+
+/**
+ * Increment tool call count
+ */
+export function incrementToolCallCount(): void {
+	toolCallsSinceLastUpdate++
+}
+
+/**
+ * Get current tool call count
+ */
+export function getToolCallCount(): number {
+	return toolCallsSinceLastUpdate
+}
+
+/**
+ * Reset tool call count
+ */
+export function resetToolCallCount(): void {
+	toolCallsSinceLastUpdate = 0
+}
+
+/**
+ * Reset session memory state (useful for testing)
+ */
+export function resetSessionMemoryState(): void {
+	sessionMemoryConfig = { ...DEFAULT_SESSION_MEMORY_CONFIG }
+	tokensAtLastExtraction = 0
+	sessionMemoryInitialized = false
+	lastSummarizedMessageId = undefined
+	extractionStartedAt = undefined
+	toolCallsSinceLastUpdate = 0
+}
+
+/**
+ * Estimate token count for text content
+ */
+function estimateTextTokens(text: string): number {
+	// Rough approximation: ~4 characters per token for English text
+	return Math.ceil(text.length / 4)
+}
+
+/**
+ * Parse the session memory file and analyze section sizes
+ */
+function analyzeSectionSizes(content: string): Record<string, number> {
+	const sections: Record<string, number> = {}
+	const lines = content.split("\n")
+	let currentSection = ""
+	let currentContent: string[] = []
+
+	for (const line of lines) {
+		if (line.startsWith("# ")) {
+			if (currentSection && currentContent.length > 0) {
+				const sectionContent = currentContent.join("\n").trim()
+				sections[currentSection] = estimateTextTokens(sectionContent)
+			}
+			currentSection = line
+			currentContent = []
+		} else {
+			currentContent.push(line)
+		}
+	}
+
+	if (currentSection && currentContent.length > 0) {
+		const sectionContent = currentContent.join("\n").trim()
+		sections[currentSection] = estimateTextTokens(sectionContent)
+	}
+
+	return sections
+}
+
+/**
+ * Generate reminders for
sections that are too long
+ */
+function generateSectionReminders(sectionSizes: Record<string, number>, totalTokens: number): string {
+	const overBudget = totalTokens > MAX_TOTAL_SESSION_MEMORY_TOKENS
+	const oversizedSections = Object.entries(sectionSizes)
+		.filter(([_, tokens]) => tokens > MAX_SECTION_LENGTH)
+		.sort(([, a], [, b]) => b - a)
+		.map(([section, tokens]) => `- "${section}" is ~${tokens} tokens (limit: ${MAX_SECTION_LENGTH})`)
+
+	if (oversizedSections.length === 0 && !overBudget) {
+		return ""
+	}
+
+	const parts: string[] = []
+
+	if (overBudget) {
+		parts.push(
+			`\n\nCRITICAL: The session memory file is currently ~${totalTokens} tokens, which exceeds the maximum of ${MAX_TOTAL_SESSION_MEMORY_TOKENS} tokens. You MUST condense the file to fit within this budget. Aggressively shorten oversized sections by removing less important details, merging related items, and summarizing older entries. Prioritize keeping "Current State" and "Errors & Corrections" accurate and detailed.`,
+		)
+	}
+
+	if (oversizedSections.length > 0) {
+		parts.push(
+			`\n\n${overBudget ? "Oversized sections to condense" : "IMPORTANT: The following sections exceed the per-section limit and MUST be condensed"}:\n${oversizedSections.join("\n")}`,
+		)
+	}
+
+	return parts.join("")
+}
+
+/**
+ * Substitute variables in the prompt template using {{variable}} syntax
+ */
+function substituteVariables(template: string, variables: Record<string, string>): string {
+	return template.replace(/\{\{(\w+)\}\}/g, (match, key: string) =>
+		Object.prototype.hasOwnProperty.call(variables, key) ? variables[key]!
: match, + ) +} + +/** + * Check if the session memory content is essentially empty (matches the template) + */ +export function isSessionMemoryEmpty(content: string): boolean { + // Compare trimmed content to detect if it's just the template + return content.trim() === DEFAULT_SESSION_MEMORY_TEMPLATE.trim() +} + +/** + * Build the session memory update prompt + */ +export function buildSessionMemoryUpdatePrompt(currentNotes: string, notesPath: string): string { + const promptTemplate = getDefaultUpdatePrompt() + + // Analyze section sizes and generate reminders if needed + const sectionSizes = analyzeSectionSizes(currentNotes) + const totalTokens = estimateTextTokens(currentNotes) + const sectionReminders = generateSectionReminders(sectionSizes, totalTokens) + + // Substitute variables in the prompt + const variables = { + currentNotes, + notesPath, + } + + const basePrompt = substituteVariables(promptTemplate, variables) + + // Add section size reminders and/or total budget warnings + return basePrompt + sectionReminders +} + +/** + * Truncate session memory sections that exceed the per-section token limit + */ +export function truncateSessionMemoryForCompact(content: string): { + truncatedContent: string + wasTruncated: boolean +} { + const lines = content.split("\n") + const maxCharsPerSection = MAX_SECTION_LENGTH * 4 // estimateTextTokens uses length/4 + const outputLines: string[] = [] + let currentSectionLines: string[] = [] + let currentSectionHeader = "" + let wasTruncated = false + + for (const line of lines) { + if (line.startsWith("# ")) { + const result = flushSessionSection(currentSectionHeader, currentSectionLines, maxCharsPerSection) + outputLines.push(...result.lines) + wasTruncated = wasTruncated || result.wasTruncated + currentSectionHeader = line + currentSectionLines = [] + } else { + currentSectionLines.push(line) + } + } + + // Flush the last section + const result = flushSessionSection(currentSectionHeader, currentSectionLines, 
maxCharsPerSection) + outputLines.push(...result.lines) + wasTruncated = wasTruncated || result.wasTruncated + + return { + truncatedContent: outputLines.join("\n"), + wasTruncated, + } +} + +function flushSessionSection( + sectionHeader: string, + sectionLines: string[], + maxCharsPerSection: number, +): { lines: string[]; wasTruncated: boolean } { + if (!sectionHeader) { + return { lines: sectionLines, wasTruncated: false } + } + + const sectionContent = sectionLines.join("\n") + if (sectionContent.length <= maxCharsPerSection) { + return { lines: [sectionHeader, ...sectionLines], wasTruncated: false } + } + + // Truncate at a line boundary near the limit + let charCount = 0 + const keptLines: string[] = [sectionHeader] + for (const line of sectionLines) { + if (charCount + line.length + 1 > maxCharsPerSection) { + break + } + keptLines.push(line) + charCount += line.length + 1 + } + keptLines.push("\n[... section truncated for length ...]") + return { lines: keptLines, wasTruncated: true } +} + +/** + * Extract session memory from conversation messages + * + * This function uses the AI to extract and update session memory based on + * the conversation history. 
+ * + * @param messages - The conversation messages to extract memory from + * @param currentSessionMemory - The current session memory content (if any) + * @param apiHandler - The API handler to use for the extraction + * @returns The updated session memory content + */ +export async function extractSessionMemory( + messages: ApiMessage[], + currentSessionMemory: string | null, + apiHandler: ApiHandler, +): Promise<string> { + // If no current memory, start with the template + const currentNotes = currentSessionMemory || DEFAULT_SESSION_MEMORY_TEMPLATE + + // Build the update prompt + const prompt = buildSessionMemoryUpdatePrompt(currentNotes, "session-memory.md") + + // Transform messages for the API (convert tool blocks to text) + const messagesForApi = messages.map((msg) => ({ + role: msg.role, + content: + typeof msg.content === "string" + ? msg.content + : msg.content.map((block) => { + if (block.type === "tool_use") { + return { + type: "text" as const, + text: `[Tool Use: ${(block as Anthropic.Messages.ToolUseBlockParam).name}]\n${JSON.stringify((block as Anthropic.Messages.ToolUseBlockParam).input, null, 2)}`, + } + } + if (block.type === "tool_result") { + const errorSuffix = (block as Anthropic.Messages.ToolResultBlockParam).is_error + ?
" (Error)" + : "" + if (typeof (block as Anthropic.Messages.ToolResultBlockParam).content === "string") { + return { + type: "text" as const, + text: `[Tool Result${errorSuffix}]\n${(block as Anthropic.Messages.ToolResultBlockParam).content}`, + } + } + return { + type: "text" as const, + text: `[Tool Result${errorSuffix}]\n[Complex content]`, + } + } + return block + }), + })) + + // Call the API to update the session memory + let updatedMemory = "" + try { + const stream = apiHandler.createMessage(prompt, messagesForApi, undefined) + + for await (const chunk of stream) { + if (chunk.type === "text") { + updatedMemory += chunk.text + } + } + } catch (error) { + console.error("[Session Memory] Error extracting session memory:", error) + // Return current memory if extraction fails + return currentNotes + } + + // If the API returned empty content, return current memory + if (!updatedMemory.trim()) { + return currentNotes + } + + // Extract the updated session memory from the response + // The response should contain the updated session memory content + // We need to parse it out if it's wrapped in markdown code blocks or similar + const memoryMatch = updatedMemory.match(/```(?:markdown)?\n([\s\S]*?)\n```/) || updatedMemory.match(/^([\s\S]*?)$/) + if (memoryMatch) { + return memoryMatch[1]?.trim() || currentNotes + } + + return updatedMemory.trim() || currentNotes +} + +/** + * Check if a message contains text blocks + */ +export function hasTextBlocks(message: ApiMessage): boolean { + if (message.role === "assistant") { + const content = message.content + return Array.isArray(content) && content.some((block) => block.type === "text") + } + if (message.role === "user") { + const content = message.content + if (typeof content === "string") { + return content.length > 0 + } + if (Array.isArray(content)) { + return content.some((block) => block.type === "text") + } + } + return false +} + +/** + * Estimate the number of tokens in a message + */ +export function 
estimateMessageTokens(messages: ApiMessage[]): number { + let totalTokens = 0 + for (const msg of messages) { + if (typeof msg.content === "string") { + totalTokens += estimateTextTokens(msg.content) + } else if (Array.isArray(msg.content)) { + for (const block of msg.content) { + if (block.type === "text") { + totalTokens += estimateTextTokens(block.text) + } else if (block.type === "image") { + totalTokens += 2000 // Approximate token count for images + } else if (block.type === "tool_use") { + totalTokens += estimateTextTokens( + JSON.stringify((block as Anthropic.Messages.ToolUseBlockParam).input), + ) + } else if (block.type === "tool_result") { + const content = (block as Anthropic.Messages.ToolResultBlockParam).content + if (typeof content === "string") { + totalTokens += estimateTextTokens(content) + } else if (Array.isArray(content)) { + for (const item of content) { + if (item.type === "text") { + totalTokens += estimateTextTokens(item.text) + } else if (item.type === "image") { + totalTokens += 2000 + } + } + } + } + } + } + } + return totalTokens +} diff --git a/src/core/condense/sessionMemoryCompact.ts b/src/core/condense/sessionMemoryCompact.ts new file mode 100644 index 00000000000..86d058e2c64 --- /dev/null +++ b/src/core/condense/sessionMemoryCompact.ts @@ -0,0 +1,426 @@ +/** + * Session Memory Compaction for Roo Code + * + * This module implements session memory compaction, which uses pre-extracted + * session memory as a condensation summary instead of calling the LLM again. + * + * Adapted from Claude Code CLI's session memory compaction system. 
+ */ + +import { Anthropic } from "@anthropic-ai/sdk" +import crypto from "crypto" + +import { ApiHandler } from "../../api" +import { ApiMessage } from "../task-persistence/apiMessages" +import { + DEFAULT_SESSION_MEMORY_CONFIG, + estimateMessageTokens, + getLastSummarizedMessageId, + hasTextBlocks, + isSessionMemoryEmpty, + markExtractionCompleted, + markExtractionStarted, + resetSessionMemoryState, + setLastSummarizedMessageId, + setSessionMemoryConfig, + truncateSessionMemoryForCompact, + waitForSessionMemoryExtraction, + type SessionMemoryConfig, +} from "./sessionMemory" + +/** + * Configuration for session memory compaction thresholds + */ +export type SessionMemoryCompactConfig = { + /** Minimum tokens to preserve after compaction */ + minTokens: number + /** Minimum number of messages with text blocks to keep */ + minTextBlockMessages: number + /** Maximum tokens to preserve after compaction (hard cap) */ + maxTokens: number +} + +/** + * Default configuration values + */ +export const DEFAULT_SM_COMPACT_CONFIG: SessionMemoryCompactConfig = { + minTokens: 10000, + minTextBlockMessages: 5, + maxTokens: 50000, +} + +/** + * Current configuration + */ +let smCompactConfig: SessionMemoryCompactConfig = { + ...DEFAULT_SM_COMPACT_CONFIG, +} + +/** + * Set the session memory compact configuration + */ +export function setSessionMemoryCompactConfig(config: Partial): void { + smCompactConfig = { + ...smCompactConfig, + ...config, + } +} + +/** + * Get the current session memory compact configuration + */ +export function getSessionMemoryCompactConfig(): SessionMemoryCompactConfig { + return { ...smCompactConfig } +} + +/** + * Reset config state (useful for testing) + */ +export function resetSessionMemoryCompactConfig(): void { + smCompactConfig = { ...DEFAULT_SM_COMPACT_CONFIG } +} + +/** + * Check if a message contains tool_result blocks and return their tool_use_ids + */ +function getToolResultIds(message: ApiMessage): string[] { + if (message.role !== "user") 
{ + return [] + } + const content = message.content + if (!Array.isArray(content)) { + return [] + } + const ids: string[] = [] + for (const block of content) { + if (block.type === "tool_result") { + ids.push((block as Anthropic.Messages.ToolResultBlockParam).tool_use_id) + } + } + return ids +} + +/** + * Check if a message contains tool_use blocks with any of the given ids + */ +function hasToolUseWithIds(message: ApiMessage, toolUseIds: Set): boolean { + if (message.role !== "assistant") { + return false + } + const content = message.content + if (!Array.isArray(content)) { + return false + } + return content.some( + (block) => block.type === "tool_use" && toolUseIds.has((block as Anthropic.Messages.ToolUseBlockParam).id), + ) +} + +/** + * Adjust the start index to ensure we don't split tool_use/tool_result pairs + * or thinking blocks that share the same message.id with kept assistant messages. + * + * If ANY message we're keeping contains tool_result blocks, we need to + * include the preceding assistant message(s) that contain the matching tool_use blocks. + */ +export function adjustIndexToPreserveAPIInvariants(messages: ApiMessage[], startIndex: number): number { + if (startIndex <= 0 || startIndex >= messages.length) { + return startIndex + } + + let adjustedIndex = startIndex + + // Step 1: Handle tool_use/tool_result pairs + // Collect tool_result IDs from ALL messages in the kept range + const allToolResultIds: string[] = [] + for (let i = startIndex; i < messages.length; i++) { + allToolResultIds.push(...getToolResultIds(messages[i]!)) + } + + if (allToolResultIds.length > 0) { + // Collect tool_use IDs already in the kept range + const toolUseIdsInKeptRange = new Set() + for (let i = adjustedIndex; i < messages.length; i++) { + const msg = messages[i]! 
+ if (msg.role === "assistant" && Array.isArray(msg.content)) { + for (const block of msg.content) { + if (block.type === "tool_use") { + toolUseIdsInKeptRange.add((block as Anthropic.Messages.ToolUseBlockParam).id) + } + } + } + } + + // Only look for tool_uses that are NOT already in the kept range + const neededToolUseIds = new Set(allToolResultIds.filter((id) => !toolUseIdsInKeptRange.has(id))) + + // Find the assistant message(s) with matching tool_use blocks + for (let i = adjustedIndex - 1; i >= 0 && neededToolUseIds.size > 0; i--) { + const message = messages[i]! + if (hasToolUseWithIds(message, neededToolUseIds)) { + adjustedIndex = i + // Remove found tool_use_ids from the set + if (message.role === "assistant" && Array.isArray(message.content)) { + for (const block of message.content) { + if ( + block.type === "tool_use" && + neededToolUseIds.has((block as Anthropic.Messages.ToolUseBlockParam).id) + ) { + neededToolUseIds.delete((block as Anthropic.Messages.ToolUseBlockParam).id) + } + } + } + } + } + } + + // Step 2: Handle thinking blocks that share message.id with kept assistant messages + // Collect all message.ids from assistant messages in the kept range + const messageIdsInKeptRange = new Set() + for (let i = adjustedIndex; i < messages.length; i++) { + const msg = messages[i]! + if (msg.role === "assistant" && msg.id) { + messageIdsInKeptRange.add(msg.id) + } + } + + // Look backwards for assistant messages with the same message.id that are not in the kept range + for (let i = adjustedIndex - 1; i >= 0; i--) { + const message = messages[i]! + if (message.role === "assistant" && message.id && messageIdsInKeptRange.has(message.id)) { + // This message has the same message.id as one in the kept range + // Include it so thinking blocks can be properly merged + adjustedIndex = i + } + } + + return adjustedIndex +} + +/** + * Calculate the starting index for messages to keep after compaction. 
+ * Starts from lastSummarizedMessageId, then expands backwards to meet minimums. + */ +export function calculateMessagesToKeepIndex(messages: ApiMessage[], lastSummarizedIndex: number): number { + if (messages.length === 0) { + return 0 + } + + const config = getSessionMemoryCompactConfig() + + // Start from the message after lastSummarizedIndex + let startIndex = lastSummarizedIndex >= 0 ? lastSummarizedIndex + 1 : messages.length + + // Calculate current tokens and text-block message count from startIndex to end + let totalTokens = 0 + let textBlockMessageCount = 0 + for (let i = startIndex; i < messages.length; i++) { + const msg = messages[i]! + totalTokens += estimateMessageTokens([msg]) + if (hasTextBlocks(msg)) { + textBlockMessageCount++ + } + } + + // Check if we already hit the max cap + if (totalTokens >= config.maxTokens) { + return adjustIndexToPreserveAPIInvariants(messages, startIndex) + } + + // Check if we already meet both minimums + if (totalTokens >= config.minTokens && textBlockMessageCount >= config.minTextBlockMessages) { + return adjustIndexToPreserveAPIInvariants(messages, startIndex) + } + + // Expand backwards until we meet both minimums or hit max cap + for (let i = startIndex - 1; i >= 0; i--) { + const msg = messages[i]! + const msgTokens = estimateMessageTokens([msg]) + totalTokens += msgTokens + if (hasTextBlocks(msg)) { + textBlockMessageCount++ + } + startIndex = i + + // Stop if we hit the max cap + if (totalTokens >= config.maxTokens) { + break + } + + // Stop if we meet both minimums + if (totalTokens >= config.minTokens && textBlockMessageCount >= config.minTextBlockMessages) { + break + } + } + + // Adjust for tool pairs + return adjustIndexToPreserveAPIInvariants(messages, startIndex) +} + +/** + * Try to use session memory for compaction instead of traditional compaction. + * Returns null if session memory compaction cannot be used. 
+ * + * @param messages - The conversation messages + * @param sessionMemory - The session memory content + * @param apiHandler - The API handler + * @param taskId - The task ID for telemetry + * @param autoCompactThreshold - Optional threshold for autocompact + * @returns The compaction result or null if session memory compaction cannot be used + */ +export async function trySessionMemoryCompaction( + messages: ApiMessage[], + sessionMemory: string, + apiHandler: ApiHandler, + taskId: string, + autoCompactThreshold?: number, + lastSummarizedMessageId?: string, +): Promise<SessionMemoryCompactionResult | null> { + // No session memory file exists + if (!sessionMemory) { + console.log("[Session Memory Compaction] No session memory available") + return null + } + + // Session memory exists but matches the template (no actual content extracted) + if (isSessionMemoryEmpty(sessionMemory)) { + console.log("[Session Memory Compaction] Session memory is empty (matches template)") + return null + } + + try { + let lastSummarizedIndex: number + + // Prefer the caller-supplied id; fall back to module-level state for backwards compat + const resolvedLastSummarizedMessageId = lastSummarizedMessageId ??
getLastSummarizedMessageId() + + if (resolvedLastSummarizedMessageId) { + // Normal case: we know exactly which messages have been summarized + lastSummarizedIndex = messages.findIndex((msg) => msg.id === resolvedLastSummarizedMessageId) + + if (lastSummarizedIndex === -1) { + // The summarized message ID doesn't exist in current messages + console.log("[Session Memory Compaction] Summarized message ID not found") + return null + } + } else { + // Resumed session case: session memory has content but we don't know the boundary + // Set lastSummarizedIndex to last message so startIndex becomes messages.length (no messages kept initially) + lastSummarizedIndex = messages.length - 1 + console.log("[Session Memory Compaction] Resumed session detected") + } + + // Calculate the starting index for messages to keep + const startIndex = calculateMessagesToKeepIndex(messages, lastSummarizedIndex) + + // Filter out old compact boundary messages from messagesToKeep + const messagesToKeep = messages.slice(startIndex).filter((m) => !m.isTruncationMarker) + + // Truncate oversized sections to prevent session memory from consuming + // the entire post-compact token budget + const { truncatedContent, wasTruncated } = truncateSessionMemoryForCompact(sessionMemory) + + // Build the summary content + let summaryContent = `## Session Memory\n${truncatedContent}` + + if (wasTruncated) { + summaryContent += `\n\nSome session memory sections were truncated for length. The full session memory can be viewed in the session memory file.` + } + + // Generate a unique condenseId for this summary + const condenseId = crypto.randomUUID() + + // Use the last message's timestamp + 1 to ensure unique timestamp for summary + const lastMsgTs = messages[messages.length - 1]?.ts ?? 
Date.now() + + const summaryMessage: ApiMessage = { + role: "user", + content: summaryContent, + ts: lastMsgTs + 1, + isSummary: true, + condenseId, + } + + // Tag ALL messages with condenseParent + const newMessages = messages.map((msg) => { + if (!msg.condenseParent) { + return { ...msg, condenseParent: condenseId } + } + return msg + }) + + // Append the summary message at the end + newMessages.push(summaryMessage) + + // Calculate post-compact token count + const postCompactTokenCount = estimateMessageTokens([summaryMessage]) + + // Only check threshold if one was provided (for autocompact) + if (autoCompactThreshold !== undefined && postCompactTokenCount >= autoCompactThreshold) { + console.log( + `[Session Memory Compaction] Threshold exceeded: ${postCompactTokenCount} >= ${autoCompactThreshold}`, + ) + return null + } + + console.log("[Session Memory Compaction] Success") + + return { + messages: newMessages, + summary: summaryContent, + cost: 0, // No API call made + newContextTokens: postCompactTokenCount, + condenseId, + messagesToKeep, + postCompactTokenCount, + } + } catch (error) { + console.error("[Session Memory Compaction] Error:", error) + return null + } +} + +/** + * Result type for session memory compaction + */ +export type SessionMemoryCompactionResult = { + messages: ApiMessage[] + summary: string + cost: number + newContextTokens: number + condenseId: string + messagesToKeep: ApiMessage[] + postCompactTokenCount: number +} + +/** + * Check if session memory compaction is enabled + */ +export function isSessionMemoryCompactEnabled(): boolean { + // This will be controlled by the configuration setting + // For now, return true by default + return true +} + +/** + * Initialize session memory configuration from settings + */ +export function initializeSessionMemoryConfig( + sessionMemoryConfig?: Partial<SessionMemoryConfig>, + smCompactConfig?: Partial<SessionMemoryCompactConfig>, +): void { + if (sessionMemoryConfig) { + setSessionMemoryConfig(sessionMemoryConfig) + } + if (smCompactConfig) {
+ setSessionMemoryCompactConfig(smCompactConfig) + } +} + +/** + * Reset all session memory state (useful for testing) + */ +export function resetAllSessionMemoryState(): void { + resetSessionMemoryState() + resetSessionMemoryCompactConfig() +} diff --git a/src/core/context-management/index.ts b/src/core/context-management/index.ts index 243d7bd797f..41be6448a3d 100644 --- a/src/core/context-management/index.ts +++ b/src/core/context-management/index.ts @@ -4,10 +4,21 @@ import crypto from "crypto" import { TelemetryService } from "@roo-code/telemetry" import { ApiHandler, ApiHandlerCreateMessageMetadata } from "../../api" -import { MAX_CONDENSE_THRESHOLD, MIN_CONDENSE_THRESHOLD, summarizeConversation, SummarizeResponse } from "../condense" +import { + MAX_CONDENSE_THRESHOLD, + MIN_CONDENSE_THRESHOLD, + microcompactMessages, + summarizeConversation, + SummarizeResponse, + type MicrocompactConfig, + trySessionMemoryCompaction, + isSessionMemoryCompactEnabled, + type SessionMemoryCompactionResult, +} from "../condense" import { ApiMessage } from "../task-persistence/apiMessages" import { ANTHROPIC_DEFAULT_MAX_TOKENS } from "@roo-code/types" import { RooIgnoreController } from "../ignore/RooIgnoreController" +import type { HookSystem } from "../hooks" /** * Context Management @@ -147,6 +158,7 @@ export type WillManageContextOptions = { profileThresholds: Record currentProfileId: string lastMessageTokens: number + microCompactEnabled?: boolean } /** @@ -167,7 +179,16 @@ export function willManageContext({ profileThresholds, currentProfileId, lastMessageTokens, + microCompactEnabled, }: WillManageContextOptions): boolean { + // Microcompact is always a possibility if enabled + if (microCompactEnabled !== false) { + // We can't accurately predict microcompact savings without the actual messages, + // but we can indicate that context management might run + // This is a conservative estimate - actual savings depend on message content + return true + } + if 
(!autoCondenseContext) { // When auto-condense is disabled, only truncation can occur const reservedTokens = maxTokens || ANTHROPIC_DEFAULT_MAX_TOKENS @@ -229,6 +250,20 @@ export type ContextManagementOptions = { cwd?: string /** Optional controller for file access validation */ rooIgnoreController?: RooIgnoreController + /** Optional microcompact configuration */ + microCompactConfig?: Partial + /** Optional session memory content for session memory compaction */ + sessionMemory?: string + /** Whether session memory compaction is enabled */ + sessionMemoryCompactEnabled?: boolean + /** The message ID up to which session memory has been summarized (avoids global state) */ + lastSummarizedMessageId?: string + /** Optional hook system for executing pre/post compact hooks */ + hookSystem?: HookSystem + /** Whether prompt-too-long retry is enabled */ + promptTooLongRetryEnabled?: boolean + /** Maximum number of retry attempts when condensation hits prompt-too-long error */ + promptTooLongMaxRetries?: number } export type ContextManagementResult = SummarizeResponse & { @@ -236,6 +271,8 @@ export type ContextManagementResult = SummarizeResponse & { truncationId?: string messagesRemoved?: number newContextTokensAfterTruncation?: number + /** Indicates if session memory compaction was used */ + sessionMemoryCompactionUsed?: boolean } /** @@ -262,6 +299,13 @@ export async function manageContext({ filesReadByRoo, cwd, rooIgnoreController, + microCompactConfig, + sessionMemory, + sessionMemoryCompactEnabled, + lastSummarizedMessageId, + hookSystem, + promptTooLongRetryEnabled, + promptTooLongMaxRetries, }: ContextManagementOptions): Promise { let error: string | undefined let errorDetails: string | undefined @@ -283,6 +327,39 @@ export async function manageContext({ // Truncate if we're within TOKEN_BUFFER_PERCENTAGE of the context window const allowedTokens = contextWindow * (1 - TOKEN_BUFFER_PERCENTAGE) - reservedTokens + // Step 1: Run microcompact as a pre-step to save 
tokens before full condensation + let workingMessages = messages + let microcompactTokensSaved = 0 + if (microCompactConfig?.enabled !== false) { + const microcompactResult = microcompactMessages(messages, microCompactConfig) + workingMessages = microcompactResult.messages + microcompactTokensSaved = microcompactResult.tokensSaved + + if (microcompactTokensSaved > 0) { + console.log( + `[Context Management] Microcompact saved ${microcompactTokensSaved} tokens (${microcompactResult.toolsCleared} tools cleared, ${microcompactResult.toolsKept} kept)`, + ) + } + } + + // Recalculate tokens after microcompact (only if we actually saved tokens) + let adjustedTotalTokens = totalTokens + if (microcompactTokensSaved > 0) { + // Estimate tokens for all messages except the last one + adjustedTotalTokens = 0 + for (const msg of workingMessages.slice(0, -1)) { + const content = msg.content + if (Array.isArray(content)) { + adjustedTotalTokens += await estimateTokenCount(content, apiHandler) + } else if (typeof content === "string") { + adjustedTotalTokens += await estimateTokenCount([{ type: "text", text: content }], apiHandler) + } + } + } + + // Calculate new context tokens after microcompact + const adjustedPrevContextTokens = adjustedTotalTokens + lastMessageTokens + // Determine the effective threshold to use let effectiveThreshold = autoCondenseContextPercent const profileThreshold = profileThresholds[currentProfileId] @@ -304,11 +381,37 @@ export async function manageContext({ // If no specific threshold is found for the profile, fall back to global setting if (autoCondenseContext) { - const contextPercent = (100 * prevContextTokens) / contextWindow - if (contextPercent >= effectiveThreshold || prevContextTokens > allowedTokens) { - // Attempt to intelligently condense the context + const contextPercent = (100 * adjustedPrevContextTokens) / contextWindow + if (contextPercent >= effectiveThreshold || adjustedPrevContextTokens > allowedTokens) { + // First, try session 
memory compaction if enabled and session memory is available + if (sessionMemoryCompactEnabled !== false && sessionMemory && isSessionMemoryCompactEnabled()) { + console.log("[Context Management] Attempting session memory compaction...") + const smResult = await trySessionMemoryCompaction( + workingMessages, + sessionMemory, + apiHandler, + taskId, + undefined, // No autoCompactThreshold for now + lastSummarizedMessageId, + ) + + if (smResult) { + console.log("[Context Management] Session memory compaction succeeded") + return { + ...smResult, + prevContextTokens: adjustedPrevContextTokens, + sessionMemoryCompactionUsed: true, + } + } else { + console.log( + "[Context Management] Session memory compaction failed, falling back to regular condensation", + ) + } + } + + // Fall back to regular condensation const result = await summarizeConversation({ - messages, + messages: workingMessages, apiHandler, systemPrompt, taskId, @@ -319,20 +422,23 @@ export async function manageContext({ filesReadByRoo, cwd, rooIgnoreController, + hookSystem, + promptTooLongRetryEnabled, + promptTooLongMaxRetries, }) if (result.error) { error = result.error errorDetails = result.errorDetails cost = result.cost } else { - return { ...result, prevContextTokens } + return { ...result, prevContextTokens: adjustedPrevContextTokens, sessionMemoryCompactionUsed: false } } } } // Fall back to sliding window truncation if needed - if (prevContextTokens > allowedTokens) { - const truncationResult = truncateConversation(messages, 0.5, taskId) + if (adjustedPrevContextTokens > allowedTokens) { + const truncationResult = truncateConversation(workingMessages, 0.5, taskId) // Calculate new context tokens after truncation by counting non-truncated messages // Messages with truncationParent are hidden, so we count only those without it @@ -361,7 +467,7 @@ export async function manageContext({ return { messages: truncationResult.messages, - prevContextTokens, + prevContextTokens: adjustedPrevContextTokens, 
summary: "", cost, error, @@ -372,5 +478,12 @@ export async function manageContext({ } } // No truncation or condensation needed - return { messages, summary: "", cost, prevContextTokens, error, errorDetails } + return { + messages: workingMessages, + summary: "", + cost, + prevContextTokens: adjustedPrevContextTokens, + error, + errorDetails, + } } diff --git a/src/core/hooks/__tests__/hooks.spec.ts b/src/core/hooks/__tests__/hooks.spec.ts new file mode 100644 index 00000000000..d9aa791617a --- /dev/null +++ b/src/core/hooks/__tests__/hooks.spec.ts @@ -0,0 +1,176 @@ +/** + * Tests for the Hook System + */ + +import { describe, it, expect, vi, beforeEach, afterEach } from "vitest" +import { HookSystem, createHookSystem, type HookSystemConfig } from "../index" +import * as fs from "fs" +import * as path from "path" + +// Mock the fs module +vi.mock("fs", () => ({ + existsSync: vi.fn(), + promises: { + readFile: vi.fn(), + }, +})) + +// Mock the vscode module +vi.mock("vscode", () => ({ + window: { + createOutputChannel: vi.fn(() => ({ + appendLine: vi.fn(), + dispose: vi.fn(), + })), + showErrorMessage: vi.fn(), + }, +})) + +describe("HookSystem", () => { + const mockWorkspacePath = "/test/workspace" + const mockConfig: HookSystemConfig = { + enabled: true, + workspacePath: mockWorkspacePath, + } + + beforeEach(() => { + vi.clearAllMocks() + }) + + afterEach(() => { + vi.restoreAllMocks() + }) + + describe("createHookSystem", () => { + it("should create a HookSystem instance with default enabled state", () => { + const hookSystem = createHookSystem(mockWorkspacePath) + expect(hookSystem).toBeInstanceOf(HookSystem) + expect(hookSystem.isEnabled()).toBe(true) + }) + + it("should create a HookSystem instance with disabled state", () => { + const hookSystem = createHookSystem(mockWorkspacePath, false) + expect(hookSystem).toBeInstanceOf(HookSystem) + expect(hookSystem.isEnabled()).toBe(false) + }) + + it("should create a HookSystem instance with custom config path", () 
=> { + const customPath = "/custom/path/hooks.json" + const hookSystem = createHookSystem(mockWorkspacePath, true, customPath) + expect(hookSystem).toBeInstanceOf(HookSystem) + }) + }) + + describe("isEnabled", () => { + it("should return true when hooks are enabled", () => { + const hookSystem = new HookSystem({ ...mockConfig, enabled: true }) + expect(hookSystem.isEnabled()).toBe(true) + }) + + it("should return false when hooks are disabled", () => { + const hookSystem = new HookSystem({ ...mockConfig, enabled: false }) + expect(hookSystem.isEnabled()).toBe(false) + }) + }) + + describe("updateConfig", () => { + it("should update the configuration", () => { + const hookSystem = new HookSystem(mockConfig) + expect(hookSystem.isEnabled()).toBe(true) + + hookSystem.updateConfig({ enabled: false }) + expect(hookSystem.isEnabled()).toBe(false) + }) + }) + + describe("executePreCompactHooks", () => { + it("should return empty result when hooks are disabled", async () => { + const hookSystem = new HookSystem({ ...mockConfig, enabled: false }) + const result = await hookSystem.executePreCompactHooks("auto", null) + + expect(result).toEqual({ hasFailures: false }) + expect(result.newCustomInstructions).toBeUndefined() + expect(result.userMessage).toBeUndefined() + }) + + it("should return empty result when no hooks are configured", async () => { + vi.mocked(fs.existsSync).mockReturnValue(false) + + const hookSystem = new HookSystem(mockConfig) + const result = await hookSystem.executePreCompactHooks("auto", null) + + expect(result).toEqual({ hasFailures: false }) + }) + + it("should execute pre-compact hooks when configured", async () => { + const mockConfigContent = { + PreCompact: [ + { + matcher: "auto", + hooks: [ + { + id: "test-hook", + type: "command", + command: "echo 'test'", + }, + ], + }, + ], + } + + vi.mocked(fs.existsSync).mockReturnValue(true) + vi.mocked(fs.promises.readFile).mockResolvedValue(JSON.stringify(mockConfigContent)) + + const hookSystem = new 
HookSystem(mockConfig) + const result = await hookSystem.executePreCompactHooks("auto", null, { cwd: mockWorkspacePath }) + + expect(result.hasFailures).toBeDefined() + }) + }) + + describe("executePostCompactHooks", () => { + it("should return empty result when hooks are disabled", async () => { + const hookSystem = new HookSystem({ ...mockConfig, enabled: false }) + const result = await hookSystem.executePostCompactHooks("auto", "summary", 1000, 500) + + expect(result).toEqual({ hasFailures: false }) + expect(result.userMessage).toBeUndefined() + }) + + it("should return empty result when no hooks are configured", async () => { + vi.mocked(fs.existsSync).mockReturnValue(false) + + const hookSystem = new HookSystem(mockConfig) + const result = await hookSystem.executePostCompactHooks("auto", "summary", 1000, 500) + + expect(result).toEqual({ hasFailures: false }) + }) + + it("should execute post-compact hooks when configured", async () => { + const mockConfigContent = { + PostCompact: [ + { + matcher: "auto", + hooks: [ + { + id: "test-hook", + type: "command", + command: "echo 'test'", + }, + ], + }, + ], + } + + vi.mocked(fs.existsSync).mockReturnValue(true) + vi.mocked(fs.promises.readFile).mockResolvedValue(JSON.stringify(mockConfigContent)) + + const hookSystem = new HookSystem(mockConfig) + const result = await hookSystem.executePostCompactHooks("auto", "summary", 1000, 500, { + cwd: mockWorkspacePath, + }) + + expect(result.hasFailures).toBeDefined() + }) + }) +}) diff --git a/src/core/hooks/config.ts b/src/core/hooks/config.ts new file mode 100644 index 00000000000..97d4108f96d --- /dev/null +++ b/src/core/hooks/config.ts @@ -0,0 +1,185 @@ +/** + * Hook Configuration Loader + * + * This module handles loading and validating hook configurations from + * JSON files in the workspace. 
+ */ + +import * as fs from "fs" +import * as path from "path" +import * as vscode from "vscode" +import { z } from "zod" +import type { Hook, HookEvent, HooksConfig } from "./types" + +/** + * Schema for validating command hooks + */ +const CommandHookSchema = z.object({ + id: z.string().min(1), + type: z.literal("command"), + description: z.string().optional(), + enabled: z.boolean().optional(), + timeout: z.number().positive().optional(), + command: z.string().min(1), + shell: z.enum(["bash", "powershell", "cmd"]).optional(), + cwd: z.string().optional(), + env: z.record(z.string()).optional(), +}) + +/** + * Schema for validating HTTP hooks + */ +const HttpHookSchema = z.object({ + id: z.string().min(1), + type: z.literal("http"), + description: z.string().optional(), + enabled: z.boolean().optional(), + timeout: z.number().positive().optional(), + url: z.string().url(), + method: z.enum(["POST", "PUT", "PATCH"]).optional(), + headers: z.record(z.string()).optional(), +}) + +/** + * Schema for validating hooks + */ +const HookSchema: z.ZodDiscriminatedUnion<"type", [typeof CommandHookSchema, typeof HttpHookSchema]> = + z.discriminatedUnion("type", [CommandHookSchema, HttpHookSchema]) + +/** + * Schema for validating hook matchers + */ +const HookMatcherSchema = z.object({ + matcher: z.string().optional(), + hooks: z.array(HookSchema), +}) + +/** + * Schema for validating the entire hooks configuration + */ +const HooksConfigSchema: z.ZodType = z.object({ + PreCompact: z.array(HookMatcherSchema).optional(), + PostCompact: z.array(HookMatcherSchema).optional(), +}) + +/** + * Default hooks configuration file name + */ +export const DEFAULT_HOOKS_CONFIG_FILE = ".roo-hooks.json" + +/** + * Result of loading hooks configuration + */ +export interface HooksConfigLoadResult { + /** The loaded configuration */ + config: HooksConfig + /** Path to the configuration file */ + configPath: string + /** Whether the configuration was loaded from a file */ + loadedFromFile: 
boolean + /** Error message if loading failed */ + error?: string +} + +/** + * Loads hooks configuration from the workspace + * + * @param workspacePath - The workspace root path + * @param configFileName - Optional custom configuration file name + * @returns The loaded hooks configuration + */ +export async function loadHooksConfig(workspacePath: string, configFileName?: string): Promise { + const configPath = path.join(workspacePath, configFileName || DEFAULT_HOOKS_CONFIG_FILE) + + // Check if configuration file exists + if (!fs.existsSync(configPath)) { + return { + config: {}, + configPath, + loadedFromFile: false, + } + } + + try { + const fileContent = await fs.promises.readFile(configPath, "utf-8") + const rawConfig = JSON.parse(fileContent) + + // Validate the configuration + const validatedConfig = HooksConfigSchema.parse(rawConfig) + + return { + config: validatedConfig, + configPath, + loadedFromFile: true, + } + } catch (error) { + let errorMessage = "Failed to load hooks configuration" + + if (error instanceof z.ZodError) { + errorMessage = `Invalid hooks configuration: ${error.errors.map((e) => e.message).join(", ")}` + } else if (error instanceof Error) { + errorMessage = `Failed to load hooks configuration: ${error.message}` + } + + // Show error to user + void vscode.window.showErrorMessage(errorMessage) + + return { + config: {}, + configPath, + loadedFromFile: false, + error: errorMessage, + } + } +} + +/** + * Gets hooks for a specific event from the configuration + * + * @param config - The hooks configuration + * @param event - The hook event to get hooks for + * @param matchQuery - Optional query string to filter hooks + * @returns Array of hooks that match the event and query + */ +export function getHooksForEvent(config: HooksConfig, event: HookEvent, matchQuery?: string): Hook[] { + const matchers = config[event] + + if (!matchers || matchers.length === 0) { + return [] + } + + // Flatten all hooks from all matchers + const allHooks: 
Hook[] = [] + + for (const matcher of matchers) { + // If a matcher is specified, check if it matches the query + if (matcher.matcher && matchQuery) { + // Simple string matching - can be enhanced with glob patterns if needed + if (!matchQuery.includes(matcher.matcher)) { + continue + } + } + + // Filter enabled hooks + const enabledHooks = matcher.hooks.filter((hook) => hook.enabled !== false) + + allHooks.push(...enabledHooks) + } + + return allHooks +} + +/** + * Validates a hooks configuration object + * + * @param config - The configuration to validate + * @returns Whether the configuration is valid + */ +export function validateHooksConfig(config: unknown): config is HooksConfig { + try { + HooksConfigSchema.parse(config) + return true + } catch { + return false + } +} diff --git a/src/core/hooks/executor.ts b/src/core/hooks/executor.ts new file mode 100644 index 00000000000..2b4b2479c96 --- /dev/null +++ b/src/core/hooks/executor.ts @@ -0,0 +1,295 @@ +/** + * Hook Executor + * + * This module handles the execution of hooks, including command hooks + * and HTTP hooks. 
+ */ + +import { exec } from "child_process" +import { promisify } from "util" +import * as vscode from "vscode" +import type { + CommandHook, + HttpHook, + Hook, + HookExecutionOptions, + HookExecutionResult, + HookExecutionSummary, + HookEvent, + HookInput, +} from "./types" + +const execAsync = promisify(exec) + +let _hooksOutputChannel: vscode.OutputChannel | undefined +function getHooksOutputChannel(): vscode.OutputChannel { + if (!_hooksOutputChannel) { + _hooksOutputChannel = vscode.window.createOutputChannel("Roo Code Hooks") + } + return _hooksOutputChannel +} + +/** + * Default timeout for hook execution in milliseconds + */ +const DEFAULT_HOOK_TIMEOUT_MS = 60 * 1000 // 60 seconds + +/** + * Executes a command hook + * + * @param hook - The command hook to execute + * @param input - The hook input data + * @param options - Execution options + * @returns The execution result + */ +async function executeCommandHook( + hook: CommandHook, + input: HookInput, + options: HookExecutionOptions, +): Promise { + const startTime = Date.now() + const timeout = (hook.timeout || 60) * 1000 + const cwd = hook.cwd || options.cwd || process.cwd() + + // Prepare environment variables + const env = { + ...process.env, + ...hook.env, + // Add hook input as environment variables + HOOK_EVENT: input.hook_event_name, + HOOK_INPUT: JSON.stringify(input), + } + + // Determine shell to use + let shell = hook.shell || "bash" + let command = hook.command + + // On Windows, default to cmd if no shell specified + if (process.platform === "win32" && !hook.shell) { + shell = "cmd" + } + + // Wrap command for different shells + if (shell === "powershell") { + // Escape backslashes first, then double-quotes, to prevent shell injection + const escaped = command.replace(/\\/g, "\\\\").replace(/"/g, '\\"') + command = `powershell -Command "${escaped}"` + } else if (shell === "cmd") { + // Escape % (variable expansion) and ^ (cmd escape char) to prevent injection + const escaped = 
command.replace(/\^/g, "^^").replace(/%/g, "%%") + command = `cmd /c "${escaped}"` + } + + try { + // Execute the command with timeout + const { stdout, stderr } = await execAsync(command, { + cwd, + env, + timeout, + windowsHide: true, + }) + + const output = stdout.trim() + const error = stderr.trim() + + return { + hook, + succeeded: true, + output: output || "", + error: error || undefined, + duration: Date.now() - startTime, + } + } catch (error) { + let errorMessage = "Unknown error" + if (error instanceof Error) { + errorMessage = error.message + // Check if it was a timeout + if (error.message.includes("timed out")) { + errorMessage = `Command timed out after ${timeout}ms` + } + } + + return { + hook, + succeeded: false, + output: "", + error: errorMessage, + duration: Date.now() - startTime, + } + } +} + +/** + * Executes an HTTP hook + * + * @param hook - The HTTP hook to execute + * @param input - The hook input data + * @param options - Execution options + * @returns The execution result + */ +async function executeHttpHook( + hook: HttpHook, + input: HookInput, + options: HookExecutionOptions, +): Promise { + const startTime = Date.now() + const timeout = (hook.timeout || 60) * 1000 + + try { + const controller = new AbortController() + const timeoutId = setTimeout(() => controller.abort(), timeout) + + // Combine options signal with timeout signal + if (options.signal) { + options.signal.addEventListener("abort", () => controller.abort()) + } + + const response = await fetch(hook.url, { + method: hook.method || "POST", + headers: { + "Content-Type": "application/json", + ...hook.headers, + }, + body: JSON.stringify(input), + signal: controller.signal, + }) + + clearTimeout(timeoutId) + + const responseText = await response.text() + + if (!response.ok) { + return { + hook, + succeeded: false, + output: responseText, + error: `HTTP ${response.status}: ${response.statusText}`, + duration: Date.now() - startTime, + } + } + + return { + hook, + succeeded: 
true, + output: responseText, + duration: Date.now() - startTime, + } + } catch (error) { + let errorMessage = "Unknown error" + if (error instanceof Error) { + errorMessage = error.message + if (error.name === "AbortError") { + errorMessage = `HTTP request timed out after ${timeout}ms` + } + } + + return { + hook, + succeeded: false, + output: "", + error: errorMessage, + duration: Date.now() - startTime, + } + } +} + +/** + * Executes a single hook + * + * @param hook - The hook to execute + * @param input - The hook input data + * @param options - Execution options + * @returns The execution result + */ +export async function executeHook( + hook: Hook, + input: HookInput, + options: HookExecutionOptions = {}, +): Promise { + // Check for abort signal + if (options.signal?.aborted) { + return { + hook, + succeeded: false, + output: "", + error: "Hook execution aborted", + duration: 0, + } + } + + // Execute based on hook type + if (hook.type === "command") { + return executeCommandHook(hook, input, options) + } else if (hook.type === "http") { + return executeHttpHook(hook, input, options) + } + + // This should never happen due to type checking + return { + hook, + succeeded: false, + output: "", + error: `Unknown hook type: ${(hook as Hook).type}`, + duration: 0, + } +} + +/** + * Executes multiple hooks for an event + * + * @param event - The hook event + * @param hooks - The hooks to execute + * @param input - The hook input data + * @param options - Execution options + * @returns The execution summary + */ +export async function executeHooks( + event: HookEvent, + hooks: Hook[], + input: HookInput, + options: HookExecutionOptions = {}, +): Promise { + const startTime = Date.now() + + // Execute all hooks in parallel + const results = await Promise.all(hooks.map((hook) => executeHook(hook, input, options))) + + // Check for failures + const hasFailures = results.some((result) => !result.succeeded) + + // Build user message + const messages: string[] = [] + 
for (const result of results) { + const hookType = result.hook.type === "command" ? "command" : "http" + const hookName = result.hook.description || result.hook.id + + if (result.succeeded) { + if (result.output) { + messages.push(`${event} [${hookName}] completed successfully: ${result.output}`) + } else { + messages.push(`${event} [${hookName}] completed successfully`) + } + } else { + if (result.error) { + messages.push(`${event} [${hookName}] failed: ${result.error}`) + } else { + messages.push(`${event} [${hookName}] failed`) + } + } + } + + if (messages.length > 0) { + const ch = getHooksOutputChannel() + ch.appendLine(`[${new Date().toISOString()}] ${event} hooks executed`) + ch.appendLine(messages.join("\n")) + ch.appendLine("---") + } + + return { + event, + results, + hasFailures, + totalDuration: Date.now() - startTime, + userMessage: messages.length > 0 ? messages.join("\n") : undefined, + } +} diff --git a/src/core/hooks/index.ts b/src/core/hooks/index.ts new file mode 100644 index 00000000000..db1aedcf009 --- /dev/null +++ b/src/core/hooks/index.ts @@ -0,0 +1,183 @@ +/** + * Hook System for Roo Code Extension + * + * This module provides a hook system that allows users to execute custom + * commands at specific points in the extension's lifecycle, such as before + * and after context condensation. 
+ * + * Features: + * - Command hooks: Execute shell commands + * - HTTP hooks: Send HTTP POST requests + * - Configuration via JSON file in workspace + * - Async execution with timeout support + * - Graceful error handling + */ + +import type { + CompactTrigger, + HookExecutionOptions, + HooksConfig, + PreCompactHookInput, + PostCompactHookInput, +} from "./types" +import { loadHooksConfig, getHooksForEvent, DEFAULT_HOOKS_CONFIG_FILE } from "./config" +import { executeHooks } from "./executor" + +// Re-export types and utilities +export * from "./types" +export { loadHooksConfig, getHooksForEvent, DEFAULT_HOOKS_CONFIG_FILE } from "./config" +export { executeHooks, executeHook } from "./executor" + +/** + * Hook system configuration + */ +export interface HookSystemConfig { + /** Whether hooks are enabled */ + enabled: boolean + /** Path to hooks configuration file */ + configPath?: string + /** Workspace root path */ + workspacePath: string +} + +/** + * Result of executing pre-compact hooks + */ +export interface PreCompactHookResult { + /** New custom instructions from hooks */ + newCustomInstructions?: string + /** User-facing message */ + userMessage?: string + /** Whether any hook failed */ + hasFailures: boolean +} + +/** + * Result of executing post-compact hooks + */ +export interface PostCompactHookResult { + /** User-facing message */ + userMessage?: string + /** Whether any hook failed */ + hasFailures: boolean +} + +/** + * Hook system class that manages hook execution + */ +export class HookSystem { + private config: HookSystemConfig + + constructor(config: HookSystemConfig) { + this.config = config + } + + /** + * Updates the hook system configuration + */ + updateConfig(config: Partial): void { + this.config = { ...this.config, ...config } + } + + /** + * Checks if hooks are enabled + */ + isEnabled(): boolean { + return this.config.enabled + } + + /** + * Loads hooks configuration fresh from disk on each call so that + * changes to .roo-hooks.json 
are always picked up without a reload. + */ + private async loadConfig(): Promise { + const result = await loadHooksConfig(this.config.workspacePath, this.config.configPath) + return result.config + } + + /** + * Executes pre-compact hooks + */ + async executePreCompactHooks( + trigger: CompactTrigger, + customInstructions: string | null, + options: HookExecutionOptions = {}, + ): Promise { + if (!this.isEnabled()) { + return { hasFailures: false } + } + + const config = await this.loadConfig() + const hooks = getHooksForEvent(config, "PreCompact", trigger) + + if (hooks.length === 0) { + return { hasFailures: false } + } + + const input: PreCompactHookInput = { + hook_event_name: "PreCompact", + trigger, + custom_instructions: customInstructions, + cwd: options.cwd || this.config.workspacePath, + taskId: options.taskId || "", + } + + const summary = await executeHooks("PreCompact", hooks, input, options) + + const successfulOutputs = summary.results + .filter((result) => result.succeeded && result.output.trim().length > 0) + .map((result) => result.output.trim()) + + return { + newCustomInstructions: successfulOutputs.length > 0 ? 
successfulOutputs.join("\n\n") : undefined, + userMessage: summary.userMessage, + hasFailures: summary.hasFailures, + } + } + + /** + * Executes post-compact hooks + */ + async executePostCompactHooks( + trigger: CompactTrigger, + compactSummary: string, + prevTokens: number, + newTokens: number, + options: HookExecutionOptions = {}, + ): Promise { + if (!this.isEnabled()) { + return { hasFailures: false } + } + + const config = await this.loadConfig() + const hooks = getHooksForEvent(config, "PostCompact", trigger) + + if (hooks.length === 0) { + return { hasFailures: false } + } + + const input: PostCompactHookInput = { + hook_event_name: "PostCompact", + trigger, + compact_summary: compactSummary, + prev_tokens: prevTokens, + new_tokens: newTokens, + cwd: options.cwd || this.config.workspacePath, + taskId: options.taskId || "", + } + + const summary = await executeHooks("PostCompact", hooks, input, options) + + return { + userMessage: summary.userMessage, + hasFailures: summary.hasFailures, + } + } +} + +/** + * Creates a new hook system instance + */ +export function createHookSystem(workspacePath: string, enabled: boolean = true, configPath?: string): HookSystem { + return new HookSystem({ enabled, workspacePath, configPath }) +} diff --git a/src/core/hooks/types.ts b/src/core/hooks/types.ts new file mode 100644 index 00000000000..c5b59d39b2d --- /dev/null +++ b/src/core/hooks/types.ts @@ -0,0 +1,166 @@ +/** + * Hook Types for Roo Code Extension + * + * This module defines the type system for the hook functionality, + * which allows users to execute custom commands at specific points + * in the extension's lifecycle (e.g., before/after context condensation). 
+ */ + +/** + * Hook event types that can be triggered in the extension + */ +export type HookEvent = "PreCompact" | "PostCompact" + +/** + * Trigger type for compact hooks + */ +export type CompactTrigger = "manual" | "auto" + +/** + * Base hook configuration + */ +export interface BaseHook { + /** Unique identifier for this hook */ + id: string + /** Type of hook (command, http) */ + type: "command" | "http" + /** Optional description of what this hook does */ + description?: string + /** Whether this hook is enabled */ + enabled?: boolean + /** Optional timeout in seconds (default: 60) */ + timeout?: number +} + +/** + * Command hook configuration - executes a shell command + */ +export interface CommandHook extends BaseHook { + type: "command" + /** Shell command to execute */ + command: string + /** Shell interpreter to use (bash, powershell, cmd) */ + shell?: "bash" | "powershell" | "cmd" + /** Working directory for the command (default: project root) */ + cwd?: string + /** Environment variables to pass to the command */ + env?: Record +} + +/** + * HTTP hook configuration - sends an HTTP POST request + */ +export interface HttpHook extends BaseHook { + type: "http" + /** URL to POST the hook data to */ + url: string + /** HTTP method (default: POST) */ + method?: "POST" | "PUT" | "PATCH" + /** Additional headers to include in the request */ + headers?: Record +} + +/** + * Union type for all hook configurations + */ +export type Hook = CommandHook | HttpHook + +/** + * Hook matcher configuration - allows filtering when hooks run + */ +export interface HookMatcher { + /** Optional pattern to match (e.g., trigger type) */ + matcher?: string + /** List of hooks to execute when the matcher matches */ + hooks: Hook[] +} + +/** + * Hooks configuration - maps hook events to their matchers + */ +export type HooksConfig = Partial> + +/** + * Input data passed to PreCompact hooks + */ +export interface PreCompactHookInput { + /** Hook event name */ + hook_event_name: 
"PreCompact" + /** Trigger type (manual or auto) */ + trigger: CompactTrigger + /** Custom instructions for condensation (if any) */ + custom_instructions: string | null + /** Current working directory */ + cwd: string + /** Task ID */ + taskId: string +} + +/** + * Input data passed to PostCompact hooks + */ +export interface PostCompactHookInput { + /** Hook event name */ + hook_event_name: "PostCompact" + /** Trigger type (manual or auto) */ + trigger: CompactTrigger + /** The generated summary from condensation */ + compact_summary: string + /** Number of tokens before condensation */ + prev_tokens: number + /** Number of tokens after condensation */ + new_tokens: number + /** Current working directory */ + cwd: string + /** Task ID */ + taskId: string +} + +/** + * Union type for all hook inputs + */ +export type HookInput = PreCompactHookInput | PostCompactHookInput + +/** + * Result of executing a single hook + */ +export interface HookExecutionResult { + /** The hook that was executed */ + hook: Hook + /** Whether the hook executed successfully */ + succeeded: boolean + /** Output from the hook (stdout for commands, response body for HTTP) */ + output: string + /** Error message if the hook failed */ + error?: string + /** Execution time in milliseconds */ + duration: number +} + +/** + * Result of executing hooks for an event + */ +export interface HookExecutionSummary { + /** Event name */ + event: HookEvent + /** All hook execution results */ + results: HookExecutionResult[] + /** Whether any hook failed */ + hasFailures: boolean + /** Total execution time in milliseconds */ + totalDuration: number + /** User-facing message summarizing the results */ + userMessage?: string +} + +/** + * Options for executing hooks + */ +export interface HookExecutionOptions { + /** Abort signal to cancel hook execution */ + signal?: AbortSignal + /** Working directory for command hooks */ + cwd?: string + /** Task ID for tracking */ + taskId?: string +} diff --git 
a/src/core/prompts/tools/native-tools/index.ts b/src/core/prompts/tools/native-tools/index.ts index 758914d2d65..da3e04f0e35 100644 --- a/src/core/prompts/tools/native-tools/index.ts +++ b/src/core/prompts/tools/native-tools/index.ts @@ -10,6 +10,8 @@ import executeCommand from "./execute_command" import generateImage from "./generate_image" import listFiles from "./list_files" import newTask from "./new_task" +import runTeamPhase from "./run_team_phase" +import spawnParallelTasks from "./spawn_parallel_tasks" import readCommandOutput from "./read_command_output" import { createReadFileTool, type ReadFileToolOptions } from "./read_file" import runSlashCommand from "./run_slash_command" @@ -57,6 +59,8 @@ export function getNativeTools(options: NativeToolsOptions = {}): OpenAI.Chat.Ch generateImage, listFiles, newTask, + runTeamPhase, + spawnParallelTasks, readCommandOutput, createReadFileTool(readFileOptions), runSlashCommand, diff --git a/src/core/prompts/tools/native-tools/new_task.ts b/src/core/prompts/tools/native-tools/new_task.ts index f8e29e549d9..2a4a8f893b4 100644 --- a/src/core/prompts/tools/native-tools/new_task.ts +++ b/src/core/prompts/tools/native-tools/new_task.ts @@ -10,6 +10,8 @@ const MESSAGE_PARAMETER_DESCRIPTION = `Initial user instructions or context for const TODOS_PARAMETER_DESCRIPTION = `Optional initial todo list written as a markdown checklist; required when the workspace mandates todos` +const WORKTREE_PARAMETER_DESCRIPTION = `Optional git worktree for filesystem isolation. Use "auto" to create a new branch+worktree automatically, or provide a branch name (e.g., "feat/my-branch") to check out that branch in an isolated worktree. 
Omit when the task doesn't need its own filesystem scope.` + export default { type: "function", function: { @@ -31,8 +33,12 @@ export default { type: ["string", "null"], description: TODOS_PARAMETER_DESCRIPTION, }, + worktree: { + type: ["string", "null"], + description: WORKTREE_PARAMETER_DESCRIPTION, + }, }, - required: ["mode", "message", "todos"], + required: ["mode", "message", "todos", "worktree"], additionalProperties: false, }, }, diff --git a/src/core/prompts/tools/native-tools/run_team_phase.ts b/src/core/prompts/tools/native-tools/run_team_phase.ts new file mode 100644 index 00000000000..ed358781a48 --- /dev/null +++ b/src/core/prompts/tools/native-tools/run_team_phase.ts @@ -0,0 +1,56 @@ +import type OpenAI from "openai" + +const RUN_TEAM_PHASE_DESCRIPTION = `Run one phase of a pre-configured team workflow defined in .roo/teams/.json. + +A team config describes an ordered list of phases (e.g., discovery → execution → review). Each phase runs one or more specialist agents. This tool executes exactly ONE phase and returns its aggregated results. + +**Typical orchestrator loop:** +1. Read .roo/teams/.json (with read_file) to learn the phase names and requireApproval flags. +2. For each phase in order: + a. If requireApproval is true, call ask_followup_question to get user sign-off. + b. Call run_team_phase with the phase name, the original task, and accumulated context from prior phases. +3. After the last phase, call attempt_completion with the final summary. + +**Execution modes (set in the team config per-phase):** +- concurrent: false (default) — agents in the phase run one at a time; you receive results when all finish. +- concurrent: true — all agents start simultaneously; you receive results when all finish. + +Pass prior phase results as JSON in the \`context\` parameter so later phases (e.g., review agents) have full context. + +CRITICAL: This tool MUST be called alone. 
Do NOT call it alongside other tools in the same turn.` + +export default { + type: "function", + function: { + name: "run_team_phase", + description: RUN_TEAM_PHASE_DESCRIPTION, + strict: true, + parameters: { + type: "object", + properties: { + team_slug: { + type: "string", + description: + 'Slug of the team to run (matches the "slug" field in the team config, e.g., "fullstack")', + }, + phase_name: { + type: "string", + description: + 'Name of the phase to execute (matches the "name" field in a phase entry, e.g., "discovery")', + }, + task: { + type: "string", + description: + "The original user task description. Injected into agent instructions via {{task}}. Pass the same value for every phase.", + }, + context: { + type: ["string", "null"], + description: + "JSON string of accumulated results from previous phases. Injected into agent instructions via {{context}}. Omit or pass null for the first phase.", + }, + }, + required: ["team_slug", "phase_name", "task", "context"], + additionalProperties: false, + }, + }, +} satisfies OpenAI.Chat.ChatCompletionTool diff --git a/src/core/prompts/tools/native-tools/spawn_parallel_tasks.ts b/src/core/prompts/tools/native-tools/spawn_parallel_tasks.ts new file mode 100644 index 00000000000..9c55d5f8aab --- /dev/null +++ b/src/core/prompts/tools/native-tools/spawn_parallel_tasks.ts @@ -0,0 +1,66 @@ +import type OpenAI from "openai" + +const SPAWN_PARALLEL_TASKS_DESCRIPTION = `Spawn multiple subtasks and collect their aggregated results before continuing. + +**Execution modes:** +- \`concurrent: false\` (default) — tasks run one at a time; the parent task is suspended until all finish. Lower resource use; good for dependent or quota-sensitive work. +- \`concurrent: true\` — all tasks start simultaneously and run in parallel while the parent stays active. Best for truly independent work where wall-clock time matters. 
+ +Use this when you need to split work into independent chunks (e.g., implement N features, analyze N files, run N experiments). When all tasks finish, you receive their aggregated results as a JSON array. + +CRITICAL: This tool MUST be called alone. Do NOT call it alongside other tools in the same turn.` + +export default { + type: "function", + function: { + name: "spawn_parallel_tasks", + description: SPAWN_PARALLEL_TASKS_DESCRIPTION, + strict: true, + parameters: { + type: "object", + properties: { + tasks: { + type: "array", + description: "List of tasks to execute. Must contain at least 2 items.", + items: { + type: "object", + properties: { + mode: { + type: "string", + description: "Mode slug for this task (e.g., code, debug, architect)", + }, + message: { + type: "string", + description: "Instructions for this task", + }, + worktree: { + type: ["string", "null"], + description: + 'Optional git worktree isolation. "auto" creates a new branch+worktree, or provide a branch name.', + }, + todos: { + type: ["string", "null"], + description: "Optional markdown checklist of initial todos for this task", + }, + }, + required: ["mode", "message", "worktree", "todos"], + additionalProperties: false, + }, + minItems: 2, + }, + concurrent: { + type: ["boolean", "null"], + description: + "When true, all tasks run simultaneously (parent stays active). When false or omitted, tasks run sequentially.", + }, + abortOnChildFailure: { + type: ["boolean", "null"], + description: + "When true and concurrent is also true, abort all remaining sibling tasks as soon as one fails. 
Has no effect in sequential mode (use the default false to collect all results regardless of failures).", + }, + }, + required: ["tasks", "concurrent", "abortOnChildFailure"], + additionalProperties: false, + }, + }, +} satisfies OpenAI.Chat.ChatCompletionTool diff --git a/src/core/swarm/FileMailbox.ts b/src/core/swarm/FileMailbox.ts new file mode 100644 index 00000000000..487abf665e8 --- /dev/null +++ b/src/core/swarm/FileMailbox.ts @@ -0,0 +1,157 @@ +import fs from "fs/promises" +import path from "path" + +import lockfile from "proper-lockfile" + +import type { TeammateMessage, TeammateMessageType } from "@roo-code/types" + +import type { IMailboxService } from "./IMailboxService" + +const POLL_INTERVAL_MS = 500 + +/** + * File-backed mailbox for cross-process swarm workers. + * + * Each agent's queue lives at `/.json`. + * Concurrent writes are serialised with proper-lockfile (advisory lock + * via a sibling `.lock` directory); polling replaces the EventEmitter + * used by InMemoryMailbox. + * + * baseDir is typically `~/.roo/swarm//`. 
+ */ +export class FileMailbox implements IMailboxService { + private _disposed = false + + constructor(private readonly baseDir: string) {} + + // ------------------------------------------------------------------------- + // IMailboxService + // ------------------------------------------------------------------------- + + async send(to: string, msg: TeammateMessage): Promise { + const p = await this.ensureInbox(to) + await this.withLock(p, async () => { + const queue = await this.readRaw(p) + queue.push(msg) + await fs.writeFile(p, JSON.stringify(queue, null, 2), "utf-8") + }) + } + + async read(agentId: string): Promise { + const p = this.inboxPath(agentId) + return this.readRaw(p) + } + + async markRead(agentId: string, idx: number): Promise { + const p = await this.ensureInbox(agentId) + await this.withLock(p, async () => { + const queue = await this.readRaw(p) + queue.splice(idx, 1) + await fs.writeFile(p, JSON.stringify(queue, null, 2), "utf-8") + }) + } + + async waitForMessage( + agentId: string, + types: TeammateMessageType[], + opts: { timeoutMs?: number } = {}, + ): Promise { + const { timeoutMs = 60_000 } = opts + + if (this._disposed) return null + + const immediate = await this.dequeueMatching(agentId, types) + if (immediate) return immediate + + return new Promise((resolve) => { + let settled = false + + const finish = (msg: TeammateMessage | null) => { + if (settled) return + settled = true + clearTimeout(timer) + clearInterval(poll) + resolve(msg) + } + + const timer = setTimeout(() => finish(null), timeoutMs) + + const poll = setInterval(async () => { + if (settled || this._disposed) { + finish(null) + return + } + const msg = await this.dequeueMatching(agentId, types) + if (msg) finish(msg) + }, POLL_INTERVAL_MS) + }) + } + + dispose(): void { + this._disposed = true + } + + // ------------------------------------------------------------------------- + // Internals + // ------------------------------------------------------------------------- + 
+ private inboxPath(agentId: string): string { + // Sanitise agentId so it's safe as a filename. + const safe = agentId.replace(/[^a-zA-Z0-9_-]/g, "_") + return path.join(this.baseDir, `${safe}.json`) + } + + /** Ensure the inbox file and its parent directory exist. Race-safe. */ + private async ensureInbox(agentId: string): Promise { + const p = this.inboxPath(agentId) + await fs.mkdir(path.dirname(p), { recursive: true }) + try { + // wx = exclusive create; throws EEXIST if the file is already there + await fs.writeFile(p, "[]", { flag: "wx", encoding: "utf-8" }) + } catch (err) { + if ((err as NodeJS.ErrnoException).code !== "EEXIST") throw err + } + return p + } + + private async readRaw(filePath: string): Promise { + try { + const raw = await fs.readFile(filePath, "utf-8") + return JSON.parse(raw) as TeammateMessage[] + } catch { + return [] + } + } + + private async withLock(filePath: string, fn: () => Promise): Promise { + const release = await lockfile.lock(filePath, { + retries: { retries: 10, minTimeout: 50, maxTimeout: 200 }, + }) + try { + await fn() + } finally { + await release() + } + } + + /** Read, find the first matching message, remove it, write back. 
*/ + private async dequeueMatching(agentId: string, types: TeammateMessageType[]): Promise { + const p = this.inboxPath(agentId) + try { + await fs.access(p) + } catch { + return null // inbox file doesn't exist yet + } + + let result: TeammateMessage | null = null + await this.withLock(p, async () => { + const queue = await this.readRaw(p) + const idx = queue.findIndex((m) => types.includes(m.type)) + if (idx >= 0) { + result = queue.splice(idx, 1)[0] + await fs.writeFile(p, JSON.stringify(queue, null, 2), "utf-8") + } + }) + return result + } +} diff --git a/src/core/swarm/IMailboxService.ts b/src/core/swarm/IMailboxService.ts new file mode 100644 index 00000000000..6c698e9ec4c --- /dev/null +++ b/src/core/swarm/IMailboxService.ts @@ -0,0 +1,30 @@ +import type { TeammateMessage, TeammateMessageType } from "@roo-code/types" + +/** + * Abstraction over a mailbox backend. + * - InMemoryMailbox — in-process workers (P3, zero latency) + * - FileMailbox — cross-process workers (P5, uses proper-lockfile) + */ +export interface IMailboxService { + /** Deliver a message to the agent identified by `to`. */ + send(to: string, msg: TeammateMessage): Promise + + /** Peek at all pending messages for `agentId` without removing them. */ + read(agentId: string): Promise + + /** Remove the message at `idx` for `agentId`. */ + markRead(agentId: string, idx: number): Promise + + /** + * Wait for the next message whose type is in `types`. + * Removes the matched message from the queue before returning it. + * Returns null if the timeout elapses before a matching message arrives. 
+ */ + waitForMessage( + agentId: string, + types: TeammateMessageType[], + opts?: { timeoutMs?: number }, + ): Promise + + dispose(): void +} diff --git a/src/core/swarm/InMemoryMailbox.ts b/src/core/swarm/InMemoryMailbox.ts new file mode 100644 index 00000000000..f8aa1cbe2be --- /dev/null +++ b/src/core/swarm/InMemoryMailbox.ts @@ -0,0 +1,82 @@ +import EventEmitter from "events" + +import type { TeammateMessage, TeammateMessageType } from "@roo-code/types" + +import type { IMailboxService } from "./IMailboxService" + +/** + * In-process mailbox backed by a plain Map + EventEmitter. + * Delivery is synchronous (within the same event loop turn) so there is zero + * polling latency. A configurable timeout provides a safety fallback. + */ +export class InMemoryMailbox implements IMailboxService { + private queues = new Map() + private emitter = new EventEmitter() + + constructor() { + // Suppress MaxListenersExceededWarning when many workers are waiting simultaneously. + this.emitter.setMaxListeners(0) + } + + async send(to: string, msg: TeammateMessage): Promise { + if (!this.queues.has(to)) this.queues.set(to, []) + this.queues.get(to)!.push(msg) + this.emitter.emit(`msg:${to}`) + } + + async read(agentId: string): Promise { + return [...(this.queues.get(agentId) ?? [])] + } + + async markRead(agentId: string, idx: number): Promise { + this.queues.get(agentId)?.splice(idx, 1) + } + + async waitForMessage( + agentId: string, + types: TeammateMessageType[], + opts: { timeoutMs?: number } = {}, + ): Promise { + const { timeoutMs = 60_000 } = opts + + // Check the existing queue before registering a listener. 
+ const match = this.dequeueMatching(agentId, types) + if (match) return match + + return new Promise((resolve) => { + let settled = false + + const timer = setTimeout(() => { + if (settled) return + settled = true + this.emitter.off(`msg:${agentId}`, handler) + resolve(null) + }, timeoutMs) + + const handler = () => { + if (settled) return + const msg = this.dequeueMatching(agentId, types) + if (!msg) return // Message was of a different type — keep waiting. + settled = true + clearTimeout(timer) + this.emitter.off(`msg:${agentId}`, handler) + resolve(msg) + } + + this.emitter.on(`msg:${agentId}`, handler) + }) + } + + dispose(): void { + this.queues.clear() + this.emitter.removeAllListeners() + } + + private dequeueMatching(agentId: string, types: TeammateMessageType[]): TeammateMessage | null { + const q = this.queues.get(agentId) + if (!q) return null + const idx = q.findIndex((m) => types.includes(m.type)) + if (idx < 0) return null + return q.splice(idx, 1)[0] + } +} diff --git a/src/core/swarm/LeaderPermissionBridge.ts b/src/core/swarm/LeaderPermissionBridge.ts new file mode 100644 index 00000000000..0a0d2332943 --- /dev/null +++ b/src/core/swarm/LeaderPermissionBridge.ts @@ -0,0 +1,81 @@ +import * as vscode from "vscode" +import { randomUUID } from "crypto" + +import type { AgentColorName } from "@roo-code/types" + +/** + * A pending tool-approval request from a concurrent worker agent. + */ +export interface WorkerPermissionRequest { + requestId: string + workerTaskId: string + agentName: string + color: AgentColorName + /** Canonical tool name (e.g. "write_to_file") */ + toolName: string + /** Human-readable description shown in the approval dialog */ + description: string +} + +type PermissionHandler = (req: WorkerPermissionRequest) => Promise + +// Module-level singleton — one ClineProvider registers itself; all in-process +// worker tasks can reach it without needing a direct reference. 
+let activeHandler: PermissionHandler | null = null + +/** + * Called by ClineProvider to register itself as the approval surface. + * Returns a cleanup function that deregisters the handler on dispose. + */ +export function registerPermissionHandler(handler: PermissionHandler): () => void { + activeHandler = handler + return () => { + if (activeHandler === handler) activeHandler = null + } +} + +/** True when a ClineProvider has registered a handler. */ +export function hasPermissionHandler(): boolean { + return activeHandler !== null +} + +/** + * Called by concurrent workers when they need tool-use approval. + * Routes the request to the registered leader handler. + * Returns false (deny) when no handler is registered — safe fail-closed. + */ +export async function submitWorkerPermissionRequest( + workerTaskId: string, + agentName: string, + color: AgentColorName, + toolName: string, + description: string, +): Promise { + if (!activeHandler) return false + const req: WorkerPermissionRequest = { + requestId: randomUUID(), + workerTaskId, + agentName, + color, + toolName, + description, + } + return activeHandler(req) +} + +/** + * Default VS Code approval handler used by ClineProvider. + * Shows a modal dialog with the worker's identity and tool details. + */ +export async function showWorkerPermissionDialog(req: WorkerPermissionRequest): Promise { + const label = `${req.agentName} (${req.color})` + const detail = req.description.length > 400 ? 
req.description.slice(0, 400) + "…" : req.description + + const result = await vscode.window.showInformationMessage( + `Worker approval request from ${label}`, + { modal: true, detail: `Tool: ${req.toolName}\n\n${detail}` }, + "Allow", + "Deny", + ) + return result === "Allow" +} diff --git a/src/core/swarm/MailboxManager.ts b/src/core/swarm/MailboxManager.ts new file mode 100644 index 00000000000..be755ad8a2d --- /dev/null +++ b/src/core/swarm/MailboxManager.ts @@ -0,0 +1,119 @@ +import os from "os" +import path from "path" + +import type { TeammateMessage } from "@roo-code/types" + +import type { IMailboxService } from "./IMailboxService" +import { InMemoryMailbox } from "./InMemoryMailbox" +import { FileMailbox } from "./FileMailbox" + +/** + * Manages one mailbox per swarm session. + * A session's mailbox is only created when the session opts in to persistent + * workers (`persistent: true` on spawnConcurrentChildren). + * + * Two backends are available: + * - InMemoryMailbox — in-process workers (zero latency, default) + * - FileMailbox — cross-process workers (polls JSON file with lockfile) + * + * All public methods are no-ops when the session has no mailbox, so callers + * don't need to guard against missing sessions. + */ +export class MailboxManager { + private mailboxes = new Map() + + /** Create an in-process (memory) mailbox for the session. */ + createMailbox(sessionId: string): void { + if (!this.mailboxes.has(sessionId)) { + this.mailboxes.set(sessionId, new InMemoryMailbox()) + } + } + + /** + * Create a file-backed mailbox for the session. + * Used by cross-process workers (P5/P6). + * Files land at `/.json`. + * Defaults to `~/.roo/swarm//` when baseDir is omitted. + */ + createFileMailbox(sessionId: string, baseDir?: string): void { + if (!this.mailboxes.has(sessionId)) { + const dir = baseDir ?? 
path.join(os.homedir(), ".roo", "swarm", sessionId) + this.mailboxes.set(sessionId, new FileMailbox(dir)) + } + } + + getMailbox(sessionId: string): IMailboxService | undefined { + return this.mailboxes.get(sessionId) + } + + /** + * Called by a worker when it finishes a turn. + * Sends an `idle_notification` to the leader's queue (`leader:<sessionId>`) so + * the leader's `on(WorkerIdle)` handler can read it if needed. + */ + async notifyIdle( + sessionId: string, + workerId: string, + summary: string, + payload?: Record, + ): Promise { + const mailbox = this.mailboxes.get(sessionId) + if (!mailbox) return + await mailbox.send(`leader:${sessionId}`, { + type: "idle_notification", + from: workerId, + to: `leader:${sessionId}`, + payload: { workerId, summary, ...payload }, + ts: Date.now(), + }) + } + + /** Leader assigns a new task to an idle worker. */ + async assignTask(sessionId: string, workerId: string, message: string): Promise { + const mailbox = this.mailboxes.get(sessionId) + if (!mailbox) throw new Error(`[MailboxManager] No mailbox for session "${sessionId}"`) + await mailbox.send(workerId, { + type: "task_assignment", + from: `leader:${sessionId}`, + to: workerId, + payload: { message }, + ts: Date.now(), + }) + } + + /** Leader tells a worker to stop after its current idle period. */ + async shutdownWorker(sessionId: string, workerId: string): Promise { + const mailbox = this.mailboxes.get(sessionId) + if (!mailbox) return + await mailbox.send(workerId, { + type: "shutdown_request", + from: `leader:${sessionId}`, + to: workerId, + ts: Date.now(), + }) + } + + /** + * Waits for the worker's next `task_assignment` or `shutdown_request`. + * Returns null if the timeout fires first or the session has no mailbox. 
+ */ + async waitForNextMessage( + sessionId: string, + workerId: string, + opts?: { timeoutMs?: number }, + ): Promise { + const mailbox = this.mailboxes.get(sessionId) + if (!mailbox) return null + return mailbox.waitForMessage(workerId, ["task_assignment", "shutdown_request"], opts) + } + + destroyMailbox(sessionId: string): void { + this.mailboxes.get(sessionId)?.dispose() + this.mailboxes.delete(sessionId) + } + + dispose(): void { + for (const mailbox of this.mailboxes.values()) mailbox.dispose() + this.mailboxes.clear() + } +} diff --git a/src/core/swarm/SwarmRegistry.ts b/src/core/swarm/SwarmRegistry.ts new file mode 100644 index 00000000000..73954f04b56 --- /dev/null +++ b/src/core/swarm/SwarmRegistry.ts @@ -0,0 +1,62 @@ +import type { AgentColorName, AgentIdentity, SwarmSession } from "@roo-code/types" + +const AGENT_COLORS: AgentColorName[] = ["blue", "green", "yellow", "purple", "orange", "pink", "cyan", "red"] + +/** + * In-memory registry that tracks swarm sessions and assigns stable identities + * (name + color) to concurrent worker agents. + * + * One instance lives on ClineProvider. Sessions are created when + * spawnConcurrentChildren() starts and destroyed when all children resolve. + */ +export class SwarmRegistry { + private sessions: Map = new Map() + private colorIndex = 0 + + createSession(sessionId: string, leaderTaskId: string): SwarmSession { + const session: SwarmSession = { sessionId, leaderTaskId, teammates: {} } + this.sessions.set(sessionId, session) + return session + } + + /** Round-robin color assignment, stable across the provider lifetime. 
*/ + assignColor(): AgentColorName { + const color = AGENT_COLORS[this.colorIndex % AGENT_COLORS.length] + this.colorIndex++ + return color + } + + registerWorker(sessionId: string, identity: AgentIdentity): void { + const session = this.sessions.get(sessionId) + if (session) { + session.teammates[identity.taskId] = identity + } + } + + unregisterWorker(sessionId: string, taskId: string): void { + const session = this.sessions.get(sessionId) + if (session) { + delete session.teammates[taskId] + } + } + + getSession(sessionId: string): SwarmSession | undefined { + return this.sessions.get(sessionId) + } + + /** Find the session a given task belongs to (as a worker). */ + getSessionForTask(taskId: string): SwarmSession | undefined { + for (const session of this.sessions.values()) { + if (taskId in session.teammates) return session + } + return undefined + } + + destroySession(sessionId: string): void { + this.sessions.delete(sessionId) + } + + dispose(): void { + this.sessions.clear() + } +} diff --git a/src/core/task-persistence/apiMessages.ts b/src/core/task-persistence/apiMessages.ts index 7672f6f7ee6..8c5dfeaf71e 100644 --- a/src/core/task-persistence/apiMessages.ts +++ b/src/core/task-persistence/apiMessages.ts @@ -35,6 +35,8 @@ export type ApiMessage = Anthropic.MessageParam & { truncationParent?: string // Identifies a message as a truncation boundary marker isTruncationMarker?: boolean + // Identifies a message as synthetic/meta (e.g., retry markers) + isMeta?: boolean } export async function readApiMessages({ diff --git a/src/core/task/Task.ts b/src/core/task/Task.ts index 005bb0f292b..c1b869da9bb 100644 --- a/src/core/task/Task.ts +++ b/src/core/task/Task.ts @@ -126,7 +126,14 @@ import { checkpointDiff, } from "../checkpoints" import { processUserContentMentions } from "../mentions/processUserContentMentions" -import { getMessagesSinceLastSummary, summarizeConversation, getEffectiveApiHistory } from "../condense" +import { + getMessagesSinceLastSummary, + 
summarizeConversation, + getEffectiveApiHistory, + extractSessionMemory, +} from "../condense" +import { estimateMessageTokens } from "../condense/sessionMemory" +import { createHookSystem, type HookSystem } from "../hooks" import { MessageQueueService } from "../message-queue/MessageQueueService" import { AutoApprovalHandler, checkAutoApproval } from "../auto-approval" import { MessageManager } from "../message-manager" @@ -417,6 +424,14 @@ export class Task extends EventEmitter implements TaskLike { // MessageManager for high-level message operations (lazy initialized) private _messageManager?: MessageManager + // Session Memory (per-task, avoids module-level global state) + private sessionMemoryContent: string | undefined + private sessionMemoryLastMessageId: string | undefined + private sessionMemoryTokensAtLastExtraction = 0 + private sessionMemoryInitialized = false + private sessionMemoryToolCallsSinceUpdate = 0 + private sessionMemoryExtracting = false + constructor({ provider, apiConfiguration, @@ -467,10 +482,9 @@ export class Task extends EventEmitter implements TaskLike { images: historyItem ? [] : images, } - // Normal use-case is usually retry similar history task with new workspace. - this.workspacePath = parentTask - ? parentTask.workspacePath - : (workspacePath ?? getWorkspacePath(path.join(os.homedir(), "Desktop"))) + // Explicit workspacePath always wins (e.g., worktree tasks). Falls back to parent's path, then default. + this.workspacePath = + workspacePath ?? parentTask?.workspacePath ?? getWorkspacePath(path.join(os.homedir(), "Desktop")) this.instanceId = crypto.randomUUID().slice(0, 8) this.taskNumber = -1 @@ -3614,6 +3628,10 @@ export class Task extends EventEmitter implements TaskLike { this.consecutiveNoToolUseCount = 0 } + // Fire-and-forget session memory extraction after each tool turn. + // Runs in background and never blocks the agentic loop. 
+ void this.maybeExtractSessionMemory() + // Push to stack if there's content OR if we're paused waiting for a subtask. // When paused, we push an empty item so the loop continues to the pause check. if (this.userMessageContent.length > 0 || this.isPaused) { @@ -3887,6 +3905,13 @@ export class Task extends EventEmitter implements TaskLike { // Generate environment details to include in the condensed summary const environmentDetails = await getEnvironmentDetails(this, true) + const forcedCfg = vscode.workspace.getConfiguration(Package.name) + const forcedHooksEnabled = forcedCfg.get("compactHooksEnabled", true) + const forcedHooksPath = forcedCfg.get("compactHooksPath") || undefined + const forcedHookSystem: HookSystem | undefined = forcedHooksEnabled + ? createHookSystem(this.cwd, true, forcedHooksPath) + : undefined + // Force aggressive truncation by keeping only 75% of the conversation history const truncateResult = await manageContext({ messages: this.apiConversationHistory, @@ -3902,6 +3927,12 @@ export class Task extends EventEmitter implements TaskLike { currentProfileId, metadata, environmentDetails, + sessionMemory: this.sessionMemoryContent, + sessionMemoryCompactEnabled: forcedCfg.get("sessionMemoryCompactEnabled", true), + lastSummarizedMessageId: this.sessionMemoryLastMessageId, + hookSystem: forcedHookSystem, + promptTooLongRetryEnabled: true, + promptTooLongMaxRetries: 2, }) if (truncateResult.messages !== this.apiConversationHistory) { @@ -3984,6 +4015,66 @@ export class Task extends EventEmitter implements TaskLike { } } + /** + * Conditionally extracts session memory from the current conversation history. + * Runs asynchronously and fire-and-forget — never blocks the main agentic loop. + * Guards against concurrent extractions and respects the configured thresholds. 
+ */ + private async maybeExtractSessionMemory(): Promise { + if (this.sessionMemoryExtracting) { + return + } + + const cfg = vscode.workspace.getConfiguration(Package.name) + if (!cfg.get("sessionMemoryCompactEnabled", true)) { + return + } + + const minTokensToInit = cfg.get("sessionMemoryMinTokensToInit", 10000) + const minTokensBetweenUpdate = cfg.get("sessionMemoryMinTokensBetweenUpdate", 5000) + const toolCallsBetweenUpdates = cfg.get("sessionMemoryToolCallsBetweenUpdates", 3) + + this.sessionMemoryToolCallsSinceUpdate++ + if (this.sessionMemoryToolCallsSinceUpdate < toolCallsBetweenUpdates) { + return + } + + const currentTokens = estimateMessageTokens(this.apiConversationHistory) + + if (!this.sessionMemoryInitialized && currentTokens < minTokensToInit) { + return + } + + if (this.sessionMemoryInitialized) { + const tokensSince = currentTokens - this.sessionMemoryTokensAtLastExtraction + if (tokensSince < minTokensBetweenUpdate) { + return + } + } + + this.sessionMemoryExtracting = true + this.sessionMemoryToolCallsSinceUpdate = 0 + const snapshotLastMessageId = this.apiConversationHistory.at(-1)?.id + + try { + const updated = await extractSessionMemory( + this.apiConversationHistory, + this.sessionMemoryContent ?? null, + this.api, + ) + this.sessionMemoryContent = updated + this.sessionMemoryInitialized = true + this.sessionMemoryTokensAtLastExtraction = currentTokens + if (snapshotLastMessageId) { + this.sessionMemoryLastMessageId = snapshotLastMessageId + } + } catch (err) { + console.error("[Session Memory] Extraction failed:", err) + } finally { + this.sessionMemoryExtracting = false + } + } + public async *attemptApiRequest( retryAttempt: number = 0, options: { skipProviderRateLimit?: boolean } = {}, @@ -4111,6 +4202,15 @@ export class Task extends EventEmitter implements TaskLike { ? 
await this.getFilesReadByRooSafely("attemptApiRequest") : undefined + const cfg = vscode.workspace.getConfiguration(Package.name) + const promptTooLongRetryEnabled = cfg.get("promptTooLongRetryEnabled", true) + const promptTooLongMaxRetries = cfg.get("promptTooLongMaxRetries", 2) + const compactHooksEnabled = cfg.get("compactHooksEnabled", true) + const compactHooksPath = cfg.get("compactHooksPath") || undefined + const hookSystem: HookSystem | undefined = compactHooksEnabled + ? createHookSystem(this.cwd, true, compactHooksPath) + : undefined + try { const truncateResult = await manageContext({ messages: this.apiConversationHistory, @@ -4130,6 +4230,12 @@ export class Task extends EventEmitter implements TaskLike { filesReadByRoo: contextMgmtFilesReadByRoo, cwd: this.cwd, rooIgnoreController: this.rooIgnoreController, + sessionMemory: this.sessionMemoryContent, + sessionMemoryCompactEnabled: cfg.get("sessionMemoryCompactEnabled", true), + lastSummarizedMessageId: this.sessionMemoryLastMessageId, + hookSystem, + promptTooLongRetryEnabled, + promptTooLongMaxRetries, }) if (truncateResult.messages !== this.apiConversationHistory) { await this.overwriteApiConversationHistory(truncateResult.messages) diff --git a/src/core/tools/AttemptCompletionTool.ts b/src/core/tools/AttemptCompletionTool.ts index a70576d75f2..23a5cb0dd22 100644 --- a/src/core/tools/AttemptCompletionTool.ts +++ b/src/core/tools/AttemptCompletionTool.ts @@ -8,6 +8,8 @@ import { formatResponse } from "../prompts/responses" import { Package } from "../../shared/package" import type { ToolUse } from "../../shared/tools" import { t } from "../../i18n" +import type { MailboxManager } from "../swarm/MailboxManager" +import type { SwarmRegistry } from "../swarm/SwarmRegistry" import { BaseTool, ToolCallbacks } from "./BaseTool" @@ -30,7 +32,24 @@ interface DelegationProvider { parentTaskId: string childTaskId: string completionResultSummary: string + completionPayload?: Record + childFailed?: boolean }): 
Promise + /** Returns the live Task instance if it is still registered (concurrent mode check). */ + getTaskById?(taskId: string): import("../task/Task").Task | undefined + /** Resolves a concurrent child's completion Promise in the parent's spawnConcurrentChildren call. */ + resolveChildCompletion?(params: { + childTaskId: string + summary: string + payload?: Record + failed?: boolean + }): Promise + /** SwarmRegistry — used to find which session a task belongs to. */ + swarmRegistry?: SwarmRegistry + /** MailboxManager — only present when the session was started with persistent:true. */ + mailboxManager?: MailboxManager + /** EventEmitter — used to emit swarm lifecycle events. */ + emit?(event: string, ...args: unknown[]): boolean } export class AttemptCompletionTool extends BaseTool<"attempt_completion"> { @@ -107,6 +126,7 @@ export class AttemptCompletionTool extends BaseTool<"attempt_completion"> { if (delegation === "delegated") { this.emitTaskCompleted(task) } + // "reassigned" — worker continues; do NOT emit TaskCompleted if (delegation !== "continue") return } else { // Unexpected status (undefined or "delegated") - log error and skip delegation @@ -149,9 +169,10 @@ export class AttemptCompletionTool extends BaseTool<"attempt_completion"> { /** * Handles the common delegation flow when a subtask completes. 
* Returns: - * - "delegated" when completion was approved and parent resumed - * - "denied" when user denied finishing the subtask - * - "continue" when caller should fall through to normal completion ask flow + * - "delegated" completion was approved and parent resumed (or concurrent Promise resolved) + * - "reassigned" worker received a new task_assignment; LLM loop continues without completing + * - "denied" user denied finishing the subtask + * - "continue" caller should fall through to normal completion ask flow */ private async delegateToParent( task: Task, @@ -159,7 +180,7 @@ export class AttemptCompletionTool extends BaseTool<"attempt_completion"> { provider: DelegationProvider, askFinishSubTaskApproval: () => Promise, pushToolResult: (result: string) => void, - ): Promise<"delegated" | "denied" | "continue"> { + ): Promise<"delegated" | "reassigned" | "denied" | "continue"> { const didApprove = await askFinishSubTaskApproval() if (!didApprove) { @@ -167,12 +188,67 @@ export class AttemptCompletionTool extends BaseTool<"attempt_completion"> { return "denied" } - pushToolResult("") + // If the result is valid JSON, pass it as a structured payload alongside the summary. + let completionPayload: Record | undefined + try { + const parsed = JSON.parse(result) + if (parsed && typeof parsed === "object" && !Array.isArray(parsed)) { + completionPayload = parsed as Record + } + } catch { + // Not JSON — leave completionPayload undefined + } + + // Concurrent path: if the parent task is still alive in the provider's task map, + // this child was spawned concurrently — either enter the idle loop (persistent) or + // resolve its completion Promise immediately. + const parentIsAlive = + typeof provider.getTaskById === "function" && provider.getTaskById(task.parentTaskId!) !== undefined + if (parentIsAlive && typeof provider.resolveChildCompletion === "function") { + // Idle loop — only for persistent swarm sessions (a mailbox exists for the session). 
+ const session = provider.swarmRegistry?.getSessionForTask(task.taskId) + const mailbox = session ? provider.mailboxManager?.getMailbox(session.sessionId) : undefined + + if (session && mailbox) { + // Notify the leader that this worker is idle. + await provider.mailboxManager!.notifyIdle(session.sessionId, task.taskId, result, completionPayload) + provider.emit?.(RooCodeEventName.WorkerIdle, session.sessionId, task.taskId) + + // Wait for leader to either assign a new task or shut us down. + const nextMsg = await provider.mailboxManager!.waitForNextMessage(session.sessionId, task.taskId, { + timeoutMs: 300_000, // 5-minute safety timeout + }) + + if (nextMsg?.type === "task_assignment" && typeof nextMsg.payload?.message === "string") { + // Give worker a new task — do NOT resolve the parent's completion Promise. + pushToolResult( + `[Task complete]\n\nNew task assigned by swarm leader:\n\n${nextMsg.payload.message}`, + ) + return "reassigned" + } + + // shutdown_request or timeout — fall through to complete normally. + if (nextMsg?.type === "shutdown_request") { + provider.emit?.(RooCodeEventName.WorkerShutdown, session.sessionId, task.taskId) + } + } + pushToolResult("") + await provider.resolveChildCompletion({ + childTaskId: task.taskId, + summary: result, + payload: completionPayload, + }) + return "delegated" + } + + // Sequential path: parent was disposed; recreate it from history. 
+ pushToolResult("") await provider.reopenParentFromDelegation({ parentTaskId: task.parentTaskId!, childTaskId: task.taskId, completionResultSummary: result, + completionPayload, }) return "delegated" diff --git a/src/core/tools/GenerateImageTool.ts b/src/core/tools/GenerateImageTool.ts index 3eaa2d84c2d..e8cb0c35aa0 100644 --- a/src/core/tools/GenerateImageTool.ts +++ b/src/core/tools/GenerateImageTool.ts @@ -191,7 +191,7 @@ export class GenerateImageTool extends BaseTool<"generate_image"> { let result if (modelProvider === "roo") { - // Use Roo Code Cloud provider (supports both chat completions and images API) + // Use Roo Code Cloud provider (supports both chat completions and images API) const rooHandler = new RooHandler({} as any) result = await rooHandler.generateImage(prompt, selectedModel, inputImageData, apiMethod) } else { diff --git a/src/core/tools/NewTaskTool.ts b/src/core/tools/NewTaskTool.ts index f36d8e1e379..45bc0998c4b 100644 --- a/src/core/tools/NewTaskTool.ts +++ b/src/core/tools/NewTaskTool.ts @@ -15,13 +15,14 @@ interface NewTaskParams { mode: string message: string todos?: string + worktree?: string } export class NewTaskTool extends BaseTool<"new_task"> { readonly name = "new_task" as const async execute(params: NewTaskParams, task: Task, callbacks: ToolCallbacks): Promise { - const { mode, message, todos } = params + const { mode, message, todos, worktree } = params const { askApproval, handleError, pushToolResult } = callbacks try { @@ -115,6 +116,7 @@ export class NewTaskTool extends BaseTool<"new_task"> { message: unescapedMessage, initialTodos: todoItems, mode, + worktree: worktree || undefined, }) // Reflect delegation in tool result (no pause/unpause, no wait) diff --git a/src/core/tools/RunTeamPhaseTool.ts b/src/core/tools/RunTeamPhaseTool.ts new file mode 100644 index 00000000000..639b2e00877 --- /dev/null +++ b/src/core/tools/RunTeamPhaseTool.ts @@ -0,0 +1,172 @@ +import * as fs from "fs/promises" +import * as path from "path" + 
+import type { TeamConfig } from "@roo-code/types" + +import { Task } from "../task/Task" +import { formatResponse } from "../prompts/responses" +import type { ToolUse } from "../../shared/tools" +import { BaseTool, ToolCallbacks } from "./BaseTool" + +interface RunTeamPhaseParams { + team_slug: string + phase_name: string + task: string + context?: string | null +} + +/** + * Minimal provider surface needed by RunTeamPhaseTool. + * Cast from providerRef.deref() using `provider as TeamPhaseProvider`. + */ +interface TeamPhaseProvider { + getTeamConfig(slug: string): TeamConfig | undefined + delegateParentAndOpenChild(params: { + parentTaskId: string + message: string + initialTodos: [] + mode: string + worktree?: string + parallelQueue?: Array<{ mode: string; message: string; worktree?: string }> + }): Promise + spawnConcurrentChildren(params: { + parentTaskId: string + tasks: Array<{ mode: string; message: string; worktree?: string; role?: string }> + abortOnChildFailure?: boolean + }): Promise> +} + +export class RunTeamPhaseTool extends BaseTool<"run_team_phase"> { + readonly name = "run_team_phase" as const + + async execute(params: RunTeamPhaseParams, task: Task, callbacks: ToolCallbacks): Promise { + const { team_slug, phase_name, task: taskDesc, context } = params + const { askApproval, handleError, pushToolResult } = callbacks + + try { + const provider = task.providerRef.deref() as TeamPhaseProvider | undefined + if (!provider) { + pushToolResult(formatResponse.toolError("Provider reference lost")) + return + } + + // 1) Look up team config + const teamConfig = provider.getTeamConfig(team_slug) + if (!teamConfig) { + pushToolResult( + formatResponse.toolError( + `Team "${team_slug}" not found. 
Make sure .roo/teams/${team_slug}.json exists in the workspace.`, + ), + ) + return + } + + // 2) Find the phase + const phase = teamConfig.phases.find((p) => p.name === phase_name) + if (!phase) { + const available = teamConfig.phases.map((p) => p.name).join(", ") + pushToolResult( + formatResponse.toolError( + `Phase "${phase_name}" not found in team "${team_slug}". Available phases: ${available}`, + ), + ) + return + } + + if (!phase.agents || phase.agents.length === 0) { + pushToolResult(formatResponse.toolError(`Phase "${phase_name}" has no agents defined.`)) + return + } + + // 3) Load shared conventions (non-fatal if missing) + let conventionsPrefix = "" + if (teamConfig.conventions) { + try { + const conventionsPath = path.resolve(task.workspacePath, teamConfig.conventions) + const content = await fs.readFile(conventionsPath, "utf-8") + if (content.trim()) { + conventionsPrefix = `\n${content.trim()}\n\n\n` + } + } catch { + // Conventions file not found — proceed without it + } + } + + // 4) Interpolate {{task}}, {{context}}, {{phase}}, {{team}} in instructions + const interpolate = (template: string) => + template + .replace(/\{\{task\}\}/g, taskDesc) + .replace(/\{\{context\}\}/g, context ?? "") + .replace(/\{\{phase\}\}/g, phase_name) + .replace(/\{\{team\}\}/g, teamConfig.name) + + const agentSpecs = phase.agents.map((agent) => ({ + mode: agent.mode, + role: agent.role, + message: conventionsPrefix + interpolate(agent.instruction), + worktree: agent.worktree, + })) + + // 5) Ask approval + const toolMessage = JSON.stringify({ + tool: "runTeamPhase", + team: teamConfig.name, + phase: phase.label ?? phase_name, + agentCount: agentSpecs.length, + concurrent: phase.concurrent ?? false, + agents: agentSpecs.map((s) => ({ role: s.role ?? 
s.mode, mode: s.mode })), + }) + + const didApprove = await askApproval("tool", toolMessage) + if (!didApprove) return + + task.consecutiveMistakeCount = 0 + + // 6) Execute the phase + if (phase.concurrent) { + // Concurrent: all agents start simultaneously; parent stays alive. + const results = await provider.spawnConcurrentChildren({ + parentTaskId: task.taskId, + tasks: agentSpecs.map(({ mode, message, worktree, role }) => ({ mode, message, worktree, role })), + abortOnChildFailure: phase.abortOnChildFailure ?? false, + }) + pushToolResult(JSON.stringify(results, null, 2)) + } else if (agentSpecs.length === 1) { + // Single sequential agent — simple delegation (like new_task) + await provider.delegateParentAndOpenChild({ + parentTaskId: task.taskId, + message: agentSpecs[0].message, + initialTodos: [], + mode: agentSpecs[0].mode, + worktree: agentSpecs[0].worktree, + }) + // Parent is now suspended; this line is reached only if delegation didn't proceed. + pushToolResult(`Phase "${phase_name}" agent started. Awaiting completion...`) + } else { + // Multiple sequential agents — queue drain (first runs, rest queued) + const [first, ...rest] = agentSpecs + await provider.delegateParentAndOpenChild({ + parentTaskId: task.taskId, + message: first.message, + initialTodos: [], + mode: first.mode, + worktree: first.worktree, + parallelQueue: rest.map(({ mode, message, worktree }) => ({ mode, message, worktree })), + }) + // Parent is now suspended; this line is reached only if delegation didn't proceed. + pushToolResult( + `Phase "${phase_name}" started with ${agentSpecs.length} sequential agents. 
Awaiting all results...`, + ) + } + } catch (error) { + await handleError("running team phase", error as Error) + } + } + + override async handlePartial(task: Task, block: ToolUse<"run_team_phase">): Promise { + const partialMessage = JSON.stringify({ tool: "runTeamPhase" }) + await task.ask("tool", partialMessage, block.partial).catch(() => {}) + } +} + +export const runTeamPhaseTool = new RunTeamPhaseTool() diff --git a/src/core/tools/SpawnParallelTasksTool.ts b/src/core/tools/SpawnParallelTasksTool.ts new file mode 100644 index 00000000000..68d67e07e28 --- /dev/null +++ b/src/core/tools/SpawnParallelTasksTool.ts @@ -0,0 +1,145 @@ +import { TodoItem } from "@roo-code/types" + +import { Task } from "../task/Task" +import { getModeBySlug } from "../../shared/modes" +import { formatResponse } from "../prompts/responses" +import { BaseTool, ToolCallbacks } from "./BaseTool" +import type { ToolUse } from "../../shared/tools" +import { parseMarkdownChecklist } from "./UpdateTodoListTool" + +interface ParallelTaskSpec { + mode: string + message: string + worktree?: string + todos?: string +} + +interface SpawnParallelTasksParams { + tasks: ParallelTaskSpec[] + /** When true, the entire queue is abandoned if any child task fails or is aborted. Default: false (continue on failure). */ + abortOnChildFailure?: boolean + /** + * When true, all child tasks run concurrently (parent stays alive; JS cooperative multitasking). + * When false (default), tasks run sequentially via the parallel queue drain mechanism. 
+ */ + concurrent?: boolean +} + +export class SpawnParallelTasksTool extends BaseTool<"spawn_parallel_tasks"> { + readonly name = "spawn_parallel_tasks" as const + + async execute(params: SpawnParallelTasksParams, task: Task, callbacks: ToolCallbacks): Promise { + const { tasks, abortOnChildFailure = false, concurrent = false } = params + const { askApproval, handleError, pushToolResult } = callbacks + + try { + if (!tasks || tasks.length < 2) { + task.consecutiveMistakeCount++ + task.recordToolError("spawn_parallel_tasks") + task.didToolFailInCurrentTurn = true + pushToolResult(formatResponse.toolError("spawn_parallel_tasks requires at least 2 tasks")) + return + } + + const provider = task.providerRef.deref() + if (!provider) { + pushToolResult(formatResponse.toolError("Provider reference lost")) + return + } + + const state = await provider.getState() + + // Validate all modes up front before asking approval + for (const spec of tasks) { + if (!spec.mode) { + pushToolResult(formatResponse.toolError(`Each task must have a mode`)) + return + } + if (!spec.message) { + pushToolResult(formatResponse.toolError(`Each task must have a message`)) + return + } + const targetMode = getModeBySlug(spec.mode, state?.customModes) + if (!targetMode) { + pushToolResult(formatResponse.toolError(`Invalid mode: ${spec.mode}`)) + return + } + } + + // Parse todos for all tasks + const parsedTasks: Array<{ spec: ParallelTaskSpec; todoItems: TodoItem[] }> = [] + for (const spec of tasks) { + let todoItems: TodoItem[] = [] + if (spec.todos) { + try { + todoItems = parseMarkdownChecklist(spec.todos) + } catch { + pushToolResult( + formatResponse.toolError(`Invalid todos format in task "${spec.message.slice(0, 40)}..."`), + ) + return + } + } + parsedTasks.push({ spec, todoItems }) + } + + const toolMessage = JSON.stringify({ + tool: "spawnParallelTasks", + taskCount: tasks.length, + tasks: tasks.map((t) => ({ mode: t.mode, message: t.message.slice(0, 100) })), + }) + + const 
didApprove = await askApproval("tool", toolMessage) + if (!didApprove) return + + task.consecutiveMistakeCount = 0 + + if (concurrent) { + // Concurrent path: all children run simultaneously; parent stays alive. + // spawnConcurrentChildren awaits all completions and returns aggregated results. + const results = await (provider as any).spawnConcurrentChildren({ + parentTaskId: task.taskId, + tasks: parsedTasks.map((t) => ({ + mode: t.spec.mode, + message: t.spec.message, + worktree: t.spec.worktree || undefined, + todos: t.todoItems, + })), + abortOnChildFailure, + }) + pushToolResult(JSON.stringify(results, null, 2)) + } else { + // Sequential fan-out: first task starts immediately; the rest are queued in the + // parent's history. reopenParentFromDelegation drains the queue, starting each + // child in turn, and resumes the parent with aggregated results when empty. + const [first, ...rest] = parsedTasks + + await (provider as any).delegateParentAndOpenChild({ + parentTaskId: task.taskId, + message: first.spec.message, + initialTodos: first.todoItems, + mode: first.spec.mode, + worktree: first.spec.worktree || undefined, + abortOnChildFailure, + parallelQueue: rest.map((t) => ({ + mode: t.spec.mode, + message: t.spec.message, + worktree: t.spec.worktree || undefined, + todos: t.spec.todos || undefined, + })), + }) + + pushToolResult(`Spawned ${tasks.length} sequential tasks. 
Awaiting all results...`) + } + } catch (error) { + await handleError("spawning parallel tasks", error) + } + } + + override async handlePartial(task: Task, block: ToolUse<"spawn_parallel_tasks">): Promise { + const partialMessage = JSON.stringify({ tool: "spawnParallelTasks" }) + await task.ask("tool", partialMessage, block.partial).catch(() => {}) + } +} + +export const spawnParallelTasksTool = new SpawnParallelTasksTool() diff --git a/src/core/webview/ClineProvider.ts b/src/core/webview/ClineProvider.ts index 2ffe421c095..2a484d29a49 100644 --- a/src/core/webview/ClineProvider.ts +++ b/src/core/webview/ClineProvider.ts @@ -70,12 +70,17 @@ import WorkspaceTracker from "../../integrations/workspace/WorkspaceTracker" import { McpHub } from "../../services/mcp/McpHub" import { McpServerManager } from "../../services/mcp/McpServerManager" +import { worktreeService } from "@roo-code/core" import { MarketplaceManager } from "../../services/marketplace" import { ShadowCheckpointService } from "../../services/checkpoints/ShadowCheckpointService" import { CodeIndexManager } from "../../services/code-index/manager" import type { IndexProgressUpdate } from "../../services/code-index/interfaces/manager" import { MdmService } from "../../services/mdm/MdmService" import { SkillsManager } from "../../services/skills/SkillsManager" +import { TeamsManager } from "../../services/teams/TeamsManager" +import { SwarmRegistry } from "../swarm/SwarmRegistry" +import { MailboxManager } from "../swarm/MailboxManager" +import { registerPermissionHandler, showWorkerPermissionDialog } from "../swarm/LeaderPermissionBridge" import { fileExistsAtPath } from "../../utils/fs" import { setTtsEnabled, setTtsSpeed } from "../../utils/tts" @@ -136,12 +141,25 @@ export class ClineProvider private disposables: vscode.Disposable[] = [] private webviewDisposables: vscode.Disposable[] = [] private view?: vscode.WebviewView | vscode.WebviewPanel - private clineStack: Task[] = [] + private tasks: Map = 
new Map() + private focusedTaskId?: string + private leaderTaskId?: string + private childCompletionHandlers: Map< + string, + { + resolve: (result: { summary: string; payload?: Record }) => void + reject: (reason: Error) => void + } + > = new Map() private codeIndexStatusSubscription?: vscode.Disposable private codeIndexManager?: CodeIndexManager private _workspaceTracker?: WorkspaceTracker // workSpaceTracker read-only for access outside this class protected mcpHub?: McpHub // Change from private to protected protected skillsManager?: SkillsManager + protected teamsManager?: TeamsManager + protected swarmRegistry: SwarmRegistry = new SwarmRegistry() + protected mailboxManager: MailboxManager = new MailboxManager() + private permissionBridgeCleanup?: () => void private marketplaceManager: MarketplaceManager private mdmService?: MdmService private taskCreationCallback: (task: Task) => void @@ -231,6 +249,15 @@ export class ClineProvider this.log(`Failed to initialize Skills Manager: ${error}`) }) + // Initialize Teams Manager for team workflow discovery + this.teamsManager = new TeamsManager(this) + this.teamsManager.initialize().catch((error) => { + this.log(`Failed to initialize Teams Manager: ${error}`) + }) + + // Register as the in-process permission approval surface for concurrent workers. + this.permissionBridgeCleanup = registerPermissionHandler(showWorkerPermissionDialog) + this.marketplaceManager = new MarketplaceManager(this.context, this.customModesManager) // Forward task events to the provider. @@ -464,14 +491,13 @@ export class ClineProvider } } - // Adds a new Task instance to clineStack, marking the start of a new task. - // The instance is pushed to the top of the stack (LIFO order). - // When the task is completed, the top instance is removed, reactivating the - // previous task. + // Registers a Task instance and marks it as focused/leader. 
async addClineToStack(task: Task) { - // Add this cline instance into the stack that represents the order of - // all the called tasks. - this.clineStack.push(task) + this.tasks.set(task.taskId, task) + this.focusedTaskId = task.taskId + if (!task.parentTask) { + this.leaderTaskId = task.taskId + } task.emit(RooCodeEventName.TaskFocused) // Perform special setup provider specific tasks. @@ -503,85 +529,99 @@ export class ClineProvider } } - // Removes and destroys the top Cline instance (the current finished task), - // activating the previous one (resuming the parent task). - async removeClineFromStack(options?: { skipDelegationRepair?: boolean }) { - if (this.clineStack.length === 0) { + // Removes and destroys a Task instance, refocusing the parent if applicable. + // Pass taskId to remove a specific task (e.g. a concurrent child); omit to remove + // whichever task is currently focused. + async removeClineFromStack(options?: { skipDelegationRepair?: boolean; taskId?: string }) { + if (this.tasks.size === 0) { return } - // Pop the top Cline instance from the stack. - let task = this.clineStack.pop() + const resolveId = options?.taskId ?? this.focusedTaskId + if (!resolveId) { + return + } - if (task) { - // Capture delegation metadata before abort/dispose, since abortTask(true) - // is async and the task reference is cleared afterwards. - const childTaskId = task.taskId - const parentTaskId = task.parentTaskId + let task = this.tasks.get(resolveId) + if (!task) { + return + } - task.emit(RooCodeEventName.TaskUnfocused) + this.tasks.delete(resolveId) - try { - // Abort the running task and set isAbandoned to true so - // all running promises will exit as well. - await task.abortTask(true) - } catch (e) { - this.log( - `[ClineProvider#removeClineFromStack] abortTask() failed ${task.taskId}.${task.instanceId}: ${e.message}`, - ) + // Update focus: if we just removed the focused task, focus the parent (or any remaining task). 
+ if (this.focusedTaskId === resolveId) { + const parentId = task.parentTask?.taskId + if (parentId && this.tasks.has(parentId)) { + this.focusedTaskId = parentId + } else { + // Fall back to the most recently added task, or clear. + const remaining = Array.from(this.tasks.keys()) + this.focusedTaskId = remaining.length > 0 ? remaining[remaining.length - 1] : undefined } + } - // Remove event listeners before clearing the reference. - const cleanupFunctions = this.taskEventListeners.get(task) + // Update leader if the leader was removed. + if (this.leaderTaskId === resolveId) { + this.leaderTaskId = this.focusedTaskId + } - if (cleanupFunctions) { - cleanupFunctions.forEach((cleanup) => cleanup()) - this.taskEventListeners.delete(task) - } + // Capture delegation metadata before abort/dispose. + const childTaskId = task.taskId + const parentTaskId = task.parentTaskId - // Make sure no reference kept, once promises end it will be - // garbage collected. - task = undefined + task.emit(RooCodeEventName.TaskUnfocused) - // Delegation-aware parent metadata repair: - // If the popped task was a delegated child, repair the parent's metadata - // so it transitions from "delegated" back to "active" and becomes resumable - // from the task history list. - // Skip when called from delegateParentAndOpenChild() during nested delegation - // transitions (A→B→C), where the caller intentionally replaces the active - // child and will update the parent to point at the new child. 
- if (parentTaskId && childTaskId && !options?.skipDelegationRepair) { - try { - const { historyItem: parentHistory } = await this.getTaskWithId(parentTaskId) - - if (parentHistory.status === "delegated" && parentHistory.awaitingChildId === childTaskId) { - await this.updateTaskHistory({ - ...parentHistory, - status: "active", - awaitingChildId: undefined, - }) - this.log( - `[ClineProvider#removeClineFromStack] Repaired parent ${parentTaskId} metadata: delegated → active (child ${childTaskId} removed)`, - ) - } - } catch (err) { - // Non-fatal: log but do not block the pop operation. + try { + await task.abortTask(true) + } catch (e) { + this.log( + `[ClineProvider#removeClineFromStack] abortTask() failed ${task.taskId}.${task.instanceId}: ${e.message}`, + ) + } + + const cleanupFunctions = this.taskEventListeners.get(task) + if (cleanupFunctions) { + cleanupFunctions.forEach((cleanup) => cleanup()) + this.taskEventListeners.delete(task) + } + + // Allow GC. + task = undefined + + // Delegation-aware parent metadata repair: + // Skip when called from delegateParentAndOpenChild() during nested delegation + // transitions (A→B→C), where the caller will update the parent to point at the new child. + if (parentTaskId && childTaskId && !options?.skipDelegationRepair) { + try { + const { historyItem: parentHistory } = await this.getTaskWithId(parentTaskId) + + if (parentHistory.status === "delegated" && parentHistory.awaitingChildId === childTaskId) { + await this.updateTaskHistory({ + ...parentHistory, + status: "active", + awaitingChildId: undefined, + }) this.log( - `[ClineProvider#removeClineFromStack] Failed to repair parent metadata for ${parentTaskId} (non-fatal): ${ - err instanceof Error ? 
err.message : String(err) - }`, + `[ClineProvider#removeClineFromStack] Repaired parent ${parentTaskId} metadata: delegated → active (child ${childTaskId} removed)`, ) } + } catch (err) { + this.log( + `[ClineProvider#removeClineFromStack] Failed to repair parent metadata for ${parentTaskId} (non-fatal): ${ + err instanceof Error ? err.message : String(err) + }`, + ) } } } getTaskStackSize(): number { - return this.clineStack.length + return this.tasks.size } public getCurrentTaskStack(): string[] { - return this.clineStack.map((cline) => cline.taskId) + return Array.from(this.tasks.keys()) } // Pending Edit Operations Management @@ -672,9 +712,9 @@ export class ClineProvider this._disposed = true this.log("Disposing ClineProvider...") - // Clear all tasks from the stack. - while (this.clineStack.length > 0) { - await this.removeClineFromStack() + // Clear all tasks from the map. + for (const taskId of Array.from(this.tasks.keys())) { + await this.removeClineFromStack({ taskId }) } this.log("Cleared all tasks") @@ -709,6 +749,11 @@ export class ClineProvider this.mcpHub = undefined await this.skillsManager?.dispose() this.skillsManager = undefined + this.teamsManager = undefined + this.swarmRegistry.dispose() + this.mailboxManager.dispose() + this.permissionBridgeCleanup?.() + this.permissionBridgeCleanup = undefined this.marketplaceManager?.cleanup() this.customModesManager?.dispose() this.taskHistoryStore.dispose() @@ -1094,30 +1139,33 @@ export class ClineProvider }) if (isRehydratingCurrentTask) { - // Replace the current task in-place to avoid UI flicker - const stackIndex = this.clineStack.length - 1 + // Replace the focused task in-place to avoid UI flicker. + const oldTask = this.focusedTaskId ? 
this.tasks.get(this.focusedTaskId) : undefined - // Properly dispose of the old task to ensure garbage collection - const oldTask = this.clineStack[stackIndex] + if (oldTask) { + try { + await oldTask.abortTask(true) + } catch (e) { + this.log( + `[createTaskWithHistoryItem] abortTask() failed for old task ${oldTask.taskId}.${oldTask.instanceId}: ${e.message}`, + ) + } - // Abort the old task to stop running processes and mark as abandoned - try { - await oldTask.abortTask(true) - } catch (e) { - this.log( - `[createTaskWithHistoryItem] abortTask() failed for old task ${oldTask.taskId}.${oldTask.instanceId}: ${e.message}`, - ) - } + const cleanupFunctions = this.taskEventListeners.get(oldTask) + if (cleanupFunctions) { + cleanupFunctions.forEach((cleanup) => cleanup()) + this.taskEventListeners.delete(oldTask) + } - // Remove event listeners from the old task - const cleanupFunctions = this.taskEventListeners.get(oldTask) - if (cleanupFunctions) { - cleanupFunctions.forEach((cleanup) => cleanup()) - this.taskEventListeners.delete(oldTask) + this.tasks.delete(oldTask.taskId) } - // Replace the task in the stack - this.clineStack[stackIndex] = task + // Register the new task under its own id while preserving focus. 
+ this.tasks.set(task.taskId, task) + this.focusedTaskId = task.taskId + if (!task.parentTask) { + this.leaderTaskId = task.taskId + } task.emit(RooCodeEventName.TaskFocused) // Perform preparation tasks and set up event listeners @@ -1265,8 +1313,8 @@ export class ClineProvider `style-src ${webview.cspSource} 'unsafe-inline' https://* http://${localServerUrl} http://0.0.0.0:${localPort}`, `img-src ${webview.cspSource} https://storage.googleapis.com https://img.clerk.com data:`, `media-src ${webview.cspSource}`, - `script-src 'unsafe-eval' ${webview.cspSource} https://* https://*.posthog.com http://${localServerUrl} http://0.0.0.0:${localPort} 'nonce-${nonce}'`, - `connect-src ${webview.cspSource} ${openRouterDomain} https://* https://*.posthog.com ws://${localServerUrl} ws://0.0.0.0:${localPort} http://${localServerUrl} http://0.0.0.0:${localPort}`, + `script-src 'unsafe-eval' ${webview.cspSource} https://* http://${localServerUrl} http://0.0.0.0:${localPort} 'nonce-${nonce}'`, + `connect-src ${webview.cspSource} ${openRouterDomain} https://* ws://${localServerUrl} ws://0.0.0.0:${localPort} http://${localServerUrl} http://0.0.0.0:${localPort}`, ] return /*html*/ ` @@ -1283,7 +1331,7 @@ export class ClineProvider window.AUDIO_BASE_URI = "${audioUri}" window.MATERIAL_ICONS_BASE_URI = "${materialIconsUri}" - Roo Code + Moo Code
@@ -1362,7 +1410,7 @@ export class ClineProvider window.AUDIO_BASE_URI = "${audioUri}" window.MATERIAL_ICONS_BASE_URI = "${materialIconsUri}" - Roo Code + Moo Code @@ -1859,13 +1907,7 @@ export class ClineProvider /* Condenses a task's message history to use fewer tokens. */ async condenseTaskContext(taskId: string) { - let task: Task | undefined - for (let i = this.clineStack.length - 1; i >= 0; i--) { - if (this.clineStack[i].taskId === taskId) { - task = this.clineStack[i] - break - } - } + const task = this.tasks.get(taskId) if (!task) { throw new Error(`Task with id ${taskId} not found in stack`) } @@ -2232,7 +2274,7 @@ export class ClineProvider // Ignore this error. } - const telemetryKey = process.env.POSTHOG_API_KEY + const telemetryKey = "" // Telemetry disabled — PostHog key injection removed const machineId = vscode.env.machineId const mergedAllowedCommands = this.mergeAllowedCommands(allowedCommands) const mergedDeniedCommands = this.mergeDeniedCommands(deniedCommands) @@ -2751,6 +2793,10 @@ export class ClineProvider return this.skillsManager } + public getTeamConfig(slug: string): import("@roo-code/types").TeamConfig | undefined { + return this.teamsManager?.getTeamConfig(slug) + } + /** * Check if the current state is compliant with MDM policy * @returns true if compliant or no MDM policy exists, false if MDM policy exists and user is non-compliant @@ -2829,11 +2875,14 @@ export class ClineProvider */ public getCurrentTask(): Task | undefined { - if (this.clineStack.length === 0) { + if (!this.focusedTaskId) { return undefined } + return this.tasks.get(this.focusedTaskId) + } - return this.clineStack[this.clineStack.length - 1] + public getTaskById(taskId: string): Task | undefined { + return this.tasks.get(taskId) } public getRecentTasks(): string[] { @@ -2881,6 +2930,39 @@ export class ClineProvider return this.recentTasksCache } + /** + * Returns a snapshot of the parallel-queue state for a given task. 
+ * Useful for debugging and UI introspection of background/queued work. + */ + public async getParallelTaskStatus( + taskId: string, + ): Promise { + try { + const { historyItem } = await this.getTaskWithId(taskId) + const activeChild = this.getCurrentTask() + const activeChildId = + activeChild && historyItem.awaitingChildId === activeChild.taskId ? activeChild.taskId : undefined + return { + taskId, + historyStatus: historyItem.status, + worktreePath: historyItem.worktreePath, + queuedTasks: (historyItem.parallelQueue ?? []).map(({ mode, message, worktree }) => ({ + mode, + message, + worktree, + })), + completedResults: (historyItem.parallelResults ?? []).map(({ taskId: tid, summary, error }) => ({ + taskId: tid, + summary, + error, + })), + activeChildId, + } + } catch { + return undefined + } + } + // When initializing a new task, (not from history but from a tool command // new_task) there is no need to remove the previous task since the new // task is a subtask of the previous one, and when it finishes it is removed @@ -2959,12 +3041,12 @@ export class ClineProvider task: text, images, experiments, - rootTask: this.clineStack.length > 0 ? this.clineStack[0] : undefined, + rootTask: this.leaderTaskId ? this.tasks.get(this.leaderTaskId) : undefined, parentTask, - taskNumber: this.clineStack.length + 1, + taskNumber: this.tasks.size + 1, onCreated: this.taskCreationCallback, initialTodos: options.initialTodos, - // Ensure this task is present in clineStack before startTask() emits + // Ensure this task is present in the tasks map before startTask() emits // its initial state update, so state.currentTaskId is available ASAP. startTask: false, ...options, @@ -3071,8 +3153,8 @@ export class ClineProvider // Clear the current task without treating it as a subtask. // This is used when the user cancels a task that is not a subtask. 
public async clearTask(): Promise { - if (this.clineStack.length > 0) { - const task = this.clineStack[this.clineStack.length - 1] + const task = this.getCurrentTask() + if (task) { console.log(`[clearTask] clearing task ${task.taskId}.${task.instanceId}`) await this.removeClineFromStack() } @@ -3233,8 +3315,11 @@ export class ClineProvider message: string initialTodos: TodoItem[] mode: string + worktree?: string + abortOnChildFailure?: boolean + parallelQueue?: Array<{ mode: string; message: string; worktree?: string; todos?: string }> }): Promise { - const { parentTaskId, message, initialTodos, mode } = params + const { parentTaskId, message, initialTodos, mode, worktree, abortOnChildFailure, parallelQueue } = params // Metadata-driven delegation is always enabled @@ -3310,24 +3395,55 @@ export class ClineProvider ) } - // 4) Create child as sole active (parent reference preserved for lineage) + // 4) Optionally create a git worktree for filesystem isolation. + let childWorktreePath: string | undefined + if (worktree) { + try { + const shortId = parentTaskId.slice(-8) + const branchName = worktree === "auto" ? 
`roo/task-${shortId}-${Date.now().toString(36)}` : worktree + const worktreeBase = path.join(os.homedir(), ".roo", "worktrees") + const projectName = path.basename(parent.workspacePath) + const worktreeDest = path.join(worktreeBase, `${projectName}-${shortId}`) + const result = await worktreeService.createWorktree(parent.workspacePath, { + path: worktreeDest, + branch: branchName, + createNewBranch: true, + }) + if (result.success) { + childWorktreePath = worktreeDest + this.log(`[delegateParentAndOpenChild] Created worktree at ${worktreeDest} on branch ${branchName}`) + TelemetryService.instance.captureWorktreeCreated(parentTaskId, worktreeDest) + } else { + this.log(`[delegateParentAndOpenChild] Worktree creation failed (non-fatal): ${result.message}`) + } + } catch (err) { + this.log( + `[delegateParentAndOpenChild] Worktree creation error (non-fatal): ${ + (err as Error)?.message ?? String(err) + }`, + ) + } + } + + // 5) Create child as sole active (parent reference preserved for lineage). // Pass initialStatus: "active" to ensure the child task's historyItem is created // with status from the start, avoiding race conditions where the task might // call attempt_completion before status is persisted separately. // // Pass startTask: false to prevent the child from beginning its task loop // (and writing to globalState via saveClineMessages → updateTaskHistory) - // before we persist the parent's delegation metadata in step 5. - // Without this, the child's fire-and-forget startTask() races with step 5, + // before we persist the parent's delegation metadata in step 6. + // Without this, the child's fire-and-forget startTask() races with step 6, // and the last writer to globalState overwrites the other's changes— // causing the parent's delegation fields to be lost. 
const child = await this.createTask(message, undefined, parent as any, { initialTodos, initialStatus: "active", startTask: false, + workspacePath: childWorktreePath, }) - // 5) Persist parent delegation metadata BEFORE the child starts writing. + // 6) Persist parent delegation metadata BEFORE the child starts writing. try { const { historyItem } = await this.getTaskWithId(parentTaskId) const childIds = Array.from(new Set([...(historyItem.childIds ?? []), child.taskId])) @@ -3337,6 +3453,9 @@ export class ClineProvider delegatedToId: child.taskId, awaitingChildId: child.taskId, childIds, + // Store the parallel queue in the parent so reopenParentFromDelegation can drain it + ...(parallelQueue && parallelQueue.length > 0 ? { parallelQueue } : {}), + ...(abortOnChildFailure !== undefined ? { abortOnChildFailure } : {}), } await this.updateTaskHistory(updatedHistory) } catch (err) { @@ -3347,16 +3466,34 @@ export class ClineProvider ) } - // 6) Start the child task now that parent metadata is safely persisted. + // 7) Persist the child's worktree path so it can be cleaned up on completion. + if (childWorktreePath) { + try { + const { historyItem: childHistory } = await this.getTaskWithId(child.taskId) + await this.updateTaskHistory({ ...childHistory, worktreePath: childWorktreePath }) + } catch { + // non-fatal + } + } + + // 8) Start the child task now that parent metadata is safely persisted. 
child.start() - // 7) Emit TaskDelegated (provider-level) + // 9) Emit TaskSpawned and TaskDelegated (provider-level) try { + this.emit(RooCodeEventName.TaskSpawned, child.taskId) this.emit(RooCodeEventName.TaskDelegated, parentTaskId, child.taskId) } catch { // non-fatal } + if (parallelQueue && parallelQueue.length > 0) { + TelemetryService.instance.captureParallelTaskSpawned(parentTaskId, parallelQueue.length + 1) + this.log( + `[delegateParentAndOpenChild] Parallel queue: ${parallelQueue.length + 1} tasks queued for parent ${parentTaskId}`, + ) + } + return child } @@ -3367,8 +3504,11 @@ export class ClineProvider parentTaskId: string childTaskId: string completionResultSummary: string + completionPayload?: Record + /** Set to true when the child task failed or was aborted, for error aggregation and abortOnChildFailure checks. */ + childFailed?: boolean }): Promise { - const { parentTaskId, childTaskId, completionResultSummary } = params + const { parentTaskId, childTaskId, completionResultSummary, completionPayload, childFailed } = params const globalStoragePath = this.contextProxy.globalStorageUri.fsPath // 1) Load parent from history and current persisted messages @@ -3376,47 +3516,208 @@ export class ClineProvider let parentClineMessages: ClineMessage[] = [] try { - parentClineMessages = await readTaskMessages({ - taskId: parentTaskId, - globalStoragePath, - }) + parentClineMessages = await readTaskMessages({ taskId: parentTaskId, globalStoragePath }) } catch { parentClineMessages = [] } let parentApiMessages: any[] = [] try { - parentApiMessages = (await readApiMessages({ - taskId: parentTaskId, - globalStoragePath, - })) as any[] + parentApiMessages = (await readApiMessages({ taskId: parentTaskId, globalStoragePath })) as any[] } catch { parentApiMessages = [] } - // 2) Inject synthetic records: UI subtask_result and update API tool_result - const ts = Date.now() + // 2) Close child instance if still open (single-open-task invariant). 
+ // This MUST happen BEFORE updating the child's status to "completed" because + // removeClineFromStack() → abortTask(true) → saveClineMessages() writes + // the historyItem with initialStatus (typically "active"), which would + // overwrite a "completed" status set earlier. + const current = this.getCurrentTask() + if (current?.taskId === childTaskId) { + await this.removeClineFromStack() + } + + // 3) Update child metadata to "completed" and clean up its worktree (if any). + let childWorktreePath: string | undefined + try { + const { historyItem: childHistory } = await this.getTaskWithId(childTaskId) + childWorktreePath = childHistory.worktreePath + await this.updateTaskHistory({ + ...childHistory, + status: "completed", + completionPayload: completionPayload, + }) + } catch (err) { + this.log( + `[reopenParentFromDelegation] Failed to persist child completed status for ${childTaskId}: ${ + (err as Error)?.message ?? String(err) + }`, + ) + } + + if (childWorktreePath) { + try { + await worktreeService.deleteWorktree(historyItem.workspace ?? this.cwd, childWorktreePath) + this.log(`[reopenParentFromDelegation] Deleted worktree at ${childWorktreePath}`) + TelemetryService.instance.captureWorktreeDeleted(childTaskId, childWorktreePath) + } catch (err) { + this.log( + `[reopenParentFromDelegation] Worktree cleanup failed (non-fatal): ${ + (err as Error)?.message ?? String(err) + }`, + ) + } + } + + // 4) Handle parallel queue: if the parent has more tasks queued, start the next one + // instead of resuming the parent. The parent resumes only when the queue is empty. + const errorMessage = childFailed + ? completionResultSummary || "Child task failed or was aborted" + : (completionPayload?.error as string | undefined) + const currentResult = { + taskId: childTaskId, + summary: completionResultSummary, + payload: completionPayload, + ...(errorMessage ? { error: errorMessage } : {}), + } + const accumulatedResults = [...(historyItem.parallelResults ?? 
[]), currentResult] + const remainingQueue = historyItem.parallelQueue ?? [] + + if (childFailed) { + TelemetryService.instance.captureParallelTaskChildFailed(parentTaskId, childTaskId) + } + + // If abortOnChildFailure is set and the child failed, abandon the remaining queue + // and return the partial results immediately to the parent. + const shouldAbort = childFailed && historyItem.abortOnChildFailure === true + if (shouldAbort && remainingQueue.length > 0) { + this.log( + `[reopenParentFromDelegation] Aborting parallel queue for parent ${parentTaskId}: child ${childTaskId} failed and abortOnChildFailure is set`, + ) + } + + if (remainingQueue.length > 0 && !shouldAbort) { + // There are more parallel tasks to run — start the next one. + const [nextTask, ...rest] = remainingQueue + + // Persist the result so far and reduce the queue + const childIds = Array.from(new Set([...(historyItem.childIds ?? []), childTaskId])) + await this.updateTaskHistory({ + ...historyItem, + status: "delegated", + completedByChildId: childTaskId, + completionResultSummary, + childIds, + parallelQueue: rest, + parallelResults: accumulatedResults, + awaitingChildId: undefined, // will be set by delegateParentAndOpenChild + }) + + try { + this.emit(RooCodeEventName.TaskDelegationCompleted, parentTaskId, childTaskId, completionResultSummary) + } catch { + // non-fatal + } + + // Mode-switch to the next task's mode, then start it + try { + await this.handleModeSwitch(nextTask.mode as any) + } catch { + // non-fatal + } + + let nextTodoItems: TodoItem[] = [] + if (nextTask.todos) { + try { + const { parseMarkdownChecklist } = await import("../tools/UpdateTodoListTool") + nextTodoItems = parseMarkdownChecklist(nextTask.todos) + } catch { + // non-fatal: start with empty todos + } + } + + // Re-fetch history so delegateParentAndOpenChild sees the updated queue state + const { historyItem: refreshedParentHistory } = await this.getTaskWithId(parentTaskId) + // Create worktree before task so 
we have the path ready; use parentTaskId as name hint + const nextChildWorktreePath = nextTask.worktree + ? await this._createWorktreeForTask( + refreshedParentHistory.workspace ?? this.cwd, + nextTask.worktree, + parentTaskId, + ) + : undefined + const nextChild = await this.createTask(nextTask.message, undefined, undefined, { + initialTodos: nextTodoItems, + initialStatus: "active", + startTask: false, + workspacePath: nextChildWorktreePath, + }) + + // Record the next child in parent history + const updatedChildIds = Array.from(new Set([...(refreshedParentHistory.childIds ?? []), nextChild.taskId])) + await this.updateTaskHistory({ + ...refreshedParentHistory, + delegatedToId: nextChild.taskId, + awaitingChildId: nextChild.taskId, + childIds: updatedChildIds, + }) + + if (nextChildWorktreePath) { + try { + const { historyItem: nextChildHistory } = await this.getTaskWithId(nextChild.taskId) + await this.updateTaskHistory({ ...nextChildHistory, worktreePath: nextChildWorktreePath }) + } catch { + // non-fatal + } + } + + nextChild.start() + try { + this.emit(RooCodeEventName.TaskSpawned, nextChild.taskId) + this.emit(RooCodeEventName.TaskDelegated, parentTaskId, nextChild.taskId) + } catch { + // non-fatal + } + return + } - // Defensive: ensure arrays + // 5) All tasks done (or this was a plain new_task). Build the tool result content. 
if (!Array.isArray(parentClineMessages)) parentClineMessages = [] if (!Array.isArray(parentApiMessages)) parentApiMessages = [] + const ts = Date.now() + const isParallelFanIn = accumulatedResults.length > 1 + + // Determine result text — aggregated JSON for parallel fan-in, plain text otherwise + let toolResultContent: string + if (isParallelFanIn) { + toolResultContent = JSON.stringify(accumulatedResults, null, 2) + } else { + toolResultContent = `Subtask ${childTaskId} completed.\n\nResult:\n${completionResultSummary}` + } + const subtaskUiMessage: ClineMessage = { type: "say", say: "subtask_result", - text: completionResultSummary, + text: isParallelFanIn + ? `All ${accumulatedResults.length} parallel tasks completed:\n${toolResultContent}` + : completionResultSummary, ts, } parentClineMessages.push(subtaskUiMessage) await saveTaskMessages({ messages: parentClineMessages, taskId: parentTaskId, globalStoragePath }) - // Find the tool_use_id from the last assistant message's new_task tool_use + // Find the matching tool_use_id — supports both new_task and spawn_parallel_tasks let toolUseId: string | undefined for (let i = parentApiMessages.length - 1; i >= 0; i--) { const msg = parentApiMessages[i] if (msg.role === "assistant" && Array.isArray(msg.content)) { for (const block of msg.content) { - if (block.type === "tool_use" && block.name === "new_task") { + if ( + block.type === "tool_use" && + (block.name === "new_task" || block.name === "spawn_parallel_tasks") + ) { toolUseId = block.id break } @@ -3425,93 +3726,43 @@ export class ClineProvider } } - // Preferred: if the parent history contains the native tool_use for new_task, - // inject a matching tool_result for the Anthropic message contract: - // user → assistant (tool_use) → user (tool_result) if (toolUseId) { - // Check if the last message is already a user message with a tool_result for this tool_use_id - // (in case this is a retry or the history was already updated) const lastMsg = 
parentApiMessages[parentApiMessages.length - 1] let alreadyHasToolResult = false if (lastMsg?.role === "user" && Array.isArray(lastMsg.content)) { for (const block of lastMsg.content) { if (block.type === "tool_result" && block.tool_use_id === toolUseId) { - // Update the existing tool_result content - block.content = `Subtask ${childTaskId} completed.\n\nResult:\n${completionResultSummary}` + block.content = toolResultContent alreadyHasToolResult = true break } } } - // If no existing tool_result found, create a NEW user message with the tool_result if (!alreadyHasToolResult) { parentApiMessages.push({ role: "user", - content: [ - { - type: "tool_result" as const, - tool_use_id: toolUseId, - content: `Subtask ${childTaskId} completed.\n\nResult:\n${completionResultSummary}`, - }, - ], + content: [{ type: "tool_result" as const, tool_use_id: toolUseId, content: toolResultContent }], ts, }) } - // Validate the newly injected tool_result against the preceding assistant message. - // This ensures the tool_result's tool_use_id matches a tool_use in the immediately - // preceding assistant message (Anthropic API requirement). const lastMessage = parentApiMessages[parentApiMessages.length - 1] if (lastMessage?.role === "user") { const validatedMessage = validateAndFixToolResultIds(lastMessage, parentApiMessages.slice(0, -1)) parentApiMessages[parentApiMessages.length - 1] = validatedMessage } } else { - // If there is no corresponding tool_use in the parent API history, we cannot emit a - // tool_result. Fall back to a plain user text note so the parent can still resume. 
parentApiMessages.push({ role: "user", - content: [ - { - type: "text" as const, - text: `Subtask ${childTaskId} completed.\n\nResult:\n${completionResultSummary}`, - }, - ], + content: [{ type: "text" as const, text: toolResultContent }], ts, }) } await saveApiMessages({ messages: parentApiMessages as any, taskId: parentTaskId, globalStoragePath }) - // 3) Close child instance if still open (single-open-task invariant). - // This MUST happen BEFORE updating the child's status to "completed" because - // removeClineFromStack() → abortTask(true) → saveClineMessages() writes - // the historyItem with initialStatus (typically "active"), which would - // overwrite a "completed" status set earlier. - const current = this.getCurrentTask() - if (current?.taskId === childTaskId) { - await this.removeClineFromStack() - } - - // 4) Update child metadata to "completed" status. - // This runs after the abort so it overwrites the stale "active" status - // that saveClineMessages() may have written during step 3. - try { - const { historyItem: childHistory } = await this.getTaskWithId(childTaskId) - await this.updateTaskHistory({ - ...childHistory, - status: "completed", - }) - } catch (err) { - this.log( - `[reopenParentFromDelegation] Failed to persist child completed status for ${childTaskId}: ${ - (err as Error)?.message ?? String(err) - }`, - ) - } - - // 5) Update parent metadata and persist BEFORE emitting completion event + // 6) Update parent metadata and persist BEFORE emitting completion event const childIds = Array.from(new Set([...(historyItem.childIds ?? 
[]), childTaskId])) const updatedHistory: typeof historyItem = { ...historyItem, @@ -3520,21 +3771,37 @@ export class ClineProvider completionResultSummary, awaitingChildId: undefined, childIds, + parallelQueue: undefined, + parallelResults: undefined, + abortOnChildFailure: undefined, } await this.updateTaskHistory(updatedHistory) - // 6) Emit TaskDelegationCompleted (provider-level) + if (isParallelFanIn) { + const hadFailures = accumulatedResults.some((r) => r.error !== undefined) + TelemetryService.instance.captureParallelTaskCompleted( + parentTaskId, + accumulatedResults.length, + accumulatedResults.length, + hadFailures, + ) + this.log( + `[reopenParentFromDelegation] Parallel fan-in complete for parent ${parentTaskId}: ${accumulatedResults.length} tasks, failures=${hadFailures}`, + ) + } + + // 7) Emit TaskDelegationCompleted (provider-level) try { this.emit(RooCodeEventName.TaskDelegationCompleted, parentTaskId, childTaskId, completionResultSummary) } catch { // non-fatal } - // 7) Reopen the parent from history as the sole active task (restores saved mode) + // 8) Reopen the parent from history as the sole active task (restores saved mode) // IMPORTANT: startTask=false to suppress resume-from-history ask scheduling const parentInstance = await this.createTaskWithHistoryItem(updatedHistory, { startTask: false }) - // 8) Inject restored histories into the in-memory instance before resuming + // 9) Inject restored histories into the in-memory instance before resuming if (parentInstance) { try { await parentInstance.overwriteClineMessages(parentClineMessages) @@ -3551,7 +3818,7 @@ export class ClineProvider await parentInstance.resumeAfterDelegation() } - // 9) Emit TaskDelegationResumed (provider-level) + // 10) Emit TaskDelegationResumed (provider-level) try { this.emit(RooCodeEventName.TaskDelegationResumed, parentTaskId, childTaskId) } catch { @@ -3559,6 +3826,344 @@ export class ClineProvider } } + /** + * Concurrent fan-out: spawn all child tasks 
simultaneously (parent stays alive in the map), + * then await all completions via Promise.all before returning aggregated results. + * + * The parent task MUST be in this.tasks and is NOT removed — it remains registered + * with its focusedTaskId preserved while children run. Each child calls + * resolveChildCompletion() when it finishes (from AttemptCompletionTool concurrent path). + */ + public async spawnConcurrentChildren(params: { + parentTaskId: string + tasks: Array<{ mode: string; message: string; worktree?: string; todos?: TodoItem[]; role?: string }> + abortOnChildFailure?: boolean + /** When true, workers enter an idle loop after each turn and await task_assignment + * or shutdown_request instead of completing immediately. */ + persistent?: boolean + }): Promise; error?: string }>> { + const { parentTaskId, tasks, abortOnChildFailure = false, persistent = false } = params + + const parent = this.tasks.get(parentTaskId) + if (!parent) { + throw new Error(`[spawnConcurrentChildren] Parent task ${parentTaskId} not found in active tasks`) + } + + TelemetryService.instance.captureParallelTaskSpawned(parentTaskId, tasks.length) + this.log(`[spawnConcurrentChildren] Spawning ${tasks.length} concurrent children for parent ${parentTaskId}`) + + // Create a swarm session so every worker gets a stable identity + color. + const sessionId = parentTaskId + this.swarmRegistry.createSession(sessionId, parentTaskId) + if (persistent) { + this.mailboxManager.createMailbox(sessionId) + } + this.emit(RooCodeEventName.SwarmSessionStarted, sessionId, parentTaskId) + + // Mark parent as swarm leader in its history item. 
+ try { + const { historyItem: parentHistory } = await this.getTaskWithId(parentTaskId) + await this.updateTaskHistory({ ...parentHistory, swarmSessionId: sessionId, isSwarmLeader: true }) + } catch { + // non-fatal + } + + // Phase 1: sequential setup — mode switch and task creation must be serialised to + // prevent handleModeSwitch() from racing (it mutates global provider mode state). + // Task.start() is NOT called yet; that happens in phase 2. + type ChildEntry = { + child: Task + taskIdForError: string + completionPromise: Promise<{ + taskId: string + summary: string + payload?: Record + error?: string + }> + } + const entries: ChildEntry[] = [] + + for (let i = 0; i < tasks.length; i++) { + const spec = tasks[i] + + try { + await this.handleModeSwitch(spec.mode as any) + } catch { + // non-fatal + } + + let childWorktreePath: string | undefined + if (spec.worktree) { + childWorktreePath = await this._createWorktreeForTask(parent.workspacePath, spec.worktree, parentTaskId) + } + + const child = await this.createTask(spec.message, undefined, parent as any, { + initialTodos: spec.todos ?? [], + initialStatus: "active", + startTask: false, + workspacePath: childWorktreePath, + }) + + // Assign swarm identity to this worker. + const agentName = spec.role ?? `worker-${i + 1}` + const agentColor = this.swarmRegistry.assignColor() + const agentId = `${agentName}@${sessionId}` + const identity: import("@roo-code/types").AgentIdentity = { + agentId, + agentName, + color: agentColor, + isLeader: false, + taskId: child.taskId, + } + this.swarmRegistry.registerWorker(sessionId, identity) + this.emit(RooCodeEventName.WorkerRegistered, sessionId, child.taskId, agentName, agentColor) + + try { + const { historyItem: childHistory } = await this.getTaskWithId(child.taskId) + await this.updateTaskHistory({ + ...childHistory, + ...(childWorktreePath ? 
{ worktreePath: childWorktreePath } : {}), + swarmSessionId: sessionId, + agentId, + agentName, + agentColor, + }) + } catch { + // non-fatal + } + + const completionPromise = new Promise<{ + taskId: string + summary: string + payload?: Record + error?: string + }>((resolve, reject) => { + this.childCompletionHandlers.set(child.taskId, { + resolve: (result) => resolve({ taskId: child.taskId, ...result }), + reject, + }) + }) + + entries.push({ child, taskIdForError: child.taskId, completionPromise }) + } + + // Phase 2: start all children in a tight sync loop so they run concurrently. + for (const { child } of entries) { + child.start() + this.emit(RooCodeEventName.TaskSpawned, child.taskId) + this.emit(RooCodeEventName.TaskDelegated, parentTaskId, child.taskId) + } + + // Phase 3: await completions. + // When abortOnChildFailure is set, the first rejection aborts all remaining siblings. + let abortTriggered = false + const abortSiblings = async (failedTaskId: string) => { + if (abortTriggered) return + abortTriggered = true + for (const { child } of entries) { + if (child.taskId === failedTaskId) continue + const handler = this.childCompletionHandlers.get(child.taskId) + this.childCompletionHandlers.delete(child.taskId) + // Abort the running child task. 
+ await this.removeClineFromStack({ taskId: child.taskId, skipDelegationRepair: true }) + handler?.reject(new Error("Aborted: sibling task failed")) + TelemetryService.instance.captureParallelTaskChildFailed(parentTaskId, child.taskId) + } + this.log(`[spawnConcurrentChildren] Aborted remaining siblings after child ${failedTaskId} failed`) + } + + const wrappedPromises = entries.map(({ child, completionPromise }) => + completionPromise.then( + (r) => r, + async (err: unknown) => { + if (abortOnChildFailure) { + await abortSiblings(child.taskId) + } + throw err + }, + ), + ) + + const settled = await Promise.allSettled(wrappedPromises) + const results: Array<{ taskId: string; summary: string; payload?: Record; error?: string }> = + settled.map((s, i) => { + if (s.status === "fulfilled") { + return s.value + } + const msg = s.reason instanceof Error ? s.reason.message : String(s.reason) + return { taskId: entries[i]?.taskIdForError ?? `child-${i}`, summary: msg, error: msg } + }) + + const hadFailures = results.some((r) => r.error !== undefined) + TelemetryService.instance.captureParallelTaskCompleted(parentTaskId, results.length, tasks.length, hadFailures) + this.log( + `[spawnConcurrentChildren] All ${tasks.length} children completed for parent ${parentTaskId}. failures=${hadFailures}`, + ) + + if (persistent) { + this.mailboxManager.destroyMailbox(sessionId) + } + this.swarmRegistry.destroySession(sessionId) + this.emit(RooCodeEventName.SwarmSessionEnded, sessionId, parentTaskId) + + return results + } + + /** Assign a new task to an idle persistent worker. */ + public async assignTaskToWorker(sessionId: string, workerId: string, message: string): Promise { + await this.mailboxManager.assignTask(sessionId, workerId, message) + } + + /** Send shutdown_request to all workers in a session so they resolve and the parent can collect results. 
*/ + public async shutdownWorkers(sessionId: string): Promise { + const session = this.swarmRegistry.getSession(sessionId) + if (!session) return + await Promise.all( + Object.keys(session.teammates).map((workerId) => this.mailboxManager.shutdownWorker(sessionId, workerId)), + ) + } + + /** + * Called by AttemptCompletionTool when a concurrent child completes. + * Resolves the Promise registered in childCompletionHandlers for this child, + * then removes the child from the tasks map. + */ + public async resolveChildCompletion(params: { + childTaskId: string + summary: string + payload?: Record + failed?: boolean + }): Promise { + const { childTaskId, summary, payload, failed } = params + const handler = this.childCompletionHandlers.get(childTaskId) + this.childCompletionHandlers.delete(childTaskId) + + // Update child history to "completed" and clean up worktree. + try { + const { historyItem: childHistory } = await this.getTaskWithId(childTaskId) + const worktreePath = childHistory.worktreePath + await this.updateTaskHistory({ ...childHistory, status: "completed", completionPayload: payload }) + if (worktreePath) { + try { + await worktreeService.deleteWorktree(childHistory.workspace ?? this.cwd, worktreePath) + TelemetryService.instance.captureWorktreeDeleted(childTaskId, worktreePath) + } catch { + // non-fatal + } + } + } catch { + // non-fatal + } + + // Remove the child from the live task map but do NOT change focus + // (parent is still focused and running). + await this.removeClineFromStack({ taskId: childTaskId, skipDelegationRepair: true }) + + if (handler) { + if (failed) { + handler.reject(new Error(summary)) + } else { + handler.resolve({ summary, payload }) + } + } + } + + /** + * Scans task history for orphaned worktrees — directories that still exist on disk but whose + * tasks are no longer running. This can happen if VS Code crashes mid-task. + * Logs findings and emits telemetry; offers optional VS Code notification for cleanup. 
+ */ + public async detectAndCleanOrphanedWorktrees(): Promise { + try { + const allHistory = this.taskHistoryStore.getAll() + const activeTaskIds = new Set(this.tasks.keys()) + const orphans: Array<{ taskId: string; worktreePath: string }> = [] + + for (const item of allHistory) { + if (!item.worktreePath) continue + // A completed task whose worktree dir still exists is an orphan + if (item.status === "completed" || (item.status !== "active" && !activeTaskIds.has(item.id))) { + try { + const { default: fss } = await import("fs") + if (fss.existsSync(item.worktreePath)) { + orphans.push({ taskId: item.id, worktreePath: item.worktreePath }) + } + } catch { + // non-fatal + } + } + } + + if (orphans.length === 0) return + + for (const { taskId, worktreePath } of orphans) { + this.log( + `[detectAndCleanOrphanedWorktrees] Orphaned worktree detected: ${worktreePath} (task ${taskId})`, + ) + TelemetryService.instance.captureWorktreeOrphanDetected(worktreePath) + } + + const paths = orphans.map((o) => o.worktreePath).join(", ") + const action = await vscode.window.showWarningMessage( + `Moo Code detected ${orphans.length} orphaned worktree(s) from previous sessions:\n${paths}`, + "Clean up", + "Ignore", + ) + + if (action === "Clean up") { + for (const { taskId, worktreePath } of orphans) { + try { + const cwd = this.cwd + await worktreeService.deleteWorktree(cwd, worktreePath) + this.log(`[detectAndCleanOrphanedWorktrees] Cleaned up worktree: ${worktreePath}`) + TelemetryService.instance.captureWorktreeDeleted(taskId, worktreePath) + // Clear the worktreePath from the history item + const { historyItem } = await this.getTaskWithId(taskId) + await this.updateTaskHistory({ ...historyItem, worktreePath: undefined }) + } catch (err) { + this.log( + `[detectAndCleanOrphanedWorktrees] Failed to clean worktree ${worktreePath}: ${ + (err as Error)?.message ?? 
String(err) + }`, + ) + } + } + } + } catch (err) { + this.log( + `[detectAndCleanOrphanedWorktrees] Error during orphan scan (non-fatal): ${ + (err as Error)?.message ?? String(err) + }`, + ) + } + } + + /** Creates a worktree for a child task, returning the path on success or undefined on failure. */ + private async _createWorktreeForTask( + parentWorkspacePath: string, + worktree: string, + taskIdHint?: string, + ): Promise { + try { + const shortId = taskIdHint ? taskIdHint.slice(-8) : Math.random().toString(36).slice(2, 8) + const branchName = worktree === "auto" ? `roo/task-${shortId}-${Date.now().toString(36)}` : worktree + const worktreeBase = path.join(os.homedir(), ".roo", "worktrees") + const projectName = path.basename(parentWorkspacePath) + const worktreeDest = path.join(worktreeBase, `${projectName}-${shortId}`) + const result = await worktreeService.createWorktree(parentWorkspacePath, { + path: worktreeDest, + branch: branchName, + createNewBranch: true, + }) + if (result.success && taskIdHint) { + TelemetryService.instance.captureWorktreeCreated(taskIdHint, worktreeDest) + } + return result.success ? 
worktreeDest : undefined + } catch { + return undefined + } + } + /** * Convert a file path to a webview-accessible URI * This method safely converts file paths to URIs that can be loaded in the webview diff --git a/src/core/webview/__tests__/ClineProvider.flicker-free-cancel.spec.ts b/src/core/webview/__tests__/ClineProvider.flicker-free-cancel.spec.ts index 4bb01347a3d..9900583af78 100644 --- a/src/core/webview/__tests__/ClineProvider.flicker-free-cancel.spec.ts +++ b/src/core/webview/__tests__/ClineProvider.flicker-free-cancel.spec.ts @@ -191,9 +191,11 @@ describe("ClineProvider flicker-free cancel", () => { vi.mocked(Task).mockImplementation(() => mockTask2 as any) }) - it("should not remove current task from stack when rehydrating same taskId", async () => { - // Setup: Add a task to the stack first - ;(provider as any).clineStack = [mockTask1] + it("should not remove current task from map when rehydrating same taskId", async () => { + // Setup: Add a task to the map first + ;(provider as any).tasks = new Map([["task-1", mockTask1]]) + ;(provider as any).focusedTaskId = "task-1" + ;(provider as any).leaderTaskId = "task-1" // Mock event listeners for cleanup ;(provider as any).taskEventListeners = new WeakMap() @@ -221,9 +223,9 @@ describe("ClineProvider flicker-free cancel", () => { // Assert: removeClineFromStack should NOT be called expect(removeClineFromStackSpy).not.toHaveBeenCalled() - // Verify the task was replaced in-place - expect((provider as any).clineStack).toHaveLength(1) - expect((provider as any).clineStack[0]).toBe(mockTask2) + // Verify the task was replaced in-place (new task under same focusedTaskId) + expect((provider as any).tasks.size).toBe(1) + expect((provider as any).tasks.get("task-1")).toBe(mockTask2) // Verify old event listeners were cleaned up expect(mockCleanupFunctions[0]).toHaveBeenCalled() @@ -233,9 +235,10 @@ describe("ClineProvider flicker-free cancel", () => { expect(mockTask2.emit).toHaveBeenCalledWith("taskFocused") }) - 
it("should remove task from stack when creating different task", async () => { - // Setup: Add a task to the stack first - ;(provider as any).clineStack = [mockTask1] + it("should remove task from map when creating different task", async () => { + // Setup: Add a task to the map first + ;(provider as any).tasks = new Map([["task-1", mockTask1]]) + ;(provider as any).focusedTaskId = "task-1" // Spy on removeClineFromStack to verify it IS called const removeClineFromStackSpy = vi.spyOn(provider, "removeClineFromStack").mockResolvedValue(undefined) @@ -259,9 +262,10 @@ describe("ClineProvider flicker-free cancel", () => { expect(removeClineFromStackSpy).toHaveBeenCalled() }) - it("should handle empty stack gracefully during rehydration attempt", async () => { - // Setup: Empty stack - ;(provider as any).clineStack = [] + it("should handle empty map gracefully during rehydration attempt", async () => { + // Setup: Empty map + ;(provider as any).tasks = new Map() + ;(provider as any).focusedTaskId = undefined // Spy on removeClineFromStack const removeClineFromStackSpy = vi.spyOn(provider, "removeClineFromStack").mockResolvedValue(undefined) @@ -285,19 +289,24 @@ describe("ClineProvider flicker-free cancel", () => { expect(removeClineFromStackSpy).toHaveBeenCalled() }) - it("should maintain task stack integrity during flicker-free replacement", async () => { - // Setup: Stack with multiple tasks + it("should maintain task map integrity during flicker-free replacement", async () => { + // Setup: Map with multiple tasks (parent + current child) const mockParentTask = { taskId: "parent-task", instanceId: "parent-instance", emit: vi.fn(), } - ;(provider as any).clineStack = [mockParentTask, mockTask1] + ;(provider as any).tasks = new Map([ + ["parent-task", mockParentTask], + ["task-1", mockTask1], + ]) + ;(provider as any).focusedTaskId = "task-1" + ;(provider as any).leaderTaskId = "parent-task" ;(provider as any).taskEventListeners = new WeakMap() ;(provider as 
any).taskEventListeners.set(mockTask1, [vi.fn()]) - // Act: Rehydrate the current (top) task + // Act: Rehydrate the current (focused) task const historyItem: HistoryItem = { id: "task-1", number: 1, @@ -311,9 +320,9 @@ describe("ClineProvider flicker-free cancel", () => { await provider.createTaskWithHistoryItem(historyItem) - // Assert: Stack should maintain parent task and replace current task - expect((provider as any).clineStack).toHaveLength(2) - expect((provider as any).clineStack[0]).toBe(mockParentTask) - expect((provider as any).clineStack[1]).toBe(mockTask2) + // Assert: Map should maintain parent task and replace current task + expect((provider as any).tasks.size).toBe(2) + expect((provider as any).tasks.get("parent-task")).toBe(mockParentTask) + expect((provider as any).tasks.get("task-1")).toBe(mockTask2) }) }) diff --git a/src/core/webview/__tests__/ClineProvider.spec.ts b/src/core/webview/__tests__/ClineProvider.spec.ts index da0fb2003fb..94b1f0a4773 100644 --- a/src/core/webview/__tests__/ClineProvider.spec.ts +++ b/src/core/webview/__tests__/ClineProvider.spec.ts @@ -1209,8 +1209,9 @@ describe("ClineProvider", () => { }) test("handles case when no current task exists", async () => { - // Clear the cline stack - ;(provider as any).clineStack = [] + // Clear the tasks map + ;(provider as any).tasks = new Map() + ;(provider as any).focusedTaskId = undefined // Trigger message deletion const messageHandler = (mockWebviewView.webview.onDidReceiveMessage as any).mock.calls[0][0] diff --git a/src/core/webview/diagnosticsHandler.ts b/src/core/webview/diagnosticsHandler.ts index 212ddbc5dee..3b5b62024b2 100644 --- a/src/core/webview/diagnosticsHandler.ts +++ b/src/core/webview/diagnosticsHandler.ts @@ -65,7 +65,7 @@ export async function generateErrorDiagnostics(params: GenerateDiagnosticsParams // Prepend human-readable guidance comments before the JSON payload const headerComment = - "// Please share this file with Roo Code Support (support@roocode.com) 
to diagnose the issue faster\n" + + "// Please share this file with Moo Code Support (support@moo-code.dev) to diagnose the issue faster\n" + "// Just make sure you're OK sharing the contents of the conversation below.\n\n" const jsonContent = JSON.stringify(diagnostics, null, 2) const fullContent = headerComment + jsonContent diff --git a/src/core/webview/webviewMessageHandler.ts b/src/core/webview/webviewMessageHandler.ts index e3b8c1bea88..7d3df1d733f 100644 --- a/src/core/webview/webviewMessageHandler.ts +++ b/src/core/webview/webviewMessageHandler.ts @@ -994,7 +994,7 @@ export const webviewMessageHandler = async ( key: "roo", options: { provider: "roo", - baseUrl: process.env.ROO_CODE_PROVIDER_URL ?? "https://api.roocode.com/proxy", + baseUrl: process.env.ROO_CODE_PROVIDER_URL ?? "", apiKey: CloudService.hasInstance() ? CloudService.instance.authService?.getSessionToken() : undefined, @@ -1134,7 +1134,7 @@ export const webviewMessageHandler = async ( try { const rooOptions = { provider: "roo" as const, - baseUrl: process.env.ROO_CODE_PROVIDER_URL ?? "https://api.roocode.com/proxy", + baseUrl: process.env.ROO_CODE_PROVIDER_URL ?? "", apiKey: CloudService.hasInstance() ? 
CloudService.instance.authService?.getSessionToken() : undefined, @@ -1377,6 +1377,11 @@ export const webviewMessageHandler = async ( } break } + case "openVSCodeSettings": { + const query = message.text || "" + await vscode.commands.executeCommand("workbench.action.openSettings", query) + break + } case "openMcpSettings": { const mcpSettingsFilePath = await provider.getMcpHub()?.getMcpSettingsFilePath() diff --git a/src/extension.ts b/src/extension.ts index 19c0d70585a..ef6bbec96fc 100644 --- a/src/extension.ts +++ b/src/extension.ts @@ -100,7 +100,7 @@ async function checkWorktreeAutoOpen( // Open the Roo Code sidebar with a slight delay to ensure UI is ready setTimeout(async () => { try { - await vscode.commands.executeCommand("roo-cline.plusButtonClicked") + await vscode.commands.executeCommand("moo-code.plusButtonClicked") } catch (error) { outputChannel.appendLine( `[Worktree] Error auto-opening sidebar: ${error instanceof Error ? error.message : String(error)}`, @@ -137,11 +137,12 @@ export async function activate(context: vscode.ExtensionContext) { // Initialize telemetry service. const telemetryService = TelemetryService.createInstance() - try { - telemetryService.register(new PostHogTelemetryClient()) - } catch (error) { - console.warn("Failed to register PostHogTelemetryClient:", error) - } + // Telemetry disabled — PostHogTelemetryClient registration removed + // try { + // telemetryService.register(new PostHogTelemetryClient()) + // } catch (error) { + // console.warn("Failed to register PostHogTelemetryClient:", error) + // } // Create logger for cloud services. const cloudLogger = createDualLogger(createOutputChannelLogger(outputChannel)) @@ -210,7 +211,7 @@ export async function activate(context: vscode.ExtensionContext) { : undefined await refreshModels({ provider: "roo", - baseUrl: process.env.ROO_CODE_PROVIDER_URL ?? "https://api.roocode.com/proxy", + baseUrl: process.env.ROO_CODE_PROVIDER_URL ?? 
"", apiKey: sessionToken, }) } else { @@ -268,15 +269,16 @@ export async function activate(context: vscode.ExtensionContext) { "user-info": userInfoHandler, }) - try { - if (cloudService.telemetryClient) { - TelemetryService.instance.register(cloudService.telemetryClient) - } - } catch (error) { - outputChannel.appendLine( - `[CloudService] Failed to register TelemetryClient: ${error instanceof Error ? error.message : String(error)}`, - ) - } + // Telemetry disabled — cloud telemetry registration removed + // try { + // if (cloudService.telemetryClient) { + // TelemetryService.instance.register(cloudService.telemetryClient) + // } + // } catch (error) { + // outputChannel.appendLine( + // `[CloudService] Failed to register TelemetryClient: ${error instanceof Error ? error.message : String(error)}`, + // ) + // } // Add to subscriptions for proper cleanup on deactivate. context.subscriptions.push(cloudService) @@ -302,6 +304,14 @@ export async function activate(context: vscode.ExtensionContext) { // Check for worktree auto-open path (set when switching to a worktree) await checkWorktreeAutoOpen(context, outputChannel) + // Scan for orphaned worktrees from previous sessions (e.g. after a crash). + // Run in background so it doesn't delay activation. + void provider.detectAndCleanOrphanedWorktrees().catch((err) => { + outputChannel.appendLine( + `[OrphanWorktree] Startup scan failed (non-fatal): ${err instanceof Error ? err.message : String(err)}`, + ) + }) + // Auto-import configuration if specified in settings. try { await autoImportSettings(outputChannel, { diff --git a/src/extension/api.ts b/src/extension/api.ts index 4a66b40078d..22930d7f450 100644 --- a/src/extension/api.ts +++ b/src/extension/api.ts @@ -139,7 +139,7 @@ export class API extends EventEmitter implements RooCodeAPI { try { const models = await getModels({ provider: "roo" as const, - baseUrl: process.env.ROO_CODE_PROVIDER_URL ?? 
"https://api.roocode.com/proxy", + baseUrl: process.env.ROO_CODE_PROVIDER_URL ?? "", apiKey: CloudService.hasInstance() ? CloudService.instance.authService?.getSessionToken() : undefined, diff --git a/src/integrations/terminal/Terminal.ts b/src/integrations/terminal/Terminal.ts index 38ace9d4b16..df531ee58ee 100644 --- a/src/integrations/terminal/Terminal.ts +++ b/src/integrations/terminal/Terminal.ts @@ -17,7 +17,7 @@ export class Terminal extends BaseTerminal { const env = Terminal.getEnv() const iconPath = new vscode.ThemeIcon("rocket") - this.terminal = terminal ?? vscode.window.createTerminal({ cwd, name: "Roo Code", iconPath, env }) + this.terminal = terminal ?? vscode.window.createTerminal({ cwd, name: "Moo Code", iconPath, env }) if (Terminal.getTerminalZdotdir()) { ShellIntegrationManager.terminalTmpDirs.set(id, env.ZDOTDIR) diff --git a/src/package.json b/src/package.json index 51249cc09ac..721a18089f2 100644 --- a/src/package.json +++ b/src/package.json @@ -1,8 +1,8 @@ { - "name": "roo-cline", + "name": "moo-code", "displayName": "%extension.displayName%", "description": "%extension.description%", - "publisher": "RooVeterinaryInc", + "publisher": "moo-code", "version": "3.52.1", "icon": "assets/icons/icon.png", "galleryBanner": { @@ -14,13 +14,13 @@ "node": "20.19.2" }, "author": { - "name": "Roo Code" + "name": "Moo Code" }, "repository": { "type": "git", - "url": "https://github.com/RooCodeInc/Roo-Code" + "url": "https://github.com/DavinciDreams/Roo-Code" }, - "homepage": "https://roocode.com", + "homepage": "https://github.com/DavinciDreams/Roo-Code", "categories": [ "AI", "Chat", @@ -42,8 +42,8 @@ "sonnet", "ai", "llama", - "roo code", - "roocode" + "moo code", + "moocode" ], "activationEvents": [ "onLanguage", @@ -54,119 +54,119 @@ "viewsContainers": { "activitybar": [ { - "id": "roo-cline-ActivityBar", + "id": "moo-code-ActivityBar", "title": "%views.activitybar.title%", "icon": "assets/icons/icon.svg" } ] }, "views": { - "roo-cline-ActivityBar": [ 
+ "moo-code-ActivityBar": [ { "type": "webview", - "id": "roo-cline.SidebarProvider", + "id": "moo-code.SidebarProvider", "name": "%views.sidebar.name%" } ] }, "commands": [ { - "command": "roo-cline.plusButtonClicked", + "command": "moo-code.plusButtonClicked", "title": "%command.newTask.title%", "icon": "$(edit)" }, { - "command": "roo-cline.historyButtonClicked", + "command": "moo-code.historyButtonClicked", "title": "%command.history.title%", "icon": "$(history)" }, { - "command": "roo-cline.marketplaceButtonClicked", + "command": "moo-code.marketplaceButtonClicked", "title": "%command.marketplace.title%", "icon": "$(extensions)" }, { - "command": "roo-cline.popoutButtonClicked", + "command": "moo-code.popoutButtonClicked", "title": "%command.openInEditor.title%", "icon": "$(link-external)" }, { - "command": "roo-cline.cloudButtonClicked", + "command": "moo-code.cloudButtonClicked", "title": "%command.cloud.title%", "icon": "$(cloud)" }, { - "command": "roo-cline.settingsButtonClicked", + "command": "moo-code.settingsButtonClicked", "title": "%command.settings.title%", "icon": "$(settings-gear)" }, { - "command": "roo-cline.openInNewTab", + "command": "moo-code.openInNewTab", "title": "%command.openInNewTab.title%", "category": "%configuration.title%" }, { - "command": "roo-cline.explainCode", + "command": "moo-code.explainCode", "title": "%command.explainCode.title%", "category": "%configuration.title%" }, { - "command": "roo-cline.fixCode", + "command": "moo-code.fixCode", "title": "%command.fixCode.title%", "category": "%configuration.title%" }, { - "command": "roo-cline.improveCode", + "command": "moo-code.improveCode", "title": "%command.improveCode.title%", "category": "%configuration.title%" }, { - "command": "roo-cline.addToContext", + "command": "moo-code.addToContext", "title": "%command.addToContext.title%", "category": "%configuration.title%" }, { - "command": "roo-cline.newTask", + "command": "moo-code.newTask", "title": "%command.newTask.title%", 
"category": "%configuration.title%" }, { - "command": "roo-cline.terminalAddToContext", + "command": "moo-code.terminalAddToContext", "title": "%command.terminal.addToContext.title%", "category": "Terminal" }, { - "command": "roo-cline.terminalFixCommand", + "command": "moo-code.terminalFixCommand", "title": "%command.terminal.fixCommand.title%", "category": "Terminal" }, { - "command": "roo-cline.terminalExplainCommand", + "command": "moo-code.terminalExplainCommand", "title": "%command.terminal.explainCommand.title%", "category": "Terminal" }, { - "command": "roo-cline.setCustomStoragePath", + "command": "moo-code.setCustomStoragePath", "title": "%command.setCustomStoragePath.title%", "category": "%configuration.title%" }, { - "command": "roo-cline.importSettings", + "command": "moo-code.importSettings", "title": "%command.importSettings.title%", "category": "%configuration.title%" }, { - "command": "roo-cline.focusInput", + "command": "moo-code.focusInput", "title": "%command.focusInput.title%", "category": "%configuration.title%" }, { - "command": "roo-cline.acceptInput", + "command": "moo-code.acceptInput", "title": "%command.acceptInput.title%", "category": "%configuration.title%" }, { - "command": "roo-cline.toggleAutoApprove", + "command": "moo-code.toggleAutoApprove", "title": "%command.toggleAutoApprove.title%", "category": "%configuration.title%" } @@ -174,112 +174,112 @@ "menus": { "editor/context": [ { - "submenu": "roo-cline.contextMenu", + "submenu": "moo-code.contextMenu", "group": "1" } ], - "roo-cline.contextMenu": [ + "moo-code.contextMenu": [ { - "command": "roo-cline.addToContext", + "command": "moo-code.addToContext", "group": "1_actions@1" }, { - "command": "roo-cline.explainCode", + "command": "moo-code.explainCode", "group": "1_actions@2" }, { - "command": "roo-cline.improveCode", + "command": "moo-code.improveCode", "group": "1_actions@3" } ], "terminal/context": [ { - "submenu": "roo-cline.terminalMenu", + "submenu": 
"moo-code.terminalMenu", "group": "2" } ], - "roo-cline.terminalMenu": [ + "moo-code.terminalMenu": [ { - "command": "roo-cline.terminalAddToContext", + "command": "moo-code.terminalAddToContext", "group": "1_actions@1" }, { - "command": "roo-cline.terminalFixCommand", + "command": "moo-code.terminalFixCommand", "group": "1_actions@2" }, { - "command": "roo-cline.terminalExplainCommand", + "command": "moo-code.terminalExplainCommand", "group": "1_actions@3" } ], "view/title": [ { - "command": "roo-cline.plusButtonClicked", + "command": "moo-code.plusButtonClicked", "group": "navigation@1", - "when": "view == roo-cline.SidebarProvider" + "when": "view == moo-code.SidebarProvider" }, { - "command": "roo-cline.settingsButtonClicked", + "command": "moo-code.settingsButtonClicked", "group": "navigation@2", - "when": "view == roo-cline.SidebarProvider" + "when": "view == moo-code.SidebarProvider" }, { - "command": "roo-cline.cloudButtonClicked", + "command": "moo-code.cloudButtonClicked", "group": "navigation@3", - "when": "view == roo-cline.SidebarProvider" + "when": "view == moo-code.SidebarProvider" }, { - "command": "roo-cline.marketplaceButtonClicked", + "command": "moo-code.marketplaceButtonClicked", "group": "navigation@4", - "when": "view == roo-cline.SidebarProvider" + "when": "view == moo-code.SidebarProvider" }, { - "command": "roo-cline.historyButtonClicked", + "command": "moo-code.historyButtonClicked", "group": "overflow@1", - "when": "view == roo-cline.SidebarProvider" + "when": "view == moo-code.SidebarProvider" }, { - "command": "roo-cline.popoutButtonClicked", + "command": "moo-code.popoutButtonClicked", "group": "overflow@2", - "when": "view == roo-cline.SidebarProvider" + "when": "view == moo-code.SidebarProvider" } ], "editor/title": [ { - "command": "roo-cline.plusButtonClicked", + "command": "moo-code.plusButtonClicked", "group": "navigation@1", - "when": "activeWebviewPanelId == roo-cline.TabPanelProvider" + "when": "activeWebviewPanelId == 
moo-code.TabPanelProvider" }, { - "command": "roo-cline.settingsButtonClicked", + "command": "moo-code.settingsButtonClicked", "group": "navigation@2", - "when": "activeWebviewPanelId == roo-cline.TabPanelProvider" + "when": "activeWebviewPanelId == moo-code.TabPanelProvider" }, { - "command": "roo-cline.cloudButtonClicked", + "command": "moo-code.cloudButtonClicked", "group": "navigation@3", - "when": "activeWebviewPanelId == roo-cline.TabPanelProvider" + "when": "activeWebviewPanelId == moo-code.TabPanelProvider" }, { - "command": "roo-cline.marketplaceButtonClicked", + "command": "moo-code.marketplaceButtonClicked", "group": "navigation@4", - "when": "activeWebviewPanelId == roo-cline.TabPanelProvider" + "when": "activeWebviewPanelId == moo-code.TabPanelProvider" }, { - "command": "roo-cline.historyButtonClicked", + "command": "moo-code.historyButtonClicked", "group": "overflow@1", - "when": "activeWebviewPanelId == roo-cline.TabPanelProvider" + "when": "activeWebviewPanelId == moo-code.TabPanelProvider" }, { - "command": "roo-cline.popoutButtonClicked", + "command": "moo-code.popoutButtonClicked", "group": "overflow@2", - "when": "activeWebviewPanelId == roo-cline.TabPanelProvider" + "when": "activeWebviewPanelId == moo-code.TabPanelProvider" } ] }, "keybindings": [ { - "command": "roo-cline.addToContext", + "command": "moo-code.addToContext", "key": "cmd+k cmd+a", "mac": "cmd+k cmd+a", "win": "ctrl+k ctrl+a", @@ -287,7 +287,7 @@ "when": "editorTextFocus && editorHasSelection" }, { - "command": "roo-cline.toggleAutoApprove", + "command": "moo-code.toggleAutoApprove", "key": "cmd+alt+a", "mac": "cmd+alt+a", "win": "ctrl+alt+a", @@ -296,18 +296,18 @@ ], "submenus": [ { - "id": "roo-cline.contextMenu", + "id": "moo-code.contextMenu", "label": "%views.contextMenu.label%" }, { - "id": "roo-cline.terminalMenu", + "id": "moo-code.terminalMenu", "label": "%views.terminalMenu.label%" } ], "configuration": { "title": "%configuration.title%", "properties": { - 
"roo-cline.allowedCommands": { + "moo-code.allowedCommands": { "type": "array", "items": { "type": "string" @@ -319,7 +319,7 @@ ], "description": "%commands.allowedCommands.description%" }, - "roo-cline.deniedCommands": { + "moo-code.deniedCommands": { "type": "array", "items": { "type": "string" @@ -327,14 +327,14 @@ "default": [], "description": "%commands.deniedCommands.description%" }, - "roo-cline.commandExecutionTimeout": { + "moo-code.commandExecutionTimeout": { "type": "number", "default": 0, "minimum": 0, "maximum": 600, "description": "%commands.commandExecutionTimeout.description%" }, - "roo-cline.commandTimeoutAllowlist": { + "moo-code.commandTimeoutAllowlist": { "type": "array", "items": { "type": "string" @@ -342,12 +342,12 @@ "default": [], "description": "%commands.commandTimeoutAllowlist.description%" }, - "roo-cline.preventCompletionWithOpenTodos": { + "moo-code.preventCompletionWithOpenTodos": { "type": "boolean", "default": false, "description": "%commands.preventCompletionWithOpenTodos.description%" }, - "roo-cline.vsCodeLmModelSelector": { + "moo-code.vsCodeLmModelSelector": { "type": "object", "properties": { "vendor": { @@ -361,74 +361,168 @@ }, "description": "%settings.vsCodeLmModelSelector.description%" }, - "roo-cline.customStoragePath": { + "moo-code.customStoragePath": { "type": "string", "default": "", "description": "%settings.customStoragePath.description%" }, - "roo-cline.enableCodeActions": { + "moo-code.enableCodeActions": { "type": "boolean", "default": true, "description": "%settings.enableCodeActions.description%" }, - "roo-cline.autoImportSettingsPath": { + "moo-code.autoImportSettingsPath": { "type": "string", "default": "", "description": "%settings.autoImportSettingsPath.description%" }, - "roo-cline.maximumIndexedFilesForFileSearch": { + "moo-code.maximumIndexedFilesForFileSearch": { "type": "number", "default": 10000, "minimum": 5000, "maximum": 500000, "description": 
"%settings.maximumIndexedFilesForFileSearch.description%" }, - "roo-cline.useAgentRules": { + "moo-code.useAgentRules": { "type": "boolean", "default": true, "description": "%settings.useAgentRules.description%" }, - "roo-cline.apiRequestTimeout": { + "moo-code.apiRequestTimeout": { "type": "number", "default": 600, "minimum": 0, "maximum": 3600, "description": "%settings.apiRequestTimeout.description%" }, - "roo-cline.newTaskRequireTodos": { + "moo-code.newTaskRequireTodos": { "type": "boolean", "default": false, "description": "%settings.newTaskRequireTodos.description%" }, - "roo-cline.codeIndex.embeddingBatchSize": { + "moo-code.codeIndex.embeddingBatchSize": { "type": "number", "default": 60, "minimum": 1, "maximum": 200, "description": "%settings.codeIndex.embeddingBatchSize.description%" }, - "roo-cline.debug": { + "moo-code.debug": { "type": "boolean", "default": false, "description": "%settings.debug.description%" }, - "roo-cline.debugProxy.enabled": { + "moo-code.debugProxy.enabled": { "type": "boolean", "default": false, "description": "%settings.debugProxy.enabled.description%", "markdownDescription": "%settings.debugProxy.enabled.description%" }, - "roo-cline.debugProxy.serverUrl": { + "moo-code.debugProxy.serverUrl": { "type": "string", "default": "http://127.0.0.1:8888", "description": "%settings.debugProxy.serverUrl.description%", "markdownDescription": "%settings.debugProxy.serverUrl.description%" }, - "roo-cline.debugProxy.tlsInsecure": { + "moo-code.debugProxy.tlsInsecure": { "type": "boolean", "default": false, "description": "%settings.debugProxy.tlsInsecure.description%", "markdownDescription": "%settings.debugProxy.tlsInsecure.description%" + }, + "moo-code.microCompactEnabled": { + "type": "boolean", + "default": true, + "description": "%settings.microCompactEnabled.description%", + "icon": "settings-gear" + }, + "moo-code.microCompactThreshold": { + "type": "number", + "default": 5, + "minimum": 1, + "maximum": 50, + "description": 
"%settings.microCompactThreshold.description%", + "icon": "settings-gear" + }, + "moo-code.microCompactKeepRecent": { + "type": "number", + "default": 3, + "minimum": 1, + "maximum": 20, + "description": "%settings.microCompactKeepRecent.description%", + "icon": "settings-gear" + }, + "moo-code.sessionMemoryCompactEnabled": { + "type": "boolean", + "default": true, + "description": "%settings.sessionMemoryCompactEnabled.description%", + "icon": "settings-gear" + }, + "moo-code.sessionMemoryCompactMinTokens": { + "type": "number", + "default": 10000, + "minimum": 1000, + "maximum": 100000, + "description": "%settings.sessionMemoryCompactMinTokens.description%", + "icon": "settings-gear" + }, + "moo-code.sessionMemoryCompactMaxTokens": { + "type": "number", + "default": 50000, + "minimum": 10000, + "maximum": 200000, + "description": "%settings.sessionMemoryCompactMaxTokens.description%", + "icon": "settings-gear" + }, + "moo-code.sessionMemoryMinTokensToInit": { + "type": "number", + "default": 10000, + "minimum": 1000, + "maximum": 100000, + "description": "%settings.sessionMemoryMinTokensToInit.description%", + "icon": "settings-gear" + }, + "moo-code.sessionMemoryMinTokensBetweenUpdate": { + "type": "number", + "default": 5000, + "minimum": 1000, + "maximum": 50000, + "description": "%settings.sessionMemoryMinTokensBetweenUpdate.description%", + "icon": "settings-gear" + }, + "moo-code.sessionMemoryToolCallsBetweenUpdates": { + "type": "number", + "default": 3, + "minimum": 1, + "maximum": 20, + "description": "%settings.sessionMemoryToolCallsBetweenUpdates.description%", + "icon": "settings-gear" + }, + "moo-code.compactHooksEnabled": { + "type": "boolean", + "default": true, + "description": "%settings.compactHooksEnabled.description%", + "icon": "settings-gear" + }, + "moo-code.compactHooksPath": { + "type": "string", + "default": "", + "description": "%settings.compactHooksPath.description%", + "icon": "settings-gear" + }, + 
"moo-code.promptTooLongRetryEnabled": { + "type": "boolean", + "default": true, + "description": "%settings.promptTooLongRetryEnabled.description%", + "icon": "settings-gear" + }, + "moo-code.promptTooLongMaxRetries": { + "type": "number", + "default": 2, + "minimum": 0, + "maximum": 5, + "description": "%settings.promptTooLongMaxRetries.description%", + "icon": "settings-gear" } } } diff --git a/src/package.nls.json b/src/package.nls.json index 177b392f775..76b7a15525d 100644 --- a/src/package.nls.json +++ b/src/package.nls.json @@ -1,10 +1,10 @@ { - "extension.displayName": "Roo Code", + "extension.displayName": "Moo Code", "extension.description": "A whole dev team of AI agents in your editor.", - "views.contextMenu.label": "Roo Code", - "views.terminalMenu.label": "Roo Code", - "views.activitybar.title": "Roo Code", - "views.sidebar.name": "Roo Code", + "views.contextMenu.label": "Moo Code", + "views.terminalMenu.label": "Moo Code", + "views.activitybar.title": "Moo Code", + "views.sidebar.name": "Moo Code", "command.newTask.title": "New Task", "command.history.title": "Task History", "command.marketplace.title": "Marketplace", @@ -25,7 +25,7 @@ "command.terminal.explainCommand.title": "Explain This Command", "command.acceptInput.title": "Accept Input/Suggestion", "command.toggleAutoApprove.title": "Toggle Auto-Approve", - "configuration.title": "Roo Code", + "configuration.title": "Moo Code", "commands.allowedCommands.description": "Commands that can be auto-executed when 'Always approve execute operations' is enabled", "commands.deniedCommands.description": "Command prefixes that will be automatically denied without asking for approval. In case of conflicts with allowed commands, the longest prefix match takes precedence. 
Add * to deny all commands.", "commands.commandExecutionTimeout.description": "Maximum time in seconds to wait for command execution to complete before timing out (0 = no timeout, 1-600s, default: 0s)", @@ -35,8 +35,8 @@ "settings.vsCodeLmModelSelector.vendor.description": "The vendor of the language model (e.g. copilot)", "settings.vsCodeLmModelSelector.family.description": "The family of the language model (e.g. gpt-4)", "settings.customStoragePath.description": "Custom storage path. Leave empty to use the default location. Supports absolute paths (e.g. 'D:\\RooCodeStorage')", - "settings.enableCodeActions.description": "Enable Roo Code quick fixes", - "settings.autoImportSettingsPath.description": "Path to a RooCode configuration file to automatically import on extension startup. Supports absolute paths and paths relative to the home directory (e.g. '~/Documents/roo-code-settings.json'). Leave empty to disable auto-import.", + "settings.enableCodeActions.description": "Enable Moo Code quick fixes", + "settings.autoImportSettingsPath.description": "Path to a MooCode configuration file to automatically import on extension startup. Supports absolute paths and paths relative to the home directory (e.g. '~/Documents/moo-code-settings.json'). Leave empty to disable auto-import.", "settings.maximumIndexedFilesForFileSearch.description": "Maximum number of files to index for the @ file search feature. Higher values provide better search results in large projects but may use more memory. Default: 10,000.", "settings.useAgentRules.description": "Enable loading of AGENTS.md files for agent-specific rules (see https://agent-rules.org/)", "settings.apiRequestTimeout.description": "Maximum time in seconds to wait for API responses (0 = no timeout, 1-3600s, default: 600s). 
Higher values are recommended for local providers like LM Studio and Ollama that may need more processing time.", @@ -45,5 +45,18 @@ "settings.debug.description": "Enable debug mode to show additional buttons for viewing API conversation history and UI messages as prettified JSON in temporary files.", "settings.debugProxy.enabled.description": "**Enable Debug Proxy** — Route all outbound network requests through a proxy for MITM debugging. Only active when running in debug mode (F5).", "settings.debugProxy.serverUrl.description": "Proxy URL (e.g., `http://127.0.0.1:8888`). Only used when **Debug Proxy** is enabled.", - "settings.debugProxy.tlsInsecure.description": "Accept self-signed certificates from the proxy. **Required for MITM inspection.** ⚠️ Insecure — only use for local debugging." + "settings.debugProxy.tlsInsecure.description": "Accept self-signed certificates from the proxy. **Required for MITM inspection.** ⚠️ Insecure — only use for local debugging.", + "settings.microCompactEnabled.description": "Enable microcompact to automatically clear old tool result content (file reads, shell output, search results, etc.) to save tokens. Runs before full condensation.", + "settings.microCompactThreshold.description": "Number of messages back to start clearing tool results. When there are more than this many compactable tool results, older ones will be cleared. Default: 5.", + "settings.microCompactKeepRecent.description": "Number of most-recent compactable tool results to keep. The most recent N tool results are always preserved. Default: 3.", + "settings.sessionMemoryCompactEnabled.description": "Enable session memory compaction to use pre-extracted session memory as a condensation summary instead of calling the LLM again. This can significantly reduce API costs and improve performance for long conversations.", + "settings.sessionMemoryCompactMinTokens.description": "Minimum tokens to preserve after session memory compaction. 
The compaction will keep at least this many tokens of recent messages. Default: 10000.", + "settings.sessionMemoryCompactMaxTokens.description": "Maximum tokens to preserve after session memory compaction. This is a hard cap to prevent keeping too many messages. Default: 50000.", + "settings.sessionMemoryMinTokensToInit.description": "Minimum context window tokens before initializing session memory extraction. Session memory will only start being extracted after the conversation reaches this size. Default: 10000.", + "settings.sessionMemoryMinTokensBetweenUpdate.description": "Minimum context window growth (in tokens) between session memory updates. Session memory will only be updated after the conversation grows by this many tokens. Default: 5000.", + "settings.sessionMemoryToolCallsBetweenUpdates.description": "Number of tool calls between session memory updates. Session memory will be updated after this many tool calls occur. Default: 3.", + "settings.compactHooksEnabled.description": "Enable pre/post compact hooks to execute custom commands before and after context condensation. Hooks are defined in a .roo-hooks.json file in the workspace root.", + "settings.compactHooksPath.description": "Custom path to the hooks configuration file. Leave empty to use the default .roo-hooks.json file in the workspace root. Supports relative paths from the workspace root.", + "settings.promptTooLongRetryEnabled.description": "Enable automatic retry with head truncation when condensation hits a prompt-too-long error. This helps recover from situations where the condensed context is still too large for the API.", + "settings.promptTooLongMaxRetries.description": "Maximum number of retry attempts when condensation hits a prompt-too-long error. Each retry truncates more messages from the beginning of the conversation. Range: 0-5, default: 2." 
} diff --git a/src/services/checkpoints/ShadowCheckpointService.ts b/src/services/checkpoints/ShadowCheckpointService.ts index 89ae52c435e..754309b07f3 100644 --- a/src/services/checkpoints/ShadowCheckpointService.ts +++ b/src/services/checkpoints/ShadowCheckpointService.ts @@ -176,7 +176,7 @@ export abstract class ShadowCheckpointService extends EventEmitter { await git.init({ "--template": "" }) await git.addConfig("core.worktree", this.workspaceDir) // Sets the working tree to the current workspace. await git.addConfig("commit.gpgSign", "false") // Disable commit signing for shadow repo. - await git.addConfig("user.name", "Roo Code") + await git.addConfig("user.name", "Moo Code") await git.addConfig("user.email", "noreply@example.com") await this.writeExcludeFile() await this.stageAll(git) diff --git a/src/services/code-index/embedders/bedrock.ts b/src/services/code-index/embedders/bedrock.ts index 7652840c290..183f9978cfb 100644 --- a/src/services/code-index/embedders/bedrock.ts +++ b/src/services/code-index/embedders/bedrock.ts @@ -41,7 +41,7 @@ export class BedrockEmbedder implements IEmbedder { const credentials = this.profile ? 
fromIni({ profile: this.profile }) : fromNodeProviderChain() this.bedrockClient = new BedrockRuntimeClient({ - userAgentAppId: `RooCode#${Package.version}`, + userAgentAppId: `MooCode#${Package.version}`, region: this.region, credentials, }) diff --git a/src/services/code-index/embedders/openrouter.ts b/src/services/code-index/embedders/openrouter.ts index 2ffdd7afb64..6e4d9620d84 100644 --- a/src/services/code-index/embedders/openrouter.ts +++ b/src/services/code-index/embedders/openrouter.ts @@ -76,8 +76,8 @@ export class OpenRouterEmbedder implements IEmbedder { baseURL: this.baseUrl, apiKey: apiKey, defaultHeaders: { - "HTTP-Referer": "https://github.com/RooCodeInc/Roo-Code", - "X-Title": "Roo Code", + "HTTP-Referer": "https://github.com/moo-code/Moo-Code", + "X-Title": "Moo Code", }, }) } catch (error) { diff --git a/src/services/marketplace/RemoteConfigLoader.ts b/src/services/marketplace/RemoteConfigLoader.ts index b5851ae854d..e8ff646305e 100644 --- a/src/services/marketplace/RemoteConfigLoader.ts +++ b/src/services/marketplace/RemoteConfigLoader.ts @@ -1,117 +1,37 @@ -import axios from "axios" -import * as yaml from "yaml" -import { z } from "zod" - -import { - type MarketplaceItem, - type MarketplaceItemType, - modeMarketplaceItemSchema, - mcpMarketplaceItemSchema, -} from "@roo-code/types" -import { getRooCodeApiUrl } from "@roo-code/cloud" - -const modeMarketplaceResponse = z.object({ - items: z.array(modeMarketplaceItemSchema), -}) - -const mcpMarketplaceResponse = z.object({ - items: z.array(mcpMarketplaceItemSchema), -}) - +import type { MarketplaceItem, MarketplaceItemType } from "@roo-code/types" + +/** + * RemoteConfigLoader — cloud features disabled. + * + * All methods that would fetch from app.roocode.com return empty arrays. + * No HTTP requests are made to upstream servers. 
+ */ export class RemoteConfigLoader { - private apiBaseUrl: string private cache: Map = new Map() private cacheDuration = 5 * 60 * 1000 // 5 minutes constructor() { - this.apiBaseUrl = getRooCodeApiUrl() + // Cloud features disabled — no API base URL needed } - async loadAllItems(hideMarketplaceMcps = false): Promise { - const items: MarketplaceItem[] = [] - - const modesPromise = this.fetchModes() - const mcpsPromise = hideMarketplaceMcps ? Promise.resolve([]) : this.fetchMcps() - - const [modes, mcps] = await Promise.all([modesPromise, mcpsPromise]) - - items.push(...modes, ...mcps) - return items + async loadAllItems(_hideMarketplaceMcps = false): Promise { + // Cloud features disabled — return empty array, no HTTP calls + return [] } private async fetchModes(): Promise { - const cacheKey = "modes" - const cached = this.getFromCache(cacheKey) - - if (cached) { - return cached - } - - const data = await this.fetchWithRetry(`${this.apiBaseUrl}/api/marketplace/modes`) - - const yamlData = yaml.parse(data) - const validated = modeMarketplaceResponse.parse(yamlData) - - const items: MarketplaceItem[] = validated.items.map((item) => ({ - type: "mode" as const, - ...item, - })) - - this.setCache(cacheKey, items) - return items + // Cloud features disabled — return empty array + return [] } private async fetchMcps(): Promise { - const cacheKey = "mcps" - const cached = this.getFromCache(cacheKey) - - if (cached) { - return cached - } - - const data = await this.fetchWithRetry(`${this.apiBaseUrl}/api/marketplace/mcps`) - - const yamlData = yaml.parse(data) - const validated = mcpMarketplaceResponse.parse(yamlData) - - const items: MarketplaceItem[] = validated.items.map((item) => ({ - type: "mcp" as const, - ...item, - })) - - this.setCache(cacheKey, items) - return items - } - - private async fetchWithRetry(url: string, maxRetries = 3): Promise { - let lastError: Error - - for (let i = 0; i < maxRetries; i++) { - try { - const response = await axios.get(url, { - 
timeout: 10000, // 10 second timeout - headers: { - Accept: "application/json", - "Content-Type": "application/json", - }, - }) - return response.data as T - } catch (error) { - lastError = error as Error - if (i < maxRetries - 1) { - // Exponential backoff: 1s, 2s, 4s - const delay = Math.pow(2, i) * 1000 - await new Promise((resolve) => setTimeout(resolve, delay)) - } - } - } - - throw lastError! + // Cloud features disabled — return empty array + return [] } async getItem(id: string, type: MarketplaceItemType): Promise { - const items = await this.loadAllItems() - return items.find((item) => item.id === id && item.type === type) || null + // Cloud features disabled — no items available + return null } private getFromCache(key: string): MarketplaceItem[] | null { diff --git a/src/services/mcp/McpHub.ts b/src/services/mcp/McpHub.ts index ea38ee02d6d..00219e2ac34 100644 --- a/src/services/mcp/McpHub.ts +++ b/src/services/mcp/McpHub.ts @@ -592,7 +592,10 @@ export class McpHub { // Get project-level MCP configuration path private async getProjectMcpPath(): Promise { - const workspacePath = this.providerRef.deref()?.cwd ?? getWorkspacePath() + const provider = this.providerRef.deref() + // For worktree tasks, prefer the current task's workspacePath so each worktree + // can have its own .roo/mcp.json with branch-specific server definitions. + const workspacePath = provider?.getCurrentTask?.()?.workspacePath ?? provider?.cwd ?? getWorkspacePath() const projectMcpDir = path.join(workspacePath, ".roo") const projectMcpPath = path.join(projectMcpDir, "mcp.json") @@ -687,7 +690,7 @@ export class McpHub { try { const client = new Client( { - name: "Roo Code", + name: "Moo Code", version: this.providerRef.deref()?.context.extension?.packageJSON?.version ?? 
"1.0.0", }, { diff --git a/src/services/mdm/MdmService.ts b/src/services/mdm/MdmService.ts index 63bdbe29fca..921d2a7d1c4 100644 --- a/src/services/mdm/MdmService.ts +++ b/src/services/mdm/MdmService.ts @@ -147,19 +147,19 @@ export class MdmService { switch (platform) { case "win32": { - // Windows: %ProgramData%\RooCode\mdm.json or mdm.dev.json + // Windows: %ProgramData%\MooCode\mdm.json or mdm.dev.json const programData = process.env.PROGRAMDATA || "C:\\ProgramData" - return path.join(programData, "RooCode", configFileName) + return path.join(programData, "MooCode", configFileName) } case "darwin": - // macOS: /Library/Application Support/RooCode/mdm.json or mdm.dev.json - return `/Library/Application Support/RooCode/${configFileName}` + // macOS: /Library/Application Support/MooCode/mdm.json or mdm.dev.json + return `/Library/Application Support/MooCode/${configFileName}` case "linux": default: - // Linux: /etc/roo-code/mdm.json or mdm.dev.json - return `/etc/roo-code/${configFileName}` + // Linux: /etc/moo-code/mdm.json or mdm.dev.json + return `/etc/moo-code/${configFileName}` } } diff --git a/src/services/teams/TeamsManager.ts b/src/services/teams/TeamsManager.ts new file mode 100644 index 00000000000..abda7b9ab29 --- /dev/null +++ b/src/services/teams/TeamsManager.ts @@ -0,0 +1,66 @@ +import * as fs from "fs/promises" +import * as path from "path" + +import type { TeamConfig } from "@roo-code/types" +import type { ClineProvider } from "../../core/webview/ClineProvider" + +export class TeamsManager { + private teams: Map = new Map() + private providerRef: WeakRef + + constructor(provider: ClineProvider) { + this.providerRef = new WeakRef(provider) + } + + async initialize(): Promise { + await this.discoverTeams() + } + + /** + * Scan .roo/teams/*.json in the current workspace and cache all valid TeamConfig objects. + * Non-fatal: missing directory or malformed files are silently skipped. 
+ */ + async discoverTeams(): Promise<void> { + this.teams.clear() + const provider = this.providerRef.deref() + if (!provider) return + + const teamsDir = path.join(provider.cwd, ".roo", "teams") + + let entries: import("fs").Dirent[] + try { + entries = await fs.readdir(teamsDir, { withFileTypes: true, encoding: "utf-8" }) + } catch { + // Directory doesn't exist — normal for projects without teams + return + } + + for (const entry of entries) { + if (!entry.isFile() || !entry.name.endsWith(".json")) continue + const filePath = path.join(teamsDir, entry.name) + try { + const raw = await fs.readFile(filePath, "utf-8") + const config = JSON.parse(raw) as TeamConfig + if (typeof config.slug === "string" && config.slug && Array.isArray(config.phases)) { + config.$source = filePath + this.teams.set(config.slug, config) + } + } catch { + // Malformed JSON or missing required fields — skip silently + } + } + } + + getTeamConfig(slug: string): TeamConfig | undefined { + return this.teams.get(slug) + } + + listTeams(): TeamConfig[] { + return Array.from(this.teams.values()) + } + + /** Reload all team configs from disk (e.g., after a file-system change). 
*/ + async refresh(): Promise<void> { + await this.discoverTeams() + } +} diff --git a/src/shared/tools.ts b/src/shared/tools.ts index d2dd9907b17..423748666a4 100644 --- a/src/shared/tools.ts +++ b/src/shared/tools.ts @@ -81,6 +81,9 @@ export const toolParamNames = [ // read_file legacy format parameter (backward compatibility) "files", "line_ranges", + // run_team_phase parameters + "team_slug", + "phase_name", ] as const export type ToolParamName = (typeof toolParamNames)[number] @@ -116,6 +119,7 @@ export type NativeToolArgs = { update_todo_list: { todos: string } use_mcp_tool: { server_name: string; tool_name: string; arguments?: Record } write_to_file: { path: string; content: string } + run_team_phase: { team_slug: string; phase_name: string; task: string; context?: string | null } // Add more tools as they are migrated to native protocol } @@ -284,6 +288,8 @@ export const TOOL_DISPLAY_NAMES: Record = { attempt_completion: "complete tasks", switch_mode: "switch modes", new_task: "create new task", + spawn_parallel_tasks: "spawn parallel tasks", + run_team_phase: "run team phase", codebase_search: "codebase search", update_todo_list: "update todo list", run_slash_command: "run slash command", @@ -308,7 +314,7 @@ export const TOOL_GROUPS: Record = { tools: ["use_mcp_tool", "access_mcp_resource"], }, modes: { - tools: ["switch_mode", "new_task"], + tools: ["switch_mode", "new_task", "run_team_phase"], alwaysAvailable: true, }, } diff --git a/webview-ui/src/components/chat/ChatView.tsx b/webview-ui/src/components/chat/ChatView.tsx index 7008dbe590d..6ddd3e7e808 100644 --- a/webview-ui/src/components/chat/ChatView.tsx +++ b/webview-ui/src/components/chat/ChatView.tsx @@ -33,7 +33,6 @@ import RooTips from "@src/components/welcome/RooTips" import { StandardTooltip, Button } from "@src/components/ui" import { CloudUpsellDialog } from "@src/components/cloud/CloudUpsellDialog" -import TelemetryBanner from "../common/TelemetryBanner" import VersionIndicator from 
"../common/VersionIndicator" import HistoryPreview from "../history/HistoryPreview" import Announcement from "./Announcement" @@ -87,7 +86,7 @@ const ChatViewComponent: React.ForwardRefRenderFunction - {telemetrySetting === "unset" && } {(showAnnouncement || showAnnouncementModal) && ( { diff --git a/webview-ui/src/components/history/SubtaskRow.tsx b/webview-ui/src/components/history/SubtaskRow.tsx index 0089e1f81db..deae0ff0ff0 100644 --- a/webview-ui/src/components/history/SubtaskRow.tsx +++ b/webview-ui/src/components/history/SubtaskRow.tsx @@ -7,6 +7,17 @@ import { countAllSubtasks } from "./types" import { StandardTooltip } from "../ui" import SubtaskCollapsibleRow from "./SubtaskCollapsibleRow" +const AGENT_COLOR_MAP: Record = { + red: "#f87171", + blue: "#60a5fa", + green: "#4ade80", + yellow: "#facc15", + purple: "#c084fc", + orange: "#fb923c", + pink: "#f472b6", + cyan: "#22d3ee", +} + interface SubtaskRowProps { /** The subtask tree node to display */ node: SubtaskTreeNode @@ -49,9 +60,19 @@ const SubtaskRow = ({ node, depth, onToggleExpand, className }: SubtaskRowProps) handleClick() } }}> - - {item.task} - +
+ {item.agentColor && AGENT_COLOR_MAP[item.agentColor] && ( + + + + )} + + {item.task} + +
diff --git a/webview-ui/src/components/settings/ContextManagementSettings.tsx b/webview-ui/src/components/settings/ContextManagementSettings.tsx index 8663ea6e038..53b8a64daea 100644 --- a/webview-ui/src/components/settings/ContextManagementSettings.tsx +++ b/webview-ui/src/components/settings/ContextManagementSettings.tsx @@ -472,6 +472,68 @@ export const ContextManagementSettings = ({ /> + {/* Session Memory */} + +
+ Session Memory Compaction + +
+
+ Extracts structured notes from your conversation and uses them when compacting context, avoiding + an extra LLM call. Configure thresholds via{" "} + + . +
+
+ + {/* Compact Hooks */} + +
+ Compact Hooks + +
+
+ Run shell commands or HTTP webhooks before and after context condensation. Define hooks in{" "} + .roo-hooks.json{" "} + at the workspace root. Configure via{" "} + + . +
+
+ {/* Auto Condense Context */} posthog.identify(distinctId), - capture_pageview: false, - capture_pageleave: false, - autocapture: false, - }) - } else { - TelemetryClient.telemetryEnabled = false - } + public updateTelemetryState(_telemetrySetting: TelemetrySetting, _apiKey?: string, _distinctId?: string) { + // No-op: telemetry disabled } public static getInstance(): TelemetryClient { @@ -34,14 +16,8 @@ class TelemetryClient { return TelemetryClient.instance } - public capture(eventName: string, properties?: Record) { - if (TelemetryClient.telemetryEnabled) { - try { - posthog.capture(eventName, properties) - } catch (_error) { - // Silently fail if there's an error capturing an event. - } - } + public capture(_eventName: string, _properties?: Record) { + // No-op: telemetry disabled } } diff --git a/webview-ui/src/utils/docLinks.ts b/webview-ui/src/utils/docLinks.ts index 5d8b517b866..812562355a5 100644 --- a/webview-ui/src/utils/docLinks.ts +++ b/webview-ui/src/utils/docLinks.ts @@ -9,6 +9,6 @@ export function buildDocLink(path: string, campaign: string): string { // Remove any leading slash from path const cleanPath = path.replace(/^\//, "") const [basePath, hash] = cleanPath.split("#") - const baseUrl = `https://docs.roocode.com/${basePath}?utm_source=extension&utm_medium=ide&utm_campaign=${encodeURIComponent(campaign)}` + const baseUrl = `https://docs.moo-code.dev/${basePath}?utm_source=extension&utm_medium=ide&utm_campaign=${encodeURIComponent(campaign)}` return hash ? `${baseUrl}#${hash}` : baseUrl }