diff --git a/.dockerignore b/.dockerignore
new file mode 100644
index 0000000000000000000000000000000000000000..7e79f30eadf9be8b732b2e7f506ba0ce6ed03bf6
--- /dev/null
+++ b/.dockerignore
@@ -0,0 +1,5 @@
+dist-server/
+cli/
+node_modules/
+plandex-server
+plandex-cloud
diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 0000000000000000000000000000000000000000..a471a42dd8c935f2a29ccf02b81058c332492b4c
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1 @@
+*.sh text eol=lf
\ No newline at end of file
diff --git a/.github/workflows/docker-publish.yml b/.github/workflows/docker-publish.yml
new file mode 100644
index 0000000000000000000000000000000000000000..c50ed48643bb9d35f6d7ba0670c4b71bfe632d4f
--- /dev/null
+++ b/.github/workflows/docker-publish.yml
@@ -0,0 +1,113 @@
+name: Build and publish Docker Image
+
+on:
+ release:
+ types: [created]
+ workflow_dispatch: # enable manual triggering
+
+jobs:
+ check_release:
+ runs-on: ubuntu-latest
+ outputs:
+ should_build: ${{ steps.check_release.outputs.should_build }}
+ tag: ${{ steps.check_release.outputs.tag }}
+
+ steps:
+ - name: Check release tag and find latest server tag
+ id: check_release
+ run: |
+ if [ "${{ github.event_name }}" == "release" ]; then
+ # This is a release event - check if the tag starts with 'server'
+ if [[ "${{ github.ref_name }}" == server* ]]; then
+ echo "This is a server release: ${{ github.ref_name }}"
+ echo "should_build=true" >> $GITHUB_OUTPUT
+ echo "tag=${{ github.ref_name }}" >> $GITHUB_OUTPUT
+ else
+ echo "This is not a server release. Skipping build."
+ echo "should_build=false" >> $GITHUB_OUTPUT
+ fi
+ else
+ # This is a manual workflow_dispatch event
+ echo "This is a manual workflow trigger. Proceeding to find latest server tag."
+ echo "should_build=true" >> $GITHUB_OUTPUT
+ echo "tag=latest_server" >> $GITHUB_OUTPUT
+ fi
+
+ build_and_push:
+ needs: check_release
+ if: needs.check_release.outputs.should_build == 'true'
+ runs-on: ubuntu-latest
+
+ steps:
+ - name: Check out the repo
+ uses: actions/checkout@v2
+ with:
+ fetch-depth: 0 # Fetch all history and tags
+
+ - name: Find latest server tag
+ id: find_tag
+ if: needs.check_release.outputs.tag == 'latest_server'
+ run: |
+ # Find the latest tag that starts with 'server'
+ LATEST_SERVER_TAG=$(git tag -l "server*" --sort=-creatordate | head -n 1)
+
+ if [ -z "$LATEST_SERVER_TAG" ]; then
+ echo "No tags starting with 'server' found."
+ echo "skip=true" >> $GITHUB_OUTPUT
+ else
+ echo "Found latest server tag: $LATEST_SERVER_TAG"
+ echo "skip=false" >> $GITHUB_OUTPUT
+ echo "tag=$LATEST_SERVER_TAG" >> $GITHUB_OUTPUT
+ fi
+ shell: bash
+
+ - name: Set release tag
+ id: set_tag
+ if: needs.check_release.outputs.tag != 'latest_server'
+ run: |
+ echo "skip=false" >> $GITHUB_OUTPUT
+ echo "tag=${{ needs.check_release.outputs.tag }}" >> $GITHUB_OUTPUT
+
+      - name: Fail if no server tag found
+        if: (steps.find_tag.outputs.skip == 'true' && needs.check_release.outputs.tag == 'latest_server')
+        run: |
+          echo "Failing the build because no tag starting with 'server' was found."
+ exit 1
+
+ - name: Set up QEMU
+ uses: docker/setup-qemu-action@v2
+
+ - name: Set up Docker Buildx
+ uses: docker/setup-buildx-action@v1
+
+ - name: Log in to Docker Hub
+ uses: docker/login-action@v1
+ with:
+ username: ${{ secrets.DOCKERHUB_USERNAME }}
+ password: ${{ secrets.DOCKERHUB_TOKEN }}
+
+ - name: Determine tag to use
+ id: determine_tag
+ run: |
+ if [ "${{ needs.check_release.outputs.tag }}" == "latest_server" ]; then
+ TAG="${{ steps.find_tag.outputs.tag }}"
+ else
+ TAG="${{ needs.check_release.outputs.tag }}"
+ fi
+ echo "Using tag: $TAG"
+ echo "tag=$TAG" >> $GITHUB_OUTPUT
+
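+      # Docker image tags can't contain '/' or '+', so both are mapped to '-' below (e.g. 'server/v2.0.0' becomes 'server-v2.0.0')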
+ - name: Sanitize tag name
+ id: sanitize
+ run: echo "SANITIZED_TAG_NAME=$(echo ${{ steps.determine_tag.outputs.tag }} | tr '/' '-' | tr '+' '-')" >> $GITHUB_OUTPUT
+
+ - name: Build and push
+ uses: docker/build-push-action@v2
+ with:
+ context: ./app/
+ file: ./app/server/Dockerfile
+ push: true
+ platforms: linux/amd64,linux/arm64
+ tags: |
+ plandexai/plandex-server:${{ steps.sanitize.outputs.SANITIZED_TAG_NAME }}
+ plandexai/plandex-server:latest
\ No newline at end of file
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..f825f3f9f9baf6a508bf8415dfbce69777548ccc
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,27 @@
+.plandex/
+.plandex-dev/
+.plandex-v2/
+.plandex-dev-v2/
+.envkey
+.env
+.env.*
+plandex
+plandex-dev
+plandex-server
+*.exe
+node_modules/
+/tools/
+/static/
+/infra/
+/payments-dashboard/
+.DS_Store
+.goreleaser.yml
+dist/
+__pycache__/
+
+.aider.*
+*.code-workspace
+
+__pycache__/
+
+.repo_ignore
\ No newline at end of file
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..4aa379f2105798e6d6a8a4f1020ba272f23995f4
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2025 PlandexAI Inc.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/README.md b/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..be6a66a80e9a7330bf1b4299bde28311060ff8a7
--- /dev/null
+++ b/README.md
@@ -0,0 +1,234 @@
+
+
+
+
+💻 Plandex is a terminal-based AI development tool that can **plan and execute** large coding tasks that span many steps and touch dozens of files. It can handle up to 2M tokens of context directly (~100k per file), and can index directories with 20M tokens or more using tree-sitter project maps.
+
+🔬 **A cumulative diff review sandbox** keeps AI-generated changes separate from your project files until they are ready to go. Command execution is controlled so you can easily roll back and debug. Plandex helps you get the most out of AI without leaving behind a mess in your project.
+
+🧠 **Combine the best models** from Anthropic, OpenAI, Google, and open source providers to build entire features and apps with a robust terminal-based workflow.
+
+🚀 Plandex is capable of full autonomy—it can load relevant files, plan and implement changes, execute commands, and automatically debug—but it's also highly flexible and configurable, giving developers fine-grained control and a step-by-step review process when needed.
+
+💪 Plandex is designed to be resilient to large projects and files. If you've found that other tools struggle once your project gets past a certain size or the changes are too complex, give Plandex a shot.
+
+## Smart context management that works in big projects
+
+- 🐘 **2M token effective context window** with the default model pack. Plandex loads only what's needed for each step.
+
+- 🗄️ **Reliable in large projects and files.** Easily generate, review, revise, and apply changes spanning dozens of files.
+
+- 🗺️ **Fast project map generation** and syntax validation with tree-sitter. Supports 30+ languages.
+
+- 💰 **Context caching** is used across the board for OpenAI, Anthropic, and Google models, reducing costs and latency.
+
+## Tight control or full autonomy—it's up to you
+
+- 🚦 **Configurable autonomy:** go from full auto mode to fine-grained control depending on the task.
+
+- 🐞 **Automated debugging** of terminal commands (like builds, linters, tests, deployments, and scripts). If you have Chrome installed, you can also automatically debug browser applications.
+
+## Tools that help you get production-ready results
+
+- 💬 **A project-aware chat mode** that helps you flesh out ideas before moving to implementation. Also great for asking questions and learning about a codebase.
+
+- 🧠 **Easily try + combine models** from multiple providers. Curated model packs offer different tradeoffs of capability, cost, and speed, as well as open source and provider-specific packs.
+
+- 🛡️ **Reliable file edits** that prioritize correctness. While most edits are quick and cheap, Plandex validates both syntax and logic as needed, with multiple fallback layers when there are problems.
+
+- 🔀 **Full-fledged version control** for every update to the plan, including branches for exploring multiple paths or comparing different models.
+
+- 📂 **Git integration** with commit message generation and optional automatic commits.
+
+## Dev-friendly, easy to install
+
+- 🧑‍💻 **REPL mode** with fuzzy auto-complete for commands and file loading. Just run `plandex` in any project to get started.
+
+- 🛠️ **CLI interface** for scripting or piping data into context.
+
+- 📦 **One-line, zero-dependency CLI install**. Dockerized local mode for easy self-hosting of the server. Cloud-hosting options for extra reliability and convenience.
+
+## Workflow 🔄
+
+
+
+## Examples 🎥
+
+
+
+
+Plandex is a terminal-based AI programming engine for complex tasks.
+To accept the invite, first install Plandex, then open a terminal and run 'plandex sign-in'. Enter '%s' when asked for your email and follow the prompts from there.
+If you have questions, feedback, or run into a problem, you can reply directly to this email, start a discussion, or open an issue.
+`, inviteeFirstName, inviterName, orgName, email)
+
+	textBody := fmt.Sprintf("Hi %s,\n\n%s has invited you to join the org %s on Plandex.\n\nPlandex is a terminal-based AI programming engine for complex tasks.\n\nTo accept the invite, first install Plandex (https://docs.plandex.ai/install/), then open a terminal and run 'plandex sign-in'. Enter '%s' when asked for your email and follow the prompts from there.\n\nIf you have questions, feedback, or run into a problem, you can reply directly to this email, start a discussion (https://github.com/plandex-ai/plandex/discussions), or open an issue (https://github.com/plandex-ai/plandex/issues).", inviteeFirstName, inviterName, orgName, email)
+
+ if os.Getenv("IS_CLOUD") == "" {
+ return sendEmailViaSMTP(email, subject, htmlBody, textBody)
+ } else {
+ return SendEmailViaSES(email, subject, htmlBody, textBody)
+ }
+ } else {
+ // Send notification
+ err := beeep.Notify("Invite Sent", fmt.Sprintf("Invite sent to %s (email not sent in development)", email), "")
+ if err != nil {
+ return fmt.Errorf("error sending notification in dev: %v", err)
+ }
+ }
+
+ return nil
+}
diff --git a/app/server/email/verification.go b/app/server/email/verification.go
new file mode 100644
index 0000000000000000000000000000000000000000..d618c9962d02a13c02c69ffb047356b915d47503
--- /dev/null
+++ b/app/server/email/verification.go
@@ -0,0 +1,43 @@
+package email
+
+import (
+ "fmt"
+ "log"
+ "os"
+
+ "github.com/atotto/clipboard"
+ "github.com/gen2brain/beeep"
+)
+
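+// SendVerificationEmail sends the login pin by email in production (SMTP when
+// self-hosted, SES on cloud). In development it logs the pin, copies it to the
+// clipboard, and shows a desktop notification instead.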
+func SendVerificationEmail(email string, pin string) error {
+ // Check if the environment is production
+ if os.Getenv("GOENV") == "production" {
+ // Production environment - send email using AWS SES
+ subject := "Your Plandex Pin"
+ htmlBody := fmt.Sprintf(`
+Hi there,
+
+Welcome to Plandex! Your pin is:
+%s
+
+It will be valid for the next 5 minutes.
+
+If you didn't request this, you can safely ignore the email.
+`, pin)
+ textBody := fmt.Sprintf("Hi there,\n\nWelcome to Plandex! Your pin is:\n\n%s\n\nIt will be valid for the next 5 minutes.\n\nIf you didn't request this, you can safely ignore the email.", pin)
+
+ if os.Getenv("IS_CLOUD") == "" {
+ return sendEmailViaSMTP(email, subject, htmlBody, textBody)
+ } else {
+ return SendEmailViaSES(email, subject, htmlBody, textBody)
+ }
+ }
+
+ if os.Getenv("GOENV") == "development" {
+ // Development environment
+ log.Printf("Development mode: Verification pin is %s for email %s", pin, email)
+
+ // Copy pin to clipboard
+ clipboard.WriteAll(pin) // ignore error
+
+ // Send notification
+		beeep.Notify("Verification Pin", fmt.Sprintf("Verification pin %s for %s copied to clipboard", pin, email), "") // ignore error
+ }
+
+ return nil
+}
diff --git a/app/server/go.mod b/app/server/go.mod
new file mode 100644
index 0000000000000000000000000000000000000000..9eb18710016a8f5f84be397a110ff74dc25c13e5
--- /dev/null
+++ b/app/server/go.mod
@@ -0,0 +1,53 @@
+module plandex-server
+
+go 1.23.3
+
+require (
+ github.com/davecgh/go-spew v1.1.1
+ github.com/google/uuid v1.6.0
+ github.com/gorilla/mux v1.8.1
+ github.com/pkg/errors v0.9.1
+ github.com/sashabaranov/go-openai v1.40.0
+ plandex-shared v0.0.0-00010101000000-000000000000
+)
+
+require (
+ github.com/dlclark/regexp2 v1.11.5 // indirect
+ github.com/go-toast/toast v0.0.0-20190211030409-01e6764cf0a4 // indirect
+ github.com/godbus/dbus/v5 v5.1.0 // indirect
+ github.com/google/go-cmp v0.7.0 // indirect
+ github.com/hashicorp/errwrap v1.1.0 // indirect
+ github.com/hashicorp/go-multierror v1.1.1 // indirect
+ github.com/jinzhu/copier v0.4.0 // indirect
+ github.com/jmespath/go-jmespath v0.4.0 // indirect
+ github.com/mattn/go-colorable v0.1.14 // indirect
+ github.com/mattn/go-isatty v0.0.20 // indirect
+ github.com/mattn/go-runewidth v0.0.16 // indirect
+ github.com/nu7hatch/gouuid v0.0.0-20131221200532-179d4d0c4d8d // indirect
+ github.com/olekukonko/tablewriter v0.0.5 // indirect
+ github.com/pkoukk/tiktoken-go v0.1.7 // indirect
+ github.com/pmezard/go-difflib v1.0.0 // indirect
+ github.com/rivo/uniseg v0.4.7 // indirect
+ github.com/shopspring/decimal v1.4.0 // indirect
+ github.com/tadvi/systray v0.0.0-20190226123456-11a2b8fa57af // indirect
+ go.uber.org/atomic v1.11.0 // indirect
+ golang.org/x/image v0.27.0 // indirect
+ golang.org/x/sys v0.33.0 // indirect
+ gopkg.in/yaml.v3 v3.0.1 // indirect
+)
+
+require (
+ github.com/atotto/clipboard v0.1.4
+ github.com/aws/aws-sdk-go v1.55.7
+ github.com/fatih/color v1.18.0
+ github.com/gen2brain/beeep v0.0.0-20240516210008-9c006672e7f4
+ github.com/golang-migrate/migrate/v4 v4.18.3
+ github.com/jmoiron/sqlx v1.4.0
+ github.com/lib/pq v1.10.9
+ github.com/smacker/go-tree-sitter v0.0.0-20240827094217-dd81d9e9be82
+ github.com/stretchr/testify v1.10.0
+ golang.org/x/mod v0.21.0
+ golang.org/x/net v0.40.0
+)
+
+replace plandex-shared => ../shared
diff --git a/app/server/go.sum b/app/server/go.sum
new file mode 100644
index 0000000000000000000000000000000000000000..a55f5e68ca4fa80dc628cfb47f8999ce98b7b6e8
--- /dev/null
+++ b/app/server/go.sum
@@ -0,0 +1,134 @@
+filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA=
+filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4=
+github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0=
+github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
+github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
+github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
+github.com/atotto/clipboard v0.1.4 h1:EH0zSVneZPSuFR11BlR9YppQTVDbh5+16AmcJi4g1z4=
+github.com/atotto/clipboard v0.1.4/go.mod h1:ZY9tmq7sm5xIbd9bOK4onWV4S6X0u6GY7Vn0Yu86PYI=
+github.com/aws/aws-sdk-go v1.55.7 h1:UJrkFq7es5CShfBwlWAC8DA077vp8PyVbQd3lqLiztE=
+github.com/aws/aws-sdk-go v1.55.7/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/dhui/dktest v0.4.5 h1:uUfYBIVREmj/Rw6MvgmqNAYzTiKOHJak+enB5Di73MM=
+github.com/dhui/dktest v0.4.5/go.mod h1:tmcyeHDKagvlDrz7gDKq4UAJOLIfVZYkfD5OnHDwcCo=
+github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
+github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
+github.com/dlclark/regexp2 v1.11.5 h1:Q/sSnsKerHeCkc/jSTNq1oCm7KiVgUMZRDUoRu0JQZQ=
+github.com/dlclark/regexp2 v1.11.5/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8=
+github.com/docker/docker v27.2.0+incompatible h1:Rk9nIVdfH3+Vz4cyI/uhbINhEZ/oLmc+CBXmH6fbNk4=
+github.com/docker/docker v27.2.0+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c=
+github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc=
+github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
+github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
+github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM=
+github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU=
+github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
+github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
+github.com/gen2brain/beeep v0.0.0-20240516210008-9c006672e7f4 h1:ygs9POGDQpQGLJPlq4+0LBUmMBNox1N4JSpw+OETcvI=
+github.com/gen2brain/beeep v0.0.0-20240516210008-9c006672e7f4/go.mod h1:0W7dI87PvXJ1Sjs0QPvWXKcQmNERY77e8l7GFhZB/s4=
+github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
+github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
+github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
+github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y=
+github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg=
+github.com/go-toast/toast v0.0.0-20190211030409-01e6764cf0a4 h1:qZNfIGkIANxGv/OqtnntR4DfOY2+BgwR60cAcu/i3SE=
+github.com/go-toast/toast v0.0.0-20190211030409-01e6764cf0a4/go.mod h1:kW3HQ4UdaAyrUCSSDR4xUzBKW6O2iA4uHhk7AtyYp10=
+github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk=
+github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
+github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
+github.com/golang-migrate/migrate/v4 v4.18.3 h1:EYGkoOsvgHHfm5U/naS1RP/6PL/Xv3S4B/swMiAmDLs=
+github.com/golang-migrate/migrate/v4 v4.18.3/go.mod h1:99BKpIi6ruaaXRM1A77eqZ+FWPQ3cfRa+ZVy5bmWMaY=
+github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
+github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
+github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
+github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
+github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
+github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
+github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
+github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
+github.com/jinzhu/copier v0.4.0 h1:w3ciUoD19shMCRargcpm0cm91ytaBhDvuRpz1ODO/U8=
+github.com/jinzhu/copier v0.4.0/go.mod h1:DfbEm0FYsaqBcKcFuvmOZb218JkPGtvSHsKg8S8hyyg=
+github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
+github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
+github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
+github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
+github.com/jmoiron/sqlx v1.4.0 h1:1PLqN7S1UYp5t4SrVVnt4nUVNemrDAtxlulVe+Qgm3o=
+github.com/jmoiron/sqlx v1.4.0/go.mod h1:ZrZ7UsYB/weZdl2Bxg6jCRO9c3YHl8r3ahlKmRT4JLY=
+github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
+github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
+github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE=
+github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=
+github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
+github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
+github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
+github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc=
+github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
+github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU=
+github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
+github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
+github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
+github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
+github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
+github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
+github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
+github.com/nu7hatch/gouuid v0.0.0-20131221200532-179d4d0c4d8d h1:VhgPp6v9qf9Agr/56bj7Y/xa04UccTW04VP0Qed4vnQ=
+github.com/nu7hatch/gouuid v0.0.0-20131221200532-179d4d0c4d8d/go.mod h1:YUTz3bUH2ZwIWBy3CJBeOBEugqcmXREj14T+iG/4k4U=
+github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
+github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
+github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
+github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
+github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug=
+github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM=
+github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkoukk/tiktoken-go v0.1.7 h1:qOBHXX4PHtvIvmOtyg1EeKlwFRiMKAcoMp4Q+bLQDmw=
+github.com/pkoukk/tiktoken-go v0.1.7/go.mod h1:9NiV+i9mJKGj1rYOT+njbv+ZwA/zJxYdewGl6qVatpg=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
+github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
+github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
+github.com/sashabaranov/go-openai v1.40.0 h1:Peg9Iag5mUJtPW00aYatlsn97YML0iNULiLNe74iPrU=
+github.com/sashabaranov/go-openai v1.40.0/go.mod h1:lj5b/K+zjTSFxVLijLSTDZuP7adOgerWeFyZLUhAKRg=
+github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k=
+github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME=
+github.com/smacker/go-tree-sitter v0.0.0-20240827094217-dd81d9e9be82 h1:6C8qej6f1bStuePVkLSFxoU22XBS165D3klxlzRg8F4=
+github.com/smacker/go-tree-sitter v0.0.0-20240827094217-dd81d9e9be82/go.mod h1:xe4pgH49k4SsmkQq5OT8abwhWmnzkhpgnXeekbx2efw=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
+github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+github.com/tadvi/systray v0.0.0-20190226123456-11a2b8fa57af h1:6yITBqGTE2lEeTPG04SN9W+iWHCRyHqlVYILiSXziwk=
+github.com/tadvi/systray v0.0.0-20190226123456-11a2b8fa57af/go.mod h1:4F09kP5F+am0jAwlQLddpoMDM+iewkxxt6nxUQ5nq5o=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 h1:TT4fX+nBOA/+LUkobKGW1ydGcn+G3vRw9+g5HwCphpk=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0/go.mod h1:L7UH0GbB0p47T4Rri3uHjbpCFYrVrwc1I25QhNPiGK8=
+go.opentelemetry.io/otel v1.29.0 h1:PdomN/Al4q/lN6iBJEN3AwPvUiHPMlt93c8bqTG5Llw=
+go.opentelemetry.io/otel v1.29.0/go.mod h1:N/WtXPs1CNCUEx+Agz5uouwCba+i+bJGFicT8SR4NP8=
+go.opentelemetry.io/otel/metric v1.29.0 h1:vPf/HFWTNkPu1aYeIsc98l4ktOQaL6LeSoeV2g+8YLc=
+go.opentelemetry.io/otel/metric v1.29.0/go.mod h1:auu/QWieFVWx+DmQOUMgj0F8LHWdgalxXqvp7BII/W8=
+go.opentelemetry.io/otel/trace v1.29.0 h1:J/8ZNK4XgR7a21DZUAsbF8pZ5Jcw1VhACmnYt39JTi4=
+go.opentelemetry.io/otel/trace v1.29.0/go.mod h1:eHl3w0sp3paPkYstJOmAimxhiFXPg+MMTlEh3nsQgWQ=
+go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
+go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
+golang.org/x/image v0.27.0 h1:C8gA4oWU/tKkdCfYT6T2u4faJu3MeNS5O8UPWlPF61w=
+golang.org/x/image v0.27.0/go.mod h1:xbdrClrAUway1MUTEZDq9mz/UpRwYAkFFNUslZtcB+g=
+golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0=
+golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
+golang.org/x/net v0.40.0 h1:79Xs7wF06Gbdcg4kdCCIQArK11Z1hr5POQ6+fIYHNuY=
+golang.org/x/net v0.40.0/go.mod h1:y0hY0exeL2Pku80/zKK7tpntoX23cqL3Oa6njdgRtds=
+golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw=
+golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
+gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/app/server/handlers/accounts.go b/app/server/handlers/accounts.go
new file mode 100644
index 0000000000000000000000000000000000000000..3c4177e63fc98c9b350e3fdbf339fd7a3901d0b7
--- /dev/null
+++ b/app/server/handlers/accounts.go
@@ -0,0 +1,137 @@
+package handlers
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "log"
+ "net/http"
+ "os"
+ "plandex-server/db"
+ "plandex-server/hooks"
+ "plandex-server/types"
+ "strings"
+
+ shared "plandex-shared"
+
+ "github.com/jmoiron/sqlx"
+)
+
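+// CreateAccountHandler creates a new account and returns a session response.
+// Account creation is disabled in cloud mode, and email pin verification is
+// skipped in local development mode.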
+func CreateAccountHandler(w http.ResponseWriter, r *http.Request) {
+ log.Println("Received request for CreateAccountHandler")
+
+ if os.Getenv("IS_CLOUD") != "" {
+ log.Println("Creating accounts is not supported in cloud mode")
+ http.Error(w, "Creating accounts is not supported in cloud mode", http.StatusNotImplemented)
+ return
+ }
+
+ isLocalMode := (os.Getenv("GOENV") == "development" && os.Getenv("LOCAL_MODE") == "1")
+
+ // read the request body
+ body, err := io.ReadAll(r.Body)
+ if err != nil {
+ log.Printf("Error reading request body: %v\n", err)
+ http.Error(w, "Error reading request body: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ var req shared.CreateAccountRequest
+ err = json.Unmarshal(body, &req)
+ if err != nil {
+ log.Printf("Error unmarshalling request: %v\n", err)
+ http.Error(w, "Error unmarshalling request: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+ req.Email = strings.ToLower(req.Email)
+
+ var emailVerificationId string
+
+	// skip email verification in local mode (GOENV=development with LOCAL_MODE=1)
+ if !isLocalMode {
+ emailVerificationId, err = db.ValidateEmailVerification(req.Email, req.Pin)
+
+ if err != nil {
+ log.Printf("Error validating email verification: %v\n", err)
+ http.Error(w, "Error validating email verification: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+ }
+
+ var apiErr *shared.ApiError
+ var user *db.User
+ var userId string
+ var token string
+ var orgId string
+
+ err = db.WithTx(r.Context(), "create account", func(tx *sqlx.Tx) error {
+ res, err := db.CreateAccount(req.UserName, req.Email, emailVerificationId, tx)
+
+ if err != nil {
+ return fmt.Errorf("error creating account: %v", err)
+ }
+
+ user = res.User
+ userId = user.Id
+ token = res.Token
+ orgId = res.OrgId
+
+ _, apiErr = hooks.ExecHook(hooks.CreateAccount, hooks.HookParams{
+ Auth: &types.ServerAuth{
+ User: user,
+ OrgId: orgId,
+ },
+ })
+
+ return nil
+ })
+
+ if apiErr != nil {
+ writeApiError(w, *apiErr)
+ return
+ }
+
+ if err != nil {
+ log.Printf("Error creating account: %v\n", err)
+ http.Error(w, "Error creating account: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ // get orgs
+ orgs, err := db.GetAccessibleOrgsForUser(user)
+
+ if err != nil {
+ log.Printf("Error getting orgs for user: %v\n", err)
+ http.Error(w, "Error getting orgs for user: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ apiOrgs, apiErr := toApiOrgs(orgs)
+
+ if apiErr != nil {
+ log.Printf("Error converting orgs to API orgs: %v\n", apiErr)
+ writeApiError(w, *apiErr)
+ return
+ }
+
+ resp := shared.SessionResponse{
+ UserId: userId,
+ Token: token,
+ Email: req.Email,
+ UserName: req.UserName,
+ Orgs: apiOrgs,
+		IsLocalMode: isLocalMode,
+ }
+
+ bytes, err := json.Marshal(resp)
+
+ if err != nil {
+ log.Printf("Error marshalling response: %v\n", err)
+ http.Error(w, "Error marshalling response: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ log.Println("Successfully created account")
+
+ w.Write(bytes)
+}
diff --git a/app/server/handlers/auth_helpers.go b/app/server/handlers/auth_helpers.go
new file mode 100644
index 0000000000000000000000000000000000000000..6023a51b67d6b6b66ebdc1279d44e29570227fd3
--- /dev/null
+++ b/app/server/handlers/auth_helpers.go
@@ -0,0 +1,751 @@
+package handlers
+
+import (
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+ "log"
+ "net/http"
+ "os"
+ "plandex-server/db"
+ "plandex-server/hooks"
+ "plandex-server/types"
+ "strings"
+ "time"
+
+ shared "plandex-shared"
+
+ "github.com/jmoiron/sqlx"
+ "golang.org/x/mod/semver"
+)
+
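+// Authenticate validates the request's auth token (and org membership when
+// requireOrg is true), writing an HTTP error response and returning nil on failure.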
+func Authenticate(w http.ResponseWriter, r *http.Request, requireOrg bool) *types.ServerAuth {
+ return execAuthenticate(w, r, requireOrg, true)
+}
+
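+// AuthenticateOptional is like Authenticate, but for most failures it returns
+// nil without writing an error response.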
+func AuthenticateOptional(w http.ResponseWriter, r *http.Request, requireOrg bool) *types.ServerAuth {
+ return execAuthenticate(w, r, requireOrg, false)
+}
+
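+// GetAuthHeader parses the Authorization header, falling back to the authToken
+// cookie for browser requests. The expected value is "Bearer <base64url-encoded
+// JSON>", where the JSON payload unmarshals into shared.AuthHeader.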
+func GetAuthHeader(r *http.Request) (*shared.AuthHeader, error) {
+ authHeader := r.Header.Get("Authorization")
+
+ // check for a cookie as well for ui requests
+ if authHeader == "" {
+ log.Println("no auth header - checking for cookie")
+
+ // Try to get auth token from a cookie as a fallback
+ cookie, err := r.Cookie("authToken")
+ if err != nil {
+ if err == http.ErrNoCookie {
+ log.Println("no auth cookie")
+ return nil, nil
+ }
+ return nil, fmt.Errorf("error retrieving auth cookie: %v", err)
+ }
+ // Use the token from the cookie as the fallback authorization header
+ authHeader = cookie.Value
+ log.Println("got auth header from cookie")
+ }
+
+ if authHeader == "" {
+ return nil, nil
+ }
+
+ if !strings.HasPrefix(authHeader, "Bearer ") {
+ return nil, fmt.Errorf("invalid auth header")
+ }
+
+ // strip off the "Bearer " prefix
+ encoded := strings.TrimPrefix(authHeader, "Bearer ")
+
+ // decode the base64-encoded credentials
+ bytes, err := base64.URLEncoding.DecodeString(encoded)
+
+ if err != nil {
+ return nil, fmt.Errorf("error decoding auth token: %v", err)
+ }
+
+ // parse the credentials
+ var parsed shared.AuthHeader
+ err = json.Unmarshal(bytes, &parsed)
+
+ if err != nil {
+ return nil, fmt.Errorf("error parsing auth token: %v", err)
+ }
+
+ return &parsed, nil
+}
+
+func ClearAuthCookieIfBrowser(w http.ResponseWriter, r *http.Request) error {
+ acceptHeader := r.Header.Get("Accept")
+ if acceptHeader == "" {
+ // no accept header, not a browser request
+ return nil
+ }
+
+ // Check for existing auth cookie
+ _, err := r.Cookie("authToken")
+ if err == http.ErrNoCookie {
+ // No auth cookie, nothing to clear
+ return nil
+ }
+ if err != nil {
+ return fmt.Errorf("error retrieving auth cookie: %v", err)
+ }
+
+ var domain string
+ if os.Getenv("GOENV") == "production" {
+ domain = os.Getenv("APP_SUBDOMAIN") + ".plandex.ai"
+ }
+
+ // Clear the authToken cookie
+ http.SetCookie(w, &http.Cookie{
+ Name: "authToken",
+ Path: "/",
+ Value: "",
+ MaxAge: -1,
+ Secure: os.Getenv("GOENV") != "development",
+ HttpOnly: true,
+ SameSite: http.SameSiteLaxMode,
+ Domain: domain,
+ })
+
+ log.Println("cleared auth cookie")
+
+ return nil
+}
+
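+// ClearAccountFromCookies removes the account matching userId from the
+// accounts cookie and rewrites the cookie with the remaining accounts.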
+func ClearAccountFromCookies(w http.ResponseWriter, r *http.Request, userId string) error {
+ // Get stored accounts
+ storedAccounts, err := GetAccountsFromCookie(r)
+ if err != nil {
+ return fmt.Errorf("error getting accounts from cookie: %v", err)
+ }
+
+ // Remove the account with the given userId
+ for i, account := range storedAccounts {
+ if account.UserId == userId {
+ storedAccounts = append(storedAccounts[:i], storedAccounts[i+1:]...)
+ break
+ }
+ }
+
+ // Marshal the updated accounts
+ updatedAccountsBytes, err := json.Marshal(storedAccounts)
+ if err != nil {
+ return fmt.Errorf("error marshalling updated accounts: %v", err)
+ }
+
+ // Encode to base64
+ encodedAccounts := base64.URLEncoding.EncodeToString(updatedAccountsBytes)
+
+ // Set the updated accounts cookie
+ var domain string
+ if os.Getenv("GOENV") == "production" {
+ domain = os.Getenv("APP_SUBDOMAIN") + ".plandex.ai"
+ }
+ http.SetCookie(w, &http.Cookie{
+ Name: "accounts",
+ Path: "/",
+ Value: encodedAccounts,
+ Secure: os.Getenv("GOENV") != "development",
+ HttpOnly: true,
+ SameSite: http.SameSiteLaxMode,
+ Domain: domain,
+ })
+
+ return nil
+}
+
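+// SetAuthCookieIfBrowser sets the authToken cookie and adds or updates the
+// account in the accounts cookie for browser requests (detected by a non-empty
+// Accept header). Both cookies expire after 90 days.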
+func SetAuthCookieIfBrowser(w http.ResponseWriter, r *http.Request, user *db.User, token, orgId string) error {
+ log.Println("setting auth cookie if browser")
+
+ acceptHeader := r.Header.Get("Accept")
+ if acceptHeader == "" {
+ // no accept header, not a browser request
+ log.Println("not a browser request")
+ return nil
+ }
+
+ log.Println("is browser - setting auth cookie")
+
+	if token == "" {
+		authHeader, err := GetAuthHeader(r)
+		if err != nil {
+			return fmt.Errorf("error getting auth header: %v", err)
+		}
+		// guard against a nil header (no Authorization header or auth cookie)
+		if authHeader == nil {
+			return fmt.Errorf("no token")
+		}
+		token = authHeader.Token
+	}
+
+ if token == "" {
+ return fmt.Errorf("no token")
+ }
+
+ // set authToken cookie
+ authHeader := shared.AuthHeader{
+ Token: token,
+ OrgId: orgId,
+ }
+
+ bytes, err := json.Marshal(authHeader)
+
+ if err != nil {
+ return fmt.Errorf("error marshalling auth header: %v", err)
+ }
+
+ // base64 encode
+ token = base64.URLEncoding.EncodeToString(bytes)
+
+ var domain string
+ if os.Getenv("GOENV") == "production" {
+ domain = os.Getenv("APP_SUBDOMAIN") + ".plandex.ai"
+ }
+
+ cookie := &http.Cookie{
+ Name: "authToken",
+ Path: "/",
+ Value: "Bearer " + token,
+ Secure: os.Getenv("GOENV") != "development",
+ HttpOnly: true,
+ SameSite: http.SameSiteLaxMode,
+ Domain: domain,
+ Expires: time.Now().Add(time.Hour * 24 * 90),
+ }
+
+ log.Println("setting auth cookie", cookie)
+
+ http.SetCookie(w, cookie)
+
+ storedAccounts, err := GetAccountsFromCookie(r)
+
+ if err != nil {
+ return fmt.Errorf("error getting accounts from cookie: %v", err)
+ }
+
+ found := false
+ for _, account := range storedAccounts {
+ if account.UserId == user.Id {
+ found = true
+
+ account.Token = token
+ account.Email = user.Email
+ account.UserName = user.Name
+ break
+ }
+ }
+
+ if !found {
+ storedAccounts = append(storedAccounts, &shared.ClientAccount{
+ Email: user.Email,
+ UserName: user.Name,
+ UserId: user.Id,
+ Token: token,
+ })
+ }
+
+ bytes, err = json.Marshal(storedAccounts)
+
+ if err != nil {
+ return fmt.Errorf("error marshalling accounts: %v", err)
+ }
+
+ // base64 encode
+ accounts := base64.URLEncoding.EncodeToString(bytes)
+
+ http.SetCookie(w, &http.Cookie{
+ Name: "accounts",
+ Path: "/",
+ Value: accounts,
+ Secure: os.Getenv("GOENV") != "development",
+ HttpOnly: true,
+ SameSite: http.SameSiteLaxMode,
+ Domain: domain,
+ Expires: time.Now().Add(time.Hour * 24 * 90),
+ })
+
+ return nil
+}
+
+func GetAccountsFromCookie(r *http.Request) ([]*shared.ClientAccount, error) {
+ accountsCookie, err := r.Cookie("accounts")
+
+ if err == http.ErrNoCookie {
+ return []*shared.ClientAccount{}, nil
+ } else if err != nil {
+ return nil, fmt.Errorf("error getting accounts cookie: %v", err)
+ }
+
+ bytes, err := base64.URLEncoding.DecodeString(accountsCookie.Value)
+ if err != nil {
+ return nil, fmt.Errorf("error decoding accounts cookie: %v", err)
+ }
+
+ var accounts []*shared.ClientAccount
+ err = json.Unmarshal(bytes, &accounts)
+ if err != nil {
+ return nil, fmt.Errorf("error unmarshalling accounts cookie: %v", err)
+ }
+
+ return accounts, nil
+}
+
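+// ValidateAndSignIn verifies either a one-time sign-in code or an email
+// verification pin (skipped in local mode), creates an auth token, sets auth
+// cookies for browser requests, and returns the session with the user's
+// accessible orgs.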
+func ValidateAndSignIn(w http.ResponseWriter, r *http.Request, req shared.SignInRequest) (*shared.SessionResponse, error) {
+ var user *db.User
+ var emailVerificationId string
+ var signInCodeId string
+ var signInCodeOrgId string
+ var err error
+
+ isLocalMode := (os.Getenv("GOENV") == "development" && os.Getenv("LOCAL_MODE") == "1")
+
+ if req.IsSignInCode {
+ res, err := db.ValidateSignInCode(req.Pin)
+
+ if err != nil {
+ log.Printf("Error validating sign in code: %v\n", err)
+ return nil, fmt.Errorf("error validating sign in code: %v", err)
+ }
+
+ user, err = db.GetUser(res.UserId)
+
+ if err != nil {
+ log.Printf("Error getting user: %v\n", err)
+ return nil, fmt.Errorf("error getting user: %v", err)
+ }
+
+ if user == nil {
+ log.Printf("User not found for id: %v\n", res.UserId)
+ return nil, fmt.Errorf("user not found")
+ }
+
+ signInCodeId = res.Id
+ signInCodeOrgId = res.OrgId
+ } else {
+ req.Email = strings.ToLower(req.Email)
+ user, err = db.GetUserByEmail(req.Email)
+
+ if err != nil {
+ log.Printf("Error getting user: %v\n", err)
+ return nil, fmt.Errorf("error getting user: %v", err)
+ }
+
+ if user == nil {
+ log.Printf("User not found for email: %v\n", req.Email)
+ return nil, fmt.Errorf("not found")
+ }
+
+ // only validate email in non-local mode
+ if !isLocalMode {
+ emailVerificationId, err = db.ValidateEmailVerification(req.Email, req.Pin)
+
+ if err != nil {
+ log.Printf("Error validating email verification: %v\n", err)
+ return nil, fmt.Errorf("error validating email verification: %v", err)
+ }
+
+ log.Println("Email verification successful")
+ }
+ }
+
+ var token string
+ var authTokenId string
+
+ err = db.WithTx(r.Context(), "validate and sign in", func(tx *sqlx.Tx) error {
+ var err error
+ // create auth token
+ token, authTokenId, err = db.CreateAuthToken(user.Id, tx)
+
+ if err != nil {
+ log.Printf("Error creating auth token: %v\n", err)
+ return fmt.Errorf("error creating auth token: %v", err)
+ }
+
+ if req.IsSignInCode {
+ // update sign in code with auth token id
+ _, err = tx.Exec("UPDATE sign_in_codes SET auth_token_id = $1 WHERE id = $2", authTokenId, signInCodeId)
+
+ if err != nil {
+ log.Printf("Error updating sign in code: %v\n", err)
+ return fmt.Errorf("error updating sign in code: %v", err)
+ }
+ } else if !isLocalMode { // only update email verification in non-local mode
+ // update email verification with user and auth token ids
+ _, err = tx.Exec("UPDATE email_verifications SET user_id = $1, auth_token_id = $2 WHERE id = $3", user.Id, authTokenId, emailVerificationId)
+
+ if err != nil {
+ log.Printf("Error updating email verification: %v\n", err)
+ return fmt.Errorf("error updating email verification: %v", err)
+ }
+
+ log.Println("Email verification updated")
+ }
+
+ return nil
+ })
+
+ if err != nil {
+ return nil, fmt.Errorf("error validating and signing in: %v", err)
+ }
+
+ // get orgs
+ orgs, err := db.GetAccessibleOrgsForUser(user)
+
+ if err != nil {
+ log.Printf("Error getting orgs for user: %v\n", err)
+ return nil, fmt.Errorf("error getting orgs for user: %v", err)
+ }
+
+ if req.IsSignInCode {
+ filteredOrgs := []*db.Org{}
+ for _, org := range orgs {
+ if org.Id == signInCodeOrgId {
+ filteredOrgs = append(filteredOrgs, org)
+ }
+ }
+ orgs = filteredOrgs
+ }
+
+ // with a single org, set the orgId in the cookie
+ // otherwise, the user will be prompted to select an org
+ var orgId string
+ if len(orgs) == 1 {
+ orgId = orgs[0].Id
+ }
+
+ log.Println("Setting auth cookie if browser")
+ err = SetAuthCookieIfBrowser(w, r, user, token, orgId)
+ if err != nil {
+ log.Printf("Error setting auth cookie: %v\n", err)
+ return nil, fmt.Errorf("error setting auth cookie: %v", err)
+ }
+
+ apiOrgs, apiErr := toApiOrgs(orgs)
+
+ if apiErr != nil {
+ log.Printf("Error converting orgs to api orgs: %v\n", apiErr)
+ return nil, fmt.Errorf("error converting orgs to api orgs: %v", apiErr)
+ }
+
+ resp := shared.SessionResponse{
+ UserId: user.Id,
+ Token: token,
+ Email: user.Email,
+ UserName: user.Name,
+ Orgs: apiOrgs,
+		IsLocalMode: isLocalMode,
+ }
+
+ return &resp, nil
+}
+
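+// requireMinClientVersion rejects requests whose X-Client-Version header is
+// older than minVersion ("development" always passes). Note that
+// golang.org/x/mod/semver expects a leading "v" (e.g. "v2.1.0"); versions
+// without it are treated as invalid and compare lowest.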
+func requireMinClientVersion(w http.ResponseWriter, r *http.Request, minVersion string) bool {
+ msg := fmt.Sprintf("Client version is too old for this endpoint. Please upgrade to version %s or later.", minVersion)
+
+ version := r.Header.Get("X-Client-Version")
+ if version == "" {
+ http.Error(w, msg, http.StatusBadRequest)
+ return false
+ }
+
+ if version == "development" {
+ return true
+ }
+
+ if semver.Compare(version, minVersion) < 0 {
+ http.Error(w, msg, http.StatusBadRequest)
+ return false
+ }
+
+ return true
+}
+
+func execAuthenticate(w http.ResponseWriter, r *http.Request, requireOrg bool, raiseErr bool) *types.ServerAuth {
+ log.Println("authenticating request")
+
+ parsed, err := GetAuthHeader(r)
+
+ if err != nil {
+ log.Printf("error getting auth header: %v\n", err)
+ if raiseErr {
+ http.Error(w, "error getting auth header", http.StatusInternalServerError)
+ }
+ return nil
+ }
+
+ if parsed == nil {
+ log.Println("no auth header")
+ if raiseErr {
+ http.Error(w, "no auth header", http.StatusUnauthorized)
+ }
+ return nil
+ }
+
+ // validate the token
+ authToken, err := db.ValidateAuthToken(parsed.Token)
+
+ if err != nil {
+ log.Printf("error validating auth token: %v\n", err)
+
+ writeApiError(w, shared.ApiError{
+ Type: shared.ApiErrorTypeInvalidToken,
+ Status: http.StatusUnauthorized,
+ Msg: "Invalid auth token",
+ })
+ return nil
+ }
+
+ user, err := db.GetUser(authToken.UserId)
+
+ if err != nil {
+ log.Printf("error getting user: %v\n", err)
+ if raiseErr {
+ http.Error(w, "error getting user", http.StatusInternalServerError)
+ }
+ return nil
+ }
+
+ if !requireOrg {
+ return &types.ServerAuth{
+ AuthToken: authToken,
+ User: user,
+ }
+ }
+
+ if parsed.OrgId == "" {
+ log.Println("no org id")
+ if raiseErr {
+ http.Error(w, "no org id", http.StatusUnauthorized)
+ }
+ return nil
+ }
+
+ // validate the org membership
+ isMember, err := db.ValidateOrgMembership(authToken.UserId, parsed.OrgId)
+
+ if err != nil {
+ log.Printf("error validating org membership: %v\n", err)
+ if raiseErr {
+ http.Error(w, "error validating org membership", http.StatusInternalServerError)
+ }
+ return nil
+ }
+
+ if !isMember {
+ // check if there's an invite for this user and accept it if so (adds the user to the org)
+ invite, err := db.GetActiveInviteByEmail(parsed.OrgId, user.Email)
+
+ if err != nil {
+ log.Printf("error getting invite for org user: %v\n", err)
+ if raiseErr {
+ http.Error(w, "error getting invite for org user", http.StatusInternalServerError)
+ }
+ return nil
+ }
+
+ if invite != nil {
+ log.Println("accepting invite")
+
+ err := db.AcceptInvite(r.Context(), invite, authToken.UserId)
+
+ if err != nil {
+ log.Printf("error accepting invite: %v\n", err)
+ if raiseErr {
+ http.Error(w, "error accepting invite", http.StatusInternalServerError)
+ }
+ return nil
+ }
+
+ } else {
+ log.Println("user is not a member of the org")
+ if raiseErr {
+ http.Error(w, "not a member of org", http.StatusUnauthorized)
+ }
+ return nil
+ }
+ }
+
+ // get user permissions
+ permissions, err := db.GetUserPermissions(authToken.UserId, parsed.OrgId)
+
+ if err != nil {
+ log.Printf("error getting user permissions: %v\n", err)
+ if raiseErr {
+ http.Error(w, "error getting user permissions", http.StatusInternalServerError)
+ }
+ return nil
+ }
+
+ // build the permissions map
+ permissionsMap := make(shared.Permissions)
+ for _, permission := range permissions {
+ permissionsMap[permission] = true
+ }
+
+ auth := &types.ServerAuth{
+ AuthToken: authToken,
+ User: user,
+ OrgId: parsed.OrgId,
+ Permissions: permissionsMap,
+ }
+
+ // don't send hash for org-session requests
+ var hash string
+ if r.URL.Path != "/orgs/session" {
+ hash = parsed.Hash
+ }
+
+ _, apiErr := hooks.ExecHook(hooks.Authenticate, hooks.HookParams{
+ Auth: auth,
+ AuthenticateHookRequestParams: &hooks.AuthenticateHookRequestParams{
+ Path: r.URL.Path,
+ Hash: hash,
+ },
+ })
+
+ if apiErr != nil {
+ writeApiError(w, *apiErr)
+ return nil
+ }
+
+ log.Printf("UserId: %s, Email: %s, OrgId: %s\n", authToken.UserId, user.Email, parsed.OrgId)
+
+ return auth
+
+}
+
+func authorizeProject(w http.ResponseWriter, projectId string, auth *types.ServerAuth) bool {
+ return authorizeProjectOptional(w, projectId, auth, true)
+}
+
+func authorizeProjectOptional(w http.ResponseWriter, projectId string, auth *types.ServerAuth, shouldErr bool) bool {
+ log.Println("authorizing project")
+
+ projectExists, err := db.ProjectExists(auth.OrgId, projectId)
+
+ if err != nil {
+ log.Printf("error validating project: %v\n", err)
+ http.Error(w, "error validating project", http.StatusInternalServerError)
+ return false
+ }
+
+ if !projectExists && shouldErr {
+ log.Println("project does not exist in org")
+ http.Error(w, "project does not exist in org", http.StatusNotFound)
+ return false
+ }
+
+ return projectExists
+}
+
+func authorizeProjectRename(w http.ResponseWriter, projectId string, auth *types.ServerAuth) bool {
+ if !authorizeProject(w, projectId, auth) {
+ return false
+ }
+
+ if !auth.HasPermission(shared.PermissionRenameAnyProject) {
+ log.Println("User does not have permission to rename project")
+ http.Error(w, "User does not have permission to rename project", http.StatusForbidden)
+ return false
+ }
+
+ return true
+}
+
+func authorizeProjectDelete(w http.ResponseWriter, projectId string, auth *types.ServerAuth) bool {
+ if !authorizeProject(w, projectId, auth) {
+ return false
+ }
+
+ if !auth.HasPermission(shared.PermissionDeleteAnyProject) {
+ log.Println("User does not have permission to delete project")
+ http.Error(w, "User does not have permission to delete project", http.StatusForbidden)
+ return false
+ }
+
+ return true
+}
+
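+// authorizePlan verifies that the authenticated user has access to planId and
+// returns the plan, writing an HTTP error and returning nil otherwise.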
+func authorizePlan(w http.ResponseWriter, planId string, auth *types.ServerAuth) *db.Plan {
+ log.Println("authorizing plan")
+
+ plan, err := db.ValidatePlanAccess(planId, auth.User.Id, auth.OrgId)
+
+ if err != nil {
+ log.Printf("error validating plan membership: %v\n", err)
+ http.Error(w, "error validating plan membership", http.StatusInternalServerError)
+ return nil
+ }
+
+ if plan == nil {
+		log.Println("user doesn't have access to the plan")
+ http.Error(w, "no access to plan", http.StatusUnauthorized)
+ return nil
+ }
+
+ return plan
+}
+
+func authorizePlanUpdate(w http.ResponseWriter, planId string, auth *types.ServerAuth) *db.Plan {
+ plan := authorizePlan(w, planId, auth)
+
+ if plan == nil {
+ return nil
+ }
+
+ if plan.OwnerId != auth.User.Id && !auth.HasPermission(shared.PermissionUpdateAnyPlan) {
+ log.Println("User does not have permission to update plan")
+ http.Error(w, "User does not have permission to update plan", http.StatusForbidden)
+ return nil
+ }
+
+ return plan
+}
+
+func authorizePlanDelete(w http.ResponseWriter, planId string, auth *types.ServerAuth) *db.Plan {
+ plan := authorizePlan(w, planId, auth)
+
+ if plan == nil {
+ return nil
+ }
+
+ if plan.OwnerId != auth.User.Id && !auth.HasPermission(shared.PermissionDeleteAnyPlan) {
+ log.Println("User does not have permission to delete plan")
+ http.Error(w, "User does not have permission to delete plan", http.StatusForbidden)
+ return nil
+ }
+
+ return plan
+}
+
+func authorizePlanRename(w http.ResponseWriter, planId string, auth *types.ServerAuth) *db.Plan {
+ plan := authorizePlan(w, planId, auth)
+
+ if plan == nil {
+ return nil
+ }
+
+ if plan.OwnerId != auth.User.Id && !auth.HasPermission(shared.PermissionRenameAnyPlan) {
+ log.Println("User does not have permission to rename plan")
+ http.Error(w, "User does not have permission to rename plan", http.StatusForbidden)
+ return nil
+ }
+
+ return plan
+}
+
+func authorizePlanArchive(w http.ResponseWriter, planId string, auth *types.ServerAuth) *db.Plan {
+ plan := authorizePlan(w, planId, auth)
+
+ if plan == nil {
+ return nil
+ }
+
+ if plan.OwnerId != auth.User.Id && !auth.HasPermission(shared.PermissionArchiveAnyPlan) {
+ log.Println("User does not have permission to archive plan")
+ http.Error(w, "User does not have permission to archive plan", http.StatusForbidden)
+ return nil
+ }
+
+ return plan
+}
diff --git a/app/server/handlers/branches.go b/app/server/handlers/branches.go
new file mode 100644
index 0000000000000000000000000000000000000000..6aaf31a79f9c806ddda7e6bac5a2fe967fefa253
--- /dev/null
+++ b/app/server/handlers/branches.go
@@ -0,0 +1,207 @@
+package handlers
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "io"
+ "log"
+ "net/http"
+ "plandex-server/db"
+
+ shared "plandex-shared"
+
+ "github.com/gorilla/mux"
+ "github.com/jmoiron/sqlx"
+)
+
+func ListBranchesHandler(w http.ResponseWriter, r *http.Request) {
+ log.Println("Received request for ListBranchesHandler")
+
+ auth := Authenticate(w, r, true)
+ if auth == nil {
+ return
+ }
+
+ vars := mux.Vars(r)
+ planId := vars["planId"]
+
+ log.Println("planId: ", planId)
+
+ if authorizePlan(w, planId, auth) == nil {
+ return
+ }
+
+ var err error
+
+ ctx, cancel := context.WithCancel(r.Context())
+ var branches []*db.Branch
+
+ err = db.ExecRepoOperation(db.ExecRepoOperationParams{
+ OrgId: auth.OrgId,
+ UserId: auth.User.Id,
+ PlanId: planId,
+ Branch: "main",
+ Reason: "list branches",
+ Scope: db.LockScopeRead,
+ Ctx: ctx,
+ CancelFn: cancel,
+ }, func(repo *db.GitRepo) error {
+ res, err := db.ListPlanBranches(repo, planId)
+
+ if err != nil {
+ return err
+ }
+
+ branches = res
+
+ return nil
+ })
+
+ if err != nil {
+ log.Printf("Error getting branches: %v\n", err)
+ http.Error(w, "Error getting branches: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ jsonBytes, err := json.Marshal(branches)
+
+ if err != nil {
+ log.Printf("Error marshalling branches: %v\n", err)
+ http.Error(w, "Error marshalling branches: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ log.Println("Successfully retrieved branches")
+
+ w.Write(jsonBytes)
+}
+
+func CreateBranchHandler(w http.ResponseWriter, r *http.Request) {
+ log.Println("Received request for CreateBranchHandler")
+
+ auth := Authenticate(w, r, true)
+ if auth == nil {
+ return
+ }
+
+ vars := mux.Vars(r)
+ planId := vars["planId"]
+ branch := vars["branch"]
+
+ log.Println("planId: ", planId)
+
+ plan := authorizePlan(w, planId, auth)
+ if plan == nil {
+ return
+ }
+
+ body, err := io.ReadAll(r.Body)
+ if err != nil {
+ log.Printf("Error reading request body: %v\n", err)
+ http.Error(w, "Error reading request body", http.StatusInternalServerError)
+ return
+ }
+ defer func() {
+ log.Println("Closing request body")
+ r.Body.Close()
+ }()
+
+ var req shared.CreateBranchRequest
+ if err := json.Unmarshal(body, &req); err != nil {
+ log.Printf("Error parsing request body: %v\n", err)
+		http.Error(w, "Error parsing request body", http.StatusBadRequest)
+ return
+ }
+
+ parentBranch, err := db.GetDbBranch(planId, branch)
+
+ if err != nil {
+ log.Printf("Error getting parent branch: %v\n", err)
+ http.Error(w, "Error getting parent branch: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ ctx, cancel := context.WithCancel(r.Context())
+
+ err = db.ExecRepoOperation(db.ExecRepoOperationParams{
+ OrgId: auth.OrgId,
+ UserId: auth.User.Id,
+ PlanId: planId,
+ Branch: "main",
+ Reason: "create branch",
+ Scope: db.LockScopeWrite,
+ Ctx: ctx,
+ CancelFn: cancel,
+ }, func(repo *db.GitRepo) error {
+
+ err := db.WithTx(ctx, "create branch", func(tx *sqlx.Tx) error {
+ _, err = db.CreateBranch(repo, plan, parentBranch, req.Name, tx)
+
+ if err != nil {
+ return fmt.Errorf("error creating branch: %v", err)
+ }
+
+ return nil
+ })
+
+ return err
+ })
+
+ if err != nil {
+ log.Printf("Error creating branch: %v\n", err)
+ http.Error(w, "Error creating branch: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ log.Println("Successfully created branch")
+}
+
+func DeleteBranchHandler(w http.ResponseWriter, r *http.Request) {
+ log.Println("Received request for DeleteBranchHandler")
+
+ auth := Authenticate(w, r, true)
+ if auth == nil {
+ return
+ }
+
+ vars := mux.Vars(r)
+ planId := vars["planId"]
+ branch := vars["branch"]
+
+ log.Println("planId: ", planId)
+
+ if authorizePlan(w, planId, auth) == nil {
+ return
+ }
+
+ if branch == "main" {
+ log.Println("Cannot delete main branch")
+ http.Error(w, "Cannot delete main branch", http.StatusBadRequest)
+ return
+ }
+
+ ctx, cancel := context.WithCancel(r.Context())
+
+ err := db.ExecRepoOperation(db.ExecRepoOperationParams{
+ OrgId: auth.OrgId,
+ UserId: auth.User.Id,
+ PlanId: planId,
+ Branch: "main",
+ Reason: "delete branch",
+ Scope: db.LockScopeWrite,
+ Ctx: ctx,
+ CancelFn: cancel,
+ }, func(repo *db.GitRepo) error {
+ err := repo.GitDeleteBranch(branch)
+ return err
+ })
+
+ if err != nil {
+ log.Printf("Error deleting branch: %v\n", err)
+ http.Error(w, "Error deleting branch: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ log.Println("Successfully deleted branch")
+}
diff --git a/app/server/handlers/client_helper.go b/app/server/handlers/client_helper.go
new file mode 100644
index 0000000000000000000000000000000000000000..30384a7943c53eae1b4c0077f4557ee34e7afd93
--- /dev/null
+++ b/app/server/handlers/client_helper.go
@@ -0,0 +1,84 @@
+package handlers
+
+import (
+ "log"
+ "net/http"
+ "os"
+ "plandex-server/db"
+ "plandex-server/hooks"
+ "plandex-server/model"
+ "plandex-server/types"
+ shared "plandex-shared"
+)
+
+type initClientsParams struct {
+ w http.ResponseWriter
+ auth *types.ServerAuth
+
+ apiKeys map[string]string // deprecated
+ openAIOrgId string // deprecated
+
+ authVars map[string]string
+
+ plan *db.Plan
+ settings *shared.PlanSettings
+ orgUserConfig *shared.OrgUserConfig
+}
+
+type initClientsResult struct {
+ clients map[string]model.ClientInfo
+ authVars map[string]string
+}
+
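+// initClients resolves model credentials, preferring authVars over the
+// deprecated apiKeys/openAIOrgId fields, then runs the GetIntegratedModels
+// hook. In integrated-models mode the hook's credentials replace the caller's,
+// except for the Claude Max token, which is carried over if set.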
+func initClients(params initClientsParams) initClientsResult {
+ w := params.w
+ settings := params.settings
+ orgUserConfig := params.orgUserConfig
+
+ authVars := map[string]string{}
+ if params.authVars != nil {
+ authVars = params.authVars
+ } else if params.apiKeys != nil {
+ authVars = map[string]string{}
+ for envVar, apiKey := range params.apiKeys {
+ authVars[envVar] = apiKey
+ }
+ if params.openAIOrgId != "" {
+ authVars["OPENAI_ORG_ID"] = params.openAIOrgId
+ }
+ }
+
+ hookResult, apiErr := hooks.ExecHook(hooks.GetIntegratedModels, hooks.HookParams{
+ Auth: params.auth,
+ Plan: params.plan,
+ })
+
+ if apiErr != nil {
+ log.Printf("Error getting integrated models: %v\n", apiErr)
+ http.Error(w, "Error getting integrated models", http.StatusInternalServerError)
+ return initClientsResult{}
+ }
+
+ if hookResult.GetIntegratedModelsResult != nil && hookResult.GetIntegratedModelsResult.IntegratedModelsMode {
+ merged := map[string]string{}
+ for k, v := range hookResult.GetIntegratedModelsResult.AuthVars {
+ merged[k] = v
+ }
+ if authVars[shared.AnthropicClaudeMaxTokenEnvVar] != "" {
+ merged[shared.AnthropicClaudeMaxTokenEnvVar] = authVars[shared.AnthropicClaudeMaxTokenEnvVar]
+ }
+ authVars = merged
+ }
+ if len(authVars) == 0 && os.Getenv("IS_CLOUD") != "" {
+ log.Println("No api keys/credentials provided for models")
+ http.Error(w, "No api keys/credentials provided for models", http.StatusBadRequest)
+ return initClientsResult{}
+ }
+
+ clients := model.InitClients(authVars, settings, orgUserConfig)
+
+ return initClientsResult{
+ clients: clients,
+ authVars: authVars,
+ }
+}
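+
+// A minimal usage sketch, assuming a handler that has already authenticated
+// and loaded the plan and its settings (names are illustrative):
+//
+//	res := initClients(initClientsParams{
+//		w:        w,
+//		auth:     auth,
+//		authVars: req.AuthVars,
+//		plan:     plan,
+//		settings: settings,
+//	})
+//	if res.clients == nil {
+//		return // an error response has already been written
+//	}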
diff --git a/app/server/handlers/context_helper.go b/app/server/handlers/context_helper.go
new file mode 100644
index 0000000000000000000000000000000000000000..6ab41954b9c9b3637dc9d809cde4c8b122ee0d8b
--- /dev/null
+++ b/app/server/handlers/context_helper.go
@@ -0,0 +1,303 @@
+package handlers
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "log"
+ "net/http"
+ "plandex-server/db"
+ "plandex-server/model"
+ "plandex-server/types"
+ "runtime"
+ "runtime/debug"
+
+ shared "plandex-shared"
+)
+
+type loadContextsParams struct {
+ w http.ResponseWriter
+ r *http.Request
+ auth *types.ServerAuth
+ loadReq *shared.LoadContextRequest
+ plan *db.Plan
+ branchName string
+ cachedMapsByPath map[string]*db.CachedMap
+ autoLoaded bool
+}
+
+func loadContexts(
+ params loadContextsParams,
+) (*shared.LoadContextResponse, []*db.Context) {
+ w := params.w
+ r := params.r
+ auth := params.auth
+ loadReq := params.loadReq
+ plan := params.plan
+ branchName := params.branchName
+ cachedMapsByPath := params.cachedMapsByPath
+ autoLoaded := params.autoLoaded
+
+	log.Printf("[ContextHelper] Starting loadContexts with %d contexts, hasCachedMaps: %v, autoLoaded: %v", len(*loadReq), cachedMapsByPath != nil, autoLoaded)
+
+ // check file count and size limits
+ // this is all a sanity check - we should have already checked these limits in the client
+ totalFiles := 0
+ mapFilesCount := 0
+ for _, context := range *loadReq {
+ totalFiles++
+ if context.ContextType == shared.ContextMapType {
+ mapFilesCount++
+ log.Printf("[ContextHelper] Found map file: %s with %d map bodies", context.FilePath, len(context.MapBodies))
+
+ if len(context.MapBodies) > shared.MaxContextMapPaths {
+ log.Printf("Error: Too many map files to load (found %d, limit is %d)\n", len(context.MapBodies), shared.MaxContextMapPaths)
+ http.Error(w, fmt.Sprintf("Too many map files to load (found %d, limit is %d)", len(context.MapBodies), shared.MaxContextMapPaths), http.StatusBadRequest)
+ return nil, nil
+ }
+
+ // these are already mapped, so they shouldn't be anywhere close to the input limit, but we'll use it for the sanity check
+ for _, body := range context.MapBodies {
+ if len(body) > shared.MaxContextMapSingleInputSize {
+ log.Printf("Error: Map file %s exceeds size limit (size %d, limit %d)\n", context.FilePath, len(body), shared.MaxContextMapSingleInputSize)
+ http.Error(w, fmt.Sprintf("Map file %s exceeds size limit (size %d, limit %d)", context.FilePath, len(body), shared.MaxContextMapSingleInputSize), http.StatusBadRequest)
+ return nil, nil
+ }
+ }
+ }
+
+ if totalFiles > shared.MaxContextCount {
+ log.Printf("Error: Too many contexts to load (found %d, limit is %d)\n", totalFiles, shared.MaxContextCount)
+ http.Error(w, fmt.Sprintf("Too many contexts to load (found %d, limit is %d)", totalFiles, shared.MaxContextCount), http.StatusBadRequest)
+ return nil, nil
+ }
+
+ fileSize := int64(len(context.Body))
+ if fileSize > shared.MaxContextBodySize {
+ log.Printf("Error: Context %s exceeds size limit (size %.2f MB, limit %d MB)\n", context.Name, float64(fileSize)/1024/1024, int(shared.MaxContextBodySize)/1024/1024)
+ http.Error(w, fmt.Sprintf("Context %s exceeds size limit (size %.2f MB, limit %d MB)", context.Name, float64(fileSize)/1024/1024, int(shared.MaxContextBodySize)/1024/1024), http.StatusBadRequest)
+ return nil, nil
+ }
+
+ }
+
+ if mapFilesCount > 0 {
+ log.Printf("[ContextHelper] Processing %d map files out of %d total contexts", mapFilesCount, totalFiles)
+ }
+
+ var err error
+
+ var settings *shared.PlanSettings
+ var clients map[string]model.ClientInfo
+ var authVars map[string]string
+ var orgUserConfig *shared.OrgUserConfig
+
+ for _, context := range *loadReq {
+ if context.ContextType == shared.ContextPipedDataType || context.ContextType == shared.ContextNoteType || context.ContextType == shared.ContextImageType {
+
+ settings, err = db.GetPlanSettings(plan)
+
+ if err != nil {
+ log.Printf("Error getting plan settings: %v\n", err)
+ http.Error(w, "Error getting plan settings: "+err.Error(), http.StatusInternalServerError)
+ return nil, nil
+ }
+
+ orgUserConfig, err = db.GetOrgUserConfig(auth.User.Id, auth.OrgId)
+ if err != nil {
+ log.Printf("Error getting org user config: %v\n", err)
+ http.Error(w, "Error getting org user config: "+err.Error(), http.StatusInternalServerError)
+ return nil, nil
+ }
+
+ res := initClients(
+ initClientsParams{
+ w: w,
+ auth: auth,
+ apiKeys: context.ApiKeys,
+ openAIOrgId: context.OpenAIOrgId,
+ authVars: context.AuthVars,
+ plan: plan,
+ settings: settings,
+ },
+ )
+
+			clients = res.clients
+			authVars = res.authVars
+
+			if clients == nil {
+				// initClients already wrote an error response
+				return nil, nil
+			}
+
+			break
+ }
+ }
+
+ // ensure image compatibility if we're loading an image
+ for _, context := range *loadReq {
+ if context.ContextType == shared.ContextImageType {
+ if !settings.GetModelPack().Planner.GetSharedBaseConfig(settings).HasImageSupport {
+ log.Printf("Error loading context: %s does not support images in context\n", settings.GetModelPack().Planner.ModelId)
+ http.Error(w, fmt.Sprintf("Error loading context: %s does not support images in context", settings.GetModelPack().Planner.ModelId), http.StatusBadRequest)
+ return nil, nil
+ }
+ }
+ }
+
+ // get name for piped data or notes if present
+ num := 0
+ errCh := make(chan error, len(*loadReq))
+ for _, context := range *loadReq {
+ if context.ContextType == shared.ContextPipedDataType {
+ num++
+
+ go func(context *shared.LoadContextParams) {
+ defer func() {
+ if r := recover(); r != nil {
+ log.Printf("panic in GenPipedDataName: %v\n%s", r, debug.Stack())
+ errCh <- fmt.Errorf("panic in GenPipedDataName: %v\n%s", r, debug.Stack())
+ runtime.Goexit() // don't allow outer function to continue and double-send to channel
+ }
+ }()
+
+ name, err := model.GenPipedDataName(model.GenPipedDataNameParams{
+ Ctx: r.Context(),
+ Auth: auth,
+ Plan: plan,
+ Settings: settings,
+ AuthVars: authVars,
+ SessionId: context.SessionId,
+ Clients: clients,
+ PipedContent: context.Body,
+ OrgUserConfig: orgUserConfig,
+ })
+
+ if err != nil {
+ errCh <- fmt.Errorf("error generating name for piped data: %v", err)
+ return
+ }
+
+ context.Name = name
+ errCh <- nil
+ }(context)
+ } else if context.ContextType == shared.ContextNoteType {
+ num++
+
+ go func(context *shared.LoadContextParams) {
+ defer func() {
+ if r := recover(); r != nil {
+ log.Printf("panic in GenNoteName: %v\n%s", r, debug.Stack())
+ errCh <- fmt.Errorf("panic in GenNoteName: %v\n%s", r, debug.Stack())
+ runtime.Goexit() // don't allow outer function to continue and double-send to channel
+ }
+ }()
+
+ name, err := model.GenNoteName(r.Context(), auth, plan, settings, orgUserConfig, clients, authVars, context.Body, context.SessionId)
+
+ if err != nil {
+ errCh <- fmt.Errorf("error generating name for note: %v", err)
+ return
+ }
+
+ context.Name = name
+ errCh <- nil
+ }(context)
+ }
+ }
+ if num > 0 {
+ for i := 0; i < num; i++ {
+ err := <-errCh
+ if err != nil {
+ log.Printf("Error: %v\n", err)
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return nil, nil
+ }
+ }
+ }
+
+ ctx, cancel := context.WithCancel(r.Context())
+
+ var loadRes *shared.LoadContextResponse
+ var dbContexts []*db.Context
+
+ err = db.ExecRepoOperation(db.ExecRepoOperationParams{
+ OrgId: auth.OrgId,
+ UserId: auth.User.Id,
+ PlanId: plan.Id,
+ Branch: branchName,
+ Reason: "load contexts",
+ Scope: db.LockScopeWrite,
+ Ctx: ctx,
+ CancelFn: cancel,
+ ClearRepoOnErr: true,
+ }, func(repo *db.GitRepo) error {
+ log.Printf("[ContextHelper] Calling db.LoadContexts with %d contexts, %d cached maps", len(*loadReq), len(cachedMapsByPath))
+ for path := range cachedMapsByPath {
+ log.Printf("[ContextHelper] Using cached map for path: %s", path)
+ }
+
+ res, dbContextsRes, err := db.LoadContexts(ctx, db.LoadContextsParams{
+ OrgId: auth.OrgId,
+ Plan: plan,
+ BranchName: branchName,
+ Req: loadReq,
+ UserId: auth.User.Id,
+ CachedMapsByPath: cachedMapsByPath,
+ AutoLoaded: autoLoaded,
+ })
+
+ if err != nil {
+ return err
+ }
+
+ loadRes = res
+ dbContexts = dbContextsRes
+
+ log.Printf("[ContextHelper] db.LoadContexts completed successfully, loaded %d contexts", len(dbContexts))
+
+ // Log information about loaded map contexts
+ mapContextsCount := 0
+ for _, context := range dbContexts {
+ if context.ContextType == shared.ContextMapType {
+ mapContextsCount++
+ log.Printf("[ContextHelper] Loaded map context: %s, path: %s, tokens: %d", context.Name, context.FilePath, context.NumTokens)
+ }
+ }
+ if mapContextsCount > 0 {
+ log.Printf("[ContextHelper] Successfully loaded %d map contexts out of %d total contexts", mapContextsCount, len(dbContexts))
+ }
+
+ if loadRes.MaxTokensExceeded {
+ return nil
+ }
+
+ log.Printf("[ContextHelper] Committing changes to branch %s", branchName)
+ err = repo.GitAddAndCommit(branchName, res.Msg)
+
+ if err != nil {
+ return fmt.Errorf("error committing changes: %v", err)
+ }
+
+		log.Printf("[ContextHelper] Successfully committed changes to branch %s", branchName)
+
+ return nil
+ })
+
+ if err != nil {
+ log.Printf("Error loading contexts: %v\n", err)
+ http.Error(w, "Error loading contexts: "+err.Error(), http.StatusInternalServerError)
+ return nil, nil
+ }
+
+ if loadRes.MaxTokensExceeded {
+ log.Printf("The total number of tokens (%d) exceeds the maximum allowed (%d)", loadRes.TotalTokens, loadRes.MaxTokens)
+ bytes, err := json.Marshal(loadRes)
+
+ if err != nil {
+ log.Printf("Error marshalling response: %v\n", err)
+ http.Error(w, "Error marshalling response: "+err.Error(), http.StatusInternalServerError)
+ return nil, nil
+ }
+
+ w.Write(bytes)
+ return nil, nil
+ }
+
+ return loadRes, dbContexts
+}
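+
+// A minimal call sketch (mirroring LoadCachedFileMapHandler in file_maps.go):
+// a nil loadRes means an error response, or the MaxTokensExceeded payload, has
+// already been written to w.
+//
+//	loadRes, dbContexts := loadContexts(loadContextsParams{
+//		w:          w,
+//		r:          r,
+//		auth:       auth,
+//		loadReq:    &loadReq,
+//		plan:       plan,
+//		branchName: branchName,
+//	})
+//	if loadRes == nil {
+//		return
+//	}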
diff --git a/app/server/handlers/err_helper.go b/app/server/handlers/err_helper.go
new file mode 100644
index 0000000000000000000000000000000000000000..53f1c5008de42f498b708679bfa5834b88c71137
--- /dev/null
+++ b/app/server/handlers/err_helper.go
@@ -0,0 +1,29 @@
+package handlers
+
+import (
+ "encoding/json"
+ "log"
+ "net/http"
+
+ shared "plandex-shared"
+)
+
+func writeApiError(w http.ResponseWriter, apiErr shared.ApiError) {
+ bytes, err := json.Marshal(apiErr)
+ if err != nil {
+ log.Printf("Error marshalling response: %v\n", err)
+ // If marshalling fails, fall back to a simpler error message
+ http.Error(w, "Error marshalling response", http.StatusInternalServerError)
+ return
+ }
+
+ log.Printf("API Error: %v\n", apiErr.Msg)
+
+ w.Header().Set("Content-Type", "application/json")
+ w.WriteHeader(apiErr.Status)
+
+ _, writeErr := w.Write(bytes)
+ if writeErr != nil {
+ log.Printf("Error writing response: %v\n", writeErr)
+ }
+}
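+
+// A minimal usage sketch; callers populate Status explicitly, since it is
+// passed straight through to w.WriteHeader:
+//
+//	writeApiError(w, shared.ApiError{
+//		Type:   shared.ApiErrorTypeOther,
+//		Status: http.StatusForbidden,
+//		Msg:    "Trial user can't invite other users",
+//	})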
diff --git a/app/server/handlers/file_maps.go b/app/server/handlers/file_maps.go
new file mode 100644
index 0000000000000000000000000000000000000000..09a0a43c75e0e599e1f0b5ccb4a794d4fd7c45a4
--- /dev/null
+++ b/app/server/handlers/file_maps.go
@@ -0,0 +1,224 @@
+package handlers
+
+import (
+ "encoding/json"
+ "fmt"
+ "log"
+ "net/http"
+ "plandex-server/db"
+ "runtime"
+ "runtime/debug"
+ "sync"
+
+ shared "plandex-shared"
+
+ "github.com/gorilla/mux"
+)
+
+func GetFileMapHandler(w http.ResponseWriter, r *http.Request) {
+ log.Println("Received request for GetFileMapHandler")
+
+ auth := Authenticate(w, r, true)
+ if auth == nil {
+ log.Println("GetFileMapHandler: auth failed")
+ return
+ }
+
+ var req shared.GetFileMapRequest
+ if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
+ http.Error(w, fmt.Sprintf("Error decoding request: %v", err), http.StatusBadRequest)
+ return
+ }
+
+ log.Println("GetFileMapHandler: checking limits")
+
+ if len(req.MapInputs) > shared.MaxContextMapPaths {
+ http.Error(w, fmt.Sprintf("Too many files to map: %d (max %d)", len(req.MapInputs), shared.MaxContextMapPaths), http.StatusBadRequest)
+ return
+ }
+
+ totalSize := 0
+ for path, input := range req.MapInputs {
+ // the client should be truncating inputs to the max size, but we'll check here too
+ if len(input) > shared.MaxContextMapSingleInputSize {
+ http.Error(w, fmt.Sprintf("File %s is too large: %d (max %d)", path, len(input), shared.MaxContextMapSingleInputSize), http.StatusBadRequest)
+ return
+ }
+ totalSize += len(input)
+ }
+
+ // On the client, once the total size limit is exceeded, we send empty file maps for remaining files
+ if totalSize > shared.MaxContextMapTotalInputSize+10000 {
+ http.Error(w, fmt.Sprintf("Max map size exceeded: %d (max %d)", totalSize, shared.MaxContextMapTotalInputSize), http.StatusBadRequest)
+ return
+ }
+
+ // Check batch size limits
+ if len(req.MapInputs) > shared.ContextMapMaxBatchSize {
+ http.Error(w, fmt.Sprintf("Batch contains too many files: %d (max %d)", len(req.MapInputs), shared.ContextMapMaxBatchSize), http.StatusBadRequest)
+ return
+ }
+
+ if int64(totalSize) > shared.ContextMapMaxBatchBytes {
+ http.Error(w, fmt.Sprintf("Batch size too large: %d bytes (max %d bytes)", totalSize, shared.ContextMapMaxBatchBytes), http.StatusBadRequest)
+ return
+ }
+
+ results := make(chan shared.FileMapBodies, 1)
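+	// the buffer of 1 lets the worker's non-blocking send succeed even if this
+	// handler has already returned (see safeSend in file_maps_queue.go)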
+
+ err := queueProjectMapJob(projectMapJob{
+ inputs: req.MapInputs,
+ ctx: r.Context(),
+ results: results,
+ })
+ if err != nil {
+ log.Println("GetFileMapHandler: map queue is full")
+ http.Error(w, "Too many project map jobs, please try again later", http.StatusTooManyRequests)
+ return
+ }
+
+ select {
+ case <-r.Context().Done():
+ http.Error(w, "Request was cancelled", http.StatusRequestTimeout)
+ return
+ case maps := <-results:
+ if maps == nil {
+ http.Error(w, "Mapping timed out", http.StatusRequestTimeout)
+ return
+ }
+
+ resp := shared.GetFileMapResponse{
+ MapBodies: maps,
+ }
+ respBytes, err := json.Marshal(resp)
+ if err != nil {
+ http.Error(w, fmt.Sprintf("Error marshalling response: %v", err), http.StatusInternalServerError)
+ return
+ }
+ w.Header().Set("Content-Type", "application/json")
+ w.Write(respBytes)
+
+ log.Printf("GetFileMapHandler success - writing response bytes: %d", len(respBytes))
+ }
+}
+
+func LoadCachedFileMapHandler(w http.ResponseWriter, r *http.Request) {
+ log.Println("Received request for LoadCachedFileMapHandler")
+
+ auth := Authenticate(w, r, true)
+ if auth == nil {
+ return
+ }
+
+ vars := mux.Vars(r)
+ planId := vars["planId"]
+ branchName := vars["branch"]
+ log.Println("planId: ", planId, "branchName: ", branchName)
+
+ plan := authorizePlan(w, planId, auth)
+
+ if plan == nil {
+ return
+ }
+
+ var req shared.LoadCachedFileMapRequest
+ if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
+ http.Error(w, fmt.Sprintf("Error decoding request: %v", err), http.StatusBadRequest)
+ return
+ }
+
+ cachedMetaByPath := map[string]*shared.Context{}
+ cachedMapsByPath := map[string]*db.CachedMap{}
+ var mu sync.Mutex
+ errCh := make(chan error, len(req.FilePaths))
+
+ for _, path := range req.FilePaths {
+ go func(path string) {
+ defer func() {
+ if r := recover(); r != nil {
+ log.Printf("panic in LoadCachedFileMapHandler: %v\n%s", r, debug.Stack())
+ errCh <- fmt.Errorf("panic in LoadCachedFileMapHandler: %v\n%s", r, debug.Stack())
+ runtime.Goexit() // don't allow outer function to continue and double-send to channel
+ }
+ }()
+
+ cachedContext, err := db.GetCachedMap(plan.OrgId, plan.ProjectId, path)
+ if err != nil {
+ errCh <- fmt.Errorf("error getting cached map: %v", err)
+ return
+ }
+ if cachedContext != nil {
+ mu.Lock()
+ cachedMetaByPath[path] = cachedContext.ToMeta().ToApi()
+ cachedMapsByPath[path] = &db.CachedMap{
+ MapParts: cachedContext.MapParts,
+ MapShas: cachedContext.MapShas,
+ MapTokens: cachedContext.MapTokens,
+ MapSizes: cachedContext.MapSizes,
+ }
+ mu.Unlock()
+ }
+ errCh <- nil
+ }(path)
+ }
+
+ for range req.FilePaths {
+ err := <-errCh
+ if err != nil {
+ log.Printf("Error getting cached map: %v", err)
+ http.Error(w, fmt.Sprintf("Error getting cached map: %v", err), http.StatusInternalServerError)
+ return
+ }
+ }
+
+ resp := shared.LoadCachedFileMapResponse{}
+
+ var loadRes *shared.LoadContextResponse
+ if len(cachedMetaByPath) == 0 {
+ log.Println("no cached maps found")
+ } else {
+		log.Println("cached maps found")
+
+ cachedByPath := map[string]bool{}
+ for _, cachedContext := range cachedMetaByPath {
+ cachedByPath[cachedContext.FilePath] = true
+ }
+ resp.CachedByPath = cachedByPath
+
+ var loadReq shared.LoadContextRequest
+ for _, cachedContext := range cachedMetaByPath {
+ loadReq = append(loadReq, &shared.LoadContextParams{
+ ContextType: shared.ContextMapType,
+ Name: cachedContext.Name,
+ FilePath: cachedContext.FilePath,
+ Body: cachedContext.Body,
+ })
+ }
+
+ loadRes, _ = loadContexts(loadContextsParams{
+ w: w,
+ r: r,
+ auth: auth,
+ loadReq: &loadReq,
+ plan: plan,
+ branchName: branchName,
+ cachedMapsByPath: cachedMapsByPath,
+ })
+
+ if loadRes == nil {
+ log.Println("LoadCachedFileMapHandler - loadRes is nil")
+ return
+ }
+
+ resp.LoadRes = loadRes
+ }
+
+ bytes, err := json.Marshal(resp)
+ if err != nil {
+ log.Printf("Error marshalling response: %v", err)
+ http.Error(w, fmt.Sprintf("Error marshalling response: %v", err), http.StatusInternalServerError)
+ return
+ }
+
+ w.Write(bytes)
+}
diff --git a/app/server/handlers/file_maps_queue.go b/app/server/handlers/file_maps_queue.go
new file mode 100644
index 0000000000000000000000000000000000000000..aa3a8f00ca79f70ee8436315d870149002b496b4
--- /dev/null
+++ b/app/server/handlers/file_maps_queue.go
@@ -0,0 +1,139 @@
+package handlers
+
+import (
+ "context"
+ "errors"
+ "log"
+ "math"
+ "plandex-server/syntax/file_map"
+ shared "plandex-shared"
+ "runtime"
+ "sync"
+ "time"
+)
+
+// simple in-memory per-instance queue for file map jobs
+// ensures mapping doesn't take over all available CPUs
+
+const fileMapMaxQueueSize = 20 // caller errors out if this is exceeded
+var fileMapMaxConcurrency = 3 // set to 3/4 of available CPUs below
+const mapJobTimeout = 60 * time.Second
+
+type projectMapJob struct {
+ inputs shared.FileMapInputs
+ ctx context.Context
+ results chan shared.FileMapBodies
+}
+
+var projectMapQueue = make(chan projectMapJob, fileMapMaxQueueSize)
+
+var mapCPUSem chan struct{}
+
+func init() {
+ // Use 3/4 of available CPUs for mapping workers
+ cpus := runtime.NumCPU()
+ fileMapMaxConcurrency = int(math.Ceil(float64(cpus) * 0.75))
+ if fileMapMaxConcurrency < 1 {
+ fileMapMaxConcurrency = 1
+ }
+
+ log.Printf("fileMapMaxConcurrency: %d", fileMapMaxConcurrency)
+
+ mapCPUSem = make(chan struct{}, fileMapMaxConcurrency)
+
+	// start workers, one per concurrency slot
+ for i := 0; i < fileMapMaxConcurrency; i++ {
+ go processProjectMapQueue()
+ }
+}
+
+func processProjectMapQueue() {
+ for job := range projectMapQueue {
+ if job.ctx.Err() != nil {
+ if job.ctx.Err() == context.DeadlineExceeded {
+ log.Printf("processProjectMapQueue: job context deadline exceeded: %v", job.ctx.Err())
+ safeSend(job.results, nil)
+ continue
+ }
+ log.Printf("processProjectMapQueue: job context cancelled: %v", job.ctx.Err())
+ safeSend(job.results, nil)
+ continue
+ }
+ ctxWithTimeout, cancel := context.WithTimeout(job.ctx, mapJobTimeout)
+ mapWorker(projectMapJob{
+ inputs: job.inputs,
+ ctx: ctxWithTimeout,
+ results: job.results,
+ })
+ cancel()
+ }
+}
+
+func queueProjectMapJob(job projectMapJob) error {
+ log.Printf("queueProjectMapJob: len(projectMapQueue): %d", len(projectMapQueue))
+ select {
+ case projectMapQueue <- job:
+ return nil
+ default:
+ return errors.New("queue is full")
+ }
+}
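+
+// Callers submit a job, then wait on the results channel; a nil result signals
+// timeout or cancellation. A sketch mirroring GetFileMapHandler:
+//
+//	results := make(chan shared.FileMapBodies, 1)
+//	err := queueProjectMapJob(projectMapJob{inputs: inputs, ctx: ctx, results: results})
+//	if err != nil {
+//		// queue is full; surface a 429 to the client
+//	}
+//	maps := <-results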
+
+func mapWorker(job projectMapJob) {
+ maps := make(shared.FileMapBodies)
+ wg := sync.WaitGroup{}
+ var mu sync.Mutex
+
+ log.Printf("mapWorker: len(job.inputs): %d", len(job.inputs))
+
+ for path, input := range job.inputs {
+ if !shared.HasFileMapSupport(path) {
+ mu.Lock()
+ maps[path] = "[NO MAP]"
+ mu.Unlock()
+ continue
+ }
+
+ wg.Add(1)
+		go func(path string, input string) {
+			defer wg.Done()
+
+			if job.ctx.Err() != nil {
+				return
+			}
+
+			mapCPUSem <- struct{}{}
+			defer func() { <-mapCPUSem }()
+
+ fileMap, err := file_map.MapFile(job.ctx, path, []byte(input))
+ if err != nil {
+ // Skip files that can't be parsed, just log the error
+ log.Printf("Error mapping file %s: %v", path, err)
+ mu.Lock()
+ maps[path] = "[NO MAP]"
+ mu.Unlock()
+ return
+ }
+ mu.Lock()
+ maps[path] = fileMap.String()
+ mu.Unlock()
+ }(path, input)
+ }
+
+ wg.Wait()
+
+ if job.ctx.Err() != nil {
+ safeSend(job.results, nil)
+ return
+ }
+
+ safeSend(job.results, maps)
+}
+
+func safeSend(ch chan shared.FileMapBodies, v shared.FileMapBodies) {
+ // never block, never panic
+ select {
+ case ch <- v:
+ default: // buffer already full – receiver must have gone away
+ }
+}
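+
+// Note: results channels are created with a buffer of 1 (see GetFileMapHandler),
+// so the send above always has room for the single result per job; the default
+// case only fires if a value was already delivered for this job.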
diff --git a/app/server/handlers/invites.go b/app/server/handlers/invites.go
new file mode 100644
index 0000000000000000000000000000000000000000..b504ca57b9a1547c4d468530f087691c89a8314e
--- /dev/null
+++ b/app/server/handlers/invites.go
@@ -0,0 +1,407 @@
+package handlers
+
+import (
+ "encoding/json"
+ "fmt"
+ "log"
+ "net/http"
+ "os"
+ "plandex-server/db"
+ "plandex-server/email"
+ "strings"
+
+ shared "plandex-shared"
+
+ "github.com/gorilla/mux"
+ "github.com/jmoiron/sqlx"
+)
+
+func InviteUserHandler(w http.ResponseWriter, r *http.Request) {
+ log.Println("Received a request for InviteUserHandler")
+
+ if os.Getenv("GOENV") == "development" && os.Getenv("LOCAL_MODE") == "1" {
+ writeApiError(w, shared.ApiError{
+ Type: shared.ApiErrorTypeOther,
+ Status: http.StatusForbidden,
+ Msg: "Local mode is not supported for invites",
+ })
+ return
+ }
+
+ auth := Authenticate(w, r, true)
+ if auth == nil {
+ return
+ }
+
+ org, err := db.GetOrg(auth.OrgId)
+
+ if err != nil {
+ log.Printf("Error getting org: %v\n", err)
+ http.Error(w, "Error getting org: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ if org.IsTrial {
+ writeApiError(w, shared.ApiError{
+ Type: shared.ApiErrorTypeTrialActionNotAllowed,
+ Status: http.StatusForbidden,
+ Msg: "Trial user can't invite other users",
+ })
+
+ return
+ }
+
+ currentUserId := auth.User.Id
+
+ var req shared.InviteRequest
+ err = json.NewDecoder(r.Body).Decode(&req)
+ if err != nil {
+ log.Printf("Error unmarshalling request: %v\n", err)
+ http.Error(w, "Error unmarshalling request: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+ req.Email = strings.ToLower(req.Email)
+
+ // ensure current user can invite target user
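+	// the permission appears to be encoded as "<permission>|<orgRoleId>",
+	// scoping invite rights to the target role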
+ permission := shared.Permission(strings.Join([]string{string(shared.PermissionInviteUser), req.OrgRoleId}, "|"))
+
+ if !auth.HasPermission(permission) {
+ log.Printf("User does not have permission to invite user with role: %v\n", req.OrgRoleId)
+ http.Error(w, "User does not have permission to invite user with role: "+req.OrgRoleId, http.StatusForbidden)
+ return
+ }
+
+ // ensure user doesn't already have access to org via domain
+ split := strings.Split(req.Email, "@")
+ if len(split) != 2 {
+ log.Printf("Invalid email: %v\n", req.Email)
+ http.Error(w, "Invalid email: "+req.Email, http.StatusBadRequest)
+ return
+ }
+	domain := split[1]
+
+	if org.AutoAddDomainUsers && org.Domain != nil && *org.Domain == domain {
+		log.Printf("User already has access to org via domain: %v\n", domain)
+		http.Error(w, "User already has access to org via domain: "+domain, http.StatusBadRequest)
+		return
+	}
+
+ // ensure user with this email isn't already in the org
+ user, err := db.GetUserByEmail(req.Email)
+
+ if err != nil {
+ log.Printf("Error getting user: %v\n", err)
+ http.Error(w, "Error getting user: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ if user != nil {
+ isMember, err := db.ValidateOrgMembership(user.Id, auth.OrgId)
+
+ if err != nil {
+ log.Printf("Error validating org membership: %v\n", err)
+ http.Error(w, "Error validating org membership: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ if isMember {
+ log.Println("User is already a member of org")
+ http.Error(w, "User is already a member of org", http.StatusBadRequest)
+ return
+ }
+ }
+
+ // ensure invite isn't already active
+ invite, err := db.GetActiveInviteByEmail(auth.OrgId, req.Email)
+
+ if err != nil {
+ log.Printf("Error getting invite: %v\n", err)
+ http.Error(w, "Error getting invite: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ if invite != nil {
+ log.Println("Invite already exists")
+ http.Error(w, "Invite already exists", http.StatusBadRequest)
+ return
+ }
+
+ err = db.WithTx(r.Context(), "invite user", func(tx *sqlx.Tx) error {
+
+ err = db.CreateInvite(&db.Invite{
+ OrgId: auth.OrgId,
+ OrgRoleId: req.OrgRoleId,
+ Email: req.Email,
+ Name: req.Name,
+ InviterId: currentUserId,
+ }, tx)
+
+ if err != nil {
+ log.Printf("Error creating invite: %v\n", err)
+ return fmt.Errorf("error creating invite: %v", err)
+ }
+
+ err = email.SendInviteEmail(req.Email, req.Name, auth.User.Name, org.Name)
+
+ if err != nil {
+ log.Printf("Error sending invite email: %v\n", err)
+ return fmt.Errorf("error sending invite email: %v", err)
+ }
+
+ return nil
+ })
+
+ if err != nil {
+ log.Printf("Error inviting user: %v\n", err)
+ http.Error(w, "Error inviting user: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ log.Println("Successfully created invite")
+}
+
+func ListPendingInvitesHandler(w http.ResponseWriter, r *http.Request) {
+	log.Println("Received a request for ListPendingInvitesHandler")
+
+ if os.Getenv("GOENV") == "development" && os.Getenv("LOCAL_MODE") == "1" {
+ writeApiError(w, shared.ApiError{
+ Type: shared.ApiErrorTypeOther,
+ Status: http.StatusForbidden,
+ Msg: "Local mode is not supported for invites",
+ })
+ return
+ }
+
+ auth := Authenticate(w, r, true)
+ if auth == nil {
+ return
+ }
+
+ org, err := db.GetOrg(auth.OrgId)
+ if err != nil {
+ log.Printf("Error getting org: %v\n", err)
+ http.Error(w, "Error getting org: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ if org.IsTrial {
+ writeApiError(w, shared.ApiError{
+ Type: shared.ApiErrorTypeTrialActionNotAllowed,
+ Status: http.StatusForbidden,
+ Msg: "Trial user can't list invites",
+ })
+ return
+ }
+
+ invites, err := db.ListPendingInvites(auth.OrgId)
+
+ if err != nil {
+ log.Printf("Error listing invites: %v\n", err)
+ http.Error(w, "Error listing invites: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ var apiInvites []*shared.Invite
+ for _, invite := range invites {
+ apiInvites = append(apiInvites, invite.ToApi())
+ }
+
+ bytes, err := json.Marshal(apiInvites)
+
+ if err != nil {
+ log.Printf("Error marshalling invites: %v\n", err)
+ http.Error(w, "Error marshalling invites: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ w.Write(bytes)
+ log.Println("Successfully processed request for ListPendingInvitesHandler")
+}
+
+func ListAcceptedInvitesHandler(w http.ResponseWriter, r *http.Request) {
+ log.Println("Received a request for ListAcceptedInvitesHandler")
+
+ if os.Getenv("GOENV") == "development" && os.Getenv("LOCAL_MODE") == "1" {
+ writeApiError(w, shared.ApiError{
+ Type: shared.ApiErrorTypeOther,
+ Status: http.StatusForbidden,
+ Msg: "Local mode is not supported for invites",
+ })
+ return
+ }
+
+ auth := Authenticate(w, r, true)
+ if auth == nil {
+ return
+ }
+
+ org, err := db.GetOrg(auth.OrgId)
+ if err != nil {
+ log.Printf("Error getting org: %v\n", err)
+ http.Error(w, "Error getting org: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ if org.IsTrial {
+ writeApiError(w, shared.ApiError{
+ Type: shared.ApiErrorTypeTrialActionNotAllowed,
+ Status: http.StatusForbidden,
+ Msg: "Trial user can't list invites",
+ })
+ return
+ }
+
+ invites, err := db.ListAcceptedInvites(auth.OrgId)
+
+ if err != nil {
+ log.Printf("Error listing invites: %v\n", err)
+ http.Error(w, "Error listing invites: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ var apiInvites []*shared.Invite
+ for _, invite := range invites {
+ apiInvites = append(apiInvites, invite.ToApi())
+ }
+
+ bytes, err := json.Marshal(apiInvites)
+
+ if err != nil {
+ log.Printf("Error marshalling invites: %v\n", err)
+ http.Error(w, "Error marshalling invites: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ w.Write(bytes)
+ log.Println("Successfully processed request for ListAcceptedInvitesHandler")
+}
+
+func ListAllInvitesHandler(w http.ResponseWriter, r *http.Request) {
+ log.Println("Received a request for ListAllInvitesHandler")
+
+ if os.Getenv("GOENV") == "development" && os.Getenv("LOCAL_MODE") == "1" {
+ writeApiError(w, shared.ApiError{
+ Type: shared.ApiErrorTypeOther,
+ Status: http.StatusForbidden,
+ Msg: "Local mode is not supported for invites",
+ })
+ return
+ }
+
+ auth := Authenticate(w, r, true)
+ if auth == nil {
+ return
+ }
+
+ org, err := db.GetOrg(auth.OrgId)
+ if err != nil {
+ log.Printf("Error getting org: %v\n", err)
+ http.Error(w, "Error getting org: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ if org.IsTrial {
+ writeApiError(w, shared.ApiError{
+ Type: shared.ApiErrorTypeTrialActionNotAllowed,
+ Status: http.StatusForbidden,
+ Msg: "Trial user can't list invites",
+ })
+ return
+ }
+
+ invites, err := db.ListAllInvites(auth.OrgId)
+
+ if err != nil {
+ log.Printf("Error listing invites: %v\n", err)
+ http.Error(w, "Error listing invites: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ var apiInvites []*shared.Invite
+ for _, invite := range invites {
+ apiInvites = append(apiInvites, invite.ToApi())
+ }
+
+ bytes, err := json.Marshal(apiInvites)
+
+ if err != nil {
+ log.Printf("Error marshalling invites: %v\n", err)
+ http.Error(w, "Error marshalling invites: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ w.Write(bytes)
+ log.Println("Successfully processed request for ListAllInvitesHandler")
+}
+
+func DeleteInviteHandler(w http.ResponseWriter, r *http.Request) {
+ log.Println("Received a request for DeleteInviteHandler")
+
+ if os.Getenv("GOENV") == "development" && os.Getenv("LOCAL_MODE") == "1" {
+ writeApiError(w, shared.ApiError{
+ Type: shared.ApiErrorTypeOther,
+ Status: http.StatusForbidden,
+ Msg: "Local mode is not supported for invites",
+ })
+ return
+ }
+
+ auth := Authenticate(w, r, true)
+ if auth == nil {
+ return
+ }
+
+ org, err := db.GetOrg(auth.OrgId)
+ if err != nil {
+ log.Printf("Error getting org: %v\n", err)
+ http.Error(w, "Error getting org: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ if org.IsTrial {
+ writeApiError(w, shared.ApiError{
+ Type: shared.ApiErrorTypeTrialActionNotAllowed,
+ Status: http.StatusForbidden,
+ Msg: "Trial user can't delete invites",
+ })
+ return
+ }
+
+ vars := mux.Vars(r)
+ inviteId := vars["inviteId"]
+
+ invite, err := db.GetInvite(inviteId)
+
+ if err != nil {
+ log.Printf("Error getting invite: %v\n", err)
+ http.Error(w, "Error getting invite: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ if invite == nil || invite.OrgId != auth.OrgId {
+ log.Printf("Invite not found: %v\n", inviteId)
+ http.Error(w, "Invite not found: "+inviteId, http.StatusNotFound)
+ return
+ }
+
+ // ensure current user can remove target invite
+ removePermission := shared.Permission(strings.Join([]string{string(shared.PermissionRemoveUser), invite.OrgRoleId}, "|"))
+
+ invitePermission := shared.Permission(strings.Join([]string{string(shared.PermissionInviteUser), invite.OrgRoleId}, "|"))
+
+ if !(auth.HasPermission(removePermission) ||
+ (auth.User.Id == invite.InviterId && auth.HasPermission(invitePermission))) {
+ log.Printf("User does not have permission to remove invite with role: %v\n", invite.OrgRoleId)
+ http.Error(w, "User does not have permission to remove invite with role: "+invite.OrgRoleId, http.StatusForbidden)
+ return
+ }
+
+ err = db.DeleteInvite(inviteId, nil)
+
+ if err != nil {
+ log.Printf("Error deleting invite: %v\n", err)
+ http.Error(w, "Error deleting invite: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ log.Println("Successfully deleted invite")
+}
diff --git a/app/server/handlers/models.go b/app/server/handlers/models.go
new file mode 100644
index 0000000000000000000000000000000000000000..59884caa64a496946d0e7679ebc2fa9445f1d335
--- /dev/null
+++ b/app/server/handlers/models.go
@@ -0,0 +1,521 @@
+package handlers
+
+import (
+ "encoding/json"
+ "fmt"
+ "log"
+ "net/http"
+ "os"
+ "plandex-server/db"
+
+ shared "plandex-shared"
+
+ "github.com/gorilla/mux"
+ "github.com/jmoiron/sqlx"
+)
+
+const CustomModelsMinClientVersion = "2.2.0"
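+
+// Requests from older clients are rejected via requireMinClientVersion,
+// presumably because they predate the current custom models schema.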
+
+func UpsertCustomModelsHandler(w http.ResponseWriter, r *http.Request) {
+	log.Println("Received request for UpsertCustomModelsHandler")
+
+ auth := Authenticate(w, r, true)
+ if auth == nil {
+ return
+ }
+
+ if !requireMinClientVersion(w, r, CustomModelsMinClientVersion) {
+ return
+ }
+
+ var modelsInput shared.ModelsInput
+ if err := json.NewDecoder(r.Body).Decode(&modelsInput); err != nil {
+ log.Printf("Error decoding request body: %v\n", err)
+ http.Error(w, "Invalid request body: "+err.Error(), http.StatusBadRequest)
+ return
+ }
+
+ if len(modelsInput.CustomProviders) > 0 {
+ if os.Getenv("IS_CLOUD") != "" {
+ http.Error(w, "Custom model providers are not supported on Plandex Cloud", http.StatusBadRequest)
+ return
+ }
+ }
+
+ if len(modelsInput.CustomModels) > 0 {
+ if os.Getenv("IS_CLOUD") != "" {
+ apiOrg, err := getApiOrg(auth.OrgId)
+ if err != nil {
+ log.Printf("Error fetching org: %v\n", err)
+				http.Error(w, "Failed to upsert custom models: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ if apiOrg.IntegratedModelsMode {
+ http.Error(w, "Custom models are not supported on Plandex Cloud in Integrated Models mode", http.StatusBadRequest)
+ return
+ }
+ }
+ }
+
+	noDuplicates, errMsg := modelsInput.CheckNoDuplicates()
+	if !noDuplicates {
+		http.Error(w, "Has duplicates: "+errMsg, http.StatusBadRequest)
+		return
+	}
+
+ for _, provider := range modelsInput.CustomProviders {
+ if provider.Name == "" {
+ msg := "Provider name is required"
+ log.Println(msg)
+ http.Error(w, msg, http.StatusBadRequest)
+ return
+ }
+ }
+
+ for _, model := range modelsInput.CustomModels {
+ if model.ModelId == "" {
+ msg := "Model id is required"
+ log.Println(msg)
+ http.Error(w, msg, http.StatusBadRequest)
+ return
+ }
+
+ if shared.BuiltInBaseModelsById[model.ModelId] != nil {
+ msg := fmt.Sprintf("%s is a built-in base model id, so it can't be used for a custom model", model.ModelId)
+ log.Println(msg)
+ http.Error(w, msg, http.StatusUnprocessableEntity)
+ return
+ }
+ }
+
+ for _, modelPack := range modelsInput.CustomModelPacks {
+ if modelPack.Name == "" {
+ msg := "Model pack name is required"
+ log.Println(msg)
+ http.Error(w, msg, http.StatusBadRequest)
+ return
+ }
+
+ if shared.BuiltInModelPacksByName[modelPack.Name] != nil {
+ msg := fmt.Sprintf("%s is a built-in model pack name, so it can't be used for a custom model pack", modelPack.Name)
+ log.Println(msg)
+ http.Error(w, msg, http.StatusUnprocessableEntity)
+ return
+ }
+ }
+
+ var existingCustomModelIds = make(map[shared.ModelId]bool)
+ var existingCustomProviderNames = make(map[string]bool)
+
+ customModels, err := db.ListCustomModels(auth.OrgId)
+ if err != nil {
+ log.Printf("Error fetching custom models: %v\n", err)
+		http.Error(w, "Failed to upsert custom models: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ customModelPacks, err := db.ListModelPacks(auth.OrgId)
+ if err != nil {
+ log.Printf("Error fetching custom model packs: %v\n", err)
+		http.Error(w, "Failed to upsert custom models: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ var customProviders []*db.CustomProvider
+
+ if os.Getenv("IS_CLOUD") == "" {
+ customProviders, err = db.ListCustomProviders(auth.OrgId)
+ if err != nil {
+ log.Printf("Error fetching custom providers: %v\n", err)
+			http.Error(w, "Failed to upsert custom models: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+ }
+
+ apiCustomModels := make([]*shared.CustomModel, len(customModels))
+ for i, model := range customModels {
+ apiCustomModels[i] = model.ToApi()
+ }
+
+ apiCustomProviders := make([]*shared.CustomProvider, len(customProviders))
+ for i, provider := range customProviders {
+ apiCustomProviders[i] = provider.ToApi()
+ }
+
+ apiCustomModelPacks := make([]*shared.ModelPackSchema, len(customModelPacks))
+ for i, modelPack := range customModelPacks {
+ apiCustomModelPacks[i] = modelPack.ToApi().ToModelPackSchema()
+ }
+
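+	// FilterUnchanged drops entries identical to what's already stored, so only
+	// genuinely new or modified records are written below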
+ updatedModelsInput := modelsInput.FilterUnchanged(&shared.ModelsInput{
+ CustomModels: apiCustomModels,
+ CustomProviders: apiCustomProviders,
+ CustomModelPacks: apiCustomModelPacks,
+ })
+
+ for _, model := range customModels {
+ existingCustomModelIds[model.ModelId] = true
+ }
+
+ for _, provider := range customProviders {
+ existingCustomProviderNames[provider.Name] = true
+ }
+
+ inputModelIds := make(map[string]bool)
+ inputProviderNames := make(map[string]bool)
+ inputModelPackNames := make(map[string]bool)
+
+ for _, model := range modelsInput.CustomModels {
+ inputModelIds[string(model.ModelId)] = true
+ }
+
+ for _, provider := range modelsInput.CustomProviders {
+ inputProviderNames[provider.Name] = true
+ }
+
+ for _, modelPack := range modelsInput.CustomModelPacks {
+ inputModelPackNames[modelPack.Name] = true
+ }
+
+ var toUpsertCustomModels []*db.CustomModel
+ var toUpsertCustomProviders []*db.CustomProvider
+ var toUpsertModelPacks []*db.ModelPack
+
+ for _, provider := range updatedModelsInput.CustomProviders {
+ dbProvider := db.CustomProviderFromApi(provider)
+ dbProvider.Id = provider.Id
+ dbProvider.OrgId = auth.OrgId
+
+ toUpsertCustomProviders = append(toUpsertCustomProviders, dbProvider)
+ }
+
+ for _, model := range updatedModelsInput.CustomModels {
+ // ensure that providers to upsert are either built-in, being imported, or already exist
+ for _, provider := range model.Providers {
+ if provider.Provider == shared.ModelProviderCustom {
+ _, exists := existingCustomProviderNames[*provider.CustomProvider]
+ _, creating := inputProviderNames[*provider.CustomProvider]
+ if !exists && !creating {
+ msg := fmt.Sprintf("'%s' is not a custom model provider that exists or is being imported", *provider.CustomProvider)
+ log.Println(msg)
+ http.Error(w, msg, http.StatusUnprocessableEntity)
+ return
+ }
+ } else {
+ pc, builtIn := shared.BuiltInModelProviderConfigs[provider.Provider]
+ if !builtIn {
+ msg := fmt.Sprintf("'%s' is not a built-in model provider", provider.Provider)
+ log.Println(msg)
+ http.Error(w, msg, http.StatusUnprocessableEntity)
+ return
+ }
+ if os.Getenv("IS_CLOUD") != "" && pc.LocalOnly {
+ msg := fmt.Sprintf("'%s' is a local-only model provider, so it can't be used on Plandex Cloud", provider.Provider)
+ log.Println(msg)
+ http.Error(w, msg, http.StatusUnprocessableEntity)
+ return
+ }
+ }
+ }
+
+ dbModel := db.CustomModelFromApi(model)
+ dbModel.Id = model.Id
+ dbModel.OrgId = auth.OrgId
+
+ toUpsertCustomModels = append(toUpsertCustomModels, dbModel)
+ }
+
+ for _, modelPack := range updatedModelsInput.CustomModelPacks {
+ // ensure that all models are either built-in, being imported, or already exist
+ allModelIds := modelPack.AllModelIds()
+
+ for _, modelId := range allModelIds {
+ _, exists := existingCustomModelIds[modelId]
+ _, creating := inputModelIds[string(modelId)]
+ bm, builtIn := shared.BuiltInBaseModelsById[modelId]
+
+ if !exists && !creating && !builtIn {
+ msg := fmt.Sprintf("'%s' is not built-in, not being imported, and not an existing custom model", modelId)
+ log.Println(msg)
+ http.Error(w, msg, http.StatusUnprocessableEntity)
+ return
+ }
+
+ if builtIn && os.Getenv("IS_CLOUD") != "" && bm.IsLocalOnly() {
+ msg := fmt.Sprintf("'%s' is a local-only built-in model, so it can't be used on Plandex Cloud", modelId)
+ log.Println(msg)
+ http.Error(w, msg, http.StatusUnprocessableEntity)
+ return
+ }
+ }
+
+ mp := modelPack.ToModelPack()
+ dbMp := db.ModelPackFromApi(&mp)
+ dbMp.OrgId = auth.OrgId
+ dbMp.Id = mp.Id
+
+ toUpsertModelPacks = append(toUpsertModelPacks, dbMp)
+ }
+
+ toDeleteCustomModelIds := []string{}
+ toDeleteCustomProviderIds := []string{}
+ toDeleteModelPackIds := []string{}
+
+ for _, model := range customModels {
+ if _, exists := inputModelIds[string(model.ModelId)]; !exists {
+ toDeleteCustomModelIds = append(toDeleteCustomModelIds, model.Id)
+ }
+ }
+
+ for _, provider := range customProviders {
+ if _, exists := inputProviderNames[provider.Name]; !exists {
+ toDeleteCustomProviderIds = append(toDeleteCustomProviderIds, provider.Id)
+ }
+ }
+
+ for _, modelPack := range customModelPacks {
+ if _, exists := inputModelPackNames[modelPack.Name]; !exists {
+ toDeleteModelPackIds = append(toDeleteModelPackIds, modelPack.Id)
+ }
+ }
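+
+	// the request is treated as the full desired state for the org: any stored
+	// custom model, provider, or model pack that's absent from the input is
+	// deleted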
+
+ numChanges := len(toUpsertCustomModels) + len(toUpsertCustomProviders) + len(toUpsertModelPacks) + len(toDeleteCustomModelIds) + len(toDeleteCustomProviderIds) + len(toDeleteModelPackIds)
+ if numChanges == 0 {
+ w.WriteHeader(http.StatusOK)
+ log.Println("No changes to custom models/providers/model packs")
+ return
+ }
+
+ err = db.WithTx(r.Context(), "create custom models/providers/model packs", func(tx *sqlx.Tx) error {
+ for _, model := range toUpsertCustomModels {
+ if err := db.UpsertCustomModel(tx, model); err != nil {
+ return fmt.Errorf("error creating custom model: %w", err)
+ }
+ }
+
+ for _, provider := range toUpsertCustomProviders {
+ if err := db.UpsertCustomProvider(tx, provider); err != nil {
+ return fmt.Errorf("error creating custom provider: %w", err)
+ }
+ }
+
+ for _, modelPack := range toUpsertModelPacks {
+ if err := db.UpsertModelPack(tx, modelPack); err != nil {
+ return fmt.Errorf("error creating model pack: %w", err)
+ }
+ }
+
+ if len(toDeleteCustomModelIds) > 0 {
+ if err := db.DeleteCustomModels(tx, auth.OrgId, toDeleteCustomModelIds); err != nil {
+ return fmt.Errorf("error deleting custom models: %w", err)
+ }
+ }
+
+ if len(toDeleteCustomProviderIds) > 0 {
+ if err := db.DeleteCustomProviders(tx, auth.OrgId, toDeleteCustomProviderIds); err != nil {
+ return fmt.Errorf("error deleting custom providers: %w", err)
+ }
+ }
+
+ if len(toDeleteModelPackIds) > 0 {
+ if err := db.DeleteModelPacks(tx, auth.OrgId, toDeleteModelPackIds); err != nil {
+ return fmt.Errorf("error deleting model packs: %w", err)
+ }
+ }
+
+ return nil
+ })
+
+ if err != nil {
+ log.Printf("Error: %v\n", err)
+ http.Error(w, "Failed to import custom models/providers/model packs: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ w.WriteHeader(http.StatusOK)
+
+ log.Println("Successfully imported custom models/providers/model packs")
+}
+
+func GetCustomModelHandler(w http.ResponseWriter, r *http.Request) {
+ log.Println("Received request for GetCustomModelHandler")
+
+ auth := Authenticate(w, r, true)
+ if auth == nil {
+ return
+ }
+
+ id := mux.Vars(r)["modelId"]
+
+ res, err := db.GetCustomModel(auth.OrgId, id)
+ if err != nil {
+ log.Printf("Error fetching custom model: %v\n", err)
+ http.Error(w, "Failed to fetch custom model: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ if res == nil {
+ http.Error(w, "Custom model not found", http.StatusNotFound)
+ return
+ }
+
+ err = json.NewEncoder(w).Encode(res.ToApi())
+ if err != nil {
+ log.Printf("Error encoding custom model: %v\n", err)
+ http.Error(w, fmt.Sprintf("Error encoding custom model: %v", err), http.StatusInternalServerError)
+ return
+ }
+
+ log.Println("Successfully fetched custom model")
+}
+
+func ListCustomModelsHandler(w http.ResponseWriter, r *http.Request) {
+ log.Println("Received request for ListCustomModelsHandler")
+
+ auth := Authenticate(w, r, true)
+ if auth == nil {
+ return
+ }
+
+ if !requireMinClientVersion(w, r, CustomModelsMinClientVersion) {
+ return
+ }
+
+ models, err := db.ListCustomModels(auth.OrgId)
+ if err != nil {
+ log.Printf("Error fetching custom models: %v\n", err)
+ http.Error(w, "Failed to fetch custom models: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ var apiList []*shared.CustomModel
+ for _, m := range models {
+ apiList = append(apiList, m.ToApi())
+ }
+
+ err = json.NewEncoder(w).Encode(apiList)
+ if err != nil {
+ log.Printf("Error encoding custom models: %v\n", err)
+ http.Error(w, fmt.Sprintf("Error encoding custom models: %v", err), http.StatusInternalServerError)
+ return
+ }
+
+ log.Println("Successfully fetched custom models")
+}
+
+func GetCustomProviderHandler(w http.ResponseWriter, r *http.Request) {
+ auth := Authenticate(w, r, true)
+ if auth == nil {
+ return
+ }
+
+ id := mux.Vars(r)["providerId"]
+
+	res, err := db.GetCustomProvider(auth.OrgId, id)
+	if err != nil {
+		http.Error(w, err.Error(), http.StatusInternalServerError)
+		return
+	}
+
+	if res == nil {
+		http.Error(w, "Custom provider not found", http.StatusNotFound)
+		return
+	}
+
+ err = json.NewEncoder(w).Encode(res.ToApi())
+ if err != nil {
+ log.Printf("Error encoding custom provider: %v\n", err)
+ http.Error(w, fmt.Sprintf("Error encoding custom provider: %v", err), http.StatusInternalServerError)
+ return
+ }
+
+ log.Println("Successfully fetched custom provider")
+}
+
+func ListCustomProvidersHandler(w http.ResponseWriter, r *http.Request) {
+ auth := Authenticate(w, r, true)
+ if auth == nil {
+ return
+ }
+
+ if os.Getenv("IS_CLOUD") != "" {
+ http.Error(w, "Custom model providers are not supported on Plandex Cloud", http.StatusBadRequest)
+ return
+ }
+
+ list, err := db.ListCustomProviders(auth.OrgId)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ var apiList []*shared.CustomProvider
+ for _, p := range list {
+ apiList = append(apiList, p.ToApi())
+ }
+
+ err = json.NewEncoder(w).Encode(apiList)
+ if err != nil {
+ log.Printf("Error encoding custom providers: %v\n", err)
+ http.Error(w, fmt.Sprintf("Error encoding custom providers: %v", err), http.StatusInternalServerError)
+ return
+ }
+
+ log.Println("Successfully fetched custom providers")
+}
+
+func CreateModelPackHandler(w http.ResponseWriter, r *http.Request) {
+ log.Println("Received request for CreateModelPackHandler")
+
+ auth := Authenticate(w, r, true)
+ if auth == nil {
+ return
+ }
+
+ if !requireMinClientVersion(w, r, CustomModelsMinClientVersion) {
+ return
+ }
+
+ http.Error(w, "Use POST /custom_models instead to create model packs", http.StatusBadRequest)
+}
+
+func UpdateModelPackHandler(w http.ResponseWriter, r *http.Request) {
+ log.Println("Received request for UpdateModelPackHandler")
+
+ auth := Authenticate(w, r, true)
+ if auth == nil {
+ return
+ }
+
+ if !requireMinClientVersion(w, r, CustomModelsMinClientVersion) {
+ return
+ }
+
+ http.Error(w, "Use POST /custom_models instead to update model packs", http.StatusBadRequest)
+}
+
+func ListModelPacksHandler(w http.ResponseWriter, r *http.Request) {
+ log.Println("Received request for ListModelPacksHandler")
+
+ auth := Authenticate(w, r, true)
+ if auth == nil {
+ return
+ }
+
+ if !requireMinClientVersion(w, r, CustomModelsMinClientVersion) {
+ return
+ }
+
+ sets, err := db.ListModelPacks(auth.OrgId)
+ if err != nil {
+ log.Printf("Error fetching model packs: %v\n", err)
+ http.Error(w, "Failed to fetch model packs: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ var apiPacks []*shared.ModelPack
+
+ for _, mp := range sets {
+ apiPacks = append(apiPacks, mp.ToApi())
+ }
+
+	err = json.NewEncoder(w).Encode(apiPacks)
+	if err != nil {
+		log.Printf("Error encoding model packs: %v\n", err)
+		http.Error(w, fmt.Sprintf("Error encoding model packs: %v", err), http.StatusInternalServerError)
+		return
+	}
+
+ log.Println("Successfully fetched model packs")
+}
diff --git a/app/server/handlers/org_helpers.go b/app/server/handlers/org_helpers.go
new file mode 100644
index 0000000000000000000000000000000000000000..eda2969506db28565d4676daedfddb1c5c940d37
--- /dev/null
+++ b/app/server/handlers/org_helpers.go
@@ -0,0 +1,63 @@
+package handlers
+
+import (
+	"log"
+	"net/http"
+	"plandex-server/db"
+	"plandex-server/hooks"
+
+	shared "plandex-shared"
+)
+
+func toApiOrgs(orgs []*db.Org) ([]*shared.Org, *shared.ApiError) {
+ var orgIds []string
+ for _, org := range orgs {
+ orgIds = append(orgIds, org.Id)
+ }
+
+ hookRes, apiErr := hooks.ExecHook(hooks.GetApiOrgs, hooks.HookParams{
+ GetApiOrgIds: orgIds,
+ })
+
+ if apiErr != nil {
+ log.Printf("Error getting integrated models mode by org id: %v\n", apiErr)
+ return nil, apiErr
+ }
+
+ var apiOrgs []*shared.Org
+ for _, org := range orgs {
+ if hookRes.ApiOrgsById != nil {
+ hookApiOrg := hookRes.ApiOrgsById[org.Id]
+ apiOrgs = append(apiOrgs, hookApiOrg)
+ } else {
+ apiOrgs = append(apiOrgs, org.ToApi())
+ }
+ }
+
+ return apiOrgs, nil
+}
+
+func getApiOrg(orgId string) (*shared.Org, *shared.ApiError) {
+ org, err := db.GetOrg(orgId)
+ if err != nil {
+ log.Printf("Error getting org: %v\n", err)
+		return nil, &shared.ApiError{
+			Type:   shared.ApiErrorTypeOther,
+			Status: http.StatusInternalServerError,
+			Msg:    "Error getting org",
+		}
+ }
+
+ hookRes, apiErr := hooks.ExecHook(hooks.GetApiOrgs, hooks.HookParams{
+ GetApiOrgIds: []string{org.Id},
+ })
+
+ if apiErr != nil {
+ log.Printf("Error getting integrated models mode by org id: %v\n", apiErr)
+ return nil, apiErr
+ }
+
+ if hookRes.ApiOrgsById != nil {
+ return hookRes.ApiOrgsById[org.Id], nil
+ }
+
+ return org.ToApi(), nil
+}
diff --git a/app/server/handlers/orgs.go b/app/server/handlers/orgs.go
new file mode 100644
index 0000000000000000000000000000000000000000..2f4318cdf6d53e36bb5fac6359aaf605cbc1d936
--- /dev/null
+++ b/app/server/handlers/orgs.go
@@ -0,0 +1,257 @@
+package handlers
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "log"
+ "net/http"
+ "os"
+ "plandex-server/db"
+ "plandex-server/hooks"
+
+ shared "plandex-shared"
+
+ "github.com/jmoiron/sqlx"
+)
+
+func ListOrgsHandler(w http.ResponseWriter, r *http.Request) {
+ log.Println("Received request for ListOrgsHandler")
+
+ auth := Authenticate(w, r, false)
+ if auth == nil {
+ return
+ }
+
+ orgs, err := db.GetAccessibleOrgsForUser(auth.User)
+
+ if err != nil {
+ log.Printf("Error listing orgs: %v\n", err)
+ http.Error(w, "Error listing orgs: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ apiOrgs, apiErr := toApiOrgs(orgs)
+
+ if apiErr != nil {
+ log.Printf("Error converting orgs to api: %v\n", apiErr)
+ writeApiError(w, *apiErr)
+ return
+ }
+
+ bytes, err := json.Marshal(apiOrgs)
+
+ if err != nil {
+ log.Printf("Error marshalling response: %v\n", err)
+ http.Error(w, "Error marshalling response: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ log.Println("Successfully listed orgs")
+
+ w.Write(bytes)
+}
+
+func CreateOrgHandler(w http.ResponseWriter, r *http.Request) {
+ log.Println("Received request for CreateOrgHandler")
+
+ if os.Getenv("IS_CLOUD") != "" {
+ writeApiError(w, shared.ApiError{
+ Type: shared.ApiErrorTypeOther,
+ Status: http.StatusForbidden,
+ Msg: "Plandex Cloud orgs can only be created by starting a trial",
+ })
+ return
+ }
+
+ auth := Authenticate(w, r, false)
+ if auth == nil {
+ return
+ }
+
+ // read the request body
+ body, err := io.ReadAll(r.Body)
+ if err != nil {
+ log.Printf("Error reading request body: %v\n", err)
+ http.Error(w, "Error reading request body: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ var req shared.CreateOrgRequest
+ err = json.Unmarshal(body, &req)
+ if err != nil {
+ log.Printf("Error unmarshalling request: %v\n", err)
+ http.Error(w, "Error unmarshalling request: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ var apiErr *shared.ApiError
+ var org *db.Org
+ err = db.WithTx(r.Context(), "create org", func(tx *sqlx.Tx) error {
+ var err error
+ var domain *string
+ if req.AutoAddDomainUsers {
+ if shared.IsEmailServiceDomain(auth.User.Domain) {
+ log.Printf("Invalid domain: %v\n", auth.User.Domain)
+ return fmt.Errorf("invalid domain: %v", auth.User.Domain)
+ }
+
+ domain = &auth.User.Domain
+ }
+
+ // create a new org
+ org, err = db.CreateOrg(&req, auth.AuthToken.UserId, domain, tx)
+
+ if err != nil {
+ log.Printf("Error creating org: %v\n", err)
+ return fmt.Errorf("error creating org: %v", err)
+ }
+
+ if org.AutoAddDomainUsers && org.Domain != nil {
+ err = db.AddOrgDomainUsers(org.Id, *org.Domain, tx)
+
+ if err != nil {
+ log.Printf("Error adding org domain users: %v\n", err)
+ return fmt.Errorf("error adding org domain users: %v", err)
+ }
+ }
+
+		_, apiErr = hooks.ExecHook(hooks.CreateOrg, hooks.HookParams{
+			Auth: auth,
+			Tx:   tx,
+
+			CreateOrgHookRequestParams: &hooks.CreateOrgHookRequestParams{
+				Org: org,
+			},
+		})
+
+		if apiErr != nil {
+			// return an error so the transaction is rolled back rather than
+			// committing an org whose creation hook failed
+			return fmt.Errorf("error executing create org hook: %v", apiErr.Msg)
+		}
+
+		return nil
+ })
+
+ if apiErr != nil {
+ writeApiError(w, *apiErr)
+ return
+ }
+
+ if err != nil {
+ log.Printf("Error creating org: %v\n", err)
+ http.Error(w, "Error creating org: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ resp := shared.CreateOrgResponse{
+ Id: org.Id,
+ }
+
+ bytes, err := json.Marshal(resp)
+
+ if err != nil {
+ log.Printf("Error marshalling response: %v\n", err)
+ http.Error(w, "Error marshalling response: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ err = SetAuthCookieIfBrowser(w, r, auth.User, "", org.Id)
+ if err != nil {
+ log.Printf("Error setting auth cookie: %v\n", err)
+ http.Error(w, "Error setting auth cookie: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ log.Println("Successfully created org")
+
+ w.Write(bytes)
+}
+
+func GetOrgSessionHandler(w http.ResponseWriter, r *http.Request) {
+ log.Println("Received request for GetOrgSessionHandler")
+
+ auth := Authenticate(w, r, true)
+
+ if auth == nil {
+ return
+ }
+
+ org, apiErr := getApiOrg(auth.OrgId)
+
+ if apiErr != nil {
+ log.Printf("Error converting org to api: %v\n", apiErr)
+ writeApiError(w, *apiErr)
+ return
+ }
+
+ bytes, err := json.Marshal(org)
+
+ if err != nil {
+ log.Printf("Error marshalling response: %v\n", err)
+ http.Error(w, "Error marshalling response: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ err = SetAuthCookieIfBrowser(w, r, auth.User, "", org.Id)
+ if err != nil {
+ log.Printf("Error setting auth cookie: %v\n", err)
+ http.Error(w, "Error setting auth cookie: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ w.Write(bytes)
+
+ log.Println("Successfully got org session")
+}
+
+func ListOrgRolesHandler(w http.ResponseWriter, r *http.Request) {
+ log.Println("Received request for ListOrgRolesHandler")
+
+ auth := Authenticate(w, r, true)
+ if auth == nil {
+ return
+ }
+
+ org, err := db.GetOrg(auth.OrgId)
+ if err != nil {
+ log.Printf("Error getting org: %v\n", err)
+ http.Error(w, "Error getting org: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ if org.IsTrial {
+ writeApiError(w, shared.ApiError{
+ Type: shared.ApiErrorTypeTrialActionNotAllowed,
+ Status: http.StatusForbidden,
+ Msg: "Trial user can't list org roles",
+ })
+ return
+ }
+
+ if !auth.HasPermission(shared.PermissionListOrgRoles) {
+ log.Println("User cannot list org roles")
+ http.Error(w, "User cannot list org roles", http.StatusForbidden)
+ return
+ }
+
+ roles, err := db.ListOrgRoles(auth.OrgId)
+
+ if err != nil {
+ log.Printf("Error listing org roles: %v\n", err)
+ http.Error(w, "Error listing org roles: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ var apiRoles []*shared.OrgRole
+ for _, role := range roles {
+ apiRoles = append(apiRoles, role.ToApi())
+ }
+
+ bytes, err := json.Marshal(apiRoles)
+
+ if err != nil {
+ log.Printf("Error marshalling response: %v\n", err)
+ http.Error(w, "Error marshalling response: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ log.Println("Successfully listed org roles")
+
+ w.Write(bytes)
+}
diff --git a/app/server/handlers/plan_config.go b/app/server/handlers/plan_config.go
new file mode 100644
index 0000000000000000000000000000000000000000..dc101e442909226b0730fef5e8c10c5e7b457a4c
--- /dev/null
+++ b/app/server/handlers/plan_config.go
@@ -0,0 +1,156 @@
+package handlers
+
+import (
+ "encoding/json"
+ "fmt"
+ "log"
+ "net/http"
+ "plandex-server/db"
+
+ shared "plandex-shared"
+
+ "github.com/gorilla/mux"
+ "github.com/jmoiron/sqlx"
+)
+
+func GetPlanConfigHandler(w http.ResponseWriter, r *http.Request) {
+ log.Println("Received request for GetPlanConfigHandler")
+
+ auth := Authenticate(w, r, true)
+ if auth == nil {
+ return
+ }
+
+ vars := mux.Vars(r)
+ planId := vars["planId"]
+
+ log.Println("planId: ", planId)
+
+ plan := authorizePlan(w, planId, auth)
+ if plan == nil {
+ return
+ }
+
+ config, err := db.GetPlanConfig(planId)
+ if err != nil {
+ log.Println("Error getting plan config: ", err)
+ http.Error(w, "Error getting plan config", http.StatusInternalServerError)
+ return
+ }
+
+ res := shared.GetPlanConfigResponse{
+ Config: config,
+ }
+
+ bytes, err := json.Marshal(res)
+ if err != nil {
+ log.Println("Error marshalling response: ", err)
+ http.Error(w, "Error marshalling response", http.StatusInternalServerError)
+ return
+ }
+
+ w.Write(bytes)
+ log.Println("GetPlanConfigHandler processed successfully")
+}
+
+func UpdatePlanConfigHandler(w http.ResponseWriter, r *http.Request) {
+ log.Println("Received request for UpdatePlanConfigHandler")
+
+ auth := Authenticate(w, r, true)
+ if auth == nil {
+ return
+ }
+
+ vars := mux.Vars(r)
+ planId := vars["planId"]
+
+ log.Println("planId: ", planId)
+
+ plan := authorizePlan(w, planId, auth)
+ if plan == nil {
+ return
+ }
+
+ var req shared.UpdatePlanConfigRequest
+ err := json.NewDecoder(r.Body).Decode(&req)
+ if err != nil {
+ log.Println("Error decoding request body: ", err)
+ http.Error(w, "Error decoding request body", http.StatusBadRequest)
+ return
+ }
+
+ err = db.StorePlanConfig(planId, req.Config)
+ if err != nil {
+ log.Println("Error storing plan config: ", err)
+ http.Error(w, "Error storing plan config", http.StatusInternalServerError)
+ return
+ }
+
+ log.Println("UpdatePlanConfigHandler processed successfully")
+}
+
+func GetDefaultPlanConfigHandler(w http.ResponseWriter, r *http.Request) {
+ log.Println("Received request for GetDefaultPlanConfigHandler")
+
+ auth := Authenticate(w, r, true)
+ if auth == nil {
+ return
+ }
+
+ config, err := db.GetDefaultPlanConfig(auth.User.Id)
+ if err != nil {
+ log.Println("Error getting default plan config: ", err)
+ http.Error(w, "Error getting default plan config", http.StatusInternalServerError)
+ return
+ }
+
+ res := shared.GetDefaultPlanConfigResponse{
+ Config: config,
+ }
+
+ bytes, err := json.Marshal(res)
+ if err != nil {
+ log.Println("Error marshalling response: ", err)
+ http.Error(w, "Error marshalling response", http.StatusInternalServerError)
+ return
+ }
+
+ w.Write(bytes)
+ log.Println("GetDefaultPlanConfigHandler processed successfully")
+}
+
+func UpdateDefaultPlanConfigHandler(w http.ResponseWriter, r *http.Request) {
+ log.Println("Received request for UpdateDefaultPlanConfigHandler")
+
+ auth := Authenticate(w, r, true)
+ if auth == nil {
+ return
+ }
+
+ var req shared.UpdateDefaultPlanConfigRequest
+ err := json.NewDecoder(r.Body).Decode(&req)
+ if err != nil {
+ log.Println("Error decoding request body: ", err)
+ http.Error(w, "Error decoding request body", http.StatusBadRequest)
+ return
+ }
+
+ err = db.WithTx(r.Context(), "update default plan config", func(tx *sqlx.Tx) error {
+
+ err := db.StoreDefaultPlanConfig(auth.User.Id, req.Config, tx)
+ if err != nil {
+ log.Println("Error storing default plan config: ", err)
+ return fmt.Errorf("error storing default plan config: %v", err)
+ }
+
+ return nil
+ })
+
+ if err != nil {
+ log.Println("Error updating default plan config: ", err)
+ http.Error(w, "Error updating default plan config", http.StatusInternalServerError)
+ return
+ }
+
+ log.Println("UpdateDefaultPlanConfigHandler processed successfully")
+}
diff --git a/app/server/handlers/plans_changes.go b/app/server/handlers/plans_changes.go
new file mode 100644
index 0000000000000000000000000000000000000000..8003bb81df1f0ed5b223ba1bf20876fc58790935
--- /dev/null
+++ b/app/server/handlers/plans_changes.go
@@ -0,0 +1,579 @@
+package handlers
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "io"
+ "log"
+ "net/http"
+ "plandex-server/db"
+ modelPlan "plandex-server/model/plan"
+ "time"
+
+ shared "plandex-shared"
+
+ "github.com/gorilla/mux"
+)
+
+func CurrentPlanHandler(w http.ResponseWriter, r *http.Request) {
+ log.Println("Received request for CurrentPlanHandler")
+
+ auth := Authenticate(w, r, true)
+ if auth == nil {
+ return
+ }
+
+ vars := mux.Vars(r)
+ planId := vars["planId"]
+ branch := vars["branch"]
+ sha := vars["sha"]
+
+ log.Println("planId: ", planId, "branch: ", branch, "sha: ", sha)
+
+ if authorizePlan(w, planId, auth) == nil {
+ return
+ }
+
+ // Just in case this was sent immediately after a stream finished, wait a little before locking to allow for cleanup
+ time.Sleep(100 * time.Millisecond)
+
+ ctx, cancel := context.WithCancel(r.Context())
+ scope := db.LockScopeRead
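+	// checking out a specific sha mutates the repo's HEAD (it's restored to
+	// the branch afterward), so a write lock is required in that case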
+ if sha != "" {
+ scope = db.LockScopeWrite
+ }
+ log.Printf("locking with scope: %s", scope)
+
+ var planState *shared.CurrentPlanState
+
+ err := db.ExecRepoOperation(db.ExecRepoOperationParams{
+ OrgId: auth.OrgId,
+ UserId: auth.User.Id,
+ PlanId: planId,
+ Branch: branch,
+ Scope: scope,
+ Ctx: ctx,
+ CancelFn: cancel,
+ Reason: "get current plan state",
+ }, func(repo *db.GitRepo) error {
+ var err error
+ if sha != "" {
+ err = repo.GitCheckoutSha(sha)
+ if err != nil {
+ return fmt.Errorf("error checking out sha: %v", err)
+ }
+
+ defer func() {
+ checkoutErr := repo.GitCheckoutBranch(branch)
+ if checkoutErr != nil {
+ log.Printf("Error checking out branch: %v\n", checkoutErr)
+ }
+ }()
+ }
+
+ planState, err = db.GetCurrentPlanState(db.CurrentPlanStateParams{
+ OrgId: auth.OrgId,
+ PlanId: planId,
+ })
+
+ if err != nil {
+ return fmt.Errorf("error getting current plan state: %v", err)
+ }
+
+ return nil
+ })
+
+ if err != nil {
+ log.Printf("Error getting current plan state: %v\n", err)
+ http.Error(w, "Error getting current plan state: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ jsonBytes, err := json.Marshal(planState)
+
+ if err != nil {
+ log.Printf("Error marshalling plan state: %v\n", err)
+ http.Error(w, "Error marshalling plan state: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ log.Println("Successfully retrieved current plan state")
+
+ w.Write(jsonBytes)
+}
+
+func ApplyPlanHandler(w http.ResponseWriter, r *http.Request) {
+ log.Println("Received request for ApplyPlanHandler")
+
+ auth := Authenticate(w, r, true)
+ if auth == nil {
+ return
+ }
+
+ vars := mux.Vars(r)
+ planId := vars["planId"]
+ branch := vars["branch"]
+ log.Println("planId: ", planId, "branch: ", branch)
+
+ plan := authorizePlan(w, planId, auth)
+ if plan == nil {
+ return
+ }
+
+ // read the request body
+ body, err := io.ReadAll(r.Body)
+ if err != nil {
+ log.Printf("Error reading request body: %v\n", err)
+ http.Error(w, "Error reading request body", http.StatusInternalServerError)
+ return
+ }
+ defer r.Body.Close()
+
+ var requestBody shared.ApplyPlanRequest
+ if err := json.Unmarshal(body, &requestBody); err != nil {
+ log.Printf("Error parsing request body: %v\n", err)
+ http.Error(w, "Error parsing request body", http.StatusBadRequest)
+ return
+ }
+
+ // Just in case this was sent immediately after a stream finished, wait a little before locking to allow for cleanup
+ time.Sleep(100 * time.Millisecond)
+
+ ctx, cancel := context.WithCancel(r.Context())
+
+ var settings *shared.PlanSettings
+ var currentPlanParams db.CurrentPlanStateParams
+ var currentPlan *shared.CurrentPlanState
+
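+	// first take a read lock to snapshot settings and the current plan state;
+	// the commit message is generated outside the lock, then a separate write
+	// lock below applies the pending changes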
+ err = db.ExecRepoOperation(db.ExecRepoOperationParams{
+ OrgId: auth.OrgId,
+ UserId: auth.User.Id,
+ PlanId: planId,
+ Branch: branch,
+ Scope: db.LockScopeRead,
+ Ctx: ctx,
+ CancelFn: cancel,
+ }, func(repo *db.GitRepo) error {
+ var err error
+ settings, err = db.GetPlanSettings(plan)
+ if err != nil {
+ return fmt.Errorf("error getting plan settings: %v", err)
+ }
+
+ currentPlanParams, err = db.GetFullCurrentPlanStateParams(auth.OrgId, planId)
+ if err != nil {
+ return fmt.Errorf("error getting current plan state params: %v", err)
+ }
+
+ currentPlan, err = db.GetCurrentPlanState(currentPlanParams)
+ if err != nil {
+ return fmt.Errorf("error getting current plan state: %v", err)
+ }
+
+ return nil
+ })
+
+ if err != nil {
+ log.Printf("Error getting current plan state: %v\n", err)
+ http.Error(w, "Error getting current plan state: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ log.Println("ApplyPlanHandler: Got current plan state:", currentPlan != nil)
+
+ res := initClients(
+ initClientsParams{
+ w: w,
+ auth: auth,
+ apiKeys: requestBody.ApiKeys,
+ openAIOrgId: requestBody.OpenAIOrgId,
+ authVars: requestBody.AuthVars,
+ plan: plan,
+ settings: settings,
+ },
+ )
+
+ clients := res.clients
+ authVars := res.authVars
+
+ commitMsg, err := modelPlan.GenCommitMsgForPendingResults(modelPlan.GenCommitMsgForPendingResultsParams{
+ Auth: auth,
+ Plan: plan,
+ Clients: clients,
+ Settings: settings,
+ Current: currentPlan,
+ AuthVars: authVars,
+ SessionId: requestBody.SessionId,
+ Ctx: r.Context(),
+ })
+
+ if err != nil {
+ log.Printf("Error generating commit message: %v\n", err)
+ http.Error(w, "Error generating commit message: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
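+	// write lock to apply the plan; ClearRepoOnErr is set so repo state is
+	// cleaned up if the operation fails partway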
+ err = db.ExecRepoOperation(db.ExecRepoOperationParams{
+ OrgId: auth.OrgId,
+ UserId: auth.User.Id,
+ PlanId: planId,
+ Branch: branch,
+ Scope: db.LockScopeWrite,
+ Ctx: ctx,
+ CancelFn: cancel,
+ ClearRepoOnErr: true,
+ }, func(repo *db.GitRepo) error {
+ return db.ApplyPlan(repo, ctx, db.ApplyPlanParams{
+ OrgId: auth.OrgId,
+ UserId: auth.User.Id,
+ BranchName: branch,
+ Plan: plan,
+ CurrentPlanState: currentPlan,
+			CurrentPlanStateParams: &currentPlanParams,
+ CommitMsg: commitMsg,
+ })
+ })
+
+ if err != nil {
+ log.Printf("Error applying plan: %v\n", err)
+ http.Error(w, "Error applying plan: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ w.Write([]byte(commitMsg))
+
+ log.Println("Successfully applied plan", planId)
+}
+
+func RejectAllChangesHandler(w http.ResponseWriter, r *http.Request) {
+ log.Println("Received request for RejectAllChangesHandler")
+
+ auth := Authenticate(w, r, true)
+ if auth == nil {
+ return
+ }
+
+ vars := mux.Vars(r)
+ planId := vars["planId"]
+ branch := vars["branch"]
+ log.Println("planId: ", planId, "branch: ", branch)
+
+ if authorizePlan(w, planId, auth) == nil {
+ return
+ }
+
+ ctx, cancel := context.WithCancel(r.Context())
+
+ err := db.ExecRepoOperation(db.ExecRepoOperationParams{
+ OrgId: auth.OrgId,
+ UserId: auth.User.Id,
+ PlanId: planId,
+ Branch: branch,
+ Scope: db.LockScopeWrite,
+ Ctx: ctx,
+ CancelFn: cancel,
+ ClearRepoOnErr: true,
+ }, func(repo *db.GitRepo) error {
+ err := db.RejectAllResults(auth.OrgId, planId)
+ if err != nil {
+ return err
+ }
+
+ err = repo.GitAddAndCommit(branch, "🚫 Rejected all pending changes")
+ if err != nil {
+ return fmt.Errorf("error committing rejected changes: %v", err)
+ }
+
+ return nil
+ })
+
+ if err != nil {
+ log.Printf("Error rejecting all changes: %v\n", err)
+ http.Error(w, "Error rejecting all changes: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ log.Println("Successfully rejected all changes for plan", planId)
+}
+
+func RejectFileHandler(w http.ResponseWriter, r *http.Request) {
+ log.Println("Received request for RejectFileHandler")
+
+ auth := Authenticate(w, r, true)
+ if auth == nil {
+ return
+ }
+
+ vars := mux.Vars(r)
+ planId := vars["planId"]
+ branch := vars["branch"]
+
+ log.Println("planId: ", planId, "branch: ", branch)
+
+ if authorizePlan(w, planId, auth) == nil {
+ return
+ }
+
+ var req shared.RejectFileRequest
+ err := json.NewDecoder(r.Body).Decode(&req)
+ if err != nil {
+ log.Printf("Error decoding request: %v\n", err)
+ http.Error(w, "Error decoding request: "+err.Error(), http.StatusBadRequest)
+ return
+ }
+
+ ctx, cancel := context.WithCancel(r.Context())
+
+ err = db.ExecRepoOperation(db.ExecRepoOperationParams{
+ OrgId: auth.OrgId,
+ UserId: auth.User.Id,
+ PlanId: planId,
+ Branch: branch,
+ Scope: db.LockScopeWrite,
+ Ctx: ctx,
+ CancelFn: cancel,
+ ClearRepoOnErr: true,
+ }, func(repo *db.GitRepo) error {
+ err = db.RejectPlanFile(auth.OrgId, planId, req.FilePath, time.Now())
+ if err != nil {
+ return err
+ }
+
+ err = repo.GitAddAndCommit(branch, fmt.Sprintf("🚫 Rejected pending changes to file: %s", req.FilePath))
+ if err != nil {
+ return fmt.Errorf("error committing rejected changes: %v", err)
+ }
+
+ return nil
+ })
+
+ if err != nil {
+ log.Printf("Error rejecting result: %v\n", err)
+ http.Error(w, "Error rejecting result: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ log.Println("Successfully rejected plan file", req.FilePath)
+}
+
+func RejectFilesHandler(w http.ResponseWriter, r *http.Request) {
+ log.Println("Received request for RejectFilesHandler")
+
+ auth := Authenticate(w, r, true)
+ if auth == nil {
+ return
+ }
+
+ vars := mux.Vars(r)
+ planId := vars["planId"]
+ branch := vars["branch"]
+
+ log.Println("planId: ", planId, "branch: ", branch)
+
+ if authorizePlan(w, planId, auth) == nil {
+ return
+ }
+
+ var req shared.RejectFilesRequest
+ err := json.NewDecoder(r.Body).Decode(&req)
+ if err != nil {
+ log.Printf("Error decoding request: %v\n", err)
+ http.Error(w, "Error decoding request: "+err.Error(), http.StatusBadRequest)
+ return
+ }
+
+ ctx, cancel := context.WithCancel(r.Context())
+
+ err = db.ExecRepoOperation(db.ExecRepoOperationParams{
+ OrgId: auth.OrgId,
+ UserId: auth.User.Id,
+ PlanId: planId,
+ Branch: branch,
+ Scope: db.LockScopeWrite,
+ Ctx: ctx,
+ CancelFn: cancel,
+ ClearRepoOnErr: true,
+ }, func(repo *db.GitRepo) error {
+ err = db.RejectPlanFiles(auth.OrgId, planId, req.Paths, time.Now())
+ if err != nil {
+ return err
+ }
+
+ msg := "🚫 Rejected pending changes to file"
+ if len(req.Paths) > 1 {
+ msg += "s"
+ }
+ msg += ":"
+
+ for _, path := range req.Paths {
+ msg += fmt.Sprintf("\n • %s", path)
+ }
+
+ err = repo.GitAddAndCommit(branch, msg)
+ if err != nil {
+ return fmt.Errorf("error committing rejected changes: %v", err)
+ }
+
+ return nil
+ })
+
+ if err != nil {
+ log.Printf("Error rejecting result: %v\n", err)
+ http.Error(w, "Error rejecting result: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ log.Println("Successfully rejected plan files", req.Paths)
+}
+
+func ArchivePlanHandler(w http.ResponseWriter, r *http.Request) {
+ auth := Authenticate(w, r, true)
+ if auth == nil {
+ return
+ }
+
+ log.Println("Received request for ArchivePlanHandler")
+
+ vars := mux.Vars(r)
+ planId := vars["planId"]
+ log.Println("planId: ", planId)
+
+ plan := authorizePlanArchive(w, planId, auth)
+
+ if plan == nil {
+ return
+ }
+
+ if plan.ArchivedAt != nil {
+ log.Println("Plan already archived")
+ http.Error(w, "Plan already archived", http.StatusBadRequest)
+ return
+ }
+
+ res, err := db.Conn.Exec("UPDATE plans SET archived_at = NOW() WHERE id = $1", planId)
+
+ if err != nil {
+ log.Printf("Error archiving plan: %v\n", err)
+ http.Error(w, "Error archiving plan: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ rowsAffected, err := res.RowsAffected()
+ if err != nil {
+ log.Printf("Error getting rows affected: %v\n", err)
+ http.Error(w, "Error getting rows affected: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ if rowsAffected == 0 {
+ log.Println("Plan not found")
+ http.Error(w, "Not found", http.StatusNotFound)
+ return
+ }
+
+ log.Println("Successfully archived plan", planId)
+}
+
+func UnarchivePlanHandler(w http.ResponseWriter, r *http.Request) {
+ auth := Authenticate(w, r, true)
+ if auth == nil {
+ return
+ }
+
+ log.Println("Received request for UnarchivePlanHandler")
+
+ vars := mux.Vars(r)
+ planId := vars["planId"]
+ log.Println("planId: ", planId)
+
+ plan := authorizePlanArchive(w, planId, auth)
+
+ if plan == nil {
+ return
+ }
+
+ if plan.ArchivedAt == nil {
+ log.Println("Plan isn't archived")
+ http.Error(w, "Plan isn't archived", http.StatusBadRequest)
+ return
+ }
+
+ res, err := db.Conn.Exec("UPDATE plans SET archived_at = NULL WHERE id = $1", planId)
+
+ if err != nil {
+		log.Printf("Error unarchiving plan: %v\n", err)
+		http.Error(w, "Error unarchiving plan: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ rowsAffected, err := res.RowsAffected()
+ if err != nil {
+ log.Printf("Error getting rows affected: %v\n", err)
+ http.Error(w, "Error getting rows affected: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ if rowsAffected == 0 {
+ log.Println("Plan not found")
+ http.Error(w, "Not found", http.StatusNotFound)
+ return
+ }
+
+ log.Println("Successfully unarchived plan", planId)
+}
+
+func GetPlanDiffsHandler(w http.ResponseWriter, r *http.Request) {
+ log.Println("Received request for GetPlanDiffs")
+
+ auth := Authenticate(w, r, true)
+ if auth == nil {
+ return
+ }
+
+ vars := mux.Vars(r)
+ planId := vars["planId"]
+ branch := vars["branch"]
+ plain := r.URL.Query().Get("plain") == "true"
+
+ log.Println("planId: ", planId, "branch: ", branch)
+
+ if authorizePlan(w, planId, auth) == nil {
+ return
+ }
+
+ ctx, cancel := context.WithCancel(r.Context())
+ var diffs string
+
+ err := db.ExecRepoOperation(db.ExecRepoOperationParams{
+ OrgId: auth.OrgId,
+ UserId: auth.User.Id,
+ PlanId: planId,
+ Branch: branch,
+ Scope: db.LockScopeRead,
+ Ctx: ctx,
+ CancelFn: cancel,
+ }, func(repo *db.GitRepo) error {
+ var err error
+ diffs, err = db.GetPlanDiffs(auth.OrgId, planId, plain)
+ if err != nil {
+ return err
+ }
+
+ return nil
+ })
+
+ if err != nil {
+ log.Printf("Error getting plan diffs: %v\n", err)
+ http.Error(w, "Error getting plan diffs: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ log.Printf("diffs: %s", diffs)
+
+ w.Write([]byte(diffs))
+
+ log.Println("Successfully retrieved plan diffs")
+}
diff --git a/app/server/handlers/plans_context.go b/app/server/handlers/plans_context.go
new file mode 100644
index 0000000000000000000000000000000000000000..2a378824774aea7d8840c5fec49f72cb5fcec571
--- /dev/null
+++ b/app/server/handlers/plans_context.go
@@ -0,0 +1,447 @@
+package handlers
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "io"
+ "log"
+ "net/http"
+ "plandex-server/db"
+
+ shared "plandex-shared"
+
+ "github.com/gorilla/mux"
+)
+
+func ListContextHandler(w http.ResponseWriter, r *http.Request) {
+ log.Println("Received request for ListContextHandler")
+
+ auth := Authenticate(w, r, true)
+ if auth == nil {
+ return
+ }
+
+ vars := mux.Vars(r)
+ planId := vars["planId"]
+ branch := vars["branch"]
+ log.Println("planId: ", planId, "branch: ", branch)
+
+ if authorizePlan(w, planId, auth) == nil {
+ return
+ }
+
+ ctx, cancel := context.WithCancel(r.Context())
+ var dbContexts []*db.Context
+
+ err := db.ExecRepoOperation(db.ExecRepoOperationParams{
+ OrgId: auth.OrgId,
+ UserId: auth.User.Id,
+ PlanId: planId,
+ Branch: branch,
+ Reason: "list contexts",
+ Scope: db.LockScopeRead,
+ Ctx: ctx,
+ CancelFn: cancel,
+ }, func(repo *db.GitRepo) error {
+ res, err := db.GetPlanContexts(auth.OrgId, planId, false, false)
+ if err != nil {
+ return err
+ }
+
+ dbContexts = res
+
+ return nil
+ })
+
+ if err != nil {
+ log.Printf("Error getting contexts: %v\n", err)
+ http.Error(w, "Error getting contexts: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ var apiContexts []*shared.Context
+
+ for _, dbContext := range dbContexts {
+ apiContexts = append(apiContexts, dbContext.ToApi())
+ }
+
+ bytes, err := json.Marshal(apiContexts)
+
+ if err != nil {
+ log.Printf("Error marshalling contexts: %v\n", err)
+ http.Error(w, "Error marshalling contexts: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ w.Write(bytes)
+}
+
+func GetContextBodyHandler(w http.ResponseWriter, r *http.Request) {
+ log.Println("Received request for GetContextBodyHandler")
+
+ auth := Authenticate(w, r, true)
+ if auth == nil {
+ return
+ }
+
+ vars := mux.Vars(r)
+ planId := vars["planId"]
+ branch := vars["branch"]
+ contextId := vars["contextId"]
+ log.Println("planId:", planId, "branch:", branch, "contextId:", contextId)
+
+ if authorizePlan(w, planId, auth) == nil {
+ return
+ }
+
+ ctx, cancel := context.WithCancel(r.Context())
+
+ var dbContexts []*db.Context
+ err := db.ExecRepoOperation(db.ExecRepoOperationParams{
+ OrgId: auth.OrgId,
+ UserId: auth.User.Id,
+ PlanId: planId,
+ Branch: branch,
+ Reason: "get context body",
+ Scope: db.LockScopeRead,
+ Ctx: ctx,
+ CancelFn: cancel,
+ }, func(repo *db.GitRepo) error {
+ res, err := db.GetPlanContexts(auth.OrgId, planId, true, false)
+ if err != nil {
+ return err
+ }
+
+ dbContexts = res
+
+ return nil
+ })
+
+ if err != nil {
+ log.Printf("Error getting contexts: %v\n", err)
+ http.Error(w, "Error getting contexts: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ var targetContext *db.Context
+ for _, dbContext := range dbContexts {
+ if dbContext.Id == contextId {
+ targetContext = dbContext
+ break
+ }
+ }
+
+ if targetContext == nil {
+ http.Error(w, "Context not found", http.StatusNotFound)
+ return
+ }
+
+ response := shared.GetContextBodyResponse{
+ Body: targetContext.Body,
+ }
+
+ bytes, err := json.Marshal(response)
+ if err != nil {
+ log.Printf("Error marshalling response: %v\n", err)
+ http.Error(w, "Error marshalling response: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ w.Write(bytes)
+}
+
+func LoadContextHandler(w http.ResponseWriter, r *http.Request) {
+ log.Println("Received request for LoadContextHandler")
+
+ auth := Authenticate(w, r, true)
+ if auth == nil {
+ return
+ }
+
+ vars := mux.Vars(r)
+ planId := vars["planId"]
+ branchName := vars["branch"]
+ log.Println("planId: ", planId)
+
+ plan := authorizePlan(w, planId, auth)
+ if plan == nil {
+ return
+ }
+
+ // read the request body
+ body, err := io.ReadAll(r.Body)
+ if err != nil {
+ log.Printf("Error reading request body: %v\n", err)
+ http.Error(w, "Error reading request body", http.StatusInternalServerError)
+ return
+ }
+ defer r.Body.Close()
+
+ var requestBody shared.LoadContextRequest
+ if err := json.Unmarshal(body, &requestBody); err != nil {
+ log.Printf("Error parsing request body: %v\n", err)
+ http.Error(w, "Error parsing request body", http.StatusBadRequest)
+ return
+ }
+
+ res, _ := loadContexts(loadContextsParams{
+ w: w,
+ r: r,
+ auth: auth,
+ loadReq: &requestBody,
+ plan: plan,
+ branchName: branchName,
+ })
+
+ if res == nil {
+ return
+ }
+
+ bytes, err := json.Marshal(res)
+
+ if err != nil {
+ log.Printf("Error marshalling response: %v\n", err)
+ http.Error(w, "Error marshalling response: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ log.Println("Successfully processed LoadContextHandler request")
+
+ w.Write(bytes)
+}
+
+func UpdateContextHandler(w http.ResponseWriter, r *http.Request) {
+ log.Println("Received request for UpdateContextHandler")
+
+ auth := Authenticate(w, r, true)
+ if auth == nil {
+ return
+ }
+
+ vars := mux.Vars(r)
+ planId := vars["planId"]
+ branchName := vars["branch"]
+ log.Println("planId: ", planId)
+
+ plan := authorizePlan(w, planId, auth)
+ if plan == nil {
+ return
+ }
+
+ // read the request body
+ body, err := io.ReadAll(r.Body)
+ if err != nil {
+ log.Printf("Error reading request body: %v\n", err)
+ http.Error(w, "Error reading request body", http.StatusInternalServerError)
+ return
+ }
+ defer r.Body.Close()
+
+ var requestBody shared.UpdateContextRequest
+ if err := json.Unmarshal(body, &requestBody); err != nil {
+ log.Printf("Error parsing request body: %v\n", err)
+ http.Error(w, "Error parsing request body", http.StatusBadRequest)
+ return
+ }
+
+ ctx, cancel := context.WithCancel(r.Context())
+
+ var updateRes *shared.UpdateContextResponse
+ err = db.ExecRepoOperation(db.ExecRepoOperationParams{
+ OrgId: auth.OrgId,
+ UserId: auth.User.Id,
+ PlanId: planId,
+ Branch: branchName,
+ Reason: "update contexts",
+ Scope: db.LockScopeWrite,
+ Ctx: ctx,
+ CancelFn: cancel,
+ ClearRepoOnErr: true,
+ }, func(repo *db.GitRepo) error {
+ var err error
+ updateRes, err = db.UpdateContexts(db.UpdateContextsParams{
+ Req: &requestBody,
+ OrgId: auth.OrgId,
+ Plan: plan,
+ BranchName: branchName,
+ })
+
+ if err != nil {
+ return err
+ }
+
+ if updateRes.MaxTokensExceeded {
+ return nil
+ }
+
+ err = repo.GitAddAndCommit(branchName, updateRes.Msg)
+
+ if err != nil {
+ return fmt.Errorf("error committing changes: %v", err)
+ }
+
+ return nil
+ })
+
+ if err != nil {
+		log.Printf("Error updating contexts: %v\n", err)
+		http.Error(w, "Error updating contexts: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+	if updateRes.MaxTokensExceeded {
+		log.Printf("The total number of tokens (%d) exceeds the maximum allowed (%d)", updateRes.TotalTokens, updateRes.MaxTokens)
+	} else {
+		log.Println("Successfully processed UpdateContextHandler request")
+	}
+
+	// the response body is the same either way -- MaxTokensExceeded is
+	// signaled through the response fields rather than an error status
+	bytes, err := json.Marshal(updateRes)
+
+	if err != nil {
+		log.Printf("Error marshalling response: %v\n", err)
+		http.Error(w, "Error marshalling response: "+err.Error(), http.StatusInternalServerError)
+		return
+	}
+
+	w.Write(bytes)
+}
+
+func DeleteContextHandler(w http.ResponseWriter, r *http.Request) {
+ log.Println("Received request for DeleteContextHandler")
+
+ auth := Authenticate(w, r, true)
+ if auth == nil {
+ return
+ }
+
+ vars := mux.Vars(r)
+ planId := vars["planId"]
+ branchName := vars["branch"]
+ log.Println("planId: ", planId)
+
+ plan := authorizePlan(w, planId, auth)
+
+ if plan == nil {
+ return
+ }
+
+ branch, err := db.GetDbBranch(planId, branchName)
+
+ if err != nil {
+ log.Printf("Error getting branch: %v\n", err)
+ http.Error(w, "Error getting branch: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ // read the request body
+ body, err := io.ReadAll(r.Body)
+ if err != nil {
+ log.Printf("Error reading request body: %v\n", err)
+ http.Error(w, "Error reading request body", http.StatusInternalServerError)
+ return
+ }
+ defer r.Body.Close()
+
+ var requestBody shared.DeleteContextRequest
+ if err := json.Unmarshal(body, &requestBody); err != nil {
+ log.Printf("Error parsing request body: %v\n", err)
+ http.Error(w, "Error parsing request body", http.StatusBadRequest)
+ return
+ }
+
+ ctx, cancel := context.WithCancel(r.Context())
+
+ var dbContexts []*db.Context
+ var toRemove []*db.Context
+ var commitMsg string
+ removeTokens := 0
+ var toRemoveApiContexts []*shared.Context
+
+ err = db.ExecRepoOperation(db.ExecRepoOperationParams{
+ OrgId: auth.OrgId,
+ UserId: auth.User.Id,
+ PlanId: planId,
+ Branch: branchName,
+ Reason: "delete contexts",
+ Scope: db.LockScopeWrite,
+ Ctx: ctx,
+ CancelFn: cancel,
+ ClearRepoOnErr: true,
+ }, func(repo *db.GitRepo) error {
+ var err error
+ dbContexts, err = db.GetPlanContexts(auth.OrgId, planId, false, false)
+
+ if err != nil {
+ return fmt.Errorf("error getting contexts: %v", err)
+ }
+
+ for _, dbContext := range dbContexts {
+ if _, ok := requestBody.Ids[dbContext.Id]; ok {
+ toRemove = append(toRemove, dbContext)
+ }
+ }
+
+ err = db.ContextRemove(auth.OrgId, planId, toRemove)
+
+ if err != nil {
+ return fmt.Errorf("error removing contexts: %v", err)
+ }
+
+ for _, dbContext := range toRemove {
+ toRemoveApiContexts = append(toRemoveApiContexts, dbContext.ToApi())
+ removeTokens += dbContext.NumTokens
+ }
+
+ commitMsg = shared.SummaryForRemoveContext(toRemoveApiContexts, branch.ContextTokens) + "\n\n" + shared.TableForRemoveContext(toRemoveApiContexts)
+
+ err = repo.GitAddAndCommit(branchName, commitMsg)
+
+ if err != nil {
+ return fmt.Errorf("error committing changes: %v", err)
+ }
+
+ return nil
+ })
+
+ if err != nil {
+ log.Printf("Error deleting contexts: %v\n", err)
+ http.Error(w, "Error deleting contexts: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
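+	// keep the branch's cached context token count in sync with the removal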
+ err = db.AddPlanContextTokens(planId, branchName, -removeTokens)
+ if err != nil {
+ log.Printf("Error updating plan tokens: %v\n", err)
+ http.Error(w, "Error updating plan tokens: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ res := shared.DeleteContextResponse{
+ TokensRemoved: removeTokens,
+ TotalTokens: branch.ContextTokens - removeTokens,
+ Msg: commitMsg,
+ }
+
+ bytes, err := json.Marshal(res)
+
+ if err != nil {
+ log.Printf("Error marshalling response: %v\n", err)
+ http.Error(w, "Error marshalling response: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ log.Println("Successfully deleted contexts")
+
+ w.Write(bytes)
+}
diff --git a/app/server/handlers/plans_convo.go b/app/server/handlers/plans_convo.go
new file mode 100644
index 0000000000000000000000000000000000000000..ea077f0e6111c7517c1cc4a88c9977f9edec268e
--- /dev/null
+++ b/app/server/handlers/plans_convo.go
@@ -0,0 +1,160 @@
+package handlers
+
+import (
+ "context"
+ "encoding/json"
+ "log"
+ "net/http"
+ "plandex-server/db"
+
+ shared "plandex-shared"
+
+ "github.com/gorilla/mux"
+)
+
+func ListConvoHandler(w http.ResponseWriter, r *http.Request) {
+ log.Println("Received a request for ListConvoHandler")
+ auth := Authenticate(w, r, true)
+ if auth == nil {
+ return
+ }
+
+ vars := mux.Vars(r)
+ planId := vars["planId"]
+ branch := vars["branch"]
+ log.Println("planId: ", planId, "branch: ", branch)
+
+ if authorizePlan(w, planId, auth) == nil {
+ return
+ }
+
+ var err error
+ var convoMessages []*db.ConvoMessage
+
+ ctx, cancel := context.WithCancel(r.Context())
+
+ err = db.ExecRepoOperation(db.ExecRepoOperationParams{
+ OrgId: auth.OrgId,
+ UserId: auth.User.Id,
+ PlanId: planId,
+ Branch: branch,
+ Reason: "list convo",
+ Scope: db.LockScopeRead,
+ Ctx: ctx,
+ CancelFn: cancel,
+ }, func(repo *db.GitRepo) error {
+ res, err := db.GetPlanConvo(auth.OrgId, planId)
+
+ if err != nil {
+ return err
+ }
+
+ convoMessages = res
+
+ return nil
+ })
+
+ if err != nil {
+ log.Println("Error getting plan convo: ", err)
+ http.Error(w, "Error getting plan convo: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ apiConvoMessages := make([]*shared.ConvoMessage, len(convoMessages))
+ for i, convoMessage := range convoMessages {
+ apiConvoMessages[i] = convoMessage.ToApi()
+ }
+
+ bytes, err := json.Marshal(apiConvoMessages)
+
+ if err != nil {
+ log.Println("Error marshalling plan convo: ", err)
+ http.Error(w, "Error marshalling plan convo: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ log.Println("Successfully processed request for ListConvoHandler")
+	w.Write(bytes)
+}
+
+func GetPlanStatusHandler(w http.ResponseWriter, r *http.Request) {
+ log.Println("Received a request for GetPlanStatusHandler")
+
+ auth := Authenticate(w, r, true)
+ if auth == nil {
+ return
+ }
+
+ vars := mux.Vars(r)
+ planId := vars["planId"]
+ branch := vars["branch"]
+
+ log.Println("planId: ", planId, "branch: ", branch)
+
+ plan := authorizePlan(w, planId, auth)
+ if plan == nil {
+ return
+ }
+
+ ctx, cancel := context.WithCancel(r.Context())
+
+ var convoMessages []*db.ConvoMessage
+ err := db.ExecRepoOperation(db.ExecRepoOperationParams{
+ OrgId: auth.OrgId,
+ UserId: auth.User.Id,
+ PlanId: planId,
+ Branch: branch,
+ Reason: "get plan status",
+ Scope: db.LockScopeRead,
+ Ctx: ctx,
+ CancelFn: cancel,
+ }, func(repo *db.GitRepo) error {
+ res, err := db.GetPlanConvo(auth.OrgId, planId)
+
+ if err != nil {
+ return err
+ }
+
+ convoMessages = res
+
+ return nil
+ })
+
+ if err != nil {
+ log.Println("Error getting plan convo: ", err)
+ http.Error(w, "Error getting plan convo: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ if len(convoMessages) == 0 {
+ log.Println("No messages found for plan")
+ return
+ }
+
+ convoMessageIds := make([]string, len(convoMessages))
+ for i, convoMessage := range convoMessages {
+ convoMessageIds[i] = convoMessage.Id
+ }
+
+	summaries, err := db.GetPlanSummaries(planId, convoMessageIds)
+
+ if err != nil {
+ log.Println("Error getting plan summaries: ", err)
+ http.Error(w, "Error getting plan summaries: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+	if len(summaries) == 0 {
+ log.Println("No summaries found for plan")
+ return
+ }
+
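+	// the most recent summary serves as the plan's status; return it as plain text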
+	latestSummary := summaries[len(summaries)-1]
+
+ bytes := []byte(latestSummary.Summary)
+
+ w.Write(bytes)
+
+ log.Println("Successfully processed request for GetPlanStatusHandler")
+}
diff --git a/app/server/handlers/plans_crud.go b/app/server/handlers/plans_crud.go
new file mode 100644
index 0000000000000000000000000000000000000000..a3e52919808fa9b8b839d59bc23a7b76a6a1e7b0
--- /dev/null
+++ b/app/server/handlers/plans_crud.go
@@ -0,0 +1,658 @@
+package handlers
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "log"
+ "net/http"
+ "plandex-server/db"
+ "plandex-server/hooks"
+ "runtime"
+ "runtime/debug"
+ "sort"
+ "strings"
+ "time"
+
+ shared "plandex-shared"
+
+ "github.com/gorilla/mux"
+)
+
+func CreatePlanHandler(w http.ResponseWriter, r *http.Request) {
+ log.Println("Received request for CreatePlanHandler")
+
+ auth := Authenticate(w, r, true)
+ if auth == nil {
+ return
+ }
+
+ if !auth.HasPermission(shared.PermissionCreatePlan) {
+ log.Println("User does not have permission to create a plan")
+ http.Error(w, "User does not have permission to create a plan", http.StatusForbidden)
+ return
+ }
+
+ vars := mux.Vars(r)
+ projectId := vars["projectId"]
+
+ log.Println("projectId: ", projectId)
+
+ if !authorizeProject(w, projectId, auth) {
+ return
+ }
+
+ _, apiErr := hooks.ExecHook(hooks.WillCreatePlan, hooks.HookParams{Auth: auth})
+ if apiErr != nil {
+ writeApiError(w, *apiErr)
+ return
+ }
+
+ // read the request body
+ body, err := io.ReadAll(r.Body)
+ if err != nil {
+ log.Printf("Error reading request body: %v\n", err)
+ http.Error(w, "Error reading request body", http.StatusInternalServerError)
+ return
+ }
+ defer r.Body.Close()
+
+ var requestBody shared.CreatePlanRequest
+ if err := json.Unmarshal(body, &requestBody); err != nil {
+ log.Printf("Error parsing request body: %v\n", err)
+ http.Error(w, "Error parsing request body", http.StatusBadRequest)
+ return
+ }
+
+ name := requestBody.Name
+ if name == "" {
+ name = "draft"
+ }
+
+ if name == "draft" {
+ // delete any existing draft plans
+ err = db.DeleteDraftPlans(auth.OrgId, projectId, auth.User.Id)
+
+ if err != nil {
+ log.Printf("Error deleting draft plans: %v\n", err)
+ http.Error(w, "Error deleting draft plans: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+ } else {
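+		// the requested name may already be taken for this user and project;
+		// append a numeric suffix (name.2, name.3, ...) until it's unique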
+ i := 2
+ originalName := name
+ for {
+ var count int
+ err := db.Conn.Get(&count, "SELECT COUNT(*) FROM plans WHERE project_id = $1 AND owner_id = $2 AND name = $3", projectId, auth.User.Id, name)
+
+ if err != nil {
+ log.Printf("Error checking if plan exists: %v\n", err)
+ http.Error(w, "Error checking if plan exists: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ if count == 0 {
+ break
+ }
+
+ name = originalName + "." + fmt.Sprint(i)
+ i++
+ }
+ }
+
+ plan, err := db.CreatePlan(r.Context(), auth.OrgId, projectId, auth.User.Id, name)
+
+ if err != nil {
+ log.Printf("Error creating plan: %v\n", err)
+ http.Error(w, "Error creating plan: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ resp := shared.CreatePlanResponse{
+ Id: plan.Id,
+ Name: plan.Name,
+ }
+
+ bytes, err := json.Marshal(resp)
+
+ if err != nil {
+ log.Printf("Error marshalling response: %v\n", err)
+ http.Error(w, "Error marshalling response: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ w.Write(bytes)
+
+ log.Printf("Successfully created plan: %v\n", plan)
+}
+
+func GetPlanHandler(w http.ResponseWriter, r *http.Request) {
+ log.Println("Received request for GetPlanHandler")
+
+ auth := Authenticate(w, r, true)
+ if auth == nil {
+ return
+ }
+
+ vars := mux.Vars(r)
+ planId := vars["planId"]
+
+ log.Println("planId: ", planId)
+
+ plan := authorizePlan(w, planId, auth)
+
+ if plan == nil {
+ return
+ }
+
+ bytes, err := json.Marshal(plan)
+
+ if err != nil {
+ log.Printf("Error marshalling plan: %v\n", err)
+ http.Error(w, "Error marshalling plan: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ w.Write(bytes)
+}
+
+func RenamePlanHandler(w http.ResponseWriter, r *http.Request) {
+ log.Println("Received request for RenamePlanHandler")
+
+ auth := Authenticate(w, r, true)
+ if auth == nil {
+ return
+ }
+
+ vars := mux.Vars(r)
+ planId := vars["planId"]
+
+ log.Println("planId: ", planId)
+
+ plan := authorizePlan(w, planId, auth)
+
+ if plan == nil {
+ return
+ }
+
+ var requestBody shared.RenamePlanRequest
+ if err := json.NewDecoder(r.Body).Decode(&requestBody); err != nil {
+ log.Printf("Error parsing request body: %v\n", err)
+ http.Error(w, "Error parsing request body", http.StatusBadRequest)
+ return
+ }
+
+ if plan.OwnerId != auth.User.Id {
+ log.Println("Only the plan owner can rename a plan")
+ http.Error(w, "Only the plan owner can rename a plan", http.StatusForbidden)
+ return
+ }
+
+ if requestBody.Name == "" {
+ log.Println("Name cannot be empty")
+ http.Error(w, "Name cannot be empty", http.StatusBadRequest)
+ return
+ }
+
+ err := db.RenamePlan(planId, requestBody.Name, nil)
+
+ if err != nil {
+ log.Printf("Error renaming plan: %v\n", err)
+ http.Error(w, "Error renaming plan: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ log.Println("Successfully renamed plan")
+}
+
+func DeletePlanHandler(w http.ResponseWriter, r *http.Request) {
+ log.Println("Received request for DeletePlanHandler")
+
+ auth := Authenticate(w, r, true)
+ if auth == nil {
+ return
+ }
+
+ vars := mux.Vars(r)
+ planId := vars["planId"]
+
+ log.Println("planId: ", planId)
+
+ plan := authorizePlanDelete(w, planId, auth)
+
+ if plan == nil {
+ return
+ }
+
+ if plan.OwnerId != auth.User.Id {
+ log.Println("Only the plan owner can delete a plan")
+ http.Error(w, "Only the plan owner can delete a plan", http.StatusForbidden)
+ return
+ }
+
+ res, err := db.Conn.Exec("DELETE FROM plans WHERE id = $1", planId)
+
+ if err != nil {
+ log.Printf("Error deleting plan: %v\n", err)
+ http.Error(w, "Error deleting plan: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ rowsAffected, err := res.RowsAffected()
+ if err != nil {
+ log.Printf("Error getting rows affected: %v\n", err)
+ http.Error(w, "Error getting rows affected: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ if rowsAffected == 0 {
+ log.Println("Plan not found")
+ http.Error(w, "Not found", http.StatusNotFound)
+ return
+ }
+
+ err = db.DeletePlanDir(auth.OrgId, planId)
+
+ if err != nil {
+ log.Printf("Error deleting plan dir: %v\n", err)
+ http.Error(w, "Error deleting plan dir: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ log.Println("Successfully deleted plan", planId)
+}
+
+func DeleteAllPlansHandler(w http.ResponseWriter, r *http.Request) {
+ log.Println("Received request for DeleteAllPlansHandler")
+
+ auth := Authenticate(w, r, true)
+ if auth == nil {
+ return
+ }
+
+ vars := mux.Vars(r)
+ projectId := vars["projectId"]
+
+ log.Println("projectId: ", projectId)
+
+ if !authorizeProject(w, projectId, auth) {
+ return
+ }
+
+ err := db.DeleteOwnerPlans(auth.OrgId, projectId, auth.User.Id)
+
+ if err != nil {
+ log.Printf("Error deleting plans: %v\n", err)
+ http.Error(w, "Error deleting plans: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ log.Println("Successfully deleted all plans")
+}
+
+func ListPlansHandler(w http.ResponseWriter, r *http.Request) {
+ log.Println("Received request for ListPlans")
+
+ auth := Authenticate(w, r, true)
+ if auth == nil {
+ return
+ }
+
+ projectIds := r.URL.Query()["projectId"]
+
+ log.Println("projectIds: ", projectIds)
+
+ var apiPlans []*shared.Plan
+
+ writePlans := func() {
+ jsonBytes, err := json.Marshal(apiPlans)
+ if err != nil {
+ log.Printf("Error marshalling plans: %v\n", err)
+ http.Error(w, "Error marshalling plans: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ w.Write(jsonBytes)
+ }
+
+ if len(projectIds) == 0 {
+ writePlans()
+ return
+ }
+
+ authorizedProjectIds := []string{}
+ for _, projectId := range projectIds {
+ if authorizeProjectOptional(w, projectId, auth, false) {
+ authorizedProjectIds = append(authorizedProjectIds, projectId)
+ }
+ }
+
+ if len(authorizedProjectIds) == 0 {
+ writePlans()
+ return
+ }
+
+ plans, err := db.ListOwnedPlans(authorizedProjectIds, auth.User.Id, false)
+
+ if err != nil {
+ log.Printf("Error listing plans: %v\n", err)
+ http.Error(w, "Error listing plans: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ for _, plan := range plans {
+ apiPlans = append(apiPlans, plan.ToApi())
+ }
+
+ writePlans()
+}
+
+func ListArchivedPlansHandler(w http.ResponseWriter, r *http.Request) {
+ log.Println("Received request for ListArchivedPlansHandler")
+ auth := Authenticate(w, r, true)
+ if auth == nil {
+ return
+ }
+
+ projectIds := r.URL.Query()["projectId"]
+
+ log.Println("projectIds: ", projectIds)
+
+ var apiPlans []*shared.Plan
+
+ writePlans := func() {
+ jsonBytes, err := json.Marshal(apiPlans)
+ if err != nil {
+ log.Printf("Error marshalling plans: %v\n", err)
+ http.Error(w, "Error marshalling plans: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ log.Println("Successfully processed ListArchivedPlansHandler request")
+
+ w.Write(jsonBytes)
+ }
+
+ if len(projectIds) == 0 {
+ writePlans()
+ return
+ }
+
+ authorizedProjectIds := []string{}
+ for _, projectId := range projectIds {
+ if authorizeProjectOptional(w, projectId, auth, false) {
+ authorizedProjectIds = append(authorizedProjectIds, projectId)
+ }
+ }
+
+ plans, err := db.ListOwnedPlans(authorizedProjectIds, auth.User.Id, true)
+
+ if err != nil {
+ log.Printf("Error listing plans: %v\n", err)
+ http.Error(w, "Error listing plans: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ for _, plan := range plans {
+ apiPlans = append(apiPlans, plan.ToApi())
+ }
+
+ writePlans()
+}
+
+func ListPlansRunningHandler(w http.ResponseWriter, r *http.Request) {
+ log.Println("Received request for ListPlansRunningHandler")
+ auth := Authenticate(w, r, true)
+ if auth == nil {
+ return
+ }
+
+ projectIds := r.URL.Query()["projectId"]
+ includeRecent := r.URL.Query().Get("recent") == "true"
+
+ log.Println("projectIds: ", projectIds)
+
+ if len(projectIds) == 0 {
+ log.Println("No project ids provided")
+ http.Error(w, "No project ids provided", http.StatusBadRequest)
+ return
+ }
+
+ for _, projectId := range projectIds {
+ if !authorizeProject(w, projectId, auth) {
+ return
+ }
+ }
+
+ plans, err := db.ListOwnedPlans(projectIds, auth.User.Id, false)
+
+ if err != nil {
+ log.Printf("Error listing plans: %v\n", err)
+ http.Error(w, "Error listing plans: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ var planIds []string
+ for _, plan := range plans {
+ planIds = append(planIds, plan.Id)
+ }
+
+ errCh := make(chan error, 2)
+ var streams []*db.ModelStream
+ var branches []*db.Branch
+
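+	// fetch streams and branches concurrently; each goroutine sends exactly
+	// one value on errCh (even on panic, via recover + Goexit), so the loop
+	// below always receives two results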
+ go func() {
+ defer func() {
+ if r := recover(); r != nil {
+ log.Printf("panic in ListPlansRunningHandler: %v\n%s", r, debug.Stack())
+ errCh <- fmt.Errorf("panic in ListPlansRunningHandler: %v\n%s", r, debug.Stack())
+ runtime.Goexit() // don't allow outer function to continue and double-send to channel
+ }
+ }()
+
+ var err error
+ if includeRecent {
+ streams, err = db.GetActiveOrRecentModelStreams(planIds)
+ } else {
+ streams, err = db.GetActiveModelStreams(planIds)
+ }
+ if err != nil {
+			errCh <- fmt.Errorf("error getting model streams: %v", err)
+ return
+ }
+ errCh <- nil
+ }()
+
+ go func() {
+ defer func() {
+ if r := recover(); r != nil {
+ log.Printf("panic in ListPlansRunningHandler: %v\n%s", r, debug.Stack())
+ errCh <- fmt.Errorf("panic in ListPlansRunningHandler: %v\n%s", r, debug.Stack())
+ runtime.Goexit() // don't allow outer function to continue and double-send to channel
+ }
+ }()
+ var err error
+ branches, err = db.ListBranchesForPlans(auth.OrgId, planIds)
+ if err != nil {
+ errCh <- fmt.Errorf("error getting branches: %v", err)
+ return
+ }
+ errCh <- nil
+ }()
+
+ for i := 0; i < 2; i++ {
+ err := <-errCh
+ if err != nil {
+ log.Println(err)
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+ }
+
+ res := shared.ListPlansRunningResponse{
+ Branches: []*shared.Branch{},
+ StreamStartedAtByBranchId: map[string]time.Time{},
+ StreamFinishedAtByBranchId: map[string]time.Time{},
+ PlansById: map[string]*shared.Plan{},
+ StreamIdByBranchId: map[string]string{},
+ }
+
+ var apiPlansById = make(map[string]*shared.Plan)
+ for _, plan := range plans {
+ apiPlan := plan.ToApi()
+ apiPlansById[plan.Id] = apiPlan
+ }
+
+ var apiBranchesByComposite = make(map[string]*shared.Branch)
+ for _, branch := range branches {
+ apiBranch := branch.ToApi()
+ apiBranchesByComposite[branch.PlanId+"|"+branch.Name] = apiBranch
+ }
+
+ addedBranches := make(map[string]bool)
+ for _, stream := range streams {
+ branchComposite := stream.PlanId + "|" + stream.Branch
+ apiBranch, ok := apiBranchesByComposite[branchComposite]
+ if !ok {
+ log.Printf("Stream %s has no branch\n", stream.Id)
+ http.Error(w, "Stream has no branch", http.StatusInternalServerError)
+ return
+ }
+
+ apiPlan, ok := apiPlansById[stream.PlanId]
+ if !ok {
+ log.Printf("Stream %s has no plan\n", stream.Id)
+ http.Error(w, "Stream has no plan", http.StatusInternalServerError)
+ return
+ }
+
+ if !addedBranches[branchComposite] {
+ res.Branches = append(res.Branches, apiBranch)
+ addedBranches[branchComposite] = true
+ }
+
+ res.StreamStartedAtByBranchId[apiBranch.Id] = stream.CreatedAt
+ if stream.FinishedAt != nil {
+ res.StreamFinishedAtByBranchId[apiBranch.Id] = *stream.FinishedAt
+ }
+ res.StreamIdByBranchId[apiBranch.Id] = stream.Id
+
+ res.PlansById[stream.PlanId] = apiPlan
+ }
+
+	sort.Slice(res.Branches, func(i, j int) bool {
+		// the StreamStartedAt/StreamFinishedAt maps are keyed by branch id
+		// (see the loop above), so look up by id here as well
+		iId := res.Branches[i].Id
+		jId := res.Branches[j].Id
+		iFinishedAt, iOk := res.StreamFinishedAtByBranchId[iId]
+		jFinishedAt, jOk := res.StreamFinishedAtByBranchId[jId]
+		iCreatedAt := res.StreamStartedAtByBranchId[iId]
+		jCreatedAt := res.StreamStartedAtByBranchId[jId]
+
+ if iOk && jOk {
+ return iFinishedAt.Before(jFinishedAt) // Sort finished streams by finishedAt in ascending order.
+ }
+ if iOk {
+ return false // Place i after j if i is finished and j is not.
+ }
+ if jOk {
+ return true // Place i before j if i is not finished and j is.
+ }
+ return iCreatedAt.Before(jCreatedAt) // Sort by createdAt in ascending order if both are unfinished.
+ })
+
+ bytes, err := json.Marshal(res)
+
+ if err != nil {
+ log.Printf("Error marshalling response: %v\n", err)
+ http.Error(w, "Error marshalling response: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ log.Println("Successfully processed ListPlansRunningHandler request")
+
+ w.Write(bytes)
+}
+
+func GetCurrentBranchByPlanIdHandler(w http.ResponseWriter, r *http.Request) {
+	log.Println("Received request for GetCurrentBranchByPlanIdHandler")
+ auth := Authenticate(w, r, true)
+ if auth == nil {
+ return
+ }
+
+ vars := mux.Vars(r)
+ projectId := vars["projectId"]
+
+ log.Println("projectId: ", projectId)
+
+ if !authorizeProject(w, projectId, auth) {
+ return
+ }
+
+ var req shared.GetCurrentBranchByPlanIdRequest
+ if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
+ log.Printf("Error parsing request body: %v\n", err)
+ http.Error(w, "Error parsing request body", http.StatusBadRequest)
+ return
+ }
+
+ plans, err := db.ListOwnedPlans([]string{projectId}, auth.User.Id, false)
+
+ if err != nil {
+ log.Printf("Error listing plans: %v\n", err)
+ http.Error(w, "Error listing plans: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ if len(plans) == 0 {
+ log.Println("No plans found")
+ http.Error(w, "No plans found", http.StatusNotFound)
+ return
+ }
+
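+	// build a single query matching each (plan_id, name) pair from the
+	// request body, using positional args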
+ query := "SELECT * FROM branches WHERE "
+
+ var orConditions []string
+ var queryArgs []interface{}
+ currentArg := 1
+ for _, plan := range plans {
+ branchName, ok := req.CurrentBranchByPlanId[plan.Id]
+
+ if !ok {
+ continue
+ }
+
+ orConditions = append(orConditions, fmt.Sprintf("(plan_id = $%d AND name = $%d)", currentArg, currentArg+1))
+ queryArgs = append(queryArgs, plan.Id, branchName)
+
+ currentArg += 2
+ }
+
+	if len(orConditions) == 0 {
+		// no plans from the request matched the user's plans -- avoid
+		// building an invalid empty WHERE clause; return an empty result
+		w.Write([]byte("{}"))
+		return
+	}
+
+	query += "(" + strings.Join(orConditions, " OR ") + ") AND archived_at IS NULL AND deleted_at IS NULL"
+
+ var branches []db.Branch
+ err = db.Conn.Select(&branches, query, queryArgs...)
+
+ if err != nil {
+ log.Printf("Error getting branches: %v\n", err)
+ http.Error(w, "Error getting branches: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ res := map[string]*shared.Branch{}
+ for _, branch := range branches {
+ res[branch.PlanId] = branch.ToApi()
+ }
+
+ bytes, err := json.Marshal(res)
+
+ if err != nil {
+ log.Printf("Error marshalling branches: %v\n", err)
+ http.Error(w, "Error marshalling branches: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ log.Println("Successfully processed GetCurrentBranchByPlanIdHandler request")
+
+ w.Write(bytes)
+}
diff --git a/app/server/handlers/plans_exec.go b/app/server/handlers/plans_exec.go
new file mode 100644
index 0000000000000000000000000000000000000000..c3cd78336b691d779f04b3c28f7fd222e01ea231
--- /dev/null
+++ b/app/server/handlers/plans_exec.go
@@ -0,0 +1,632 @@
+package handlers
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "io"
+ "log"
+ "net/http"
+ "plandex-server/db"
+ "plandex-server/hooks"
+ "plandex-server/host"
+ modelPlan "plandex-server/model/plan"
+ "plandex-server/notify"
+ "plandex-server/types"
+ "time"
+
+ shared "plandex-shared"
+
+ "github.com/gorilla/mux"
+)
+
+func TellPlanHandler(w http.ResponseWriter, r *http.Request) {
+ log.Println("Received request for TellPlanHandler", "ip:", host.Ip)
+
+ auth := Authenticate(w, r, true)
+ if auth == nil {
+ return
+ }
+
+ vars := mux.Vars(r)
+ planId := vars["planId"]
+ branch := vars["branch"]
+
+ log.Println("planId: ", planId)
+
+ plan := authorizePlanExecUpdate(w, planId, auth)
+ if plan == nil {
+ return
+ }
+
+ settings, err := db.GetPlanSettings(plan)
+ if err != nil {
+ log.Printf("Error getting plan settings: %v\n", err)
+ http.Error(w, "Error getting plan settings", http.StatusInternalServerError)
+ return
+ }
+
+ body, err := io.ReadAll(r.Body)
+ if err != nil {
+ log.Printf("Error reading request body: %v\n", err)
+ go notify.NotifyErr(notify.SeverityError, fmt.Errorf("error reading request body: %v", err))
+ http.Error(w, "Error reading request body", http.StatusInternalServerError)
+ return
+ }
+ defer func() {
+ log.Println("Closing request body")
+ r.Body.Close()
+ }()
+
+ var requestBody shared.TellPlanRequest
+ if err := json.Unmarshal(body, &requestBody); err != nil {
+ log.Printf("Error parsing request body: %v\n", err)
+ go notify.NotifyErr(notify.SeverityError, fmt.Errorf("error parsing request body: %v", err))
+ http.Error(w, "Error parsing request body", http.StatusBadRequest)
+ return
+ }
+
+ _, apiErr := hooks.ExecHook(hooks.WillTellPlan, hooks.HookParams{
+ Auth: auth,
+ Plan: plan,
+ })
+ if apiErr != nil {
+ go notify.NotifyErr(notify.SeverityError, fmt.Errorf("error executing will tell plan hook: %v", apiErr))
+ writeApiError(w, *apiErr)
+ return
+ }
+
+ orgUserConfig, err := db.GetOrgUserConfig(auth.User.Id, auth.OrgId)
+ if err != nil {
+ log.Printf("Error getting org user config: %v\n", err)
+ http.Error(w, "Error getting org user config", http.StatusInternalServerError)
+ return
+ }
+
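+	// build model provider clients from the request's API keys and auth vars,
+	// scoped by the plan's settings and the user's org-level config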
+ res := initClients(
+ initClientsParams{
+ w: w,
+ auth: auth,
+ apiKeys: requestBody.ApiKeys,
+ openAIOrgId: requestBody.OpenAIOrgId,
+ authVars: requestBody.AuthVars,
+ plan: plan,
+ settings: settings,
+ orgUserConfig: orgUserConfig,
+ },
+ )
+ err = modelPlan.Tell(modelPlan.TellParams{
+ Clients: res.clients,
+ Plan: plan,
+ Branch: branch,
+ Auth: auth,
+ Req: &requestBody,
+ AuthVars: res.authVars,
+ })
+
+ if err != nil {
+ log.Printf("Error telling plan: %v\n", err)
+ go notify.NotifyErr(notify.SeverityError, fmt.Errorf("error telling plan: %v", err))
+ http.Error(w, "Error telling plan: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ if requestBody.ConnectStream {
+ startResponseStream(r.Context(), w, auth, planId, branch, false)
+ }
+
+ log.Println("Successfully processed request for TellPlanHandler")
+}
+
+func BuildPlanHandler(w http.ResponseWriter, r *http.Request) {
+ log.Println("Received request for BuildPlanHandler", "ip:", host.Ip)
+ auth := Authenticate(w, r, true)
+ if auth == nil {
+ return
+ }
+
+ vars := mux.Vars(r)
+ planId := vars["planId"]
+ branch := vars["branch"]
+
+ log.Println("planId: ", planId)
+ plan := authorizePlanExecUpdate(w, planId, auth)
+ if plan == nil {
+ return
+ }
+
+ settings, err := db.GetPlanSettings(plan)
+ if err != nil {
+ log.Printf("Error getting plan settings: %v\n", err)
+ http.Error(w, "Error getting plan settings", http.StatusInternalServerError)
+ return
+ }
+
+ body, err := io.ReadAll(r.Body)
+ if err != nil {
+ log.Printf("Error reading request body: %v\n", err)
+ go notify.NotifyErr(notify.SeverityError, fmt.Errorf("error reading request body: %v", err))
+ http.Error(w, "Error reading request body", http.StatusInternalServerError)
+ return
+ }
+ defer func() {
+ log.Println("Closing request body")
+ r.Body.Close()
+ }()
+
+ var requestBody shared.BuildPlanRequest
+ if err := json.Unmarshal(body, &requestBody); err != nil {
+ log.Printf("Error parsing request body: %v\n", err)
+ go notify.NotifyErr(notify.SeverityError, fmt.Errorf("error parsing request body: %v", err))
+ http.Error(w, "Error parsing request body", http.StatusBadRequest)
+ return
+ }
+
+ orgUserConfig, err := db.GetOrgUserConfig(auth.User.Id, auth.OrgId)
+ if err != nil {
+ log.Printf("Error getting org user config: %v\n", err)
+ http.Error(w, "Error getting org user config", http.StatusInternalServerError)
+ return
+ }
+
+ res := initClients(
+ initClientsParams{
+ w: w,
+ auth: auth,
+ apiKeys: requestBody.ApiKeys,
+ openAIOrgId: requestBody.OpenAIOrgId,
+ authVars: requestBody.AuthVars,
+ plan: plan,
+ settings: settings,
+ orgUserConfig: orgUserConfig,
+ },
+ )
+ numBuilds, err := modelPlan.Build(modelPlan.BuildParams{
+ Clients: res.clients,
+ AuthVars: res.authVars,
+ Plan: plan,
+ Branch: branch,
+ Auth: auth,
+ SessionId: requestBody.SessionId,
+ OrgUserConfig: orgUserConfig,
+ Settings: settings,
+ })
+
+ if err != nil {
+ log.Printf("Error building plan: %v\n", err)
+ go notify.NotifyErr(notify.SeverityError, fmt.Errorf("error building plan: %v", err))
+ http.Error(w, "Error building plan", http.StatusInternalServerError)
+ return
+ }
+
+ if numBuilds == 0 {
+ log.Println("No builds were executed")
+ go notify.NotifyErr(notify.SeverityInfo, fmt.Errorf("no builds were executed"))
+ http.Error(w, shared.NoBuildsErr, http.StatusNotFound)
+ return
+ }
+
+ if requestBody.ConnectStream {
+ startResponseStream(r.Context(), w, auth, planId, branch, false)
+ }
+
+ log.Println("Successfully processed request for BuildPlanHandler")
+}
+
+func ConnectPlanHandler(w http.ResponseWriter, r *http.Request) {
+ log.Println("Received request for ConnectPlanHandler", "ip:", host.Ip)
+
+ vars := mux.Vars(r)
+ planId := vars["planId"]
+ branch := vars["branch"]
+ log.Println("planId: ", planId)
+ log.Println("branch: ", branch)
+ active := modelPlan.GetActivePlan(planId, branch)
+ isProxy := r.URL.Query().Get("proxy") == "true"
+
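+	// if this host doesn't hold the active plan, proxy to the host that does;
+	// a request that was already proxied once is not proxied again, avoiding loops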
+ if active == nil {
+ if isProxy {
+ log.Println("No active plan on proxied request")
+ go notify.NotifyErr(notify.SeverityInfo, fmt.Errorf("no active plan on proxied request"))
+ http.Error(w, "No active plan", http.StatusNotFound)
+ return
+ }
+
+ log.Println("No active plan -- proxying request")
+
+ proxyActivePlanMethod(w, r, planId, branch, "connect")
+ return
+ }
+
+ auth := Authenticate(w, r, true)
+ if auth == nil {
+ log.Println("No auth")
+ return
+ }
+
+ plan := authorizePlan(w, planId, auth)
+ if plan == nil {
+ log.Println("No plan")
+ return
+ }
+
+ startResponseStream(r.Context(), w, auth, planId, branch, true)
+
+ log.Println("Successfully processed request for ConnectPlanHandler")
+}
+
+func StopPlanHandler(w http.ResponseWriter, r *http.Request) {
+ log.Println("Received request for StopPlanHandler", "ip:", host.Ip)
+
+ vars := mux.Vars(r)
+ planId := vars["planId"]
+ branch := vars["branch"]
+ log.Println("planId: ", planId)
+ log.Println("branch: ", branch)
+ active := modelPlan.GetActivePlan(planId, branch)
+ isProxy := r.URL.Query().Get("proxy") == "true"
+
+ if active == nil {
+ if isProxy {
+ log.Println("No active plan on proxied request")
+ http.Error(w, "No active plan", http.StatusNotFound)
+ return
+ }
+ proxyActivePlanMethod(w, r, planId, branch, "stop")
+ return
+ }
+
+ auth := Authenticate(w, r, true)
+ if auth == nil {
+ return
+ }
+
+ if authorizePlan(w, planId, auth) == nil {
+ return
+ }
+
+ log.Println("Sending stream aborted message to client")
+
+ active.Stream(shared.StreamMessage{
+ Type: shared.StreamMessageAborted,
+ })
+
+ // give some time for stream message to be processed before canceling
+ log.Println("Sleeping for 100ms before canceling")
+ time.Sleep(100 * time.Millisecond)
+
+ var err error
+ ctx, cancel := context.WithCancel(r.Context())
+
+ // this is here to ensure that the plan is stopped even if the db operation fails
+ defer func() {
+ err = modelPlan.Stop(planId, branch, auth.User.Id, auth.OrgId)
+
+ if err != nil {
+ log.Printf("Error stopping plan: %v\n", err)
+ }
+
+ log.Println("Successfully processed request for StopPlanHandler")
+ }()
+
+ err = db.ExecRepoOperation(db.ExecRepoOperationParams{
+ OrgId: auth.OrgId,
+ UserId: auth.User.Id,
+ PlanId: planId,
+ Branch: branch,
+ Reason: "stop plan",
+ Scope: db.LockScopeWrite,
+ Ctx: ctx,
+ CancelFn: cancel,
+ }, func(repo *db.GitRepo) error {
+ log.Println("Stopping plan - storing partial reply")
+ err = modelPlan.StorePartialReply(repo, planId, branch, auth.User.Id, auth.OrgId)
+ return err
+ })
+
+ if err != nil {
+ log.Printf("Error storing partial reply: %v\n", err)
+ http.Error(w, "Error storing partial reply", http.StatusInternalServerError)
+ return
+ }
+}
+
+func RespondMissingFileHandler(w http.ResponseWriter, r *http.Request) {
+ log.Println("Received request for RespondMissingFileHandler", "ip:", host.Ip)
+
+ vars := mux.Vars(r)
+ planId := vars["planId"]
+ branch := vars["branch"]
+ log.Println("planId: ", planId)
+ log.Println("branch: ", branch)
+ isProxy := r.URL.Query().Get("proxy") == "true"
+
+ active := modelPlan.GetActivePlan(planId, branch)
+ if active == nil {
+ if isProxy {
+ log.Println("No active plan on proxied request")
+ http.Error(w, "No active plan", http.StatusNotFound)
+ return
+ }
+
+ proxyActivePlanMethod(w, r, planId, branch, "respond_missing_file")
+ return
+ }
+
+ auth := Authenticate(w, r, true)
+ if auth == nil {
+ return
+ }
+
+ plan := authorizePlan(w, planId, auth)
+ if plan == nil {
+ return
+ }
+
+ // read the request body
+ body, err := io.ReadAll(r.Body)
+ if err != nil {
+ log.Printf("Error reading request body: %v\n", err)
+ http.Error(w, "Error reading request body", http.StatusInternalServerError)
+ return
+ }
+ defer r.Body.Close()
+
+ var requestBody shared.RespondMissingFileRequest
+ if err := json.Unmarshal(body, &requestBody); err != nil {
+ log.Printf("Error parsing request body: %v\n", err)
+ http.Error(w, "Error parsing request body", http.StatusBadRequest)
+ return
+ }
+
+ log.Println("missing file choice:", requestBody.Choice)
+
+ if requestBody.Choice == shared.RespondMissingFileChoiceLoad {
+ log.Println("loading missing file")
+ res, dbContexts := loadContexts(loadContextsParams{
+ w: w,
+ r: r,
+ auth: auth,
+ loadReq: &shared.LoadContextRequest{
+ &shared.LoadContextParams{
+ ContextType: shared.ContextFileType,
+ Name: requestBody.FilePath,
+ FilePath: requestBody.FilePath,
+ Body: requestBody.Body,
+ },
+ },
+ plan: plan,
+ branchName: branch,
+ autoLoaded: true,
+ })
+ if res == nil {
+ return
+ }
+
+ dbContext := dbContexts[0]
+
+ log.Println("loaded missing file:", dbContext.FilePath)
+
+ modelPlan.UpdateActivePlan(planId, branch, func(activePlan *types.ActivePlan) {
+ if activePlan == nil {
+ log.Println("Active plan is nil")
+ http.Error(w, "Active plan is nil", http.StatusInternalServerError)
+ return
+ }
+ activePlan.Contexts = append(activePlan.Contexts, dbContext)
+ activePlan.ContextsByPath[dbContext.FilePath] = dbContext
+ })
+ }
+
+ // This will resume model stream
+ log.Println("Resuming model stream")
+ active.MissingFileResponseCh <- requestBody.Choice
+
+ log.Println("Successfully processed request for RespondMissingFileHandler")
+}
+
+func AutoLoadContextHandler(w http.ResponseWriter, r *http.Request) {
+ log.Println("Received request for AutoLoadContextHandler", "ip:", host.Ip)
+
+ vars := mux.Vars(r)
+ planId := vars["planId"]
+ branch := vars["branch"]
+ log.Println("planId: ", planId)
+ log.Println("branch: ", branch)
+
+ isProxy := r.URL.Query().Get("proxy") == "true"
+
+ active := modelPlan.GetActivePlan(planId, branch)
+ if active == nil {
+ if isProxy {
+ log.Println("No active plan on proxied request")
+ http.Error(w, "No active plan", http.StatusNotFound)
+ return
+ }
+
+ proxyActivePlanMethod(w, r, planId, branch, "auto_load_context")
+ return
+ }
+
+ auth := Authenticate(w, r, true)
+ if auth == nil {
+ return
+ }
+
+ plan := authorizePlan(w, planId, auth)
+ if plan == nil {
+ return
+ }
+
+ var err error
+ defer func() {
+ if err == nil {
+ active.AutoLoadContextCh <- struct{}{}
+ } else {
+ active.StreamDoneCh <- &shared.ApiError{
+ Type: shared.ApiErrorTypeOther,
+ Status: http.StatusInternalServerError,
+ Msg: "Error in AutoLoadContextHandler: " + err.Error(),
+ }
+ }
+ }()
+
+ body, err := io.ReadAll(r.Body)
+ if err != nil {
+ log.Printf("Error reading request body: %v\n", err)
+ http.Error(w, "Error reading request body", http.StatusInternalServerError)
+ return
+ }
+ defer r.Body.Close()
+
+ var requestBody shared.LoadContextRequest
+	if err = json.Unmarshal(body, &requestBody); err != nil {
+ log.Printf("Error parsing request body: %v\n", err)
+ http.Error(w, "Error parsing request body", http.StatusBadRequest)
+ return
+ }
+
+ log.Println("AutoLoadContextHandler - loading contexts")
+
+ var res *shared.LoadContextResponse
+ var dbContexts []*db.Context
+ if len(requestBody) > 0 {
+ res, dbContexts = loadContexts(loadContextsParams{
+ w: w,
+ r: r,
+ auth: auth,
+ loadReq: &requestBody,
+ plan: plan,
+ branchName: branch,
+ autoLoaded: true,
+ })
+ }
+
+ if res == nil {
+ // the client will treat this as a no-op
+ markdownRes := shared.LoadContextResponse{
+ TokensAdded: 0,
+ TotalTokens: 0,
+ MaxTokensExceeded: false,
+ MaxTokens: 0,
+ Msg: "",
+ }
+
+ bytes, err := json.Marshal(markdownRes)
+ if err != nil {
+ log.Printf("Error marshalling response: %v\n", err)
+ http.Error(w, "Error marshalling response", http.StatusInternalServerError)
+ return
+ }
+
+ w.Write(bytes)
+ return
+ }
+
+ log.Println("AutoLoadContextHandler - updating active plan")
+
+ modelPlan.UpdateActivePlan(planId, branch, func(activePlan *types.ActivePlan) {
+ if activePlan == nil {
+ log.Println("Active plan is nil")
+ http.Error(w, "Active plan is nil", http.StatusInternalServerError)
+ return
+ }
+ activePlan.Contexts = append(activePlan.Contexts, dbContexts...)
+ for _, dbContext := range dbContexts {
+ activePlan.ContextsByPath[dbContext.FilePath] = dbContext
+ }
+ })
+
+ log.Println("AutoLoadContextHandler - updated active plan")
+
+ var apiContexts []*shared.Context
+ for _, dbContext := range dbContexts {
+ apiContexts = append(apiContexts, dbContext.ToApi())
+ }
+
+ msg := shared.SummaryForLoadContext(apiContexts, res.TokensAdded, res.TotalTokens)
+ msg += "\n\n" + shared.TableForLoadContext(apiContexts, true)
+
+ markdownRes := shared.LoadContextResponse{
+ TokensAdded: res.TokensAdded,
+ TotalTokens: res.TotalTokens,
+ MaxTokensExceeded: res.MaxTokensExceeded,
+ MaxTokens: res.MaxTokens,
+ Msg: msg,
+ }
+
+ bytes, err := json.Marshal(markdownRes)
+ if err != nil {
+ log.Printf("Error marshalling response: %v\n", err)
+ http.Error(w, "Error marshalling response", http.StatusInternalServerError)
+ return
+ }
+
+ w.Write(bytes)
+
+ log.Println("Successfully processed request for AutoLoadContextHandler")
+}
+
+func GetBuildStatusHandler(w http.ResponseWriter, r *http.Request) {
+ // logs are too chatty on this function, uncomment if you need to debug
+ // log.Println("Received request for GetBuildStatusHandler", "ip:", host.Ip)
+
+ vars := mux.Vars(r)
+ planId := vars["planId"]
+ branch := vars["branch"]
+
+ isProxy := r.URL.Query().Get("proxy") == "true"
+
+ active := modelPlan.GetActivePlan(planId, branch)
+ if active == nil {
+ if isProxy {
+ log.Println("No active plan on proxied request")
+ http.Error(w, "No active plan", http.StatusNotFound)
+ return
+ }
+
+		proxyActivePlanMethod(w, r, planId, branch, "build_status")
+ return
+ }
+
+ auth := Authenticate(w, r, true)
+ if auth == nil {
+ return
+ }
+
+ plan := authorizePlan(w, planId, auth)
+ if plan == nil {
+ return
+ }
+
+ response := shared.GetBuildStatusResponse{
+ BuiltFiles: active.BuiltFiles,
+ IsBuildingByPath: active.IsBuildingByPath,
+ }
+
+ bytes, err := json.Marshal(response)
+ if err != nil {
+ log.Printf("Error marshalling response: %v\n", err)
+ http.Error(w, "Error marshalling response", http.StatusInternalServerError)
+ return
+ }
+
+ w.Write(bytes)
+
+ // log.Println("Successfully processed request for GetBuildStatusHandler")
+}
+
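+// authorizePlanExecUpdate authorizes plan access and additionally requires that the
+// caller either owns the plan or holds the update-any-plan permission.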
+func authorizePlanExecUpdate(w http.ResponseWriter, planId string, auth *types.ServerAuth) *db.Plan {
+ plan := authorizePlan(w, planId, auth)
+ if plan == nil {
+ return nil
+ }
+
+ if plan.OwnerId != auth.User.Id && !auth.HasPermission(shared.PermissionUpdateAnyPlan) {
+ log.Println("User does not have permission to update plan")
+ http.Error(w, "User does not have permission to update plan", http.StatusForbidden)
+ return nil
+ }
+
+ return plan
+}
diff --git a/app/server/handlers/plans_versions.go b/app/server/handlers/plans_versions.go
new file mode 100644
index 0000000000000000000000000000000000000000..ae19fe05294613c208997c1bbf916545a8dca2fe
--- /dev/null
+++ b/app/server/handlers/plans_versions.go
@@ -0,0 +1,184 @@
+package handlers
+
+import (
+ "context"
+ "encoding/json"
+ "io"
+ "log"
+ "net/http"
+ "plandex-server/db"
+
+ shared "plandex-shared"
+
+ "github.com/gorilla/mux"
+)
+
+func ListLogsHandler(w http.ResponseWriter, r *http.Request) {
+ log.Println("Received request for ListLogsHandler")
+
+ auth := Authenticate(w, r, true)
+ if auth == nil {
+ return
+ }
+
+ vars := mux.Vars(r)
+ planId := vars["planId"]
+ branch := vars["branch"]
+
+ log.Println("planId: ", planId, "branch: ", branch)
+
+ if authorizePlan(w, planId, auth) == nil {
+ return
+ }
+
+ ctx, cancel := context.WithCancel(r.Context())
+
+ var body string
+ var shas []string
+
+ err := db.ExecRepoOperation(db.ExecRepoOperationParams{
+ OrgId: auth.OrgId,
+ UserId: auth.User.Id,
+ PlanId: planId,
+ Branch: branch,
+ Reason: "list logs",
+ Scope: db.LockScopeRead,
+ Ctx: ctx,
+ CancelFn: cancel,
+ }, func(repo *db.GitRepo) error {
+ var err error
+ body, shas, err = repo.GetGitCommitHistory(branch)
+ if err != nil {
+ return err
+ }
+
+ return nil
+ })
+
+ if err != nil {
+ log.Println("Error getting logs: ", err)
+ w.WriteHeader(http.StatusInternalServerError)
+ return
+ }
+
+ res := shared.LogResponse{
+ Body: body,
+ Shas: shas,
+ }
+
+ bytes, err := json.Marshal(res)
+
+ if err != nil {
+ log.Println("Error marshalling logs: ", err)
+ w.WriteHeader(http.StatusInternalServerError)
+ return
+ }
+
+ w.Write(bytes)
+
+ log.Println("Successfully processed request for ListLogsHandler")
+}
+
+func RewindPlanHandler(w http.ResponseWriter, r *http.Request) {
+ log.Println("Received request for RewindPlanHandler")
+
+ auth := Authenticate(w, r, true)
+ if auth == nil {
+ return
+ }
+
+ vars := mux.Vars(r)
+ planId := vars["planId"]
+ branch := vars["branch"]
+
+	log.Println("planId: ", planId, "branch: ", branch)
+
+ if authorizePlan(w, planId, auth) == nil {
+ return
+ }
+
+ // read the request body
+ body, err := io.ReadAll(r.Body)
+ if err != nil {
+ log.Printf("Error reading request body: %v\n", err)
+ http.Error(w, "Error reading request body", http.StatusInternalServerError)
+ return
+ }
+ defer r.Body.Close()
+
+ var requestBody shared.RewindPlanRequest
+ if err := json.Unmarshal(body, &requestBody); err != nil {
+ log.Printf("Error parsing request body: %v\n", err)
+ http.Error(w, "Error parsing request body", http.StatusBadRequest)
+ return
+ }
+
+ ctx, cancel := context.WithCancel(r.Context())
+
+ err = db.ExecRepoOperation(db.ExecRepoOperationParams{
+ OrgId: auth.OrgId,
+ UserId: auth.User.Id,
+ PlanId: planId,
+ Branch: branch,
+ Reason: "rewind plan",
+ Scope: db.LockScopeWrite,
+ Ctx: ctx,
+ CancelFn: cancel,
+ }, func(repo *db.GitRepo) error {
+ return repo.GitRewindToSha(branch, requestBody.Sha)
+ })
+
+ if err != nil {
+ log.Println("Error rewinding plan: ", err)
+ w.WriteHeader(http.StatusInternalServerError)
+ return
+ }
+
+ err = db.SyncPlanTokens(auth.OrgId, planId, branch)
+
+ if err != nil {
+ log.Println("Error syncing plan tokens: ", err)
+ w.WriteHeader(http.StatusInternalServerError)
+ return
+ }
+
+ var sha string
+ var latest string
+
+ err = db.ExecRepoOperation(db.ExecRepoOperationParams{
+ OrgId: auth.OrgId,
+ UserId: auth.User.Id,
+ PlanId: planId,
+ Branch: branch,
+ Reason: "get latest commit",
+ Scope: db.LockScopeRead,
+ Ctx: ctx,
+ CancelFn: cancel,
+ }, func(repo *db.GitRepo) error {
+ sha, latest, err = repo.GetLatestCommit(branch)
+ return err
+ })
+
+ if err != nil {
+ log.Println("Error getting latest commit: ", err)
+ w.WriteHeader(http.StatusInternalServerError)
+ return
+ }
+
+ res := shared.RewindPlanResponse{
+ LatestSha: sha,
+ LatestCommit: latest,
+ }
+
+ bytes, err := json.Marshal(res)
+
+ if err != nil {
+ log.Println("Error marshalling response: ", err)
+ w.WriteHeader(http.StatusInternalServerError)
+ return
+ }
+
+ w.Write(bytes)
+
+ log.Println("Successfully processed request for RewindPlanHandler")
+}
diff --git a/app/server/handlers/projects.go b/app/server/handlers/projects.go
new file mode 100644
index 0000000000000000000000000000000000000000..80337c43d846a8730c7d4e5f4f69f47f5db6ded1
--- /dev/null
+++ b/app/server/handlers/projects.go
@@ -0,0 +1,228 @@
+package handlers
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "log"
+ "net/http"
+ "plandex-server/db"
+
+ shared "plandex-shared"
+
+ "github.com/gorilla/mux"
+ "github.com/jmoiron/sqlx"
+)
+
+func CreateProjectHandler(w http.ResponseWriter, r *http.Request) {
+ log.Println("Received request for CreateProjectHandler")
+
+ auth := Authenticate(w, r, true)
+ if auth == nil {
+ return
+ }
+
+ // read the request body
+ body, err := io.ReadAll(r.Body)
+ if err != nil {
+ log.Printf("Error reading request body: %v\n", err)
+ http.Error(w, "Error reading request body", http.StatusInternalServerError)
+ return
+ }
+ defer r.Body.Close()
+
+ var requestBody shared.CreateProjectRequest
+ if err := json.Unmarshal(body, &requestBody); err != nil {
+ log.Printf("Error parsing request body: %v\n", err)
+ http.Error(w, "Error parsing request body", http.StatusBadRequest)
+ return
+ }
+
+ if requestBody.Name == "" {
+ log.Println("Received empty name field")
+ http.Error(w, "name field is required", http.StatusBadRequest)
+ return
+ }
+
+ var projectId string
+ err = db.WithTx(r.Context(), "create project", func(tx *sqlx.Tx) error {
+ var err error
+
+ projectId, err = db.CreateProject(auth.OrgId, requestBody.Name, tx)
+
+ if err != nil {
+ log.Printf("Error creating project: %v\n", err)
+ return fmt.Errorf("error creating project: %v", err)
+ }
+
+ return nil
+ })
+
+ if err != nil {
+ log.Printf("Error creating project: %v\n", err)
+ http.Error(w, "Error creating project: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ resp := shared.CreateProjectResponse{
+ Id: projectId,
+ }
+
+ bytes, err := json.Marshal(resp)
+
+ if err != nil {
+ log.Printf("Error marshalling response: %v\n", err)
+ http.Error(w, "Error marshalling response: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ w.Write(bytes)
+
+ log.Println("Successfully created project", projectId)
+}
+
+func ListProjectsHandler(w http.ResponseWriter, r *http.Request) {
+ log.Println("Received request for ListProjectsHandler")
+
+ auth := Authenticate(w, r, true)
+ if auth == nil {
+ return
+ }
+
+ rows, err := db.Conn.Query("SELECT id, name FROM projects WHERE org_id = $1", auth.OrgId)
+
+ if err != nil {
+ log.Printf("Error listing projects: %v\n", err)
+ http.Error(w, "Error listing projects: "+err.Error(), http.StatusInternalServerError)
+ return
+	}
+	defer rows.Close()
+
+ var projects []shared.Project
+
+ for rows.Next() {
+ var project shared.Project
+ err := rows.Scan(&project.Id, &project.Name)
+ if err != nil {
+ log.Printf("Error scanning project: %v\n", err)
+ http.Error(w, "Error scanning project: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+ projects = append(projects, project)
+ }
+
+ bytes, err := json.Marshal(projects)
+ if err != nil {
+ log.Printf("Error marshalling projects: %v\n", err)
+ http.Error(w, "Error marshalling projects: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ w.Write(bytes)
+}
+
+func ProjectSetPlanHandler(w http.ResponseWriter, r *http.Request) {
+	log.Println("Received request for ProjectSetPlanHandler")
+ auth := Authenticate(w, r, true)
+ if auth == nil {
+ return
+ }
+
+ vars := mux.Vars(r)
+ projectId := vars["projectId"]
+
+ log.Println("projectId: ", projectId)
+
+ if !authorizeProject(w, projectId, auth) {
+ return
+ }
+
+ // read the request body
+ body, err := io.ReadAll(r.Body)
+ if err != nil {
+ log.Printf("Error reading request body: %v\n", err)
+ http.Error(w, "Error reading request body", http.StatusInternalServerError)
+ return
+ }
+ defer r.Body.Close()
+
+ var requestBody shared.SetProjectPlanRequest
+ if err := json.Unmarshal(body, &requestBody); err != nil {
+ log.Printf("Error parsing request body: %v\n", err)
+ http.Error(w, "Error parsing request body", http.StatusBadRequest)
+ return
+ }
+
+ if requestBody.PlanId == "" {
+ log.Println("Received empty planId field")
+ http.Error(w, "planId field is required", http.StatusBadRequest)
+ return
+ }
+
+	// TODO: run the update statement here -- needs auth / current user id
+
+ log.Println("Successfully set project plan", projectId)
+}
+
+func RenameProjectHandler(w http.ResponseWriter, r *http.Request) {
+ log.Println("Received request for RenameProjectHandler")
+ auth := Authenticate(w, r, true)
+ if auth == nil {
+ return
+ }
+
+ vars := mux.Vars(r)
+ projectId := vars["projectId"]
+
+ log.Println("projectId: ", projectId)
+
+ if !authorizeProjectRename(w, projectId, auth) {
+ return
+ }
+
+ // read the request body
+ body, err := io.ReadAll(r.Body)
+ if err != nil {
+ log.Printf("Error reading request body: %v\n", err)
+ http.Error(w, "Error reading request body", http.StatusInternalServerError)
+ return
+ }
+ defer r.Body.Close()
+
+ var requestBody shared.RenameProjectRequest
+ if err := json.Unmarshal(body, &requestBody); err != nil {
+ log.Printf("Error parsing request body: %v\n", err)
+ http.Error(w, "Error parsing request body", http.StatusBadRequest)
+ return
+ }
+
+ if requestBody.Name == "" {
+ log.Println("Received empty name field")
+ http.Error(w, "name field is required", http.StatusBadRequest)
+ return
+ }
+
+ res, err := db.Conn.Exec("UPDATE projects SET name = $1 WHERE id = $2", requestBody.Name, projectId)
+
+ if err != nil {
+ log.Printf("Error updating project: %v\n", err)
+ http.Error(w, "Error updating project: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ rowsAffected, err := res.RowsAffected()
+
+ if err != nil {
+ log.Printf("Error getting rows affected: %v\n", err)
+ http.Error(w, "Error getting rows affected: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ if rowsAffected == 0 {
+ log.Printf("Project not found: %v\n", projectId)
+ http.Error(w, "Project not found: "+projectId, http.StatusNotFound)
+ return
+ }
+
+ log.Println("Successfully renamed project", projectId)
+
+}
diff --git a/app/server/handlers/proxy_helper.go b/app/server/handlers/proxy_helper.go
new file mode 100644
index 0000000000000000000000000000000000000000..2b84881ebad3b39fbc91f497c467ef664e87fe42
--- /dev/null
+++ b/app/server/handlers/proxy_helper.go
@@ -0,0 +1,106 @@
+package handlers
+
+import (
+ "fmt"
+ "io"
+ "log"
+ "net/http"
+ "os"
+ "plandex-server/db"
+ "plandex-server/host"
+ "time"
+
+ shared "plandex-shared"
+)
+
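+// proxyActivePlanMethod forwards a plan request to the server instance that holds
+// the active model stream for the given plan and branch. Active plans live in
+// memory on a single host, so in multi-node deployments other nodes must proxy to it.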
+func proxyActivePlanMethod(w http.ResponseWriter, r *http.Request, planId, branch, method string) {
+ modelStream, err := db.GetActiveModelStream(planId, branch)
+
+ if err != nil {
+ log.Printf("Error getting active model stream: %v\n", err)
+ http.Error(w, "Error getting active model stream", http.StatusInternalServerError)
+ return
+ }
+
+ if modelStream == nil {
+ log.Printf("No active model stream for plan %s\n", planId)
+ http.Error(w, "No active model stream for plan", http.StatusNotFound)
+ return
+ }
+
+ if modelStream.InternalIp == host.Ip {
+		// There's no active plan on this host (otherwise we wouldn't be in proxyActivePlanMethod),
+		// yet the stream record points here -- mark the stream finished since something went wrong
+ err := db.SetModelStreamFinished(modelStream.Id)
+ if err != nil {
+ log.Printf("Error setting model stream %s to finished: %v\n", modelStream.Id, err)
+ }
+
+ err = db.SetPlanStatus(planId, branch, shared.PlanStatusError, "No active stream for plan")
+ if err != nil {
+ log.Printf("Error setting plan %s status to error: %v\n", planId, err)
+ }
+
+ log.Printf("No active plan for plan %s\n", planId)
+ http.Error(w, "No active plan for plan", http.StatusNotFound)
+ return
+ } else {
+ log.Printf("Forwarding request to %s\n", modelStream.InternalIp)
+ proxyUrl := fmt.Sprintf("http://%s:%s/plans/%s/%s/%s", modelStream.InternalIp, os.Getenv("PORT"), planId, branch, method)
+ proxyUrl += "?proxy=true"
+
+ log.Printf("Proxy url: %s\n", proxyUrl)
+ proxyRequest(w, r, proxyUrl)
+ return
+ }
+}
+
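+// proxyRequest replays the original request against the given URL and relays the
+// response status, headers, and body back to the caller.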
+func proxyRequest(w http.ResponseWriter, originalRequest *http.Request, url string) {
+ client := &http.Client{
+ Timeout: time.Second * 10,
+ }
+
+ // Create a new request based on the original request
+ req, err := http.NewRequestWithContext(originalRequest.Context(), originalRequest.Method, url, originalRequest.Body)
+ if err != nil {
+ log.Printf("Error creating request for proxy: %v\n", err)
+ http.Error(w, "Error creating request for proxy", http.StatusInternalServerError)
+ return
+ }
+
+ // Copy the headers from the original request to the new request
+ for name, headers := range originalRequest.Header {
+ for _, h := range headers {
+ req.Header.Add(name, h)
+ }
+ }
+
+	// Note: the body reader was already attached via NewRequestWithContext above,
+	// so no separate body copy is needed for POST or PUT requests.
+
+ // Make the request
+ resp, err := client.Do(req)
+ if err != nil {
+ log.Printf("Error forwarding request: %v\n", err)
+ http.Error(w, "Error forwarding request", http.StatusInternalServerError)
+ return
+ }
+ defer resp.Body.Close()
+
+ // Copy the response headers and status code
+ for name, headers := range resp.Header {
+ for _, h := range headers {
+ w.Header().Add(name, h)
+ }
+ }
+ w.WriteHeader(resp.StatusCode)
+
+ log.Printf("Proxy forwarded successfully with status code: %d\n", resp.StatusCode)
+
+ // Copy the response body
+ if _, err := io.Copy(w, resp.Body); err != nil {
+ log.Printf("Error copying response body: %v\n", err)
+ http.Error(w, "Error copying response body", http.StatusInternalServerError)
+ }
+}
diff --git a/app/server/handlers/sessions.go b/app/server/handlers/sessions.go
new file mode 100644
index 0000000000000000000000000000000000000000..ba6ba6bddaa7a7cd0b161cf8148c0d0b44e71ab9
--- /dev/null
+++ b/app/server/handlers/sessions.go
@@ -0,0 +1,346 @@
+package handlers
+
+import (
+ "crypto/sha256"
+ "encoding/hex"
+ "encoding/json"
+ "io"
+ "log"
+ "net/http"
+ "os"
+ "plandex-server/db"
+ "plandex-server/email"
+ "strings"
+
+ shared "plandex-shared"
+)
+
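+// CreateEmailVerificationHandler generates a 6-character pin, stores only its
+// sha256 hash, and emails the pin to the user. In local development mode the
+// pin step is skipped entirely.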
+func CreateEmailVerificationHandler(w http.ResponseWriter, r *http.Request) {
+ log.Println("Received request for CreateEmailVerificationHandler")
+
+ // read the request body
+ body, err := io.ReadAll(r.Body)
+ if err != nil {
+ log.Printf("Error reading request body: %v\n", err)
+ http.Error(w, "Error reading request body: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ var req shared.CreateEmailVerificationRequest
+ err = json.Unmarshal(body, &req)
+ if err != nil {
+ log.Printf("Error unmarshalling request: %v\n", err)
+ http.Error(w, "Error unmarshalling request: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+ req.Email = strings.ToLower(req.Email)
+
+ var hasAccount bool
+ if req.UserId == "" {
+ user, err := db.GetUserByEmail(req.Email)
+
+ if err != nil {
+ log.Printf("Error getting user: %v\n", err)
+ http.Error(w, "Error getting user: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ hasAccount = user != nil
+ } else {
+ hasAccount = true
+
+ user, err := db.GetUser(req.UserId)
+
+ if err != nil {
+ log.Printf("Error getting user: %v\n", err)
+ http.Error(w, "Error getting user: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ if user == nil {
+ log.Printf("User not found for id: %v\n", req.UserId)
+ http.Error(w, "User not found", http.StatusNotFound)
+ return
+ }
+
+ if user.Email != req.Email {
+ log.Printf("User email does not match for id: %v\n", req.UserId)
+ http.Error(w, "User email does not match", http.StatusBadRequest)
+ return
+ }
+ }
+
+ if req.RequireUser && !hasAccount {
+ log.Printf("User not found for email: %v\n", req.Email)
+ http.Error(w, "User not found", http.StatusNotFound)
+ return
+ } else if req.RequireNoUser && hasAccount {
+ log.Printf("User already exists for email: %v\n", req.Email)
+ http.Error(w, "User already exists", http.StatusConflict)
+ return
+ }
+
+ var res shared.CreateEmailVerificationResponse
+
+ if !(os.Getenv("GOENV") == "development" && os.Getenv("LOCAL_MODE") == "1") {
+ // create pin - 6 alphanumeric characters
+ pinBytes, err := shared.GetRandomAlphanumeric(6)
+ if err != nil {
+ log.Printf("Error generating random pin: %v\n", err)
+ http.Error(w, "Error generating random pin: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ // get sha256 hash of pin
+ hashBytes := sha256.Sum256(pinBytes)
+ pinHash := hex.EncodeToString(hashBytes[:])
+
+ // create verification
+ err = db.CreateEmailVerification(req.Email, req.UserId, pinHash)
+
+ if err != nil {
+ log.Printf("Error creating email verification: %v\n", err)
+ http.Error(w, "Error creating email verification: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ err = email.SendVerificationEmail(req.Email, string(pinBytes))
+
+ if err != nil {
+ log.Printf("Error sending verification email: %v\n", err)
+ http.Error(w, "Error sending verification email: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ res = shared.CreateEmailVerificationResponse{
+ HasAccount: hasAccount,
+ }
+ } else {
+ res = shared.CreateEmailVerificationResponse{
+ HasAccount: hasAccount,
+ IsLocalMode: true,
+ }
+ }
+
+ bytes, err := json.Marshal(res)
+
+ if err != nil {
+ log.Printf("Error marshalling response: %v\n", err)
+ http.Error(w, "Error marshalling response: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ log.Println("Successfully created email verification")
+
+ w.Write(bytes)
+}
+
+func CheckEmailPinHandler(w http.ResponseWriter, r *http.Request) {
+	log.Println("Received request for CheckEmailPinHandler")
+
+ // read the request body
+ body, err := io.ReadAll(r.Body)
+ if err != nil {
+ log.Printf("Error reading request body: %v\n", err)
+ http.Error(w, "Error reading request body: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ var req shared.VerifyEmailPinRequest
+ err = json.Unmarshal(body, &req)
+ if err != nil {
+ log.Printf("Error unmarshalling request: %v\n", err)
+ http.Error(w, "Error unmarshalling request: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+ req.Email = strings.ToLower(req.Email)
+
+ _, err = db.ValidateEmailVerification(req.Email, req.Pin)
+
+ if err != nil {
+ if err.Error() == db.InvalidOrExpiredPinError {
+ http.Error(w, "Invalid or expired pin", http.StatusNotFound)
+ return
+ }
+
+ log.Printf("Error validating email verification: %v\n", err)
+ http.Error(w, "Error validating email verification: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ log.Println("Successfully verified email pin")
+}
+
+// Sign-in codes let users authenticate across different clients,
+// e.g. from the UI to the CLI or vice versa.
+func CreateSignInCodeHandler(w http.ResponseWriter, r *http.Request) {
+ log.Println("Received request for CreateSignInCodeHandler")
+
+ auth := Authenticate(w, r, true)
+
+ if auth == nil {
+ return
+ }
+
+ // create pin - 6 alphanumeric characters
+ pinBytes, err := shared.GetRandomAlphanumeric(6)
+ if err != nil {
+ log.Printf("Error generating random pin: %v\n", err)
+ http.Error(w, "Error generating random pin: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ // get sha256 hash of pin
+ hashBytes := sha256.Sum256(pinBytes)
+ pinHash := hex.EncodeToString(hashBytes[:])
+
+ err = db.CreateSignInCode(auth.User.Id, auth.OrgId, pinHash)
+
+ if err != nil {
+ log.Printf("Error creating sign in code: %v\n", err)
+ http.Error(w, "Error creating sign in code: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ log.Println("Successfully created sign in code")
+
+ // return the pin as a response
+ w.Write(pinBytes)
+}
+
+func SignInHandler(w http.ResponseWriter, r *http.Request) {
+ log.Println("Received request for SignInHandler")
+
+ // read the request body
+ body, err := io.ReadAll(r.Body)
+ if err != nil {
+ log.Printf("Error reading request body: %v\n", err)
+ http.Error(w, "Error reading request body: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ var req shared.SignInRequest
+ err = json.Unmarshal(body, &req)
+ if err != nil {
+ log.Printf("Error unmarshalling request: %v\n", err)
+ http.Error(w, "Error unmarshalling request: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ log.Println("Validating and signing in")
+ resp, err := ValidateAndSignIn(w, r, req)
+
+ if err != nil {
+ log.Printf("Error signing in: %v\n", err)
+ http.Error(w, "Error signing in: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ bytes, err := json.Marshal(resp)
+
+ if err != nil {
+ log.Printf("Error marshalling response: %v\n", err)
+ http.Error(w, "Error marshalling response: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ log.Println("Successfully signed in")
+
+ w.Write(bytes)
+}
+
+func SignOutHandler(w http.ResponseWriter, r *http.Request) {
+ log.Println("Received request for SignOutHandler")
+
+ auth := Authenticate(w, r, false)
+ if auth == nil {
+ return
+ }
+
+ _, err := db.Conn.Exec("UPDATE auth_tokens SET deleted_at = NOW() WHERE token_hash = $1", auth.AuthToken.TokenHash)
+
+ if err != nil {
+ log.Printf("Error deleting auth token: %v\n", err)
+ http.Error(w, "Error deleting auth token: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ err = ClearAuthCookieIfBrowser(w, r)
+
+ if err != nil {
+ log.Printf("Error clearing auth cookie: %v\n", err)
+ http.Error(w, "Error clearing auth cookie: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ err = ClearAccountFromCookies(w, r, auth.User.Id)
+
+ if err != nil {
+ log.Printf("Error clearing account from cookies: %v\n", err)
+ http.Error(w, "Error clearing account from cookies: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ log.Println("Successfully signed out")
+}
+
+func GetOrgUserConfigHandler(w http.ResponseWriter, r *http.Request) {
+ log.Println("Received request for GetOrgUserConfigHandler")
+
+ auth := Authenticate(w, r, true)
+ if auth == nil {
+ return
+ }
+
+ orgUserConfig, err := db.GetOrgUserConfig(auth.User.Id, auth.OrgId)
+
+ if err != nil {
+ log.Printf("Error getting org user config: %v\n", err)
+ http.Error(w, "Error getting org user config: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ bytes, err := json.Marshal(orgUserConfig)
+
+ if err != nil {
+ log.Printf("Error marshalling response: %v\n", err)
+ http.Error(w, "Error marshalling response: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ w.Write(bytes)
+}
+
+func UpdateOrgUserConfigHandler(w http.ResponseWriter, r *http.Request) {
+ log.Println("Received request for UpdateOrgUserConfigHandler")
+
+ auth := Authenticate(w, r, true)
+ if auth == nil {
+ return
+ }
+
+ body, err := io.ReadAll(r.Body)
+ if err != nil {
+ log.Printf("Error reading request body: %v\n", err)
+ http.Error(w, "Error reading request body: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ var req shared.OrgUserConfig
+ err = json.Unmarshal(body, &req)
+ if err != nil {
+ log.Printf("Error unmarshalling request: %v\n", err)
+ http.Error(w, "Error unmarshalling request: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ err = db.UpdateOrgUserConfig(auth.User.Id, auth.OrgId, &req)
+
+ if err != nil {
+ log.Printf("Error updating org user config: %v\n", err)
+ http.Error(w, "Error updating org user config: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ log.Println("Successfully updated org user config")
+}
diff --git a/app/server/handlers/settings.go b/app/server/handlers/settings.go
new file mode 100644
index 0000000000000000000000000000000000000000..043bdb2809fe757740b15776d23aea2852fef804
--- /dev/null
+++ b/app/server/handlers/settings.go
@@ -0,0 +1,541 @@
+package handlers
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "log"
+ "net/http"
+ "os"
+ "plandex-server/db"
+ "reflect"
+
+ shared "plandex-shared"
+
+ "github.com/gorilla/mux"
+ "github.com/jmoiron/sqlx"
+)
+
+func GetSettingsHandler(w http.ResponseWriter, r *http.Request) {
+ log.Println("Received request for GetSettingsHandler")
+
+ auth := Authenticate(w, r, true)
+ if auth == nil {
+ return
+ }
+
+ vars := mux.Vars(r)
+ planId := vars["planId"]
+ branch := vars["branch"]
+
+ log.Println("planId: ", planId, "branch: ", branch)
+
+ plan := authorizePlan(w, planId, auth)
+ if plan == nil {
+ return
+ }
+
+ var settings *shared.PlanSettings
+ ctx, cancel := context.WithCancel(r.Context())
+
+ err := db.ExecRepoOperation(db.ExecRepoOperationParams{
+ OrgId: auth.OrgId,
+ UserId: auth.User.Id,
+ PlanId: planId,
+ Branch: branch,
+ Reason: "get settings",
+ Scope: db.LockScopeRead,
+ Ctx: ctx,
+ CancelFn: cancel,
+ }, func(repo *db.GitRepo) error {
+ res, err := db.GetPlanSettings(plan)
+ if err != nil {
+ return err
+ }
+
+ settings = res
+
+ return nil
+ })
+
+ if err != nil {
+ log.Println("Error getting settings: ", err)
+ http.Error(w, "Error getting settings", http.StatusInternalServerError)
+ return
+ }
+
+ bytes, err := json.Marshal(settings)
+
+ if err != nil {
+ log.Println("Error marshalling settings: ", err)
+ http.Error(w, "Error marshalling settings", http.StatusInternalServerError)
+ return
+ }
+
+ log.Println("GetSettingsHandler processed successfully")
+
+ w.Write(bytes)
+}
+
+func UpdateSettingsHandler(w http.ResponseWriter, r *http.Request) {
+ log.Println("Received request for UpdateSettingsHandler")
+
+ auth := Authenticate(w, r, true)
+ if auth == nil {
+ return
+ }
+
+ vars := mux.Vars(r)
+ planId := vars["planId"]
+ branch := vars["branch"]
+
+ log.Println("planId: ", planId, "branch: ", branch)
+
+ plan := authorizePlan(w, planId, auth)
+
+ if plan == nil {
+ return
+ }
+
+ var req shared.UpdateSettingsRequest
+ err := json.NewDecoder(r.Body).Decode(&req)
+
+ if err != nil {
+ log.Println("Error decoding request body: ", err)
+ http.Error(w, "Error decoding request body", http.StatusInternalServerError)
+ return
+ }
+
+ if req.ModelPackName == "" && req.ModelPack == nil {
+ log.Println("No model pack name or model pack provided")
+ http.Error(w, "No model pack name or model pack provided", http.StatusBadRequest)
+ return
+ }
+
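+	// reject local-only model packs and models when running on Plandex Cloud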
+ if req.ModelPackName != "" {
+ if mp, builtIn := shared.BuiltInModelPacksByName[req.ModelPackName]; builtIn {
+ if os.Getenv("IS_CLOUD") != "" && mp.LocalProvider != "" {
+ msg := fmt.Sprintf("Built-in local model pack %s can't be used on Plandex Cloud", req.ModelPackName)
+ log.Println(msg)
+ http.Error(w, msg, http.StatusUnprocessableEntity)
+ return
+ }
+ }
+ }
+
+ if req.ModelPack != nil {
+		if os.Getenv("IS_CLOUD") != "" && req.ModelPack.LocalProvider != "" {
+ msg := fmt.Sprintf("Local model pack %s can't be used on Plandex Cloud", req.ModelPack.Name)
+ log.Println(msg)
+ http.Error(w, msg, http.StatusUnprocessableEntity)
+ return
+ }
+
+ ids := req.ModelPack.ToModelPackSchema().AllModelIds()
+ for _, id := range ids {
+ bm, builtIn := shared.BuiltInBaseModelsById[id]
+ if builtIn && os.Getenv("IS_CLOUD") != "" && bm.IsLocalOnly() {
+ msg := fmt.Sprintf("Built-in local model %s can't be used on Plandex Cloud", id)
+ log.Println(msg)
+ http.Error(w, msg, http.StatusUnprocessableEntity)
+ return
+ }
+ }
+ }
+
+ ctx, cancel := context.WithCancel(r.Context())
+
+ var commitMsg string
+
+ err = db.ExecRepoOperation(db.ExecRepoOperationParams{
+ OrgId: auth.OrgId,
+ UserId: auth.User.Id,
+ PlanId: planId,
+ Branch: branch,
+ Reason: "update settings",
+ Scope: db.LockScopeWrite,
+ Ctx: ctx,
+ CancelFn: cancel,
+ }, func(repo *db.GitRepo) error {
+ originalSettings, err := db.GetPlanSettings(plan)
+
+ if err != nil {
+ return fmt.Errorf("error getting settings: %v", err)
+ }
+
+ settings, err := originalSettings.DeepCopy()
+ if err != nil {
+ return fmt.Errorf("error copying settings: %v", err)
+ }
+
+ if req.ModelPackName != "" {
+ settings.SetModelPackByName(req.ModelPackName)
+ } else if req.ModelPack != nil {
+ settings.SetCustomModelPack(req.ModelPack)
+ } else {
+ return fmt.Errorf("no model pack name or model pack provided")
+ }
+
+ // log.Println("Original settings:")
+ // spew.Dump(originalSettings)
+
+ // log.Println("req.Settings:")
+ // spew.Dump(req.Settings)
+
+ err = db.StorePlanSettings(plan, *settings)
+
+ if err != nil {
+ return fmt.Errorf("error storing settings: %v", err)
+ }
+
+ commitMsg = getUpdateCommitMsg(settings, originalSettings, false)
+
+ err = repo.GitAddAndCommit(branch, commitMsg)
+
+ if err != nil {
+ return fmt.Errorf("error committing settings: %v", err)
+ }
+
+ return nil
+ })
+
+ if err != nil {
+ log.Println("Error updating settings: ", err)
+ http.Error(w, "Error updating settings", http.StatusInternalServerError)
+ return
+ }
+
+ res := shared.UpdateSettingsResponse{
+ Msg: commitMsg,
+ }
+ bytes, err := json.Marshal(res)
+
+ if err != nil {
+ log.Println("Error marshalling response: ", err)
+ http.Error(w, "Error marshalling response", http.StatusInternalServerError)
+ return
+ }
+
+ w.Write(bytes)
+
+ log.Println("UpdateSettingsHandler processed successfully")
+
+}
+
+func GetDefaultSettingsHandler(w http.ResponseWriter, r *http.Request) {
+ log.Println("Received request for GetDefaultSettingsHandler")
+
+ auth := Authenticate(w, r, true)
+ if auth == nil {
+ return
+ }
+
+ settings, err := db.GetOrgDefaultSettings(auth.OrgId)
+
+ if err != nil {
+ log.Println("Error getting default settings: ", err)
+ http.Error(w, "Error getting default settings", http.StatusInternalServerError)
+ return
+ }
+
+ bytes, err := json.Marshal(settings)
+
+ if err != nil {
+ log.Println("Error marshalling default settings: ", err)
+ http.Error(w, "Error marshalling default settings", http.StatusInternalServerError)
+ return
+ }
+
+ w.Write(bytes)
+
+ log.Println("GetDefaultSettingsHandler processed successfully")
+}
+
+func UpdateDefaultSettingsHandler(w http.ResponseWriter, r *http.Request) {
+ log.Println("Received request for UpdateDefaultSettingsHandler")
+
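+	// re-read the latest commit after the rewind so the response reflects the new head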
+ auth := Authenticate(w, r, true)
+
+ if auth == nil {
+ return
+ }
+
+ var req shared.UpdateSettingsRequest
+ err := json.NewDecoder(r.Body).Decode(&req)
+
+ if err != nil {
+ log.Println("Error decoding request body: ", err)
+ http.Error(w, "Error decoding request body", http.StatusInternalServerError)
+ return
+ }
+
+ if req.ModelPackName == "" && req.ModelPack == nil {
+ log.Println("No model pack name or model pack provided")
+ http.Error(w, "No model pack name or model pack provided", http.StatusBadRequest)
+ return
+ }
+
+ var originalSettings *shared.PlanSettings
+ var settings *shared.PlanSettings
+
+ err = db.WithTx(r.Context(), "update default settings", func(tx *sqlx.Tx) error {
+ var err error
+
+ originalSettings, err = db.GetOrgDefaultSettingsForUpdate(auth.OrgId, tx)
+
+ if err != nil {
+ log.Println("Error getting default settings: ", err)
+ return fmt.Errorf("error getting default settings: %v", err)
+ }
+
+ settings, err = originalSettings.DeepCopy()
+ if err != nil {
+ return fmt.Errorf("error copying settings: %v", err)
+ }
+
+ if req.ModelPackName != "" {
+ settings.SetModelPackByName(req.ModelPackName)
+ } else if req.ModelPack != nil {
+ settings.SetCustomModelPack(req.ModelPack)
+ } else {
+ return fmt.Errorf("no model pack name or model pack provided")
+ }
+
+ // log.Println("Original settings:")
+ // spew.Dump(originalSettings)
+
+ // log.Println("req.Settings:")
+ // spew.Dump(req.Settings)
+
+ err = db.StoreOrgDefaultSettings(auth.OrgId, settings, tx)
+
+ if err != nil {
+ log.Println("Error storing default settings: ", err)
+ return fmt.Errorf("error storing default settings: %v", err)
+ }
+
+ return nil
+ })
+
+ if err != nil {
+ log.Println("Error updating default settings: ", err)
+ http.Error(w, "Error updating default settings", http.StatusInternalServerError)
+ return
+ }
+
+ commitMsg := getUpdateCommitMsg(settings, originalSettings, true)
+
+ res := shared.UpdateSettingsResponse{
+ Msg: commitMsg,
+ }
+ bytes, err := json.Marshal(res)
+
+ if err != nil {
+ log.Println("Error marshalling response: ", err)
+ http.Error(w, "Error marshalling response", http.StatusInternalServerError)
+ return
+ }
+
+ w.Write(bytes)
+
+ log.Println("UpdateDefaultSettingsHandler processed successfully")
+}
+
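+// getUpdateCommitMsg builds the commit message summarizing what changed between
+// the original and updated settings, with a bullet per changed field.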
+func getUpdateCommitMsg(settings *shared.PlanSettings, originalSettings *shared.PlanSettings, isOrgDefault bool) string {
+ // log.Println("Comparing settings")
+ // log.Println("Original:")
+ // spew.Dump(originalSettings)
+ // log.Println("New:")
+ // spew.Dump(settings)
+
+ // log.Println("Changes to settings:", strings.Join(changes, "\n"))
+ var s string
+ if isOrgDefault {
+ s = "⚙️ Updated org-wide default settings:"
+ } else {
+ s = "⚙️ Updated model settings:"
+ }
+
+ var changes []string
+ changes = compareSettings(originalSettings, settings, changes)
+
+ if len(changes) == 0 {
+ return "No changes to settings"
+ }
+
+ for _, change := range changes {
+ s += "\n" + " • " + change
+ }
+
+ return s
+}
+
+func compareSettings(original, updated *shared.PlanSettings, changes []string) []string {
+ if updated.ModelPackName != "" {
+ originalName := "custom"
+ if original.ModelPackName != "" {
+ originalName = original.ModelPackName
+ }
+ changes = append(changes, fmt.Sprintf("model-pack | %v → %v", originalName, updated.ModelPackName))
+ } else if updated.ModelPack != nil {
+ if original.ModelPack == nil {
+ changes = append(changes, fmt.Sprintf("model-pack | %v → %v", original.ModelPackName, "custom"))
+ }
+
+ changes = compareAny(original.GetModelPack().ToModelPackSchema().ModelPackSchemaRoles, updated.GetModelPack().ToModelPackSchema().ModelPackSchemaRoles, "", changes)
+ }
+
+ return changes
+}
+
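+// compareAny walks two values recursively via reflection and records a
+// human-readable "path | old → new" entry for each leaf field that differs.
+// Wrapper struct names (e.g. model-role-config) are elided from the path to
+// keep the output readable.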
+func compareAny(a, b interface{}, path string, changes []string) []string {
+ aVal, bVal := reflect.ValueOf(a), reflect.ValueOf(b)
+
+ if !aVal.IsValid() && !bVal.IsValid() {
+ return changes
+ }
+
+ // Pointer / nil handling – BEFORE dereferencing
+ if aVal.Kind() == reflect.Ptr || bVal.Kind() == reflect.Ptr {
+ // both nil → nothing
+ if (aVal.Kind() == reflect.Ptr && aVal.IsNil()) &&
+ (bVal.Kind() == reflect.Ptr && bVal.IsNil()) {
+ return changes
+ }
+
+ // one nil, one non-nil → record diff
+ if (aVal.Kind() == reflect.Ptr && aVal.IsNil()) ||
+ (bVal.Kind() == reflect.Ptr && bVal.IsNil()) {
+ aStr := "none"
+ bStr := "none"
+ if aVal.Kind() != reflect.Ptr || !aVal.IsNil() {
+ aStr = short(aVal)
+ }
+ if bVal.Kind() != reflect.Ptr || !bVal.IsNil() {
+ bStr = short(bVal)
+ }
+ changes = append(changes, fmt.Sprintf("%s | %s → %s", path, aStr, bStr))
+ return changes
+ }
+
+ // both non-nil pointers → safe to dereference
+ if aVal.Kind() == reflect.Ptr {
+ aVal = aVal.Elem()
+ }
+ if bVal.Kind() == reflect.Ptr {
+ bVal = bVal.Elem()
+ }
+ }
+
+ // Check again after dereferencing
+ if !aVal.IsValid() && !bVal.IsValid() {
+ return changes
+ }
+
+ // One side nil → record diff and stop
+ if !aVal.IsValid() || !bVal.IsValid() {
+ var aStr, bStr string
+ if aVal.IsValid() {
+ aStr = short(aVal)
+ } else {
+ aStr = "none"
+ }
+ if bVal.IsValid() {
+ bStr = short(bVal)
+ } else {
+ bStr = "none"
+ }
+ changes = append(changes, fmt.Sprintf("%s | %s → %s", path, aStr, bStr))
+ return changes
+ }
+
+ // Ensure we can safely call Interface()
+ if !aVal.CanInterface() || !bVal.CanInterface() {
+ return changes
+ }
+
+ if reflect.DeepEqual(aVal.Interface(), bVal.Interface()) {
+ return changes // No difference found
+ }
+
+ switch aVal.Kind() {
+ case reflect.Struct:
+ for i := 0; i < aVal.NumField(); i++ {
+ field := aVal.Type().Field(i)
+ if !field.IsExported() {
+ continue // Skip unexported fields
+ }
+ fieldName := field.Name
+ dasherizedName := shared.Dasherize(fieldName)
+
+ updatedPath := path
+ if !(dasherizedName == "model-set" ||
+ dasherizedName == "model-role-config" ||
+ dasherizedName == "base-model-config" ||
+ dasherizedName == "planner-model-config" ||
+ dasherizedName == "task-model-config") {
+ if updatedPath != "" {
+ updatedPath = updatedPath + "." + dasherizedName
+ } else {
+ if dasherizedName == "model-overrides" {
+ dasherizedName = "overrides"
+ }
+ updatedPath = dasherizedName
+ }
+ }
+
+ changes = compareAny(aVal.Field(i).Interface(), bVal.Field(i).Interface(), updatedPath, changes)
+ }
+ default:
+ var aStr, bStr string
+ if aVal.IsValid() {
+ aStr = short(aVal)
+ } else {
+ aStr = "no override"
+ }
+
+ if bVal.IsValid() {
+ bStr = short(bVal)
+ } else {
+ bStr = "no override"
+ }
+
+ change := fmt.Sprintf("%s | %v → %v", path, aStr, bStr)
+ changes = append(changes, change)
+ }
+
+ return changes
+}
+
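+// short renders a reflect.Value as a compact human-readable string for use in
+// settings-change summaries, dereferencing one level of pointer and special-casing
+// structs that carry a ModelId field.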
+func short(v reflect.Value) string {
+ if !v.IsValid() {
+ return "none"
+ }
+
+ // If it’s a pointer, follow it once
+ if v.Kind() == reflect.Ptr {
+ if v.IsNil() {
+ return "none"
+ }
+ v = v.Elem()
+ }
+
+ switch v.Kind() {
+ case reflect.String:
+ return v.String()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return fmt.Sprintf("%d", v.Int())
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ return fmt.Sprintf("%d", v.Uint())
+ case reflect.Float32, reflect.Float64:
+ return fmt.Sprintf("%g", v.Float())
+ case reflect.Struct:
+ // Special-case ModelRoleConfigSchema: show the ModelId only
+ if f := v.FieldByName("ModelId"); f.IsValid() && f.Kind() == reflect.String {
+ return f.String()
+ }
+ return fmt.Sprintf("%T", v.Interface()) // fall-back: just the type name
+ default:
+ return fmt.Sprintf("%v", v.Interface())
+ }
+}
diff --git a/app/server/handlers/stream_helper.go b/app/server/handlers/stream_helper.go
new file mode 100644
index 0000000000000000000000000000000000000000..eabd16f0e29d898e8618bb61533dca382d7bf431
--- /dev/null
+++ b/app/server/handlers/stream_helper.go
@@ -0,0 +1,228 @@
+package handlers
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "log"
+ "net/http"
+ "plandex-server/db"
+ modelPlan "plandex-server/model/plan"
+ "plandex-server/types"
+ "time"
+
+ shared "plandex-shared"
+)
+
+const HeartbeatInterval = 5 * time.Second
+
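+// startResponseStream subscribes to the active plan's message stream and writes it
+// to the client as chunked plain text, interleaving periodic heartbeats so clients
+// and proxies can detect a dropped connection.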
+func startResponseStream(reqCtx context.Context, w http.ResponseWriter, auth *types.ServerAuth, planId, branch string, isConnect bool) {
+ log.Println("Response stream manager: starting plan stream")
+
+ active := modelPlan.GetActivePlan(planId, branch)
+
+ if active == nil {
+ log.Printf("Response stream manager: active plan not found for plan ID %s on branch %s\n", planId, branch)
+ http.Error(w, "Active plan not found", http.StatusNotFound)
+ return
+ }
+
+ w.Header().Set("Transfer-Encoding", "chunked")
+ w.Header().Set("Content-Type", "text/plain; charset=utf-8")
+
+ // send initial message to client
+ msg := shared.StreamMessage{
+ Type: shared.StreamMessageStart,
+ }
+
+ bytes, err := json.Marshal(msg)
+
+ if err != nil {
+ log.Printf("Response stream manager: error marshalling message: %v\n", err)
+ return
+ }
+
+ log.Println("Response stream manager: sending initial message")
+ err = sendStreamMessage(w, string(bytes))
+ if err != nil {
+ log.Println("Response stream manager: error sending initial message:", err)
+ return
+ }
+
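+	// when connecting to an existing stream, pause briefly so the client can process
+	// the start message before the replayed plan state arrives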
+ if isConnect {
+ time.Sleep(100 * time.Millisecond)
+ err = initConnectActive(auth, planId, branch, w)
+
+ if err != nil {
+ log.Println("Response stream manager: error initializing connection to active plan:", err)
+ return
+ }
+ }
+
+ subscriptionId, ch := modelPlan.SubscribePlan(reqCtx, planId, branch)
+ defer func() {
+ log.Println("Response stream manager: client stream closed")
+ modelPlan.UnsubscribePlan(planId, branch, subscriptionId)
+ }()
+
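+	// small settling delay before entering the live stream loop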
+ if isConnect {
+ time.Sleep(50 * time.Millisecond)
+ } else {
+ time.Sleep(100 * time.Millisecond)
+ }
+
+ chHeartbeat := make(chan string)
+
+ // send heartbeats while the stream is active
+ go func() {
+ ticker := time.NewTicker(HeartbeatInterval)
+ defer ticker.Stop()
+
+ for {
+ select {
+ case <-ticker.C:
+ chHeartbeat <- string(shared.StreamMessageHeartbeat)
+ case <-reqCtx.Done():
+ return
+ }
+ }
+ }()
+
+ for {
+ select {
+ case <-reqCtx.Done():
+ log.Println("Response stream manager: request context done")
+ return
+ case msg := <-chHeartbeat:
+ err = sendStreamMessage(w, msg)
+ if err != nil {
+ return
+ }
+ case msg := <-ch:
+ // log.Println("Response stream manager: sending message:", msg)
+ err = sendStreamMessage(w, msg)
+ if err != nil {
+ return
+ }
+ }
+ }
+
+}
+
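+// sendStreamMessage writes a single message followed by the stream separator and
+// flushes immediately so the client sees it without buffering delays.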
+func sendStreamMessage(w http.ResponseWriter, msg string) error {
+ bytes := []byte(msg + shared.STREAM_MESSAGE_SEPARATOR)
+
+ // log.Printf("Response stream manager: writing message to client: %s\n", msg)
+
+ _, err := w.Write(bytes)
+ if err != nil {
+ log.Printf("Response stream manager: error writing to client: %v\n", err)
+ return err
+ } else if flusher, ok := w.(http.Flusher); ok {
+ flusher.Flush()
+ }
+ return nil
+}
+
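+// initConnectActive replays the state of an in-progress plan stream (prompt, stored
+// replies, current partial reply, and any pending missing-file prompt) so a client
+// that reconnects mid-stream can catch up before live messages resume.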
+func initConnectActive(auth *types.ServerAuth, planId, branch string, w http.ResponseWriter) error {
+ log.Println("Response stream manager: initializing connection to active plan")
+
+ active := modelPlan.GetActivePlan(planId, branch)
+
+ if active == nil {
+ return fmt.Errorf("active plan not found for plan ID %s on branch %s", planId, branch)
+ }
+
+ msg := shared.StreamMessage{
+ Type: shared.StreamMessageConnectActive,
+ }
+
+ if active.Prompt != "" && !active.BuildOnly {
+ msg.InitPrompt = active.Prompt
+ }
+
+ if active.BuildOnly {
+ msg.InitBuildOnly = true
+ }
+
+ if len(active.StoredReplyIds) > 0 {
+ convo, err := db.GetPlanConvo(auth.OrgId, active.Id)
+ if err != nil {
+ return fmt.Errorf("error getting plan convo: %v", err)
+ }
+
+ convoMsgById := map[string]*db.ConvoMessage{}
+ for _, convoMsg := range convo {
+ convoMsgById[convoMsg.Id] = convoMsg
+ }
+
+ for _, replyId := range active.StoredReplyIds {
+ if convoMsg, ok := convoMsgById[replyId]; ok {
+ msg.InitReplies = append(msg.InitReplies, convoMsg.Message)
+ }
+ }
+ }
+
+ if active.CurrentReplyContent != "" {
+ msg.InitReplies = append(msg.InitReplies, active.CurrentReplyContent)
+ }
+
+ if active.MissingFilePath != "" {
+ msg.MissingFilePath = active.MissingFilePath
+ }
+
+ bytes, err := json.Marshal(msg)
+
+ if err != nil {
+ return fmt.Errorf("error marshalling message: %v", err)
+ }
+
+ log.Println("Response stream manager: sending connect message")
+ err = sendStreamMessage(w, string(bytes))
+
+ if err != nil {
+ return fmt.Errorf("error sending connect message: %v", err)
+ }
+
+	buildQueuesByPath := active.BuildQueuesByPath
+
+ // if we're connecting to an active stream and there are active builds, send initial build info
+ if len(buildQueuesByPath) > 0 {
+
+ for path, queue := range buildQueuesByPath {
+ buildInfo := shared.BuildInfo{Path: path}
+
+ for _, build := range queue {
+ if build.BuildFinished() {
+ buildInfo.NumTokens = 0
+ buildInfo.Finished = true
+ } else {
+ // no longer showing token counts in build info - leaving commented out for now for reference
+ // tokens := build.WithLineNumsBufferTokens
+ buildInfo.Finished = false
+ // buildInfo.NumTokens += tokens
+ }
+ }
+
+ msg := shared.StreamMessage{
+ Type: shared.StreamMessageBuildInfo,
+ BuildInfo: &buildInfo,
+ }
+ bytes, err := json.Marshal(msg)
+
+ if err != nil {
+ return fmt.Errorf("error marshalling message: %v", err)
+ }
+
+ err = sendStreamMessage(w, string(bytes))
+
+ if err != nil {
+ return fmt.Errorf("error sending message: %v", err)
+ }
+
+ }
+
+ }
+
+ return nil
+}
diff --git a/app/server/handlers/users.go b/app/server/handlers/users.go
new file mode 100644
index 0000000000000000000000000000000000000000..dee22a3429a08e3923a3350cea9b8357a65cfc72
--- /dev/null
+++ b/app/server/handlers/users.go
@@ -0,0 +1,223 @@
+package handlers
+
+import (
+ "encoding/json"
+ "fmt"
+ "log"
+ "net/http"
+ "os"
+ "plandex-server/db"
+ "strings"
+
+ shared "plandex-shared"
+
+ "github.com/gorilla/mux"
+ "github.com/jmoiron/sqlx"
+)
+
+func ListUsersHandler(w http.ResponseWriter, r *http.Request) {
+ log.Println("Received a request for ListUsersHandler")
+
+ if os.Getenv("GOENV") == "development" && os.Getenv("LOCAL_MODE") == "1" {
+ writeApiError(w, shared.ApiError{
+ Type: shared.ApiErrorTypeOther,
+ Status: http.StatusForbidden,
+ Msg: "Local mode is not supported for user management",
+ })
+ return
+ }
+
+ auth := Authenticate(w, r, true)
+ if auth == nil {
+ return
+ }
+
+ org, err := db.GetOrg(auth.OrgId)
+ if err != nil {
+ log.Printf("Error getting org: %v\n", err)
+ http.Error(w, "Error getting org: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ if org.IsTrial {
+ writeApiError(w, shared.ApiError{
+ Type: shared.ApiErrorTypeTrialActionNotAllowed,
+ Status: http.StatusForbidden,
+ Msg: "Trial user can't list users",
+ })
+ return
+ }
+
+ users, err := db.ListUsers(auth.OrgId)
+ if err != nil {
+ log.Println("Error listing users: ", err)
+ http.Error(w, "Error listing users: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ apiUsers := make([]*shared.User, 0, len(users))
+ for _, user := range users {
+ apiUsers = append(apiUsers, user.ToApi())
+ }
+
+ orgUsers, err := db.ListOrgUsers(auth.OrgId)
+ if err != nil {
+ log.Println("Error listing org users: ", err)
+ http.Error(w, "Error listing org users: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ orgUsersByUserId := make(map[string]*shared.OrgUser)
+ for _, orgUser := range orgUsers {
+ orgUsersByUserId[orgUser.UserId] = orgUser.ToApi()
+ }
+
+ resp := shared.ListUsersResponse{
+ Users: apiUsers,
+ OrgUsersByUserId: orgUsersByUserId,
+ }
+
+ bytes, err := json.Marshal(resp)
+
+ if err != nil {
+ log.Println("Error marshalling users: ", err)
+ http.Error(w, "Error marshalling users: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ log.Println("Successfully processed request for ListUsersHandler")
+
+ w.Write(bytes)
+}
+
+func DeleteOrgUserHandler(w http.ResponseWriter, r *http.Request) {
+ log.Println("Received a request for DeleteOrgUserHandler")
+
+ if os.Getenv("GOENV") == "development" && os.Getenv("LOCAL_MODE") == "1" {
+ writeApiError(w, shared.ApiError{
+ Type: shared.ApiErrorTypeOther,
+ Status: http.StatusForbidden,
+ Msg: "Local mode is not supported for user management",
+ })
+ return
+ }
+
+ auth := Authenticate(w, r, true)
+ if auth == nil {
+ return
+ }
+
+ org, err := db.GetOrg(auth.OrgId)
+ if err != nil {
+ log.Printf("Error getting org: %v\n", err)
+ http.Error(w, "Error getting org: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ if org.IsTrial {
+ writeApiError(w, shared.ApiError{
+ Type: shared.ApiErrorTypeTrialActionNotAllowed,
+ Status: http.StatusForbidden,
+ Msg: "Trial user can't delete users",
+ })
+ return
+ }
+
+ vars := mux.Vars(r)
+ userId := vars["userId"]
+
+ log.Println("userId: ", userId)
+
+ orgUser, err := db.GetOrgUser(userId, auth.OrgId)
+
+ if err != nil {
+ log.Printf("Error getting org user: %v\n", err)
+ http.Error(w, "Error getting org user: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ // ensure current user can remove target user
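+	// the permission is scoped by the target's role, encoded as "<permission>|<roleId>"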
+ removePermission := shared.Permission(strings.Join([]string{string(shared.PermissionRemoveUser), orgUser.OrgRoleId}, "|"))
+
+ if !auth.HasPermission(removePermission) {
+ log.Printf("User does not have permission to remove user with role: %v\n", orgUser.OrgRoleId)
+ http.Error(w, "User does not have permission to remove user with role: "+orgUser.OrgRoleId, http.StatusForbidden)
+ return
+ }
+
+ // verify user is org member
+ isMember, err := db.ValidateOrgMembership(userId, auth.OrgId)
+
+ if err != nil {
+ log.Printf("Error validating org membership: %v\n", err)
+ http.Error(w, "Error validating org membership: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ if !isMember {
+ log.Printf("User %s is not a member of org %s\n", userId, auth.OrgId)
+ http.Error(w, "User "+userId+" is not a member of org "+auth.OrgId, http.StatusForbidden)
+ return
+ }
+
+ orgOwnerRoleId, err := db.GetOrgOwnerRoleId()
+
+ if err != nil {
+ log.Printf("Error getting org owner role id: %v\n", err)
+ http.Error(w, "Error getting org owner role id: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ // verify user isn't the only org owner
+ if orgUser.OrgRoleId == orgOwnerRoleId {
+ numOwners, err := db.NumUsersWithRole(auth.OrgId, orgOwnerRoleId)
+
+ if err != nil {
+ log.Printf("Error getting number of org owners: %v\n", err)
+ http.Error(w, "Error getting number of org owners: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ if numOwners == 1 {
+ log.Println("Cannot delete the only org owner")
+ http.Error(w, "Cannot delete the only org owner", http.StatusForbidden)
+ return
+ }
+ }
+
+ err = db.WithTx(r.Context(), "delete org user", func(tx *sqlx.Tx) error {
+
+ err = db.DeleteOrgUser(auth.OrgId, userId, tx)
+
+ if err != nil {
+ log.Println("Error deleting org user: ", err)
+ return fmt.Errorf("error deleting org user: %v", err)
+ }
+
+ invite, err := db.GetActiveInviteByEmail(auth.OrgId, auth.User.Email)
+
+ if err != nil {
+ log.Println("Error getting invite for org user: ", err)
+ return fmt.Errorf("error getting invite for org user: %v", err)
+ }
+
+ if invite != nil {
+ err = db.DeleteInvite(invite.Id, tx)
+
+ if err != nil {
+ log.Println("Error deleting invite: ", err)
+ return fmt.Errorf("error deleting invite: %v", err)
+ }
+ }
+
+ return nil
+ })
+
+ if err != nil {
+ log.Println("Error deleting org user: ", err)
+ http.Error(w, "Error deleting org user: "+err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ log.Println("Successfully processed request for DeleteOrgUserHandler")
+}
diff --git a/app/server/hooks/hooks.go b/app/server/hooks/hooks.go
new file mode 100644
index 0000000000000000000000000000000000000000..d5baa97fce8e7bcdf3c1fb7085352dbc17c960c6
--- /dev/null
+++ b/app/server/hooks/hooks.go
@@ -0,0 +1,188 @@
+package hooks
+
+import (
+ "context"
+ "plandex-server/db"
+ "plandex-server/types"
+ "time"
+
+ shared "plandex-shared"
+
+ "github.com/jmoiron/sqlx"
+ "github.com/sashabaranov/go-openai"
+)
+
+const (
+ HealthCheck = "health_check"
+
+ CreateAccount = "create_account"
+ WillCreatePlan = "will_create_plan"
+ WillTellPlan = "will_tell_plan"
+ WillExecPlan = "will_exec_plan"
+ WillSendModelRequest = "will_send_model_request"
+ DidSendModelRequest = "did_send_model_request"
+ DidFinishBuilderRun = "did_finish_builder_run"
+ CreateOrg = "create_org"
+ Authenticate = "authenticate"
+ GetIntegratedModels = "get_integrated_models"
+ GetApiOrgs = "get_api_orgs"
+ CallFastApply = "call_fast_apply"
+)
+
+type WillSendModelRequestParams struct {
+ InputTokens int
+ OutputTokens int
+ ModelName shared.ModelName
+ IsUserPrompt bool
+ ModelTag shared.ModelTag
+ ModelId shared.ModelId
+}
+
+type DidSendModelRequestParams struct {
+ InputTokens int
+ OutputTokens int
+ CachedTokens int
+ ModelId shared.ModelId
+ ModelTag shared.ModelTag
+ ModelName shared.ModelName
+ ModelProvider shared.ModelProvider
+ ModelRole shared.ModelRole
+ ModelPackName string
+ Purpose string
+ GenerationId string
+ PlanId string
+ ModelStreamId string
+ ConvoMessageId string
+ BuildId string
+ StoppedEarly bool
+ UserCancelled bool
+ HadError bool
+ NoReportedUsage bool
+ SessionId string
+
+ RequestStartedAt time.Time
+ Streaming bool
+ StreamResult string
+ FirstTokenAt time.Time
+ Req *types.ExtendedChatCompletionRequest
+ Res *openai.ChatCompletionResponse
+ ModelConfig *shared.ModelRoleConfig
+}
+
+type DidFinishBuilderRunParams struct {
+ PlanId string
+ FilePath string
+ FileExt string
+ Lang string
+ GenerationIds []string
+
+ ValidateModelConfig *shared.ModelRoleConfig
+ FastApplyModelConfig *shared.ModelRoleConfig
+ WholeFileModelConfig *shared.ModelRoleConfig
+
+ AutoApplySuccess bool
+ AutoApplyValidationReasons []string
+ AutoApplyValidationSyntaxErrors []string
+ AutoApplyValidationPassed bool
+ AutoApplyValidationFailureResponse string
+ AutoApplyValidationStartedAt time.Time
+ AutoApplyValidationFinishedAt time.Time
+
+ DidReplacement bool
+ ReplacementSuccess bool
+ ReplacementSyntaxErrors []string
+ ReplacementFailureResponse string
+ ReplacementStartedAt time.Time
+ ReplacementFinishedAt time.Time
+
+ DidRewriteProposed bool
+ RewriteProposedSuccess bool
+ RewriteProposedSyntaxErrors []string
+ RewriteProposedFailureResponse string
+ RewriteProposedStartedAt time.Time
+ RewriteProposedFinishedAt time.Time
+
+ DidFastApply bool
+ FastApplySuccess bool
+ FastApplySyntaxErrors []string
+ FastApplyFailureResponse string
+ FastApplyStartedAt time.Time
+ FastApplyFinishedAt time.Time
+
+ BuiltWholeFile bool
+ BuildWholeFileStartedAt time.Time
+ BuildWholeFileFinishedAt time.Time
+
+ StartedAt time.Time
+ FinishedAt time.Time
+}
+
+type CreateOrgHookRequestParams struct {
+ Org *db.Org
+}
+
+type AuthenticateHookRequestParams struct {
+ Path string
+ Hash string
+}
+
+type FastApplyParams struct {
+ InitialCode string `json:"initialCode"`
+ EditSnippet string `json:"editSnippet"`
+
+ InitialCodeTokens int
+ EditSnippetTokens int
+
+ Language shared.Language
+
+ Ctx context.Context
+}
+
+type HookParams struct {
+ Auth *types.ServerAuth
+ Plan *db.Plan
+ Tx *sqlx.Tx
+
+ WillSendModelRequestParams *WillSendModelRequestParams
+ DidSendModelRequestParams *DidSendModelRequestParams
+ CreateOrgHookRequestParams *CreateOrgHookRequestParams
+ GetApiOrgIds []string
+ AuthenticateHookRequestParams *AuthenticateHookRequestParams
+ DidFinishBuilderRunParams *DidFinishBuilderRunParams
+ FastApplyParams *FastApplyParams
+}
+
+type GetIntegratedModelsResult struct {
+ IntegratedModelsMode bool
+ AuthVars map[string]string
+}
+
+type FastApplyResult struct {
+ MergedCode string
+}
+
+type HookResult struct {
+ GetIntegratedModelsResult *GetIntegratedModelsResult
+ ApiOrgsById map[string]*shared.Org
+ FastApplyResult *FastApplyResult
+}
+
+type Hook func(params HookParams) (HookResult, *shared.ApiError)
+
+var hooks = make(map[string]Hook)
+
+func RegisterHook(name string, hook Hook) {
+ hooks[name] = hook
+}
+
+func ExecHook(name string, params HookParams) (HookResult, *shared.ApiError) {
+ hook, ok := hooks[name]
+ if !ok {
+ return HookResult{}, nil
+ }
+ return hook(params)
+}
+
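+// Illustrative usage (a sketch, not wired up in this file): an extended
+// build registers a hook at init time and call sites fire it by name. The
+// quota check below is hypothetical; only the RegisterHook/ExecHook pattern
+// itself comes from this package.
+//
+//	hooks.RegisterHook(hooks.WillCreatePlan, func(params hooks.HookParams) (hooks.HookResult, *shared.ApiError) {
+//		// e.g. enforce a per-org plan quota using params.Auth
+//		return hooks.HookResult{}, nil
+//	})
+//
+// When no hook is registered under a name, ExecHook returns an empty result
+// and a nil error, so callers can invoke it unconditionally.
+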
+func TestUpdate() {
+
+}
diff --git a/app/server/host/ip.go b/app/server/host/ip.go
new file mode 100644
index 0000000000000000000000000000000000000000..5468e04fa103a2254ee2e62f30269a1198658e7b
--- /dev/null
+++ b/app/server/host/ip.go
@@ -0,0 +1,76 @@
+package host
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "log"
+ "net/http"
+ "os"
+)
+
+var Ip string
+
+func LoadIp() error {
+ if os.Getenv("GOENV") == "development" {
+ Ip = "localhost"
+ return nil
+ }
+
+ if os.Getenv("IS_CLOUD") != "" {
+ var err error
+ Ip, err = getAwsIp()
+
+ if err != nil {
+ return fmt.Errorf("error getting AWS ECS IP: %v", err)
+ }
+
+ log.Println("Got AWS ECS IP: ", Ip)
+
+ } else if os.Getenv("IP") != "" {
+ Ip = os.Getenv("IP")
+ return nil
+ }
+
+ return nil
+}
+
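+// ecsMetadata captures the slice of the ECS container metadata response that
+// we need. For reference, the endpoint returns JSON shaped roughly like
+// (abbreviated; real responses carry more fields, and the address shown is
+// illustrative):
+//
+//	{"Networks": [{"NetworkMode": "awsvpc", "IPv4Addresses": ["10.0.2.106"]}]}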
+type ecsMetadata struct {
+ Networks []struct {
+ IPv4Addresses []string `json:"IPv4Addresses"`
+ } `json:"Networks"`
+}
+
+var awsIp string
+
+func getAwsIp() (string, error) {
+ ecsMetadataURL := os.Getenv("ECS_CONTAINER_METADATA_URI")
+
+ log.Printf("Getting ECS metadata from %s\n", ecsMetadataURL)
+
+ resp, err := http.Get(ecsMetadataURL)
+ if err != nil {
+ return "", err
+ }
+ defer resp.Body.Close()
+
+ body, err := io.ReadAll(resp.Body)
+ if err != nil {
+ return "", err
+ }
+
+ var metadata ecsMetadata
+ err = json.Unmarshal(body, &metadata)
+ if err != nil {
+ return "", err
+ }
+
+ if len(metadata.Networks) == 0 || len(metadata.Networks[0].IPv4Addresses) == 0 {
+ return "", errors.New("no IP address found in ECS metadata")
+ }
+
+ awsIp = metadata.Networks[0].IPv4Addresses[0]
+
+ return awsIp, nil
+}
diff --git a/app/server/litellm_proxy.py b/app/server/litellm_proxy.py
new file mode 100644
index 0000000000000000000000000000000000000000..178094a99c3baaf29bd4f3cc0a46b0e66906b623
--- /dev/null
+++ b/app/server/litellm_proxy.py
@@ -0,0 +1,183 @@
+from litellm.llms.anthropic.common_utils import AnthropicModelInfo
+from typing import List, Optional
+
+_orig_get_hdrs = AnthropicModelInfo.get_anthropic_headers
+
+def _oauth_get_hdrs(
+ self,
+ api_key: str,
+ anthropic_version: Optional[str] = None,
+ computer_tool_used: bool = False,
+ prompt_caching_set: bool = False,
+ pdf_used: bool = False,
+ file_id_used: bool = False,
+ mcp_server_used: bool = False,
+ is_vertex_request: bool = False,
+ user_anthropic_beta_headers: Optional[List[str]] = None,
+):
+ # call the original builder first
+ hdrs = _orig_get_hdrs(
+ self,
+ api_key=api_key,
+ anthropic_version=anthropic_version,
+ computer_tool_used=computer_tool_used,
+ prompt_caching_set=prompt_caching_set,
+ pdf_used=pdf_used,
+ file_id_used=file_id_used,
+ mcp_server_used=mcp_server_used,
+ is_vertex_request=is_vertex_request,
+ user_anthropic_beta_headers=user_anthropic_beta_headers,
+ )
+
+ # remove x-api-key when we detect an OAuth access-token
+ print(f"api_key: {api_key}")
+ if api_key and api_key.startswith(("sk-ant-oat", "sk-ant-oau")):
+ hdrs["anthropic-beta"] = "oauth-2025-04-20"
+ hdrs["anthropic-product"] = "claude-code"
+ hdrs.pop("x-api-key", None)
+
+ print(f"Anthropic headers: {hdrs}")
+
+ return hdrs
+
+# monkey-patch AnthropicModelInfo.get_anthropic_headers to handle OAuth headers
+AnthropicModelInfo.get_anthropic_headers = _oauth_get_hdrs
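+
+# With the patch in place, a request authenticated with an OAuth access token
+# (prefix "sk-ant-oat"/"sk-ant-oau" per the check above) goes out with
+# `anthropic-beta: oauth-2025-04-20` and `anthropic-product: claude-code` set
+# and without `x-api-key`; regular API keys pass through unchanged.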
+
+from fastapi import FastAPI, Request
+from fastapi.responses import StreamingResponse, JSONResponse
+from litellm import completion, _turn_on_debug
+import json
+import re
+
+# _turn_on_debug()
+
+LOGGING_ENABLED = False
+
+print("Litellm proxy: starting proxy server on port 4000...")
+
+app = FastAPI()
+
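+# liveness probe; e.g. `curl localhost:4000/health` should return
+# {"status": "ok"} (port 4000 per the startup log above)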
+@app.get("/health")
+async def health():
+ return {"status": "ok"}
+
+@app.post("/v1/chat/completions")
+async def passthrough(request: Request):
+ payload = await request.json()
+
+ if LOGGING_ENABLED:
+ # Log the request data for debugging
+ try:
+ # Get headers (excluding authorization to avoid logging credentials)
+ headers = dict(request.headers)
+ if "Authorization" in headers:
+ headers["Authorization"] = "Bearer [REDACTED]"
+ if "api-key" in headers:
+ headers["api-key"] = "[REDACTED]"
+
+ # Create a log-friendly representation
+ request_data = {
+ "method": request.method,
+ "url": str(request.url),
+ "headers": headers,
+ "body": payload
+ }
+
+ # Log the request data
+ print("Incoming request to /v1/chat/completions:")
+ print(json.dumps(request_data, indent=2))
+ except Exception as e:
+ print(f"Error logging request: {str(e)}")
+
+ model = payload.get("model", None)
+ print(f"Litellm proxy: calling model: {model}")
+
+ api_key = payload.pop("api_key", None)
+
+ if not api_key:
+ api_key = request.headers.get("Authorization")
+
+ if not api_key:
+ api_key = request.headers.get("api-key")
+
+ if api_key and api_key.startswith("Bearer "):
+        api_key = api_key.removeprefix("Bearer ")
+
+ # api key optional for local/ollama models, so no need to error if not provided
+
+ # clean up for ollama if needed
+ payload = normalise_for_ollama(payload)
+
+ try:
+ if payload.get("stream"):
+
+ try:
+ response_stream = completion(api_key=api_key, **payload)
+ except Exception as e:
+ return error_response(e)
+ def stream_generator():
+ try:
+ for chunk in response_stream:
+ yield f"data: {json.dumps(chunk.to_dict())}\n\n"
+ yield "data: [DONE]\n\n"
+ except Exception as e:
+ # surface the problem to the client _inside_ the SSE stream
+ yield f"data: {json.dumps({'error': str(e)})}\n\n"
+ return
+
+ finally:
+ try:
+ response_stream.close()
+ except AttributeError:
+ pass
+
+ print(f"Litellm proxy: Initiating streaming response for model: {payload.get('model', 'unknown')}")
+ return StreamingResponse(stream_generator(), media_type="text/event-stream")
+
+ else:
+ print(f"Litellm proxy: Non-streaming response requested for model: {payload.get('model', 'unknown')}")
+ try:
+ result = completion(api_key=api_key, **payload)
+ except Exception as e:
+ return error_response(e)
+            # litellm's ModelResponse isn't directly JSON-serializable; convert first
+            return JSONResponse(content=result.to_dict())
+
+ except Exception as e:
+ err_msg = str(e)
+ print(f"Litellm proxy: Error: {err_msg}")
+ status_match = re.search(r"status code: (\d+)", err_msg)
+ if status_match:
+ status_code = int(status_match.group(1))
+ else:
+ status_code = 500
+ return JSONResponse(
+ status_code=status_code,
+ content={"error": err_msg}
+ )
+
+def error_response(exc: Exception) -> JSONResponse:
+ status = getattr(exc, "status_code", 500)
+ retry_after = (
+ getattr(getattr(exc, "response", None), "headers", {})
+ .get("Retry-After")
+ )
+ hdrs = {"Retry-After": retry_after} if retry_after else {}
+ return JSONResponse(status_code=status, content={"error": str(exc)}, headers=hdrs)
+
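+# Example of the flattening below (message values illustrative): an
+# Ollama-bound payload with structured content parts such as
+#   {"role": "user", "content": [{"type": "text", "text": "hi"}]}
+# becomes {"role": "user", "content": "hi"}, and params Ollama ignores are
+# dropped entirely.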
+def normalise_for_ollama(p):
+ if not p.get("model", "").startswith("ollama"):
+ return p
+
+ # flatten content parts
+ for m in p.get("messages", []):
+ if isinstance(m["content"], list): # [{type:"text", text:"…"}]
+ m["content"] = "".join(part.get("text", "")
+ for part in m["content"]
+ if part.get("type") == "text")
+
+ # drop params Ollama ignores
+ for k in ("top_p", "temperature", "presence_penalty",
+ "tool_choice", "tools", "seed"):
+ p.pop(k, None)
+
+ return p
\ No newline at end of file
diff --git a/app/server/main.go b/app/server/main.go
new file mode 100644
index 0000000000000000000000000000000000000000..a6639522ff598d37dad7980bfafb04b06a4c3424
--- /dev/null
+++ b/app/server/main.go
@@ -0,0 +1,38 @@
+package main
+
+import (
+ "fmt"
+ "log"
+ "os"
+ "plandex-server/model"
+ "plandex-server/routes"
+ "plandex-server/setup"
+
+ "github.com/gorilla/mux"
+)
+
+func main() {
+	// Configure the default logger to include microsecond timestamps and source file/line info
+ log.SetFlags(log.LstdFlags | log.Lmicroseconds | log.Lshortfile)
+
+ routes.RegisterHandlePlandex(func(router *mux.Router, path string, isStreaming bool, handler routes.PlandexHandler) *mux.Route {
+ return router.HandleFunc(path, handler)
+ })
+
+ err := model.EnsureLiteLLM(2)
+ if err != nil {
+ panic(fmt.Sprintf("Failed to start LiteLLM proxy: %v", err))
+ }
+ setup.RegisterShutdownHook(func() {
+ model.ShutdownLiteLLMServer()
+ })
+
+ r := mux.NewRouter()
+ routes.AddHealthRoutes(r)
+ routes.AddApiRoutes(r)
+ routes.AddProxyableApiRoutes(r)
+ setup.MustLoadIp()
+ setup.MustInitDb()
+ setup.StartServer(r, nil, nil)
+ os.Exit(0)
+}
diff --git a/app/server/migrations/2023120500_init.down.sql b/app/server/migrations/2023120500_init.down.sql
new file mode 100644
index 0000000000000000000000000000000000000000..2996e4120c132dbdd3b219e6245d52bbc5ba881c
--- /dev/null
+++ b/app/server/migrations/2023120500_init.down.sql
@@ -0,0 +1,16 @@
+DROP TABLE IF EXISTS branches;
+
+DROP TABLE IF EXISTS convo_summaries;
+DROP TABLE IF EXISTS plan_builds;
+
+DROP TABLE IF EXISTS users_projects;
+DROP TABLE IF EXISTS plans;
+DROP TABLE IF EXISTS projects;
+DROP TABLE IF EXISTS orgs_users;
+
+DROP TABLE IF EXISTS email_verifications;
+DROP TABLE IF EXISTS auth_tokens;
+DROP TABLE IF EXISTS invites;
+
+DROP TABLE IF EXISTS orgs;
+DROP TABLE IF EXISTS users;
diff --git a/app/server/migrations/2023120500_init.up.sql b/app/server/migrations/2023120500_init.up.sql
new file mode 100644
index 0000000000000000000000000000000000000000..066cbfdc70fb3b2c0b196480fd3d6eb33265f818
--- /dev/null
+++ b/app/server/migrations/2023120500_init.up.sql
@@ -0,0 +1,176 @@
+CREATE EXTENSION IF NOT EXISTS "uuid-ossp";
+
+CREATE OR REPLACE FUNCTION update_updated_at_column()
+RETURNS TRIGGER AS $$
+BEGIN
+ NEW.updated_at = NOW();
+ RETURN NEW;
+END;
+$$ LANGUAGE plpgsql;
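+
+-- attached to each mutable table below via a BEFORE UPDATE trigger so that
+-- updated_at always reflects the last modification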
+
+CREATE TABLE IF NOT EXISTS users (
+ id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
+ name VARCHAR(255) NOT NULL,
+ email VARCHAR(255) NOT NULL,
+ domain VARCHAR(255) NOT NULL,
+ is_trial BOOLEAN NOT NULL,
+ num_non_draft_plans INTEGER NOT NULL DEFAULT 0,
+ created_at TIMESTAMP NOT NULL DEFAULT NOW(),
+ updated_at TIMESTAMP NOT NULL DEFAULT NOW()
+);
+CREATE TRIGGER update_users_modtime BEFORE UPDATE ON users FOR EACH ROW EXECUTE FUNCTION update_updated_at_column();
+
+ALTER TABLE users ADD UNIQUE (email);
+CREATE INDEX users_domain_idx ON users(domain);
+
+CREATE TABLE IF NOT EXISTS orgs (
+ id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
+ name VARCHAR(255) NOT NULL,
+ domain VARCHAR(255),
+ auto_add_domain_users BOOLEAN NOT NULL DEFAULT FALSE,
+ owner_id UUID NOT NULL REFERENCES users(id),
+ is_trial BOOLEAN NOT NULL,
+ created_at TIMESTAMP NOT NULL DEFAULT NOW(),
+ updated_at TIMESTAMP NOT NULL DEFAULT NOW()
+);
+CREATE TRIGGER update_orgs_modtime BEFORE UPDATE ON orgs FOR EACH ROW EXECUTE FUNCTION update_updated_at_column();
+
+ALTER TABLE orgs ADD UNIQUE (domain);
+
+CREATE TABLE IF NOT EXISTS orgs_users (
+ id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
+ org_id UUID NOT NULL REFERENCES orgs(id) ON DELETE CASCADE,
+ user_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE,
+ created_at TIMESTAMP NOT NULL DEFAULT NOW(),
+ updated_at TIMESTAMP NOT NULL DEFAULT NOW()
+);
+CREATE TRIGGER update_orgs_users_modtime BEFORE UPDATE ON orgs_users FOR EACH ROW EXECUTE FUNCTION update_updated_at_column();
+
+CREATE INDEX orgs_users_user_idx ON orgs_users(user_id);
+CREATE INDEX orgs_users_org_idx ON orgs_users(org_id);
+
+CREATE TABLE IF NOT EXISTS invites (
+ id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
+ org_id UUID NOT NULL REFERENCES orgs(id) ON DELETE CASCADE,
+ name VARCHAR(255) NOT NULL,
+ email VARCHAR(255) NOT NULL,
+ inviter_id UUID NOT NULL REFERENCES users(id),
+ invitee_id UUID REFERENCES users(id),
+ accepted_at TIMESTAMP,
+ created_at TIMESTAMP NOT NULL DEFAULT NOW(),
+ updated_at TIMESTAMP NOT NULL DEFAULT NOW()
+);
+CREATE TRIGGER update_invites_modtime BEFORE UPDATE ON invites FOR EACH ROW EXECUTE FUNCTION update_updated_at_column();
+
+CREATE INDEX invites_pending_idx ON invites(org_id, (accepted_at IS NULL));
+CREATE INDEX invites_email_idx ON invites(email, (accepted_at IS NULL));
+CREATE INDEX invites_org_user_idx ON invites(org_id, invitee_id);
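+-- the (accepted_at IS NULL) expression in the two indexes above
+-- distinguishes pending invites from accepted ones without a separate
+-- status column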
+
+CREATE TABLE IF NOT EXISTS auth_tokens (
+ id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
+ user_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE,
+ token_hash VARCHAR(64) NOT NULL,
+ is_trial BOOLEAN NOT NULL DEFAULT FALSE,
+ created_at TIMESTAMP NOT NULL DEFAULT NOW(),
+ deleted_at TIMESTAMP
+);
+CREATE UNIQUE INDEX auth_tokens_idx ON auth_tokens(token_hash);
+
+CREATE TABLE IF NOT EXISTS email_verifications (
+ id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
+ email VARCHAR(255) NOT NULL,
+ pin_hash VARCHAR(64) NOT NULL,
+ user_id UUID REFERENCES users(id),
+ auth_token_id UUID REFERENCES auth_tokens(id),
+ created_at TIMESTAMP NOT NULL DEFAULT NOW(),
+ updated_at TIMESTAMP NOT NULL DEFAULT NOW()
+);
+CREATE TRIGGER update_email_verifications_modtime BEFORE UPDATE ON email_verifications FOR EACH ROW EXECUTE FUNCTION update_updated_at_column();
+
+CREATE UNIQUE INDEX email_verifications_idx ON email_verifications(pin_hash, email, created_at DESC);
+
+CREATE TABLE IF NOT EXISTS projects (
+ id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
+ org_id UUID NOT NULL REFERENCES orgs(id) ON DELETE CASCADE,
+ name VARCHAR(255) NOT NULL,
+ created_at TIMESTAMP NOT NULL DEFAULT NOW(),
+ updated_at TIMESTAMP NOT NULL DEFAULT NOW()
+);
+CREATE TRIGGER update_projects_modtime BEFORE UPDATE ON projects FOR EACH ROW EXECUTE FUNCTION update_updated_at_column();
+
+CREATE TABLE IF NOT EXISTS plans (
+ id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
+ org_id UUID NOT NULL REFERENCES orgs(id) ON DELETE CASCADE,
+ owner_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE,
+ project_id UUID NOT NULL REFERENCES projects(id) ON DELETE CASCADE,
+ name VARCHAR(255) NOT NULL,
+ shared_with_org_at TIMESTAMP,
+ total_replies INTEGER NOT NULL DEFAULT 0,
+ active_branches INTEGER NOT NULL DEFAULT 0,
+ archived_at TIMESTAMP,
+ created_at TIMESTAMP NOT NULL DEFAULT NOW(),
+ updated_at TIMESTAMP NOT NULL DEFAULT NOW()
+);
+CREATE TRIGGER update_plans_modtime BEFORE UPDATE ON plans FOR EACH ROW EXECUTE FUNCTION update_updated_at_column();
+
+CREATE INDEX plans_name_idx ON plans(project_id, owner_id, name);
+CREATE INDEX plans_archived_idx ON plans(project_id, owner_id, archived_at);
+
+CREATE TABLE IF NOT EXISTS branches (
+ id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
+ org_id UUID NOT NULL REFERENCES orgs(id) ON DELETE CASCADE,
+ owner_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE,
+ plan_id UUID NOT NULL REFERENCES plans(id) ON DELETE CASCADE,
+ parent_branch_id UUID REFERENCES branches(id) ON DELETE CASCADE,
+ name VARCHAR(255) NOT NULL,
+ status VARCHAR(32) NOT NULL,
+ error TEXT,
+ context_tokens INTEGER NOT NULL DEFAULT 0,
+ convo_tokens INTEGER NOT NULL DEFAULT 0,
+ shared_with_org_at TIMESTAMP,
+ archived_at TIMESTAMP,
+ created_at TIMESTAMP NOT NULL DEFAULT NOW(),
+ updated_at TIMESTAMP NOT NULL DEFAULT NOW(),
+ deleted_at TIMESTAMP
+);
+CREATE TRIGGER update_branches_modtime BEFORE UPDATE ON branches FOR EACH ROW EXECUTE FUNCTION update_updated_at_column();
+
+CREATE UNIQUE INDEX branches_name_idx ON branches(plan_id, name, archived_at, deleted_at);
+
+
+CREATE TABLE IF NOT EXISTS users_projects (
+ id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
+ org_id UUID NOT NULL REFERENCES orgs(id) ON DELETE CASCADE,
+ user_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE,
+ project_id UUID NOT NULL REFERENCES projects(id) ON DELETE CASCADE,
+ last_active_plan_id UUID REFERENCES plans(id),
+ created_at TIMESTAMP NOT NULL DEFAULT NOW(),
+ updated_at TIMESTAMP NOT NULL DEFAULT NOW()
+);
+CREATE TRIGGER update_users_projects_modtime BEFORE UPDATE ON users_projects FOR EACH ROW EXECUTE FUNCTION update_updated_at_column();
+
+CREATE INDEX users_projects_idx ON users_projects(user_id, org_id, project_id);
+
+CREATE TABLE IF NOT EXISTS convo_summaries (
+ id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
+ org_id UUID NOT NULL REFERENCES orgs(id) ON DELETE CASCADE,
+ plan_id UUID NOT NULL REFERENCES plans(id) ON DELETE CASCADE,
+ latest_convo_message_id UUID NOT NULL,
+ latest_convo_message_created_at TIMESTAMP NOT NULL,
+ summary TEXT NOT NULL,
+ tokens INTEGER NOT NULL,
+ num_messages INTEGER NOT NULL,
+ created_at TIMESTAMP NOT NULL DEFAULT NOW()
+);
+
+CREATE TABLE IF NOT EXISTS plan_builds (
+ id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
+ org_id UUID NOT NULL REFERENCES orgs(id) ON DELETE CASCADE,
+ plan_id UUID NOT NULL REFERENCES plans(id) ON DELETE CASCADE,
+ convo_message_id UUID NOT NULL,
+ file_path VARCHAR(255) NOT NULL,
+ error TEXT,
+ created_at TIMESTAMP NOT NULL DEFAULT NOW(),
+ updated_at TIMESTAMP NOT NULL DEFAULT NOW()
+);
+CREATE TRIGGER update_plan_builds_modtime BEFORE UPDATE ON plan_builds FOR EACH ROW EXECUTE FUNCTION update_updated_at_column();
\ No newline at end of file
diff --git a/app/server/migrations/2024011700_rbac.down.sql b/app/server/migrations/2024011700_rbac.down.sql
new file mode 100644
index 0000000000000000000000000000000000000000..4bcf21738a8b8ad43efb400e977f7c91573ab150
--- /dev/null
+++ b/app/server/migrations/2024011700_rbac.down.sql
@@ -0,0 +1,8 @@
+ALTER TABLE orgs_users DROP COLUMN org_role_id;
+ALTER TABLE invites DROP COLUMN org_role_id;
+
+DROP TABLE IF EXISTS org_roles_permissions;
+DROP TABLE IF EXISTS permissions;
+DROP TABLE IF EXISTS org_roles;
+
+
diff --git a/app/server/migrations/2024011700_rbac.up.sql b/app/server/migrations/2024011700_rbac.up.sql
new file mode 100644
index 0000000000000000000000000000000000000000..a51f7a623926e5b688acd79533e7dc820b29e1af
--- /dev/null
+++ b/app/server/migrations/2024011700_rbac.up.sql
@@ -0,0 +1,133 @@
+
+CREATE TABLE IF NOT EXISTS org_roles (
+ id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
+ org_id UUID REFERENCES orgs(id) ON DELETE CASCADE,
+ name VARCHAR(255) NOT NULL,
+ label VARCHAR(255) NOT NULL,
+ description TEXT NOT NULL,
+ created_at TIMESTAMP NOT NULL DEFAULT NOW(),
+ updated_at TIMESTAMP NOT NULL DEFAULT NOW()
+);
+CREATE TRIGGER update_org_roles_modtime BEFORE UPDATE ON org_roles FOR EACH ROW EXECUTE FUNCTION update_updated_at_column();
+
+CREATE UNIQUE INDEX org_roles_org_idx ON org_roles(org_id, name);
+
+ALTER TABLE orgs_users ADD COLUMN org_role_id UUID NOT NULL REFERENCES org_roles(id) ON DELETE RESTRICT;
+CREATE INDEX orgs_users_org_role_idx ON orgs_users(org_id, org_role_id);
+
+ALTER TABLE invites ADD COLUMN org_role_id UUID NOT NULL REFERENCES org_roles(id) ON DELETE RESTRICT;
+CREATE INDEX invites_org_role_idx ON invites(org_id, org_role_id);
+
+CREATE TABLE IF NOT EXISTS permissions (
+ id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
+ name VARCHAR(255) NOT NULL,
+ description TEXT NOT NULL,
+ resource_id UUID,
+ created_at TIMESTAMP NOT NULL DEFAULT NOW()
+);
+
+CREATE TABLE IF NOT EXISTS org_roles_permissions (
+ id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
+ org_role_id UUID NOT NULL REFERENCES org_roles(id) ON DELETE CASCADE,
+ permission_id UUID NOT NULL REFERENCES permissions(id) ON DELETE CASCADE,
+ created_at TIMESTAMP NOT NULL DEFAULT NOW()
+);
+
+INSERT INTO org_roles (name, label, description) VALUES
+ ('owner', 'Owner', 'Can read and update any plan, invite other owners/admins/members, manage email domain auth, manage billing, read audit logs, delete the org'),
+ ('billing_admin', 'Billing Admin', 'Can manage billing'),
+ ('admin', 'Admin', 'Can read and update any plan, invite other admins/members'),
+ ('member', 'Member', 'Can read and update their own plans or plans shared with them');
+
+DO $$
+DECLARE
+ owner_org_role_id UUID;
+ billing_admin_org_role_id UUID;
+ admin_org_role_id UUID;
+ member_org_role_id UUID;
+BEGIN
+ SELECT id INTO owner_org_role_id FROM org_roles WHERE org_id IS NULL AND name = 'owner';
+ SELECT id INTO billing_admin_org_role_id FROM org_roles WHERE org_id IS NULL AND name = 'billing_admin';
+ SELECT id INTO admin_org_role_id FROM org_roles WHERE org_id IS NULL AND name = 'admin';
+ SELECT id INTO member_org_role_id FROM org_roles WHERE org_id IS NULL AND name = 'member';
+
+ INSERT INTO permissions (name, description, resource_id) VALUES
+ ('delete_org', 'Delete an org', NULL),
+ ('manage_email_domain_auth', 'Configure whether orgs_users from the org''s email domain are auto-admitted to org', NULL),
+ ('manage_billing', 'Manage an org''s billing', NULL),
+
+ ('invite_user', 'Invite owners to an org', owner_org_role_id),
+ ('invite_user', 'Invite admins to an org', admin_org_role_id),
+ ('invite_user', 'Invite billing admins to an org', billing_admin_org_role_id),
+ ('invite_user', 'Invite members to an org', member_org_role_id),
+
+ ('remove_user', 'Remove owners from an org', owner_org_role_id),
+ ('remove_user', 'Remove admins from an org', admin_org_role_id),
+ ('remove_user', 'Remove billing admins from an org', billing_admin_org_role_id),
+ ('remove_user', 'Remove members from an org', member_org_role_id),
+
+ ('set_user_role', 'Update an owner''s role in an org', owner_org_role_id),
+ ('set_user_role', 'Update an admin''s role in an org', admin_org_role_id),
+ ('set_user_role', 'Update a billing admin''s role in an org', billing_admin_org_role_id),
+ ('set_user_role', 'Update a member''s role in an org', member_org_role_id),
+
+ ('list_org_roles', 'List org roles', NULL),
+
+ ('create_project', 'Create a project', NULL),
+ ('rename_any_project', 'Rename a project', NULL),
+ ('delete_any_project', 'Delete a project', NULL),
+
+ ('create_plan', 'Create a plan', NULL),
+
+ ('manage_any_plan_shares', 'Unshare a plan any user shared', NULL),
+ ('rename_any_plan', 'Rename a plan', NULL),
+ ('delete_any_plan', 'Delete a plan', NULL),
+ ('update_any_plan', 'Update a plan', NULL),
+ ('archive_any_plan', 'Archive a plan', NULL);
+END $$;
+
+-- Insert all permissions for the 'org owner' role
+INSERT INTO org_roles_permissions (org_role_id, permission_id)
+SELECT
+ (SELECT id FROM org_roles WHERE name = 'owner') AS org_role_id,
+ p.id AS permission_id
+FROM
+ permissions p;
+
+-- For the 'admin' role: insert all permissions except the org-management ones
+-- ('delete_org', 'manage_email_domain_auth', 'manage_billing') and those
+-- scoped to the 'owner' or 'billing_admin' roles
+INSERT INTO org_roles_permissions (org_role_id, permission_id)
+SELECT
+ (SELECT id FROM org_roles WHERE name = 'admin') AS org_role_id,
+ p.id AS permission_id
+FROM
+ permissions p
+WHERE
+ p.name NOT IN ('delete_org', 'manage_email_domain_auth', 'manage_billing')
+ AND NOT EXISTS (
+ SELECT 1 FROM permissions p2
+ WHERE p2.resource_id IN (SELECT id FROM org_roles WHERE name IN ('owner', 'billing_admin'))
+ AND p2.id = p.id
+ );
+
+INSERT INTO org_roles_permissions (org_role_id, permission_id)
+SELECT
+ (SELECT id FROM org_roles WHERE name = 'billing_admin') AS org_role_id,
+ p.id AS permission_id
+FROM
+ permissions p
+WHERE
+ p.name IN (
+ 'manage_billing'
+ );
+
+INSERT INTO org_roles_permissions (org_role_id, permission_id)
+SELECT
+ (SELECT id FROM org_roles WHERE name = 'member') AS org_role_id,
+ p.id AS permission_id
+FROM
+ permissions p
+WHERE
+ p.name IN (
+ 'create_project',
+ 'create_plan'
+ );
diff --git a/app/server/migrations/2024012400_streams.down.sql b/app/server/migrations/2024012400_streams.down.sql
new file mode 100644
index 0000000000000000000000000000000000000000..e7ab755552bc7170201a3226e719795ae1168d05
--- /dev/null
+++ b/app/server/migrations/2024012400_streams.down.sql
@@ -0,0 +1,2 @@
+DROP TABLE IF EXISTS model_streams;
+-- DROP TABLE IF EXISTS model_stream_subscriptions;
\ No newline at end of file
diff --git a/app/server/migrations/2024012400_streams.up.sql b/app/server/migrations/2024012400_streams.up.sql
new file mode 100644
index 0000000000000000000000000000000000000000..a8c27c7d00c743cbf90989cdc8ad6d5aa5c50b7a
--- /dev/null
+++ b/app/server/migrations/2024012400_streams.up.sql
@@ -0,0 +1,23 @@
+CREATE TABLE IF NOT EXISTS model_streams (
+ id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
+ org_id UUID NOT NULL REFERENCES orgs(id) ON DELETE CASCADE,
+ plan_id UUID NOT NULL REFERENCES plans(id) ON DELETE CASCADE,
+ branch VARCHAR(255) NOT NULL,
+ internal_ip VARCHAR(45) NOT NULL,
+ created_at TIMESTAMP NOT NULL DEFAULT NOW(),
+ finished_at TIMESTAMP
+);
+
+CREATE UNIQUE INDEX model_streams_plan_idx ON model_streams(plan_id, branch, finished_at);
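+-- note: Postgres treats NULLs as distinct in unique indexes by default, so
+-- this uniqueness only applies to finished streams; multiple rows with
+-- finished_at IS NULL can coexist for the same plan and branch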
+
+-- CREATE TABLE IF NOT EXISTS model_stream_subscriptions (
+-- id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
+-- org_id UUID NOT NULL REFERENCES orgs(id) ON DELETE CASCADE,
+-- plan_id UUID NOT NULL REFERENCES plans(id) ON DELETE CASCADE,
+-- model_stream_id UUID NOT NULL REFERENCES model_streams(id) ON DELETE CASCADE,
+-- user_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE,
+-- user_ip VARCHAR(45) NOT NULL,
+-- created_at TIMESTAMP NOT NULL DEFAULT NOW()
+-- finished_at TIMESTAMP
+-- );
+
diff --git a/app/server/migrations/2024012500_locks.down.sql b/app/server/migrations/2024012500_locks.down.sql
new file mode 100644
index 0000000000000000000000000000000000000000..13a5a8ec9299fa6b4a044166bfb87fa3d8b6e173
--- /dev/null
+++ b/app/server/migrations/2024012500_locks.down.sql
@@ -0,0 +1 @@
+DROP TABLE IF EXISTS repo_locks;
\ No newline at end of file
diff --git a/app/server/migrations/2024012500_locks.up.sql b/app/server/migrations/2024012500_locks.up.sql
new file mode 100644
index 0000000000000000000000000000000000000000..2ba06e27f62f9a3398db52bcbcd2d6070581d0c8
--- /dev/null
+++ b/app/server/migrations/2024012500_locks.up.sql
@@ -0,0 +1,12 @@
+CREATE UNLOGGED TABLE IF NOT EXISTS repo_locks (
+ id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
+ org_id UUID NOT NULL REFERENCES orgs(id) ON DELETE CASCADE,
+ user_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE,
+ plan_id UUID NOT NULL REFERENCES plans(id) ON DELETE CASCADE,
+ plan_build_id UUID REFERENCES plan_builds(id) ON DELETE CASCADE,
+ scope VARCHAR(1) NOT NULL,
+ branch VARCHAR(255),
+ created_at TIMESTAMP NOT NULL DEFAULT NOW()
+);
+
+CREATE INDEX repo_locks_plan_idx ON repo_locks(plan_id);
diff --git a/app/server/migrations/2024013000_plan_build_convo_ids.down.sql b/app/server/migrations/2024013000_plan_build_convo_ids.down.sql
new file mode 100644
index 0000000000000000000000000000000000000000..1dfa59ca7f9d60685524f142dfc4dc30fd29b63e
--- /dev/null
+++ b/app/server/migrations/2024013000_plan_build_convo_ids.down.sql
@@ -0,0 +1,5 @@
+ALTER TABLE plan_builds
+ RENAME COLUMN convo_message_ids TO convo_message_id;
+
+ALTER TABLE plan_builds
+ ALTER COLUMN convo_message_id TYPE UUID USING (convo_message_id[1]);
\ No newline at end of file
diff --git a/app/server/migrations/2024013000_plan_build_convo_ids.up.sql b/app/server/migrations/2024013000_plan_build_convo_ids.up.sql
new file mode 100644
index 0000000000000000000000000000000000000000..c8d3dcb0401aff7070f95440303898771aa69040
--- /dev/null
+++ b/app/server/migrations/2024013000_plan_build_convo_ids.up.sql
@@ -0,0 +1,5 @@
+ALTER TABLE plan_builds
+ RENAME COLUMN convo_message_id TO convo_message_ids;
+
+ALTER TABLE plan_builds
+ ALTER COLUMN convo_message_ids TYPE UUID[] USING ARRAY[convo_message_ids];
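+
+-- USING ARRAY[...] wraps each existing single UUID in a one-element array,
+-- so the column widening preserves existing data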
diff --git a/app/server/migrations/2024020800_heartbeats.down.sql b/app/server/migrations/2024020800_heartbeats.down.sql
new file mode 100644
index 0000000000000000000000000000000000000000..149cbb351aee032233c5c5a28f080a83ca7d25cf
--- /dev/null
+++ b/app/server/migrations/2024020800_heartbeats.down.sql
@@ -0,0 +1,2 @@
+ALTER TABLE repo_locks DROP COLUMN last_heartbeat_at;
+ALTER TABLE model_streams DROP COLUMN last_heartbeat_at;
\ No newline at end of file
diff --git a/app/server/migrations/2024020800_heartbeats.up.sql b/app/server/migrations/2024020800_heartbeats.up.sql
new file mode 100644
index 0000000000000000000000000000000000000000..82ad644ff69f185619d9bb43c656dd6f13f71a23
--- /dev/null
+++ b/app/server/migrations/2024020800_heartbeats.up.sql
@@ -0,0 +1,2 @@
+ALTER TABLE repo_locks ADD COLUMN last_heartbeat_at TIMESTAMP NOT NULL DEFAULT NOW();
+ALTER TABLE model_streams ADD COLUMN last_heartbeat_at TIMESTAMP NOT NULL DEFAULT NOW();
\ No newline at end of file
diff --git a/app/server/migrations/2024022000_revert_plan_build_convo_ids.down.sql b/app/server/migrations/2024022000_revert_plan_build_convo_ids.down.sql
new file mode 100644
index 0000000000000000000000000000000000000000..c8d3dcb0401aff7070f95440303898771aa69040
--- /dev/null
+++ b/app/server/migrations/2024022000_revert_plan_build_convo_ids.down.sql
@@ -0,0 +1,5 @@
+ALTER TABLE plan_builds
+ RENAME COLUMN convo_message_id TO convo_message_ids;
+
+ALTER TABLE plan_builds
+ ALTER COLUMN convo_message_ids TYPE UUID[] USING ARRAY[convo_message_ids];
diff --git a/app/server/migrations/2024022000_revert_plan_build_convo_ids.up.sql b/app/server/migrations/2024022000_revert_plan_build_convo_ids.up.sql
new file mode 100644
index 0000000000000000000000000000000000000000..1dfa59ca7f9d60685524f142dfc4dc30fd29b63e
--- /dev/null
+++ b/app/server/migrations/2024022000_revert_plan_build_convo_ids.up.sql
@@ -0,0 +1,5 @@
+ALTER TABLE plan_builds
+ RENAME COLUMN convo_message_ids TO convo_message_id;
+
+ALTER TABLE plan_builds
+ ALTER COLUMN convo_message_id TYPE UUID USING (convo_message_id[1]);
\ No newline at end of file
diff --git a/app/server/migrations/2024032700_remove_billing_admin.down.sql b/app/server/migrations/2024032700_remove_billing_admin.down.sql
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/app/server/migrations/2024032700_remove_billing_admin.up.sql b/app/server/migrations/2024032700_remove_billing_admin.up.sql
new file mode 100644
index 0000000000000000000000000000000000000000..39fb77f746142c2affbaf76fecb00ecbccca538f
--- /dev/null
+++ b/app/server/migrations/2024032700_remove_billing_admin.up.sql
@@ -0,0 +1 @@
+DELETE FROM org_roles WHERE name = 'billing_admin';
\ No newline at end of file
diff --git a/app/server/migrations/2024032701_drop_users_projects.down.sql b/app/server/migrations/2024032701_drop_users_projects.down.sql
new file mode 100644
index 0000000000000000000000000000000000000000..27cfdba1f93792e617dca428838cc7a3f9fb6e4c
--- /dev/null
+++ b/app/server/migrations/2024032701_drop_users_projects.down.sql
@@ -0,0 +1,12 @@
+CREATE TABLE IF NOT EXISTS users_projects (
+ id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
+ org_id UUID NOT NULL REFERENCES orgs(id) ON DELETE CASCADE,
+ user_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE,
+ project_id UUID NOT NULL REFERENCES projects(id) ON DELETE CASCADE,
+ last_active_plan_id UUID REFERENCES plans(id),
+ created_at TIMESTAMP NOT NULL DEFAULT NOW(),
+ updated_at TIMESTAMP NOT NULL DEFAULT NOW()
+);
+CREATE TRIGGER update_users_projects_modtime BEFORE UPDATE ON users_projects FOR EACH ROW EXECUTE FUNCTION update_updated_at_column();
+
+CREATE INDEX users_projects_idx ON users_projects(user_id, org_id, project_id);
\ No newline at end of file
diff --git a/app/server/migrations/2024032701_drop_users_projects.up.sql b/app/server/migrations/2024032701_drop_users_projects.up.sql
new file mode 100644
index 0000000000000000000000000000000000000000..02bd1b78e126e5e6c7d20f1e30472426a8ebe19e
--- /dev/null
+++ b/app/server/migrations/2024032701_drop_users_projects.up.sql
@@ -0,0 +1 @@
+DROP TABLE IF EXISTS users_projects;
\ No newline at end of file
diff --git a/app/server/migrations/2024040400_add_orgs_users_unique.down.sql b/app/server/migrations/2024040400_add_orgs_users_unique.down.sql
new file mode 100644
index 0000000000000000000000000000000000000000..9f41a01cbe7252cedf88e1153c7e9758e97c63e2
--- /dev/null
+++ b/app/server/migrations/2024040400_add_orgs_users_unique.down.sql
@@ -0,0 +1 @@
+ALTER TABLE orgs_users DROP CONSTRAINT org_user_unique;
\ No newline at end of file
diff --git a/app/server/migrations/2024040400_add_orgs_users_unique.up.sql b/app/server/migrations/2024040400_add_orgs_users_unique.up.sql
new file mode 100644
index 0000000000000000000000000000000000000000..67079496e900e9f15102c294ad3f06d544cd7c8f
--- /dev/null
+++ b/app/server/migrations/2024040400_add_orgs_users_unique.up.sql
@@ -0,0 +1,12 @@
+-- clean up any duplicates added mistakenly earlier
+WITH ranked_duplicates AS (
+ SELECT id,
+ ROW_NUMBER() OVER (PARTITION BY org_id, user_id ORDER BY created_at) AS rn
+ FROM orgs_users
+)
+DELETE FROM orgs_users
+WHERE id IN (
+ SELECT id FROM ranked_duplicates WHERE rn > 1
+);
+
+ALTER TABLE orgs_users ADD CONSTRAINT org_user_unique UNIQUE (org_id, user_id);
diff --git a/app/server/migrations/2024041500_model_sets_models.down.sql b/app/server/migrations/2024041500_model_sets_models.down.sql
new file mode 100644
index 0000000000000000000000000000000000000000..212c0991fe8d85dfa41746d73146d65ebf58a3d0
--- /dev/null
+++ b/app/server/migrations/2024041500_model_sets_models.down.sql
@@ -0,0 +1,2 @@
+DROP TABLE IF EXISTS model_sets;
+DROP TABLE IF EXISTS custom_models;
\ No newline at end of file
diff --git a/app/server/migrations/2024041500_model_sets_models.up.sql b/app/server/migrations/2024041500_model_sets_models.up.sql
new file mode 100644
index 0000000000000000000000000000000000000000..f9731f08ed387bdab492398e4692183b793c691e
--- /dev/null
+++ b/app/server/migrations/2024041500_model_sets_models.up.sql
@@ -0,0 +1,47 @@
+CREATE TABLE IF NOT EXISTS model_sets (
+ id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
+ org_id UUID NOT NULL REFERENCES orgs(id) ON DELETE CASCADE,
+
+ name VARCHAR(255) NOT NULL,
+ description TEXT,
+
+ planner JSON,
+ plan_summary JSON,
+ builder JSON,
+ namer JSON,
+ commit_msg JSON,
+ exec_status JSON,
+
+ created_at TIMESTAMP NOT NULL DEFAULT NOW()
+);
+
+CREATE INDEX model_sets_org_idx ON model_sets(org_id);
+
+CREATE TABLE IF NOT EXISTS custom_models (
+ id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
+ org_id UUID NOT NULL REFERENCES orgs(id) ON DELETE CASCADE,
+
+ provider VARCHAR(255) NOT NULL,
+ custom_provider VARCHAR(255),
+ base_url VARCHAR(255) NOT NULL,
+ model_name VARCHAR(255) NOT NULL,
+ description TEXT,
+ max_tokens INTEGER NOT NULL,
+ api_key_env_var VARCHAR(255),
+
+ is_openai_compatible BOOLEAN NOT NULL,
+ has_json_mode BOOLEAN NOT NULL,
+ has_streaming BOOLEAN NOT NULL,
+ has_function_calling BOOLEAN NOT NULL,
+ has_streaming_function_calls BOOLEAN NOT NULL,
+
+ default_max_convo_tokens INTEGER NOT NULL,
+ default_reserved_output_tokens INTEGER NOT NULL,
+
+ updated_at TIMESTAMP NOT NULL DEFAULT NOW(),
+ created_at TIMESTAMP NOT NULL DEFAULT NOW()
+);
+
+CREATE TRIGGER update_custom_models_modtime BEFORE UPDATE ON custom_models FOR EACH ROW EXECUTE FUNCTION update_updated_at_column();
+
+CREATE INDEX custom_models_org_idx ON custom_models(org_id);
diff --git a/app/server/migrations/2024042600_default_plan_settings.down.sql b/app/server/migrations/2024042600_default_plan_settings.down.sql
new file mode 100644
index 0000000000000000000000000000000000000000..5955fffc4aa4dd14a51d3b803f49da0c93ea9ab7
--- /dev/null
+++ b/app/server/migrations/2024042600_default_plan_settings.down.sql
@@ -0,0 +1 @@
+DROP TABLE IF EXISTS default_plan_settings;
\ No newline at end of file
diff --git a/app/server/migrations/2024042600_default_plan_settings.up.sql b/app/server/migrations/2024042600_default_plan_settings.up.sql
new file mode 100644
index 0000000000000000000000000000000000000000..d574339861fbd9dfd78351a85675cf1837eb37f2
--- /dev/null
+++ b/app/server/migrations/2024042600_default_plan_settings.up.sql
@@ -0,0 +1,12 @@
+CREATE TABLE IF NOT EXISTS default_plan_settings (
+ id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
+ org_id UUID NOT NULL REFERENCES orgs(id) ON DELETE CASCADE,
+
+ plan_settings JSON,
+
+ updated_at TIMESTAMP NOT NULL DEFAULT NOW(),
+ created_at TIMESTAMP NOT NULL DEFAULT NOW()
+);
+CREATE TRIGGER update_default_plan_settings_modtime BEFORE UPDATE ON default_plan_settings FOR EACH ROW EXECUTE FUNCTION update_updated_at_column();
+
+CREATE UNIQUE INDEX default_plan_settings_org_idx ON default_plan_settings(org_id);
diff --git a/app/server/migrations/2024091800_sign_in_codes.down.sql b/app/server/migrations/2024091800_sign_in_codes.down.sql
new file mode 100644
index 0000000000000000000000000000000000000000..14e7ad8ff58ec8fbdb6a0191515f051fb132d753
--- /dev/null
+++ b/app/server/migrations/2024091800_sign_in_codes.down.sql
@@ -0,0 +1 @@
+DROP TABLE IF EXISTS sign_in_codes;
\ No newline at end of file
diff --git a/app/server/migrations/2024091800_sign_in_codes.up.sql b/app/server/migrations/2024091800_sign_in_codes.up.sql
new file mode 100644
index 0000000000000000000000000000000000000000..a576fd1554b521bc92e47d5d40f380f156a0f1f7
--- /dev/null
+++ b/app/server/migrations/2024091800_sign_in_codes.up.sql
@@ -0,0 +1,12 @@
+CREATE TABLE IF NOT EXISTS sign_in_codes (
+ id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
+ pin_hash VARCHAR(64) NOT NULL,
+ user_id UUID REFERENCES users(id),
+ org_id UUID REFERENCES orgs(id),
+ auth_token_id UUID REFERENCES auth_tokens(id),
+ created_at TIMESTAMP NOT NULL DEFAULT NOW(),
+ updated_at TIMESTAMP NOT NULL DEFAULT NOW()
+);
+CREATE TRIGGER update_sign_in_codes_modtime BEFORE UPDATE ON sign_in_codes FOR EACH ROW EXECUTE FUNCTION update_updated_at_column();
+
+CREATE UNIQUE INDEX sign_in_codes_idx ON sign_in_codes(pin_hash, created_at DESC);
\ No newline at end of file
diff --git a/app/server/migrations/2024092100_remove_trial_fields.down.sql b/app/server/migrations/2024092100_remove_trial_fields.down.sql
new file mode 100644
index 0000000000000000000000000000000000000000..81f018d5d7a50ea37cf715df30ad85722dd506dd
--- /dev/null
+++ b/app/server/migrations/2024092100_remove_trial_fields.down.sql
@@ -0,0 +1,2 @@
+ALTER TABLE auth_tokens ADD COLUMN is_trial BOOLEAN NOT NULL DEFAULT FALSE;
+ALTER TABLE users ADD COLUMN is_trial BOOLEAN NOT NULL DEFAULT FALSE;
\ No newline at end of file
diff --git a/app/server/migrations/2024092100_remove_trial_fields.up.sql b/app/server/migrations/2024092100_remove_trial_fields.up.sql
new file mode 100644
index 0000000000000000000000000000000000000000..d4efed21be81846ce35ecf8a6e2fcbaecd7c3a8a
--- /dev/null
+++ b/app/server/migrations/2024092100_remove_trial_fields.up.sql
@@ -0,0 +1,2 @@
+ALTER TABLE auth_tokens DROP COLUMN is_trial;
+ALTER TABLE users DROP COLUMN is_trial;
\ No newline at end of file
diff --git a/app/server/migrations/2024100900_update_locks.down.sql b/app/server/migrations/2024100900_update_locks.down.sql
new file mode 100644
index 0000000000000000000000000000000000000000..9379da541211256e292443bdd0704ada16dacae6
--- /dev/null
+++ b/app/server/migrations/2024100900_update_locks.down.sql
@@ -0,0 +1,12 @@
+-- Revert user_id to NOT NULL if no NULL values exist
+DO $$
+BEGIN
+ -- Check for NULL values in user_id
+ IF EXISTS (SELECT 1 FROM repo_locks WHERE user_id IS NULL) THEN
+ RAISE EXCEPTION 'Cannot revert to NOT NULL, as there are rows with NULL values in user_id.';
+ ELSE
+ -- Proceed with setting the columns to NOT NULL
+ ALTER TABLE repo_locks
+ ALTER COLUMN user_id SET NOT NULL;
+ END IF;
+END $$;
diff --git a/app/server/migrations/2024100900_update_locks.up.sql b/app/server/migrations/2024100900_update_locks.up.sql
new file mode 100644
index 0000000000000000000000000000000000000000..ab8d070fa8b94430ace344fa45faad01b6dd16f1
--- /dev/null
+++ b/app/server/migrations/2024100900_update_locks.up.sql
@@ -0,0 +1,2 @@
+ALTER TABLE repo_locks
+ ALTER COLUMN user_id DROP NOT NULL;
diff --git a/app/server/migrations/2024121400_plan_config.down.sql b/app/server/migrations/2024121400_plan_config.down.sql
new file mode 100644
index 0000000000000000000000000000000000000000..c9e70d15a25b9ead8c05f3e9a6ab5bdd5acc92db
--- /dev/null
+++ b/app/server/migrations/2024121400_plan_config.down.sql
@@ -0,0 +1,3 @@
+ALTER TABLE plans DROP COLUMN IF EXISTS plan_config;
+
+ALTER TABLE users DROP COLUMN IF EXISTS default_plan_config;
\ No newline at end of file
diff --git a/app/server/migrations/2024121400_plan_config.up.sql b/app/server/migrations/2024121400_plan_config.up.sql
new file mode 100644
index 0000000000000000000000000000000000000000..9081f48958ec45fe1918da11eb425ffe9e9e73c1
--- /dev/null
+++ b/app/server/migrations/2024121400_plan_config.up.sql
@@ -0,0 +1,3 @@
+ALTER TABLE plans ADD COLUMN IF NOT EXISTS plan_config JSON;
+
+ALTER TABLE users ADD COLUMN IF NOT EXISTS default_plan_config JSON;
diff --git a/app/server/migrations/2025012600_update_custom_models.down.sql b/app/server/migrations/2025012600_update_custom_models.down.sql
new file mode 100644
index 0000000000000000000000000000000000000000..aeb400b1b4e91404cd6e74e408d2d3541e08f727
--- /dev/null
+++ b/app/server/migrations/2025012600_update_custom_models.down.sql
@@ -0,0 +1,9 @@
+ALTER TABLE custom_models DROP COLUMN preferred_output_format;
+
+ALTER TABLE custom_models ADD COLUMN has_streaming BOOLEAN NOT NULL DEFAULT FALSE;
+ALTER TABLE custom_models ADD COLUMN has_function_calling BOOLEAN NOT NULL DEFAULT FALSE;
+ALTER TABLE custom_models ADD COLUMN has_streaming_function_calls BOOLEAN NOT NULL DEFAULT FALSE;
+ALTER TABLE custom_models ADD COLUMN has_json_mode BOOLEAN NOT NULL DEFAULT FALSE;
+ALTER TABLE custom_models ADD COLUMN is_openai_compatible BOOLEAN NOT NULL DEFAULT FALSE;
+
+ALTER TABLE custom_models DROP COLUMN has_image_support;
diff --git a/app/server/migrations/2025012600_update_custom_models.up.sql b/app/server/migrations/2025012600_update_custom_models.up.sql
new file mode 100644
index 0000000000000000000000000000000000000000..2da54831eb5c4dfd01867005b122bf9dd78faf93
--- /dev/null
+++ b/app/server/migrations/2025012600_update_custom_models.up.sql
@@ -0,0 +1,9 @@
+ALTER TABLE custom_models ADD COLUMN preferred_output_format VARCHAR(32) NOT NULL DEFAULT 'xml';
+
+ALTER TABLE custom_models DROP COLUMN has_streaming_function_calls;
+ALTER TABLE custom_models DROP COLUMN has_json_mode;
+ALTER TABLE custom_models DROP COLUMN has_streaming;
+ALTER TABLE custom_models DROP COLUMN has_function_calling;
+ALTER TABLE custom_models DROP COLUMN is_openai_compatible;
+
+ALTER TABLE custom_models ADD COLUMN has_image_support BOOLEAN NOT NULL DEFAULT FALSE;
\ No newline at end of file
diff --git a/app/server/migrations/2025021101_locks_unique.down.sql b/app/server/migrations/2025021101_locks_unique.down.sql
new file mode 100644
index 0000000000000000000000000000000000000000..12f5eca954dd9cd1c76f381388f3c6a65930a213
--- /dev/null
+++ b/app/server/migrations/2025021101_locks_unique.down.sql
@@ -0,0 +1,2 @@
+DROP TABLE IF EXISTS lockable_plan_ids;
+DROP INDEX IF EXISTS repo_locks_single_write_lock;
\ No newline at end of file
diff --git a/app/server/migrations/2025021101_locks_unique.up.sql b/app/server/migrations/2025021101_locks_unique.up.sql
new file mode 100644
index 0000000000000000000000000000000000000000..528755beef6ea46bc4440c562608b15bbed5d9dd
--- /dev/null
+++ b/app/server/migrations/2025021101_locks_unique.up.sql
@@ -0,0 +1,8 @@
+
+CREATE UNIQUE INDEX repo_locks_single_write_lock
+ ON repo_locks(plan_id)
+ WHERE (scope = 'w');
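+
+-- i.e. at most one write-scope ('w') lock row can exist per plan at a time;
+-- rows with other scopes are not constrained by this index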
+
+CREATE TABLE IF NOT EXISTS lockable_plan_ids (
+ plan_id UUID NOT NULL PRIMARY KEY REFERENCES plans(id) ON DELETE CASCADE
+);
\ No newline at end of file
diff --git a/app/server/migrations/2025022700_remove_models_col.down.sql b/app/server/migrations/2025022700_remove_models_col.down.sql
new file mode 100644
index 0000000000000000000000000000000000000000..3d72520c70f138a7bd49eb26ccbbb28b82c0d905
--- /dev/null
+++ b/app/server/migrations/2025022700_remove_models_col.down.sql
@@ -0,0 +1 @@
+ALTER TABLE custom_models ADD COLUMN default_reserved_output_tokens INTEGER NOT NULL;
\ No newline at end of file
diff --git a/app/server/migrations/2025022700_remove_models_col.up.sql b/app/server/migrations/2025022700_remove_models_col.up.sql
new file mode 100644
index 0000000000000000000000000000000000000000..609c2e2be338247b89390bfa170708adfcfeebde
--- /dev/null
+++ b/app/server/migrations/2025022700_remove_models_col.up.sql
@@ -0,0 +1 @@
+ALTER TABLE custom_models DROP COLUMN default_reserved_output_tokens;
\ No newline at end of file
diff --git a/app/server/migrations/2025031300_add_model_roles.down.sql b/app/server/migrations/2025031300_add_model_roles.down.sql
new file mode 100644
index 0000000000000000000000000000000000000000..f0eaf5238542fea2e0976a0d424fbbe8eae512ce
--- /dev/null
+++ b/app/server/migrations/2025031300_add_model_roles.down.sql
@@ -0,0 +1,3 @@
+ALTER TABLE model_sets DROP COLUMN context_loader;
+ALTER TABLE model_sets DROP COLUMN whole_file_builder;
+ALTER TABLE model_sets DROP COLUMN coder;
\ No newline at end of file
diff --git a/app/server/migrations/2025031300_add_model_roles.up.sql b/app/server/migrations/2025031300_add_model_roles.up.sql
new file mode 100644
index 0000000000000000000000000000000000000000..699a15d5d9490555ea553f1f9f67686c654eb604
--- /dev/null
+++ b/app/server/migrations/2025031300_add_model_roles.up.sql
@@ -0,0 +1,3 @@
+ALTER TABLE model_sets ADD COLUMN context_loader JSON;
+ALTER TABLE model_sets ADD COLUMN whole_file_builder JSON;
+ALTER TABLE model_sets ADD COLUMN coder JSON;
\ No newline at end of file
diff --git a/app/server/migrations/2025031900_add_custom_model_cols.down.sql b/app/server/migrations/2025031900_add_custom_model_cols.down.sql
new file mode 100644
index 0000000000000000000000000000000000000000..152f804e020519a7bcd048ea32cea249dbd166e8
--- /dev/null
+++ b/app/server/migrations/2025031900_add_custom_model_cols.down.sql
@@ -0,0 +1,3 @@
+ALTER TABLE custom_models DROP COLUMN max_output_tokens;
+ALTER TABLE custom_models DROP COLUMN reserved_output_tokens;
+ALTER TABLE custom_models DROP COLUMN model_id;
diff --git a/app/server/migrations/2025031900_add_custom_model_cols.up.sql b/app/server/migrations/2025031900_add_custom_model_cols.up.sql
new file mode 100644
index 0000000000000000000000000000000000000000..48c3107187a75287593c2fc076083d36835995f1
--- /dev/null
+++ b/app/server/migrations/2025031900_add_custom_model_cols.up.sql
@@ -0,0 +1,4 @@
+ALTER TABLE custom_models
+ ADD COLUMN max_output_tokens INTEGER NOT NULL,
+ ADD COLUMN reserved_output_tokens INTEGER NOT NULL,
+ ADD COLUMN model_id VARCHAR(255) NOT NULL;
\ No newline at end of file
diff --git a/app/server/migrations/2025032400_sign_in_codes_on_delete.down.sql b/app/server/migrations/2025032400_sign_in_codes_on_delete.down.sql
new file mode 100644
index 0000000000000000000000000000000000000000..892e397fd6a6251a0b4f98c60f2035cc0f9751e7
--- /dev/null
+++ b/app/server/migrations/2025032400_sign_in_codes_on_delete.down.sql
@@ -0,0 +1,6 @@
+ALTER TABLE sign_in_codes
+DROP CONSTRAINT sign_in_codes_org_id_fkey,
+ADD CONSTRAINT sign_in_codes_org_id_fkey
+FOREIGN KEY (org_id)
+REFERENCES orgs(id);
+
diff --git a/app/server/migrations/2025032400_sign_in_codes_on_delete.up.sql b/app/server/migrations/2025032400_sign_in_codes_on_delete.up.sql
new file mode 100644
index 0000000000000000000000000000000000000000..3bff4d8fb87c0770c8449ed103ad4acf2bb399cf
--- /dev/null
+++ b/app/server/migrations/2025032400_sign_in_codes_on_delete.up.sql
@@ -0,0 +1,7 @@
+ALTER TABLE sign_in_codes
+DROP CONSTRAINT sign_in_codes_org_id_fkey,
+ADD CONSTRAINT sign_in_codes_org_id_fkey
+FOREIGN KEY (org_id)
+REFERENCES orgs(id)
+ON DELETE SET NULL;
+
diff --git a/app/server/migrations/2025051600_custom_models_refactor.down.sql b/app/server/migrations/2025051600_custom_models_refactor.down.sql
new file mode 100644
index 0000000000000000000000000000000000000000..af33bdf39251f6ea95e590b5c6cb0e188d6f6249
--- /dev/null
+++ b/app/server/migrations/2025051600_custom_models_refactor.down.sql
@@ -0,0 +1,4 @@
+DROP TABLE IF EXISTS custom_models;
+DROP TABLE IF EXISTS custom_providers;
+
+ALTER TABLE custom_models_legacy RENAME TO custom_models;
diff --git a/app/server/migrations/2025051600_custom_models_refactor.up.sql b/app/server/migrations/2025051600_custom_models_refactor.up.sql
new file mode 100644
index 0000000000000000000000000000000000000000..664b7d4b1c4e70026aeba0229854999ac85bd1a0
--- /dev/null
+++ b/app/server/migrations/2025051600_custom_models_refactor.up.sql
@@ -0,0 +1,107 @@
+
+BEGIN;
+ALTER TABLE custom_models RENAME TO custom_models_legacy;
+
+CREATE TABLE IF NOT EXISTS custom_models (
+ id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
+ org_id UUID NOT NULL REFERENCES orgs(id) ON DELETE CASCADE,
+
+ model_id VARCHAR(255) NOT NULL,
+ description TEXT,
+ publisher VARCHAR(64) NOT NULL DEFAULT '',
+
+ max_tokens INTEGER NOT NULL,
+ default_max_convo_tokens INTEGER NOT NULL,
+ max_output_tokens INTEGER NOT NULL,
+ reserved_output_tokens INTEGER NOT NULL,
+
+ has_image_support BOOLEAN NOT NULL DEFAULT FALSE,
+ preferred_output_format VARCHAR(32) NOT NULL DEFAULT 'xml',
+
+ system_prompt_disabled BOOLEAN NOT NULL DEFAULT FALSE,
+ role_params_disabled BOOLEAN NOT NULL DEFAULT FALSE,
+ stop_disabled BOOLEAN NOT NULL DEFAULT FALSE,
+ predicted_output_enabled BOOLEAN NOT NULL DEFAULT FALSE,
+ reasoning_effort_enabled BOOLEAN NOT NULL DEFAULT FALSE,
+ reasoning_effort VARCHAR(32) NOT NULL DEFAULT '',
+ include_reasoning BOOLEAN NOT NULL DEFAULT FALSE,
+ reasoning_budget INTEGER NOT NULL DEFAULT 0,
+ supports_cache_control BOOLEAN NOT NULL DEFAULT FALSE,
+ single_message_no_system_prompt BOOLEAN NOT NULL DEFAULT FALSE,
+ token_estimate_padding_pct FLOAT NOT NULL DEFAULT 0.0,
+
+ providers JSON NOT NULL DEFAULT '[]',
+
+ created_at TIMESTAMP NOT NULL DEFAULT NOW(),
+ updated_at TIMESTAMP NOT NULL DEFAULT NOW()
+);
+CREATE INDEX IF NOT EXISTS cmv_org_idx ON custom_models(org_id);
+CREATE UNIQUE INDEX IF NOT EXISTS cmv_unique_idx ON custom_models(org_id, model_id);
+
+CREATE TRIGGER cmv_modtime BEFORE UPDATE ON custom_models
+ FOR EACH ROW EXECUTE FUNCTION update_updated_at_column();
+
+CREATE TABLE IF NOT EXISTS custom_providers (
+ id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
+ org_id UUID NOT NULL REFERENCES orgs(id) ON DELETE CASCADE,
+
+ name VARCHAR(255) NOT NULL,
+ base_url VARCHAR(255) NOT NULL,
+ skip_auth BOOLEAN NOT NULL DEFAULT FALSE,
+ api_key_env_var VARCHAR(255),
+
+ extra_auth_vars JSON NOT NULL DEFAULT '[]',
+
+ created_at TIMESTAMP NOT NULL DEFAULT NOW(),
+ updated_at TIMESTAMP NOT NULL DEFAULT NOW()
+);
+CREATE INDEX IF NOT EXISTS cp_org_idx ON custom_providers(org_id);
+CREATE UNIQUE INDEX IF NOT EXISTS cp_unique_idx ON custom_providers(org_id, name);
+CREATE TRIGGER cp_modtime BEFORE UPDATE ON custom_providers
+ FOR EACH ROW EXECUTE FUNCTION update_updated_at_column();
+
+/* ---- migrate base rows into the new custom_models ------------------ */
+INSERT INTO custom_models (
+ id, org_id, model_id, description,
+ max_tokens, default_max_convo_tokens,
+ max_output_tokens, reserved_output_tokens,
+ has_image_support, preferred_output_format,
+
+ providers, -- <-- aggregated JSON array
+ created_at, updated_at
+)
+SELECT
+ id, org_id, model_id, description,
+ max_tokens, default_max_convo_tokens,
+ max_output_tokens, reserved_output_tokens,
+ has_image_support, preferred_output_format,
+
+ /* -------- build a one-element providers array -------- */
+ json_build_array(
+ json_build_object(
+ 'provider', provider,
+ 'custom_provider', custom_provider,
+ 'model_name', model_name
+ )
+ )::json,
+
+ created_at, updated_at
+FROM custom_models_legacy
+ON CONFLICT (org_id, model_id) DO NOTHING;
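+
+/* e.g. a legacy row with provider 'openai' and model_name 'gpt-4o' (values
+   illustrative) yields providers =
+   [{"provider": "openai", "custom_provider": null, "model_name": "gpt-4o"}] */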
+
+/* ---- migrate unique custom providers ------------------------------- */
+WITH src AS (
+ SELECT DISTINCT
+ org_id,
+ custom_provider AS name,
+ base_url,
+ api_key_env_var
+ FROM custom_models_legacy
+ WHERE custom_provider IS NOT NULL
+)
+INSERT INTO custom_providers (org_id, name, base_url, api_key_env_var)
+SELECT org_id, name, base_url, api_key_env_var
+FROM src
+ON CONFLICT (org_id, name) DO NOTHING;
+
+COMMIT;
\ No newline at end of file
diff --git a/app/server/migrations/2025052200_model_pack_cols.down.sql b/app/server/migrations/2025052200_model_pack_cols.down.sql
new file mode 100644
index 0000000000000000000000000000000000000000..c7f9d49e96e6f98028ad48d2f76ae21722ee4acb
--- /dev/null
+++ b/app/server/migrations/2025052200_model_pack_cols.down.sql
@@ -0,0 +1,6 @@
+ALTER TABLE model_sets
+ DROP COLUMN IF EXISTS updated_at;
+
+DROP TRIGGER IF EXISTS model_set_modtime ON model_sets;
+
+DROP INDEX IF EXISTS model_set_unique_idx;
diff --git a/app/server/migrations/2025052200_model_pack_cols.up.sql b/app/server/migrations/2025052200_model_pack_cols.up.sql
new file mode 100644
index 0000000000000000000000000000000000000000..f3fde587f4cce3d2f80af51c59d4e8cf92d2e541
--- /dev/null
+++ b/app/server/migrations/2025052200_model_pack_cols.up.sql
@@ -0,0 +1,7 @@
+ALTER TABLE model_sets
+ ADD COLUMN updated_at TIMESTAMP NOT NULL DEFAULT NOW();
+
+CREATE TRIGGER model_set_modtime BEFORE UPDATE ON model_sets
+ FOR EACH ROW EXECUTE FUNCTION update_updated_at_column();
+
+CREATE UNIQUE INDEX IF NOT EXISTS model_set_unique_idx ON model_sets(org_id, name);
\ No newline at end of file
diff --git a/app/server/migrations/2025070200_add_org_user_config.down.sql b/app/server/migrations/2025070200_add_org_user_config.down.sql
new file mode 100644
index 0000000000000000000000000000000000000000..3f47b04aae6bfbd7e3009a6659c981b083ffd9bb
--- /dev/null
+++ b/app/server/migrations/2025070200_add_org_user_config.down.sql
@@ -0,0 +1 @@
+ALTER TABLE orgs_users DROP COLUMN config;
\ No newline at end of file
diff --git a/app/server/migrations/2025070200_add_org_user_config.up.sql b/app/server/migrations/2025070200_add_org_user_config.up.sql
new file mode 100644
index 0000000000000000000000000000000000000000..ced7ad5587c120433eae71981a84e7ad564076b6
--- /dev/null
+++ b/app/server/migrations/2025070200_add_org_user_config.up.sql
@@ -0,0 +1 @@
+ALTER TABLE orgs_users ADD COLUMN config JSON;
\ No newline at end of file
diff --git a/app/server/model/client.go b/app/server/model/client.go
new file mode 100644
index 0000000000000000000000000000000000000000..08f87a5dc7dde02ec204356e6360acee2a75110a
--- /dev/null
+++ b/app/server/model/client.go
@@ -0,0 +1,560 @@
+package model
+
+import (
+ "bufio"
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+ "io"
+ "log"
+ "net/http"
+ "os"
+ "plandex-server/db"
+ "plandex-server/types"
+ "strings"
+ "sync"
+ "time"
+
+ shared "plandex-shared"
+
+ "github.com/davecgh/go-spew/spew"
+ "github.com/sashabaranov/go-openai"
+)
+
+// note that we are *only* using streaming requests now
+// non-streaming request handling has been removed completely
+// streams offer more predictable cancellation and partial results
+
+const (
+ ACTIVE_STREAM_CHUNK_TIMEOUT = time.Duration(60) * time.Second
+ USAGE_CHUNK_TIMEOUT = time.Duration(10) * time.Second
+ MAX_ADDITIONAL_RETRIES_WITH_FALLBACK = 1
+ MAX_RETRIES_WITHOUT_FALLBACK = 3
+ MAX_RETRY_DELAY_SECONDS = 10
+)
+
+var httpClient = &http.Client{}
+
+type ClientInfo struct {
+ Client *openai.Client
+ ProviderConfig shared.ModelProviderConfigSchema
+ ApiKey string
+ OpenAIOrgId string
+}
+
+func InitClients(authVars map[string]string, settings *shared.PlanSettings, orgUserConfig *shared.OrgUserConfig) map[string]ClientInfo {
+ clients := make(map[string]ClientInfo)
+ providers := shared.GetProvidersForAuthVars(authVars, settings, orgUserConfig)
+
+ for _, provider := range providers {
+ clients[provider.ToComposite()] = newClient(provider, authVars)
+ }
+
+ return clients
+}
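+
+// example usage (sketch): initialize clients once per request, then look up
+// the client for a resolved provider composite:
+//
+//	clients := InitClients(authVars, settings, orgUserConfig)
+//	composite := modelConfig.GetProviderComposite(authVars, settings, orgUserConfig)
+//	info, ok := clients[composite]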
+
+func newClient(providerConfig shared.ModelProviderConfigSchema, authVars map[string]string) ClientInfo {
+ var apiKey string
+ if providerConfig.ApiKeyEnvVar != "" {
+ apiKey = authVars[providerConfig.ApiKeyEnvVar]
+ } else if providerConfig.HasClaudeMaxAuth {
+ apiKey = authVars[shared.AnthropicClaudeMaxTokenEnvVar]
+ }
+
+ config := openai.DefaultConfig(apiKey)
+ config.BaseURL = providerConfig.BaseUrl
+
+ var openAIOrgId string
+ if providerConfig.Provider == shared.ModelProviderOpenAI && authVars["OPENAI_ORG_ID"] != "" {
+ openAIOrgId = authVars["OPENAI_ORG_ID"]
+ config.OrgID = openAIOrgId
+ }
+
+ return ClientInfo{
+ Client: openai.NewClientWithConfig(config),
+ ApiKey: apiKey,
+ ProviderConfig: providerConfig,
+ OpenAIOrgId: openAIOrgId,
+ }
+}
+
+// ExtendedChatCompletionStream can wrap either a native OpenAI stream or our custom implementation
+type ExtendedChatCompletionStream struct {
+ openaiStream *openai.ChatCompletionStream
+ customReader *StreamReader[types.ExtendedChatCompletionStreamResponse]
+ ctx context.Context
+}
+
+// StreamReader handles the SSE stream reading
+type StreamReader[T any] struct {
+ reader *bufio.Reader
+ response *http.Response
+ emptyMessagesLimit int
+ errAccumulator *ErrorAccumulator
+ unmarshaler *JSONUnmarshaler
+}
+
+// ErrorAccumulator keeps track of errors during streaming
+type ErrorAccumulator struct {
+ errors []error
+ mu sync.Mutex
+}
+
+// JSONUnmarshaler handles JSON unmarshaling for stream responses
+type JSONUnmarshaler struct{}
+
+func CreateChatCompletionStream(
+ clients map[string]ClientInfo,
+ authVars map[string]string,
+ modelConfig *shared.ModelRoleConfig,
+ settings *shared.PlanSettings,
+ orgUserConfig *shared.OrgUserConfig,
+ currentOrgId string,
+ currentUserId string,
+ ctx context.Context,
+ req types.ExtendedChatCompletionRequest,
+) (*ExtendedChatCompletionStream, error) {
+ providerComposite := modelConfig.GetProviderComposite(authVars, settings, orgUserConfig)
+ _, ok := clients[providerComposite]
+ if !ok {
+ return nil, fmt.Errorf("client not found for provider composite: %s", providerComposite)
+ }
+
+ baseModelConfig := modelConfig.GetBaseModelConfig(authVars, settings, orgUserConfig)
+
+ // ensure the model name is set correctly on fallbacks
+ req.Model = baseModelConfig.ModelName
+
+ resolveReq(&req, modelConfig, baseModelConfig, settings)
+
+ // choose the fastest provider by latency/throughput on openrouter
+ if baseModelConfig.Provider == shared.ModelProviderOpenRouter {
+ if !strings.HasSuffix(string(req.Model), ":nitro") && !strings.HasSuffix(string(req.Model), ":free") && !strings.HasSuffix(string(req.Model), ":floor") {
+ req.Model += ":nitro"
+ }
+ }
+
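+ // reasoning params precedence: an explicit token budget wins, then an
+ // effort level, then a bare include/hide reasoning toggle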
+ if baseModelConfig.ReasoningBudget > 0 {
+ req.ReasoningConfig = &types.ReasoningConfig{
+ MaxTokens: baseModelConfig.ReasoningBudget,
+ Exclude: !baseModelConfig.IncludeReasoning || baseModelConfig.HideReasoning,
+ }
+ } else if baseModelConfig.ReasoningEffortEnabled {
+ req.ReasoningConfig = &types.ReasoningConfig{
+ Effort: shared.ReasoningEffort(baseModelConfig.ReasoningEffort),
+ Exclude: !baseModelConfig.IncludeReasoning || baseModelConfig.HideReasoning,
+ }
+ } else if baseModelConfig.IncludeReasoning {
+ req.ReasoningConfig = &types.ReasoningConfig{
+ Exclude: baseModelConfig.HideReasoning,
+ }
+ }
+
+ return withStreamingRetries(ctx, func(numTotalRetry int, didProviderFallback bool, modelErr *shared.ModelError) (*ExtendedChatCompletionStream, shared.FallbackResult, error) {
+ handleClaudeMaxRateLimitedIfNeeded(
+ modelErr,
+ modelConfig,
+ authVars,
+ settings,
+ orgUserConfig,
+ currentOrgId,
+ currentUserId,
+ )
+
+ fallbackRes := modelConfig.GetFallbackForModelError(
+ numTotalRetry,
+ didProviderFallback,
+ modelErr,
+ authVars,
+ settings,
+ orgUserConfig,
+ )
+ resolvedModelConfig := fallbackRes.ModelRoleConfig
+
+ if resolvedModelConfig == nil {
+ return nil, fallbackRes, fmt.Errorf("model config is nil")
+ }
+
+ providerComposite := resolvedModelConfig.GetProviderComposite(authVars, settings, orgUserConfig)
+
+ baseModelConfig := resolvedModelConfig.GetBaseModelConfig(authVars, settings, orgUserConfig)
+
+ opClient, ok := clients[providerComposite]
+
+ if !ok {
+ return nil, fallbackRes, fmt.Errorf("client not found for provider composite: %s", providerComposite)
+ }
+
+ if modelErr != nil && modelErr.Kind == shared.ErrCacheSupport {
+ for i := range req.Messages {
+ for j := range req.Messages[i].Content {
+ if req.Messages[i].Content[j].CacheControl != nil {
+ req.Messages[i].Content[j].CacheControl = nil
+ }
+ }
+ }
+ }
+
+ modelConfig = resolvedModelConfig
+
+ log.Println("createChatCompletionStreamExtended - modelConfig")
+ spew.Dump(map[string]interface{}{
+ "modelConfig.ModelId": baseModelConfig.ModelId,
+ "modelConfig.ModelTag": baseModelConfig.ModelTag,
+ "modelConfig.ModelName": baseModelConfig.ModelName,
+ "modelConfig.Provider": baseModelConfig.Provider,
+ "modelConfig.BaseUrl": baseModelConfig.BaseUrl,
+ "modelConfig.ApiKeyEnvVar": baseModelConfig.ApiKeyEnvVar,
+ })
+
+ resp, err := createChatCompletionStreamExtended(resolvedModelConfig, opClient, authVars, settings, orgUserConfig, ctx, req)
+ return resp, fallbackRes, err
+ }, func(resp *ExtendedChatCompletionStream, err error) {})
+}
+
+func createChatCompletionStreamExtended(
+ modelConfig *shared.ModelRoleConfig,
+ client ClientInfo,
+ authVars map[string]string,
+ settings *shared.PlanSettings,
+ orgUserConfig *shared.OrgUserConfig,
+ ctx context.Context,
+ extendedReq types.ExtendedChatCompletionRequest,
+) (*ExtendedChatCompletionStream, error) {
+ baseModelConfig := modelConfig.GetBaseModelConfig(authVars, settings, orgUserConfig)
+
+ // ensure the model name is set correctly on fallbacks
+ extendedReq.Model = baseModelConfig.ModelName
+
+ var openaiReq *types.ExtendedOpenAIChatCompletionRequest
+ if baseModelConfig.Provider == shared.ModelProviderOpenAI {
+ openaiReq = extendedReq.ToOpenAI()
+ log.Println("Creating chat completion stream with direct OpenAI provider request")
+ }
+
+ switch baseModelConfig.Provider {
+ case shared.ModelProviderGoogleVertex:
+ if authVars["VERTEXAI_PROJECT"] != "" {
+ extendedReq.VertexProject = authVars["VERTEXAI_PROJECT"]
+ }
+ if authVars["VERTEXAI_LOCATION"] != "" {
+ extendedReq.VertexLocation = authVars["VERTEXAI_LOCATION"]
+ }
+ if authVars["GOOGLE_APPLICATION_CREDENTIALS"] != "" {
+ extendedReq.VertexCredentials = authVars["GOOGLE_APPLICATION_CREDENTIALS"]
+ }
+ case shared.ModelProviderAzureOpenAI:
+ if authVars["AZURE_API_BASE"] != "" {
+ extendedReq.LiteLLMApiBase = authVars["AZURE_API_BASE"]
+ }
+ if authVars["AZURE_API_VERSION"] != "" {
+ extendedReq.AzureApiVersion = authVars["AZURE_API_VERSION"]
+ }
+
+ if authVars["AZURE_DEPLOYMENTS_MAP"] != "" {
+ var azureDeploymentsMap map[string]string
+ err := json.Unmarshal([]byte(authVars["AZURE_DEPLOYMENTS_MAP"]), &azureDeploymentsMap)
+ if err != nil {
+ return nil, fmt.Errorf("error unmarshalling AZURE_DEPLOYMENTS_MAP: %w", err)
+ }
+ modelName := string(extendedReq.Model)
+ modelName = strings.ReplaceAll(modelName, "azure/", "")
+
+ deploymentName, ok := azureDeploymentsMap[modelName]
+ if ok {
+ log.Println("azure - deploymentName", deploymentName)
+ modelName = "azure/" + deploymentName
+ extendedReq.Model = shared.ModelName(modelName)
+ }
+ }
+
+ // azure takes reasoning effort as its own param rather than the 'reasoning' config object used by the direct openai api
+ if extendedReq.ReasoningConfig != nil {
+ extendedReq.AzureReasoningEffort = extendedReq.ReasoningConfig.Effort
+ extendedReq.ReasoningConfig = nil
+ }
+ case shared.ModelProviderAmazonBedrock:
+ if authVars["AWS_ACCESS_KEY_ID"] != "" {
+ extendedReq.BedrockAccessKeyId = authVars["AWS_ACCESS_KEY_ID"]
+ }
+ if authVars["AWS_SECRET_ACCESS_KEY"] != "" {
+ extendedReq.BedrockSecretAccessKey = authVars["AWS_SECRET_ACCESS_KEY"]
+ }
+ if authVars["AWS_SESSION_TOKEN"] != "" {
+ extendedReq.BedrockSessionToken = authVars["AWS_SESSION_TOKEN"]
+ }
+ if authVars["AWS_REGION"] != "" {
+ extendedReq.BedrockRegion = authVars["AWS_REGION"]
+ }
+ if authVars["AWS_INFERENCE_PROFILE_ARN"] != "" {
+ extendedReq.BedrockInferenceProfileArn = authVars["AWS_INFERENCE_PROFILE_ARN"]
+ }
+
+ case shared.ModelProviderOllama:
+ if os.Getenv("OLLAMA_BASE_URL") != "" {
+ extendedReq.LiteLLMApiBase = os.Getenv("OLLAMA_BASE_URL")
+ }
+ }
+
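+ // for Claude Max subscription auth, prepend the Claude Code identity system
+ // message and set the anthropic beta/product headers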
+ if client.ProviderConfig.HasClaudeMaxAuth {
+ extendedReq.Messages = append([]types.ExtendedChatMessage{
+ {
+ Role: openai.ChatMessageRoleSystem,
+ Content: []types.ExtendedChatMessagePart{
+ {Type: openai.ChatMessagePartTypeText,
+ Text: "You are Claude Code, Anthropic's official CLI for Claude."},
+ },
+ },
+ }, extendedReq.Messages...)
+
+ if extendedReq.ExtraHeaders == nil {
+ extendedReq.ExtraHeaders = make(map[string]string)
+ }
+ extendedReq.ExtraHeaders["anthropic-beta"] = shared.AnthropicClaudeMaxBetaHeader
+ extendedReq.ExtraHeaders["Authorization"] = "Bearer " + authVars[shared.AnthropicClaudeMaxTokenEnvVar]
+ extendedReq.ExtraHeaders["anthropic-product"] = "claude-code"
+
+ }
+
+ // Marshal the request body to JSON
+ var jsonBody []byte
+ var err error
+ if openaiReq != nil {
+ jsonBody, err = json.Marshal(openaiReq)
+ } else {
+ jsonBody, err = json.Marshal(extendedReq)
+ }
+ if err != nil {
+ return nil, fmt.Errorf("error marshaling request: %w", err)
+ }
+
+ // log.Println("request jsonBody", string(jsonBody))
+
+ // Create new request
+ baseUrl := baseModelConfig.BaseUrl
+ url := baseUrl + "/chat/completions"
+
+ req, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewReader(jsonBody))
+ if err != nil {
+ return nil, fmt.Errorf("error creating request: %w", err)
+ }
+
+ // Set required headers for streaming
+ req.Header.Set("Content-Type", "application/json")
+ req.Header.Set("Accept", "text/event-stream")
+ req.Header.Set("Cache-Control", "no-cache")
+ req.Header.Set("Connection", "keep-alive")
+
+ // some providers send api key in the body, some in the header
+ // some use other auth methods and so don't have a simple api key
+ if client.ApiKey != "" {
+ req.Header.Set("Authorization", "Bearer "+client.ApiKey)
+ }
+ if client.OpenAIOrgId != "" {
+ req.Header.Set("OpenAI-Organization", client.OpenAIOrgId)
+ }
+
+ addOpenRouterHeaders(req)
+
+ // Send the request
+ resp, err := httpClient.Do(req) //nolint:bodyclose // body is closed in stream.Close()
+ if err != nil {
+ return nil, fmt.Errorf("error making request: %w", err)
+ }
+
+ if resp.StatusCode < http.StatusOK || resp.StatusCode >= http.StatusBadRequest {
+ defer resp.Body.Close()
+ body, err := io.ReadAll(resp.Body)
+ if err != nil {
+ return nil, fmt.Errorf("error reading error response: %w", err)
+ }
+ return nil, &HTTPError{
+ StatusCode: resp.StatusCode,
+ Body: string(body),
+ Header: resp.Header.Clone(), // retain Retry-After etc.
+ }
+ }
+
+ // Log response headers
+ // log.Println("Response headers:")
+ // for key, values := range resp.Header {
+ // log.Printf("%s: %v\n", key, values)
+ // }
+
+ reader := &StreamReader[types.ExtendedChatCompletionStreamResponse]{
+ reader: bufio.NewReader(resp.Body),
+ response: resp,
+ emptyMessagesLimit: 30,
+ errAccumulator: NewErrorAccumulator(),
+ unmarshaler: &JSONUnmarshaler{},
+ }
+
+ return &ExtendedChatCompletionStream{
+ customReader: reader,
+ ctx: ctx,
+ }, nil
+}
+
+func NewErrorAccumulator() *ErrorAccumulator {
+ return &ErrorAccumulator{
+ errors: make([]error, 0),
+ }
+}
+
+func (ea *ErrorAccumulator) Add(err error) {
+ ea.mu.Lock()
+ defer ea.mu.Unlock()
+ ea.errors = append(ea.errors, err)
+}
+
+func (ea *ErrorAccumulator) GetErrors() []error {
+ ea.mu.Lock()
+ defer ea.mu.Unlock()
+ return ea.errors
+}
+
+func (ju *JSONUnmarshaler) Unmarshal(data []byte, v interface{}) error {
+ return json.Unmarshal(data, v)
+}
+
+// Recv reads from the stream
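+// Each SSE frame arrives as a "data: " line carrying a JSON chunk, and the
+// stream ends with a literal "data: [DONE]" sentinel, e.g. (shape assumed
+// from the fields read elsewhere in this package):
+//
+//	data: {"id":"...","choices":[{"delta":{"content":"..."}}]}
+//	data: [DONE]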
+func (stream *StreamReader[T]) Recv() (*T, error) {
+ for {
+ line, err := stream.reader.ReadString('\n')
+ if err != nil {
+ return nil, err
+ }
+
+ // Trim any whitespace
+ line = strings.TrimSpace(line)
+
+ // Skip empty lines
+ if line == "" {
+ continue
+ }
+
+ // Check for data prefix
+ if !strings.HasPrefix(line, "data: ") {
+ continue
+ }
+
+ // Extract the data
+ data := strings.TrimPrefix(line, "data: ")
+
+ // log.Println("\n\n--- stream data:\n", data, "\n\n")
+
+ // Check for stream completion
+ if data == "[DONE]" {
+ return nil, io.EOF
+ }
+
+ // Parse the response
+ var response T
+ err = stream.unmarshaler.Unmarshal([]byte(data), &response)
+ if err != nil {
+ stream.errAccumulator.Add(err)
+ continue
+ }
+
+ return &response, nil
+ }
+}
+
+func (stream *StreamReader[T]) Close() error {
+ if stream.response != nil {
+ return stream.response.Body.Close()
+ }
+ return nil
+}
+
+// Recv returns the next message in the stream
+func (stream *ExtendedChatCompletionStream) Recv() (*types.ExtendedChatCompletionStreamResponse, error) {
+ select {
+ case <-stream.ctx.Done():
+ return nil, stream.ctx.Err()
+ default:
+ if stream.openaiStream != nil {
+ bytes, err := stream.openaiStream.RecvRaw()
+ if err != nil {
+ return nil, err
+ }
+
+ var response types.ExtendedChatCompletionStreamResponse
+ err = json.Unmarshal(bytes, &response)
+ if err != nil {
+ return nil, err
+ }
+ return &response, nil
+ }
+ return stream.customReader.Recv()
+ }
+}
+
+// Close the response body
+func (stream *ExtendedChatCompletionStream) Close() error {
+ if stream.openaiStream != nil {
+ return stream.openaiStream.Close()
+ }
+ return stream.customReader.Close()
+}
+
+func resolveReq(req *types.ExtendedChatCompletionRequest, modelConfig *shared.ModelRoleConfig, baseModelConfig *shared.BaseModelConfig, settings *shared.PlanSettings) {
+ // if system prompt is disabled, change the role of the system message to user
+ if modelConfig.GetSharedBaseConfig(settings).SystemPromptDisabled {
+ log.Println("System prompt disabled - changing role of system message to user")
+ for i, msg := range req.Messages {
+ log.Println("Message role:", msg.Role)
+ if msg.Role == openai.ChatMessageRoleSystem {
+ log.Println("Changing role of system message to user")
+ req.Messages[i].Role = openai.ChatMessageRoleUser
+ }
+ }
+
+ for _, msg := range req.Messages {
+ log.Println("Final message role:", msg.Role)
+ }
+ }
+
+ if modelConfig.GetSharedBaseConfig(settings).RoleParamsDisabled {
+ log.Println("Role params disabled - setting temperature and top p to 0")
+ req.Temperature = 0
+ req.TopP = 0
+ }
+
+ if baseModelConfig.Provider == shared.ModelProviderOllama {
+ // ollama doesn't support temperature or top p params
+ log.Println("Ollama - clearing temperature and top p")
+ req.Temperature = 0
+ req.TopP = 0
+
+ }
+}
+
+func addOpenRouterHeaders(req *http.Request) {
+ req.Header.Set("HTTP-Referer", "https://plandex.ai")
+ req.Header.Set("X-Title", "Plandex")
+ req.Header.Set("X-OR-Prefer", "ttft,throughput")
+ if os.Getenv("GOENV") == "production" {
+ req.Header.Set("X-OR-Region", "us-east-1")
+ }
+}
+
+func handleClaudeMaxRateLimitedIfNeeded(modelErr *shared.ModelError, modelConfig *shared.ModelRoleConfig, authVars map[string]string, settings *shared.PlanSettings, orgUserConfig *shared.OrgUserConfig, currentOrgId string, currentUserId string) {
+
+ // if we used a claude max provider and got rate limited, set the cooldown on org user config and update the db in the background
+ if modelErr != nil && modelErr.Kind == shared.ErrRateLimited && modelErr.RetryAfterSeconds == 0 {
+ baseModelConfig := modelConfig.GetBaseModelConfig(authVars, settings, orgUserConfig)
+ if baseModelConfig.BaseModelProviderConfig.HasClaudeMaxAuth {
+ orgUserConfig.ClaudeSubscriptionCooldownStartedAt = time.Now()
+
+ go func() {
+ err := db.UpdateOrgUserConfig(currentUserId, currentOrgId, orgUserConfig)
+ if err != nil {
+ log.Printf("Error updating org user config: %v\n", err)
+ }
+ }()
+ }
+ }
+
+}
diff --git a/app/server/model/client_stream.go b/app/server/model/client_stream.go
new file mode 100644
index 0000000000000000000000000000000000000000..6d4134eb826b372db428341facc02f9c0cca4830
--- /dev/null
+++ b/app/server/model/client_stream.go
@@ -0,0 +1,340 @@
+package model
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "log"
+ "math/rand"
+ "plandex-server/types"
+ shared "plandex-shared"
+ "time"
+
+ "github.com/davecgh/go-spew/spew"
+ "github.com/sashabaranov/go-openai"
+)
+
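+// OnStreamFn receives each content chunk along with the accumulated buffer;
+// returning true stops the stream early while keeping the partial result.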
+type OnStreamFn func(chunk string, buffer string) (shouldStop bool)
+
+func CreateChatCompletionWithInternalStream(
+ clients map[string]ClientInfo,
+ authVars map[string]string,
+ modelConfig *shared.ModelRoleConfig,
+ settings *shared.PlanSettings,
+ orgUserConfig *shared.OrgUserConfig,
+ currentOrgId string,
+ currentUserId string,
+ ctx context.Context,
+ req types.ExtendedChatCompletionRequest,
+ onStream OnStreamFn,
+ reqStarted time.Time,
+) (*types.ModelResponse, error) {
+ providerComposite := modelConfig.GetProviderComposite(authVars, settings, orgUserConfig)
+ _, ok := clients[providerComposite]
+ if !ok {
+ return nil, fmt.Errorf("client not found for provider composite: %s", providerComposite)
+ }
+
+ baseModelConfig := modelConfig.GetBaseModelConfig(authVars, settings, orgUserConfig)
+
+ resolveReq(&req, modelConfig, baseModelConfig, settings)
+
+ // choose the fastest provider by latency/throughput on openrouter
+ if baseModelConfig.Provider == shared.ModelProviderOpenRouter {
+ req.Model += ":nitro"
+ }
+
+ // Force streaming mode since we're using the streaming API
+ req.Stream = true
+
+ // Include usage in stream response
+ req.StreamOptions = &openai.StreamOptions{
+ IncludeUsage: true,
+ }
+
+ return withStreamingRetries(ctx, func(numTotalRetry int, didProviderFallback bool, modelErr *shared.ModelError) (resp *types.ModelResponse, fallbackRes shared.FallbackResult, err error) {
+ handleClaudeMaxRateLimitedIfNeeded(modelErr, modelConfig, authVars, settings, orgUserConfig, currentOrgId, currentUserId)
+
+ fallbackRes = modelConfig.GetFallbackForModelError(numTotalRetry, didProviderFallback, modelErr, authVars, settings, orgUserConfig)
+ resolvedModelConfig := fallbackRes.ModelRoleConfig
+
+ if resolvedModelConfig == nil {
+ return nil, fallbackRes, fmt.Errorf("model config is nil")
+ }
+
+ providerComposite := resolvedModelConfig.GetProviderComposite(authVars, settings, orgUserConfig)
+ opClient, ok := clients[providerComposite]
+
+ if !ok {
+ return nil, fallbackRes, fmt.Errorf("client not found for provider composite: %s", providerComposite)
+ }
+
+ modelConfig = resolvedModelConfig
+ resp, err = processChatCompletionStream(resolvedModelConfig, opClient, authVars, settings, orgUserConfig, ctx, req, onStream, reqStarted)
+ if err != nil {
+ return nil, fallbackRes, err
+ }
+ return resp, fallbackRes, nil
+ }, func(resp *types.ModelResponse, err error) {
+ if resp != nil {
+ resp.Stopped = true
+ resp.Error = err.Error()
+ }
+ })
+}
+
+func processChatCompletionStream(
+ modelConfig *shared.ModelRoleConfig,
+ client ClientInfo,
+ authVars map[string]string,
+ settings *shared.PlanSettings,
+ orgUserConfig *shared.OrgUserConfig,
+ ctx context.Context,
+ req types.ExtendedChatCompletionRequest,
+ onStream OnStreamFn,
+ reqStarted time.Time,
+) (*types.ModelResponse, error) {
+ streamCtx, cancel := context.WithCancel(ctx)
+
+ log.Println("processChatCompletionStream - modelConfig", spew.Sdump(map[string]interface{}{
+ "model": modelConfig.ModelId,
+ }))
+
+ stream, err := createChatCompletionStreamExtended(modelConfig, client, authVars, settings, orgUserConfig, streamCtx, req)
+
+ if err != nil {
+ cancel()
+ return nil, fmt.Errorf("error creating chat completion stream: %w", err)
+ }
+
+ defer stream.Close()
+ defer cancel()
+
+ accumulator := types.NewStreamCompletionAccumulator()
+ // Create a timer that will trigger if no chunk is received within the specified duration
+ timer := time.NewTimer(ACTIVE_STREAM_CHUNK_TIMEOUT)
+ defer timer.Stop()
+ streamFinished := false
+
+ receivedFirstChunk := false
+
+ // Process stream until EOF or error
+ for {
+ select {
+ case <-streamCtx.Done():
+ log.Println("Stream canceled")
+ return accumulator.Result(true, streamCtx.Err()), streamCtx.Err()
+ case <-timer.C:
+ if streamFinished {
+ log.Println("Stream finished - timed out waiting for usage chunk")
+ return accumulator.Result(false, nil), nil
+ }
+ log.Println("Stream timed out due to inactivity")
+ return accumulator.Result(true, fmt.Errorf("stream timed out due to inactivity. The model is not responding.")), nil
+ default:
+ response, err := stream.Recv()
+ if err == io.EOF {
+ if streamFinished {
+ return accumulator.Result(false, nil), nil
+ }
+
+ err = fmt.Errorf("model stream ended unexpectedly: %w", err)
+ return accumulator.Result(true, err), err
+ }
+ if err != nil {
+ err = fmt.Errorf("error receiving stream chunk: %w", err)
+ return accumulator.Result(true, err), err
+ }
+
+ if response.ID != "" {
+ accumulator.SetGenerationId(response.ID)
+ }
+
+ if !receivedFirstChunk {
+ receivedFirstChunk = true
+ accumulator.SetFirstTokenAt(time.Now())
+ }
+
+ if !timer.Stop() {
+ <-timer.C
+ }
+ timer.Reset(ACTIVE_STREAM_CHUNK_TIMEOUT)
+
+ // Process the response
+ if response.Usage != nil {
+ accumulator.SetUsage(response.Usage)
+ return accumulator.Result(false, nil), nil
+ }
+
+ emptyChoices := false
+ var content string
+
+ if len(response.Choices) == 0 {
+ // Previously we'd return an error if there were no choices, but some models do this and then keep streaming, so we'll just log it and continue
+ log.Println("processChatCompletionStream - no choices in response")
+ // err := fmt.Errorf("no choices in response")
+ // return accumulator.Result(false, err), err
+ emptyChoices = true
+ }
+
+ // We'll be more accepting of multiple choices and just take the first one
+ // if len(response.Choices) > 1 {
+ // err = fmt.Errorf("stream finished with more than one choice | The model failed to generate a valid response.")
+ // return accumulator.Result(true, err), err
+ // }
+
+ if !emptyChoices {
+ choice := response.Choices[0]
+
+ if choice.FinishReason != "" {
+ if choice.FinishReason == "error" {
+ err = fmt.Errorf("model stopped with error status | The model is not responding.")
+ return accumulator.Result(true, err), err
+ } else {
+ // Reset the timer for the usage chunk
+ if !timer.Stop() {
+ <-timer.C
+ }
+ timer.Reset(USAGE_CHUNK_TIMEOUT)
+ streamFinished = true
+ continue
+ }
+ }
+
+ if req.Tools != nil {
+ // guard against an empty (but non-nil) tool call slice
+ if len(choice.Delta.ToolCalls) > 0 {
+ toolCall := choice.Delta.ToolCalls[0]
+ content = toolCall.Function.Arguments
+ }
+ } else {
+ if choice.Delta.Content != "" {
+ content = choice.Delta.Content
+ }
+ }
+ }
+
+ accumulator.AddContent(content)
+ // pass the chunk and the accumulated content to the callback
+ if onStream != nil {
+ shouldReturn := onStream(content, accumulator.Content())
+ if shouldReturn {
+ return accumulator.Result(false, nil), nil
+ }
+ }
+ }
+ }
+}
+
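+// withStreamingRetries runs operation until it succeeds, hits a non-retriable
+// error with no fallback configured, or exhausts its retry budget. Retry
+// counts reset when a new fallback kicks in: MAX_RETRIES_WITHOUT_FALLBACK
+// applies to the primary model config, and MAX_ADDITIONAL_RETRIES_WITH_FALLBACK
+// to each fresh fallback.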
+func withStreamingRetries[T any](
+ ctx context.Context,
+ operation func(numRetry int, didProviderFallback bool, modelErr *shared.ModelError) (resp *T, fallbackRes shared.FallbackResult, err error),
+ onContextDone func(resp *T, err error),
+) (*T, error) {
+ var resp *T
+ var numTotalRetry int
+ var numFallbackRetry int
+ var fallbackRes shared.FallbackResult
+ var modelErr *shared.ModelError
+ var didProviderFallback bool
+
+ for {
+ if ctx.Err() != nil {
+ if resp != nil {
+ // Return partial result with context error
+ onContextDone(resp, ctx.Err())
+ return resp, ctx.Err()
+ }
+ return nil, ctx.Err()
+ }
+
+ var err error
+
+ var numRetry int
+ if numFallbackRetry > 0 {
+ numRetry = numFallbackRetry
+ } else {
+ numRetry = numTotalRetry
+ }
+
+ log.Printf("withStreamingRetries - will run operation")
+
+ log.Println(spew.Sdump(map[string]interface{}{
+ "numTotalRetry": numTotalRetry,
+ "didProviderFallback": didProviderFallback,
+ "modelErr": modelErr,
+ }))
+
+ resp, fallbackRes, err = operation(numTotalRetry, didProviderFallback, modelErr)
+ if err == nil {
+ return resp, nil
+ }
+
+ log.Printf("withStreamingRetries - operation returned error: %v", err)
+
+ isFallback := fallbackRes.IsFallback
+ maxRetries := MAX_RETRIES_WITHOUT_FALLBACK
+ if isFallback {
+ maxRetries = MAX_ADDITIONAL_RETRIES_WITH_FALLBACK
+ }
+
+ if fallbackRes.FallbackType == shared.FallbackTypeProvider {
+ didProviderFallback = true
+ }
+
+ compareRetries := numTotalRetry
+ if isFallback {
+ compareRetries = numFallbackRetry
+ }
+
+ log.Printf("Error in streaming operation: %v, isFallback: %t, numTotalRetry: %d, numFallbackRetry: %d, numRetry: %d, compareRetries: %d, maxRetries: %d\n", err, isFallback, numTotalRetry, numFallbackRetry, numRetry, compareRetries, maxRetries)
+
+ classifyRes := classifyBasicError(err, fallbackRes.BaseModelConfig.HasClaudeMaxAuth)
+ modelErr = &classifyRes
+
+ newFallback := false
+ if !modelErr.Retriable {
+ log.Printf("withStreamingRetries - operation returned non-retriable error: %v", err)
+ spew.Dump(modelErr)
+ if modelErr.Kind == shared.ErrContextTooLong && fallbackRes.ModelRoleConfig.LargeContextFallback == nil {
+ log.Printf("withStreamingRetries - non-retriable context too long error and no large context fallback is defined, returning error")
+ // if it's a context too long error and no large context fallback is defined, return the error
+ return resp, err
+ } else if modelErr.Kind != shared.ErrContextTooLong && fallbackRes.ModelRoleConfig.ErrorFallback == nil {
+ log.Printf("withStreamingRetries - non-retriable error and no error fallback is defined, returning error")
+ // if it's any other error and no error fallback is defined, return the error
+ return resp, err
+ }
+ log.Printf("withStreamingRetries - operation returned non-retriable error, but has fallback - resetting numFallbackRetry to 0 and continuing to retry")
+ numFallbackRetry = 0
+ newFallback = true
+ compareRetries = 0
+ // otherwise, continue to retry logic
+ }
+
+ if compareRetries >= maxRetries {
+ log.Printf("withStreamingRetries - compareRetries >= maxRetries - returning error")
+ return resp, err
+ }
+
+ var retryDelay time.Duration
+ if modelErr != nil && modelErr.RetryAfterSeconds > 0 {
+ // if the model err has a retry after, then use that with a bit of padding
+ retryDelay = time.Duration(int(float64(modelErr.RetryAfterSeconds)*1.1)) * time.Second
+ } else {
+ // otherwise, use some jitter
+ retryDelay = time.Duration(1000+rand.Intn(200)) * time.Millisecond
+ }
+
+ log.Printf("withStreamingRetries - retrying stream in %v seconds", retryDelay)
+ time.Sleep(retryDelay)
+
+ if modelErr != nil && modelErr.ShouldIncrementRetry() {
+ numTotalRetry++
+ if isFallback && !newFallback {
+ numFallbackRetry++
+ }
+ }
+ }
+}
diff --git a/app/server/model/litellm.go b/app/server/model/litellm.go
new file mode 100644
index 0000000000000000000000000000000000000000..93a9bd360a3f76dd296da9b93fe17026b5488335
--- /dev/null
+++ b/app/server/model/litellm.go
@@ -0,0 +1,141 @@
+package model
+
+import (
+ "context"
+ "fmt"
+ "log"
+ "net/http"
+ "os"
+ "os/exec"
+ "strconv"
+ "sync"
+ "time"
+)
+
+var (
+ liteLLMOnce sync.Once
+ liteLLMCmd *exec.Cmd
+)
+
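+// EnsureLiteLLM starts the LiteLLM proxy at most once per process (guarded by
+// sync.Once), then polls its /health endpoint every 500ms until it responds
+// or the 10 second launch timeout elapses.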
+func EnsureLiteLLM(numWorkers int) error {
+ var finalErr error
+ liteLLMOnce.Do(func() {
+ if isLiteLLMHealthy() {
+ log.Println("LiteLLM proxy is already healthy")
+ return
+ }
+
+ log.Println("LiteLLM proxy is not running. Starting...")
+
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
+
+ err := startLiteLLMServer(numWorkers)
+ if err != nil {
+ log.Println("LiteLLM proxy launch failed:", err)
+ finalErr = fmt.Errorf("LiteLLM proxy launch failed: %w", err)
+ return
+ }
+
+ ticker := time.NewTicker(500 * time.Millisecond)
+ defer ticker.Stop()
+
+ for {
+ select {
+ case <-ctx.Done():
+ log.Println("LiteLLM proxy launch timed out")
+ finalErr = fmt.Errorf("LiteLLM proxy launch timed out")
+ return
+ case <-ticker.C:
+ if isLiteLLMHealthy() {
+ log.Println("LiteLLM proxy is healthy")
+ return
+ } else {
+ log.Println("LiteLLM proxy is not healthy yet, retrying after 500ms...")
+ }
+ }
+ }
+ })
+
+ return finalErr
+}
+
+func ShutdownLiteLLMServer() error {
+ if liteLLMCmd != nil && liteLLMCmd.Process != nil {
+ log.Println("Shutting down LiteLLM proxy gracefully...")
+ if err := liteLLMCmd.Process.Signal(os.Interrupt); err != nil {
+ return fmt.Errorf("failed to signal LiteLLM for shutdown: %w", err)
+ }
+
+ done := make(chan error, 1)
+ go func() {
+ done <- liteLLMCmd.Wait()
+ }()
+
+ select {
+ case <-time.After(5 * time.Second):
+ log.Println("LiteLLM proxy shutdown timed out, forcing kill")
+ return liteLLMCmd.Process.Kill()
+ case err := <-done:
+ return err
+ }
+ }
+ return nil
+}
+
+func isLiteLLMHealthy() bool {
+ ctx, cancel := context.WithTimeout(context.Background(), 500*time.Millisecond)
+ defer cancel()
+
+ req, err := http.NewRequestWithContext(ctx, "GET", "http://localhost:4000/health", nil)
+ if err != nil {
+ log.Println("LiteLLM health check request failed:", err)
+ return false
+ }
+
+ resp, err := http.DefaultClient.Do(req)
+ if err != nil {
+ log.Println("LiteLLM health check failed:", err)
+ return false
+ }
+ defer resp.Body.Close()
+
+ return resp.StatusCode == 200
+}
+
+func startLiteLLMServer(numWorkers int) error {
+ liteLLMCmd = exec.Command("python3",
+ "-m", "uvicorn",
+ "litellm_proxy:app",
+ "--host", "0.0.0.0",
+ "--port", "4000",
+ "--workers", strconv.Itoa(numWorkers),
+ )
+
+ if os.Getenv("LITELLM_PROXY_DIR") != "" {
+ liteLLMCmd.Dir = os.Getenv("LITELLM_PROXY_DIR")
+ }
+
+ // clean env
+ liteLLMCmd.Env = []string{
+ "PATH=" + os.Getenv("PATH"),
+ "HOME=" + os.Getenv("HOME"),
+ }
+
+ if os.Getenv("OLLAMA_BASE_URL") != "" {
+ log.Println("OLLAMA_BASE_URL is set, so we can reach ollama from inside docker container in local mode")
+ // so we can reach ollama from inside docker container in local mode
+ liteLLMCmd.Env = append(liteLLMCmd.Env, "OLLAMA_BASE_URL="+os.Getenv("OLLAMA_BASE_URL"))
+ }
+
+ liteLLMCmd.Stdout = os.Stdout
+ liteLLMCmd.Stderr = os.Stderr
+
+ err := liteLLMCmd.Start()
+ if err != nil {
+ return err
+ }
+
+ log.Println("LiteLLM proxy launched")
+ return nil
+}
diff --git a/app/server/model/model_error.go b/app/server/model/model_error.go
new file mode 100644
index 0000000000000000000000000000000000000000..3ad4c2990f7f81fedc6c3fecc5f63155b273a612
--- /dev/null
+++ b/app/server/model/model_error.go
@@ -0,0 +1,281 @@
+package model
+
+import (
+ "fmt"
+ "log"
+ "net/http"
+ shared "plandex-shared"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+)
+
+type HTTPError struct {
+ StatusCode int
+ Body string
+ Header http.Header
+}
+
+func (e *HTTPError) Error() string {
+ return fmt.Sprintf("status code: %d, body: %s", e.StatusCode, e.Body)
+}
+
+// JSON-style `"retry_after_ms":1234`
+var reJSON = regexp.MustCompile(`"retry_after_ms"\s*:\s*(\d+)`)
+
+// Header- or text-style "Retry-After: 12" / "retry_after: 12s"
+var reRetryAfter = regexp.MustCompile(
+ `retry[_\-\s]?after[_\-\s]?(?:[:\s]+)?(\d+)(ms|seconds?|secs?|s)?`,
+)
+
+// Free-form Azure style "Try again in 59 seconds."
+// Also matches "Retry in 10 seconds."
+var reTryAgain = regexp.MustCompile(
+ `(?:re)?try[_\-\s]+(?:again[_\-\s]+)?in[_\-\s]+(\d+)(ms|seconds?|secs?|s)?`,
+)
+
+func ClassifyErrMsg(msg string) *shared.ModelError {
+ log.Printf("Classifying error message: %s", msg)
+
+ msg = strings.ToLower(msg)
+
+ if strings.Contains(msg, "maximum context length") ||
+ strings.Contains(msg, "context length exceeded") ||
+ strings.Contains(msg, "exceed context limit") ||
+ strings.Contains(msg, "decrease input length") ||
+ strings.Contains(msg, "too many tokens") ||
+ strings.Contains(msg, "payload too large") ||
+ strings.Contains(msg, "payload is too large") ||
+ strings.Contains(msg, "input is too large") ||
+ strings.Contains(msg, "input too large") ||
+ strings.Contains(msg, "input is too long") ||
+ strings.Contains(msg, "input too long") {
+ log.Printf("Context too long error: %s", msg)
+ return &shared.ModelError{
+ Kind: shared.ErrContextTooLong,
+ Retriable: false,
+ RetryAfterSeconds: 0,
+ }
+ }
+
+ if strings.Contains(msg, "model_overloaded") ||
+ strings.Contains(msg, "model overloaded") ||
+ strings.Contains(msg, "server is overloaded") ||
+ strings.Contains(msg, "model is currently overloaded") ||
+ strings.Contains(msg, "overloaded_error") ||
+ strings.Contains(msg, "resource has been exhausted") {
+ log.Printf("Overloaded error: %s", msg)
+ return &shared.ModelError{
+ Kind: shared.ErrOverloaded,
+ Retriable: true,
+ RetryAfterSeconds: 0,
+ }
+ }
+
+ if strings.Contains(msg, "cache control") {
+ log.Printf("Cache control error: %s", msg)
+ return &shared.ModelError{
+ Kind: shared.ErrCacheSupport,
+ Retriable: true,
+ RetryAfterSeconds: 0,
+ }
+ }
+
+ log.Println("No error classification based on message")
+
+ return nil
+}
+
+func ClassifyModelError(code int, message string, headers http.Header, isClaudeMax bool) shared.ModelError {
+ msg := strings.ToLower(message)
+
+ // first of all, if it's claude max and a 429, it means the subscription limit was reached, so handle it accordingly
+ if isClaudeMax && code == 429 {
+ retryAfter := extractRetryAfter(headers, msg)
+ if retryAfter > 0 {
+ return shared.ModelError{
+ Kind: shared.ErrSubscriptionQuotaExhausted,
+ Retriable: true,
+ RetryAfterSeconds: retryAfter,
+ }
+ }
+ return shared.ModelError{
+ Kind: shared.ErrSubscriptionQuotaExhausted,
+ Retriable: false,
+ RetryAfterSeconds: 0,
+ }
+ }
+
+ // next try to classify the error based on the message only
+ msgRes := ClassifyErrMsg(msg)
+ if msgRes != nil {
+ log.Printf("Classified error message: %+v", msgRes)
+ return *msgRes
+ }
+
+ var res shared.ModelError
+
+ switch code {
+ case 429, 529:
+ res = shared.ModelError{
+ Kind: shared.ErrRateLimited,
+ Retriable: true,
+ RetryAfterSeconds: 0,
+ }
+ case 413:
+ res = shared.ModelError{
+ Kind: shared.ErrContextTooLong,
+ Retriable: false,
+ RetryAfterSeconds: 0,
+ }
+
+ // rare codes but they never succeed on retry if they do show up
+ case 501, 505:
+ res = shared.ModelError{
+ Kind: shared.ErrOther,
+ Retriable: false,
+ RetryAfterSeconds: 0,
+ }
+ default:
+ res = shared.ModelError{
+ Kind: shared.ErrOther,
+ // 'provider returned error' comes from OpenRouter; unless it's paired with a
+ // non-retriable status code, it's still worth retrying since OpenRouter may
+ // switch to a different underlying provider
+ Retriable: code >= 500 || strings.Contains(msg, "provider returned error"),
+ RetryAfterSeconds: 0,
+ }
+ }
+
+ log.Printf("Model error: %+v", res)
+
+ // best-effort parse of "Retry-After" style hints in the message
+ if res.Retriable {
+ retryAfter := extractRetryAfter(headers, msg)
+
+ // if the retry after is greater than the max delay, then the error is not retriable
+ if retryAfter > MAX_RETRY_DELAY_SECONDS {
+ log.Printf("Retry after %d seconds is greater than the max delay of %d seconds - not retriable", retryAfter, MAX_RETRY_DELAY_SECONDS)
+ res.Retriable = false
+ } else {
+ res.RetryAfterSeconds = retryAfter
+ }
+
+ }
+
+ return res
+}
+
+func extractRetryAfter(h http.Header, body string) (sec int) {
+ now := time.Now()
+
+ // Retry-After header: seconds or HTTP-date
+ if v := h.Get("Retry-After"); v != "" {
+ if n, err := strconv.Atoi(strings.TrimSpace(v)); err == nil {
+ return n
+ }
+ if t, err := time.Parse(http.TimeFormat, v); err == nil {
+ d := int(t.Sub(now).Seconds())
+ if d > 0 {
+ return d
+ }
+ }
+ }
+
+ // X-RateLimit-Reset epoch
+ if v := h.Get("X-RateLimit-Reset"); v != "" {
+ if reset, _ := strconv.ParseInt(v, 10, 64); reset > now.Unix() {
+ return int(reset - now.Unix())
+ }
+ }
+
+ lower := strings.ToLower(strings.TrimSpace(body))
+
+ // "retry_after_ms": 1234
+ if m := reJSON.FindStringSubmatch(lower); len(m) == 2 {
+ n, _ := strconv.Atoi(m[1])
+ return n / 1000
+ }
+ // "retry after 12"
+ if m := reRetryAfter.FindStringSubmatch(lower); len(m) >= 2 {
+ unit := ""
+ if len(m) == 3 {
+ unit = m[2]
+ }
+ return normalizeUnit(m[1], unit)
+ }
+
+ // "try again in 8"
+ if m := reTryAgain.FindStringSubmatch(lower); len(m) >= 2 {
+ unit := ""
+ if len(m) == 3 {
+ unit = m[2]
+ }
+ return normalizeUnit(m[1], unit)
+ }
+ return 0
+}
+
+func normalizeUnit(numStr, unit string) int {
+ n, _ := strconv.Atoi(numStr) // safe because the regex matched \d+
+
+ switch unit {
+ case "ms": // milliseconds
+ return n / 1000
+ case "sec", "secs", "second", "seconds", "s":
+ return n // already in seconds
+ default: // unit omitted ⇒ assume seconds
+ return n
+ }
+}
+
+func classifyBasicError(err error, isClaudeMax bool) shared.ModelError {
+ // if it's an http error, classify it based on the status code and body
+ if httpErr, ok := err.(*HTTPError); ok {
+ me := ClassifyModelError(
+ httpErr.StatusCode,
+ httpErr.Body,
+ httpErr.Header,
+ isClaudeMax,
+ )
+ return me
+ }
+
+ // try to classify the error based on the message only
+ msgRes := ClassifyErrMsg(err.Error())
+ if msgRes != nil {
+ return *msgRes
+ }
+
+ // Fall back to old heuristic – still keeps the signature identical
+ if isNonRetriableBasicErr(err) {
+ return shared.ModelError{Kind: shared.ErrOther, Retriable: false}
+ }
+ return shared.ModelError{Kind: shared.ErrOther, Retriable: true}
+}
+
+func isNonRetriableBasicErr(err error) bool {
+ errStr := err.Error()
+
+ // we don't want to retry on the errors below
+ if strings.Contains(errStr, "context deadline exceeded") || strings.Contains(errStr, "context canceled") {
+ log.Println("Context deadline exceeded or canceled - no retry")
+ return true
+ }
+
+ if strings.Contains(errStr, "status code: 400") &&
+ strings.Contains(errStr, "reduce the length of the messages") {
+ log.Println("Token limit exceeded - no retry")
+ return true
+ }
+
+ if strings.Contains(errStr, "status code: 401") {
+ log.Println("Invalid auth or api key - no retry")
+ return true
+ }
+
+ if strings.Contains(errStr, "status code: 429") && strings.Contains(errStr, "exceeded your current quota") {
+ log.Println("Current quota exceeded - no retry")
+ return true
+ }
+
+ return false
+}
diff --git a/app/server/model/model_request.go b/app/server/model/model_request.go
new file mode 100644
index 0000000000000000000000000000000000000000..c7b398d96905f768a2a3e25d117cc36fed0915d0
--- /dev/null
+++ b/app/server/model/model_request.go
@@ -0,0 +1,294 @@
+package model
+
+import (
+ "context"
+ "fmt"
+ "log"
+ "plandex-server/db"
+ "plandex-server/hooks"
+ "plandex-server/notify"
+ "plandex-server/types"
+ shared "plandex-shared"
+ "runtime/debug"
+ "strings"
+ "time"
+
+ "github.com/davecgh/go-spew/spew"
+ "github.com/sashabaranov/go-openai"
+)
+
+type ModelRequestParams struct {
+ Clients map[string]ClientInfo
+ AuthVars map[string]string
+ Auth *types.ServerAuth
+ Plan *db.Plan
+ ModelConfig *shared.ModelRoleConfig
+ Settings *shared.PlanSettings
+ OrgUserConfig *shared.OrgUserConfig
+ Purpose string
+
+ Messages []types.ExtendedChatMessage
+ Prediction string
+ Stop []string
+ Tools []openai.Tool
+ ToolChoice *openai.ToolChoice
+
+ EstimatedOutputTokens int // optional
+
+ ModelStreamId string
+ ConvoMessageId string
+ BuildId string
+ ModelPackName string
+ SessionId string
+
+ BeforeReq func()
+ AfterReq func()
+
+ OnStream func(string, string) bool
+
+ WillCacheNumTokens int
+}
+
+func ModelRequest(
+ ctx context.Context,
+ params ModelRequestParams,
+) (*types.ModelResponse, error) {
+ clients := params.Clients
+ authVars := params.AuthVars
+ auth := params.Auth
+ plan := params.Plan
+ messages := params.Messages
+ prediction := params.Prediction
+ stop := params.Stop
+ tools := params.Tools
+ toolChoice := params.ToolChoice
+ modelConfig := params.ModelConfig
+ modelStreamId := params.ModelStreamId
+ convoMessageId := params.ConvoMessageId
+ buildId := params.BuildId
+ modelPackName := params.ModelPackName
+ purpose := params.Purpose
+ sessionId := params.SessionId
+ settings := params.Settings
+ orgUserConfig := params.OrgUserConfig
+ currentOrgId := auth.OrgId
+ currentUserId := auth.User.Id
+
+ if purpose == "" {
+ return nil, fmt.Errorf("purpose is required")
+ }
+
+ baseModelConfig := modelConfig.GetBaseModelConfig(authVars, settings, orgUserConfig)
+
+ messages = FilterEmptyMessages(messages)
+ messages = CheckSingleSystemMessage(modelConfig, baseModelConfig, messages)
+ inputTokensEstimate := GetMessagesTokenEstimate(messages...) + TokensPerRequest
+
+ config := modelConfig.GetRoleForInputTokens(inputTokensEstimate, settings)
+ modelConfig = &config
+
+ if params.EstimatedOutputTokens != 0 {
+ config = modelConfig.GetRoleForOutputTokens(params.EstimatedOutputTokens, settings)
+ modelConfig = &config
+ }
+
+ log.Println("ModelRequest - modelConfig:")
+ spew.Dump(modelConfig)
+ log.Println("ModelRequest - baseModelConfig:")
+ spew.Dump(baseModelConfig)
+
+ log.Printf("Model config - role: %s, model: %s, max output tokens: %d\n", modelConfig.Role, baseModelConfig.ModelName, baseModelConfig.MaxOutputTokens)
+
+ expectedOutputTokens := baseModelConfig.MaxOutputTokens - inputTokensEstimate
+ if params.EstimatedOutputTokens != 0 {
+ expectedOutputTokens = params.EstimatedOutputTokens
+ }
+
+ _, apiErr := hooks.ExecHook(hooks.WillSendModelRequest, hooks.HookParams{
+ Auth: auth,
+ Plan: plan,
+ WillSendModelRequestParams: &hooks.WillSendModelRequestParams{
+ InputTokens: inputTokensEstimate,
+ OutputTokens: expectedOutputTokens,
+ ModelName: baseModelConfig.ModelName,
+ ModelId: baseModelConfig.ModelId,
+ ModelTag: baseModelConfig.ModelTag,
+ },
+ })
+
+ if apiErr != nil {
+ return nil, apiErr
+ }
+
+ if params.BeforeReq != nil {
+ params.BeforeReq()
+ }
+
+ reqStarted := time.Now()
+
+ req := types.ExtendedChatCompletionRequest{
+ Model: baseModelConfig.ModelName,
+ Messages: messages,
+ }
+
+ if !baseModelConfig.RoleParamsDisabled {
+ req.Temperature = modelConfig.Temperature
+ req.TopP = modelConfig.TopP
+ }
+
+ if len(tools) > 0 {
+ req.Tools = tools
+ }
+
+ if toolChoice != nil {
+ req.ToolChoice = toolChoice
+ }
+
+ onStream := params.OnStream
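+ // when the model doesn't support stop sequences natively, emulate them:
+ // stop streaming once a stop sequence appears in the buffer, then truncate
+ // the final content at the earliest stop sequence after the request returns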
+ if baseModelConfig.StopDisabled {
+ if len(stop) > 0 {
+ onStream = func(chunk string, buffer string) (shouldStop bool) {
+ for _, stopSequence := range stop {
+ if strings.Contains(buffer, stopSequence) {
+ return true
+ }
+ }
+ if params.OnStream != nil {
+ return params.OnStream(chunk, buffer)
+ }
+ return false
+ }
+ }
+ } else {
+ req.Stop = stop
+ }
+
+ if prediction != "" {
+ req.Prediction = &types.OpenAIPrediction{
+ Type: "content",
+ Content: prediction,
+ }
+ }
+
+ res, err := CreateChatCompletionWithInternalStream(clients, authVars, modelConfig, settings, orgUserConfig, currentOrgId, currentUserId, ctx, req, onStream, reqStarted)
+
+ if err != nil {
+ return nil, err
+ }
+
+ if baseModelConfig.StopDisabled && len(stop) > 0 {
+ earliest := len(res.Content)
+ found := false
+ for _, s := range stop {
+ if i := strings.Index(res.Content, s); i != -1 && i < earliest {
+ earliest = i
+ found = true
+ }
+ }
+ if found {
+ res.Content = res.Content[:earliest]
+ }
+ }
+
+ if params.AfterReq != nil {
+ params.AfterReq()
+ }
+
+ // log.Printf("\n\n**\n\nModel response: %s\n\n**\n\n", res.Content)
+
+ var inputTokens int
+ var outputTokens int
+ var cachedTokens int
+
+ if res.Usage != nil {
+ if res.Usage.PromptTokensDetails != nil {
+ cachedTokens = res.Usage.PromptTokensDetails.CachedTokens
+ }
+ inputTokens = res.Usage.PromptTokens
+ outputTokens = res.Usage.CompletionTokens
+ } else {
+ inputTokens = inputTokensEstimate
+ outputTokens = shared.GetNumTokensEstimate(res.Content)
+
+ if params.WillCacheNumTokens > 0 {
+ cachedTokens = params.WillCacheNumTokens
+ }
+ }
+
+ go func() {
+ defer func() {
+ if r := recover(); r != nil {
+ log.Printf("panic in DidSendModelRequest hook: %v\n%s", r, debug.Stack())
+ go notify.NotifyErr(notify.SeverityError, fmt.Errorf("panic in DidSendModelRequest hook: %v\n%s", r, debug.Stack()))
+ }
+ }()
+
+ _, apiErr := hooks.ExecHook(hooks.DidSendModelRequest, hooks.HookParams{
+ Auth: auth,
+ Plan: plan,
+ DidSendModelRequestParams: &hooks.DidSendModelRequestParams{
+ InputTokens: inputTokens,
+ OutputTokens: outputTokens,
+ CachedTokens: cachedTokens,
+ ModelId: baseModelConfig.ModelId,
+ ModelTag: baseModelConfig.ModelTag,
+ ModelName: baseModelConfig.ModelName,
+ ModelProvider: baseModelConfig.Provider,
+ ModelPackName: modelPackName,
+ ModelRole: modelConfig.Role,
+ Purpose: purpose,
+ GenerationId: res.GenerationId,
+ PlanId: plan.Id,
+ ModelStreamId: modelStreamId,
+ ConvoMessageId: convoMessageId,
+ BuildId: buildId,
+
+ RequestStartedAt: reqStarted,
+ Streaming: true,
+ Req: &req,
+ StreamResult: res.Content,
+ ModelConfig: modelConfig,
+ FirstTokenAt: res.FirstTokenAt,
+ SessionId: sessionId,
+ },
+ })
+
+ if apiErr != nil {
+ log.Printf("buildWholeFile - error executing DidSendModelRequest hook: %v", apiErr)
+ go notify.NotifyErr(notify.SeverityError, fmt.Errorf("error executing DidSendModelRequest hook: %v", apiErr))
+ }
+ }()
+
+ return res, nil
+}
+
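+// FilterEmptyMessages drops text parts with empty content, along with any
+// message left with no content parts at all.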
+func FilterEmptyMessages(messages []types.ExtendedChatMessage) []types.ExtendedChatMessage {
+ filteredMessages := []types.ExtendedChatMessage{}
+ for _, message := range messages {
+ var content []types.ExtendedChatMessagePart
+ for _, part := range message.Content {
+ if part.Type != openai.ChatMessagePartTypeText || part.Text != "" {
+ content = append(content, part)
+ }
+ }
+ if len(content) > 0 {
+ filteredMessages = append(filteredMessages, types.ExtendedChatMessage{
+ Role: message.Role,
+ Content: content,
+ })
+ }
+ }
+ return filteredMessages
+}
+
+func CheckSingleSystemMessage(modelConfig *shared.ModelRoleConfig, baseModelConfig *shared.BaseModelConfig, messages []types.ExtendedChatMessage) []types.ExtendedChatMessage {
+ if len(messages) == 1 && baseModelConfig.SingleMessageNoSystemPrompt {
+ if messages[0].Role == openai.ChatMessageRoleSystem {
+ msg := messages[0]
+ msg.Role = openai.ChatMessageRoleUser
+ return []types.ExtendedChatMessage{msg}
+ }
+ }
+
+ return messages
+}
diff --git a/app/server/model/name.go b/app/server/model/name.go
new file mode 100644
index 0000000000000000000000000000000000000000..c763a336d9bda10da3d32db99bcd4ddcdf0a57f3
--- /dev/null
+++ b/app/server/model/name.go
@@ -0,0 +1,323 @@
+package model
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "plandex-server/db"
+ "plandex-server/model/prompts"
+ "plandex-server/types"
+ "plandex-server/utils"
+
+ shared "plandex-shared"
+
+ "github.com/sashabaranov/go-openai"
+)
+
+func GenPlanName(
+ auth *types.ServerAuth,
+ plan *db.Plan,
+ settings *shared.PlanSettings,
+ orgUserConfig *shared.OrgUserConfig,
+ clients map[string]ClientInfo,
+ authVars map[string]string,
+ planContent string,
+ sessionId string,
+ ctx context.Context,
+) (string, error) {
+ config := settings.GetModelPack().Namer
+
+ var tools []openai.Tool
+ var toolChoice *openai.ToolChoice
+
+ baseModelConfig := config.GetBaseModelConfig(authVars, settings, orgUserConfig)
+
+ var sysPrompt string
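+ // models that prefer XML output get an XML prompt; otherwise the model is
+ // forced into a structured planName function call via tool choice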
+ if baseModelConfig.PreferredOutputFormat == shared.ModelOutputFormatXml {
+ sysPrompt = prompts.SysPlanNameXml
+ } else {
+ sysPrompt = prompts.SysPlanName
+ tools = []openai.Tool{
+ {
+ Type: "function",
+ Function: &prompts.PlanNameFn,
+ },
+ }
+ choice := openai.ToolChoice{
+ Type: "function",
+ Function: openai.ToolFunction{
+ Name: prompts.PlanNameFn.Name,
+ },
+ }
+ toolChoice = &choice
+ }
+
+ prompt := prompts.GetPlanNamePrompt(sysPrompt, planContent)
+
+ messages := []types.ExtendedChatMessage{
+ {
+ Role: openai.ChatMessageRoleSystem,
+ Content: []types.ExtendedChatMessagePart{
+ {
+ Type: openai.ChatMessagePartTypeText,
+ Text: prompt,
+ },
+ },
+ },
+ }
+
+ modelRes, err := ModelRequest(ctx, ModelRequestParams{
+ Clients: clients,
+ AuthVars: authVars,
+ Auth: auth,
+ Plan: plan,
+ ModelConfig: &config,
+ OrgUserConfig: orgUserConfig,
+ Purpose: "Plan name",
+ Messages: messages,
+ Tools: tools,
+ ToolChoice: toolChoice,
+ SessionId: sessionId,
+ Settings: settings,
+ })
+
+ if err != nil {
+ fmt.Printf("Error during plan name model call: %v\n", err)
+ return "", err
+ }
+
+ var planName string
+ content := modelRes.Content
+
+ if baseModelConfig.PreferredOutputFormat == shared.ModelOutputFormatXml {
+ planName = utils.GetXMLContent(content, "planName")
+ if planName == "" {
+ return "", fmt.Errorf("No planName tag found in XML response")
+ }
+ } else {
+ if content == "" {
+ fmt.Println("no namePlan function call found in response")
+ return "", fmt.Errorf("No namePlan function call found in response. The model failed to generate a valid response.")
+ }
+
+ var nameRes prompts.PlanNameRes
+ err = json.Unmarshal([]byte(content), &nameRes)
+ if err != nil {
+ fmt.Printf("Error unmarshalling plan description response: %v\n", err)
+ return "", err
+ }
+ planName = nameRes.PlanName
+ }
+
+ return planName, nil
+}
+
+type GenPipedDataNameParams struct {
+ Ctx context.Context
+ Auth *types.ServerAuth
+ Plan *db.Plan
+ Settings *shared.PlanSettings
+ OrgUserConfig *shared.OrgUserConfig
+ AuthVars map[string]string
+ SessionId string
+ Clients map[string]ClientInfo
+ PipedContent string
+}
+
+func GenPipedDataName(
+ params GenPipedDataNameParams,
+) (string, error) {
+ ctx := params.Ctx
+ auth := params.Auth
+ plan := params.Plan
+ settings := params.Settings
+ clients := params.Clients
+ authVars := params.AuthVars
+ pipedContent := params.PipedContent
+ sessionId := params.SessionId
+ orgUserConfig := params.OrgUserConfig
+
+ config := settings.GetModelPack().Namer
+
+ var sysPrompt string
+ var tools []openai.Tool
+ var toolChoice *openai.ToolChoice
+
+ baseModelConfig := config.GetBaseModelConfig(authVars, settings, orgUserConfig)
+
+ if baseModelConfig.PreferredOutputFormat == shared.ModelOutputFormatXml {
+ sysPrompt = prompts.SysPipedDataNameXml
+ } else {
+ sysPrompt = prompts.SysPipedDataName
+ tools = []openai.Tool{
+ {
+ Type: "function",
+ Function: &prompts.PipedDataNameFn,
+ },
+ }
+ choice := openai.ToolChoice{
+ Type: "function",
+ Function: openai.ToolFunction{
+ Name: prompts.PipedDataNameFn.Name,
+ },
+ }
+ toolChoice = &choice
+ }
+
+ prompt := prompts.GetPipedDataNamePrompt(sysPrompt, pipedContent)
+
+ messages := []types.ExtendedChatMessage{
+ {
+ Role: openai.ChatMessageRoleSystem,
+ Content: []types.ExtendedChatMessagePart{
+ {
+ Type: openai.ChatMessagePartTypeText,
+ Text: prompt,
+ },
+ },
+ },
+ }
+
+ modelRes, err := ModelRequest(ctx, ModelRequestParams{
+ Clients: clients,
+ Auth: auth,
+ AuthVars: authVars,
+ Plan: plan,
+ ModelConfig: &config,
+ Purpose: "Piped data name",
+ Messages: messages,
+ Tools: tools,
+ ToolChoice: toolChoice,
+ SessionId: sessionId,
+ Settings: settings,
+ OrgUserConfig: orgUserConfig,
+ })
+
+ if err != nil {
+ fmt.Printf("Error during piped data name model call: %v\n", err)
+ return "", err
+ }
+
+ var name string
+ content := modelRes.Content
+
+ if baseModelConfig.PreferredOutputFormat == shared.ModelOutputFormatXml {
+ name = utils.GetXMLContent(content, "name")
+ if name == "" {
+ return "", fmt.Errorf("No name tag found in XML response")
+ }
+ } else {
+ if content == "" {
+ fmt.Println("no namePipedData function call found in response")
+ return "", fmt.Errorf("No namePipedData function call found in response. The model failed to generate a valid response.")
+ }
+
+ var nameRes prompts.PipedDataNameRes
+ err = json.Unmarshal([]byte(content), &nameRes)
+ if err != nil {
+ fmt.Printf("Error unmarshalling piped data name response: %v\n", err)
+ return "", err
+ }
+ name = nameRes.Name
+ }
+
+ return name, nil
+}
+
+func GenNoteName(
+ ctx context.Context,
+ auth *types.ServerAuth,
+ plan *db.Plan,
+ settings *shared.PlanSettings,
+ orgUserConfig *shared.OrgUserConfig,
+ clients map[string]ClientInfo,
+ authVars map[string]string,
+ note string,
+ sessionId string,
+) (string, error) {
+ config := settings.GetModelPack().Namer
+
+ var sysPrompt string
+ var tools []openai.Tool
+ var toolChoice *openai.ToolChoice
+
+ baseModelConfig := config.GetBaseModelConfig(authVars, settings, orgUserConfig)
+
+ if baseModelConfig.PreferredOutputFormat == shared.ModelOutputFormatXml {
+ sysPrompt = prompts.SysNoteNameXml
+ } else {
+ sysPrompt = prompts.SysNoteName
+ tools = []openai.Tool{
+ {
+ Type: "function",
+ Function: &prompts.NoteNameFn,
+ },
+ }
+ choice := openai.ToolChoice{
+ Type: "function",
+ Function: openai.ToolFunction{
+ Name: prompts.NoteNameFn.Name,
+ },
+ }
+ toolChoice = &choice
+ }
+
+ prompt := prompts.GetNoteNamePrompt(sysPrompt, note)
+
+ messages := []types.ExtendedChatMessage{
+ {
+ Role: openai.ChatMessageRoleSystem,
+ Content: []types.ExtendedChatMessagePart{
+ {
+ Type: openai.ChatMessagePartTypeText,
+ Text: prompt,
+ },
+ },
+ },
+ }
+
+ modelRes, err := ModelRequest(ctx, ModelRequestParams{
+ Clients: clients,
+ Auth: auth,
+ AuthVars: authVars,
+ Plan: plan,
+ ModelConfig: &config,
+ Purpose: "Note name",
+ Messages: messages,
+ Tools: tools,
+ ToolChoice: toolChoice,
+ SessionId: sessionId,
+ Settings: settings,
+ OrgUserConfig: orgUserConfig,
+ })
+
+ if err != nil {
+ fmt.Printf("Error during note name model call: %v\n", err)
+ return "", err
+ }
+
+ var name string
+ content := modelRes.Content
+
+ if baseModelConfig.PreferredOutputFormat == shared.ModelOutputFormatXml {
+ name = utils.GetXMLContent(content, "name")
+ if name == "" {
+ return "", fmt.Errorf("No name tag found in XML response")
+ }
+ } else {
+ if content == "" {
+ fmt.Println("no nameNote function call found in response")
+ return "", fmt.Errorf("No nameNote function call found in response. The model failed to generate a valid response.")
+ }
+
+ var nameRes prompts.NoteNameRes
+ err = json.Unmarshal([]byte(content), &nameRes)
+ if err != nil {
+ fmt.Printf("Error unmarshalling note name response: %v\n", err)
+ return "", err
+ }
+ name = nameRes.Name
+ }
+
+ return name, nil
+}
diff --git a/app/server/model/parse/subtasks.go b/app/server/model/parse/subtasks.go
new file mode 100644
index 0000000000000000000000000000000000000000..4d68c38b14fe490588ba1b0529bee9a3200a8014
--- /dev/null
+++ b/app/server/model/parse/subtasks.go
@@ -0,0 +1,121 @@
+package parse
+
+import (
+ "log"
+ "plandex-server/db"
+ "regexp"
+ "strings"
+)
+
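+// ParseSubtasks extracts subtasks from a model reply. It looks for a
+// "### Tasks" (or "### Task") section, then parses numbered task titles,
+// optional description lines, and an optional "Uses:" line of
+// backtick-quoted file paths for each task.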
+func ParseSubtasks(replyContent string) []*db.Subtask {
+ split := strings.Split(replyContent, "### Tasks")
+ if len(split) < 2 {
+ split = strings.Split(replyContent, "### Task")
+ if len(split) < 2 {
+ log.Println("[Subtasks] No tasks section found in reply")
+ return nil
+ }
+ }
+
+ lines := strings.Split(split[1], "\n")
+
+ var subtasks []*db.Subtask
+ var currentTask *db.Subtask
+ var descLines []string
+
+ for _, line := range lines {
+ line = strings.TrimSpace(line)
+ if line == "" {
+ continue
+ }
+
+ // Check for any number followed by a period and space
+ if matched, _ := regexp.MatchString(`^\d+\.\s`, line); matched {
+ // Save previous task if exists
+ if currentTask != nil {
+ currentTask.Description = strings.Join(descLines, "\n")
+ log.Printf("[Subtasks] Adding subtask: %q with %d uses files", currentTask.Title, len(currentTask.UsesFiles))
+ subtasks = append(subtasks, currentTask)
+ }
+
+ // Start new task
+ parts := strings.SplitN(line, ". ", 2)
+ if len(parts) == 2 {
+ title := parts[1]
+ currentTask = &db.Subtask{
+ Title: title,
+ }
+ descLines = nil
+ }
+ continue
+ }
+
+ // Handle Uses: section
+ if strings.HasPrefix(line, "Uses:") {
+ if currentTask != nil {
+ usesStr := strings.TrimPrefix(line, "Uses:")
+ for _, use := range strings.Split(usesStr, ",") {
+ use = strings.TrimSpace(use)
+ use = strings.Trim(use, "`")
+ if use != "" {
+ currentTask.UsesFiles = append(currentTask.UsesFiles, use)
+ }
+ }
+ log.Printf("[Subtasks] Added uses files for %q: %v", currentTask.Title, currentTask.UsesFiles)
+ }
+ continue
+ }
+
+ // Add to description if we have a current task
+ if currentTask != nil {
+ // Remove bullet point if present, but don't require it
+ line = strings.TrimPrefix(line, "-")
+ line = strings.TrimSpace(line)
+ if line != "" {
+ descLines = append(descLines, line)
+ }
+ }
+ }
+
+ // Save final task if exists
+ if currentTask != nil {
+ currentTask.Description = strings.Join(descLines, "\n")
+ log.Printf("[Subtasks] Adding final subtask: %q with %d uses files", currentTask.Title, len(currentTask.UsesFiles))
+ subtasks = append(subtasks, currentTask)
+ }
+
+ log.Printf("[Subtasks] Parsed %d total subtasks", len(subtasks))
+ return subtasks
+}
+
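+// ParseRemoveSubtasks extracts the titles of subtasks to remove from a
+// "### Remove Tasks" section, reading "- " bulleted lines until the
+// section ends.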
+func ParseRemoveSubtasks(replyContent string) []string {
+ split := strings.Split(replyContent, "### Remove Tasks")
+ if len(split) < 2 {
+ return nil
+ }
+
+ section := split[1]
+ lines := strings.Split(section, "\n")
+ var tasksToRemove []string
+
+ sawEmptyLine := false
+ for _, line := range lines {
+ line = strings.TrimSpace(line)
+ if line == "" {
+ sawEmptyLine = true
+ continue
+ }
+ if sawEmptyLine && !strings.HasPrefix(line, "-") {
+ break
+ }
+ if strings.HasPrefix(line, "- ") {
+ title := strings.TrimPrefix(line, "- ")
+ title = strings.TrimSpace(title)
+ if title != "" {
+ tasksToRemove = append(tasksToRemove, title)
+ }
+ }
+ }
+
+ return tasksToRemove
+}
diff --git a/app/server/model/parse/subtasks_test.go b/app/server/model/parse/subtasks_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..a3a66945a82a8fa7f7bdbbe1b32558fc7ea9f788
--- /dev/null
+++ b/app/server/model/parse/subtasks_test.go
@@ -0,0 +1,141 @@
+package parse
+
+import (
+ "plandex-server/db"
+ "testing"
+)
+
+func TestParseSubtasks(t *testing.T) {
+ tests := []struct {
+ name string
+ input string
+ expected []*db.Subtask
+ }{
+ {
+ name: "empty input",
+ input: "",
+ expected: nil,
+ },
+ {
+ name: "single task without description",
+ input: `### Tasks
+1. Create a new file`,
+ expected: []*db.Subtask{
+ {
+ Title: "Create a new file",
+ Description: "",
+ UsesFiles: nil,
+ },
+ },
+ },
+ {
+ name: "multiple tasks with descriptions and uses",
+ input: `### Tasks
+1. Create config file
+- Will store application settings
+- Contains environment variables
+Uses: ` + "`config/settings.yml`" + `, ` + "`config/defaults.yml`" + `
+
+2. Update main function
+- Add configuration loading
+Uses: ` + "`main.go`",
+ expected: []*db.Subtask{
+ {
+ Title: "Create config file",
+ Description: "Will store application settings\nContains environment variables",
+ UsesFiles: []string{"config/settings.yml", "config/defaults.yml"},
+ },
+ {
+ Title: "Update main function",
+ Description: "Add configuration loading",
+ UsesFiles: []string{"main.go"},
+ },
+ },
+ },
+ {
+ name: "alternative task header",
+ input: `### Task
+1. Simple task`,
+ expected: []*db.Subtask{
+ {
+ Title: "Simple task",
+ Description: "",
+ UsesFiles: nil,
+ },
+ },
+ },
+ {
+ name: "tasks with empty lines between",
+ input: `### Tasks
+1. First task
+- Description one
+
+2. Second task
+- Description two`,
+ expected: []*db.Subtask{
+ {
+ Title: "First task",
+ Description: "Description one",
+ UsesFiles: nil,
+ },
+ {
+ Title: "Second task",
+ Description: "Description two",
+ UsesFiles: nil,
+ },
+ },
+ },
+ {
+ name: "single task from pong",
+ input: "### Tasks" + `
+
+9. Update Makefile to include Homebrew-specific include and library search paths
+- Modify CFLAGS in Makefile to add -I/opt/homebrew/include
+- Modify LDFLAGS in Makefile to add -L/opt/homebrew/lib
+Uses: ` + "`Makefile`" + `, ` + "`_apply.sh`",
+ expected: []*db.Subtask{
+ {
+ Title: "Update Makefile to include Homebrew-specific include and library search paths",
+ Description: "Modify CFLAGS in Makefile to add -I/opt/homebrew/include\nModify LDFLAGS in Makefile to add -L/opt/homebrew/lib",
+ UsesFiles: []string{"Makefile", "_apply.sh"},
+ },
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ got := ParseSubtasks(tt.input)
+
+ if len(got) != len(tt.expected) {
+ t.Errorf("ParseSubtasks() returned %d subtasks, want %d", len(got), len(tt.expected))
+ return
+ }
+
+ for i := range got {
+ if got[i].Title != tt.expected[i].Title {
+ t.Errorf("Subtask[%d].Title = %q, want %q", i, got[i].Title, tt.expected[i].Title)
+ }
+ if got[i].Description != tt.expected[i].Description {
+ t.Errorf("Subtask[%d].Description = %q, want %q", i, got[i].Description, tt.expected[i].Description)
+ }
+ if !sliceEqual(got[i].UsesFiles, tt.expected[i].UsesFiles) {
+ t.Errorf("Subtask[%d].UsesFiles = %v, want %v", i, got[i].UsesFiles, tt.expected[i].UsesFiles)
+ }
+ }
+ })
+ }
+}
+
+// sliceEqual compares two string slices for equality
+func sliceEqual(a, b []string) bool {
+ if len(a) != len(b) {
+ return false
+ }
+ for i := range a {
+ if a[i] != b[i] {
+ return false
+ }
+ }
+ return true
+}
diff --git a/app/server/model/plan/activate.go b/app/server/model/plan/activate.go
new file mode 100644
index 0000000000000000000000000000000000000000..1a9547b3d31003b68e9d5c2588a2d4ea9dd7805b
--- /dev/null
+++ b/app/server/model/plan/activate.go
@@ -0,0 +1,83 @@
+package plan
+
+import (
+ "fmt"
+ "log"
+ "plandex-server/db"
+ "plandex-server/host"
+ "plandex-server/model"
+ "plandex-server/types"
+ "time"
+
+ shared "plandex-shared"
+)
+
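+// activatePlan registers a new active plan for the given branch after
+// verifying that no stream is already running for it on this host or any
+// other, then stores a model stream record so other hosts can detect it.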
+func activatePlan(
+ clients map[string]model.ClientInfo,
+ plan *db.Plan,
+ branch string,
+ auth *types.ServerAuth,
+ prompt string,
+ buildOnly,
+ autoContext bool,
+ sessionId string,
+) (*types.ActivePlan, error) {
+ log.Printf("Activate plan: plan ID %s on branch %s\n", plan.Id, branch)
+
+ // Just in case this request was made immediately after another stream finished, wait a little to allow for cleanup
+ log.Println("Waiting 100ms before checking for active plan")
+ time.Sleep(100 * time.Millisecond)
+ log.Println("Done waiting, checking for active plan")
+
+ active := GetActivePlan(plan.Id, branch)
+ if active != nil {
+ log.Printf("Tell: Active plan found for plan ID %s on branch %s\n", plan.Id, branch) // Log if an active plan is found
+ return nil, fmt.Errorf("plan %s branch %s already has an active stream on this host", plan.Id, branch)
+ }
+
+ modelStream, err := db.GetActiveModelStream(plan.Id, branch)
+ if err != nil {
+ log.Printf("Error getting active model stream: %v\n", err)
+ return nil, fmt.Errorf("error getting active model stream: %v", err)
+ }
+
+ if modelStream != nil {
+ log.Printf("Tell: Active model stream found for plan ID %s on branch %s on host %s\n", plan.Id, branch, modelStream.InternalIp) // Log if an active model stream is found
+ return nil, fmt.Errorf("plan %s branch %s already has an active stream on host %s", plan.Id, branch, modelStream.InternalIp)
+ }
+
+ active = CreateActivePlan(
+ auth.OrgId,
+ auth.User.Id,
+ plan.Id,
+ branch,
+ prompt,
+ buildOnly,
+ autoContext,
+ sessionId,
+ )
+
+ modelStream = &db.ModelStream{
+ OrgId: auth.OrgId,
+ PlanId: plan.Id,
+ InternalIp: host.Ip,
+ Branch: branch,
+ }
+ err = db.StoreModelStream(modelStream, active.Ctx, active.CancelFn)
+ if err != nil {
+ log.Printf("Tell: Error storing model stream for plan ID %s on branch %s: %v\n", plan.Id, branch, err) // Log error storing model stream
+ log.Printf("Error storing model stream: %v\n", err)
+ log.Printf("Tell: Error storing model stream: %v\n", err) // Log error storing model stream
+
+ active.StreamDoneCh <- &shared.ApiError{Msg: fmt.Sprintf("Error storing model stream: %v", err)}
+
+ return nil, fmt.Errorf("error storing model stream: %v", err)
+ }
+
+ active.ModelStreamId = modelStream.Id
+
+ log.Printf("Tell: Model stream stored with ID %s for plan ID %s on branch %s\n", modelStream.Id, plan.Id, branch) // Log successful storage of model stream
+ log.Println("Model stream id:", modelStream.Id)
+
+ return active, nil
+}
diff --git a/app/server/model/plan/build_exec.go b/app/server/model/plan/build_exec.go
new file mode 100644
index 0000000000000000000000000000000000000000..8342a6d1fda5270a7068d8da340095b209522aaf
--- /dev/null
+++ b/app/server/model/plan/build_exec.go
@@ -0,0 +1,437 @@
+package plan
+
+import (
+ "fmt"
+ "log"
+ "net/http"
+ "path/filepath"
+ "plandex-server/db"
+ "plandex-server/hooks"
+ "plandex-server/model"
+ "plandex-server/notify"
+ "plandex-server/types"
+ "runtime/debug"
+ "time"
+
+ shared "plandex-shared"
+)
+
+type BuildParams struct {
+ Clients map[string]model.ClientInfo
+ AuthVars map[string]string
+ Plan *db.Plan
+ Branch string
+ Auth *types.ServerAuth
+ SessionId string
+ OrgUserConfig *shared.OrgUserConfig
+ Settings *shared.PlanSettings
+}
+
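+// Build loads all pending builds for the plan branch and queues them for
+// execution, returning the number of file paths with pending builds.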
+func Build(params BuildParams) (int, error) {
+ clients := params.Clients
+ authVars := params.AuthVars
+ plan := params.Plan
+ branch := params.Branch
+ auth := params.Auth
+ sessionId := params.SessionId
+ orgUserConfig := params.OrgUserConfig
+ settings := params.Settings
+
+ log.Printf("Build: Called with plan ID %s on branch %s\n", plan.Id, branch)
+ log.Println("Build: Starting Build operation")
+
+ state := activeBuildStreamState{
+ clients: clients,
+ authVars: authVars,
+ auth: auth,
+ currentOrgId: auth.OrgId,
+ currentUserId: auth.User.Id,
+ orgUserConfig: orgUserConfig,
+ plan: plan,
+ branch: branch,
+ settings: settings,
+ }
+
+ streamDone := func() {
+ active := GetActivePlan(plan.Id, branch)
+ if active != nil {
+ active.StreamDoneCh <- nil
+ }
+ }
+
+ onErr := func(err error) (int, error) {
+ log.Printf("Build error: %v\n", err)
+ streamDone()
+ return 0, err
+ }
+
+ pendingBuildsByPath, err := state.loadPendingBuilds(sessionId)
+ if err != nil {
+ return onErr(err)
+ }
+
+ if len(pendingBuildsByPath) == 0 {
+ log.Println("No pending builds")
+ streamDone()
+ return 0, nil
+ }
+
+ err = db.SetPlanStatus(plan.Id, branch, shared.PlanStatusBuilding, "")
+
+ if err != nil {
+ log.Printf("Error setting plan status to building: %v\n", err)
+ return onErr(fmt.Errorf("error setting plan status to building: %v", err))
+ }
+
+ log.Printf("Starting %d builds\n", len(pendingBuildsByPath))
+
+ for _, pendingBuilds := range pendingBuildsByPath {
+ go state.queueBuilds(pendingBuilds)
+ }
+
+ return len(pendingBuildsByPath), nil
+}
+
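+// queueBuild appends a build to the per-path queue and starts execution
+// unless a build for the same path is already running.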
+func (state *activeBuildStreamState) queueBuild(activeBuild *types.ActiveBuild) {
+ planId := state.plan.Id
+ branch := state.branch
+
+ filePath := activeBuild.Path
+
+ // log.Printf("Queue:")
+ // spew.Dump(activePlan.BuildQueuesByPath[filePath])
+
+ var isBuilding bool
+
+ UpdateActivePlan(planId, branch, func(active *types.ActivePlan) {
+ active.BuildQueuesByPath[filePath] = append(active.BuildQueuesByPath[filePath], activeBuild)
+ isBuilding = active.IsBuildingByPath[filePath]
+ })
+ log.Printf("Queued build for file %s\n", filePath)
+
+ if isBuilding {
+ log.Printf("Already building file %s\n", filePath)
+ return
+ } else {
+ log.Printf("Not building file %s\n", filePath)
+
+ active := GetActivePlan(planId, branch)
+ if active == nil {
+ log.Printf("Active plan not found for plan ID %s and branch %s\n", planId, branch)
+ return
+ }
+
+ UpdateActivePlan(planId, branch, func(active *types.ActivePlan) {
+ active.IsBuildingByPath[filePath] = true
+ })
+
+ go state.execPlanBuild(activeBuild)
+ }
+}
+
+func (state *activeBuildStreamState) queueBuilds(activeBuilds []*types.ActiveBuild) {
+ log.Printf("Queueing %d builds\n", len(activeBuilds))
+
+ for _, activeBuild := range activeBuilds {
+ state.queueBuild(activeBuild)
+ }
+}
+
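+// execPlanBuild prepares the file build state, streams initial build
+// info to the client (except for file operations and new files), and
+// hands off to buildFile.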
+func (buildState *activeBuildStreamState) execPlanBuild(activeBuild *types.ActiveBuild) {
+ if activeBuild == nil {
+ log.Println("No active build")
+ return
+ }
+
+ log.Printf("execPlanBuild - %s\n", activeBuild.Path)
+ // log.Println(spew.Sdump(activeBuild))
+
+ planId := buildState.plan.Id
+ branch := buildState.branch
+
+ activePlan := GetActivePlan(planId, branch)
+ if activePlan == nil {
+ log.Printf("Active plan not found for plan ID %s and branch %s\n", planId, branch)
+ return
+ }
+
+ defer func() {
+ if r := recover(); r != nil {
+ log.Printf("execPlanBuild: Panic: %v\n%s\n", r, string(debug.Stack()))
+
+ go notify.NotifyErr(notify.SeverityError, fmt.Errorf("execPlanBuild: Panic: %v\n%s", r, string(debug.Stack())))
+
+ activePlan.StreamDoneCh <- &shared.ApiError{
+ Type: shared.ApiErrorTypeOther,
+ Status: http.StatusInternalServerError,
+ Msg: "Panic in execPlanBuild",
+ }
+ }
+ }()
+
+ filePath := activeBuild.Path
+
+ if !activePlan.IsBuildingByPath[filePath] {
+ UpdateActivePlan(activePlan.Id, activePlan.Branch, func(ap *types.ActivePlan) {
+ ap.IsBuildingByPath[filePath] = true
+ })
+ }
+
+ fileState := &activeBuildStreamFileState{
+ activeBuildStreamState: buildState,
+ filePath: filePath,
+ activeBuild: activeBuild,
+ builderRun: hooks.DidFinishBuilderRunParams{
+ StartedAt: time.Now(),
+ PlanId: activePlan.Id,
+ FilePath: filePath,
+ FileExt: filepath.Ext(filePath),
+ },
+ }
+
+ log.Printf("execPlanBuild - %s - calling fileState.loadBuildFile()\n", filePath)
+ err := fileState.loadBuildFile(activeBuild)
+ if err != nil {
+ log.Printf("Error loading build file: %v\n", err)
+ fileState.onBuildFileError(fmt.Errorf("error loading build file: %v", err))
+ return
+ }
+
+ fileState.resolvePreBuildState()
+
+ // unless it's a file operation or a new file, stream initial build status to the client
+ if !activeBuild.IsFileOperation() && !fileState.isNewFile {
+ log.Printf("execPlanBuild - %s - streaming initial build info\n", filePath)
+ // spew.Dump(activeBuild)
+ buildInfo := &shared.BuildInfo{
+ Path: filePath,
+ NumTokens: 0,
+ Finished: false,
+ }
+ activePlan.Stream(shared.StreamMessage{
+ Type: shared.StreamMessageBuildInfo,
+ BuildInfo: buildInfo,
+ })
+ } else if activeBuild.IsFileOperation() {
+ log.Printf("execPlanBuild - %s - file operation - won't stream initial build info\n", filePath)
+ } else if fileState.isNewFile {
+ log.Printf("execPlanBuild - %s - new file - won't stream initial build info\n", filePath)
+ }
+
+ log.Printf("execPlanBuild - %s - calling fileState.buildFile()\n", filePath)
+ fileState.buildFile()
+}
+
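+// buildFile dispatches a single build: move, remove, and reset
+// operations are handled directly, new files are stored as-is, and edits
+// to existing files go through the structured edits flow.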
+func (fileState *activeBuildStreamFileState) buildFile() {
+ filePath := fileState.filePath
+ activeBuild := fileState.activeBuild
+ planId := fileState.plan.Id
+ branch := fileState.branch
+ currentOrgId := fileState.currentOrgId
+ build := fileState.build
+
+ activePlan := GetActivePlan(planId, branch)
+
+ if activePlan == nil {
+ log.Printf("Active plan not found for plan ID %s and branch %s\n", planId, branch)
+ return
+ }
+
+ log.Printf("Building file %s\n", filePath)
+ log.Printf("%d files in context\n", len(activePlan.ContextsByPath))
+ // log.Println("activePlan.ContextsByPath files:")
+ // for k := range activePlan.ContextsByPath {
+ // log.Println(k)
+ // }
+
+ if activeBuild.IsMoveOp {
+ log.Printf("File %s is a move operation. Moving to %s\n", filePath, activeBuild.MoveDestination)
+
+ // For move operations, we split it into two separate builds:
+ // 1. A removal build for the source file
+ // 2. A creation build for the destination file with the current content
+ // This is simpler than handling moves in a single build since our build system
+ // is designed around operating on one path at a time
+ fileState.activeBuildStreamState.queueBuilds([]*types.ActiveBuild{
+ {
+ ReplyId: activeBuild.ReplyId,
+ Path: activeBuild.Path,
+ IsRemoveOp: true,
+ },
+ {
+ ReplyId: activeBuild.ReplyId,
+ Path: activeBuild.MoveDestination,
+ FileContent: fileState.preBuildState,
+ FileContentTokens: 0,
+ },
+ })
+
+ // Mark this move operation as successful since we've queued the actual work
+ activeBuild.Success = true
+
+ UpdateActivePlan(planId, branch, func(active *types.ActivePlan) {
+ active.IsBuildingByPath[filePath] = false
+ active.BuiltFiles[filePath] = true
+ })
+
+ // Process the next build in queue (which will be our removal build)
+ // We need to explicitly advance the queue for the source path since this
+ // current build is holding the 'building' state open
+ // The create build for the destination will be handled automatically by the queue logic
+ fileState.buildNextInQueue()
+ return
+ }
+
+ if activeBuild.IsRemoveOp {
+ log.Printf("File %s is a remove operation. Removing file.\n", filePath)
+
+ log.Printf("streaming remove build info for file %s\n", filePath)
+ buildInfo := &shared.BuildInfo{
+ Path: filePath,
+ NumTokens: 0,
+ Removed: true,
+ Finished: true,
+ }
+
+ activePlan.Stream(shared.StreamMessage{
+ Type: shared.StreamMessageBuildInfo,
+ BuildInfo: buildInfo,
+ })
+
+ planRes := &db.PlanFileResult{
+ OrgId: currentOrgId,
+ PlanId: planId,
+ PlanBuildId: build.Id,
+ ConvoMessageId: build.ConvoMessageId,
+ Path: filePath,
+ Content: "",
+ RemovedFile: true,
+ }
+ fileState.onFinishBuildFile(planRes)
+ return
+ }
+
+ if activeBuild.IsResetOp {
+ log.Printf("File %s is a reset operation. Resetting file.\n", filePath)
+
+ err := db.ExecRepoOperation(db.ExecRepoOperationParams{
+ OrgId: currentOrgId,
+ UserId: fileState.currentUserId,
+ PlanId: planId,
+ Branch: branch,
+ PlanBuildId: build.Id,
+ Scope: db.LockScopeWrite,
+ Reason: "reset file op",
+ Ctx: activePlan.Ctx,
+ CancelFn: activePlan.CancelFn,
+ }, func(repo *db.GitRepo) error {
+ now := time.Now()
+ return db.RejectPlanFile(currentOrgId, planId, filePath, now)
+ })
+
+ if err != nil {
+ log.Printf("Error rejecting plan file: %v\n", err)
+ fileState.onBuildFileError(fmt.Errorf("error rejecting plan file: %v", err))
+ return
+ }
+
+ buildInfo := &shared.BuildInfo{
+ Path: filePath,
+ NumTokens: 0,
+ Finished: true,
+ Removed: fileState.contextPart == nil,
+ }
+
+ activePlan.Stream(shared.StreamMessage{
+ Type: shared.StreamMessageBuildInfo,
+ BuildInfo: buildInfo,
+ })
+
+ time.Sleep(200 * time.Millisecond)
+
+ fileState.onBuildProcessed(activeBuild)
+ return
+ }
+
+ if fileState.preBuildState == "" {
+ log.Printf("File %s not found in model context or current plan. Creating new file.\n", filePath)
+
+ buildInfo := &shared.BuildInfo{
+ Path: filePath,
+ NumTokens: 0,
+ Finished: true,
+ }
+
+ log.Printf("streaming new file build info for file %s\n", filePath)
+
+ activePlan.Stream(shared.StreamMessage{
+ Type: shared.StreamMessageBuildInfo,
+ BuildInfo: buildInfo,
+ })
+
+ // new file
+ planRes := &db.PlanFileResult{
+ OrgId: currentOrgId,
+ PlanId: planId,
+ PlanBuildId: build.Id,
+ ConvoMessageId: build.ConvoMessageId,
+ Path: filePath,
+ Content: activeBuild.FileContent,
+ }
+
+ // log.Println("build exec - new file result")
+ // spew.Dump(planRes)
+ fileState.onFinishBuildFile(planRes)
+ return
+ } else {
+ currentNumTokens := shared.GetNumTokensEstimate(fileState.preBuildState)
+
+ log.Printf("Current state num tokens: %d\n", currentNumTokens)
+
+ activeBuild.CurrentFileTokens = currentNumTokens
+ activePlan.DidEditFiles = true
+ }
+
+ // build structured edits strategy now works regardless of language/tree-sitter support
+ log.Println("buildFile - building structured edits")
+ fileState.buildStructuredEdits()
+}
+
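+// resolvePreBuildState determines the file's current content, preferring
+// the version in the current plan, then the version in context; if
+// neither exists, the file is treated as new.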
+func (fileState *activeBuildStreamFileState) resolvePreBuildState() {
+ filePath := fileState.filePath
+ currentPlan := fileState.currentPlanState
+ planId := fileState.plan.Id
+ branch := fileState.branch
+
+ activePlan := GetActivePlan(planId, branch)
+
+ if activePlan == nil {
+ log.Printf("Active plan not found for plan ID %s and branch %s\n", planId, branch)
+ return
+ }
+ contextPart := activePlan.ContextsByPath[filePath]
+
+ var currentState string
+ currentPlanFile, fileInCurrentPlan := currentPlan.CurrentPlanFiles.Files[filePath]
+
+ // log.Println("plan files:")
+ // spew.Dump(currentPlan.CurrentPlanFiles.Files)
+
+ if fileInCurrentPlan {
+ log.Printf("File %s found in current plan.\n", filePath)
+ fileState.isNewFile = false
+ currentState = currentPlanFile
+ // log.Println("\n\nCurrent state:\n", currentState, "\n\n")
+
+ } else if contextPart != nil {
+ log.Printf("File %s found in model context. Using context state.\n", filePath)
+ fileState.isNewFile = false
+ currentState = contextPart.Body
+ // log.Println("\n\nCurrent state:\n", currentState, "\n\n")
+ } else {
+ fileState.isNewFile = true
+ }
+
+ fileState.preBuildState = currentState
+ fileState.contextPart = contextPart
+}
diff --git a/app/server/model/plan/build_finish.go b/app/server/model/plan/build_finish.go
new file mode 100644
index 0000000000000000000000000000000000000000..2f61eba0f170c9c981b1432f4c410227894e013d
--- /dev/null
+++ b/app/server/model/plan/build_finish.go
@@ -0,0 +1,343 @@
+package plan
+
+import (
+ "context"
+ "fmt"
+ "log"
+ "net/http"
+ "plandex-server/db"
+ "plandex-server/hooks"
+ "plandex-server/notify"
+ "plandex-server/types"
+ "strings"
+ "time"
+
+ shared "plandex-shared"
+)
+
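+// onFinishBuild runs once all queued builds are done. It waits for any
+// still-streaming reply, marks descriptions as built, and commits the
+// pending changes to the plan's git repo.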
+func (state *activeBuildStreamFileState) onFinishBuild() {
+ log.Println("Build finished")
+
+ planId := state.plan.Id
+ branch := state.branch
+ currentOrgId := state.currentOrgId
+ currentUserId := state.currentUserId
+ convoMessageId := state.convoMessageId
+ build := state.build
+
+ // first check whether the reply we're building hasn't finished streaming yet
+ stillStreaming := false
+ var doneCh chan bool
+ ap := GetActivePlan(planId, branch)
+
+ if ap == nil {
+ log.Println("onFinishBuild - Active plan not found")
+ return
+ }
+
+ if ap.CurrentStreamingReplyId == convoMessageId {
+ stillStreaming = true
+ doneCh = ap.CurrentReplyDoneCh
+ }
+ if stillStreaming {
+ log.Println("Reply is still streaming, waiting for it to finish before finishing build")
+ <-doneCh
+ }
+
+ // Check again if build is finished
+ // (more builds could have been queued while we were waiting for the reply to finish streaming)
+ ap = GetActivePlan(planId, branch)
+
+ if ap == nil {
+ log.Println("onFinishBuild - Active plan not found")
+ return
+ }
+
+ if !ap.BuildFinished() {
+ log.Println("Build not finished after waiting for reply to finish streaming")
+ return
+ }
+
+ log.Println("Locking repo for finished build")
+
+ err := db.ExecRepoOperation(db.ExecRepoOperationParams{
+ OrgId: currentOrgId,
+ UserId: currentUserId,
+ PlanId: planId,
+ Branch: branch,
+ PlanBuildId: build.Id,
+ Scope: db.LockScopeWrite,
+ Ctx: ap.Ctx,
+ CancelFn: ap.CancelFn,
+ Reason: "finish build",
+ }, func(repo *db.GitRepo) error {
+ // get plan descriptions
+ var planDescs []*db.ConvoMessageDescription
+ planDescs, err := db.GetConvoMessageDescriptions(currentOrgId, planId)
+ if err != nil {
+ log.Printf("Error getting pending build descriptions: %v\n", err)
+ return fmt.Errorf("error getting pending build descriptions: %v", err)
+ }
+
+ var unbuiltDescs []*db.ConvoMessageDescription
+ for _, desc := range planDescs {
+ if !desc.DidBuild || len(desc.BuildPathsInvalidated) > 0 {
+ unbuiltDescs = append(unbuiltDescs, desc)
+ }
+ }
+
+ // get fresh current plan state
+ var currentPlan *shared.CurrentPlanState
+ currentPlan, err = db.GetCurrentPlanState(db.CurrentPlanStateParams{
+ OrgId: currentOrgId,
+ PlanId: planId,
+ ConvoMessageDescriptions: planDescs,
+ })
+ if err != nil {
+ log.Printf("Error getting current plan state: %v\n", err)
+ return fmt.Errorf("error getting current plan state: %v", err)
+ }
+
+ descErrCh := make(chan error, len(unbuiltDescs))
+ for _, desc := range unbuiltDescs {
+ if len(desc.Operations) > 0 {
+ desc.DidBuild = true
+ desc.BuildPathsInvalidated = map[string]bool{}
+ }
+
+ go func(desc *db.ConvoMessageDescription) {
+ err := db.StoreDescription(desc)
+
+ if err != nil {
+ descErrCh <- fmt.Errorf("error storing description: %v", err)
+ return
+ }
+
+ descErrCh <- nil
+ }(desc)
+ }
+
+ for range unbuiltDescs {
+ err = <-descErrCh
+ if err != nil {
+ log.Printf("Error storing description: %v\n", err)
+ return err
+ }
+ }
+
+ err = repo.GitAddAndCommit(branch, currentPlan.PendingChangesSummaryForBuild())
+
+ if err != nil {
+ if strings.Contains(err.Error(), "nothing to commit") {
+ log.Println("Nothing to commit")
+ return nil
+ }
+ return fmt.Errorf("error committing plan build: %v", err)
+ }
+
+ log.Println("Plan build committed")
+
+ return nil
+ })
+
+ if err != nil {
+ log.Printf("Error finishing build: %v\n", err)
+
+ if err.Error() != context.Canceled.Error() {
+ go notify.NotifyErr(notify.SeverityError, fmt.Errorf("error finishing build: %v", err))
+
+ ap.StreamDoneCh <- &shared.ApiError{
+ Type: shared.ApiErrorTypeOther,
+ Status: http.StatusInternalServerError,
+ Msg: "Error finishing build: " + err.Error(),
+ }
+ }
+ return
+ }
+
+ active := GetActivePlan(planId, branch)
+
+ if active != nil && (active.RepliesFinished || active.BuildOnly) {
+ active.Finish()
+ }
+}
+
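+// onFinishBuildFile stores the build result for a single file under a
+// write lock, fires the DidFinishBuilderRun hook, and marks the build as
+// processed.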
+func (fileState *activeBuildStreamFileState) onFinishBuildFile(planRes *db.PlanFileResult) {
+ planId := fileState.plan.Id
+ branch := fileState.branch
+ currentOrgId := fileState.currentOrgId
+ build := fileState.build
+ activeBuild := fileState.activeBuild
+
+ activePlan := GetActivePlan(planId, branch)
+
+ if activePlan == nil {
+ log.Println("onFinishBuildFile - Active plan not found")
+ return
+ }
+
+ filePath := fileState.filePath
+
+ log.Printf("onFinishBuildFile: %s\n", filePath)
+
+ if planRes == nil {
+ log.Println("onFinishBuildFile - planRes is nil")
+ go notify.NotifyErr(notify.SeverityError, fmt.Errorf("onFinishBuildFile: planRes is nil"))
+
+ activePlan.StreamDoneCh <- &shared.ApiError{
+ Type: shared.ApiErrorTypeOther,
+ Status: http.StatusInternalServerError,
+ Msg: "Error storing plan result: planRes is nil",
+ }
+ return
+ }
+
+ err := db.ExecRepoOperation(db.ExecRepoOperationParams{
+ OrgId: currentOrgId,
+ UserId: fileState.currentUserId,
+ PlanId: planId,
+ Branch: branch,
+ PlanBuildId: build.Id,
+ Scope: db.LockScopeWrite,
+ Ctx: activePlan.Ctx,
+ CancelFn: activePlan.CancelFn,
+ Reason: "store plan result",
+ }, func(repo *db.GitRepo) error {
+ log.Println("Storing plan result", planRes.Path)
+
+ err := db.StorePlanResult(planRes)
+ if err != nil {
+ log.Printf("Error storing plan result: %v\n", err)
+ return err
+ }
+
+ return nil
+ })
+
+ if err != nil {
+ log.Printf("Error storing plan build result: %v\n", err)
+ go notify.NotifyErr(notify.SeverityError, fmt.Errorf("error storing plan build result: %v", err))
+
+ activePlan.StreamDoneCh <- &shared.ApiError{
+ Type: shared.ApiErrorTypeOther,
+ Status: http.StatusInternalServerError,
+ Msg: "Error storing plan build result: " + err.Error(),
+ }
+ return
+ }
+
+ fileState.builderRun.FinishedAt = time.Now()
+ hooks.ExecHook(hooks.DidFinishBuilderRun, hooks.HookParams{
+ Auth: fileState.auth,
+ Plan: fileState.plan,
+ DidFinishBuilderRunParams: &fileState.builderRun,
+ })
+
+ log.Printf("Finished building file %s - setting activeBuild.Success to true\n", filePath)
+ // log.Println(spew.Sdump(activeBuild))
+
+ fileState.onBuildProcessed(activeBuild)
+}
+
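+// onBuildProcessed marks a build successful, advances the per-path
+// queue, and triggers onFinishBuild once every path has finished.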
+func (fileState *activeBuildStreamFileState) onBuildProcessed(activeBuild *types.ActiveBuild) {
+ filePath := fileState.filePath
+ planId := fileState.plan.Id
+ branch := fileState.branch
+
+ activeBuild.Success = true
+
+ stillBuildingPath := fileState.buildNextInQueue()
+ if stillBuildingPath {
+ return
+ }
+
+ log.Printf("No more builds for path %s, checking if entire build is finished\n", filePath)
+
+ buildFinished := false
+
+ UpdateActivePlan(planId, branch, func(ap *types.ActivePlan) {
+ ap.BuiltFiles[filePath] = true
+ ap.IsBuildingByPath[filePath] = false
+ if ap.BuildFinished() {
+ buildFinished = true
+ }
+ })
+
+ log.Printf("Finished building file %s\n", filePath)
+
+ if buildFinished {
+ log.Println("Finished building plan, calling onFinishBuild")
+ fileState.onFinishBuild()
+ } else {
+ log.Println("Finished building file, but plan is not finished")
+ }
+}
+
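+// onBuildFileError records a failed build, notifies the client via the
+// stream, and persists the error on the build record.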
+func (fileState *activeBuildStreamFileState) onBuildFileError(err error) {
+ planId := fileState.plan.Id
+ branch := fileState.branch
+ filePath := fileState.filePath
+ build := fileState.build
+ activeBuild := fileState.activeBuild
+
+ activePlan := GetActivePlan(planId, branch)
+
+ if activePlan == nil {
+ log.Println("onBuildFileError - Active plan not found")
+ return
+ }
+
+ log.Printf("Error for file %s: %v\n", filePath, err)
+
+ activeBuild.Success = false
+ activeBuild.Error = err
+
+ go notify.NotifyErr(notify.SeverityError, fmt.Errorf("error for file %s: %v", filePath, err))
+
+ activePlan.StreamDoneCh <- &shared.ApiError{
+ Type: shared.ApiErrorTypeOther,
+ Status: http.StatusInternalServerError,
+ Msg: err.Error(),
+ }
+
+ build.Error = err.Error()
+
+ err = db.SetBuildError(build)
+ if err != nil {
+ log.Printf("Error setting build error: %v\n", err)
+ }
+}
+
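+// buildNextInQueue starts the next unfinished build queued for this file
+// path, if any, and reports whether the path's queue still had entries.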
+func (fileState *activeBuildStreamFileState) buildNextInQueue() bool {
+ filePath := fileState.filePath
+ activePlan := GetActivePlan(fileState.plan.Id, fileState.branch)
+ if activePlan == nil {
+ log.Println("onFinishBuildFile - Active plan not found")
+ return false
+ }
+
+ // if more builds are queued, start the next one
+ if !activePlan.PathQueueEmpty(filePath) {
+ log.Printf("Processing next build for file %s\n", filePath)
+ queue := activePlan.BuildQueuesByPath[filePath]
+ var nextBuild *types.ActiveBuild
+ for _, build := range queue {
+ if !build.BuildFinished() {
+ nextBuild = build
+ break
+ }
+ }
+
+ if nextBuild != nil {
+ log.Println("Calling execPlanBuild for next build in queue")
+ go fileState.execPlanBuild(nextBuild)
+ }
+ return true
+ }
+
+ return false
+}
diff --git a/app/server/model/plan/build_load.go b/app/server/model/plan/build_load.go
new file mode 100644
index 0000000000000000000000000000000000000000..d109605dfae92f0816c50035f5facda3a935d874
--- /dev/null
+++ b/app/server/model/plan/build_load.go
@@ -0,0 +1,322 @@
+package plan
+
+import (
+ "fmt"
+ "log"
+ "net/http"
+ "plandex-server/db"
+ "plandex-server/notify"
+ "plandex-server/syntax"
+ "plandex-server/types"
+ "runtime"
+ "runtime/debug"
+
+ shared "plandex-shared"
+)
+
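+// loadPendingBuilds activates the plan, then loads the plan contexts,
+// pending builds by path, plan settings, and org user config in
+// parallel, storing them on the stream state.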
+func (state *activeBuildStreamState) loadPendingBuilds(sessionId string) (map[string][]*types.ActiveBuild, error) {
+ clients := state.clients
+ plan := state.plan
+ branch := state.branch
+ auth := state.auth
+
+ active, err := activatePlan(clients, plan, branch, auth, "", true, false, sessionId)
+
+ if err != nil {
+ log.Printf("Error activating plan: %v\n", err)
+ return nil, fmt.Errorf("error activating plan: %v", err)
+ }
+
+ modelStreamId := active.ModelStreamId
+ state.modelStreamId = modelStreamId
+
+ var modelContext []*db.Context
+ var pendingBuildsByPath map[string][]*types.ActiveBuild
+ var settings *shared.PlanSettings
+ var orgUserConfig *shared.OrgUserConfig
+
+ err = db.ExecRepoOperation(db.ExecRepoOperationParams{
+ OrgId: auth.OrgId,
+ UserId: auth.User.Id,
+ PlanId: plan.Id,
+ Branch: branch,
+ Scope: db.LockScopeRead,
+ Ctx: active.Ctx,
+ CancelFn: active.CancelFn,
+ Reason: "load pending builds",
+ }, func(repo *db.GitRepo) error {
+ errCh := make(chan error, 4)
+
+ go func() {
+ defer func() {
+ if r := recover(); r != nil {
+ log.Printf("panic in getPlanContexts: %v\n%s", r, debug.Stack())
+ errCh <- fmt.Errorf("error getting plan modelContext: %v", r)
+ runtime.Goexit() // don't allow outer function to continue and double-send to channel
+ }
+ }()
+ res, err := db.GetPlanContexts(auth.OrgId, plan.Id, true, false)
+ if err != nil {
+ log.Printf("Error getting plan modelContext: %v\n", err)
+ errCh <- fmt.Errorf("error getting plan modelContext: %v", err)
+ return
+ }
+ modelContext = res
+
+ errCh <- nil
+ }()
+
+ go func() {
+ defer func() {
+ if r := recover(); r != nil {
+ log.Printf("panic in getPlanSettings: %v\n%s", r, debug.Stack())
+ errCh <- fmt.Errorf("error getting plan settings: %v", r)
+ runtime.Goexit() // don't allow outer function to continue and double-send to channel
+ }
+ }()
+ res, err := active.PendingBuildsByPath(auth.OrgId, auth.User.Id, nil)
+
+ if err != nil {
+ log.Printf("Error getting pending builds by path: %v\n", err)
+ errCh <- fmt.Errorf("error getting pending builds by path: %v", err)
+ return
+ }
+
+ pendingBuildsByPath = res
+
+ errCh <- nil
+ }()
+
+ go func() {
+ defer func() {
+ if r := recover(); r != nil {
+ log.Printf("panic in getPlanSettings: %v\n%s", r, debug.Stack())
+ errCh <- fmt.Errorf("error getting plan settings: %v", r)
+ runtime.Goexit() // don't allow outer function to continue and double-send to channel
+ }
+ }()
+ res, err := db.GetPlanSettings(plan)
+ if err != nil {
+ log.Printf("Error getting plan settings: %v\n", err)
+ errCh <- fmt.Errorf("error getting plan settings: %v", err)
+ return
+ }
+
+ settings = res
+ errCh <- nil
+ }()
+
+ go func() {
+
+ defer func() {
+ if r := recover(); r != nil {
+ log.Printf("panic in getOrgUserConfig: %v\n%s", r, debug.Stack())
+ errCh <- fmt.Errorf("error getting org user config: %v", r)
+ runtime.Goexit() // don't allow outer function to continue and double-send to channel
+ }
+ }()
+
+ res, err := db.GetOrgUserConfig(auth.User.Id, auth.OrgId)
+ if err != nil {
+ log.Printf("Error getting org user config: %v\n", err)
+ errCh <- fmt.Errorf("error getting org user config: %v", err)
+ return
+ }
+
+ orgUserConfig = res
+ errCh <- nil
+ }()
+
+ for i := 0; i < 4; i++ {
+ err = <-errCh
+ if err != nil {
+ log.Printf("Error getting plan data: %v\n", err)
+ return err
+ }
+ }
+ return nil
+ })
+
+ if err != nil {
+ return nil, fmt.Errorf("error getting plan data: %v", err)
+ }
+
+ UpdateActivePlan(plan.Id, branch, func(ap *types.ActivePlan) {
+ ap.Contexts = modelContext
+ for _, context := range modelContext {
+ if context.FilePath != "" {
+ ap.ContextsByPath[context.FilePath] = context
+ }
+ }
+ })
+
+ state.modelContext = modelContext
+ state.settings = settings
+ state.orgUserConfig = orgUserConfig
+
+ return pendingBuildsByPath, nil
+}
+
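+// loadBuildFile checks the pre-build file's syntax where a parser is
+// available, stores a PlanBuild record, and loads the current plan state
+// and conversation under a read lock.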
+func (state *activeBuildStreamFileState) loadBuildFile(activeBuild *types.ActiveBuild) error {
+ currentOrgId := state.currentOrgId
+ planId := state.plan.Id
+ branch := state.branch
+ filePath := state.filePath
+
+ activePlan := GetActivePlan(planId, branch)
+
+ if activePlan == nil {
+ return fmt.Errorf("active plan not found")
+ }
+
+ convoMessageId := activeBuild.ReplyId
+
+ parser, lang, fallbackParser, fallbackLang := syntax.GetParserForPath(filePath)
+
+ if parser != nil {
+ validationRes, err := syntax.ValidateWithParsers(activePlan.Ctx, lang, parser, fallbackLang, fallbackParser, state.preBuildState)
+ if err != nil {
+ log.Printf(" error validating original file syntax: %v\n", err)
+ return fmt.Errorf("error validating original file syntax: %v", err)
+ }
+
+ state.language = validationRes.Lang
+ state.parser = validationRes.Parser
+
+ state.builderRun.Lang = string(validationRes.Lang)
+
+ if validationRes.TimedOut {
+ state.syntaxCheckTimedOut = true
+ } else if !validationRes.Valid {
+ state.preBuildStateSyntaxInvalid = true
+ }
+ }
+
+ build := &db.PlanBuild{
+ OrgId: currentOrgId,
+ PlanId: planId,
+ ConvoMessageId: convoMessageId,
+ FilePath: filePath,
+ }
+ err := db.StorePlanBuild(build)
+
+ if err != nil {
+ log.Printf("Error storing plan build: %v\n", err)
+ UpdateActivePlan(activePlan.Id, activePlan.Branch, func(ap *types.ActivePlan) {
+ ap.IsBuildingByPath[filePath] = false
+ })
+ go notify.NotifyErr(notify.SeverityError, fmt.Errorf("error storing plan build: %v", err))
+
+ activePlan.StreamDoneCh <- &shared.ApiError{
+ Type: shared.ApiErrorTypeOther,
+ Status: http.StatusInternalServerError,
+ Msg: "Error storing plan build: " + err.Error(),
+ }
+ return err
+ }
+
+ var currentPlan *shared.CurrentPlanState
+ var convo []*db.ConvoMessage
+
+ log.Println("Locking repo for load build file")
+
+ err = db.ExecRepoOperation(db.ExecRepoOperationParams{
+ OrgId: currentOrgId,
+ UserId: state.activeBuildStreamState.currentUserId,
+ PlanId: planId,
+ Branch: branch,
+ PlanBuildId: build.Id,
+ Scope: db.LockScopeRead,
+ Ctx: activePlan.Ctx,
+ CancelFn: activePlan.CancelFn,
+ Reason: "load build file",
+ }, func(repo *db.GitRepo) error {
+ errCh := make(chan error, 2)
+
+ go func() {
+ defer func() {
+ if r := recover(); r != nil {
+ log.Printf("panic in getCurrentPlanState: %v\n%s", r, debug.Stack())
+ errCh <- fmt.Errorf("error getting current plan state: %v", r)
+ runtime.Goexit() // don't allow outer function to continue and double-send to channel
+ }
+ }()
+ log.Println("loadBuildFile - Getting current plan state")
+ res, err := db.GetCurrentPlanState(db.CurrentPlanStateParams{
+ OrgId: currentOrgId,
+ PlanId: planId,
+ })
+ if err != nil {
+ log.Printf("Error getting current plan state: %v\n", err)
+ UpdateActivePlan(activePlan.Id, activePlan.Branch, func(ap *types.ActivePlan) {
+ ap.IsBuildingByPath[filePath] = false
+ })
+ go notify.NotifyErr(notify.SeverityError, fmt.Errorf("error getting current plan state: %v", err))
+
+ activePlan.StreamDoneCh <- &shared.ApiError{
+ Type: shared.ApiErrorTypeOther,
+ Status: http.StatusInternalServerError,
+ Msg: "Error getting current plan state: " + err.Error(),
+ }
+ errCh <- fmt.Errorf("error getting current plan state: %v", err)
+ return
+ }
+ currentPlan = res
+
+ log.Println("Got current plan state")
+ errCh <- nil
+ }()
+
+ go func() {
+ defer func() {
+ if r := recover(); r != nil {
+ log.Printf("panic in getPlanConvo: %v\n%s", r, debug.Stack())
+ errCh <- fmt.Errorf("error getting plan convo: %v", r)
+ runtime.Goexit() // don't allow outer function to continue and double-send to channel
+ }
+ }()
+ res, err := db.GetPlanConvo(currentOrgId, planId)
+ if err != nil {
+ log.Printf("Error getting plan convo: %v\n", err)
+ errCh <- fmt.Errorf("error getting plan convo: %v", err)
+ return
+ }
+ convo = res
+
+ errCh <- nil
+ }()
+
+ for i := 0; i < 2; i++ {
+ err = <-errCh
+ if err != nil {
+ log.Printf("Error getting plan data: %v\n", err)
+ return err
+ }
+ }
+
+ return nil
+ })
+
+ if err != nil {
+ log.Printf("Error loading build file: %v\n", err)
+ UpdateActivePlan(activePlan.Id, activePlan.Branch, func(ap *types.ActivePlan) {
+ ap.IsBuildingByPath[filePath] = false
+ })
+ go notify.NotifyErr(notify.SeverityError, fmt.Errorf("error loading build file: %v", err))
+
+ activePlan.StreamDoneCh <- &shared.ApiError{
+ Type: shared.ApiErrorTypeOther,
+ Status: http.StatusInternalServerError,
+ Msg: "Error loading build file: " + err.Error(),
+ }
+ return err
+ }
+
+ state.filePath = filePath
+ state.convoMessageId = convoMessageId
+ state.build = build
+ state.currentPlanState = currentPlan
+ state.convo = convo
+
+ return nil
+}
diff --git a/app/server/model/plan/build_race.go b/app/server/model/plan/build_race.go
new file mode 100644
index 0000000000000000000000000000000000000000..408d2dd290cea5eb588463a56769680b26fc6cac
--- /dev/null
+++ b/app/server/model/plan/build_race.go
@@ -0,0 +1,301 @@
+package plan
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "log"
+ "plandex-server/syntax"
+ "plandex-server/utils"
+ "runtime"
+ "runtime/debug"
+ "strings"
+ "time"
+)
+
+type raceResult struct {
+ content string
+ valid bool
+}
+
+type buildRaceParams struct {
+ updated string
+ proposedContent string
+ desc string
+ reasons []syntax.NeedsVerifyReason
+ syntaxErrors []string
+
+ didCallFastApply bool
+ fastApplyCh chan string
+
+ sessionId string
+}
+
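+// buildRace runs the validation/replacement loop and, when it fails or
+// flags a problem, falls back to fast apply and then a whole file
+// rebuild, returning the first result that validates.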
+func (fileState *activeBuildStreamFileState) buildRace(
+ buildCtx context.Context,
+ cancelBuild context.CancelFunc,
+ params buildRaceParams,
+) (raceResult, error) {
+ log.Printf("buildRace - starting race for file")
+ defer func() {
+ log.Printf("buildRace - canceling build context")
+ cancelBuild()
+ }()
+
+ originalFile := fileState.preBuildState
+
+ updated := params.updated
+ proposedContent := params.proposedContent
+ desc := params.desc
+ reasons := params.reasons
+ syntaxErrors := params.syntaxErrors
+ fastApplyCh := params.fastApplyCh
+ sessionId := params.sessionId
+ log.Printf("buildRace - original file length: %d, updated length: %d", len(originalFile), len(updated))
+ log.Printf("buildRace - has %d syntax errors and %d verify reasons", len(syntaxErrors), len(reasons))
+
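+ // up to three goroutines can report a failure before the race is
+ // abandoned: the validation loop, fast apply, and the whole file build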
+ maxErrs := 3
+
+ resCh := make(chan raceResult, 1)
+ errCh := make(chan error, maxErrs)
+
+ sendRes := func(res raceResult) {
+ select {
+ case resCh <- res:
+ case <-buildCtx.Done():
+ log.Printf("buildRace - context canceled, skipping sendRes")
+ }
+ }
+
+ sendErr := func(err error) {
+ select {
+ case errCh <- err:
+ case <-buildCtx.Done():
+ log.Printf("buildRace - context canceled, skipping sendErr")
+ }
+ }
+
+ startedFallbacks := false
+
+ startWholeFileBuild := func(comments string) {
+ log.Printf("buildRace - starting whole file fallback build")
+ go func() {
+ defer func() {
+ if r := recover(); r != nil {
+ log.Printf("panic in startWholeFileBuild: %v\n%s", r, debug.Stack())
+ sendErr(fmt.Errorf("error starting whole file build: %v", r))
+ runtime.Goexit() // don't allow outer function to continue and double-send to channel
+ }
+ }()
+ select {
+ case <-buildCtx.Done():
+ log.Printf("buildRace - context already canceled, skipping whole file build")
+ return
+ default:
+ }
+
+ content, err := fileState.buildWholeFileFallback(buildCtx, proposedContent, desc, comments, sessionId)
+
+ if err != nil {
+ if errors.Is(err, context.Canceled) {
+ log.Printf("Context canceled during whole file build")
+ return
+ }
+
+ log.Printf("buildRace - whole file build failed: %v", err)
+ sendErr(fmt.Errorf("error building whole file: %w", err))
+ } else {
+ log.Printf("buildRace - whole file build succeeded")
+ sendRes(raceResult{content: content, valid: true})
+ }
+ }()
+ }
+
+ maybeStartFastApply := func(onFail func()) {
+ log.Printf("buildRace - starting fast apply")
+ if !params.didCallFastApply {
+ log.Printf("buildRace - fast apply isn't defined, skipping")
+ sendErr(nil) // no error, just no fast apply
+ onFail()
+ return
+ }
+
+ go func() {
+ defer func() {
+ if r := recover(); r != nil {
+ log.Printf("panic in maybeStartFastApply: %v\n%s", r, debug.Stack())
+ sendErr(fmt.Errorf("error starting fast apply: %v", r))
+ onFail()
+ runtime.Goexit() // don't allow outer function to continue and double-send to channel
+ }
+ }()
+ var fastApplyRes string
+
+ select {
+ case fastApplyRes = <-fastApplyCh:
+ case <-buildCtx.Done():
+ log.Printf("buildRace - context canceled, skipping fast apply")
+ sendErr(nil) // no error, just no fast apply
+ onFail()
+ return
+ }
+
+ if fastApplyRes == "" {
+ log.Printf("buildRace - fast apply isn't defined or failed to run")
+ sendErr(nil) // no error, just no fast apply
+ onFail()
+ return
+ }
+
+ // log.Printf("buildRace - fast apply result:\n\n%s", fastApplyRes)
+
+ fastApplySyntaxErrors := fileState.validateSyntax(buildCtx, fastApplyRes)
+ fileState.builderRun.FastApplySyntaxErrors = fastApplySyntaxErrors
+
+ if len(fastApplySyntaxErrors) > 0 {
+ log.Printf("buildRace - fast apply succeeded, but has %d syntax errors", len(fastApplySyntaxErrors))
+ sendErr(fmt.Errorf("fast apply succeeded, but has %d syntax errors", len(fastApplySyntaxErrors)))
+ onFail()
+ return
+ }
+
+ log.Printf("buildRace - fast apply returned, validating... ")
+ validateResult, err := fileState.buildValidateLoop(buildCtx, buildValidateLoopParams{
+ originalFile: originalFile,
+ updated: fastApplyRes,
+ proposedContent: proposedContent,
+ desc: desc,
+ reasons: reasons,
+
+ // just validate since we're already building replacements in parallel
+ maxAttempts: 1,
+ validateOnlyOnFinalAttempt: true,
+ isInitial: false,
+ sessionId: sessionId,
+ })
+
+ if err != nil {
+ if errors.Is(err, context.Canceled) {
+ log.Printf("Context canceled during fast apply validation")
+ return
+ }
+
+ log.Printf("buildRace - fast apply validation failed with error: %v", err)
+ sendErr(fmt.Errorf("fast apply validation failed: %w", err))
+ onFail()
+ return
+ }
+
+ if validateResult.valid {
+ log.Printf("buildRace - fast apply validation succeeded")
+ fileState.builderRun.FastApplySuccess = true
+ sendRes(raceResult{content: validateResult.updated, valid: validateResult.valid})
+ } else {
+ log.Printf("buildRace - fast apply validation failed with problem: %s", validateResult.problem)
+ fileState.builderRun.FastApplyFailureResponse = validateResult.problem
+ sendErr(fmt.Errorf("fast apply validation failed: %s", validateResult.problem))
+ onFail()
+ return
+ }
+ }()
+ }
+
+ startFallbacks := func(comments string) {
+ startedFallbacks = true
+ // try fast apply + validation first if it's defined
+ // if it's undefined or fails, start the whole file build fallback
+ maybeStartFastApply(func() {
+ startWholeFileBuild(comments)
+ })
+ }
+
+ // If we get an incorrect marker, start the whole file build in the background while the validation/replacement loop continues
+ onInitialStream := func(chunk string, buffer string) bool {
+ if !startedFallbacks && strings.Contains(buffer, "") && strings.Contains(buffer, "") {
+ log.Printf("buildRace - detected incorrect marker, triggering whole file build")
+
+ comments := utils.GetXMLContent(buffer, "PlandexComments")
+
+ startFallbacks(comments)
+ }
+ // keep streaming
+ return false
+ }
+
+ fileState.builderRun.AutoApplyValidationStartedAt = time.Now()
+
+ go func() {
+ defer func() {
+ if r := recover(); r != nil {
+ log.Printf("panic in buildRace validation loop: %v\n%s", r, debug.Stack())
+ sendErr(fmt.Errorf("error building validate loop: %v", r))
+ runtime.Goexit() // don't allow outer function to continue and double-send to channel
+ }
+ }()
+
+ log.Printf("buildRace - starting validation loop")
+ validateResult, err := fileState.buildValidateLoop(buildCtx, buildValidateLoopParams{
+ originalFile: originalFile,
+ updated: updated,
+ proposedContent: proposedContent,
+ desc: desc,
+ reasons: reasons,
+ syntaxErrors: syntaxErrors,
+ initialPhaseOnStream: onInitialStream,
+ isInitial: true,
+ sessionId: sessionId,
+ })
+
+ fileState.builderRun.AutoApplyValidationFinishedAt = time.Now()
+
+ if err != nil {
+ if errors.Is(err, context.Canceled) {
+ log.Printf("Context canceled during buildValidate")
+ return
+ }
+
+ log.Printf("buildRace - validation loop failed: %v", err)
+ sendErr(fmt.Errorf("error building validate loop: %w", err))
+ } else {
+ log.Printf("buildRace - validation loop finished, valid: %v", validateResult.valid)
+ if validateResult.valid {
+ log.Printf("buildRace - validation loop succeeded, valid: %v", validateResult.valid)
+ sendRes(raceResult{content: validateResult.updated, valid: validateResult.valid})
+ } else {
+ log.Printf("buildRace - validation loop failed, valid: %v", validateResult.valid)
+ sendErr(fmt.Errorf("validation loop failed: %s", validateResult.problem))
+ }
+ }
+ }()
+
+ errs := []error{}
+ errChNumReceived := 0
+
+ for {
+ select {
+ case <-buildCtx.Done():
+ log.Printf("buildRace - context canceled")
+ return raceResult{}, buildCtx.Err()
+ case err := <-errCh:
+ errChNumReceived++
+ log.Printf("buildRace - error channel received %d: %v\n", errChNumReceived, err)
+
+ if err != nil {
+ errs = append(errs, err)
+ }
+
+ if errChNumReceived >= maxErrs {
+ log.Printf("buildRace - all attempts failed with %d errors", len(errs))
+ return raceResult{}, fmt.Errorf("all build attempts failed: %v", errs)
+ }
+
+ if !startedFallbacks {
+ log.Printf("buildRace - starting build fallbacks")
+ startFallbacks("") // since replacements failed, pass an empty string for comments -- this causes whole file build to classify comments first
+ }
+ case res := <-resCh:
+ log.Printf("buildRace - got successful result")
+ return res, nil
+ }
+ }
+}
diff --git a/app/server/model/plan/build_state.go b/app/server/model/plan/build_state.go
new file mode 100644
index 0000000000000000000000000000000000000000..35790f82be19cbb31199bfd31e14180ca3c9bb42
--- /dev/null
+++ b/app/server/model/plan/build_state.go
@@ -0,0 +1,49 @@
+package plan
+
+import (
+ "plandex-server/db"
+ "plandex-server/hooks"
+ "plandex-server/model"
+ "plandex-server/types"
+
+ shared "plandex-shared"
+
+ sitter "github.com/smacker/go-tree-sitter"
+)
+
+const MaxBuildErrorRetries = 3 // uses semi-exponential backoff so be careful with this
+
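+// activeBuildStreamState holds the shared state for all file builds in a
+// single build stream.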
+type activeBuildStreamState struct {
+ modelStreamId string
+ clients map[string]model.ClientInfo
+ authVars map[string]string
+ auth *types.ServerAuth
+ currentOrgId string
+ currentUserId string
+ orgUserConfig *shared.OrgUserConfig
+ plan *db.Plan
+ branch string
+ settings *shared.PlanSettings
+ modelContext []*db.Context
+ convo []*db.ConvoMessage
+}
+
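+// activeBuildStreamFileState tracks the build of a single file within a
+// build stream, including its parser, pre-build state, and retry counts.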
+type activeBuildStreamFileState struct {
+ *activeBuildStreamState
+ filePath string
+ convoMessageId string
+ build *db.PlanBuild
+ currentPlanState *shared.CurrentPlanState
+ activeBuild *types.ActiveBuild
+ preBuildState string
+ parser *sitter.Parser
+ language shared.Language
+ syntaxCheckTimedOut bool
+ preBuildStateSyntaxInvalid bool
+ validationNumRetry int
+ wholeFileNumRetry int
+ isNewFile bool
+ contextPart *db.Context
+
+ builderRun hooks.DidFinishBuilderRunParams
+}
diff --git a/app/server/model/plan/build_structured_edits.go b/app/server/model/plan/build_structured_edits.go
new file mode 100644
index 0000000000000000000000000000000000000000..1a0665246a95018aad30d79885bd08c850bbd97f
--- /dev/null
+++ b/app/server/model/plan/build_structured_edits.go
@@ -0,0 +1,238 @@
+package plan
+
+import (
+ "context"
+ "fmt"
+ "log"
+ "plandex-server/db"
+ diff_pkg "plandex-server/diff"
+ "plandex-server/hooks"
+ "plandex-server/syntax"
+ "plandex-server/utils"
+ "runtime"
+ "runtime/debug"
+ "strings"
+ "time"
+
+ shared "plandex-shared"
+)
+
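+// buildStructuredEdits applies the proposed changes directly via
+// syntax.ApplyChanges, calling the fast apply hook in parallel where
+// useful, and falls back to the buildRace flow when the direct result
+// has syntax errors or needs verification.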
+func (fileState *activeBuildStreamFileState) buildStructuredEdits() {
+ filePath := fileState.filePath
+ activeBuild := fileState.activeBuild
+ planId := fileState.plan.Id
+ branch := fileState.branch
+ originalFile := fileState.preBuildState
+ parser := fileState.parser
+
+ if parser == nil {
+ log.Printf("buildStructuredEdits - tree-sitter parser is nil for file %s\n", filePath)
+ }
+
+ activePlan := GetActivePlan(planId, branch)
+ if activePlan == nil {
+ log.Printf("Active plan not found for plan ID %s and branch %s\n", planId, branch)
+ fileState.onBuildFileError(fmt.Errorf("active plan not found for plan ID %s and branch %s", planId, branch))
+ return
+ }
+
+ buildCtx, cancelBuild := context.WithCancel(activePlan.Ctx)
+
+ proposedContent := activeBuild.FileContent
+ desc := activeBuild.FileDescription
+
+ descLower := strings.ToLower(desc)
+ isReplaceOrRemove := strings.Contains(descLower, "type: replace") || strings.Contains(descLower, "type: remove") || strings.Contains(descLower, "type: overwrite")
+
+ var autoApplyRes *syntax.ApplyChangesResult
+ var autoApplySyntaxErrors []string
+
+ calledFastApply := false
+ var fastApplyRes string
+ fastApplyCh := make(chan string, 1)
+
+ callFastApply := func() {
+ log.Printf("buildStructuredEdits - %s - calling fast apply hook\n", filePath)
+ fileState.builderRun.DidFastApply = true
+ fileState.builderRun.FastApplyStartedAt = time.Now()
+ calledFastApply = true
+
+ go func() {
+ defer func() {
+ if r := recover(); r != nil {
+ log.Printf("panic in callFastApply: %v\n%s", r, debug.Stack())
+ fastApplyCh <- ""
+ runtime.Goexit() // don't allow outer function to continue and double-send to channel
+ }
+ }()
+
+ res, err := hooks.ExecHook(hooks.CallFastApply, hooks.HookParams{
+ FastApplyParams: &hooks.FastApplyParams{
+ InitialCode: originalFile,
+ EditSnippet: proposedContent,
+ Language: fileState.language,
+ Ctx: buildCtx,
+ },
+ })
+
+ if err != nil {
+ log.Printf("buildStructuredEdits - error executing fast apply hook: %v\n", err)
+ // empty string acts as a no-op
+ fastApplyCh <- ""
+ return
+ } else if res.FastApplyResult == nil {
+ log.Printf("buildStructuredEdits - fast apply hook returned nil result\n")
+ // empty string acts as a no-op
+ fastApplyCh <- ""
+ return
+ }
+
+ fastApplyRes = res.FastApplyResult.MergedCode
+ log.Printf("buildStructuredEdits - %s - got fast apply hook result\n", filePath)
+ // fmt.Printf("buildStructuredEdits - fastApplyRes:\n%s", fastApplyRes)
+
+ fileState.builderRun.FastApplyFinishedAt = time.Now()
+
+ fastApplyCh <- fastApplyRes
+ }()
+ }
+
+ if isReplaceOrRemove {
+ callFastApply()
+ }
+
+ log.Printf("buildStructuredEdits - %s - applying changes\n", filePath)
+ // Apply plan logic
+ log.Printf("buildStructuredEdits - %s - calling ApplyChanges\n", filePath)
+ autoApplyRes = syntax.ApplyChanges(
+ buildCtx,
+ syntax.ApplyChangesParams{
+ Original: originalFile,
+ Proposed: proposedContent,
+ Desc: desc,
+ AddMissingStartEndRefs: true,
+ Parser: fileState.parser,
+ Language: fileState.language,
+ },
+ )
+ log.Printf("buildStructuredEdits - %s - got ApplyChanges result\n", filePath)
+ // log.Printf("buildStructuredEdits - autoApplyRes.NewFile:\n\n%s", autoApplyRes.NewFile)
+ log.Println("buildStructuredEdits - autoApplyRes.NeedsVerifyReasons:", autoApplyRes.NeedsVerifyReasons)
+
+ autoApplySyntaxErrors = fileState.validateSyntax(buildCtx, autoApplyRes.NewFile)
+
+ hasNeedsVerifyReasons := len(autoApplyRes.NeedsVerifyReasons) > 0
+
+ autoApplyHasSyntaxErrors := len(autoApplySyntaxErrors) > 0
+ autoApplyIsValid := !autoApplyHasSyntaxErrors && !hasNeedsVerifyReasons
+
+ if !autoApplyIsValid && !calledFastApply {
+ callFastApply()
+ }
+
+ log.Printf("buildStructuredEdits - %s - autoApplyHasSyntaxErrors: %t, hasNeedsVerifyReasons: %t, autoApplyIsValid: %t\n",
+ filePath, autoApplyHasSyntaxErrors, hasNeedsVerifyReasons, autoApplyIsValid)
+
+ updated := autoApplyRes.NewFile
+
+ // If no problems, we trust the direct ApplyChanges result
+ if autoApplyIsValid {
+ log.Printf("buildStructuredEdits - %s - changes are valid, using ApplyChanges result\n", filePath)
+ fileState.builderRun.AutoApplySuccess = true
+ } else {
+ log.Printf("buildStructuredEdits - %s - auto apply has syntax errors or NeedsVerifyReasons", filePath)
+ fileState.builderRun.AutoApplyValidationReasons = make([]string, len(autoApplyRes.NeedsVerifyReasons))
+ for i, reason := range autoApplyRes.NeedsVerifyReasons {
+ fileState.builderRun.AutoApplyValidationReasons[i] = string(reason)
+ }
+
+ fileState.builderRun.AutoApplyValidationSyntaxErrors = autoApplySyntaxErrors
+
+ buildRaceParams := buildRaceParams{
+ updated: updated,
+ proposedContent: proposedContent,
+ desc: desc,
+ reasons: autoApplyRes.NeedsVerifyReasons,
+ syntaxErrors: autoApplySyntaxErrors,
+
+ didCallFastApply: calledFastApply,
+ fastApplyCh: fastApplyCh,
+
+ sessionId: activePlan.SessionId,
+ }
+
+ buildRaceResult, err := fileState.buildRace(buildCtx, cancelBuild, buildRaceParams)
+ if err != nil {
+ if apiErr, ok := err.(*shared.ApiError); ok {
+ activePlan.StreamDoneCh <- apiErr
+ return
+ } else {
+ log.Printf("buildStructuredEdits - %s - error building race: %v\n", filePath, err)
+ fileState.onBuildFileError(fmt.Errorf("error building race: %v", err))
+ }
+ return
+ }
+
+ updated = buildRaceResult.content
+ }
+
+ // output diff and store build results
+ buildInfo := &shared.BuildInfo{
+ Path: filePath,
+ NumTokens: 0,
+ Finished: true,
+ }
+ log.Printf("streaming build info for finished file %s\n", filePath)
+ activePlan.Stream(shared.StreamMessage{
+ Type: shared.StreamMessageBuildInfo,
+ BuildInfo: buildInfo,
+ })
+ time.Sleep(50 * time.Millisecond)
+
+ // strip any blank lines from beginning/end of updated file
+ updated = utils.StripAddedBlankLines(originalFile, updated)
+
+ log.Printf("buildStructuredEdits - %s - getting diff replacements\n", filePath)
+ replacements, err := diff_pkg.GetDiffReplacements(originalFile, updated)
+ if err != nil {
+ log.Printf("buildStructuredEdits - error getting diff replacements: %v\n", err)
+ fileState.onBuildFileError(fmt.Errorf("error getting diff replacements: %v", err))
+ return
+ }
+ log.Printf("buildStructuredEdits - %s - got %d replacements\n", filePath, len(replacements))
+
+ for _, replacement := range replacements {
+ replacement.Summary = strings.TrimSpace(desc)
+ }
+
+ res := db.PlanFileResult{
+ TypeVersion: 1,
+ OrgId: fileState.plan.OrgId,
+ PlanId: fileState.plan.Id,
+ PlanBuildId: fileState.build.Id,
+ ConvoMessageId: fileState.convoMessageId,
+ Content: "",
+ Path: filePath,
+ Replacements: replacements,
+ }
+
+ log.Printf("buildStructuredEdits - %s - finishing build file\n", filePath)
+ fileState.onFinishBuildFile(&res)
+}
+
+func (fileState *activeBuildStreamFileState) validateSyntax(buildCtx context.Context, updated string) []string {
+ if fileState.parser != nil && !fileState.preBuildStateSyntaxInvalid && !fileState.syntaxCheckTimedOut {
+ validationRes, err := syntax.ValidateWithParsers(buildCtx, fileState.language, fileState.parser, "", nil, updated) // fallback parser was already set as fileState.parser if needed during initial preBuildState syntax check
+ if err != nil {
+ log.Printf("buildStructuredEdits - error validating updated file: %v\n", err)
+ } else if validationRes.TimedOut {
+ log.Printf("buildStructuredEdits - syntax check timed out for updated file\n")
+ fileState.syntaxCheckTimedOut = true
+ return nil
+ } else {
+ return validationRes.Errors
+ }
+ }
+
+ return nil
+}
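
A note on the fast-apply channel used above: the hook goroutine always sends exactly one value, with an empty string standing in for "no result" so downstream consumers never block on a failed hook. A minimal sketch of the pattern, with illustrative names rather than the actual Plandex types:

```go
package main

import "fmt"

// runFastApply launches the hook and always sends exactly one value:
// the merged code on success, or "" as a no-op on any failure.
func runFastApply(merge func() (string, error)) <-chan string {
	ch := make(chan string, 1) // buffered so the sender never blocks
	go func() {
		merged, err := merge()
		if err != nil {
			ch <- "" // empty string acts as a no-op
			return
		}
		ch <- merged
	}()
	return ch
}

func main() {
	ch := runFastApply(func() (string, error) { return "merged content", nil })
	if res := <-ch; res != "" {
		fmt.Println("fast apply result:", res)
	} else {
		fmt.Println("fast apply unavailable, falling back")
	}
}
```
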
diff --git a/app/server/model/plan/build_validate_and_fix.go b/app/server/model/plan/build_validate_and_fix.go
new file mode 100644
index 0000000000000000000000000000000000000000..0bad9dfa6f6bb6ed54a0ac2bf172f803bed6187e
--- /dev/null
+++ b/app/server/model/plan/build_validate_and_fix.go
@@ -0,0 +1,486 @@
+package plan
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "log"
+ "math/rand"
+ diff_pkg "plandex-server/diff"
+ "plandex-server/model"
+ "plandex-server/model/prompts"
+ "plandex-server/syntax"
+ "plandex-server/types"
+ "plandex-server/utils"
+ shared "plandex-shared"
+ "strings"
+ "time"
+
+ "github.com/sashabaranov/go-openai"
+)
+
+const MaxValidationFixAttempts = 3
+
+type buildValidateLoopParams struct {
+ originalFile string
+ updated string
+ proposedContent string
+ desc string
+ syntaxErrors []string
+ reasons []syntax.NeedsVerifyReason
+ initialPhaseOnStream func(chunk string, buffer string) bool
+ validateOnlyOnFinalAttempt bool
+ maxAttempts int
+ isInitial bool
+ sessionId string
+}
+
+type buildValidateLoopResult struct {
+ valid bool
+ updated string
+ problem string
+}
+
+func (fileState *activeBuildStreamFileState) buildValidateLoop(
+ ctx context.Context,
+ params buildValidateLoopParams,
+) (buildValidateLoopResult, error) {
+ log.Printf("Starting buildValidateLoop for file: %s", fileState.filePath)
+
+ originalFile := params.originalFile
+ updated := params.updated
+ proposedContent := params.proposedContent
+ desc := params.desc
+
+ syntaxErrors := params.syntaxErrors
+ numAttempts := 0
+
+ problems := []string{}
+
+ maxAttempts := MaxValidationFixAttempts
+ if params.maxAttempts > 0 {
+ maxAttempts = params.maxAttempts
+ }
+
+ for numAttempts < maxAttempts {
+ currentAttempt := numAttempts + 1
+ log.Printf("Starting validation attempt %d/%d", currentAttempt, MaxValidationFixAttempts)
+
+ // check for context cancellation
+ if ctx.Err() != nil {
+ log.Printf("Context cancelled during attempt %d", currentAttempt)
+ return buildValidateLoopResult{}, ctx.Err()
+ }
+
+ // reset retry count for each phase
+ fileState.validationNumRetry = 0
+ log.Printf("Reset validation retry count for attempt %d", currentAttempt)
+
+ var onStream func(chunk string, buffer string) bool
+ if numAttempts == 0 {
+ onStream = params.initialPhaseOnStream
+ log.Printf("Using initial phase onStream handler")
+ } else {
+ onStream = nil
+ log.Printf("No onStream handler for attempt %d", currentAttempt)
+ }
+
+ var reasons []syntax.NeedsVerifyReason
+ if numAttempts == 0 {
+ reasons = params.reasons
+ log.Printf("Using initial reasons for validation")
+ } else {
+ reasons = []syntax.NeedsVerifyReason{}
+ log.Printf("Using empty reasons list for attempt %d", currentAttempt)
+ }
+
+ modelConfig := fileState.settings.GetModelPack().Builder
+ // if available, switch to the stronger model once the first two attempts have failed
+ if currentAttempt > 2 && modelConfig.StrongModel != nil {
+ log.Printf("Switching to strong model for attempt %d", currentAttempt)
+ modelConfig = *modelConfig.StrongModel
+ }
+
+ isLastAttempt := numAttempts == maxAttempts-1
+
+ // build validate params
+ validateParams := buildValidateParams{
+ originalFile: originalFile,
+ updated: updated,
+ proposedContent: proposedContent,
+ desc: desc,
+ onStream: onStream,
+ syntaxErrors: syntaxErrors,
+ reasons: reasons,
+ modelConfig: &modelConfig,
+ validateOnly: isLastAttempt && params.validateOnlyOnFinalAttempt,
+ phase: currentAttempt,
+ isInitial: params.isInitial,
+ sessionId: params.sessionId,
+ }
+
+ log.Printf("Calling buildValidate for attempt %d", currentAttempt)
+ res, err := fileState.buildValidate(ctx, validateParams)
+ if err != nil {
+ if errors.Is(err, context.Canceled) {
+ log.Printf("Context canceled during buildValidate")
+ return buildValidateLoopResult{}, err
+ }
+
+ log.Printf("Error in buildValidate during attempt %d: %v", currentAttempt, err)
+ return buildValidateLoopResult{}, fmt.Errorf("error building validate: %v", err)
+ }
+ updated = res.updated
+
+ syntaxErrors = fileState.validateSyntax(ctx, updated)
+ log.Printf("Found %d syntax errors after attempt %d", len(syntaxErrors), currentAttempt)
+
+ if res.valid && len(syntaxErrors) == 0 {
+ log.Printf("Validation succeeded in attempt %d", currentAttempt)
+ return buildValidateLoopResult{
+ valid: res.valid,
+ updated: res.updated,
+ }, nil
+ }
+
+ problems = append(problems, res.problem)
+
+ log.Printf("Validation failed in attempt %d, preparing for next attempt", currentAttempt)
+
+ numAttempts++
+ }
+
+ log.Printf("Validation failed after %d attempts", MaxValidationFixAttempts)
+ return buildValidateLoopResult{
+ valid: false,
+ updated: updated,
+ problem: strings.Join(problems, "\n\n"),
+ }, nil
+}
+
+type buildValidateParams struct {
+ originalFile string
+ updated string
+ proposedContent string
+ desc string
+ syntaxErrors []string
+ reasons []syntax.NeedsVerifyReason
+ onStream func(chunk string, buffer string) bool
+ phase int
+ modelConfig *shared.ModelRoleConfig
+ validateOnly bool
+ isInitial bool
+ sessionId string
+}
+
+type buildValidateResult struct {
+ valid bool
+ updated string
+ problem string
+}
+
+func (fileState *activeBuildStreamFileState) buildValidate(
+ ctx context.Context,
+ params buildValidateParams,
+) (buildValidateResult, error) {
+ log.Printf("Starting buildValidate for phase %d", params.phase)
+
+ auth := fileState.auth
+ filePath := fileState.filePath
+ clients := fileState.clients
+ authVars := fileState.authVars
+ modelConfig := params.modelConfig
+
+ originalFile := params.originalFile
+ updated := params.updated
+ proposedContent := params.proposedContent
+ desc := params.desc
+ onStream := params.onStream
+ syntaxErrors := params.syntaxErrors
+ reasons := params.reasons
+
+ baseModelConfig := modelConfig.GetBaseModelConfig(authVars, fileState.settings, fileState.orgUserConfig)
+
+ // Get diff for validation
+ log.Printf("Getting diffs between original and updated content")
+ diff, err := diff_pkg.GetDiffs(originalFile, updated)
+ if err != nil {
+ log.Printf("Error getting diffs: %v", err)
+ return buildValidateResult{}, fmt.Errorf("error getting diffs: %v", err)
+ }
+
+ originalWithLineNums := shared.AddLineNums(originalFile)
+ proposedWithLineNums := shared.AddLineNums(proposedContent)
+
+ maxExpectedOutputTokens := shared.GetNumTokensEstimate(originalFile)/2 + shared.GetNumTokensEstimate(proposedContent)
+
+ // Choose prompt and tools based on preferred format
+
+ log.Printf("Building XML validation replacements prompt")
+ promptText, headNumTokens := prompts.GetValidationReplacementsXmlPrompt(prompts.ValidationPromptParams{
+ Path: filePath,
+ OriginalWithLineNums: originalWithLineNums,
+ Desc: desc,
+ ProposedWithLineNums: proposedWithLineNums,
+ Diff: diff,
+ SyntaxErrors: syntaxErrors,
+ Reasons: reasons,
+ })
+
+ // log.Printf("Prompt to LLM: %s", promptText)
+
+ log.Printf("Creating initial messages for phase 1")
+ messages := []types.ExtendedChatMessage{
+ {
+ Role: openai.ChatMessageRoleSystem,
+ Content: []types.ExtendedChatMessagePart{
+ {
+ Type: openai.ChatMessagePartTypeText,
+ Text: promptText,
+ },
+ },
+ },
+ }
+ reqStarted := time.Now()
+ fileState.builderRun.ReplacementStartedAt = reqStarted
+
+ if params.validateOnly {
+ log.Printf("Making validation-only model request")
+ } else {
+ log.Printf("Making validation-replacements model request")
+ }
+ // log.Printf("Messages: %v", messages)
+
+ stop := []string{""}
+ if params.validateOnly {
+ stop = []string{"", ""}
+ }
+
+ var willCacheNumTokens int
+ isFirstPass := params.isInitial && params.phase == 1
+ if !isFirstPass && baseModelConfig.Provider == shared.ModelProviderOpenAI {
+ willCacheNumTokens = headNumTokens
+ }
+
+ log.Printf("buildValidate - calling model.ModelRequest")
+ // spew.Dump(messages)
+
+ // Use ModelRequest for both formats
+ res, err := model.ModelRequest(ctx, model.ModelRequestParams{
+ Clients: clients,
+ Auth: auth,
+ AuthVars: authVars,
+ Plan: fileState.plan,
+ ModelConfig: modelConfig,
+ Purpose: "File edit",
+ Messages: messages,
+ ModelStreamId: fileState.modelStreamId,
+ ConvoMessageId: fileState.convoMessageId,
+ BuildId: fileState.build.Id,
+ ModelPackName: fileState.settings.GetModelPack().Name,
+ Stop: stop,
+ BeforeReq: func() {
+ log.Printf("Starting model request")
+ fileState.builderRun.ReplacementStartedAt = time.Now()
+ },
+ AfterReq: func() {
+ log.Printf("Finished model request")
+ fileState.builderRun.ReplacementFinishedAt = time.Now()
+ },
+ OnStream: onStream,
+
+ WillCacheNumTokens: willCacheNumTokens,
+ SessionId: params.sessionId,
+ EstimatedOutputTokens: maxExpectedOutputTokens,
+ Settings: fileState.settings,
+ OrgUserConfig: fileState.orgUserConfig,
+ })
+
+ if err != nil {
+ if errors.Is(err, context.Canceled) {
+ log.Printf("Context canceled during model request")
+ return buildValidateResult{}, err
+ }
+
+ log.Printf("Error calling model: %v", err)
+ return fileState.validationRetryOrError(ctx, params, err)
+ }
+
+ // log.Printf("Model response:\n\n%s", res.Content)
+
+ fileState.builderRun.GenerationIds = append(fileState.builderRun.GenerationIds, res.GenerationId)
+ log.Printf("Added generation ID: %s", res.GenerationId)
+
+ // Handle response based on format
+ parseRes, err := handleXMLResponse(fileState, res.Content, originalWithLineNums, updated, params.validateOnly)
+
+ if err != nil {
+ log.Printf("Error handling response: %v", err)
+ return fileState.validationRetryOrError(ctx, params, err)
+ }
+
+ log.Printf("Validation result: valid=%v", parseRes.valid)
+
+ return parseRes, nil
+}
+
+func handleXMLResponse(
+ fileState *activeBuildStreamFileState,
+ content string,
+ originalWithLineNums shared.LineNumberedTextType,
+ updated string,
+ validateOnly bool,
+) (buildValidateResult, error) {
+ log.Printf("Handling XML response for file: %s", fileState.filePath)
+
+ if strings.Contains(content, "") {
+ log.Printf("XML response indicates changes are correct")
+ fileState.builderRun.ReplacementSuccess = true
+ return buildValidateResult{
+ valid: true,
+ updated: updated,
+ }, nil
+ }
+
+ if validateOnly {
+ log.Printf("Validation-only mode, skipping replacements")
+ return buildValidateResult{
+ valid: false,
+ updated: updated,
+ }, nil
+ }
+
+ originalFileLines := strings.Split(string(originalWithLineNums), "\n")
+
+ incremental := originalWithLineNums
+
+ log.Printf("Processing XML replacement blocks")
+
+ replacementsOuter := utils.GetXMLContent(content, "PlandexReplacements")
+
+ if replacementsOuter == "" {
+ log.Printf("No replacements found in XML response")
+ return buildValidateResult{
+ valid: false,
+ updated: shared.RemoveLineNums(incremental),
+ problem: "No replacements found in XML response",
+ }, nil
+ }
+
+ replacements := utils.GetAllXMLContent(replacementsOuter, "Replacement")
+
+ for i, replacement := range replacements {
+ log.Printf("Processing replacement: %d/%d", i+1, len(replacements))
+
+ old := utils.GetXMLContent(replacement, "Old")
+ new := utils.GetXMLContent(replacement, "New")
+
+ if old == "" {
+ log.Printf("No old content found for replacement")
+ return buildValidateResult{valid: false, updated: updated}, fmt.Errorf("no old content found for replacement")
+ }
+
+ old = strings.TrimSpace(old)
+
+ // log.Printf("Old content trimmed:\n\n%s", strconv.Quote(old))
+
+ // log.Printf("New content:\n\n%s", strconv.Quote(new))
+
+ if !strings.HasPrefix(old, "pdx-") {
+ log.Printf("Old content does not have a line number prefix for first line")
+ return buildValidateResult{valid: false, updated: updated}, fmt.Errorf("old content does not have a line number prefix for first line")
+ }
+
+ oldLines := strings.Split(old, "\n")
+
+ var lastLine string
+ var lastLineNum int
+ firstLine := oldLines[0]
+ if len(oldLines) > 1 {
+ lastLine = oldLines[len(oldLines)-1]
+ }
+
+ firstLineNum, err := shared.ExtractLineNumberWithPrefix(firstLine, "pdx-")
+ if err != nil {
+ log.Printf("Error extracting line number from first line: %v", err)
+ return buildValidateResult{valid: false, updated: updated}, fmt.Errorf("error extracting line number from first line: %v", err)
+ }
+
+ if lastLine != "" {
+ lastLineNum, err = shared.ExtractLineNumberWithPrefix(lastLine, "pdx-")
+ if err != nil {
+ log.Printf("Error extracting line number from last line: %v", err)
+ return buildValidateResult{valid: false, updated: updated}, fmt.Errorf("error extracting line number from last line: %v", err)
+ }
+ }
+
+ if lastLineNum == 0 {
+ if !(firstLineNum > 0 && firstLineNum <= len(originalFileLines)) {
+ log.Printf("Invalid line number for first line: %d", firstLineNum)
+ return buildValidateResult{valid: false, updated: updated}, fmt.Errorf("invalid line number for first line: %d", firstLineNum)
+ }
+ old = originalFileLines[firstLineNum-1]
+ } else {
+ if !(firstLineNum > 0 && firstLineNum <= len(originalFileLines) && lastLineNum > firstLineNum && lastLineNum <= len(originalFileLines)) {
+ log.Printf("Invalid line numbers for first and last lines: %d-%d", firstLineNum, lastLineNum)
+ return buildValidateResult{valid: false, updated: updated}, fmt.Errorf("invalid line numbers: %d-%d", firstLineNum, lastLineNum)
+ }
+ old = strings.Join(originalFileLines[firstLineNum-1:lastLineNum], "\n")
+ }
+
+ // log.Printf("Applying replacement.\n\nOld:\n\n%s\n\nNew:\n\n%s", old, new)
+
+ incremental = shared.LineNumberedTextType(strings.Replace(string(incremental), old, new, 1))
+
+ // log.Printf("Updated content:\n\n%s", string(incremental))
+ }
+
+ var problem string
+
+ if strings.Contains(content, "") {
+ split := strings.Split(content, "")
+ problem = split[0]
+ } else if strings.Contains(content, "") {
+ split := strings.Split(content, "")
+ problem = split[0]
+ }
+
+ final := shared.RemoveLineNums(incremental)
+
+ // log.Printf("Final content:\n\n%s", final)
+
+ return buildValidateResult{valid: false, updated: final, problem: problem}, nil
+}
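
handleXMLResponse leans on the `pdx-` line-number prefixes that shared.AddLineNums attaches, so the model can cite exact ranges in its Old blocks. The real helpers live in plandex-shared and aren't part of this diff; a rough sketch of the assumed behavior:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// addLineNums prefixes each line with "pdx-N: " so edits can cite ranges.
func addLineNums(s string) string {
	lines := strings.Split(s, "\n")
	for i, line := range lines {
		lines[i] = fmt.Sprintf("pdx-%d: %s", i+1, line)
	}
	return strings.Join(lines, "\n")
}

// extractLineNumberWithPrefix parses N out of a "pdx-N: ..." line.
func extractLineNumberWithPrefix(line, prefix string) (int, error) {
	rest := strings.TrimPrefix(line, prefix)
	if rest == line {
		return 0, fmt.Errorf("missing prefix %q", prefix)
	}
	numStr, _, ok := strings.Cut(rest, ":")
	if !ok {
		return 0, fmt.Errorf("missing ':' after line number")
	}
	return strconv.Atoi(numStr)
}

func main() {
	numbered := addLineNums("a\nb\nc")
	firstLine := strings.Split(numbered, "\n")[0]
	n, err := extractLineNumberWithPrefix(firstLine, "pdx-")
	fmt.Println(n, err) // 1 <nil>
}
```
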
+
+func (fileState *activeBuildStreamFileState) validationRetryOrError(buildCtx context.Context, validateParams buildValidateParams, err error) (buildValidateResult, error) {
+ log.Printf("Handling validation error for file: %s", fileState.filePath)
+ if fileState.validationNumRetry < MaxBuildErrorRetries {
+ fileState.validationNumRetry++
+
+ log.Printf("Retrying validation (attempt %d/%d) due to error: %v",
+ fileState.validationNumRetry, MaxBuildErrorRetries, err)
+
+ activePlan := GetActivePlan(fileState.plan.Id, fileState.branch)
+
+ if activePlan == nil {
+ log.Printf("Active plan not found for plan ID %s and branch %s",
+ fileState.plan.Id, fileState.branch)
+ return buildValidateResult{}, fmt.Errorf("active plan not found for plan ID %s and branch %s",
+ fileState.plan.Id, fileState.branch)
+ }
+
+ select {
+ case <-buildCtx.Done():
+ log.Printf("Context canceled during retry wait")
+ return buildValidateResult{}, context.Canceled
+ case <-time.After(time.Duration(fileState.validationNumRetry*fileState.validationNumRetry)*200*time.Millisecond + time.Duration(rand.Intn(500))*time.Millisecond):
+ log.Printf("Retry wait completed, attempting validation again")
+ break
+ }
+
+ return fileState.buildValidate(buildCtx, validateParams)
+ } else {
+ log.Printf("Max retries (%d) exceeded, returning error", MaxBuildErrorRetries)
+ return buildValidateResult{}, err
+ }
+}
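
The retry wait above is quadratic in the retry count plus up to 500ms of random jitter, which keeps many concurrent retries from firing in lockstep. The same backoff in isolation:

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// retryDelay mirrors the wait above: attempt^2 * 200ms plus 0-500ms jitter.
func retryDelay(attempt int) time.Duration {
	return time.Duration(attempt*attempt)*200*time.Millisecond +
		time.Duration(rand.Intn(500))*time.Millisecond
}

func main() {
	for attempt := 1; attempt <= 3; attempt++ {
		fmt.Printf("attempt %d: waiting ~%v\n", attempt, retryDelay(attempt))
	}
}
```
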
diff --git a/app/server/model/plan/build_whole_file.go b/app/server/model/plan/build_whole_file.go
new file mode 100644
index 0000000000000000000000000000000000000000..01ac45e8eeb4114229c03ffeef862aa8284cce64
--- /dev/null
+++ b/app/server/model/plan/build_whole_file.go
@@ -0,0 +1,171 @@
+package plan
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "log"
+ "math/rand"
+ "plandex-server/model"
+ "plandex-server/model/prompts"
+ "plandex-server/types"
+ "plandex-server/utils"
+ "time"
+
+ shared "plandex-shared"
+
+ "github.com/sashabaranov/go-openai"
+)
+
+func (fileState *activeBuildStreamFileState) buildWholeFileFallback(buildCtx context.Context, proposedContent string, desc string, comments string, sessionId string) (string, error) {
+ auth := fileState.auth
+ filePath := fileState.filePath
+ clients := fileState.clients
+ authVars := fileState.authVars
+ planId := fileState.plan.Id
+ branch := fileState.branch
+ originalFile := fileState.preBuildState
+ config := fileState.settings.GetModelPack().GetWholeFileBuilder()
+
+ activePlan := GetActivePlan(planId, branch)
+
+ if activePlan == nil {
+ log.Printf("Active plan not found for plan ID %s and branch %s\n", planId, branch)
+ fileState.onBuildFileError(fmt.Errorf("active plan not found for plan ID %s and branch %s", planId, branch))
+ return "", fmt.Errorf("active plan not found for plan ID %s and branch %s", planId, branch)
+ }
+
+ baseModelConfig := config.GetBaseModelConfig(authVars, fileState.settings, fileState.orgUserConfig)
+
+ originalFileWithLineNums := shared.AddLineNums(originalFile)
+ proposedContentWithLineNums := shared.AddLineNums(proposedContent)
+
+ sysPrompt, headNumTokens := prompts.GetWholeFilePrompt(filePath, originalFileWithLineNums, proposedContentWithLineNums, desc, comments)
+
+ messages := []types.ExtendedChatMessage{
+ {
+ Role: openai.ChatMessageRoleSystem,
+ Content: []types.ExtendedChatMessagePart{
+ {
+ Type: openai.ChatMessagePartTypeText,
+ Text: sysPrompt,
+ },
+ },
+ },
+ }
+
+ inputTokens := model.GetMessagesTokenEstimate(messages...) + model.TokensPerRequest
+ maxExpectedOutputTokens := shared.GetNumTokensEstimate(originalFile + proposedContent)
+
+ modelConfig := config.GetRoleForInputTokens(inputTokens, fileState.settings)
+ modelConfig = modelConfig.GetRoleForOutputTokens(maxExpectedOutputTokens, fileState.settings)
+
+ log.Println("buildWholeFile - calling model for whole file write")
+
+ var prediction string
+
+ if baseModelConfig.PredictedOutputEnabled && comments != "" {
+ // wrap the prediction in the same PlandexWholeFile tags the response parser expects below
+ prediction = `
+<PlandexWholeFile>
+` + originalFile + `
+</PlandexWholeFile>
+`
+
+ }
+
+ // This allows proper accounting for cached input tokens even when the stream is cancelled -- OpenAI only for now
+ var willCacheNumTokens int
+ if baseModelConfig.Provider == shared.ModelProviderOpenAI {
+ willCacheNumTokens = headNumTokens
+ }
+
+ log.Println("buildWholeFile - calling model.ModelRequest")
+ // spew.Dump(messages)
+
+ modelRes, err := model.ModelRequest(buildCtx, model.ModelRequestParams{
+ Clients: clients,
+ Auth: auth,
+ AuthVars: authVars,
+ Plan: fileState.plan,
+ ModelConfig: &config,
+ Purpose: "File edit",
+
+ Messages: messages,
+ Prediction: prediction,
+
+ ModelStreamId: fileState.modelStreamId,
+ ConvoMessageId: fileState.convoMessageId,
+ BuildId: fileState.build.Id,
+
+ BeforeReq: func() {
+ fileState.builderRun.BuiltWholeFile = true
+ fileState.builderRun.BuildWholeFileStartedAt = time.Now()
+ },
+
+ AfterReq: func() {
+ fileState.builderRun.BuildWholeFileFinishedAt = time.Now()
+ },
+
+ WillCacheNumTokens: willCacheNumTokens,
+ EstimatedOutputTokens: maxExpectedOutputTokens,
+
+ SessionId: sessionId,
+ Settings: fileState.settings,
+ OrgUserConfig: fileState.orgUserConfig,
+ })
+
+ if err != nil {
+ if errors.Is(err, context.Canceled) {
+ log.Printf("buildWholeFileFallback - context canceled during model request for file %s", filePath)
+ return "", err
+ }
+
+ return "", fmt.Errorf("error calling model: %v", err)
+ }
+
+ fileState.builderRun.GenerationIds = append(fileState.builderRun.GenerationIds, modelRes.GenerationId)
+ fileState.builderRun.BuildWholeFileFinishedAt = time.Now()
+
+ content := modelRes.Content
+
+ // log.Printf("buildWholeFile - %s - content:\n%s\n", filePath, content)
+
+ wholeFile := utils.GetXMLContent(content, "PlandexWholeFile")
+
+ if wholeFile == "" {
+ log.Printf("buildWholeFile - no whole file found in response\n")
+ return fileState.wholeFileRetryOrError(buildCtx, proposedContent, desc, comments, sessionId, fmt.Errorf("no whole file found in response"))
+ }
+
+ return wholeFile, nil
+}
+
+func (fileState *activeBuildStreamFileState) wholeFileRetryOrError(buildCtx context.Context, proposedContent string, desc string, comments string, sessionId string, err error) (string, error) {
+ if fileState.wholeFileNumRetry < MaxBuildErrorRetries {
+ fileState.wholeFileNumRetry++
+
+ log.Printf("buildWholeFile - retrying whole file file '%s' due to error: %v\n", fileState.filePath, err)
+
+ activePlan := GetActivePlan(fileState.plan.Id, fileState.branch)
+
+ if activePlan == nil {
+ log.Printf("buildWholeFile - active plan not found for plan ID %s and branch %s\n", fileState.plan.Id, fileState.branch)
+ // fileState.onBuildFileError(fmt.Errorf("active plan not found for plan ID %s and branch %s", fileState.plan.Id, fileState.branch))
+ return "", fmt.Errorf("active plan not found for plan ID %s and branch %s", fileState.plan.Id, fileState.branch)
+ }
+
+ select {
+ case <-buildCtx.Done():
+ log.Printf("buildWholeFile - context canceled\n")
+ return "", context.Canceled
+ case <-time.After(time.Duration(fileState.wholeFileNumRetry*fileState.wholeFileNumRetry)*200*time.Millisecond + time.Duration(rand.Intn(500))*time.Millisecond):
+ break
+ }
+
+ return fileState.buildWholeFileFallback(buildCtx, proposedContent, desc, comments, sessionId)
+ } else {
+ // fileState.onBuildFileError(err)
+ return "", err
+ }
+
+}
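
Both the whole-file build and the validation path read model output with utils.GetXMLContent rather than a full XML parser. That helper isn't included in this diff; a plausible minimal version, treating the response as plain text with tag markers:

```go
package main

import (
	"fmt"
	"strings"
)

// getXMLContent returns the trimmed text between <tag> and </tag>,
// or "" if either marker is missing. No real XML parsing is done.
func getXMLContent(s, tag string) string {
	openTag, closeTag := "<"+tag+">", "</"+tag+">"
	start := strings.Index(s, openTag)
	if start == -1 {
		return ""
	}
	start += len(openTag)
	end := strings.Index(s[start:], closeTag)
	if end == -1 {
		return ""
	}
	return strings.TrimSpace(s[start : start+end])
}

func main() {
	res := "<PlandexWholeFile>\npackage main\n</PlandexWholeFile>"
	fmt.Println(getXMLContent(res, "PlandexWholeFile")) // package main
}
```
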
diff --git a/app/server/model/plan/commit_msg.go b/app/server/model/plan/commit_msg.go
new file mode 100644
index 0000000000000000000000000000000000000000..962f5b6b2c97bd737a8a9011aed470a8da10b640
--- /dev/null
+++ b/app/server/model/plan/commit_msg.go
@@ -0,0 +1,261 @@
+package plan
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "log"
+ "net/http"
+ "plandex-server/db"
+ "plandex-server/model"
+ "plandex-server/model/prompts"
+ "plandex-server/notify"
+ "plandex-server/types"
+ "plandex-server/utils"
+
+ shared "plandex-shared"
+
+ "github.com/sashabaranov/go-openai"
+)
+
+func (state *activeTellStreamState) genPlanDescription() (*db.ConvoMessageDescription, *shared.ApiError) {
+ auth := state.auth
+ plan := state.plan
+ planId := plan.Id
+ branch := state.branch
+ settings := state.settings
+ clients := state.clients
+ authVars := state.authVars
+ orgUserConfig := state.orgUserConfig
+ config := settings.GetModelPack().CommitMsg
+
+ activePlan := GetActivePlan(planId, branch)
+ if activePlan == nil {
+ go notify.NotifyErr(notify.SeverityError, fmt.Errorf("active plan not found for plan %s and branch %s", planId, branch))
+
+ return nil, &shared.ApiError{
+ Type: shared.ApiErrorTypeOther,
+ Status: http.StatusInternalServerError,
+ Msg: fmt.Sprintf("active plan not found for plan %s and branch %s", planId, branch),
+ }
+ }
+
+ baseModelConfig := config.GetBaseModelConfig(authVars, settings, orgUserConfig)
+
+ var sysPrompt string
+ var tools []openai.Tool
+ var toolChoice *openai.ToolChoice
+
+ if baseModelConfig.PreferredOutputFormat == shared.ModelOutputFormatXml {
+ sysPrompt = prompts.SysDescribeXml
+ } else {
+ sysPrompt = prompts.SysDescribe
+ tools = []openai.Tool{
+ {
+ Type: "function",
+ Function: &prompts.DescribePlanFn,
+ },
+ }
+ choice := openai.ToolChoice{
+ Type: "function",
+ Function: openai.ToolFunction{
+ Name: prompts.DescribePlanFn.Name,
+ },
+ }
+ toolChoice = &choice
+ }
+
+ messages := []types.ExtendedChatMessage{
+ {
+ Role: openai.ChatMessageRoleSystem,
+ Content: []types.ExtendedChatMessagePart{
+ {
+ Type: openai.ChatMessagePartTypeText,
+ Text: sysPrompt,
+ },
+ },
+ },
+ {
+ Role: openai.ChatMessageRoleAssistant,
+ Content: []types.ExtendedChatMessagePart{
+ {
+ Type: openai.ChatMessagePartTypeText,
+ Text: activePlan.CurrentReplyContent,
+ },
+ },
+ },
+ }
+
+ reqParams := model.ModelRequestParams{
+ Clients: clients,
+ Auth: auth,
+ AuthVars: authVars,
+ Plan: plan,
+ ModelConfig: &config,
+ Purpose: "Response summary",
+ Messages: messages,
+ ModelStreamId: state.modelStreamId,
+ ConvoMessageId: state.replyId,
+ SessionId: activePlan.SessionId,
+ Settings: settings,
+ OrgUserConfig: orgUserConfig,
+ }
+
+ if tools != nil {
+ reqParams.Tools = tools
+ }
+ if toolChoice != nil {
+ reqParams.ToolChoice = toolChoice
+ }
+
+ modelRes, err := model.ModelRequest(activePlan.Ctx, reqParams)
+
+ if err != nil {
+ go notify.NotifyErr(notify.SeverityError, fmt.Errorf("error during plan description model call: %v", err))
+
+ return nil, &shared.ApiError{
+ Type: shared.ApiErrorTypeOther,
+ Status: http.StatusInternalServerError,
+ Msg: fmt.Sprintf("error during plan description model call: %v", err),
+ }
+ }
+
+ log.Println("Plan description model call complete")
+
+ content := modelRes.Content
+
+ var commitMsg string
+
+ if baseModelConfig.PreferredOutputFormat == shared.ModelOutputFormatXml {
+ commitMsg = utils.GetXMLContent(content, "commitMsg")
+ if commitMsg == "" {
+ go notify.NotifyErr(notify.SeverityError, fmt.Errorf("no commitMsg tag found in XML response"))
+
+ return nil, &shared.ApiError{
+ Type: shared.ApiErrorTypeOther,
+ Status: http.StatusInternalServerError,
+ Msg: "No commitMsg tag found in XML response",
+ }
+ }
+ } else {
+
+ if content == "" {
+ fmt.Println("no describePlan function call found in response")
+
+ go notify.NotifyErr(notify.SeverityError, fmt.Errorf("no describePlan function call found in response"))
+
+ return nil, &shared.ApiError{
+ Type: shared.ApiErrorTypeOther,
+ Status: http.StatusInternalServerError,
+ Msg: "No describePlan function call found in response. The model failed to generate a valid response.",
+ }
+ }
+
+ var desc shared.ConvoMessageDescription
+ err = json.Unmarshal([]byte(content), &desc)
+ if err != nil {
+ fmt.Printf("Error unmarshalling plan description response: %v\n", err)
+
+ go notify.NotifyErr(notify.SeverityError, fmt.Errorf("error unmarshalling plan description response: %v", err))
+
+ return nil, &shared.ApiError{
+ Type: shared.ApiErrorTypeOther,
+ Status: http.StatusInternalServerError,
+ Msg: fmt.Sprintf("error unmarshalling plan description response: %v", err),
+ }
+ }
+ commitMsg = desc.CommitMsg
+ }
+
+ return &db.ConvoMessageDescription{
+ PlanId: planId,
+ CommitMsg: commitMsg,
+ }, nil
+}
+
+type GenCommitMsgForPendingResultsParams struct {
+ Auth *types.ServerAuth
+ Plan *db.Plan
+ Settings *shared.PlanSettings
+ Current *shared.CurrentPlanState
+ SessionId string
+ Ctx context.Context
+ Clients map[string]model.ClientInfo
+ AuthVars map[string]string
+}
+
+func GenCommitMsgForPendingResults(params GenCommitMsgForPendingResultsParams) (string, error) {
+ auth := params.Auth
+ plan := params.Plan
+ settings := params.Settings
+ current := params.Current
+ sessionId := params.SessionId
+ ctx := params.Ctx
+ clients := params.Clients
+ authVars := params.AuthVars
+
+ config := settings.GetModelPack().CommitMsg
+
+ s := ""
+
+ num := 0
+ for _, desc := range current.ConvoMessageDescriptions {
+ if desc.WroteFiles && desc.DidBuild && len(desc.BuildPathsInvalidated) == 0 && desc.AppliedAt == nil {
+ s += desc.CommitMsg + "\n"
+ num++
+ }
+ }
+
+ if num <= 1 {
+ return s, nil
+ }
+
+ prompt := "Pending changes:\n\n" + s
+
+ messages := []types.ExtendedChatMessage{
+ {
+ Role: openai.ChatMessageRoleSystem,
+ Content: []types.ExtendedChatMessagePart{
+ {
+ Type: openai.ChatMessagePartTypeText,
+ Text: prompts.SysPendingResults,
+ },
+ },
+ },
+ {
+ Role: openai.ChatMessageRoleUser,
+ Content: []types.ExtendedChatMessagePart{
+ {
+ Type: openai.ChatMessagePartTypeText,
+ Text: prompt,
+ },
+ },
+ },
+ }
+
+ modelRes, err := model.ModelRequest(ctx, model.ModelRequestParams{
+ Clients: clients,
+ AuthVars: authVars,
+ Auth: auth,
+ Plan: plan,
+ ModelConfig: &config,
+ Purpose: "Commit message",
+ Messages: messages,
+ SessionId: sessionId,
+ Settings: settings,
+ })
+
+ if err != nil {
+ fmt.Println("Generate commit message error:", err)
+
+ return "", err
+ }
+
+ content := modelRes.Content
+
+ if content == "" {
+ return "", fmt.Errorf("no response from model")
+ }
+
+ return content, nil
+}
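
When the model's preferred output format isn't XML, genPlanDescription forces a function call by setting both Tools and ToolChoice, so the reply content is the function's JSON arguments. A minimal go-openai sketch of that pairing; the schema here is abbreviated, the real one being prompts.DescribePlanFn:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/sashabaranov/go-openai"
)

func main() {
	describePlan := openai.FunctionDefinition{
		Name:        "describePlan",
		Description: "Summarize the plan with a commit message",
		Parameters: json.RawMessage(`{
			"type": "object",
			"properties": {"commitMsg": {"type": "string"}},
			"required": ["commitMsg"]
		}`),
	}

	req := openai.ChatCompletionRequest{
		Model: "gpt-4o",
		Messages: []openai.ChatCompletionMessage{
			{Role: openai.ChatMessageRoleSystem, Content: "Describe the plan."},
		},
		Tools: []openai.Tool{{Type: openai.ToolTypeFunction, Function: &describePlan}},
		// Forcing the tool choice guarantees the model responds with the
		// function's JSON arguments rather than free-form prose.
		ToolChoice: openai.ToolChoice{
			Type:     openai.ToolTypeFunction,
			Function: openai.ToolFunction{Name: describePlan.Name},
		},
	}
	fmt.Printf("%+v\n", req.ToolChoice)
}
```
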
diff --git a/app/server/model/plan/exec_status.go b/app/server/model/plan/exec_status.go
new file mode 100644
index 0000000000000000000000000000000000000000..d44090655f42033c38f45953033f97feff9b1250
--- /dev/null
+++ b/app/server/model/plan/exec_status.go
@@ -0,0 +1,210 @@
+package plan
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "log"
+ "net/http"
+ "plandex-server/model"
+ "plandex-server/model/prompts"
+ "plandex-server/notify"
+ "plandex-server/types"
+ "plandex-server/utils"
+ "strings"
+
+ shared "plandex-shared"
+
+ "github.com/sashabaranov/go-openai"
+)
+
+// controls the number of messages to spend trying to finish a single subtask
+// if a subtask isn't finished within this many messages, we give up and mark it done
+// necessary to prevent infinite loops
+const MaxPreviousMessages = 4
+
+type execStatusShouldContinueResult struct {
+ subtaskFinished bool
+}
+
+func (state *activeTellStreamState) execStatusShouldContinue(currentMessage string, sessionId string, ctx context.Context) (execStatusShouldContinueResult, *shared.ApiError) {
+ auth := state.auth
+ plan := state.plan
+ settings := state.settings
+ clients := state.clients
+ authVars := state.authVars
+ config := settings.GetModelPack().ExecStatus
+ orgUserConfig := state.orgUserConfig
+
+ baseModelConfig := config.GetBaseModelConfig(authVars, settings, orgUserConfig)
+ currentSubtask := state.currentSubtask
+
+ if currentSubtask == nil {
+ log.Printf("[ExecStatus] No current subtask")
+ return execStatusShouldContinueResult{
+ subtaskFinished: true,
+ }, nil
+ }
+
+ // Check subtask completion
+ completionMarker := fmt.Sprintf("**%s** has been completed", currentSubtask.Title)
+ log.Printf("[ExecStatus] Checking for subtask completion marker: %q", completionMarker)
+ log.Printf("[ExecStatus] Current subtask: %q", currentSubtask.Title)
+
+ if strings.Contains(currentMessage, completionMarker) {
+ log.Printf("[ExecStatus] ✓ Subtask completion marker found")
+ return execStatusShouldContinueResult{
+ subtaskFinished: true,
+ }, nil
+
+ // NOTE: tried using an LLM to verify "suspicious" subtask completions, but in practice it led to too many extra LLM calls and disagreement cycles between agent roles ("it's finished!" "no it's not!" etc.)
+ // now just going back to trusting the completion marker... it's better to err on the side of marking tasks done.
+
+ // var potentialProblem bool
+
+ // if len(state.chunkProcessor.replyOperations) == 0 {
+ // log.Printf("[ExecStatus] ✗ Subtask completion marker found, but there are no operations to execute")
+ // potentialProblem = true
+ // } else {
+ // wroteToPaths := map[string]bool{}
+ // for _, op := range state.chunkProcessor.replyOperations {
+ // if op.Type == shared.OperationTypeFile {
+ // wroteToPaths[op.Path] = true
+ // }
+ // }
+
+ // for _, path := range currentSubtask.UsesFiles {
+ // if !wroteToPaths[path] {
+ // log.Printf("[ExecStatus] ✗ Subtask completion marker found, but the operations did not write to the file %q from the 'Uses' list", path)
+ // potentialProblem = true
+ // break
+ // }
+ // }
+ // }
+
+ // if !potentialProblem {
+ // log.Printf("[ExecStatus] ✓ Subtask completion marker found and no potential problem - will mark as completed")
+
+ // return execStatusShouldContinueResult{
+ // subtaskFinished: true,
+ // }, nil
+ // } else if currentSubtask.NumTries >= 1 {
+ // log.Printf("[ExecStatus] ✓ Subtask completion marker found, but the operations are questionable -- marking it done anyway since it's the second try and we can't risk an infinite loop")
+
+ // return execStatusShouldContinueResult{
+ // subtaskFinished: true,
+ // }, nil
+ // } else {
+ // log.Printf("[ExecStatus] ✗ Subtask completion marker found, but the operations are questionable -- will verify with LLM call")
+ // }
+ } else {
+ log.Printf("[ExecStatus] ✗ No subtask completion marker found in message")
+ }
+
+ log.Println("[ExecStatus] Current subtasks state:")
+ for i, task := range state.subtasks {
+ log.Printf("[ExecStatus] Task %d: %q (finished=%v)", i+1, task.Title, task.IsFinished)
+ }
+
+ log.Println("Checking if plan should continue based on exec status")
+
+ fullSubtask := currentSubtask.Title
+ fullSubtask += "\n\n" + currentSubtask.Description
+
+ previousMessages := []string{}
+ for _, msg := range state.convo {
+ if msg.Subtask != nil && msg.Subtask.Title == currentSubtask.Title {
+ previousMessages = append(previousMessages, msg.Message)
+ }
+ }
+
+ if len(previousMessages) >= MaxPreviousMessages {
+ log.Printf("[ExecStatus] ✗ Max previous messages reached - will mark as completed and move on to next subtask")
+ return execStatusShouldContinueResult{
+ subtaskFinished: true,
+ }, nil
+ }
+
+ prompt := prompts.GetExecStatusFinishedSubtask(prompts.GetExecStatusFinishedSubtaskParams{
+ UserPrompt: state.userPrompt,
+ CurrentSubtask: fullSubtask,
+ CurrentMessage: currentMessage,
+ PreviousMessages: previousMessages,
+ PreferredOutputFormat: baseModelConfig.PreferredOutputFormat,
+ })
+
+ messages := []types.ExtendedChatMessage{
+ {
+ Role: openai.ChatMessageRoleSystem,
+ Content: []types.ExtendedChatMessagePart{
+ {
+ Type: openai.ChatMessagePartTypeText,
+ Text: prompt,
+ },
+ },
+ },
+ }
+
+ modelRes, err := model.ModelRequest(ctx, model.ModelRequestParams{
+ Clients: clients,
+ Auth: auth,
+ AuthVars: authVars,
+ Plan: plan,
+ ModelConfig: &config,
+ Purpose: "Task completion check",
+ Messages: messages,
+ ModelStreamId: state.modelStreamId,
+ ConvoMessageId: state.replyId,
+ SessionId: sessionId,
+ Settings: settings,
+ OrgUserConfig: orgUserConfig,
+ })
+
+ if err != nil {
+ log.Printf("[ExecStatus] Error in model call: %v", err)
+ return execStatusShouldContinueResult{}, nil
+ }
+
+ content := modelRes.Content
+
+ var reasoning string
+ var subtaskFinished bool
+
+ if baseModelConfig.PreferredOutputFormat == shared.ModelOutputFormatXml {
+ reasoning = utils.GetXMLContent(content, "reasoning")
+ subtaskFinishedStr := utils.GetXMLContent(content, "subtaskFinished")
+ subtaskFinished = subtaskFinishedStr == "true"
+
+ if reasoning == "" || subtaskFinishedStr == "" {
+ go notify.NotifyErr(notify.SeverityError, fmt.Errorf("execStatusShouldContinue: missing required XML tags in response"))
+
+ return execStatusShouldContinueResult{}, &shared.ApiError{
+ Type: shared.ApiErrorTypeOther,
+ Status: http.StatusInternalServerError,
+ Msg: "Missing required XML tags in response",
+ }
+ }
+ } else {
+
+ if content == "" {
+ log.Printf("[ExecStatus] No function response found in model output")
+ return execStatusShouldContinueResult{}, nil
+ }
+
+ var res types.ExecStatusResponse
+ if err := json.Unmarshal([]byte(content), &res); err != nil {
+ log.Printf("[ExecStatus] Failed to parse response: %v", err)
+ return execStatusShouldContinueResult{}, nil
+ }
+
+ reasoning = res.Reasoning
+ subtaskFinished = res.SubtaskFinished
+ }
+
+ log.Printf("[ExecStatus] Decision: subtaskFinished=%v, reasoning=%v",
+ subtaskFinished, reasoning)
+
+ return execStatusShouldContinueResult{
+ subtaskFinished: subtaskFinished,
+ }, nil
+}
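
The completion check hinges on a literal marker the model is instructed to emit for the current subtask; detecting it is a plain substring match:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	title := "Add retry logic"
	// same marker format used by execStatusShouldContinue
	marker := fmt.Sprintf("**%s** has been completed", title)
	reply := "All done. **Add retry logic** has been completed."
	fmt.Println(strings.Contains(reply, marker)) // true
}
```
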
diff --git a/app/server/model/plan/shutdown.go b/app/server/model/plan/shutdown.go
new file mode 100644
index 0000000000000000000000000000000000000000..ec2d2c6b7bf56cd8603443e12dcfb8307eb7da0e
--- /dev/null
+++ b/app/server/model/plan/shutdown.go
@@ -0,0 +1 @@
+package plan
diff --git a/app/server/model/plan/state.go b/app/server/model/plan/state.go
new file mode 100644
index 0000000000000000000000000000000000000000..f888f33478ff2555d5642101c415781a57b7a2cc
--- /dev/null
+++ b/app/server/model/plan/state.go
@@ -0,0 +1,178 @@
+package plan
+
+import (
+ "context"
+ "fmt"
+ "log"
+ "plandex-server/db"
+ "plandex-server/notify"
+ "plandex-server/shutdown"
+ "plandex-server/types"
+ "strings"
+ "time"
+
+ shared "plandex-shared"
+)
+
+var (
+ activePlans types.SafeMap[*types.ActivePlan] = *types.NewSafeMap[*types.ActivePlan]()
+)
+
+func GetActivePlan(planId, branch string) *types.ActivePlan {
+ return activePlans.Get(strings.Join([]string{planId, branch}, "|"))
+}
+
+func CreateActivePlan(orgId, userId, planId, branch, prompt string, buildOnly, autoContext bool, sessionId string) *types.ActivePlan {
+ activePlan := types.NewActivePlan(orgId, userId, planId, branch, prompt, buildOnly, autoContext, sessionId)
+ key := strings.Join([]string{planId, branch}, "|")
+
+ activePlans.Set(key, activePlan)
+
+ go func() {
+ for {
+ select {
+ case <-activePlan.Ctx.Done():
+ log.Printf("case <-activePlan.Ctx.Done(): %s\n", planId)
+
+ err := db.SetPlanStatus(planId, branch, shared.PlanStatusStopped, "")
+ if err != nil {
+ log.Printf("Error setting plan %s status to stopped: %v\n", planId, err)
+ }
+
+ DeleteActivePlan(orgId, userId, planId, branch)
+
+ return
+ case apiErr := <-activePlan.StreamDoneCh:
+ log.Printf("case apiErr := <-activePlan.StreamDoneCh: %s\n", planId)
+ log.Printf("apiErr: %v\n", apiErr)
+
+ if apiErr == nil {
+ log.Printf("Plan %s stream completed successfully", planId)
+
+ err := db.SetPlanStatus(planId, branch, shared.PlanStatusFinished, "")
+ if err != nil {
+ log.Printf("Error setting plan %s status to ready: %v\n", planId, err)
+ }
+
+ // cancel *after* the DeleteActivePlan call
+ // allows queued operations to complete
+ DeleteActivePlan(orgId, userId, planId, branch)
+ activePlan.CancelFn()
+ return
+ } else {
+ log.Printf("Error streaming plan %s: %v\n", planId, apiErr)
+
+ go notify.NotifyErr(notify.SeverityError, fmt.Errorf("error streaming plan %s: %v", planId, apiErr))
+
+ err := db.SetPlanStatus(planId, branch, shared.PlanStatusError, apiErr.Msg)
+ if err != nil {
+ log.Printf("Error setting plan %s status to error: %v\n", planId, err)
+ }
+
+ log.Println("Sending error message to client")
+ activePlan.Stream(shared.StreamMessage{
+ Type: shared.StreamMessageError,
+ Error: apiErr,
+ })
+ activePlan.FlushStreamBuffer()
+
+ log.Println("Stopping any active summary stream")
+ activePlan.SummaryCancelFn()
+
+ log.Println("Waiting 100ms after streaming error before canceling active plan")
+ time.Sleep(100 * time.Millisecond)
+
+ // cancel *before* the DeleteActivePlan call below
+ // short circuits any active operations
+ log.Println("Cancelling active plan")
+ activePlan.CancelFn()
+ DeleteActivePlan(orgId, userId, planId, branch)
+ return
+ }
+ }
+ }
+ }()
+
+ return activePlan
+}
+
+func DeleteActivePlan(orgId, userId, planId, branch string) {
+ log.Printf("Deleting active plan %s - %s - %s\n", planId, branch, orgId)
+
+ activePlan := GetActivePlan(planId, branch)
+ if activePlan == nil {
+ log.Printf("DeleteActivePlan - No active plan found for plan ID %s on branch %s\n", planId, branch)
+ return
+ }
+
+ ctx, cancelFn := context.WithTimeout(shutdown.ShutdownCtx, 10*time.Second)
+ defer cancelFn()
+
+ log.Printf("Clearing uncommitted changes for plan %s - %s - %s\n", planId, branch, orgId)
+
+ err := db.ExecRepoOperation(db.ExecRepoOperationParams{
+ OrgId: orgId,
+ UserId: userId,
+ PlanId: planId,
+ Branch: branch,
+ Scope: db.LockScopeWrite,
+ Ctx: ctx,
+ CancelFn: cancelFn,
+ Reason: "delete active plan",
+ }, func(repo *db.GitRepo) error {
+ log.Printf("Starting clear uncommitted changes for plan %s - %s - %s\n", planId, branch, orgId)
+ err := repo.GitClearUncommittedChanges(branch)
+ log.Printf("Finished clear uncommitted changes for plan %s - %s - %s\n", planId, branch, orgId)
+ log.Printf("Error: %v\n", err)
+ return err
+ })
+
+ if err != nil {
+ log.Printf("Error clearing uncommitted changes for plan %s: %v\n", planId, err)
+ }
+
+ activePlans.Delete(strings.Join([]string{planId, branch}, "|"))
+
+ log.Printf("Deleted active plan %s - %s - %s\n", planId, branch, orgId)
+}
+
+func UpdateActivePlan(planId, branch string, fn func(*types.ActivePlan)) {
+ activePlans.Update(strings.Join([]string{planId, branch}, "|"), fn)
+}
+
+func SubscribePlan(ctx context.Context, planId, branch string) (string, chan string) {
+ log.Printf("Subscribing to plan %s\n", planId)
+ var id string
+ var ch chan string
+
+ activePlan := GetActivePlan(planId, branch)
+ if activePlan == nil {
+ log.Printf("SubscribePlan - No active plan found for plan ID %s on branch %s\n", planId, branch)
+ return "", nil
+ }
+
+ UpdateActivePlan(planId, branch, func(activePlan *types.ActivePlan) {
+ id, ch = activePlan.Subscribe(ctx)
+ })
+ return id, ch
+}
+
+func UnsubscribePlan(planId, branch, subscriptionId string) {
+ log.Printf("UnsubscribePlan %s - %s - %s\n", planId, branch, subscriptionId)
+
+ active := GetActivePlan(planId, branch)
+
+ if active == nil {
+ log.Printf("No active plan found for plan ID %s on branch %s\n", planId, branch)
+ return
+ }
+
+ UpdateActivePlan(planId, branch, func(activePlan *types.ActivePlan) {
+ activePlan.Unsubscribe(subscriptionId)
+ log.Printf("Unsubscribed from plan %s - %s - %s\n", planId, branch, subscriptionId)
+ })
+}
+
+func NumActivePlans() int {
+ return activePlans.Len()
+}
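
Active plans live in types.SafeMap under a `planId|branch` composite key. SafeMap itself isn't defined in this diff; a plausible shape, assuming a generic map guarded by an RWMutex with an in-lock Update:

```go
package main

import (
	"fmt"
	"strings"
	"sync"
)

type activePlan struct{ Branch string }

// safeMap is a minimal stand-in for types.SafeMap.
type safeMap[V any] struct {
	mu sync.RWMutex
	m  map[string]V
}

func newSafeMap[V any]() *safeMap[V] { return &safeMap[V]{m: map[string]V{}} }

func (s *safeMap[V]) Get(k string) V {
	s.mu.RLock()
	defer s.mu.RUnlock()
	return s.m[k] // zero value (nil for pointers) when missing
}

func (s *safeMap[V]) Set(k string, v V) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.m[k] = v
}

func (s *safeMap[V]) Update(k string, fn func(V)) {
	s.mu.Lock()
	defer s.mu.Unlock()
	fn(s.m[k]) // mutation happens under the write lock
}

func main() {
	plans := newSafeMap[*activePlan]()
	key := strings.Join([]string{"plan-1", "main"}, "|") // composite key
	plans.Set(key, &activePlan{Branch: "main"})
	fmt.Println(plans.Get(key).Branch) // main
}
```
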
diff --git a/app/server/model/plan/stop.go b/app/server/model/plan/stop.go
new file mode 100644
index 0000000000000000000000000000000000000000..99a6ee90ae1a6ea0024f4f069c7090b680d030f7
--- /dev/null
+++ b/app/server/model/plan/stop.go
@@ -0,0 +1,52 @@
+package plan
+
+import (
+ "fmt"
+ "plandex-server/db"
+
+ "github.com/sashabaranov/go-openai"
+)
+
+func Stop(planId, branch, currentUserId, currentOrgId string) error {
+ active := GetActivePlan(planId, branch)
+
+ if active == nil {
+ return fmt.Errorf("no active plan with id %s", planId)
+ }
+
+ active.SummaryCancelFn()
+ active.CancelFn()
+
+ return nil
+}
+
+func StorePartialReply(repo *db.GitRepo, planId, branch, currentUserId, currentOrgId string) error {
+ active := GetActivePlan(planId, branch)
+
+ if active == nil {
+ return fmt.Errorf("no active plan with id %s", planId)
+ }
+
+ if !active.BuildOnly && !active.RepliesFinished {
+ num := active.MessageNum + 1
+
+ userMsg := db.ConvoMessage{
+ OrgId: currentOrgId,
+ PlanId: planId,
+ UserId: currentUserId,
+ Role: openai.ChatMessageRoleAssistant,
+ Tokens: active.NumTokens,
+ Num: num,
+ Stopped: true,
+ Message: active.CurrentReplyContent,
+ }
+
+ _, err := db.StoreConvoMessage(repo, &userMsg, currentUserId, branch, true)
+
+ if err != nil {
+ return fmt.Errorf("error storing convo message: %v", err)
+ }
+ }
+
+ return nil
+}
diff --git a/app/server/model/plan/tell_build_pending.go b/app/server/model/plan/tell_build_pending.go
new file mode 100644
index 0000000000000000000000000000000000000000..cd2ae7240f934ed7940fc9cd0519ad3adf798d5c
--- /dev/null
+++ b/app/server/model/plan/tell_build_pending.go
@@ -0,0 +1,80 @@
+package plan
+
+import (
+ "fmt"
+ "log"
+ "net/http"
+ "plandex-server/notify"
+ "runtime/debug"
+
+ shared "plandex-shared"
+)
+
+func (state *activeTellStreamState) queuePendingBuilds() {
+ plan := state.plan
+ planId := plan.Id
+ branch := state.branch
+ auth := state.auth
+ clients := state.clients
+ authVars := state.authVars
+ currentOrgId := state.currentOrgId
+ currentUserId := state.currentUserId
+ active := GetActivePlan(planId, branch)
+
+ if active == nil {
+ log.Printf("execTellPlan: Active plan not found for plan ID %s on branch %s\n", planId, branch)
+ return
+ }
+
+ defer func() {
+ if r := recover(); r != nil {
+ log.Printf("panic in queuePendingBuilds: %v\n%s", r, debug.Stack())
+ go notify.NotifyErr(notify.SeverityError, fmt.Errorf("error getting pending builds by path: %v", r))
+ active.StreamDoneCh <- &shared.ApiError{
+ Type: shared.ApiErrorTypeOther,
+ Status: http.StatusInternalServerError,
+ Msg: fmt.Sprintf("Error getting pending builds by path: %v\n%s", r, debug.Stack()),
+ }
+ }
+ }()
+
+ pendingBuildsByPath, err := active.PendingBuildsByPath(auth.OrgId, auth.User.Id, state.convo)
+
+ if err != nil {
+ log.Printf("Error getting pending builds by path: %v\n", err)
+ go notify.NotifyErr(notify.SeverityError, fmt.Errorf("error getting pending builds by path: %v", err))
+
+ active.StreamDoneCh <- &shared.ApiError{
+ Type: shared.ApiErrorTypeOther,
+ Status: http.StatusInternalServerError,
+ Msg: fmt.Sprintf("Error getting pending builds by path: %v", err),
+ }
+ return
+ }
+
+ if len(pendingBuildsByPath) == 0 {
+ log.Println("Tell plan: no pending builds")
+ return
+ }
+
+ log.Printf("Tell plan: found %d pending builds\n", len(pendingBuildsByPath))
+ // spew.Dump(pendingBuildsByPath)
+
+ buildState := &activeBuildStreamState{
+ modelStreamId: active.ModelStreamId,
+ clients: clients,
+ authVars: authVars,
+ auth: auth,
+ currentOrgId: currentOrgId,
+ currentUserId: currentUserId,
+ plan: plan,
+ branch: branch,
+ settings: state.settings,
+ modelContext: state.modelContext,
+ orgUserConfig: state.orgUserConfig,
+ }
+
+ for _, pendingBuilds := range pendingBuildsByPath {
+ buildState.queueBuilds(pendingBuilds)
+ }
+}
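
queuePendingBuilds turns a panic into an ApiError pushed onto the plan's StreamDoneCh instead of crashing the server. The recover-to-channel pattern in isolation, with apiError standing in for shared.ApiError:

```go
package main

import (
	"fmt"
	"runtime/debug"
)

type apiError struct{ Msg string }

// guard runs fn and converts any panic into an error on errCh.
func guard(errCh chan<- *apiError, fn func()) {
	defer func() {
		if r := recover(); r != nil {
			errCh <- &apiError{Msg: fmt.Sprintf("panic: %v\n%s", r, debug.Stack())}
		}
	}()
	fn()
}

func main() {
	errCh := make(chan *apiError, 1) // buffered: sender never blocks
	guard(errCh, func() { panic("boom") })
	fmt.Println((<-errCh).Msg)
}
```
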
diff --git a/app/server/model/plan/tell_context.go b/app/server/model/plan/tell_context.go
new file mode 100644
index 0000000000000000000000000000000000000000..7b5be8bb9326b297e83465e888a5ed7197d777f8
--- /dev/null
+++ b/app/server/model/plan/tell_context.go
@@ -0,0 +1,443 @@
+package plan
+
+import (
+ "fmt"
+ "log"
+ "plandex-server/types"
+ "regexp"
+ "sort"
+ "strings"
+
+ shared "plandex-shared"
+
+ "github.com/sashabaranov/go-openai"
+)
+
+type formatModelContextParams struct {
+ includeMaps bool
+ smartContextEnabled bool
+ includeApplyScript bool
+ baseOnly bool
+ cacheControl bool
+ activeOnly bool
+ autoOnly bool
+ activatePaths map[string]bool
+ activatePathsOrdered []string
+ maxTokens int
+}
+
+func (state *activeTellStreamState) formatModelContext(params formatModelContextParams) []*types.ExtendedChatMessagePart {
+ log.Println("Tell plan - formatModelContext")
+
+ includeMaps := params.includeMaps
+ smartContextEnabled := params.smartContextEnabled
+ includeApplyScript := params.includeApplyScript
+ currentStage := state.currentStage
+
+ basicOnly := params.baseOnly
+ activeOnly := params.activeOnly
+ autoOnly := params.autoOnly
+ activatePaths := params.activatePaths
+ activatePathsOrdered := params.activatePathsOrdered
+ if activatePaths == nil {
+ activatePaths = map[string]bool{}
+ }
+
+ maxTokens := params.maxTokens
+
+ // log all the flags
+ log.Printf("Tell plan - formatModelContext - basicOnly: %t, activeOnly: %t, autoOnly: %t, smartContextEnabled: %t, execEnabled: %t, includeMaps: %t, activatePaths: %v, activatePathsOrdered: %v, maxTokens: %d\n",
+ basicOnly, activeOnly, autoOnly, smartContextEnabled, includeApplyScript, includeMaps, activatePaths, activatePathsOrdered, params.maxTokens)
+
+ var contextBodies []string = []string{
+ "### LATEST PLAN CONTEXT ###",
+ }
+ addedFilesSet := map[string]bool{}
+
+ uses := map[string]bool{}
+
+ // log.Println("Tell plan - formatModelContext - state.currentSubtask:\n", spew.Sdump(state.currentSubtask))
+ // if state.currentSubtask != nil {
+ // log.Println("Tell plan - formatModelContext - state.currentSubtask.UsesFiles:\n", spew.Sdump(state.currentSubtask.UsesFiles))
+ // }
+ // log.Println("Tell plan - formatModelContext - currentStage.TellStage:\n", currentStage.TellStage)
+ // log.Println("Tell plan - formatModelContext - smartContextEnabled:\n", smartContextEnabled)
+
+ if currentStage.TellStage == shared.TellStageImplementation && smartContextEnabled && state.currentSubtask != nil {
+ log.Println("Tell plan - formatModelContext - implementation stage - smart context enabled for current subtask")
+ for _, path := range state.currentSubtask.UsesFiles {
+ uses[path] = true
+ }
+ if verboseLogging {
+ log.Printf("Tell plan - formatModelContext - uses: %v\n", uses)
+ }
+ }
+
+ // log.Println("Tell plan - formatModelContext - state.modelContext:\n", spew.Sdump(state.modelContext))
+
+ totalTokens := 0
+
+ type toLoad struct {
+ FilePath string
+ Name string
+ Url string
+ NumTokens int
+ Body string
+ ContextType shared.ContextType
+ ImageDetail openai.ImageURLDetail
+ IsPending bool
+ }
+ var toLoadAll []toLoad
+
+ for _, part := range state.modelContext {
+ if verboseLogging {
+ log.Printf("Tell plan - formatModelContext - part: %s - %s - %s - %d tokens\n", part.ContextType, part.Name, part.FilePath, part.NumTokens)
+ }
+ if !(part.ContextType == shared.ContextMapType && includeMaps) {
+ if basicOnly && part.AutoLoaded {
+ if verboseLogging {
+ log.Println("Tell plan - formatModelContext - skipping auto loaded part -- basicOnly && part.AutoLoaded")
+ }
+ continue
+ }
+
+ if autoOnly && !part.AutoLoaded {
+ if verboseLogging {
+ log.Println("Tell plan - formatModelContext - skipping auto loaded part -- autoOnly && !part.AutoLoaded")
+ }
+ continue
+ }
+ }
+
+ if currentStage.TellStage == shared.TellStageImplementation && smartContextEnabled && state.currentSubtask != nil && part.ContextType == shared.ContextFileType && !uses[part.FilePath] {
+ if verboseLogging {
+ log.Println("Tell plan - formatModelContext - skipping part -- currentStage.TellStage == shared.TellStageImplementation && smartContextEnabled && state.currentSubtask != nil && part.ContextType == shared.ContextFileType && !uses[part.FilePath]")
+ }
+ continue
+ }
+
+ if activeOnly && !activatePaths[part.FilePath] {
+ if verboseLogging {
+ log.Println("Tell plan - formatModelContext - skipping part -- activeOnly && !activatePaths[part.FilePath]")
+ }
+ continue
+ }
+
+ if part.ContextType == shared.ContextMapType && !includeMaps {
+ if verboseLogging {
+ log.Println("Tell plan - formatModelContext - skipping part -- part.ContextType == shared.ContextMapType && !includeMaps")
+ }
+ continue
+ }
+
+ toLoadAll = append(toLoadAll, toLoad{
+ FilePath: part.FilePath,
+ NumTokens: part.NumTokens,
+ Body: part.Body,
+ ContextType: part.ContextType,
+ Name: part.Name,
+ Url: part.Url,
+ ImageDetail: part.ImageDetail,
+ })
+
+ if part.ContextType == shared.ContextFileType {
+ addedFilesSet[part.FilePath] = true
+ }
+ }
+
+ // Add any current pendingFiles in plan that weren't added to the context
+ var currentPlanFiles *shared.CurrentPlanFiles
+ var pendingFiles map[string]string = map[string]string{}
+ if state.currentPlanState != nil && state.currentPlanState.CurrentPlanFiles != nil && state.currentPlanState.CurrentPlanFiles.Files != nil {
+ currentPlanFiles = state.currentPlanState.CurrentPlanFiles
+ pendingFiles = state.currentPlanState.CurrentPlanFiles.Files
+ }
+
+ for filePath, body := range pendingFiles {
+ if !addedFilesSet[filePath] {
+
+ if currentStage.TellStage == shared.TellStageImplementation && smartContextEnabled && !uses[filePath] {
+ continue
+ }
+
+ if filePath == "_apply.sh" {
+ continue
+ }
+
+ if activeOnly && !activatePaths[filePath] {
+ continue
+ }
+
+ numTokens := shared.GetNumTokensEstimate(body)
+
+ toLoadAll = append(toLoadAll, toLoad{
+ FilePath: filePath,
+ NumTokens: numTokens,
+ Body: body,
+ ContextType: shared.ContextFileType,
+ Name: filePath,
+ IsPending: true,
+ })
+
+ if verboseLogging {
+ log.Printf("Tell plan - formatModelContext - added current plan file - %s\n", filePath)
+ }
+ }
+ }
+
+ if len(activatePathsOrdered) > 0 {
+ indexByPath := map[string]int{}
+ for i, path := range activatePathsOrdered {
+ indexByPath[path] = i
+ }
+
+ sort.Slice(toLoadAll, func(i, j int) bool {
+ iIndex, ok1 := indexByPath[toLoadAll[i].FilePath]
+ jIndex, ok2 := indexByPath[toLoadAll[j].FilePath]
+
+ // If neither has an index, sort by Name so we are using a stable order for caching
+ if !ok1 && !ok2 {
+ return toLoadAll[i].Name < toLoadAll[j].Name
+ }
+
+ // If only i doesn't have an index, it goes after j
+ if !ok1 {
+ return false
+ }
+
+ // If only j doesn't have an index, it goes after i
+ if !ok2 {
+ return true
+ }
+
+ // Both have indices, compare them
+ return iIndex < jIndex
+ })
+ }
+
+ for _, part := range toLoadAll {
+ totalTokens += part.NumTokens
+
+ if maxTokens > 0 && totalTokens > maxTokens {
+ if verboseLogging {
+ log.Printf("Tell plan - formatModelContext - total tokens: %d\n", totalTokens)
+ }
+ break
+ }
+
+ var message string
+ var fmtStr string
+ var args []any
+
+ if part.ContextType == shared.ContextDirectoryTreeType {
+ fmtStr = "\n\n- %s | directory tree:\n\n```\n%s\n```"
+ args = append(args, part.FilePath, part.Body)
+ } else if part.ContextType == shared.ContextFileType {
+ // if we're in the context phase and the file is pending, just include that the file is pending, not the full content
+ // there is generally enough related context from the conversation and summary to decide on whether to load the file or not
+ // without this, the context phase can get overloaded with pending file content
+ if currentStage.TellStage == shared.TellStagePlanning &&
+ currentStage.PlanningPhase == shared.PlanningPhaseContext &&
+ part.IsPending {
+ fmtStr = "\n\n- File `%s` has pending changes (%d 🪙)"
+ args = append(args, part.FilePath, part.NumTokens)
+ } else {
+
+ fmtStr = "\n\n- %s:\n\n```\n%s\n```"
+
+ // use pending file value if available
+ var body string
+ var found bool
+ res, ok := pendingFiles[part.FilePath]
+ if ok {
+ body = res
+ found = true
+ }
+ if !found {
+ body = part.Body
+ }
+
+ args = append(args, part.FilePath, body)
+ }
+ } else if part.ContextType == shared.ContextMapType {
+ fmtStr = "\n\n- %s | map:\n\n```\n%s\n```"
+ args = append(args, part.FilePath, part.Body)
+ } else if part.Url != "" {
+ fmtStr = "\n\n- %s:\n\n```\n%s\n```"
+ args = append(args, part.Url, part.Body)
+ } else if part.ContextType != shared.ContextImageType {
+ fmtStr = "\n\n- content%s:\n\n```\n%s\n```"
+ args = append(args, part.Name, part.Body)
+ }
+
+ if part.ContextType != shared.ContextImageType {
+ message = fmt.Sprintf(fmtStr, args...)
+ contextBodies = append(contextBodies, message)
+ }
+
+ if verboseLogging {
+ log.Printf("Tell plan - formatModelContext - added context: %s - %s - %s - %d tokens\n", part.ContextType, part.Name, part.FilePath, part.NumTokens)
+ }
+ }
+
+ if currentPlanFiles != nil && len(currentPlanFiles.Removed) > 0 {
+ contextBodies = append(contextBodies, "*Removed files:*\n")
+ for path := range currentPlanFiles.Removed {
+ contextBodies = append(contextBodies, fmt.Sprintf("- %s", path))
+ }
+ contextBodies = append(contextBodies, "These files have been *removed* and are no longer in the plan. If you want to re-add them to the plan, you must explicitly create them again.")
+
+ log.Println("Tell plan - formatModelContext - added removed files")
+ log.Println(contextBodies)
+ }
+
+ var execScriptLines []string
+
+ if includeApplyScript &&
+ // don't show _apply.sh history and content if smart context is enabled and the current subtask doesn't use it
+ !(currentStage.TellStage == shared.TellStageImplementation && smartContextEnabled && state.currentSubtask != nil && !uses["_apply.sh"]) {
+
+ execHistory := state.currentPlanState.ExecHistory()
+
+ execScriptLines = append(execScriptLines, execHistory)
+
+ scriptContent, ok := pendingFiles["_apply.sh"]
+ var isEmpty bool
+ if !ok || scriptContent == "" {
+ scriptContent = "[empty]"
+ isEmpty = true
+ }
+
+ execScriptLines = append(execScriptLines, "*Current* state of _apply.sh script:")
+ execScriptLines = append(execScriptLines, fmt.Sprintf("\n\n- _apply.sh:\n\n```\n%s\n```", scriptContent))
+
+ if isEmpty && currentStage.TellStage == shared.TellStagePlanning && currentStage.PlanningPhase != shared.PlanningPhaseContext {
+ execScriptLines = append(execScriptLines, "The _apply.sh script is *empty*. You ABSOLUTELY MUST include a '### Commands' section in your response prior to the '### Tasks' section that evaluates whether any commands should be written to _apply.sh during the plan. This is MANDATORY. Do NOT UNDER ANY CIRCUMSTANCES omit this section. If you determine that commands should be added or updated in _apply.sh, you MUST also create a subtask referencing _apply.sh in the '### Tasks' section.")
+
+ if execHistory != "" {
+ execScriptLines = append(execScriptLines, "Consider the history of previously executed _apply.sh scripts when determining which commands to include in the new _apply.sh file. Are there any commands that should be run again after code changes? If so, mention them in the '### Commands' section and then include a subtask to include them in the _apply.sh file in the '### Tasks' section.")
+ }
+ }
+ }
+
+ log.Println("Tell plan - formatModelContext - contextMessages:", len(contextBodies))
+
+ textMsg := &types.ExtendedChatMessagePart{
+ Type: openai.ChatMessagePartTypeText,
+ Text: strings.Join(contextBodies, "\n"),
+ }
+
+ res := []*types.ExtendedChatMessagePart{textMsg}
+
+ // now add any images that should be included
+ // we'll check later for model image support once the final model config is set
+ for _, load := range toLoadAll {
+ if load.ContextType == shared.ContextImageType {
+ res = append(res, &types.ExtendedChatMessagePart{
+ Type: openai.ChatMessagePartTypeText,
+ Text: fmt.Sprintf("Image: %s", load.Name),
+ })
+ res = append(res, &types.ExtendedChatMessagePart{
+ Type: openai.ChatMessagePartTypeImageURL,
+ ImageURL: &openai.ChatMessageImageURL{URL: shared.GetImageDataURI(load.Body, load.FilePath), Detail: load.ImageDetail},
+ })
+ }
+ }
+
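+ // mark the last context part as an ephemeral cache breakpoint so providers that support prompt caching can reuse the context prefix across requests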
+ if params.cacheControl && len(res) > 0 {
+ res[len(res)-1].CacheControl = &types.CacheControlSpec{
+ Type: types.CacheControlTypeEphemeral,
+ }
+ }
+
+ if len(execScriptLines) > 0 {
+ res = append(res, &types.ExtendedChatMessagePart{
+ Type: openai.ChatMessagePartTypeText,
+ Text: strings.Join(execScriptLines, "\n"),
+ })
+ }
+
+ res = append(res, &types.ExtendedChatMessagePart{
+ Type: openai.ChatMessagePartTypeText,
+ Text: "### END OF CONTEXT ###\n\n",
+ })
+
+ return res
+}
+
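+// pathRegex captures the contents of backtick-quoted spans in the model's reply, e.g. `src/main.go`; matches are checked against known project paths in checkAutoLoadContext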
+var pathRegex = regexp.MustCompile("`(.+?)`")
+
+type checkAutoLoadContextResult struct {
+ autoLoadPaths []string
+ activatePaths map[string]bool
+ hasExplicitPaths bool
+ activatePathsOrdered []string
+}
+
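+// checkAutoLoadContext scans the current reply for backtick-quoted project paths, returning paths to auto-load (mentioned but not yet in context) and all mentioned paths to activate, ordered by first mention.
+// It only applies during the planning stage, and for chat-only requests only during the context phase.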
+func (state *activeTellStreamState) checkAutoLoadContext() checkAutoLoadContextResult {
+ req := state.req
+ activePlan := state.activePlan
+ contextsByPath := activePlan.ContextsByPath
+ currentStage := state.currentStage
+
+ // can only auto load context in planning stage
+ // context phase is primary loading phase
+ // planning phase can still load additional context files as a backup
+ if currentStage.TellStage != shared.TellStagePlanning {
+ return checkAutoLoadContextResult{}
+ }
+
+ // for chat responses, only auto load context if we're in the context phase
+ if req.IsChatOnly && currentStage.PlanningPhase != shared.PlanningPhaseContext {
+ return checkAutoLoadContextResult{}
+ }
+
+ log.Printf("%d existing contexts by path\n", len(contextsByPath))
+
+ // pick out all potential file paths within backticks
+ matches := pathRegex.FindAllStringSubmatch(activePlan.CurrentReplyContent, -1)
+
+ toAutoLoad := map[string]bool{}
+ toActivate := map[string]bool{}
+ toActivateOrdered := []string{}
+ allSet := map[string]bool{}
+ allFiles := []string{}
+
+ for _, match := range matches {
+ trimmed := strings.TrimSpace(match[1])
+ if trimmed == "" {
+ continue
+ }
+
+ if req.ProjectPaths[trimmed] {
+ if !allSet[trimmed] {
+ allFiles = append(allFiles, trimmed)
+ allSet[trimmed] = true
+
+ toActivate[trimmed] = true
+ toActivateOrdered = append(toActivateOrdered, trimmed)
+ if contextsByPath[trimmed] == nil {
+ toAutoLoad[trimmed] = true
+ }
+
+ }
+ }
+ }
+
+ toAutoLoadPaths := []string{}
+ for path := range toAutoLoad {
+ toAutoLoadPaths = append(toAutoLoadPaths, path)
+ }
+
+ hasExplicitPaths := strings.Contains(activePlan.CurrentReplyContent, "### Files")
+
+ log.Printf("Tell plan - checkAutoLoadContext - toAutoLoad: %v\n", toAutoLoadPaths)
+ log.Printf("Tell plan - checkAutoLoadContext - toActivate: %v\n", toActivateOrdered)
+
+ return checkAutoLoadContextResult{
+ autoLoadPaths: toAutoLoadPaths,
+ activatePaths: toActivate,
+ activatePathsOrdered: toActivateOrdered,
+ hasExplicitPaths: hasExplicitPaths,
+ }
+}
diff --git a/app/server/model/plan/tell_exec.go b/app/server/model/plan/tell_exec.go
new file mode 100644
index 0000000000000000000000000000000000000000..99f41b4384501da8ca61933aaedf0a2aad828374
--- /dev/null
+++ b/app/server/model/plan/tell_exec.go
@@ -0,0 +1,684 @@
+package plan
+
+import (
+ "fmt"
+ "log"
+ "net/http"
+ "runtime/debug"
+ "time"
+
+ "plandex-server/db"
+ "plandex-server/hooks"
+ "plandex-server/model"
+ "plandex-server/notify"
+ "plandex-server/types"
+
+ shared "plandex-shared"
+
+ "github.com/davecgh/go-spew/spew"
+ "github.com/google/uuid"
+ "github.com/sashabaranov/go-openai"
+)
+
+type TellParams struct {
+ Clients map[string]model.ClientInfo
+ AuthVars map[string]string
+ Plan *db.Plan
+ Branch string
+ Auth *types.ServerAuth
+ Req *shared.TellPlanRequest
+}
+
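+// Tell activates the plan and kicks off the first tell iteration in a goroutine; errors after activation are reported over the plan's stream rather than returned here.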
+func Tell(params TellParams) error {
+ clients := params.Clients
+ plan := params.Plan
+ branch := params.Branch
+ auth := params.Auth
+ req := params.Req
+ authVars := params.AuthVars
+
+ log.Printf("Tell: Called with plan ID %s on branch %s\n", plan.Id, branch)
+
+ _, err := activatePlan(
+ clients,
+ plan,
+ branch,
+ auth,
+ req.Prompt,
+ false,
+ req.AutoContext,
+ req.SessionId,
+ )
+
+ if err != nil {
+ log.Printf("Error activating plan: %v\n", err)
+ return err
+ }
+
+ go execTellPlan(execTellPlanParams{
+ clients: clients,
+ plan: plan,
+ branch: branch,
+ auth: auth,
+ req: req,
+ iteration: 0,
+ shouldBuildPending: !req.IsChatOnly && req.BuildMode == shared.BuildModeAuto,
+ authVars: authVars,
+ })
+
+ log.Printf("Tell: Tell operation completed successfully for plan ID %s on branch %s\n", plan.Id, branch)
+ return nil
+}
+
+type execTellPlanParams struct {
+ clients map[string]model.ClientInfo
+ authVars map[string]string
+ plan *db.Plan
+ branch string
+ auth *types.ServerAuth
+ req *shared.TellPlanRequest
+ iteration int
+ missingFileResponse shared.RespondMissingFileChoice
+ shouldBuildPending bool
+ unfinishedSubtaskReasoning string
+}
+
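+// execTellPlan runs a single tell iteration: it loads plan state, resolves the current stage and phase, assembles the system, context, and prompt messages within the token budget, then issues the streaming model request.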
+func execTellPlan(params execTellPlanParams) {
+ clients := params.clients
+ authVars := params.authVars
+ plan := params.plan
+ branch := params.branch
+ auth := params.auth
+ req := params.req
+ iteration := params.iteration
+ missingFileResponse := params.missingFileResponse
+ shouldBuildPending := params.shouldBuildPending
+ unfinishedSubtaskReasoning := params.unfinishedSubtaskReasoning
+
+ log.Printf("[TellExec] Starting iteration %d for plan %s on branch %s", iteration, plan.Id, branch)
+
+ currentUserId := auth.User.Id
+ currentOrgId := auth.OrgId
+
+ active := GetActivePlan(plan.Id, branch)
+
+ if active == nil {
+ log.Printf("execTellPlan: Active plan not found for plan ID %s on branch %s\n", plan.Id, branch)
+ return
+ }
+
+ defer func() {
+ if r := recover(); r != nil {
+ log.Printf("execTellPlan: Panic: %v\n%s\n", r, string(debug.Stack()))
+
+ go notify.NotifyErr(notify.SeverityError, fmt.Errorf("execTellPlan: Panic: %v\n%s", r, string(debug.Stack())))
+
+ active.StreamDoneCh <- &shared.ApiError{
+ Type: shared.ApiErrorTypeOther,
+ Status: http.StatusInternalServerError,
+ Msg: fmt.Sprintf("Panic in execTellPlan: %v\n%s", r, string(debug.Stack())),
+ }
+ }
+ }()
+
+ if missingFileResponse == "" {
+ log.Println("Executing WillExecPlanHook")
+ _, apiErr := hooks.ExecHook(hooks.WillExecPlan, hooks.HookParams{
+ Auth: auth,
+ Plan: plan,
+ })
+
+ if apiErr != nil {
+ time.Sleep(100 * time.Millisecond)
+ active.StreamDoneCh <- apiErr
+ return
+ }
+ }
+
+ planId := plan.Id
+ log.Println("execTellPlan - Setting plan status to replying")
+ err := db.SetPlanStatus(planId, branch, shared.PlanStatusReplying, "")
+ if err != nil {
+ log.Printf("Error setting plan %s status to replying: %v\n", planId, err)
+ go notify.NotifyErr(notify.SeverityError, fmt.Errorf("error setting plan %s status to replying: %v", planId, err))
+
+ active.StreamDoneCh <- &shared.ApiError{
+ Type: shared.ApiErrorTypeOther,
+ Status: http.StatusInternalServerError,
+ Msg: fmt.Sprintf("Error setting plan status to replying: %v", err),
+ }
+
+ log.Printf("execTellPlan: execTellPlan operation completed for plan ID %s on branch %s, iteration %d\n", plan.Id, branch, iteration)
+ return
+ }
+ log.Println("execTellPlan - Plan status set to replying")
+
+ state := &activeTellStreamState{
+ modelStreamId: active.ModelStreamId,
+ clients: clients,
+ authVars: authVars,
+ req: req,
+ auth: auth,
+ currentOrgId: currentOrgId,
+ currentUserId: currentUserId,
+ plan: plan,
+ branch: branch,
+ iteration: iteration,
+ missingFileResponse: missingFileResponse,
+ }
+
+ log.Println("execTellPlan - Loading tell plan")
+ err = state.loadTellPlan()
+ if err != nil {
+ return
+ }
+ log.Println("execTellPlan - Tell plan loaded")
+
+ activatePaths, activatePathsOrdered := state.resolveCurrentStage()
+
+ var tentativeModelConfig shared.ModelRoleConfig
+ var tentativeMaxTokens int
+ if state.currentStage.TellStage == shared.TellStagePlanning {
+ if state.currentStage.PlanningPhase == shared.PlanningPhaseContext {
+ log.Println("Tell plan - isContextStage - setting modelConfig to context loader")
+ tentativeModelConfig = state.settings.GetModelPack().GetArchitect()
+ tentativeMaxTokens = state.settings.GetArchitectEffectiveMaxTokens()
+ } else {
+ plannerConfig := state.settings.GetModelPack().Planner
+ tentativeModelConfig = plannerConfig.ModelRoleConfig
+ tentativeMaxTokens = state.settings.GetPlannerEffectiveMaxTokens()
+ }
+ } else if state.currentStage.TellStage == shared.TellStageImplementation {
+ tentativeModelConfig = state.settings.GetModelPack().GetCoder()
+ tentativeMaxTokens = state.settings.GetCoderEffectiveMaxTokens()
+ } else {
+ log.Printf("Tell plan - execTellPlan - unknown tell stage: %s\n", state.currentStage.TellStage)
+ go notify.NotifyErr(notify.SeverityError, fmt.Errorf("execTellPlan: unknown tell stage: %s", state.currentStage.TellStage))
+
+ active.StreamDoneCh <- &shared.ApiError{
+ Type: shared.ApiErrorTypeOther,
+ Status: http.StatusInternalServerError,
+ Msg: "Unknown tell stage",
+ }
+ return
+ }
+
+ ok, tokensWithoutContext := state.dryRunCalculateTokensWithoutContext(tentativeMaxTokens, unfinishedSubtaskReasoning)
+ if !ok {
+ return
+ }
+
+ var planStageSharedMsgs []*types.ExtendedChatMessagePart
+ var planningPhaseOnlyMsgs []*types.ExtendedChatMessagePart
+ var implementationMsgs []*types.ExtendedChatMessagePart
+
+ if state.currentStage.TellStage == shared.TellStageImplementation {
+ implementationMsgs = state.formatModelContext(formatModelContextParams{
+ includeMaps: false,
+ smartContextEnabled: req.SmartContext,
+ includeApplyScript: req.ExecEnabled,
+ })
+ } else if state.currentStage.TellStage == shared.TellStagePlanning {
+ // add the shared context between planning and context phases first so it can be cached
+ // this is just for the map and any manually loaded contexts - auto contexts will be added later
+ planStageSharedMsgs = state.formatModelContext(formatModelContextParams{
+ includeMaps: true,
+ smartContextEnabled: req.SmartContext,
+ includeApplyScript: req.ExecEnabled,
+ baseOnly: true,
+ cacheControl: true,
+ })
+
+ if state.currentStage.PlanningPhase == shared.PlanningPhaseTasks {
+ if req.AutoContext {
+ msg := types.ExtendedChatMessage{
+ Role: openai.ChatMessageRoleSystem,
+ Content: []types.ExtendedChatMessagePart{},
+ }
+ for _, part := range planStageSharedMsgs {
+ msg.Content = append(msg.Content, *part)
+ }
+ sharedMsgsTokens := model.GetMessagesTokenEstimate(msg)
+
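+ // remaining context budget: the model's effective max tokens minus the shared context already added and the fixed prompt/conversation overhead measured by the dry run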
+ tokensRemaining := tentativeMaxTokens - (sharedMsgsTokens + tokensWithoutContext)
+
+ if tokensRemaining < 0 {
+ log.Println("tokensRemaining is negative")
+ go notify.NotifyErr(notify.SeverityError, fmt.Errorf("tokensRemaining is negative"))
+
+ active.StreamDoneCh <- &shared.ApiError{
+ Type: shared.ApiErrorTypeOther,
+ Status: http.StatusInternalServerError,
+ Msg: "Max tokens exceeded before adding context",
+ }
+ return
+ }
+
+ planningPhaseOnlyMsgs = state.formatModelContext(formatModelContextParams{
+ includeMaps: false,
+ smartContextEnabled: req.SmartContext,
+ includeApplyScript: false, // already included in planStageSharedMsgs
+ activeOnly: true,
+ activatePaths: activatePaths,
+ activatePathsOrdered: activatePathsOrdered,
+ maxTokens: int(float64(tokensRemaining) * 0.95), // leave a little extra room
+ })
+ } else {
+ // if auto context is disabled, just dump in any remaining auto contexts, since all basic contexts have already been added in planStageSharedMsgs
+ planningPhaseOnlyMsgs = state.formatModelContext(formatModelContextParams{
+ includeMaps: false,
+ smartContextEnabled: req.SmartContext,
+ includeApplyScript: false, // already included in planStageSharedMsgs
+ autoOnly: true,
+ })
+ }
+ }
+ }
+
+ getTellSysPromptParams := getTellSysPromptParams{
+ planStageSharedMsgs: planStageSharedMsgs,
+ planningPhaseOnlyMsgs: planningPhaseOnlyMsgs,
+ implementationMsgs: implementationMsgs,
+ contextTokenLimit: tentativeMaxTokens,
+ }
+
+ // log.Println("getTellSysPromptParams:\n", spew.Sdump(getTellSysPromptParams))
+
+ sysParts, err := state.getTellSysPrompt(getTellSysPromptParams)
+ if err != nil {
+ log.Printf("Error getting tell sys prompt: %v\n", err)
+ go notify.NotifyErr(notify.SeverityError, fmt.Errorf("error getting tell sys prompt: %v", err))
+
+ active.StreamDoneCh <- &shared.ApiError{
+ Type: shared.ApiErrorTypeOther,
+ Status: http.StatusInternalServerError,
+ Msg: fmt.Sprintf("Error getting tell sys prompt: %v", err),
+ }
+ return
+ }
+
+ // log.Println("**sysPrompt:**\n", spew.Sdump(sysParts))
+
+ state.messages = []types.ExtendedChatMessage{
+ {
+ Role: openai.ChatMessageRoleSystem,
+ Content: sysParts,
+ },
+ }
+
+ promptMessage, ok := state.resolvePromptMessage(unfinishedSubtaskReasoning)
+ if !ok {
+ return
+ }
+
+ // log.Println("messages:\n\n", spew.Sdump(state.messages))
+
+ // log.Println("promptMessage:", spew.Sdump(promptMessage))
+
+ state.tokensBeforeConvo =
+ model.GetMessagesTokenEstimate(state.messages...) +
+ model.GetMessagesTokenEstimate(*promptMessage) +
+ state.latestSummaryTokens +
+ model.TokensPerRequest
+
+ // print out breakdown of token usage
+ log.Printf("Latest summary tokens: %d\n", state.latestSummaryTokens)
+ log.Printf("Total tokens before convo: %d\n", state.tokensBeforeConvo)
+
+ var effectiveMaxTokens int
+ if state.currentStage.TellStage == shared.TellStagePlanning {
+ if state.currentStage.PlanningPhase == shared.PlanningPhaseContext {
+ effectiveMaxTokens = state.settings.GetArchitectEffectiveMaxTokens()
+ } else {
+ effectiveMaxTokens = state.settings.GetPlannerEffectiveMaxTokens()
+ }
+ } else if state.currentStage.TellStage == shared.TellStageImplementation {
+ effectiveMaxTokens = state.settings.GetCoderEffectiveMaxTokens()
+ }
+
+ if state.tokensBeforeConvo > effectiveMaxTokens {
+ // token limit already exceeded before adding conversation
+ err := fmt.Errorf("token limit exceeded before adding conversation")
+ log.Printf("Error: %v\n", err)
+ go notify.NotifyErr(notify.SeverityError, fmt.Errorf("token limit exceeded before adding conversation"))
+
+ active.StreamDoneCh <- &shared.ApiError{
+ Type: shared.ApiErrorTypeOther,
+ Status: http.StatusInternalServerError,
+ Msg: "Token limit exceeded before adding conversation",
+ }
+ return
+ }
+
+ if !state.addConversationMessages() {
+ return
+ }
+
+ // add the prompt message to the end of the messages slice
+ if promptMessage != nil {
+ state.messages = append(state.messages, *promptMessage)
+ } else {
+ log.Println("promptMessage is nil")
+ go notify.NotifyErr(notify.SeverityError, fmt.Errorf("promptMessage is nil"))
+
+ active.StreamDoneCh <- &shared.ApiError{
+ Type: shared.ApiErrorTypeOther,
+ Status: http.StatusInternalServerError,
+ Msg: "Prompt message isn't set",
+ }
+ return
+ }
+
+ state.replyId = uuid.New().String()
+ state.replyParser = types.NewReplyParser()
+
+ if missingFileResponse != "" && !state.handleMissingFileResponse(unfinishedSubtaskReasoning) {
+ return
+ }
+
+ // filter out any messages that are empty
+ state.messages = model.FilterEmptyMessages(state.messages)
+
+ log.Printf("\n\nMessages: %d\n", len(state.messages))
+ // for _, message := range state.messages {
+ // log.Printf("%s: %v\n", message.Role, message.Content)
+ // }
+
+ requestTokens := model.GetMessagesTokenEstimate(state.messages...) + model.TokensPerRequest
+ state.totalRequestTokens = requestTokens
+
+ modelConfig := tentativeModelConfig
+
+ log.Println("Tell plan - setting modelConfig")
+ log.Println("Tell plan - requestTokens:", requestTokens)
+ log.Println("Tell plan - state.currentStage.TellStage:", state.currentStage.TellStage)
+ log.Println("Tell plan - state.currentStage.PlanningPhase:", state.currentStage.PlanningPhase)
+
+ if state.currentStage.TellStage == shared.TellStagePlanning {
+ if state.currentStage.PlanningPhase == shared.PlanningPhaseContext {
+ log.Println("Tell plan - isContextStage - setting modelConfig to context loader")
+ modelConfig = state.settings.GetModelPack().GetArchitect().GetRoleForInputTokens(requestTokens, state.settings)
+ log.Println("Tell plan - got modelConfig for context phase")
+ } else if state.currentStage.PlanningPhase == shared.PlanningPhaseTasks {
+ modelConfig = state.settings.GetModelPack().Planner.GetRoleForInputTokens(requestTokens, state.settings)
+ log.Println("Tell plan - got modelConfig for tasks phase")
+ }
+ } else if state.currentStage.TellStage == shared.TellStageImplementation {
+ modelConfig = state.settings.GetModelPack().GetCoder().GetRoleForInputTokens(requestTokens, state.settings)
+ log.Println("Tell plan - got modelConfig for implementation stage")
+ }
+
+ state.modelConfig = &modelConfig
+
+ baseModelConfig := modelConfig.GetBaseModelConfig(authVars, state.settings, state.orgUserConfig)
+
+ if baseModelConfig == nil {
+ log.Println("Tell plan - baseModelConfig is nil")
+ log.Println("Tell plan - modelConfig id:", modelConfig.ModelId)
+
+ go notify.NotifyErr(notify.SeverityError, fmt.Errorf("No model config found for: %s", state.modelConfig.ModelId))
+ active.StreamDoneCh <- &shared.ApiError{
+ Type: shared.ApiErrorTypeOther,
+ Status: http.StatusInternalServerError,
+ Msg: "No model config found for: " + string(state.modelConfig.ModelId),
+ }
+ return
+ }
+
+ state.baseModelConfig = baseModelConfig
+
+ // if the model doesn't support cache control, remove the cache control spec from the messages
+ if !baseModelConfig.SupportsCacheControl {
+ for i := range state.messages {
+ for j := range state.messages[i].Content {
+ if state.messages[i].Content[j].CacheControl != nil {
+ state.messages[i].Content[j].CacheControl = nil
+ }
+ }
+ }
+ }
+
+ // if the model doesn't support images, remove any image parts from the messages
+ if !baseModelConfig.HasImageSupport {
+ log.Println("Tell exec - model doesn't support images. Removing image parts from messages. File name will still be included.")
+
+ for i := range state.messages {
+ filteredContent := []types.ExtendedChatMessagePart{}
+ for _, part := range state.messages[i].Content {
+ if part.Type != openai.ChatMessagePartTypeImageURL {
+ filteredContent = append(filteredContent, part)
+ }
+ }
+ state.messages[i].Content = filteredContent
+ }
+ }
+
+ log.Println("tell exec - will send model request with:", spew.Sdump(map[string]interface{}{
+ "provider": baseModelConfig.Provider,
+ "modelId": baseModelConfig.ModelId,
+ "modelTag": baseModelConfig.ModelTag,
+ "modelName": baseModelConfig.ModelName,
+ "tokens": requestTokens,
+ }))
+
+ _, apiErr := hooks.ExecHook(hooks.WillSendModelRequest, hooks.HookParams{
+ Auth: auth,
+ Plan: plan,
+ WillSendModelRequestParams: &hooks.WillSendModelRequestParams{
+ InputTokens: requestTokens,
+ OutputTokens: baseModelConfig.MaxOutputTokens - requestTokens,
+ ModelName: baseModelConfig.ModelName,
+ ModelId: baseModelConfig.ModelId,
+ ModelTag: baseModelConfig.ModelTag,
+ IsUserPrompt: true,
+ },
+ })
+ if apiErr != nil {
+ active.StreamDoneCh <- apiErr
+ return
+ }
+
+ state.doTellRequest()
+
+ if shouldBuildPending {
+ go state.queuePendingBuilds()
+ }
+
+ UpdateActivePlan(planId, branch, func(ap *types.ActivePlan) {
+ ap.CurrentStreamingReplyId = state.replyId
+ ap.CurrentReplyDoneCh = make(chan bool, 1)
+ })
+
+}
+
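+// doTellRequest resolves any fallback for previous model errors, builds the streaming chat completion request, and starts listening to the stream; retries from onError re-enter through this method.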
+func (state *activeTellStreamState) doTellRequest() {
+ clients := state.clients
+ authVars := state.authVars
+ modelConfig := state.modelConfig
+ active := state.activePlan
+
+ fallbackRes := modelConfig.GetFallbackForModelError(state.numErrorRetry, state.didProviderFallback, state.modelErr, authVars, state.settings, state.orgUserConfig)
+ modelConfig = fallbackRes.ModelRoleConfig
+ stop := []string{""}
+
+ baseModelConfig := modelConfig.GetBaseModelConfig(state.authVars, state.settings, state.orgUserConfig)
+
+ if fallbackRes.FallbackType == shared.FallbackTypeProvider {
+ state.didProviderFallback = true
+ }
+
+ // log.Println("Stop:", stop)
+ // spew.Dump(state.messages)
+
+ log.Println("modelConfig:", spew.Sdump(map[string]interface{}{
+ "modelName": baseModelConfig.ModelName,
+ "modelId": baseModelConfig.ModelId,
+ "modelTag": baseModelConfig.ModelTag,
+ }))
+
+ if state.noCacheSupportErr {
+ log.Println("Tell exec - request failed with cache support error. Removing cache control breakpoints from messages.")
+ for i := range state.messages {
+ for j := range state.messages[i].Content {
+ if state.messages[i].Content[j].CacheControl != nil {
+ state.messages[i].Content[j].CacheControl = nil
+ }
+ }
+ }
+ }
+
+ modelReq := types.ExtendedChatCompletionRequest{
+ Model: baseModelConfig.ModelName,
+ Messages: state.messages,
+ Stream: true,
+ StreamOptions: &openai.StreamOptions{
+ IncludeUsage: true,
+ },
+ Temperature: modelConfig.Temperature,
+ TopP: modelConfig.TopP,
+ }
+
+ if baseModelConfig.StopDisabled {
+ state.manualStop = stop
+ } else {
+ modelReq.Stop = stop
+ }
+
+ // update state
+ state.fallbackRes = fallbackRes
+ state.requestStartedAt = time.Now()
+ state.originalReq = &modelReq
+ state.modelConfig = modelConfig
+
+ // output the modelReq to a json file
+ // if jsonData, err := json.MarshalIndent(modelReq, "", " "); err == nil {
+ // timestamp := time.Now().Format("2006-01-02-150405")
+ // filename := fmt.Sprintf("generations/model-request-%s.json", timestamp)
+ // if err := os.WriteFile(filename, jsonData, 0644); err != nil {
+ // log.Printf("Error writing model request to file: %v\n", err)
+ // }
+ // } else {
+ // log.Printf("Error marshaling model request to JSON: %v\n", err)
+ // }
+
+ log.Printf("[Tell] doTellRequest retry=%d fallbackRetry=%d using model=%s",
+ state.numErrorRetry, state.numFallbackRetry, baseModelConfig.ModelName)
+
+ // start the stream
+ stream, err := model.CreateChatCompletionStream(clients, authVars, modelConfig, state.settings, state.orgUserConfig, state.currentOrgId, state.currentUserId, active.ModelStreamCtx, modelReq)
+ if err != nil {
+ log.Printf("Error starting reply stream: %v\n", err)
+ go notify.NotifyErr(notify.SeverityError, fmt.Errorf("error starting reply stream: %v", err))
+ active.StreamDoneCh <- &shared.ApiError{
+ Type: shared.ApiErrorTypeOther,
+ Status: http.StatusInternalServerError,
+ Msg: "Error starting reply stream: " + err.Error(),
+ }
+ return
+ }
+
+ // handle stream chunks
+ go state.listenStream(stream)
+}
+
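+// dryRunCalculateTokensWithoutContext assembles the system prompt, conversation, and prompt message on a shallow clone of the state, with no file context included, and reports whether the result fits the limit along with its token estimate.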
+func (state *activeTellStreamState) dryRunCalculateTokensWithoutContext(tentativeMaxTokens int, unfinishedSubtaskReasoning string) (bool, int) {
+ clone := &activeTellStreamState{
+ modelStreamId: state.modelStreamId,
+ clients: state.clients,
+ req: state.req,
+ auth: state.auth,
+ currentOrgId: state.currentOrgId,
+ currentUserId: state.currentUserId,
+ plan: state.plan,
+ branch: state.branch,
+ iteration: state.iteration,
+ missingFileResponse: state.missingFileResponse,
+ settings: state.settings,
+ currentStage: state.currentStage,
+ subtasks: state.subtasks,
+ currentSubtask: state.currentSubtask,
+ convo: state.convo,
+ summaries: state.summaries,
+ latestSummaryTokens: state.latestSummaryTokens,
+ userPrompt: state.userPrompt,
+ promptMessage: state.promptMessage,
+ hasContextMap: state.hasContextMap,
+ contextMapEmpty: state.contextMapEmpty,
+ hasAssistantReply: state.hasAssistantReply,
+ modelContext: state.modelContext,
+ activePlan: state.activePlan,
+ }
+
+ sysParts, err := clone.getTellSysPrompt(getTellSysPromptParams{
+ contextTokenLimit: tentativeMaxTokens,
+ dryRunWithoutContext: true,
+ })
+
+ if err != nil {
+ log.Printf("error getting tell sys prompt for dry run token calculation: %v", err)
+
+ msg := "Error getting tell sys prompt for dry run token calculation"
+ if err.Error() == AllTasksCompletedMsg {
+ msg = "There's no current task to implement. Try a prompt instead of the 'continue' command."
+ go notify.NotifyErr(notify.SeverityInfo, fmt.Errorf("%s", msg))
+ } else {
+ go notify.NotifyErr(notify.SeverityError, fmt.Errorf("error getting tell sys prompt for dry run token calculation: %v", err))
+ }
+
+ state.activePlan.StreamDoneCh <- &shared.ApiError{
+ Type: shared.ApiErrorTypeOther,
+ Status: http.StatusInternalServerError,
+ Msg: msg,
+ }
+ return false, 0
+ }
+
+ clone.messages = []types.ExtendedChatMessage{
+ {
+ Role: openai.ChatMessageRoleSystem,
+ Content: sysParts,
+ },
+ }
+
+ promptMessage, ok := clone.resolvePromptMessage(unfinishedSubtaskReasoning)
+ if !ok {
+ return false, 0
+ }
+
+ clone.tokensBeforeConvo =
+ model.GetMessagesTokenEstimate(clone.messages...) +
+ model.GetMessagesTokenEstimate(*promptMessage) +
+ clone.latestSummaryTokens +
+ model.TokensPerRequest
+
+ var effectiveMaxTokens int
+ if clone.currentStage.TellStage == shared.TellStagePlanning {
+ if clone.currentStage.PlanningPhase == shared.PlanningPhaseContext {
+ effectiveMaxTokens = clone.settings.GetArchitectEffectiveMaxTokens()
+ } else {
+ effectiveMaxTokens = clone.settings.GetPlannerEffectiveMaxTokens()
+ }
+ } else if clone.currentStage.TellStage == shared.TellStageImplementation {
+ effectiveMaxTokens = clone.settings.GetCoderEffectiveMaxTokens()
+ }
+
+ if clone.tokensBeforeConvo > effectiveMaxTokens {
+ log.Println("tokensBeforeConvo exceeds max tokens during dry run")
+ go notify.NotifyErr(notify.SeverityError, fmt.Errorf("tokensBeforeConvo exceeds max tokens during dry run"))
+
+ state.activePlan.StreamDoneCh <- &shared.ApiError{
+ Type: shared.ApiErrorTypeOther,
+ Status: http.StatusInternalServerError,
+ Msg: "Max tokens exceeded before adding conversation",
+ }
+ return false, 0
+ }
+
+ if !clone.addConversationMessages() {
+ return false, 0
+ }
+
+ clone.messages = append(clone.messages, *promptMessage)
+
+ return true, model.GetMessagesTokenEstimate(clone.messages...) + model.TokensPerRequest
+}
diff --git a/app/server/model/plan/tell_load.go b/app/server/model/plan/tell_load.go
new file mode 100644
index 0000000000000000000000000000000000000000..4cc065123678b85997705bebb8547db1a891e823
--- /dev/null
+++ b/app/server/model/plan/tell_load.go
@@ -0,0 +1,442 @@
+package plan
+
+import (
+ "fmt"
+ "log"
+ "net/http"
+ "plandex-server/db"
+ "plandex-server/model"
+ "plandex-server/notify"
+ "plandex-server/types"
+ "runtime"
+ "runtime/debug"
+
+ shared "plandex-shared"
+
+ "github.com/jmoiron/sqlx"
+ "github.com/sashabaranov/go-openai"
+)
+
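+// loadTellPlan loads settings, contexts, conversation, summaries, and subtasks in parallel under a repo lock, stores the new user prompt on the first iteration, and populates the stream state.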
+func (state *activeTellStreamState) loadTellPlan() error {
+ clients := state.clients
+ authVars := state.authVars
+ req := state.req
+ auth := state.auth
+ plan := state.plan
+ planId := plan.Id
+ branch := state.branch
+ currentUserId := state.currentUserId
+ currentOrgId := state.currentOrgId
+ iteration := state.iteration
+ missingFileResponse := state.missingFileResponse
+
+ err := state.setActivePlan()
+ if err != nil {
+ return err
+ }
+ active := state.activePlan
+
+ lockScope := db.LockScopeWrite
+ if iteration > 0 || missingFileResponse != "" {
+ lockScope = db.LockScopeRead
+ }
+
+ var modelContext []*db.Context
+ var convo []*db.ConvoMessage
+ var promptMsg *db.ConvoMessage
+ var summaries []*db.ConvoSummary
+ var subtasks []*db.Subtask
+ var settings *shared.PlanSettings
+ var orgUserConfig *shared.OrgUserConfig
+ var latestSummaryTokens int
+ var currentPlan *shared.CurrentPlanState
+
+ log.Printf("[TellLoad] Tell plan - loadTellPlan - iteration: %d, missingFileResponse: %s, req.IsUserContinue: %t, lockScope: %s\n", iteration, missingFileResponse, req.IsUserContinue, lockScope)
+
+ db.ExecRepoOperation(db.ExecRepoOperationParams{
+ OrgId: auth.OrgId,
+ UserId: auth.User.Id,
+ PlanId: planId,
+ Branch: branch,
+ Scope: lockScope,
+ Ctx: active.Ctx,
+ CancelFn: active.CancelFn,
+ Reason: "load tell plan",
+ }, func(repo *db.GitRepo) error {
+ errCh := make(chan error, 4)
+
+ // get name for plan and rename if it's a draft
+ go func() {
+ defer func() {
+ if r := recover(); r != nil {
+ log.Printf("panic in getPlanSettings: %v\n%s", r, debug.Stack())
+ errCh <- fmt.Errorf("panic getting plan settings: %v\n%s", r, debug.Stack())
+ runtime.Goexit() // don't allow outer function to continue and double-send to channel
+ }
+ }()
+
+ res, err := db.GetPlanSettings(plan)
+ if err != nil {
+ log.Printf("Error getting plan settings: %v\n", err)
+ errCh <- fmt.Errorf("error getting plan settings: %v", err)
+ return
+ }
+ settings = res
+
+ orgUserConfigRes, err := db.GetOrgUserConfig(auth.User.Id, auth.OrgId)
+ if err != nil {
+ log.Printf("Error getting org user config: %v\n", err)
+ errCh <- fmt.Errorf("error getting org user config: %v", err)
+ return
+ }
+ orgUserConfig = orgUserConfigRes
+
+ if plan.Name == "draft" {
+ name, err := model.GenPlanName(
+ auth,
+ plan,
+ settings,
+ orgUserConfig,
+ clients,
+ authVars,
+ req.Prompt,
+ active.SessionId,
+ active.Ctx,
+ )
+
+ if err != nil {
+ log.Printf("Error generating plan name: %v\n", err)
+ errCh <- fmt.Errorf("error generating plan name: %v", err)
+ return
+ }
+
+ err = db.WithTx(active.Ctx, "rename plan", func(tx *sqlx.Tx) error {
+ err := db.RenamePlan(planId, name, tx)
+
+ if err != nil {
+ log.Printf("Error renaming plan: %v\n", err)
+ return fmt.Errorf("error renaming plan: %v", err)
+ }
+
+ err = db.IncNumNonDraftPlans(currentUserId, tx)
+
+ if err != nil {
+ log.Printf("Error incrementing num non draft plans: %v\n", err)
+ return fmt.Errorf("error incrementing num non draft plans: %v", err)
+ }
+
+ return nil
+ })
+
+ if err != nil {
+ log.Printf("Error renaming plan: %v\n", err)
+ errCh <- fmt.Errorf("error renaming plan: %v", err)
+ return
+ }
+ }
+
+ errCh <- nil
+ }()
+
+ go func() {
+ defer func() {
+ if r := recover(); r != nil {
+ log.Printf("panic in getPlanContexts: %v\n%s", r, debug.Stack())
+ errCh <- fmt.Errorf("error getting plan modelContext: %v", r)
+ runtime.Goexit() // don't allow outer function to continue and double-send to channel
+ }
+ }()
+
+ if iteration > 0 || missingFileResponse != "" {
+ modelContext = active.Contexts
+ } else {
+ res, err := db.GetPlanContexts(currentOrgId, planId, true, false)
+ if err != nil {
+ log.Printf("Error getting plan modelContext: %v\n", err)
+ errCh <- fmt.Errorf("error getting plan modelContext: %v", err)
+ return
+ }
+
+ modelContext = res
+
+ log.Printf("[TellLoad] Tell plan - loadTellPlan - modelContext: %d\n", len(modelContext))
+ // for _, part := range modelContext {
+ // log.Printf("[TellLoad] Tell plan - loadTellPlan - part: %s - %s - %s - %d tokens\n", part.ContextType, part.Name, part.FilePath, part.NumTokens)
+ // }
+ }
+
+ errCh <- nil
+ }()
+
+ go func() {
+ defer func() {
+ if r := recover(); r != nil {
+ log.Printf("panic in getPlanConvo: %v\n%s", r, debug.Stack())
+ errCh <- fmt.Errorf("error getting plan convo: %v", r)
+ runtime.Goexit() // don't allow outer function to continue and double-send to channel
+ }
+ }()
+
+ res, err := db.GetPlanConvo(currentOrgId, planId)
+ if err != nil {
+ log.Printf("Error getting plan convo: %v\n", err)
+ errCh <- fmt.Errorf("error getting plan convo: %v", err)
+ return
+ }
+ convo = res
+ UpdateActivePlan(planId, branch, func(ap *types.ActivePlan) {
+ ap.MessageNum = len(convo)
+ })
+
+ promptTokens := shared.GetNumTokensEstimate(req.Prompt)
+ innerErrCh := make(chan error, 2)
+
+ go func() {
+ defer func() {
+ if r := recover(); r != nil {
+ log.Printf("panic in storeUserMessage: %v\n%s", r, debug.Stack())
+ innerErrCh <- fmt.Errorf("error storing user message: %v", r)
+ runtime.Goexit() // don't allow outer function to continue and double-send to channel
+ }
+ }()
+
+ if iteration == 0 && missingFileResponse == "" && !req.IsUserContinue {
+ num := len(convo) + 1
+
+ log.Printf("[TellLoad] storing user message | len(convo): %d | num: %d\n", len(convo), num)
+
+ promptMsg = &db.ConvoMessage{
+ OrgId: currentOrgId,
+ PlanId: planId,
+ UserId: currentUserId,
+ Role: openai.ChatMessageRoleUser,
+ Tokens: promptTokens,
+ Num: num,
+ Message: req.Prompt,
+ Flags: shared.ConvoMessageFlags{
+ IsApplyDebug: req.IsApplyDebug,
+ IsUserDebug: req.IsUserDebug,
+ IsChat: req.IsChatOnly,
+ },
+ }
+
+ log.Println("[TellLoad] storing user message")
+ // repo.LogGitRepoState()
+
+ _, err := db.StoreConvoMessage(repo, promptMsg, auth.User.Id, branch, true)
+
+ if err != nil {
+ log.Printf("[TellLoad] Error storing user message: %v\n", err)
+ innerErrCh <- fmt.Errorf("error storing user message: %v", err)
+ return
+ }
+
+ UpdateActivePlan(planId, branch, func(ap *types.ActivePlan) {
+ ap.MessageNum = num
+ })
+ }
+
+ innerErrCh <- nil
+ }()
+
+ go func() {
+ defer func() {
+ if r := recover(); r != nil {
+ log.Printf("panic in getPlanSummaries: %v\n%s", r, debug.Stack())
+ innerErrCh <- fmt.Errorf("error getting plan summaries: %v", r)
+ runtime.Goexit() // don't allow outer function to continue and double-send to channel
+ }
+ }()
+
+ var convoMessageIds []string
+
+ for _, convoMessage := range convo {
+ convoMessageIds = append(convoMessageIds, convoMessage.Id)
+ }
+
+ log.Println("getting plan summaries")
+ log.Println("convoMessageIds:", convoMessageIds)
+
+ res, err := db.GetPlanSummaries(planId, convoMessageIds)
+ if err != nil {
+ log.Printf("Error getting plan summaries: %v\n", err)
+ innerErrCh <- fmt.Errorf("error getting plan summaries: %v", err)
+ return
+ }
+ summaries = res
+
+ log.Printf("got %d plan summaries", len(summaries))
+
+ if len(summaries) > 0 {
+ latestSummaryTokens = shared.GetNumTokensEstimate(summaries[len(summaries)-1].Summary)
+ }
+
+ innerErrCh <- nil
+ }()
+
+ for i := 0; i < 2; i++ {
+ err := <-innerErrCh
+ if err != nil {
+ errCh <- err
+ return
+ }
+ }
+
+ if promptMsg != nil {
+ convo = append(convo, promptMsg)
+ }
+
+ errCh <- nil
+ }()
+
+ go func() {
+ defer func() {
+ if r := recover(); r != nil {
+ log.Printf("panic in getPlanSubtasks: %v\n%s", r, debug.Stack())
+ errCh <- fmt.Errorf("error getting plan subtasks: %v\n%s", r, debug.Stack())
+ runtime.Goexit() // don't allow outer function to continue and double-send to channel
+ }
+ }()
+
+ res, err := db.GetPlanSubtasks(auth.OrgId, planId)
+ if err != nil {
+ log.Printf("Error getting plan subtasks: %v\n", err)
+ errCh <- fmt.Errorf("error getting plan subtasks: %v", err)
+ return
+ }
+ subtasks = res
+ errCh <- nil
+ }()
+
+ for i := 0; i < 4; i++ {
+ err = <-errCh
+ if err != nil {
+ go notify.NotifyErr(notify.SeverityError, fmt.Errorf("error loading plan: %v", err))
+
+ active.StreamDoneCh <- &shared.ApiError{
+ Type: shared.ApiErrorTypeOther,
+ Status: http.StatusInternalServerError,
+ Msg: fmt.Sprintf("Error loading plan: %v", err),
+ }
+ return err
+ }
+ }
+
+ res, err := db.GetCurrentPlanState(db.CurrentPlanStateParams{
+ OrgId: currentOrgId,
+ PlanId: planId,
+ Contexts: modelContext,
+ })
+
+ if err != nil {
+ return fmt.Errorf("error getting current plan state: %v", err)
+ }
+
+ currentPlan = res
+
+ return nil
+ })
+
+ if err != nil {
+ log.Printf("execTellPlan: error loading tell plan: %v\n", err)
+ go notify.NotifyErr(notify.SeverityError, fmt.Errorf("error loading tell plan: %v", err))
+
+ active.StreamDoneCh <- &shared.ApiError{
+ Type: shared.ApiErrorTypeOther,
+ Status: http.StatusInternalServerError,
+ Msg: fmt.Sprintf("Error loading tell plan: %v", err),
+ }
+ return err
+ }
+
+ state.modelContext = modelContext
+ state.convo = convo
+ state.promptConvoMessage = promptMsg
+ state.summaries = summaries
+ state.latestSummaryTokens = latestSummaryTokens
+ state.settings = settings
+ state.currentPlanState = currentPlan
+ state.subtasks = subtasks
+
+ for _, subtask := range state.subtasks {
+ if !subtask.IsFinished {
+ state.currentSubtask = subtask
+ break
+ }
+ }
+
+ log.Printf("[TellLoad] Subtasks: %+v", state.subtasks)
+ log.Printf("[TellLoad] Current subtask: %+v", state.currentSubtask)
+
+ state.hasContextMap = false
+ state.contextMapEmpty = true
+ for _, context := range state.modelContext {
+ if context.ContextType == shared.ContextMapType {
+ state.hasContextMap = true
+ if context.NumTokens > 0 {
+ state.contextMapEmpty = false
+ }
+ break
+ }
+ }
+
+ state.hasAssistantReply = false
+ for _, convoMessage := range state.convo {
+ if convoMessage.Role == openai.ChatMessageRoleAssistant {
+ state.hasAssistantReply = true
+ break
+ }
+ }
+
+ if iteration == 0 && missingFileResponse == "" {
+ UpdateActivePlan(planId, branch, func(ap *types.ActivePlan) {
+ ap.Contexts = state.modelContext
+
+ for _, context := range state.modelContext {
+ if context.FilePath != "" {
+ ap.ContextsByPath[context.FilePath] = context
+ }
+ }
+ })
+ } else if missingFileResponse == "" {
+ // reset current reply content and num tokens
+ UpdateActivePlan(planId, branch, func(ap *types.ActivePlan) {
+ ap.CurrentReplyContent = ""
+ ap.NumTokens = 0
+ })
+ }
+
+ // if any skipped paths have since been added to context, remove them from skipped paths
+ if len(active.SkippedPaths) > 0 {
+ var toUnskipPaths []string
+ for contextPath := range active.ContextsByPath {
+ if active.SkippedPaths[contextPath] {
+ toUnskipPaths = append(toUnskipPaths, contextPath)
+ }
+ }
+ if len(toUnskipPaths) > 0 {
+ UpdateActivePlan(planId, branch, func(ap *types.ActivePlan) {
+ for _, path := range toUnskipPaths {
+ delete(ap.SkippedPaths, path)
+ }
+ })
+ }
+ }
+
+ return nil
+}
+
+func (state *activeTellStreamState) setActivePlan() error {
+ plan := state.plan
+ branch := state.branch
+
+ active := GetActivePlan(plan.Id, branch)
+
+ if active == nil {
+ return fmt.Errorf("no active plan with id %s", plan.Id)
+ }
+
+ state.activePlan = active
+
+ return nil
+}
diff --git a/app/server/model/plan/tell_missing_file.go b/app/server/model/plan/tell_missing_file.go
new file mode 100644
index 0000000000000000000000000000000000000000..d3b73bc82f5369bdf20ff976de30bd395a499416
--- /dev/null
+++ b/app/server/model/plan/tell_missing_file.go
@@ -0,0 +1,132 @@
+package plan
+
+import (
+ "log"
+ "plandex-server/model/prompts"
+ "plandex-server/types"
+
+ shared "plandex-shared"
+
+ "github.com/sashabaranov/go-openai"
+)
+
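+// handleMissingFileResponse resumes a stream that stopped on a file missing from context: on skip, the reply is truncated to just before the file and the path is marked skipped; on overwrite, the path is allowed; in both cases a follow-up user prompt is appended so the model can continue.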
+func (state *activeTellStreamState) handleMissingFileResponse(unfinishedSubtaskReasoning string) bool {
+ missingFileResponse := state.missingFileResponse
+ planId := state.plan.Id
+ branch := state.branch
+ req := state.req
+
+ active := GetActivePlan(planId, branch)
+
+ if active == nil {
+ log.Printf("execTellPlan: Active plan not found for plan ID %s on branch %s\n", planId, branch)
+ return false
+ }
+
+ log.Println("Missing file response:", missingFileResponse, "setting replyParser")
+ // log.Printf("Current reply content:\n%s\n", active.CurrentReplyContent)
+
+ state.replyParser.AddChunk(active.CurrentReplyContent, true)
+ res := state.replyParser.Read()
+ currentFile := res.CurrentFilePath
+
+ log.Printf("Current file: %s\n", currentFile)
+ // log.Println("Current reply content:\n", active.CurrentReplyContent)
+
+ replyContent := active.CurrentReplyContent
+ numTokens := active.NumTokens
+
+ if missingFileResponse == shared.RespondMissingFileChoiceSkip {
+ replyBeforeCurrentFile := state.replyParser.GetReplyBeforeCurrentPath()
+ numTokens = shared.GetNumTokensEstimate(replyBeforeCurrentFile)
+
+ replyContent = replyBeforeCurrentFile
+ state.replyParser = types.NewReplyParser()
+ state.replyParser.AddChunk(replyContent, true)
+
+ UpdateActivePlan(planId, branch, func(ap *types.ActivePlan) {
+ ap.CurrentReplyContent = replyContent
+ ap.NumTokens = numTokens
+ ap.SkippedPaths[currentFile] = true
+ })
+
+ } else {
+ if missingFileResponse == shared.RespondMissingFileChoiceOverwrite {
+ UpdateActivePlan(planId, branch, func(ap *types.ActivePlan) {
+ ap.AllowOverwritePaths[currentFile] = true
+ })
+ }
+ }
+
+ state.messages = append(state.messages, types.ExtendedChatMessage{
+ Role: openai.ChatMessageRoleAssistant,
+ Content: []types.ExtendedChatMessagePart{
+ {
+ Type: openai.ChatMessagePartTypeText,
+ Text: active.CurrentReplyContent,
+ },
+ },
+ })
+
+ if missingFileResponse == shared.RespondMissingFileChoiceSkip {
+ state.replyParser.FinishAndRead()
+ // use the originally detected missing file path; the parser was truncated to before this file above, so its current path no longer points at the skipped file
+ skipPrompt := prompts.GetSkipMissingFilePrompt(currentFile)
+
+ params := prompts.UserPromptParams{
+ CreatePromptParams: prompts.CreatePromptParams{
+ ExecMode: req.ExecEnabled,
+ AutoContext: req.AutoContext,
+ IsUserDebug: req.IsUserDebug,
+ IsApplyDebug: req.IsApplyDebug,
+ ContextTokenLimit: state.settings.GetPlannerEffectiveMaxTokens(),
+ },
+ Prompt: skipPrompt,
+ OsDetails: req.OsDetails,
+ CurrentStage: state.currentStage,
+ UnfinishedSubtaskReasoning: unfinishedSubtaskReasoning,
+ }
+
+ prompt := prompts.GetWrappedPrompt(params) + "\n\n" + skipPrompt // repetition of skip prompt to improve instruction following
+
+ state.messages = append(state.messages, types.ExtendedChatMessage{
+ Role: openai.ChatMessageRoleUser,
+ Content: []types.ExtendedChatMessagePart{
+ {
+ Type: openai.ChatMessagePartTypeText,
+ Text: prompt,
+ },
+ },
+ })
+
+ } else {
+ missingPrompt := prompts.GetMissingFileContinueGeneratingPrompt(res.CurrentFilePath)
+
+ params := prompts.UserPromptParams{
+ CreatePromptParams: prompts.CreatePromptParams{
+ ExecMode: req.ExecEnabled,
+ AutoContext: req.AutoContext,
+ IsUserDebug: req.IsUserDebug,
+ IsApplyDebug: req.IsApplyDebug,
+ ContextTokenLimit: state.settings.GetPlannerEffectiveMaxTokens(),
+ },
+ Prompt: missingPrompt,
+ OsDetails: req.OsDetails,
+ CurrentStage: state.currentStage,
+ UnfinishedSubtaskReasoning: unfinishedSubtaskReasoning,
+ }
+
+ prompt := prompts.GetWrappedPrompt(params) + "\n\n" + missingPrompt // repetition of missing prompt to improve instruction following
+
+ state.messages = append(state.messages, types.ExtendedChatMessage{
+ Role: openai.ChatMessageRoleUser,
+ Content: []types.ExtendedChatMessagePart{
+ {
+ Type: openai.ChatMessagePartTypeText,
+ Text: prompt,
+ },
+ },
+ })
+ }
+
+ return true
+}
diff --git a/app/server/model/plan/tell_prompt_message.go b/app/server/model/plan/tell_prompt_message.go
new file mode 100644
index 0000000000000000000000000000000000000000..d256b43bd3337c23391ba90f554282ea2a4d8ecc
--- /dev/null
+++ b/app/server/model/plan/tell_prompt_message.go
@@ -0,0 +1,183 @@
+package plan
+
+import (
+ "log"
+ "net/http"
+ "plandex-server/model/prompts"
+ "plandex-server/types"
+ shared "plandex-shared"
+
+ "github.com/sashabaranov/go-openai"
+)
+
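+// resolvePromptMessage builds the user prompt message for this iteration, covering chat-only mode, user continues (reusing a trailing user message or injecting a continue prompt), and auto-continues between iterations.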
+func (state *activeTellStreamState) resolvePromptMessage(
+ unfinishedSubtaskReasoning string,
+) (*types.ExtendedChatMessage, bool) {
+ req := state.req
+ active := state.activePlan
+ iteration := state.iteration
+
+ var promptMessage *types.ExtendedChatMessage
+
+ state.skipConvoMessages = map[string]bool{}
+
+ lastMessage := state.lastSuccessfulConvoMessage()
+
+ if req.IsUserContinue {
+ if lastMessage == nil {
+ active.StreamDoneCh <- &shared.ApiError{
+ Type: shared.ApiErrorTypeContinueNoMessages,
+ Status: http.StatusBadRequest,
+ Msg: "No messages yet. Can't continue plan.",
+ }
+ return nil, false
+ }
+ log.Println("User is continuing plan. Last message role:", lastMessage.Role)
+ }
+
+ if req.IsChatOnly {
+ var prompt string
+ if req.IsUserContinue {
+ if lastMessage.Role == openai.ChatMessageRoleUser {
+ log.Println("User is continuing plan in chat only mode. Last message was user message. Using last user message as prompt")
+ content := lastMessage.Message
+ prompt = content
+ state.userPrompt = content
+ state.skipConvoMessages[lastMessage.Id] = true
+ } else {
+ log.Println("User is continuing plan in chat only mode. Last message was assistant message. Using user continue prompt")
+ prompt = prompts.UserContinuePrompt
+ }
+ } else {
+ prompt = req.Prompt
+ }
+
+ wrapped := prompts.GetWrappedChatOnlyPrompt(prompts.ChatUserPromptParams{
+ CreatePromptParams: prompts.CreatePromptParams{
+ AutoContext: req.AutoContext,
+ ExecMode: req.ExecEnabled,
+ IsGitRepo: req.IsGitRepo,
+ // no need to pass in IsUserDebug or IsApplyDebug here because it's a chat message
+ },
+ Prompt: prompt,
+ OsDetails: req.OsDetails,
+ // no current task for chat only mode
+ })
+
+ promptMessage = &types.ExtendedChatMessage{
+ Role: openai.ChatMessageRoleUser,
+ Content: []types.ExtendedChatMessagePart{
+ {
+ Type: openai.ChatMessagePartTypeText,
+ Text: wrapped,
+ },
+ },
+ }
+ } else if req.IsUserContinue {
+ // log.Println("User is continuing plan. Last message:\n\n", lastMessage.Content)
+ if lastMessage.Role == openai.ChatMessageRoleUser {
+ // if last message was a user message, we want to remove it from the messages array and then use that last message as the prompt so we can continue from where the user left off
+
+ log.Println("User is continuing plan in tell mode. Last message was user message. Using last user message as prompt")
+ content := lastMessage.Message
+
+ params := prompts.UserPromptParams{
+ CreatePromptParams: prompts.CreatePromptParams{
+ ExecMode: req.ExecEnabled,
+ AutoContext: req.AutoContext,
+ IsGitRepo: req.IsGitRepo,
+ ContextTokenLimit: state.settings.GetPlannerEffectiveMaxTokens(),
+ // no need to pass in IsUserDebug or IsApplyDebug here because we're continuing
+ },
+ Prompt: content,
+ OsDetails: req.OsDetails,
+ CurrentStage: state.currentStage,
+ UnfinishedSubtaskReasoning: unfinishedSubtaskReasoning,
+ }
+
+ promptMessage = &types.ExtendedChatMessage{
+ Role: openai.ChatMessageRoleUser,
+ Content: []types.ExtendedChatMessagePart{
+ {
+ Type: openai.ChatMessagePartTypeText,
+ Text: prompts.GetWrappedPrompt(params),
+ },
+ },
+ }
+
+ state.userPrompt = content
+ } else {
+
+ // if the last message was an assistant message, we'll use the user continue prompt
+ log.Println("User is continuing plan in tell mode. Last message was assistant message. Using user continue prompt")
+
+ params := prompts.UserPromptParams{
+ CreatePromptParams: prompts.CreatePromptParams{
+ ExecMode: req.ExecEnabled,
+ AutoContext: req.AutoContext,
+ IsGitRepo: req.IsGitRepo,
+ ContextTokenLimit: state.settings.GetPlannerEffectiveMaxTokens(),
+ // no need to pass in IsUserDebug or IsApplyDebug here because we're continuing
+ },
+ Prompt: prompts.UserContinuePrompt,
+ OsDetails: req.OsDetails,
+ CurrentStage: state.currentStage,
+ UnfinishedSubtaskReasoning: unfinishedSubtaskReasoning,
+ }
+
+ promptMessage = &types.ExtendedChatMessage{
+ Role: openai.ChatMessageRoleUser,
+ Content: []types.ExtendedChatMessagePart{
+ {
+ Type: openai.ChatMessagePartTypeText,
+ Text: prompts.GetWrappedPrompt(params),
+ },
+ },
+ }
+ }
+ } else {
+ var prompt string
+ if iteration == 0 {
+ prompt = req.Prompt
+ } else if state.currentStage.TellStage == shared.TellStageImplementation {
+ prompt = prompts.AutoContinueImplementationPrompt
+ } else {
+ prompt = prompts.AutoContinuePlanningPrompt
+ }
+
+ state.userPrompt = prompt
+
+ params := prompts.UserPromptParams{
+ CreatePromptParams: prompts.CreatePromptParams{
+ ExecMode: req.ExecEnabled,
+ AutoContext: req.AutoContext,
+ IsUserDebug: req.IsUserDebug,
+ IsApplyDebug: req.IsApplyDebug,
+ IsGitRepo: req.IsGitRepo,
+ ContextTokenLimit: state.settings.GetPlannerEffectiveMaxTokens(),
+ },
+ Prompt: prompt,
+ OsDetails: req.OsDetails,
+ CurrentStage: state.currentStage,
+ UnfinishedSubtaskReasoning: unfinishedSubtaskReasoning,
+ }
+
+ finalPrompt := prompts.GetWrappedPrompt(params)
+
+ // log.Println("Final prompt:", finalPrompt)
+
+ promptMessage = &types.ExtendedChatMessage{
+ Role: openai.ChatMessageRoleUser,
+ Content: []types.ExtendedChatMessagePart{
+ {
+ Type: openai.ChatMessagePartTypeText,
+ Text: finalPrompt,
+ },
+ },
+ }
+ }
+
+ // log.Println("Prompt message:", promptMessage.Content)
+
+ return promptMessage, true
+}
diff --git a/app/server/model/plan/tell_stage.go b/app/server/model/plan/tell_stage.go
new file mode 100644
index 0000000000000000000000000000000000000000..57af035e49a69d77793c66ebc1a17fa30e3a37d1
--- /dev/null
+++ b/app/server/model/plan/tell_stage.go
@@ -0,0 +1,104 @@
+package plan
+
+import (
+ "log"
+ "plandex-server/db"
+ shared "plandex-shared"
+
+ "github.com/sashabaranov/go-openai"
+)
+
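+// lastSuccessfulConvoMessage returns the most recent conversation message that wasn't stopped and didn't error, or nil if there is none.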
+func (state *activeTellStreamState) lastSuccessfulConvoMessage() *db.ConvoMessage {
+ for i := len(state.convo) - 1; i >= 0; i-- {
+ msg := state.convo[i]
+ if msg.Stopped || msg.Flags.HasError {
+ continue
+ }
+ return msg
+ }
+ return nil
+}
+
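+// resolveCurrentStage determines the tell stage (planning vs. implementation) and, within planning, the phase (context vs. tasks) from the last successful conversation message, carrying over any paths activated during a context phase.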
+func (state *activeTellStreamState) resolveCurrentStage() (activatePaths map[string]bool, activatePathsOrdered []string) {
+ req := state.req
+ iteration := state.iteration
+ hasContextMap := state.hasContextMap
+ convo := state.convo
+ contextMapEmpty := state.contextMapEmpty
+
+ log.Printf("[resolveCurrentStage] Initial state: hasContextMap: %v, convo len: %d", hasContextMap, len(convo))
+
+ lastConvoMsg := state.lastSuccessfulConvoMessage()
+
+ activatePaths = map[string]bool{}
+ activatePathsOrdered = []string{}
+
+ isContinueFromAssistantMsg := false
+
+ if lastConvoMsg != nil {
+ isContinueFromAssistantMsg = iteration == 0 && req.IsUserContinue && lastConvoMsg.Role == openai.ChatMessageRoleAssistant
+ log.Printf("[resolveCurrentStage] isContinueFromAssistantMsg: %v (IsUserContinue: %v, LastMsgRole: %s)",
+ isContinueFromAssistantMsg, req.IsUserContinue, lastConvoMsg.Role)
+ } else {
+ log.Println("[resolveCurrentStage] No previous successful conversation message found")
+ }
+
+ isUserPrompt := false
+
+ if !isContinueFromAssistantMsg {
+ isUserPrompt = lastConvoMsg == nil || lastConvoMsg.Role == openai.ChatMessageRoleUser
+ log.Printf("[resolveCurrentStage] isUserPrompt: %v", isUserPrompt)
+ }
+
+ var tellStage shared.TellStage
+ var planningPhase shared.PlanningPhase
+
+ if isUserPrompt {
+ tellStage = shared.TellStagePlanning
+ log.Println("[resolveCurrentStage] Set tellStage to Planning due to user prompt")
+ } else {
+ if lastConvoMsg != nil && lastConvoMsg.Flags.DidMakePlan {
+ tellStage = shared.TellStageImplementation
+ log.Println("[resolveCurrentStage] Set tellStage to Implementation - DidMakePlan: true, IsChatOnly: false")
+ } else if lastConvoMsg != nil && lastConvoMsg.Flags.CurrentStage.TellStage == shared.TellStageImplementation {
+ tellStage = shared.TellStageImplementation
+ log.Println("[resolveCurrentStage] Set tellStage to Implementation - CurrentStage: implementation")
+ } else {
+ tellStage = shared.TellStagePlanning
+ log.Printf("[resolveCurrentStage] Set tellStage to Planning - DidMakePlan: %v, IsChatOnly: %v",
+ lastConvoMsg != nil && lastConvoMsg.Flags.DidMakePlan, req.IsChatOnly)
+ }
+ }
+
+ wasContextStage := false
+ if lastConvoMsg != nil {
+ flags := lastConvoMsg.Flags
+ log.Printf("[resolveCurrentStage] Last convo message flags: %+v", flags)
+ if flags.CurrentStage.TellStage == shared.TellStagePlanning && flags.CurrentStage.PlanningPhase == shared.PlanningPhaseContext {
+ wasContextStage = true
+ activatePaths = lastConvoMsg.ActivatedPaths
+ activatePathsOrdered = lastConvoMsg.ActivatedPathsOrdered
+ log.Printf("[resolveCurrentStage] Was context stage, copied activatePaths: %v", activatePaths)
+ }
+ }
+
+ if tellStage == shared.TellStagePlanning {
+ if req.AutoContext && hasContextMap && !contextMapEmpty && !wasContextStage {
+ planningPhase = shared.PlanningPhaseContext
+ log.Printf("[resolveCurrentStage] Set planningPhase to Context - AutoContext: %v, hasContextMap: %v, contextMapEmpty: %v, wasContextStage: %v",
+ req.AutoContext, hasContextMap, contextMapEmpty, wasContextStage)
+ } else {
+ planningPhase = shared.PlanningPhaseTasks
+ log.Printf("[resolveCurrentStage] Set planningPhase to Tasks - AutoContext: %v, hasContextMap: %v, contextMapEmpty: %v, wasContextStage: %v",
+ req.AutoContext, hasContextMap, contextMapEmpty, wasContextStage)
+ }
+ }
+
+ state.currentStage = shared.CurrentStage{
+ TellStage: tellStage,
+ PlanningPhase: planningPhase,
+ }
+ log.Printf("[resolveCurrentStage] Final state - TellStage: %s, PlanningPhase: %s", tellStage, planningPhase)
+
+ return activatePaths, activatePathsOrdered
+}
diff --git a/app/server/model/plan/tell_state.go b/app/server/model/plan/tell_state.go
new file mode 100644
index 0000000000000000000000000000000000000000..8f773b5025d5ea4779cc0b2b55b2eaf01482f25f
--- /dev/null
+++ b/app/server/model/plan/tell_state.go
@@ -0,0 +1,81 @@
+package plan
+
+import (
+ "plandex-server/db"
+ "plandex-server/model"
+ "plandex-server/types"
+ "time"
+
+ shared "plandex-shared"
+
+ "github.com/sashabaranov/go-openai"
+)
+
+type activeTellStreamState struct {
+ activePlan *types.ActivePlan
+ modelStreamId string
+ clients map[string]model.ClientInfo
+ authVars map[string]string
+ req *shared.TellPlanRequest
+ auth *types.ServerAuth
+ currentOrgId string
+ currentUserId string
+ orgUserConfig *shared.OrgUserConfig
+ plan *db.Plan
+ branch string
+ iteration int
+ replyId string
+ modelContext []*db.Context
+ hasContextMap bool
+ contextMapEmpty bool
+ convo []*db.ConvoMessage
+ promptConvoMessage *db.ConvoMessage
+ currentPlanState *shared.CurrentPlanState
+ missingFileResponse shared.RespondMissingFileChoice
+ summaries []*db.ConvoSummary
+ summarizedToMessageId string
+ latestSummaryTokens int
+ userPrompt string
+ promptMessage *openai.ChatCompletionMessage
+ replyParser *types.ReplyParser
+ replyNumTokens int
+ messages []types.ExtendedChatMessage
+ tokensBeforeConvo int
+ totalRequestTokens int
+ settings *shared.PlanSettings
+ subtasks []*db.Subtask
+ currentSubtask *db.Subtask
+ hasAssistantReply bool
+ currentStage shared.CurrentStage
+ chunkProcessor *chunkProcessor
+ generationId string
+
+ requestStartedAt time.Time
+ firstTokenAt time.Time
+ originalReq *types.ExtendedChatCompletionRequest
+ modelConfig *shared.ModelRoleConfig
+ baseModelConfig *shared.BaseModelConfig
+ fallbackRes shared.FallbackResult
+
+ skipConvoMessages map[string]bool
+
+ manualStop []string
+
+ numErrorRetry int
+ numFallbackRetry int
+ modelErr *shared.ModelError
+ noCacheSupportErr bool
+ didProviderFallback bool
+}
+
+type chunkProcessor struct {
+ replyOperations []*shared.Operation
+ chunksReceived int
+ maybeRedundantOpeningTagContent string
+ fileOpen bool
+ contentBuffer string
+ awaitingBlockOpeningTag bool
+ awaitingBlockClosingTag bool
+ awaitingOpClosingTag bool
+ awaitingBackticks bool
+}
diff --git a/app/server/model/plan/tell_stream_error.go b/app/server/model/plan/tell_stream_error.go
new file mode 100644
index 0000000000000000000000000000000000000000..64e601b254524379aa14ae41094911470d84851f
--- /dev/null
+++ b/app/server/model/plan/tell_stream_error.go
@@ -0,0 +1,270 @@
+package plan
+
+import (
+ "context"
+ "fmt"
+ "log"
+ "math/rand"
+ "net/http"
+ "plandex-server/db"
+ "plandex-server/model"
+ "plandex-server/notify"
+ "plandex-server/shutdown"
+ "strconv"
+ "time"
+
+ shared "plandex-shared"
+)
+
+type onErrorParams struct {
+ streamErr error
+ streamApiErr *shared.ApiError
+ storeDesc bool
+ convoMessageId string
+ commitMsg string
+ canRetry bool
+ modelErr *shared.ModelError
+}
+
+type onErrorResult struct {
+ shouldContinueMainLoop bool
+ shouldReturn bool
+}
+
+func (state *activeTellStreamState) onError(params onErrorParams) onErrorResult {
+ log.Printf("\nStream error: %v\n", params.streamErr)
+ streamErr := params.streamErr
+ storeDesc := params.storeDesc
+ convoMessageId := params.convoMessageId
+ commitMsg := params.commitMsg
+ modelErr := params.modelErr
+
+ planId := state.plan.Id
+ branch := state.branch
+ currentOrgId := state.currentOrgId
+ summarizedToMessageId := state.summarizedToMessageId
+
+ active := GetActivePlan(planId, branch)
+
+ if active == nil {
+ log.Printf("tellStream onError - Active plan not found for plan ID %s on branch %s\n", planId, branch)
+ return onErrorResult{
+ shouldReturn: true,
+ }
+ }
+
+ canRetry := params.canRetry
+ isFallback := state.fallbackRes.IsFallback
+
+ maxRetries := model.MAX_RETRIES_WITHOUT_FALLBACK
+ if isFallback {
+ maxRetries = model.MAX_ADDITIONAL_RETRIES_WITH_FALLBACK
+ }
+
+ compareRetries := state.numErrorRetry
+ if isFallback {
+ compareRetries = state.numFallbackRetry
+ }
+
+ potentialFallback := state.modelConfig.GetFallbackForModelError(
+ state.numErrorRetry,
+ state.didProviderFallback,
+ modelErr,
+ state.authVars,
+ state.settings,
+ state.orgUserConfig,
+ )
+
+ newFallback := false
+ if modelErr != nil {
+ if !modelErr.Retriable {
+ log.Printf("tellStream onError - operation returned non-retriable error: %v", modelErr)
+ if !potentialFallback.IsFallback {
+ canRetry = false
+ } else {
+ log.Printf("tellStream onError - operation returned non-retriable error, but has fallback - resetting numFallbackRetry to 0 and continuing to retry")
+ state.numFallbackRetry = 0
+ // otherwise, continue to retry logic
+ canRetry = true
+ newFallback = true
+ }
+ }
+ }
+
+ if canRetry {
+ log.Println("tellStream onError - canRetry", canRetry)
+
+ if compareRetries >= maxRetries {
+ log.Printf("tellStream onError - Max retries reached for plan ID %s on branch %s\n", planId, branch)
+
+ canRetry = false
+ }
+ }
+
+ if canRetry {
+ log.Println("tellStream onError - retrying stream")
+ // stop stream via context (ensures we stop child streams too)
+ active.CancelModelStreamFn()
+
+ active.ResetModelCtx()
+
+ var retryDelay time.Duration
+ if modelErr != nil && modelErr.RetryAfterSeconds > 0 {
+ // if the model err has a retry after, then use that with a bit of padding
+ retryDelay = time.Duration(int(float64(modelErr.RetryAfterSeconds)*1.1)) * time.Second
+ } else {
+ // otherwise, use some jitter
+ retryDelay = time.Duration(1000+rand.Intn(200)) * time.Millisecond
+ }
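+ // e.g. RetryAfterSeconds=10 yields an 11s delay; with no hint, a jittered
+ // 1.0-1.2s backoff is used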
+
+ cacheSupportErr := modelErr != nil && modelErr.Kind == shared.ErrCacheSupport
+
+ numErrorRetry := state.numErrorRetry
+ if modelErr != nil && modelErr.ShouldIncrementRetry() {
+ numErrorRetry = numErrorRetry + 1
+ }
+
+ log.Printf("tellStream onError - Retry %d/%d - Retrying stream in %v", numErrorRetry, maxRetries, retryDelay)
+ time.Sleep(retryDelay)
+
+ state.numErrorRetry = numErrorRetry
+ if isFallback && !newFallback && modelErr != nil && modelErr.ShouldIncrementRetry() {
+ state.numFallbackRetry = state.numFallbackRetry + 1
+ }
+
+ // if we got a cache support error, keep everything the same, including the modelErr (if we're already retrying) so we can make the exact same request again without cache control breakpoints
+ if cacheSupportErr {
+ state.noCacheSupportErr = true
+ } else {
+ state.modelErr = modelErr
+
+ if newFallback {
+ // if we got a new fallback, we need to reset the noCacheSupportErr flag since we're using a different model now
+ state.noCacheSupportErr = false
+ }
+ }
+
+ // retry the request
+ state.doTellRequest()
+ return onErrorResult{
+ shouldReturn: true,
+ }
+ }
+
+ storeDescAndReply := func() error {
+ log.Println("tellStream onError - storing desc and reply")
+ ctx, cancelFn := context.WithTimeout(shutdown.ShutdownCtx, 5*time.Second)
+
+ err := db.ExecRepoOperation(db.ExecRepoOperationParams{
+ OrgId: currentOrgId,
+ UserId: state.currentUserId,
+ PlanId: planId,
+ Branch: branch,
+ Scope: db.LockScopeWrite,
+ Ctx: ctx,
+ CancelFn: cancelFn,
+ Reason: "store desc and reply",
+ }, func(repo *db.GitRepo) error {
+ storedMessage := false
+ storedDesc := false
+
+ if convoMessageId == "" {
+ hasUnfinishedSubtasks := false
+ for _, subtask := range state.subtasks {
+ if !subtask.IsFinished {
+ hasUnfinishedSubtasks = true
+ break
+ }
+ }
+
+ assistantMsg, msg, err := state.storeAssistantReply(repo, storeAssistantReplyParams{
+ flags: shared.ConvoMessageFlags{
+ CurrentStage: state.currentStage,
+ HasUnfinishedSubtasks: hasUnfinishedSubtasks,
+ HasError: true,
+ },
+ subtask: nil,
+ addedSubtasks: nil,
+ })
+ if err == nil {
+ convoMessageId = assistantMsg.Id
+ commitMsg = msg
+ storedMessage = true
+ } else {
+ log.Printf("Error storing assistant message after stream error: %v\n", err)
+ return err
+ }
+ }
+
+ if storeDesc && convoMessageId != "" {
+ err := db.StoreDescription(&db.ConvoMessageDescription{
+ OrgId: currentOrgId,
+ PlanId: planId,
+ SummarizedToMessageId: summarizedToMessageId,
+ WroteFiles: false,
+ ConvoMessageId: convoMessageId,
+ BuildPathsInvalidated: map[string]bool{},
+ Error: streamErr.Error(),
+ })
+ if err == nil {
+ storedDesc = true
+ } else {
+ log.Printf("Error storing description after stream error: %v\n", err)
+ return err
+ }
+ }
+
+ if storedMessage || storedDesc {
+ err := repo.GitAddAndCommit(branch, commitMsg)
+ if err != nil {
+ log.Printf("Error committing after stream error: %v\n", err)
+ return err
+ }
+ }
+
+ return nil
+ })
+
+ if err != nil {
+ log.Printf("Error storing description and reply after stream error: %v\n", err)
+ return err
+ }
+
+ return nil
+ }
+
+ if active.CurrentReplyContent != "" {
+ storeDescAndReply() // best effort to store description and reply, ignore errors
+ }
+
+ if params.streamApiErr != nil {
+ active.StreamDoneCh <- params.streamApiErr
+ } else {
+ msg := "Stream error: " + streamErr.Error()
+ if params.canRetry && state.numErrorRetry >= maxRetries {
+ msg += " | Failed after " + strconv.Itoa(state.numErrorRetry) + " retries"
+ }
+
+ go notify.NotifyErr(notify.SeverityInfo, fmt.Sprintf("tellStream stream error after %d retries: %v", state.numErrorRetry, streamErr))
+
+ active.StreamDoneCh <- &shared.ApiError{
+ Type: shared.ApiErrorTypeOther,
+ Status: http.StatusInternalServerError,
+ Msg: msg,
+ }
+ }
+
+ return onErrorResult{
+ shouldContinueMainLoop: true,
+ }
+}
+
+func (state *activeTellStreamState) onActivePlanMissingError() {
+ planId := state.plan.Id
+ branch := state.branch
+ log.Printf("Active plan not found for plan ID %s on branch %s\n", planId, branch)
+ state.onError(onErrorParams{
+ streamErr: fmt.Errorf("active plan not found for plan ID %s on branch %s", planId, branch),
+ storeDesc: true,
+ })
+}
diff --git a/app/server/model/plan/tell_stream_finish.go b/app/server/model/plan/tell_stream_finish.go
new file mode 100644
index 0000000000000000000000000000000000000000..5f3825542ff1be137c657b82c11dbcefaa95b399
--- /dev/null
+++ b/app/server/model/plan/tell_stream_finish.go
@@ -0,0 +1,277 @@
+package plan
+
+import (
+ "fmt"
+ "log"
+ "net/http"
+ "plandex-server/db"
+ "plandex-server/notify"
+ "plandex-server/types"
+ "runtime/debug"
+ "time"
+
+ shared "plandex-shared"
+
+ "github.com/davecgh/go-spew/spew"
+)
+
+const MaxAutoContinueIterations = 200
+
+type handleStreamFinishedResult struct {
+ shouldContinueMainLoop bool
+ shouldReturn bool
+}
+
+func (state *activeTellStreamState) handleStreamFinished() handleStreamFinishedResult {
+ planId := state.plan.Id
+ branch := state.branch
+ auth := state.auth
+ plan := state.plan
+ req := state.req
+ clients := state.clients
+ authVars := state.authVars
+ settings := state.settings
+ orgUserConfig := state.orgUserConfig
+ currentOrgId := state.currentOrgId
+ summaries := state.summaries
+ convo := state.convo
+ iteration := state.iteration
+ replyOperations := state.chunkProcessor.replyOperations
+
+ err := state.setActivePlan()
+ if err != nil {
+ state.onActivePlanMissingError()
+ return handleStreamFinishedResult{
+ shouldContinueMainLoop: true,
+ shouldReturn: false,
+ }
+ }
+
+ active := state.activePlan
+
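+ // the short sleeps around the flushes below give buffered reply chunks time
+ // to reach the client before the describing-phase message is sent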
+ time.Sleep(30 * time.Millisecond)
+ active.FlushStreamBuffer()
+ time.Sleep(100 * time.Millisecond)
+
+ active.Stream(shared.StreamMessage{
+ Type: shared.StreamMessageDescribing,
+ })
+ active.FlushStreamBuffer()
+
+ err = db.SetPlanStatus(planId, branch, shared.PlanStatusDescribing, "")
+ if err != nil {
+ res := state.onError(onErrorParams{
+ streamErr: fmt.Errorf("failed to set plan status to describing: %v", err),
+ storeDesc: true,
+ })
+
+ return handleStreamFinishedResult{
+ shouldContinueMainLoop: res.shouldContinueMainLoop,
+ shouldReturn: res.shouldReturn,
+ }
+ }
+
+ autoLoadContextResult := state.checkAutoLoadContext()
+ checkNewSubtasksResult := state.checkNewSubtasks()
+
+ hasExplicitTasks := checkNewSubtasksResult.hasExplicitTasks
+ addedSubtasks := checkNewSubtasksResult.newSubtasks
+
+ checkRemoveSubtasksResult := state.checkRemoveSubtasks()
+
+ removedSubtasks := checkRemoveSubtasksResult.removedSubtasks
+ hasExplicitRemoveTasks := checkRemoveSubtasksResult.hasExplicitRemoveTasks
+
+ log.Println("removedSubtasks:\n", spew.Sdump(removedSubtasks))
+ log.Println("addedSubtasks:\n", spew.Sdump(addedSubtasks))
+ log.Println("hasNewSubtasks:\n", hasExplicitTasks)
+
+ handleDescAndExecStatusRes := state.handleDescAndExecStatus()
+ if handleDescAndExecStatusRes.shouldContinueMainLoop || handleDescAndExecStatusRes.shouldReturn {
+ return handleDescAndExecStatusRes.handleStreamFinishedResult
+ }
+ generatedDescription := handleDescAndExecStatusRes.generatedDescription
+ subtaskFinished := handleDescAndExecStatusRes.subtaskFinished
+
+ log.Printf("subtaskFinished: %v\n", subtaskFinished)
+
+ storeOnFinishedResult := state.storeOnFinished(storeOnFinishedParams{
+ replyOperations: replyOperations,
+ generatedDescription: generatedDescription,
+ subtaskFinished: subtaskFinished,
+ hasNewSubtasks: hasExplicitTasks,
+ autoLoadContextResult: autoLoadContextResult,
+ addedSubtasks: addedSubtasks,
+ removedSubtasks: removedSubtasks,
+ })
+ if storeOnFinishedResult.shouldContinueMainLoop || storeOnFinishedResult.shouldReturn {
+ return storeOnFinishedResult.handleStreamFinishedResult
+ }
+ allSubtasksFinished := storeOnFinishedResult.allSubtasksFinished
+
+ log.Println("allSubtasksFinished:\n", spew.Sdump(allSubtasksFinished))
+
+ // summarize convo needs to come *after* the reply is stored in order to correctly summarize the latest message
+ log.Println("summarizing convo in background")
+ // summarize in the background
+ go func() {
+
+ defer func() {
+ if r := recover(); r != nil {
+ log.Printf("panic in summarizeConvo: %v\n%s", r, debug.Stack())
+ active.StreamDoneCh <- &shared.ApiError{
+ Type: shared.ApiErrorTypeOther,
+ Status: http.StatusInternalServerError,
+ Msg: fmt.Sprintf("Error summarizing convo: %v", r),
+ }
+ }
+ }()
+
+ err := summarizeConvo(clients, authVars, settings, orgUserConfig, summarizeConvoParams{
+ auth: auth,
+ plan: plan,
+ branch: branch,
+ convo: convo,
+ summaries: summaries,
+ userPrompt: state.userPrompt,
+ currentOrgId: currentOrgId,
+ currentReply: active.CurrentReplyContent,
+ currentReplyNumTokens: active.NumTokens,
+ modelPackName: settings.GetModelPack().Name,
+ }, active.SummaryCtx)
+
+ if err != nil {
+ log.Printf("Error summarizing convo: %v\n", err)
+ active.StreamDoneCh <- err
+ }
+ }()
+
+ log.Println("Sending active.CurrentReplyDoneCh <- true")
+
+ active.CurrentReplyDoneCh <- true
+
+ log.Println("Resetting active.CurrentReplyDoneCh")
+
+ UpdateActivePlan(planId, branch, func(ap *types.ActivePlan) {
+ ap.CurrentStreamingReplyId = ""
+ ap.CurrentReplyDoneCh = nil
+ })
+
+ autoLoadPaths := autoLoadContextResult.autoLoadPaths
+ log.Printf("len(autoLoadPaths): %d\n", len(autoLoadPaths))
+ if len(autoLoadPaths) > 0 {
+ log.Println("Sending stream message to load context files")
+
+ go func() {
+ defer func() {
+ if r := recover(); r != nil {
+ log.Printf("panic streaming auto-load context: %v\n%s", r, debug.Stack())
+ go notify.NotifyErr(notify.SeverityError, fmt.Errorf("panic streaming auto-load context: %v\n%s", r, debug.Stack()))
+ }
+ }()
+
+ active.Stream(shared.StreamMessage{
+ Type: shared.StreamMessageLoadContext,
+ LoadContextFiles: autoLoadPaths,
+ })
+ active.FlushStreamBuffer()
+ }()
+
+ log.Println("Waiting for client to auto load context (30s timeout)")
+
+ select {
+ case <-active.Ctx.Done():
+ log.Println("Context cancelled while waiting for auto load context")
+ state.execHookOnStop(false)
+ return handleStreamFinishedResult{
+ shouldContinueMainLoop: false,
+ shouldReturn: true,
+ }
+ case <-time.After(30 * time.Second):
+ log.Println("Timeout waiting for auto load context")
+ res := state.onError(onErrorParams{
+ streamErr: fmt.Errorf("timeout waiting for auto load context response"),
+ storeDesc: true,
+ })
+ return handleStreamFinishedResult{
+ shouldContinueMainLoop: res.shouldContinueMainLoop,
+ shouldReturn: res.shouldReturn,
+ }
+ case <-active.AutoLoadContextCh:
+ }
+ }
+
+ willContinue := state.willContinuePlan(willContinuePlanParams{
+ hasNewSubtasks: hasExplicitTasks,
+ allSubtasksFinished: allSubtasksFinished,
+ activatePaths: autoLoadContextResult.activatePaths,
+ removedSubtasks: hasExplicitRemoveTasks,
+ hasExplicitPaths: autoLoadContextResult.hasExplicitPaths,
+ })
+
+ if willContinue {
+ log.Println("Auto continue plan")
+ // continue plan
+ execTellPlan(execTellPlanParams{
+ clients: clients,
+ plan: plan,
+ branch: branch,
+ auth: auth,
+ req: req,
+ iteration: iteration + 1,
+ authVars: authVars,
+ })
+ } else {
+ var buildFinished bool
+ UpdateActivePlan(planId, branch, func(ap *types.ActivePlan) {
+ buildFinished = ap.BuildFinished()
+ ap.RepliesFinished = true
+ })
+
+ log.Printf("Won't continue plan. Build finished: %v\n", buildFinished)
+
+ time.Sleep(50 * time.Millisecond)
+
+ if buildFinished {
+ log.Println("Reply is finished and build is finished, calling active.Finish()")
+ active := GetActivePlan(planId, branch)
+
+ if active == nil {
+ state.onActivePlanMissingError()
+ return handleStreamFinishedResult{
+ shouldContinueMainLoop: true,
+ shouldReturn: false,
+ }
+ }
+
+ active.Finish()
+ } else {
+ log.Println("Plan is still building")
+ log.Println("Updating status to building")
+ err := db.SetPlanStatus(planId, branch, shared.PlanStatusBuilding, "")
+ if err != nil {
+ log.Printf("Error setting plan status to building: %v\n", err)
+ go notify.NotifyErr(notify.SeverityError, fmt.Errorf("error setting plan status to building: %v", err))
+
+ active.StreamDoneCh <- &shared.ApiError{
+ Type: shared.ApiErrorTypeOther,
+ Status: http.StatusInternalServerError,
+ Msg: fmt.Sprintf("Error setting plan status to building: %v", err),
+ }
+
+ return handleStreamFinishedResult{
+ shouldContinueMainLoop: true,
+ shouldReturn: false,
+ }
+ }
+
+ log.Println("Sending RepliesFinished stream message")
+ active.Stream(shared.StreamMessage{
+ Type: shared.StreamMessageRepliesFinished,
+ })
+
+ }
+ }
+
+ return handleStreamFinishedResult{}
+}
diff --git a/app/server/model/plan/tell_stream_main.go b/app/server/model/plan/tell_stream_main.go
new file mode 100644
index 0000000000000000000000000000000000000000..6bdba0110d3080f716133a6796fa40dafece4c46
--- /dev/null
+++ b/app/server/model/plan/tell_stream_main.go
@@ -0,0 +1,307 @@
+package plan
+
+import (
+ "errors"
+ "fmt"
+ "log"
+ "net/http"
+ "plandex-server/model"
+ "plandex-server/notify"
+ "plandex-server/types"
+ "runtime/debug"
+ "strings"
+ "time"
+
+ shared "plandex-shared"
+
+ "github.com/davecgh/go-spew/spew"
+)
+
+func (state *activeTellStreamState) listenStream(stream *model.ExtendedChatCompletionStream) {
+ defer stream.Close()
+
+ plan := state.plan
+ planId := plan.Id
+ branch := state.branch
+
+ active := GetActivePlan(planId, branch)
+
+ if active == nil {
+ log.Printf("listenStream - Active plan not found for plan ID %s on branch %s\n", planId, branch)
+ return
+ }
+
+ defer func() {
+ if r := recover(); r != nil {
+ log.Printf("listenStream: Panic: %v\n%s\n", r, string(debug.Stack()))
+
+ go notify.NotifyErr(notify.SeverityError, fmt.Errorf("listenStream: Panic: %v\n%s", r, string(debug.Stack())))
+
+ active.StreamDoneCh <- &shared.ApiError{
+ Type: shared.ApiErrorTypeOther,
+ Status: http.StatusInternalServerError,
+ Msg: "Panic in listenStream",
+ }
+ }
+ }()
+
+ state.chunkProcessor = &chunkProcessor{
+ replyOperations: []*shared.Operation{},
+ chunksReceived: 0,
+ maybeRedundantOpeningTagContent: "",
+ fileOpen: false,
+ contentBuffer: "",
+ awaitingBlockOpeningTag: false,
+ awaitingBlockClosingTag: false,
+ awaitingBackticks: false,
+ }
+
+ // Create a timer that will trigger if no chunk is received within the specified duration
+ firstTokenTimeout := firstTokenTimeout(state.totalRequestTokens, state.baseModelConfig.LocalOnly)
+ log.Printf("listenStream - firstTokenTimeout: %s\n", firstTokenTimeout)
+ timer := time.NewTimer(firstTokenTimeout)
+ defer timer.Stop()
+ streamFinished := false
+
+ baseModelConfig := state.modelConfig.GetBaseModelConfig(state.authVars, state.settings, state.orgUserConfig)
+
+ modelProvider := baseModelConfig.Provider
+ modelName := baseModelConfig.ModelName
+
+ respCh := make(chan *types.ExtendedChatCompletionStreamResponse)
+ streamErrCh := make(chan error)
+
+ // receive chunks from the stream in a separate goroutine so that we can handle errors and timeouts — needed because stream.Recv() blocks forever
+ go func() {
+ for {
+ resp, err := stream.Recv()
+ if err != nil {
+ streamErrCh <- err
+ return
+ }
+ respCh <- resp
+ }
+ }()
+
+mainLoop:
+ for {
+ select {
+ case <-active.Ctx.Done():
+ // The main modelContext was canceled (not the timer)
+ log.Println("\nTell: stream canceled")
+ state.execHookOnStop(false)
+ return
+ case <-timer.C:
+ // Timer triggered because no new chunk was received in time
+ log.Println("\nTell: stream timeout due to inactivity")
+ if streamFinished {
+ log.Println("Tell stream finished—timed out waiting for usage chunk")
+ state.execHookOnStop(false)
+ return
+ } else {
+ res := state.onError(onErrorParams{
+ streamErr: fmt.Errorf("stream timeout due to inactivity: The AI model (%s/%s) is not responding", modelProvider, modelName),
+ storeDesc: true,
+ canRetry: active.CurrentReplyContent == "", // if there was no output yet, we can retry
+ })
+
+ if res.shouldReturn {
+ return
+ }
+ if res.shouldContinueMainLoop {
+ continue mainLoop
+ }
+ }
+
+ case err := <-streamErrCh:
+ log.Printf("listenStream - received from streamErrCh: %v\n", err)
+
+ if err.Error() == "context canceled" {
+ log.Println("Tell: stream context canceled")
+ state.execHookOnStop(false)
+ return
+ }
+
+ log.Printf("Tell: error receiving stream chunk: %v\n", err)
+ state.execHookOnStop(true)
+
+ var msg string
+ name := modelName
+ if !strings.Contains(string(modelName), string(modelProvider)) {
+ name = shared.ModelName(fmt.Sprintf("%s/%s", modelProvider, modelName))
+ }
+ if active.CurrentReplyContent == "" {
+ msg = fmt.Sprintf("The AI model (%s) didn't respond: %v", name, err)
+ } else {
+ msg = fmt.Sprintf("The AI model (%s) stopped responding: %v", name, err)
+ }
+ state.onError(onErrorParams{
+ streamErr: errors.New(msg),
+ storeDesc: true,
+ canRetry: active.CurrentReplyContent == "", // if there was no output yet, we can retry
+ })
+ // here we want to return no matter what -- state.onError will decide whether to retry or not
+ return
+ case response := <-respCh:
+ // Successfully received a chunk, reset the timer
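+ // Stop returns false if the timer already fired; drain the expired value
+ // before Reset so a stale timeout doesn't fire immediately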
+ if !timer.Stop() {
+ <-timer.C
+ }
+ timer.Reset(model.ACTIVE_STREAM_CHUNK_TIMEOUT)
+
+ // log.Println("tell stream main: received stream response", spew.Sdump(response))
+
+ if response.ID != "" && state.generationId == "" {
+ state.generationId = response.ID
+ }
+
+ if state.firstTokenAt.IsZero() {
+ state.firstTokenAt = time.Now()
+ }
+
+ if response.Error != nil {
+ log.Println("listenStream - stream finished with error", spew.Sdump(response.Error))
+
+ baseModelConfig := state.fallbackRes.BaseModelConfig
+ modelErr := model.ClassifyModelError(response.Error.Code, response.Error.Message, nil, baseModelConfig.HasClaudeMaxAuth)
+
+ res := state.onError(onErrorParams{
+ streamErr: fmt.Errorf("The AI model (%s/%s) stopped streaming with error code %d: %s", modelProvider, modelName, response.Error.Code, response.Error.Message),
+ storeDesc: true,
+ canRetry: active.CurrentReplyContent == "",
+ modelErr: &modelErr,
+ })
+ if res.shouldReturn {
+ return
+ }
+ if res.shouldContinueMainLoop {
+ continue mainLoop
+ }
+ }
+
+ if len(response.Choices) == 0 {
+ if response.Usage != nil {
+ state.handleUsageChunk(response.Usage)
+ return
+ }
+
+ log.Println("listenStream - stream finished with no choices", spew.Sdump(response))
+
+ // Previously we'd return an error if there were no choices, but some models do this and then keep streaming, so we'll just log it and continue, waiting for an EOF if there's a problem
+ // res := state.onError(onErrorParams{
+ // streamErr: fmt.Errorf("stream finished with no choices | The model failed to generate a valid response."),
+ // storeDesc: true,
+ // canRetry: true,
+ // })
+ // if res.shouldReturn {
+ // return
+ // }
+ // if res.shouldContinueMainLoop {
+ // // continue instead of returning so that context cancellation is handled
+ // continue mainLoop
+ // }
+
+ continue mainLoop
+ }
+
+ choice := response.Choices[0]
+
+ processChunkRes := state.processChunk(choice)
+ if processChunkRes.shouldReturn {
+ return
+ }
+
+ handleFinished := func() handleStreamFinishedResult {
+ streamFinishResult := state.handleStreamFinished()
+ if streamFinishResult.shouldReturn || streamFinishResult.shouldContinueMainLoop {
+ return streamFinishResult
+ }
+
+ // usage can either be included in the final chunk (openrouter) or in a separate chunk (openai)
+ // if the usage chunk is included, handle it and then return out of listener
+ // otherwise keep listening for the usage chunk
+ if response.Usage != nil {
+ state.handleUsageChunk(response.Usage)
+ return handleStreamFinishedResult{
+ shouldReturn: true,
+ }
+ }
+
+ // Reset the timer for the usage chunk
+ if !timer.Stop() {
+ <-timer.C
+ }
+ timer.Reset(model.USAGE_CHUNK_TIMEOUT)
+ streamFinished = true
+
+ return handleStreamFinishedResult{
+ shouldContinueMainLoop: true,
+ }
+ }
+
+ if processChunkRes.shouldStop {
+ log.Println("Model stream reached stop sequence")
+
+ res := handleFinished()
+ if res.shouldReturn {
+ return
+ }
+ continue
+ }
+
+ if choice.FinishReason != "" {
+ log.Println("Model stream finished")
+ log.Println("Finish reason: ", choice.FinishReason)
+
+ if choice.FinishReason == "error" {
+ log.Println("Model stream finished with error")
+
+ res := state.onError(onErrorParams{
+ streamErr: fmt.Errorf("The AI model (%s/%s) stopped streaming with an error status", modelProvider, modelName),
+ storeDesc: true,
+ canRetry: active.CurrentReplyContent == "",
+ })
+ if res.shouldReturn {
+ return
+ }
+ if res.shouldContinueMainLoop {
+ continue mainLoop
+ }
+ }
+
+ res := handleFinished()
+ if res.shouldReturn {
+ return
+ }
+ continue
+ } else if response.Usage != nil {
+ state.handleUsageChunk(response.Usage)
+ return
+ }
+ // let main loop continue
+ }
+ }
+}
+
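+// firstTokenTimeout scales the time allowed before the first token with the
+// request size: a 90s base for up to 150k tokens, plus 90s per additional 150k,
+// capped at 15 minutes (e.g. a 450k-token request waits 90s + 2*90s = 270s).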
+func firstTokenTimeout(tok int, isLocalModel bool) time.Duration {
+ const (
+ base = 90 * time.Second
+ slope = 90 * time.Second
+ step = 150_000
+ cap = 15 * time.Minute
+ )
+
+ // local models can have a long cold start, and timeouts are less relevant
+ if isLocalModel {
+ return cap
+ }
+
+ if tok <= step {
+ return base
+ }
+ extra := time.Duration((tok-step)/step) * slope
+ if extra > cap-base {
+ extra = cap - base
+ }
+ return base + extra
+}
diff --git a/app/server/model/plan/tell_stream_processor.go b/app/server/model/plan/tell_stream_processor.go
new file mode 100644
index 0000000000000000000000000000000000000000..1b3dfde8b518a8970515b706195082f24076ceca
--- /dev/null
+++ b/app/server/model/plan/tell_stream_processor.go
@@ -0,0 +1,751 @@
+package plan
+
+import (
+ "fmt"
+ "log"
+ "net/http"
+ "plandex-server/db"
+ "plandex-server/notify"
+ "plandex-server/types"
+ "regexp"
+ "runtime/debug"
+ "strings"
+ "time"
+
+ shared "plandex-shared"
+
+ "github.com/davecgh/go-spew/spew"
+)
+
+const verboseLogging = false
+
+// matches a complete opening <PlandexBlock lang="..."> tag (path attribute optional) and captures the language
+var openingTagRegex = regexp.MustCompile(`<PlandexBlock\s+lang="(.+?)".*?>`)
+
+type processChunkResult struct {
+ shouldReturn bool
+ shouldStop bool
+}
+
+func (state *activeTellStreamState) processChunk(choice types.ExtendedChatCompletionStreamChoice) processChunkResult {
+ req := state.req
+ // missingFileResponse := state.missingFileResponse
+ processor := state.chunkProcessor
+ replyParser := state.replyParser
+ plan := state.plan
+ planId := plan.Id
+ branch := state.branch
+ active := GetActivePlan(planId, branch)
+
+ if active == nil {
+ state.onActivePlanMissingError()
+ return processChunkResult{}
+ }
+
+ defer func() {
+ if r := recover(); r != nil {
+ log.Printf("processChunk: Panic: %v\n%s\n", r, string(debug.Stack()))
+
+ go notify.NotifyErr(notify.SeverityError, fmt.Errorf("processChunk: Panic: %v\n%s", r, string(debug.Stack())))
+
+ active.StreamDoneCh <- &shared.ApiError{
+ Type: shared.ApiErrorTypeOther,
+ Status: http.StatusInternalServerError,
+ Msg: fmt.Sprintf("Panic in processChunk: %v\n%s", r, string(debug.Stack())),
+ }
+ }
+ }()
+
+ delta := choice.Delta
+ content := delta.Content
+
+ baseModelConfig := state.modelConfig.GetBaseModelConfig(state.authVars, state.settings, state.orgUserConfig)
+
+ if baseModelConfig.IncludeReasoning && !baseModelConfig.HideReasoning && delta.Reasoning != "" {
+ content = delta.Reasoning
+ }
+
+ if content == "" {
+ return processChunkResult{}
+ }
+
+ processor.chunksReceived++
+
+ if verboseLogging {
+ log.Printf("Adding chunk to parser: %s\n", content)
+ log.Printf("fileOpen: %v\n", processor.fileOpen)
+ }
+
+ replyParser.AddChunk(content, true)
+ parserRes := replyParser.Read()
+
+ if !processor.fileOpen && parserRes.CurrentFilePath != "" {
+ if verboseLogging {
+ log.Printf("File open: %s\n", parserRes.CurrentFilePath)
+ }
+ processor.fileOpen = true
+ }
+
+ if processor.fileOpen && strings.HasSuffix(active.CurrentReplyContent+content, "") {
+ if verboseLogging {
+ log.Println("FinishAndRead because of closing tag")
+ }
+ parserRes = replyParser.FinishAndRead()
+ processor.fileOpen = false
+ }
+
+ if processor.fileOpen && parserRes.CurrentFilePath == "" {
+ if verboseLogging {
+ log.Println("File open but current file path is empty, closing file")
+ }
+ processor.fileOpen = false
+ }
+
+ operations := parserRes.Operations
+ state.replyNumTokens = parserRes.TotalTokens
+ currentFile := parserRes.CurrentFilePath
+
+ // log.Printf("currentFile: %s\n", currentFile)
+ // log.Println("files:")
+ // spew.Dump(files)
+
+ // Handle file that is present in project paths but not in context
+ // Prompt user for what to do on the client side, stop the stream, and wait for user response before proceeding
+ bufferOrStreamRes := processor.bufferOrStream(content, &parserRes, state.currentStage, state.manualStop)
+
+ if currentFile != "" &&
+ !req.IsChatOnly &&
+ active.ContextsByPath[currentFile] == nil &&
+ req.ProjectPaths[currentFile] &&
+ !active.AllowOverwritePaths[currentFile] {
+ return state.handleMissingFile(bufferOrStreamRes.content, currentFile, bufferOrStreamRes.blockLang)
+ }
+
+ UpdateActivePlan(planId, branch, func(ap *types.ActivePlan) {
+ ap.CurrentReplyContent += content
+ ap.NumTokens++
+ })
+
+ if verboseLogging {
+ log.Println("processor before bufferOrStream")
+ spew.Dump(processor)
+ log.Println("maybeFilePath", parserRes.MaybeFilePath)
+ log.Println("currentFilePath", parserRes.CurrentFilePath)
+ log.Println("bufferOrStreamRes")
+ spew.Dump(bufferOrStreamRes)
+ }
+
+ if bufferOrStreamRes.shouldStream {
+ active.Stream(shared.StreamMessage{
+ Type: shared.StreamMessageReply,
+ ReplyChunk: bufferOrStreamRes.content,
+ })
+ }
+
+ if verboseLogging {
+ log.Println("processor after bufferOrStream")
+ spew.Dump(processor)
+ }
+
+ if !req.IsChatOnly && len(operations) > len(processor.replyOperations) {
+ state.handleNewOperations(&parserRes)
+ }
+
+ return processChunkResult{
+ shouldStop: bufferOrStreamRes.shouldStop,
+ }
+}
+
+type bufferOrStreamResult struct {
+ shouldStream bool
+ content string
+ blockLang string
+ shouldStop bool
+}
+
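+// bufferOrStream decides, chunk by chunk, whether streamed content can be sent
+// to the client immediately or must be buffered: partial stop sequences and
+// partial <PlandexBlock>/<EndPlandexFileOps/> tags are held back until they
+// either complete (and are rewritten or stripped) or turn out to be ordinary
+// text. For example, if one chunk ends with "<Plandex" and the next begins with
+// `Block lang="go">`, nothing streams until the tag resolves to "```go".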
+func (processor *chunkProcessor) bufferOrStream(content string, parserRes *types.ReplyParserRes, currentStage shared.CurrentStage, manualStopSequences []string) bufferOrStreamResult {
+ if len(manualStopSequences) > 0 {
+ for _, stopSequence := range manualStopSequences {
+
+ // if the chunk contains the entire stop sequence, stream everything before it then caller can stop the stream
+ if strings.Contains(content, stopSequence) {
+ split := strings.Split(content, stopSequence)
+ if len(split) > 1 {
+ return bufferOrStreamResult{
+ shouldStream: true,
+ content: split[0],
+ shouldStop: true,
+ }
+ } else {
+ // there was nothing before the stop sequence, so nothing to stream
+ return bufferOrStreamResult{
+ shouldStream: false,
+ shouldStop: true,
+ }
+ }
+ }
+
+ // otherwise if the buffer plus chunk contains the stop sequence, don't stream anything and stop the stream
+ if strings.Contains(processor.contentBuffer+content, stopSequence) {
+ log.Printf("bufferOrStream - stop sequence found in buffer plus chunk\n")
+ split := strings.Split(content, stopSequence)
+ if len(split) > 1 {
+ // we'll stream the part before the stop sequence
+ return bufferOrStreamResult{
+ shouldStream: true,
+ content: split[0],
+ shouldStop: true,
+ }
+ } else {
+ // there was nothing before the stop sequence, so nothing to stream
+ return bufferOrStreamResult{
+ shouldStream: false,
+ shouldStop: true,
+ }
+ }
+ }
+
+ // otherwise if the buffer plus chunk ends with a prefix of the stop sequence, buffer it and continue
+
+ toCheck := processor.contentBuffer + content
+ tailLen := len(stopSequence) - 1
+ if tailLen > len(toCheck) {
+ tailLen = len(toCheck)
+ }
+ suffix := toCheck[len(toCheck)-tailLen:]
+
+ if strings.HasPrefix(stopSequence, suffix) {
+ log.Printf("bufferOrStream - stop sequence prefix found in buffer plus chunk. buffer and continue\n")
+ processor.contentBuffer += content
+ return bufferOrStreamResult{
+ shouldStream: false,
+ content: content,
+ }
+ }
+
+ }
+ }
+
+ // apart from manual stop sequences, no buffering in planning stages
+ if currentStage.TellStage == shared.TellStagePlanning {
+ return bufferOrStreamResult{
+ shouldStream: true,
+ content: content,
+ }
+ }
+
+ var shouldStream bool
+ var blockLang string
+
+ awaitingTag := processor.awaitingBlockOpeningTag || processor.awaitingBlockClosingTag || processor.awaitingOpClosingTag
+ awaitingAny := awaitingTag || processor.awaitingBackticks
+
+ if awaitingAny {
+ if verboseLogging {
+ log.Println("awaitingAny")
+ }
+ processor.contentBuffer += content
+ content = processor.contentBuffer
+
+ if verboseLogging {
+ log.Printf("awaitingBlockOpeningTag: %v\n", processor.awaitingBlockOpeningTag)
+ log.Printf("awaitingBlockClosingTag: %v\n", processor.awaitingBlockClosingTag)
+ log.Printf("awaitingBackticks: %v\n", processor.awaitingBackticks)
+ log.Printf("awaitingOpClosingTag: %v\n", processor.awaitingOpClosingTag)
+ log.Printf("content: %q\n", content)
+ }
+ }
+
+ if processor.awaitingBackticks {
+ if strings.Contains(content, "```") {
+ processor.awaitingBackticks = false
+ content = strings.ReplaceAll(content, "```", "\\`\\`\\`")
+
+ if !(processor.awaitingBlockOpeningTag || processor.awaitingBlockClosingTag) {
+ shouldStream = true
+ }
+ } else if !strings.HasSuffix(content, "`") {
+ // fewer than 3 backticks, no need to escape
+ processor.awaitingBackticks = false
+
+ if !(processor.awaitingBlockOpeningTag || processor.awaitingBlockClosingTag) {
+ shouldStream = true
+ }
+ }
+ }
+
+ if awaitingTag {
+ if verboseLogging {
+ log.Println("awaitingTag")
+ }
+ if processor.awaitingBlockOpeningTag {
+ if verboseLogging {
+ log.Println("processor.awaitingBlockOpeningTag")
+ }
+ var matchedPrefix bool
+
+ if parserRes.CurrentFilePath != "" {
+ matched, replaced := replaceCodeBlockOpeningTag(content, func(lang string) string {
+ blockLang = lang
+ return "```" + lang
+ })
+
+ if matched {
+ shouldStream = true
+ processor.awaitingBlockOpeningTag = false
+ processor.fileOpen = true
+ content = replaced
+ } else {
+ // tag is missing - something is wrong - we shouldn't be here but let's try to recover anyway
+ if verboseLogging {
+ log.Printf("Opening tag is missing even though parserRes.CurrentFile is set - something is wrong: %s\n", content)
+ }
+ processor.awaitingBlockOpeningTag = false
+ processor.fileOpen = false
+ content += "\n```" // add ``` to the end of the line to close the markdown code block
+ shouldStream = true
+ }
+ } else {
+ split := strings.Split(content, "<")
+
+ if len(split) > 1 {
+ last := split[len(split)-1]
+ if verboseLogging {
+ log.Printf("last: %s\n", last)
+ }
+ if strings.HasPrefix(`PlandexBlock lang="`, last) {
+ if verboseLogging {
+ log.Println("strings.HasPrefix(`PlandexBlock lang=", last)
+ }
+ shouldStream = false
+ matchedPrefix = true
+ } else if strings.HasPrefix(last, `PlandexBlock lang="`) {
+ if verboseLogging {
+ log.Println("partialOpeningTagRegex.MatchString(last)")
+ }
+ shouldStream = false
+ matchedPrefix = true
+ } else {
+ if verboseLogging {
+ log.Println("partialOpeningTagRegex.MatchString(last) is false")
+ }
+ }
+ }
+ }
+
+ if !matchedPrefix && parserRes.MaybeFilePath == "" && parserRes.CurrentFilePath == "" {
+ // wasn't really a file path / code block
+ processor.awaitingBlockOpeningTag = false
+ shouldStream = true
+ }
+ } else if processor.awaitingBlockClosingTag {
+ if parserRes.CurrentFilePath == "" {
+ if strings.Contains(content, "") {
+ shouldStream = true
+ processor.awaitingBlockClosingTag = false
+ processor.fileOpen = false
+ // replace with ``` to close the markdown code block
+ content = strings.ReplaceAll(content, "", "```")
+ } else {
+ log.Printf("Closing tag is missing even though parserRes.CurrentOperation is nil - something is wrong: %s\n", content)
+ processor.awaitingBlockClosingTag = false
+ shouldStream = true
+ }
+ }
+ } else if processor.awaitingOpClosingTag {
+ if verboseLogging {
+ log.Printf("awaitingOpClosingTag: %v\n", processor.awaitingOpClosingTag)
+ }
+ if strings.Contains(content, "") {
+ if verboseLogging {
+ log.Printf("Found \n")
+ }
+ processor.awaitingOpClosingTag = false
+ content = strings.Replace(content, "\n", "", 1)
+ content = strings.Replace(content, "", "", 1)
+ shouldStream = true
+ }
+ }
+
+ } else {
+ if verboseLogging {
+ log.Println("not awaiting tag")
+ }
+
+ if parserRes.MaybeFilePath != "" && parserRes.CurrentFilePath == "" {
+ processor.awaitingBlockOpeningTag = true
+ } else {
+ // this will set processor.awaitingBlockOpeningTag to true if the content ends with any prefix of the <PlandexBlock lang="..."> opening tag
+ split := strings.Split(content, "<")
+ if len(split) > 1 {
+ last := split[len(split)-1]
+
+ if strings.HasPrefix(`PlandexBlock lang="`, last) {
+ processor.awaitingBlockOpeningTag = true
+ } else if strings.HasPrefix(last, `PlandexBlock lang="`) {
+ processor.awaitingBlockOpeningTag = true
+ }
+ }
+ }
+
+ if parserRes.CurrentFilePath != "" {
+ if verboseLogging {
+ log.Println("parserRes.CurrentFilePath != \"\"")
+ }
+ if strings.Contains(content, "") {
+ if verboseLogging {
+ log.Println("strings.Contains(content, \"\")")
+ }
+ processor.awaitingBlockClosingTag = true
+ } else {
+ if verboseLogging {
+ log.Println("not strings.Contains(content, \"\")")
+ }
+ split := strings.Split(content, "<")
+ // log.Printf("split: %v\n", split)
+ if len(split) > 1 {
+ if verboseLogging {
+ log.Println("len(split) > 1")
+ }
+ last := split[len(split)-1]
+ // log.Printf("last: %s\n", last)
+ if strings.HasPrefix("/PlandexBlock>", last) {
+ if verboseLogging {
+ log.Println("strings.HasPrefix(\"/PlandexBlock>\", last)")
+ }
+ processor.awaitingBlockClosingTag = true
+ }
+ }
+ }
+ } else if parserRes.FileOperationBlockOpen() {
+ if verboseLogging {
+ log.Println("parserRes.FileOperationBlockOpen()")
+ }
+ if strings.Contains(content, "") {
+ if verboseLogging {
+ log.Println("strings.Contains(content, \"\")")
+ }
+ processor.awaitingOpClosingTag = true
+ } else {
+ if verboseLogging {
+ log.Println("not strings.Contains(content, \"\")")
+ }
+ split := strings.Split(content, "<")
+ if len(split) > 1 {
+ if verboseLogging {
+ log.Println("len(split) > 1")
+ }
+ last := split[len(split)-1]
+ if strings.HasPrefix("EndPlandexFileOps/>", last) {
+ if verboseLogging {
+ log.Println("strings.HasPrefix(\"EndPlandexFileOps/>\", last)")
+ }
+ processor.awaitingOpClosingTag = true
+ }
+ }
+ }
+ } else if strings.Contains(content, "") {
+ if verboseLogging {
+ log.Println("strings.Contains(content, \"\")")
+ }
+ content = strings.Replace(content, "", "```", 1)
+ } else if strings.Contains(content, "") {
+ if verboseLogging {
+ log.Println("strings.Contains(content, \"\")")
+ }
+ content = strings.Replace(content, "\n", "", 1)
+ content = strings.Replace(content, "", "", 1)
+ }
+
+ if processor.fileOpen && (strings.Contains(content, "```") || strings.HasSuffix(content, "`")) {
+ if verboseLogging {
+ log.Println("processor.fileOpen && (strings.Contains(content, \"```\") || strings.HasSuffix(content, \"`\"))")
+ }
+ processor.awaitingBackticks = true
+ }
+
+ var matchedOpeningTag bool
+ if processor.fileOpen {
+ if verboseLogging {
+ log.Println("processor.fileOpen")
+ }
+ var replaced string
+
+ matchedOpeningTag, replaced = replaceCodeBlockOpeningTag(content, func(lang string) string {
+ blockLang = lang
+ return "```" + lang
+ })
+
+ if verboseLogging {
+ log.Println("matchedOpeningTag", matchedOpeningTag)
+ log.Println("replaced", replaced)
+ }
+
+ if matchedOpeningTag {
+ processor.awaitingBlockOpeningTag = false
+ content = replaced
+ }
+ }
+
+ shouldStream = !processor.awaitingBlockOpeningTag && !processor.awaitingBlockClosingTag && !processor.awaitingOpClosingTag && !processor.awaitingBackticks
+
+ if verboseLogging {
+ log.Println("processor.awaitingBlockOpeningTag", processor.awaitingBlockOpeningTag)
+ log.Println("processor.awaitingBlockClosingTag", processor.awaitingBlockClosingTag)
+ log.Println("processor.awaitingOpClosingTag", processor.awaitingOpClosingTag)
+ log.Println("processor.awaitingBackticks", processor.awaitingBackticks)
+
+ log.Println("shouldStream", shouldStream)
+ }
+ }
+
+ if verboseLogging {
+ log.Println("returning bufferOrStreamResult")
+ log.Println("shouldStream", shouldStream)
+ log.Println("content", content)
+ log.Println("blockLang", blockLang)
+ }
+
+ if shouldStream {
+ processor.contentBuffer = ""
+ } else {
+ processor.contentBuffer = content
+ }
+
+ return bufferOrStreamResult{
+ shouldStream: shouldStream,
+ content: content,
+ blockLang: blockLang,
+ }
+}
+
+func (state *activeTellStreamState) handleNewOperations(parserRes *types.ReplyParserRes) {
+ processor := state.chunkProcessor
+ plan := state.plan
+ planId := plan.Id
+ branch := state.branch
+ clients := state.clients
+ auth := state.auth
+ authVars := state.authVars
+ req := state.req
+ replyId := state.replyId
+ currentOrgId := state.currentOrgId
+ currentUserId := state.currentUserId
+ settings := state.settings
+
+ operations := parserRes.Operations
+
+ log.Printf("%d new operations\n", len(operations)-len(processor.replyOperations))
+
+ for i, op := range operations {
+ if i < len(processor.replyOperations) {
+ continue
+ }
+
+ log.Printf("Detected operation: %s\n", op.Name())
+
+ if req.BuildMode == shared.BuildModeAuto {
+ log.Printf("Queuing build for %s\n", op.Name())
+ // log.Println("Content:")
+ // log.Println(strconv.Quote(op.Content))
+
+ buildState := &activeBuildStreamState{
+ modelStreamId: state.modelStreamId,
+ clients: clients,
+ authVars: authVars,
+ auth: auth,
+ currentOrgId: currentOrgId,
+ currentUserId: currentUserId,
+ plan: plan,
+ branch: branch,
+ settings: settings,
+ modelContext: state.modelContext,
+ orgUserConfig: state.orgUserConfig,
+ }
+
+ var opContentTokens int
+ if op.Type == shared.OperationTypeFile {
+ opContentTokens = shared.GetNumTokensEstimate(op.Content)
+ } else {
+ opContentTokens = op.NumTokens
+ }
+
+ // log.Printf("buildState.queueBuilds - op.Description:\n%s\n", op.Description)
+
+ buildState.queueBuilds([]*types.ActiveBuild{{
+ ReplyId: replyId,
+ FileDescription: op.Description,
+ FileContent: op.Content,
+ FileContentTokens: opContentTokens,
+ Path: op.Path,
+ MoveDestination: op.Destination,
+ IsMoveOp: op.Type == shared.OperationTypeMove,
+ IsRemoveOp: op.Type == shared.OperationTypeRemove,
+ IsResetOp: op.Type == shared.OperationTypeReset,
+ }})
+ }
+ processor.replyOperations = append(processor.replyOperations, op)
+ UpdateActivePlan(planId, branch, func(ap *types.ActivePlan) {
+ ap.Operations = append(ap.Operations, op)
+ })
+ }
+
+}
+
+func (state *activeTellStreamState) handleMissingFile(content, currentFile, blockLang string) processChunkResult {
+ branch := state.branch
+ plan := state.plan
+ planId := plan.Id
+ replyParser := state.replyParser
+ iteration := state.iteration
+ clients := state.clients
+ auth := state.auth
+ req := state.req
+ authVars := state.authVars
+
+ active := GetActivePlan(planId, branch)
+
+ if active == nil {
+ state.onActivePlanMissingError()
+ return processChunkResult{}
+ }
+
+ log.Printf("Attempting to overwrite a file that isn't in context: %s\n", currentFile)
+
+ // attempting to overwrite a file that isn't in context
+ // we will stop the stream and ask the user what to do
+ err := db.SetPlanStatus(planId, branch, shared.PlanStatusMissingFile, "")
+
+ if err != nil {
+ log.Printf("Error setting plan %s status to prompting: %v\n", planId, err)
+ go notify.NotifyErr(notify.SeverityError, fmt.Errorf("error setting plan %s status to prompting: %v", planId, err))
+
+ active.StreamDoneCh <- &shared.ApiError{
+ Type: shared.ApiErrorTypeOther,
+ Status: http.StatusInternalServerError,
+ Msg: fmt.Sprintf("Error setting plan status to prompting: %v", err),
+ }
+ return processChunkResult{}
+ }
+
+ var trimmedReply string
+
+ UpdateActivePlan(planId, branch, func(ap *types.ActivePlan) {
+ ap.MissingFilePath = currentFile
+ trimmedReply = replyParser.GetReplyForMissingFile()
+ ap.CurrentReplyContent = trimmedReply
+ })
+
+ // log.Println("Content:")
+ // log.Println(content)
+
+ // log.Println("Block lang:")
+ // log.Println(blockLang)
+
+ // log.Println("Trimmed content:")
+ // log.Println(trimmedReply)
+
+ // try to replace the code block opening tag in the chunk with an empty string
+ // this will remove the code block opening tag if it exists
+ splitBy := "```" + blockLang
+ split := strings.Split(content, splitBy)
+ chunkToStream := split[0] + splitBy + "\n"
+
+ // log.Printf("chunkToStream: %s\n", chunkToStream)
+
+ if chunkToStream != "" {
+ log.Printf("Streaming remaining chunk before missing file prompt: %s\n", chunkToStream)
+ active.Stream(shared.StreamMessage{
+ Type: shared.StreamMessageReply,
+ ReplyChunk: chunkToStream,
+ })
+ active.FlushStreamBuffer()
+ time.Sleep(20 * time.Millisecond)
+ }
+
+ log.Printf("Prompting user for missing file: %s\n", currentFile)
+
+ active.Stream(shared.StreamMessage{
+ Type: shared.StreamMessagePromptMissingFile,
+ MissingFilePath: currentFile,
+ MissingFileAutoContext: active.AutoContext,
+ })
+
+ log.Printf("Stopping stream for missing file: %s\n", currentFile)
+ // log.Printf("Chunk content: %s\n", content)
+ // log.Printf("Current reply content: %s\n", active.CurrentReplyContent)
+
+ // stop stream for now
+ active.CancelModelStreamFn()
+
+ log.Printf("Stopped stream for missing file: %s\n", currentFile)
+
+ // wait for user response to come in
+ var userChoice shared.RespondMissingFileChoice
+ select {
+ case <-active.Ctx.Done():
+ log.Println("Context cancelled while waiting for missing file response")
+ state.execHookOnStop(false)
+ return processChunkResult{shouldReturn: true}
+
+ case <-time.After(30 * time.Minute): // long timeout here since we're waiting for user input
+ log.Println("Timeout waiting for missing file choice")
+ state.onError(onErrorParams{
+ streamErr: fmt.Errorf("timeout waiting for missing file choice"),
+ storeDesc: true,
+ })
+ return processChunkResult{}
+
+ case userChoice = <-active.MissingFileResponseCh:
+ }
+
+ log.Printf("User choice for missing file: %s\n", userChoice)
+
+ active.ResetModelCtx()
+
+ UpdateActivePlan(planId, branch, func(ap *types.ActivePlan) {
+ ap.MissingFilePath = ""
+ ap.CurrentReplyContent = replyParser.GetReplyForMissingFile()
+ })
+
+ log.Println("Continuing stream")
+
+ // continue plan
+ execTellPlan(execTellPlanParams{
+ clients: clients,
+ plan: plan,
+ branch: branch,
+ auth: auth,
+ req: req,
+ iteration: iteration, // keep the same iteration
+ missingFileResponse: userChoice,
+ authVars: authVars,
+ })
+
+ return processChunkResult{shouldReturn: true}
+}
+
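+// getCroppedChunk locates chunk within the uncropped reply and returns the
+// cropped reply from that offset onward; it assumes the cropped reply shares
+// the uncropped reply's layout up to that index.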
+func getCroppedChunk(uncropped, cropped, chunk string) string {
+ uncroppedIdx := strings.Index(uncropped, chunk)
+ if uncroppedIdx == -1 {
+ return ""
+ }
+ croppedChunk := cropped[uncroppedIdx:]
+ return croppedChunk
+}
+
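+// replaceCodeBlockOpeningTag swaps a complete <PlandexBlock lang="..."> opening
+// tag for the value of replaceWithFn (typically a ```lang markdown fence) and
+// reports whether a tag was found.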
+func replaceCodeBlockOpeningTag(content string, replaceWithFn func(lang string) string) (bool, string) {
+ // check for opening tag matching
+ match := openingTagRegex.FindStringSubmatch(content)
+
+ if match != nil {
+ // Found complete opening tag with lang and path attributes
+ lang := match[1] // Extract the language from the first capture group
+ return true, strings.Replace(content, match[0], replaceWithFn(lang), 1)
+ } else if strings.Contains(content, "") {
+ // This is a fallback case that should probably be removed since we now require both attributes
+ return true, strings.Replace(content, "", replaceWithFn(""), 1)
+ }
+
+ return false, ""
+}
diff --git a/app/server/model/plan/tell_stream_processor_test.go b/app/server/model/plan/tell_stream_processor_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..45f8e468b72e88b0d592e77dae5da8d714a743b5
--- /dev/null
+++ b/app/server/model/plan/tell_stream_processor_test.go
@@ -0,0 +1,511 @@
+package plan
+
+import (
+ "plandex-server/types"
+ shared "plandex-shared"
+ "testing"
+)
+
+func TestBufferOrStream(t *testing.T) {
+ tests := []struct {
+ only bool
+ name string
+ initialState *chunkProcessor
+ chunk string
+ maybeFilePath string
+ currentFilePath string
+ isInMoveBlock bool
+ isInRemoveBlock bool
+ isInResetBlock bool
+ want bufferOrStreamResult
+ wantState *chunkProcessor // To verify state transitions
+ manualStop []string
+ }{
+ {
+ name: "streams regular content",
+ initialState: &chunkProcessor{
+ contentBuffer: "",
+ },
+ chunk: "some regular text",
+ want: bufferOrStreamResult{
+ shouldStream: true,
+ content: "some regular text",
+ },
+ wantState: &chunkProcessor{
+ awaitingBlockOpeningTag: false,
+ awaitingBlockClosingTag: false,
+ awaitingBackticks: false,
+ fileOpen: false,
+ },
+ },
+ {
+ name: "buffers partial opening tag",
+ initialState: &chunkProcessor{
+ awaitingBlockOpeningTag: true,
+ fileOpen: false,
+ contentBuffer: "",
+ },
+ chunk: `` + "\n",
+ awaitingBlockOpeningTag: true,
+ },
+ chunk: `package`,
+ maybeFilePath: "",
+ currentFilePath: "main.go",
+ want: bufferOrStreamResult{
+ shouldStream: true,
+ content: "```go\npackage",
+ },
+ wantState: &chunkProcessor{
+ awaitingBlockOpeningTag: false,
+ awaitingBlockClosingTag: false,
+ awaitingBackticks: false,
+ fileOpen: true,
+ },
+ },
+ {
+ // occurs when replyParser can't identify a 'maybeFilePath' prior to a full opening tag being sent ('maybeFilePath' gets skipped and 'currentFilePath' is set immediately)
+ name: "converts opening tag without awaitingOpeningTag",
+ initialState: &chunkProcessor{
+ fileOpen: true,
+ contentBuffer: "",
+ awaitingBlockOpeningTag: false,
+ },
+ chunk: `` + "\npackage",
+ maybeFilePath: "",
+ currentFilePath: "main.go",
+ want: bufferOrStreamResult{
+ shouldStream: true,
+ content: "```go\npackage",
+ },
+ wantState: &chunkProcessor{
+ awaitingBlockOpeningTag: false,
+ awaitingBlockClosingTag: false,
+ awaitingBackticks: false,
+ fileOpen: true,
+ },
+ },
+ {
+ name: "buffers partial backticks",
+ initialState: &chunkProcessor{
+ fileOpen: true,
+ contentBuffer: "here's some co",
+ },
+ chunk: "de:`",
+ currentFilePath: "main.go",
+ want: bufferOrStreamResult{
+ shouldStream: false,
+ },
+ wantState: &chunkProcessor{
+ awaitingBackticks: true,
+ fileOpen: true,
+ },
+ },
+ {
+ name: "escapes backticks in content",
+ initialState: &chunkProcessor{
+ fileOpen: true,
+ awaitingBackticks: true,
+ contentBuffer: "here's some code:\n`",
+ },
+ chunk: "``\npackage",
+ currentFilePath: "main.go",
+ want: bufferOrStreamResult{
+ shouldStream: true,
+ content: "here's some code:\n\\`\\`\\`\npackage",
+ },
+ wantState: &chunkProcessor{
+ awaitingBlockOpeningTag: false,
+ awaitingBlockClosingTag: false,
+ awaitingBackticks: false,
+ fileOpen: true,
+ },
+ },
+ {
+ name: "buffers partial closing tag",
+ initialState: &chunkProcessor{
+ fileOpen: true,
+ awaitingBlockClosingTag: false,
+ contentBuffer: "",
+ },
+ currentFilePath: "main.go",
+ chunk: "\n}",
+ want: bufferOrStreamResult{
+ shouldStream: false,
+ },
+ wantState: &chunkProcessor{
+ awaitingBlockClosingTag: true,
+ fileOpen: true,
+ contentBuffer: "\n}",
+ },
+ },
+ {
+ name: "replaces full closing tag with file closed",
+ initialState: &chunkProcessor{
+ fileOpen: false,
+ awaitingBlockClosingTag: false,
+ contentBuffer: "",
+ },
+ currentFilePath: "",
+ chunk: "\n}",
+ want: bufferOrStreamResult{
+ shouldStream: true,
+ content: "\n}```",
+ },
+ wantState: &chunkProcessor{
+ awaitingBlockClosingTag: false,
+ fileOpen: false,
+ },
+ },
+ {
+ name: "replaces full closing tag with file closed and awaiting backticks",
+ initialState: &chunkProcessor{
+ fileOpen: false,
+ awaitingBlockClosingTag: false,
+ awaitingBackticks: true,
+ contentBuffer: "",
+ },
+ currentFilePath: "",
+ chunk: " ONLY this one-line title and nothing else.`\n\n\nNow let",
+ want: bufferOrStreamResult{
+ shouldStream: true,
+ content: " ONLY this one-line title and nothing else.`\n```\n\nNow let",
+ },
+ wantState: &chunkProcessor{
+ awaitingBlockClosingTag: false,
+ fileOpen: false,
+ },
+ },
+ {
+ name: "handles single backticks",
+ initialState: &chunkProcessor{
+ fileOpen: true,
+ awaitingBackticks: true,
+ contentBuffer: "`file.go`",
+ },
+ chunk: "\nsomething",
+ currentFilePath: "main.go",
+ want: bufferOrStreamResult{
+ shouldStream: true,
+ content: "`file.go`\nsomething",
+ },
+ wantState: &chunkProcessor{
+ awaitingBlockOpeningTag: false,
+ awaitingBlockClosingTag: false,
+ awaitingBackticks: false,
+ fileOpen: true,
+ },
+ },
+ {
+ name: "handles close and re-open backticks",
+ initialState: &chunkProcessor{
+ fileOpen: true,
+ awaitingBackticks: true,
+ contentBuffer: "`file.go`",
+ },
+ chunk: "\n`file2.go`",
+ currentFilePath: "main.go",
+ want: bufferOrStreamResult{
+ shouldStream: false,
+ },
+ wantState: &chunkProcessor{
+ awaitingBlockOpeningTag: false,
+ awaitingBlockClosingTag: false,
+ awaitingBackticks: true,
+ fileOpen: true,
+ contentBuffer: "`file.go`\n`file2.go`",
+ },
+ },
+ {
+ name: "buffers for end of file operations",
+ initialState: &chunkProcessor{},
+ isInMoveBlock: true,
+ chunk: "\n\nmore",
+ want: bufferOrStreamResult{
+ shouldStream: false,
+ },
+ wantState: &chunkProcessor{
+ awaitingOpClosingTag: true,
+ contentBuffer: "\n\nmore",
+ },
+ },
+ {
+ name: "replaces full end of file operations tag",
+ initialState: &chunkProcessor{
+ awaitingOpClosingTag: true,
+ contentBuffer: "\n\nmore",
+ },
+ chunk: " stuff",
+ want: bufferOrStreamResult{
+ shouldStream: true,
+ content: "\nmore stuff",
+ },
+ wantState: &chunkProcessor{
+ awaitingOpClosingTag: false,
+ },
+ },
+ {
+ name: "buffers for end of file operations with partial tag",
+ initialState: &chunkProcessor{
+ awaitingOpClosingTag: true,
+ },
+ chunk: "\n\nmore",
+ want: bufferOrStreamResult{
+ shouldStream: true,
+ content: "\nmore",
+ },
+ wantState: &chunkProcessor{
+ awaitingOpClosingTag: false,
+ },
+ },
+ {
+ name: "buffers for partial opening tag with no file path label",
+ initialState: &chunkProcessor{},
+ chunk: "something\npackage",
+ want: bufferOrStreamResult{
+ shouldStream: true,
+ content: "something\n```go\npackage",
+ },
+ wantState: &chunkProcessor{
+ awaitingBlockOpeningTag: false,
+ fileOpen: true,
+ },
+ },
+ {
+ name: "replaces full opening tag without file path label",
+ initialState: &chunkProcessor{
+ fileOpen: true,
+ },
+ currentFilePath: "main.go",
+ chunk: "something\n\npackage",
+ want: bufferOrStreamResult{
+ shouldStream: true,
+ content: "something\n```go\npackage",
+ },
+ wantState: &chunkProcessor{
+ awaitingBlockOpeningTag: false,
+ fileOpen: true,
+ },
+ },
+
+ {
+ name: "stop tag entirely in one chunk",
+ initialState: &chunkProcessor{}, // empty buffer
+ chunk: "hello bye",
+ manualStop: []string{""},
+ want: bufferOrStreamResult{
+ shouldStream: true, // stream only the prefix
+ content: "hello ", // text before the tag
+ shouldStop: true, // tell caller to stop
+ },
+ wantState: &chunkProcessor{
+ contentBuffer: "", // nothing left buffered
+ },
+ },
+ {
+ name: "stop tag split across two chunks (prefix)",
+ initialState: &chunkProcessor{
+ contentBuffer: "", // begins empty
+ },
+ // FIRST CHUNK —— just a proper prefix of the stop tag
+ chunk: "<PlandexSt",
+ manualStop: []string{"<PlandexStop/>"},
+ want: bufferOrStreamResult{
+ shouldStream: false, // nothing streams yet
+ shouldStop: false, // not complete, keep going
+ },
+ wantState: &chunkProcessor{
+ contentBuffer: "<PlandexSt", // prefix stays buffered
+ },
+ },
+ {
+ name: "stop tag split across two chunks (rest)",
+ initialState: &chunkProcessor{
+ contentBuffer: "<PlandexSt", // prefix buffered from the first chunk
+ },
+ // SECOND CHUNK —— completes tag + trailing text
+ chunk: "op/>\nmore text",
+ manualStop: []string{"<PlandexStop/>"},
+ want: bufferOrStreamResult{
+ shouldStream: false, // do NOT leak "more text"
+ shouldStop: true, // signal caller to stop
+ },
+ wantState: &chunkProcessor{
+ contentBuffer: "", // buffer may keep the full tag; not checked here
+ },
+ },
+ {
+ name: "stop prefix buffers until resolved",
+ initialState: &chunkProcessor{
+ contentBuffer: "",
+ },
+ chunk: "<Plandex",
+ manualStop: []string{"<PlandexStop/>"},
+ want: bufferOrStreamResult{
+ shouldStream: false,
+ shouldStop: false,
+ },
+ wantState: &chunkProcessor{
+ contentBuffer: "<Plandex",
+ },
+ },
+ {
+ name: "stop prefix turns out to be different tag, falls through to other parsing logic",
+ initialState: &chunkProcessor{
+ awaitingBlockOpeningTag: true,
+ contentBuffer: "something\n<Plandex",
+ },
+ chunk: `Block lang="go">` + "\npackage",
+ currentFilePath: "main.go",
+ manualStop: []string{"<PlandexStop/>"},
+ want: bufferOrStreamResult{
+ shouldStream: true,
+ content: "something\n```go\npackage",
+ },
+ wantState: &chunkProcessor{
+ awaitingBlockOpeningTag: false,
+ fileOpen: true,
+ },
+ },
+ }
+
+ only := map[int]bool{}
+ for i, tt := range tests {
+ if tt.only {
+ only[i] = true
+ }
+ }
+
+ for i, tt := range tests {
+ if len(only) > 0 && !only[i] {
+ continue
+ }
+
+ t.Run(tt.name, func(t *testing.T) {
+ processor := tt.initialState
+
+ got := processor.bufferOrStream(tt.chunk, &types.ReplyParserRes{
+ MaybeFilePath: tt.maybeFilePath,
+ CurrentFilePath: tt.currentFilePath,
+ IsInMoveBlock: tt.isInMoveBlock,
+ IsInRemoveBlock: tt.isInRemoveBlock,
+ IsInResetBlock: tt.isInResetBlock,
+ }, shared.CurrentStage{
+ TellStage: shared.TellStageImplementation,
+ }, tt.manualStop)
+
+ if got.shouldStream != tt.want.shouldStream {
+ t.Errorf("shouldStream = %v, want %v", got.shouldStream, tt.want.shouldStream)
+ }
+ if got.shouldStream && got.content != tt.want.content {
+ t.Errorf("content = %q, want %q", got.content, tt.want.content)
+ }
+
+ // Check all state transitions
+ if processor.fileOpen != tt.wantState.fileOpen {
+ t.Errorf("fileOpen = %v, want %v", processor.fileOpen, tt.wantState.fileOpen)
+ }
+ if processor.awaitingBlockOpeningTag != tt.wantState.awaitingBlockOpeningTag {
+ t.Errorf("awaitingOpeningTag = %v, want %v", processor.awaitingBlockOpeningTag, tt.wantState.awaitingBlockOpeningTag)
+ }
+ if processor.awaitingBlockClosingTag != tt.wantState.awaitingBlockClosingTag {
+ t.Errorf("awaitingClosingTag = %v, want %v", processor.awaitingBlockClosingTag, tt.wantState.awaitingBlockClosingTag)
+ }
+ if processor.awaitingBackticks != tt.wantState.awaitingBackticks {
+ t.Errorf("awaitingBackticks = %v, want %v", processor.awaitingBackticks, tt.wantState.awaitingBackticks)
+ }
+
+ if tt.wantState.contentBuffer != "" {
+ if processor.contentBuffer != tt.wantState.contentBuffer {
+ t.Errorf("content buffer = %q, want %q", processor.contentBuffer, tt.wantState.contentBuffer)
+ }
+ }
+
+ // Check buffer is reset when it should be
+ if tt.want.shouldStream && processor.contentBuffer != "" {
+ t.Error("content buffer should be reset after streaming")
+ }
+ })
+ }
+}
diff --git a/app/server/model/plan/tell_stream_status.go b/app/server/model/plan/tell_stream_status.go
new file mode 100644
index 0000000000000000000000000000000000000000..d48ea9e977b8d9ec992752ef4ec2151627f09261
--- /dev/null
+++ b/app/server/model/plan/tell_stream_status.go
@@ -0,0 +1,224 @@
+package plan
+
+import (
+ "fmt"
+ "log"
+ "net/http"
+ "plandex-server/db"
+ shared "plandex-shared"
+ "runtime/debug"
+)
+
+type handleDescAndExecStatusResult struct {
+ handleStreamFinishedResult
+ subtaskFinished bool
+ generatedDescription *db.ConvoMessageDescription
+}
+
+func (state *activeTellStreamState) handleDescAndExecStatus() handleDescAndExecStatusResult {
+ currentOrgId := state.currentOrgId
+ summarizedToMessageId := state.summarizedToMessageId
+ planId := state.plan.Id
+ branch := state.branch
+ replyOperations := state.chunkProcessor.replyOperations
+
+ active := GetActivePlan(planId, branch)
+ if active == nil {
+ state.onActivePlanMissingError()
+ return handleDescAndExecStatusResult{
+ handleStreamFinishedResult: handleStreamFinishedResult{
+ shouldContinueMainLoop: true,
+ shouldReturn: false,
+ },
+ }
+ }
+
+ var generatedDescription *db.ConvoMessageDescription
+ var subtaskFinished bool
+
+ var errCh = make(chan *shared.ApiError, 2)
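+ // exactly two sends arrive on errCh: one from the description goroutine and
+ // one from the exec-status path (which sends an immediate nil outside the
+ // implementation stage), so the loop below waits for both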
+
+ go func() {
+ defer func() {
+ if r := recover(); r != nil {
+ log.Printf("panic in genPlanDescription: %v\n%s", r, debug.Stack())
+ errCh <- &shared.ApiError{
+ Type: shared.ApiErrorTypeOther,
+ Status: http.StatusInternalServerError,
+ Msg: fmt.Sprintf("Error generating plan description: %v", r),
+ }
+ }
+ }()
+
+ if len(replyOperations) > 0 {
+ log.Println("Generating plan description")
+
+ res, err := state.genPlanDescription()
+ if err != nil {
+ errCh <- err
+ return
+ }
+
+ generatedDescription = res
+ generatedDescription.OrgId = currentOrgId
+ generatedDescription.SummarizedToMessageId = summarizedToMessageId
+ generatedDescription.WroteFiles = true
+ generatedDescription.Operations = replyOperations
+
+ log.Println("Generated plan description.")
+ }
+ errCh <- nil
+ }()
+
+ if state.currentStage.TellStage == shared.TellStageImplementation {
+ go func() {
+ defer func() {
+ if r := recover(); r != nil {
+ log.Printf("panic in execStatusShouldContinue: %v\n%s", r, debug.Stack())
+ errCh <- &shared.ApiError{
+ Type: shared.ApiErrorTypeOther,
+ Status: http.StatusInternalServerError,
+ Msg: fmt.Sprintf("Error getting exec status: %v", r),
+ }
+ }
+ }()
+
+ log.Println("Getting exec status")
+ var err *shared.ApiError
+ res, err := state.execStatusShouldContinue(active.CurrentReplyContent, active.SessionId, active.Ctx)
+ if err != nil {
+ errCh <- err
+ return
+ }
+
+ subtaskFinished = res.subtaskFinished
+
+ log.Printf("subtaskFinished: %v\n", subtaskFinished)
+
+ errCh <- nil
+ }()
+
+ } else {
+ errCh <- nil
+ }
+
+ for i := 0; i < 2; i++ {
+ err := <-errCh
+ if err != nil {
+ res := state.onError(onErrorParams{
+ streamApiErr: err,
+ storeDesc: true,
+ })
+ return handleDescAndExecStatusResult{
+ handleStreamFinishedResult: handleStreamFinishedResult{
+ shouldContinueMainLoop: res.shouldContinueMainLoop,
+ shouldReturn: res.shouldReturn,
+ },
+ subtaskFinished: subtaskFinished,
+ generatedDescription: generatedDescription,
+ }
+ }
+ }
+
+ return handleDescAndExecStatusResult{
+ handleStreamFinishedResult: handleStreamFinishedResult{},
+ subtaskFinished: subtaskFinished,
+ generatedDescription: generatedDescription,
+ }
+}
+
+type willContinuePlanParams struct {
+ hasNewSubtasks bool
+ removedSubtasks bool
+ allSubtasksFinished bool
+ activatePaths map[string]bool
+ hasExplicitPaths bool
+}
+
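+// willContinuePlan decides whether the stream loop should automatically start
+// another iteration, based on the tell stage, planning phase, chat-only mode,
+// the auto-continue setting, and the remaining subtasks.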
+func (state *activeTellStreamState) willContinuePlan(params willContinuePlanParams) bool {
+ hasNewSubtasks := params.hasNewSubtasks
+ removedSubtasks := params.removedSubtasks
+ allSubtasksFinished := params.allSubtasksFinished
+ activatePaths := params.activatePaths
+ currentSubtask := state.currentSubtask
+
+ log.Printf("[willContinuePlan] currentStage: %v", state.currentStage)
+
+ log.Printf("[willContinuePlan] Initial state - hasNewSubtasks: %v, allSubtasksFinished: %v, tellStage: %v, planningPhase: %v, iteration: %d, autoContinue: %v",
+ hasNewSubtasks, allSubtasksFinished, state.currentStage.TellStage, state.currentStage.PlanningPhase, state.iteration, state.req.AutoContinue)
+
+ if state.currentStage.TellStage == shared.TellStagePlanning {
+ log.Println("[willContinuePlan] In planning stage")
+
+ // always continue to response or planning phase after context phase
+ if state.currentStage.PlanningPhase == shared.PlanningPhaseContext {
+
+ // if it's the context stage but it's chat mode and no files were loaded, don't continue
+ if state.req.IsChatOnly && len(activatePaths) == 0 {
+ log.Println("[willContinuePlan] Chat only - no files loaded - stopping")
+ return false
+ }
+
+ // if no files were listed explicitly in a ### Files section, don't continue if it's chat mode
+ if state.req.IsChatOnly && !params.hasExplicitPaths {
+ log.Println("[willContinuePlan] Chat only - no files loaded - stopping")
+ return false
+ }
+
+ log.Println("[willContinuePlan] In context phase - continuing to planning phase")
+ return true
+ }
+
+ if state.req.IsChatOnly {
+ log.Println("[willContinuePlan] Chat only - stopping")
+ return false
+ }
+
+ // otherwise, if auto-continue is disabled, never continue
+ if !state.req.AutoContinue {
+ log.Println("[willContinuePlan] Auto-continue disabled - stopping")
+ return false
+ }
+
+ // if there are new subtasks, continue
+ if hasNewSubtasks && !allSubtasksFinished {
+ log.Println("[willContinuePlan] Has new subtasks - continuing")
+ return true
+ }
+
+ if removedSubtasks && !allSubtasksFinished {
+ log.Println("[willContinuePlan] Removed subtasks - continuing")
+ return true
+ }
+
+ // if all subtasks are finished, don't continue
+ log.Printf("[willContinuePlan] Checking subtasks finished - allSubtasksFinished: %v, will continue: %v",
+ allSubtasksFinished, !allSubtasksFinished)
+
+ log.Printf("[willContinuePlan] currentSubtask: %v", currentSubtask)
+
+ return !allSubtasksFinished && currentSubtask != nil
+
+ } else if state.currentStage.TellStage == shared.TellStageImplementation {
+ log.Println("[willContinuePlan] In implementation stage")
+
+ // if all subtasks are finished, don't continue
+ if allSubtasksFinished {
+ log.Println("[willContinuePlan] All subtasks finished - stopping")
+ return false
+ }
+
+ // if we've automatically continued too many times, don't continue
+ if state.iteration >= MaxAutoContinueIterations {
+ log.Printf("[willContinuePlan] Reached max iterations (%d) - stopping", MaxAutoContinueIterations)
+ return false
+ }
+
+ // otherwise, continue with implementation
+ log.Println("[willContinuePlan] Continuing implementation")
+ return true
+ }
+
+ log.Printf("[willContinuePlan] Unknown tell stage: %v - won't continue", state.currentStage.TellStage)
+ return false
+}
diff --git a/app/server/model/plan/tell_stream_store.go b/app/server/model/plan/tell_stream_store.go
new file mode 100644
index 0000000000000000000000000000000000000000..c12f4b23a2a59dfc2ac40c9b8a93a2527cb4e370
--- /dev/null
+++ b/app/server/model/plan/tell_stream_store.go
@@ -0,0 +1,305 @@
+package plan
+
+import (
+ "fmt"
+ "log"
+ "net/http"
+ "plandex-server/db"
+ "plandex-server/notify"
+ "plandex-server/types"
+ shared "plandex-shared"
+
+ "github.com/sashabaranov/go-openai"
+)
+
+type storeOnFinishedParams struct {
+ replyOperations []*shared.Operation
+ generatedDescription *db.ConvoMessageDescription
+ subtaskFinished bool
+ hasNewSubtasks bool
+ autoLoadContextResult checkAutoLoadContextResult
+ addedSubtasks []*db.Subtask
+ removedSubtasks []string
+}
+
+type storeOnFinishedResult struct {
+ handleStreamFinishedResult
+ allSubtasksFinished bool
+}
+
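+// storeOnFinished persists the assistant reply, its generated description,
+// and the updated subtask state in a single locked repo operation, committing
+// everything to the plan's git branch. It also resolves which subtask becomes
+// current and whether all subtasks are now finished.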
+func (state *activeTellStreamState) storeOnFinished(params storeOnFinishedParams) storeOnFinishedResult {
+ replyOperations := params.replyOperations
+ generatedDescription := params.generatedDescription
+ subtaskFinished := params.subtaskFinished
+ hasNewSubtasks := params.hasNewSubtasks
+ autoLoadContextResult := params.autoLoadContextResult
+ currentOrgId := state.currentOrgId
+ currentUserId := state.currentUserId
+ planId := state.plan.Id
+ branch := state.branch
+ summarizedToMessageId := state.summarizedToMessageId
+ active := state.activePlan
+ addedSubtasks := params.addedSubtasks
+ removedSubtasks := params.removedSubtasks
+ var allSubtasksFinished bool
+
+ log.Println("[storeOnFinished] Locking repo to store assistant reply and description")
+
+ err := db.ExecRepoOperation(db.ExecRepoOperationParams{
+ OrgId: currentOrgId,
+ UserId: currentUserId,
+ PlanId: planId,
+ Branch: branch,
+ Scope: db.LockScopeWrite,
+ Ctx: active.Ctx,
+ CancelFn: active.CancelFn,
+ Reason: "store on finished",
+ }, func(repo *db.GitRepo) error {
+ log.Println("storeOnFinished: hasNewSubtasks", hasNewSubtasks)
+ log.Println("storeOnFinished: subtaskFinished", subtaskFinished)
+ log.Println("storeOnFinished: removedSubtasks", removedSubtasks)
+
+ messageSubtask := state.currentSubtask
+
+ // first resolve subtask state
+ if hasNewSubtasks || len(removedSubtasks) > 0 || subtaskFinished {
+ if subtaskFinished && state.currentSubtask != nil {
+ log.Printf("[storeOnFinished] Marking subtask as finished: %q", state.currentSubtask.Title)
+ state.currentSubtask.IsFinished = true
+
+ log.Printf("[storeOnFinished] Current subtask state after marking as finished: %+v", state.currentSubtask)
+ }
+
+ log.Printf("[storeOnFinished] Storing plan subtasks (hasNewSubtasks=%v, subtaskFinished=%v)", hasNewSubtasks, subtaskFinished)
+ log.Printf("[storeOnFinished] Current subtasks state before storing:")
+ for i, task := range state.subtasks {
+ log.Printf("[storeOnFinished] Task %d: %q (finished=%v)", i+1, task.Title, task.IsFinished)
+ }
+
+ state.currentSubtask = nil
+ allSubtasksFinished = true
+ for _, subtask := range state.subtasks {
+ if !subtask.IsFinished {
+ state.currentSubtask = subtask
+ allSubtasksFinished = false
+ break
+ }
+ }
+
+ if state.currentSubtask != nil {
+ log.Printf("[storeOnFinished] Set new current subtask: %q", state.currentSubtask.Title)
+ } else {
+ log.Println("[storeOnFinished] No new current subtask set")
+ }
+ log.Printf("[storeOnFinished] All subtasks finished: %v", allSubtasksFinished)
+ } else if state.currentSubtask != nil && !subtaskFinished {
+ log.Printf("[storeOnFinished] Current subtask is not finished: %q", state.currentSubtask.Title)
+ state.currentSubtask.NumTries++
+ }
+
+ log.Println("storeOnFinished: state.currentSubtask", state.currentSubtask)
+ log.Println("storeOnFinished: state.subtasks", state.subtasks)
+ log.Println("storeOnFinished: state.currentStage", state.currentStage)
+
+ var flags shared.ConvoMessageFlags
+
+ flags.CurrentStage = state.currentStage
+
+ if len(replyOperations) > 0 {
+ flags.DidWriteCode = true
+ }
+ if hasNewSubtasks {
+ log.Println("storeOnFinished: hasNewSubtasks")
+ flags.DidMakePlan = true
+ }
+ if len(removedSubtasks) > 0 {
+ log.Println("storeOnFinished: len(removedSubtasks) > 0")
+ flags.DidMakePlan = true
+ flags.DidRemoveTasks = true
+ }
+ if len(autoLoadContextResult.autoLoadPaths) > 0 {
+ flags.DidLoadContext = true
+ }
+ if subtaskFinished && messageSubtask != nil {
+ flags.DidCompleteTask = true
+ }
+ if allSubtasksFinished {
+ log.Println("storeOnFinished: allSubtasksFinished")
+ flags.DidCompletePlan = true
+ }
+ if hasNewSubtasks && (state.req.IsApplyDebug || state.req.IsUserDebug) {
+ log.Println("storeOnFinished: hasNewSubtasks && (state.req.IsApplyDebug || state.req.IsUserDebug)")
+ flags.DidMakeDebuggingPlan = true
+ }
+
+ log.Println("storeOnFinished: flags", flags)
+
+ assistantMsg, convoCommitMsg, err := state.storeAssistantReply(repo, storeAssistantReplyParams{
+ flags: flags,
+ subtask: messageSubtask,
+ addedSubtasks: addedSubtasks,
+ activatePaths: autoLoadContextResult.activatePaths,
+ activatePathsOrdered: autoLoadContextResult.activatePathsOrdered,
+ removedSubtasks: removedSubtasks,
+ }) // updates state.convo
+
+ if err != nil {
+ state.onError(onErrorParams{
+ streamErr: fmt.Errorf("failed to store assistant message: %v", err),
+ storeDesc: true,
+ })
+ return err
+ }
+
+ log.Println("getting description for assistant message: ", assistantMsg.Id)
+
+ var description *db.ConvoMessageDescription
+ if len(replyOperations) == 0 {
+ description = &db.ConvoMessageDescription{
+ OrgId: currentOrgId,
+ PlanId: planId,
+ ConvoMessageId: assistantMsg.Id,
+ SummarizedToMessageId: summarizedToMessageId,
+ BuildPathsInvalidated: map[string]bool{},
+ WroteFiles: false,
+ }
+ } else {
+ description = generatedDescription
+ description.ConvoMessageId = assistantMsg.Id
+ }
+
+ log.Println("[storeOnFinished] Storing description")
+ err = db.StoreDescription(description)
+
+ if err != nil {
+ state.onError(onErrorParams{
+ streamErr: fmt.Errorf("failed to store description: %v", err),
+ storeDesc: false,
+ convoMessageId: assistantMsg.Id,
+ commitMsg: convoCommitMsg,
+ })
+ return err
+ }
+ log.Println("[storeOnFinished] Description stored")
+
+ // store subtasks
+ err = db.StorePlanSubtasks(currentOrgId, planId, state.subtasks)
+ if err != nil {
+ log.Printf("Error storing plan subtasks: %v\n", err)
+ state.onError(onErrorParams{
+ streamErr: fmt.Errorf("failed to store plan subtasks: %v", err),
+ storeDesc: false,
+ convoMessageId: assistantMsg.Id,
+ commitMsg: convoCommitMsg,
+ })
+ return err
+ }
+
+ log.Println("Comitting after store on finished")
+
+ err = repo.GitAddAndCommit(branch, convoCommitMsg)
+ if err != nil {
+ state.onError(onErrorParams{
+ streamErr: fmt.Errorf("failed to commit: %v", err),
+ storeDesc: false,
+ convoMessageId: assistantMsg.Id,
+ commitMsg: convoCommitMsg,
+ })
+ return err
+ }
+ log.Println("Assistant reply, description, and subtasks committed")
+
+ return nil
+ })
+
+ if err != nil {
+ log.Printf("Error storing on finished: %v\n", err)
+ go notify.NotifyErr(notify.SeverityError, fmt.Errorf("error storing on finished: %v", err))
+
+ active.StreamDoneCh <- &shared.ApiError{
+ Type: shared.ApiErrorTypeOther,
+ Status: http.StatusInternalServerError,
+ Msg: fmt.Sprintf("Error storing on finished: %v", err),
+ }
+ return storeOnFinishedResult{
+ handleStreamFinishedResult: handleStreamFinishedResult{
+ shouldContinueMainLoop: true,
+ shouldReturn: false,
+ },
+ allSubtasksFinished: false,
+ }
+ }
+
+ return storeOnFinishedResult{
+ handleStreamFinishedResult: handleStreamFinishedResult{},
+ allSubtasksFinished: allSubtasksFinished,
+ }
+}
+
+type storeAssistantReplyParams struct {
+ flags shared.ConvoMessageFlags
+ subtask *db.Subtask
+ addedSubtasks []*db.Subtask
+ activatePaths map[string]bool
+ activatePathsOrdered []string
+ removedSubtasks []string
+}
+
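+// storeAssistantReply writes the assistant's reply as a conversation message,
+// updates the active plan's stored reply IDs, appends the message to
+// state.convo, and returns the stored message along with its commit message.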
+func (state *activeTellStreamState) storeAssistantReply(repo *db.GitRepo, params storeAssistantReplyParams) (*db.ConvoMessage, string, error) {
+ flags := params.flags
+ subtask := params.subtask
+ addedSubtasks := params.addedSubtasks
+ activatePaths := params.activatePaths
+ activatePathsOrdered := params.activatePathsOrdered
+ removedSubtasks := params.removedSubtasks
+
+ currentOrgId := state.currentOrgId
+ currentUserId := state.currentUserId
+ planId := state.plan.Id
+ branch := state.branch
+ auth := state.auth
+ replyNumTokens := state.replyNumTokens
+ replyId := state.replyId
+ convo := state.convo
+ num := len(convo) + 1
+
+ log.Printf("storing assistant reply | len(convo) %d | num %d\n", len(convo), num)
+
+ activePlan := state.activePlan
+
+ // fmt.Println("raw message: ", activePlan.CurrentReplyContent)
+
+ assistantMsg := db.ConvoMessage{
+ Id: replyId,
+ OrgId: currentOrgId,
+ PlanId: planId,
+ UserId: currentUserId,
+ Role: openai.ChatMessageRoleAssistant,
+ Tokens: replyNumTokens,
+ Num: num,
+ Message: activePlan.CurrentReplyContent,
+ Flags: flags,
+ Subtask: subtask,
+ AddedSubtasks: addedSubtasks,
+ ActivatedPaths: activatePaths,
+ ActivatedPathsOrdered: activatePathsOrdered,
+ RemovedSubtasks: removedSubtasks,
+ }
+
+ commitMsg, err := db.StoreConvoMessage(repo, &assistantMsg, auth.User.Id, branch, false)
+
+ if err != nil {
+ log.Printf("Error storing assistant message: %v\n", err)
+ return nil, "", err
+ }
+
+ UpdateActivePlan(planId, branch, func(ap *types.ActivePlan) {
+ ap.MessageNum = num
+ ap.StoredReplyIds = append(ap.StoredReplyIds, replyId)
+ })
+
+ convo = append(convo, &assistantMsg)
+ state.convo = convo
+
+ return &assistantMsg, commitMsg, err
+}
diff --git a/app/server/model/plan/tell_stream_usage.go b/app/server/model/plan/tell_stream_usage.go
new file mode 100644
index 0000000000000000000000000000000000000000..02a0c52b7abd673ec92cc9cf444877cab6a8a7d2
--- /dev/null
+++ b/app/server/model/plan/tell_stream_usage.go
@@ -0,0 +1,141 @@
+package plan
+
+import (
+ "fmt"
+ "log"
+ "plandex-server/hooks"
+ "plandex-server/notify"
+ "runtime/debug"
+
+ "github.com/davecgh/go-spew/spew"
+ "github.com/sashabaranov/go-openai"
+)
+
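+// handleUsageChunk reports streamed token usage (including cached prompt
+// tokens) to the DidSendModelRequest hook in a background goroutine,
+// recovering from panics so usage reporting can't take down the stream.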
+func (state *activeTellStreamState) handleUsageChunk(usage *openai.Usage) {
+ auth := state.auth
+ plan := state.plan
+ generationId := state.generationId
+
+ log.Println("Tell stream usage:")
+ log.Println(spew.Sdump(usage))
+
+ var cachedTokens int
+ if usage.PromptTokensDetails != nil {
+ cachedTokens = usage.PromptTokensDetails.CachedTokens
+ }
+
+ sessionId := state.activePlan.SessionId
+
+ modelConfig := state.modelConfig
+ baseModelConfig := modelConfig.GetBaseModelConfig(state.authVars, state.settings, state.orgUserConfig)
+
+ go func() {
+ defer func() {
+ if r := recover(); r != nil {
+ log.Printf("panic in handleUsageChunk: %v\n%s", r, debug.Stack())
+ go notify.NotifyErr(notify.SeverityError, fmt.Errorf("panic in handleUsageChunk: %v\n%s", r, debug.Stack()))
+ }
+ }()
+
+ _, apiErr := hooks.ExecHook(hooks.DidSendModelRequest, hooks.HookParams{
+ Auth: auth,
+ Plan: plan,
+ DidSendModelRequestParams: &hooks.DidSendModelRequestParams{
+ InputTokens: usage.PromptTokens,
+ OutputTokens: usage.CompletionTokens,
+ CachedTokens: cachedTokens,
+ ModelId: baseModelConfig.ModelId,
+ ModelTag: baseModelConfig.ModelTag,
+ ModelName: baseModelConfig.ModelName,
+ ModelProvider: baseModelConfig.Provider,
+ ModelPackName: state.settings.GetModelPack().Name,
+ ModelRole: modelConfig.Role,
+ Purpose: "Response",
+ GenerationId: generationId,
+ PlanId: plan.Id,
+ ModelStreamId: state.modelStreamId,
+ ConvoMessageId: state.replyId,
+
+ RequestStartedAt: state.requestStartedAt,
+ Streaming: true,
+ FirstTokenAt: state.firstTokenAt,
+ Req: state.originalReq,
+ StreamResult: state.activePlan.CurrentReplyContent,
+ ModelConfig: state.modelConfig,
+
+ SessionId: sessionId,
+ },
+ })
+
+ if apiErr != nil {
+ log.Printf("handleUsageChunk - error executing DidSendModelRequest hook: %v", apiErr)
+ }
+ }()
+}
+
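+// execHookOnStop reports usage when a stream stops early, distinguishing a
+// user cancellation from an error via sendStreamErr. Since no usage was
+// reported by the provider, input tokens fall back to the request token count.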
+func (state *activeTellStreamState) execHookOnStop(sendStreamErr bool) {
+ generationId := state.generationId
+
+ log.Printf("execHookOnStop - sendStreamErr: %t\n", sendStreamErr)
+
+ planId := state.plan.Id
+ branch := state.branch
+ auth := state.auth
+ plan := state.plan
+ active := GetActivePlan(planId, branch)
+
+ if active == nil {
+ log.Printf(" Active plan not found for plan ID %s on branch %s\n", planId, branch)
+ return
+ }
+
+ modelConfig := state.modelConfig
+ baseModelConfig := modelConfig.GetBaseModelConfig(state.authVars, state.settings, state.orgUserConfig)
+
+ go func() {
+ defer func() {
+ if r := recover(); r != nil {
+ log.Printf("panic in execHookOnStop: %v\n%s", r, debug.Stack())
+ go notify.NotifyErr(notify.SeverityError, fmt.Errorf("panic in execHookOnStop: %v\n%s", r, debug.Stack()))
+ }
+ }()
+
+ _, apiErr := hooks.ExecHook(hooks.DidSendModelRequest, hooks.HookParams{
+ Auth: auth,
+ Plan: plan,
+ DidSendModelRequestParams: &hooks.DidSendModelRequestParams{
+ InputTokens: state.totalRequestTokens,
+ OutputTokens: active.NumTokens,
+ ModelId: baseModelConfig.ModelId,
+ ModelTag: baseModelConfig.ModelTag,
+ ModelName: baseModelConfig.ModelName,
+ ModelProvider: baseModelConfig.Provider,
+ ModelPackName: state.settings.GetModelPack().Name,
+ ModelRole: modelConfig.Role,
+ Purpose: "Response",
+ GenerationId: generationId,
+ PlanId: plan.Id,
+ ModelStreamId: state.modelStreamId,
+ ConvoMessageId: state.replyId,
+ StoppedEarly: true,
+ UserCancelled: !sendStreamErr,
+ HadError: sendStreamErr,
+ NoReportedUsage: true,
+
+ RequestStartedAt: state.requestStartedAt,
+ Streaming: true,
+ FirstTokenAt: state.firstTokenAt,
+ Req: state.originalReq,
+ StreamResult: state.activePlan.CurrentReplyContent,
+ ModelConfig: state.modelConfig,
+
+ SessionId: active.SessionId,
+ },
+ })
+
+ if apiErr != nil {
+ log.Printf("execHookOnStop - error executing DidSendModelRequest hook: %v", apiErr)
+ }
+ }()
+
+}
diff --git a/app/server/model/plan/tell_subtasks.go b/app/server/model/plan/tell_subtasks.go
new file mode 100644
index 0000000000000000000000000000000000000000..f77603cefeeb0fd11b0b3dd36f6af33ca8c0b22b
--- /dev/null
+++ b/app/server/model/plan/tell_subtasks.go
@@ -0,0 +1,250 @@
+package plan
+
+import (
+ "fmt"
+ "log"
+ "plandex-server/db"
+ "plandex-server/model/parse"
+ shared "plandex-shared"
+ "strings"
+
+ "github.com/davecgh/go-spew/spew"
+)
+
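+// formatSubtasks renders the plan's subtask list as prompt text for the
+// model, marking each task's done status and, during the implementation
+// stage, the current subtask. A sketch of the output shape (titles here are
+// illustrative):
+//
+//	### LATEST PLAN TASKS ###
+//
+//	1. Add auth middleware
+//	Uses: `server/auth.go`
+//	Done: no
+//	Current subtask: yes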
+func (state *activeTellStreamState) formatSubtasks() string {
+ subtasksText := "### LATEST PLAN TASKS ###\n\n"
+
+ var current *db.Subtask
+
+ for idx, subtask := range state.subtasks {
+ subtasksText += fmt.Sprintf("%d. %s\n", idx+1, subtask.Title)
+ if subtask.Description != "" {
+ subtasksText += "\n" + subtask.Description + "\n"
+ }
+ if len(subtask.UsesFiles) > 0 {
+ subtasksText += "Uses: "
+ usesFiles := []string{}
+ for _, file := range subtask.UsesFiles {
+ usesFiles = append(usesFiles, fmt.Sprintf("`%s`", file))
+ }
+ subtasksText += strings.Join(usesFiles, ", ") + "\n"
+ }
+ subtasksText += "Done: "
+ if subtask.IsFinished {
+ subtasksText += "yes"
+ } else {
+ subtasksText += "no"
+ }
+ subtasksText += "\n"
+
+ if state.currentSubtask != nil && subtask.Title == state.currentSubtask.Title && state.currentStage.TellStage == shared.TellStageImplementation {
+ current = subtask
+ subtasksText += "Current subtask: yes"
+ }
+
+ subtasksText += "\n"
+ }
+
+ if current != nil && state.currentStage.TellStage == shared.TellStageImplementation {
+ subtasksText += fmt.Sprintf("\n### Current subtask\n%s\n", current.Title)
+ if current.Description != "" {
+ subtasksText += "\n" + current.Description + "\n"
+ }
+ if len(current.UsesFiles) > 0 {
+ subtasksText += "Uses: "
+ usesFiles := []string{}
+ for _, file := range current.UsesFiles {
+ usesFiles = append(usesFiles, fmt.Sprintf("`%s`", file))
+ }
+ subtasksText += strings.Join(usesFiles, ", ") + "\n"
+ }
+ } else if state.currentStage.TellStage == shared.TellStagePlanning {
+ if state.currentStage.PlanningPhase == shared.PlanningPhaseTasks {
+ subtasksText += `
+
+	Remember, you are in the *PLANNING* phase and ABSOLUTELY MUST NOT implement any of the subtasks. You MUST NOT write any code or create any files. You can ONLY add or remove subtasks with a '### Tasks' section or a '### Remove Tasks' section. You CANNOT implement any of the subtasks in this response. Follow the PLANNING instructions. The existing subtasks are included for your reference so that you can see what has been planned so far, what has been done, and what is left to do, so that you can add or remove subtasks as needed. DO NOT implement any of the subtasks in this response. Follow the instructions for the PLANNING phase.
+
+ `
+ } else if state.currentStage.PlanningPhase == shared.PlanningPhaseContext {
+ subtasksText += `
+
+	Remember, you are in the *CONTEXT* phase. You MUST NOT implement any of the subtasks. You MUST NOT write any code or create any files. You MUST NOT make a plan with a '### Tasks' section or a '### Remove Tasks' section. Follow the instructions for the CONTEXT phase; they are summarized for you in the [SUMMARY OF INSTRUCTIONS] section. The existing subtasks are included for your reference so that you can see what has been planned so far, what has been done, and what is left to do. DO NOT implement any of the subtasks in this response. Do NOT add or remove subtasks. Follow the instructions for the CONTEXT phase.
+
+ `
+ }
+ }
+
+ return subtasksText
+}
+
+type checkNewSubtasksResult struct {
+ hasExplicitTasks bool
+ newSubtasks []*db.Subtask
+}
+
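+// checkNewSubtasks parses any '### Tasks' section in the current reply,
+// updates the stored subtask list (finished subtasks are always preserved),
+// and re-resolves which subtask is current.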
+func (state *activeTellStreamState) checkNewSubtasks() checkNewSubtasksResult {
+ activePlan := GetActivePlan(state.plan.Id, state.branch)
+
+ if activePlan == nil {
+ return checkNewSubtasksResult{
+ hasExplicitTasks: false,
+ newSubtasks: nil,
+ }
+ }
+
+ content := activePlan.CurrentReplyContent
+
+ subtasks := parse.ParseSubtasks(content)
+
+ if len(subtasks) == 0 {
+ log.Println("No new subtasks found")
+ return checkNewSubtasksResult{
+ hasExplicitTasks: false,
+ newSubtasks: nil,
+ }
+ }
+
+ log.Println("Found new subtasks:", len(subtasks))
+ // log.Println(spew.Sdump(subtasks))
+
+ subtasksByName := map[string]*db.Subtask{}
+
+ // Only index unfinished subtasks by name
+ for _, subtask := range state.subtasks {
+ if !subtask.IsFinished {
+ subtasksByName[subtask.Title] = subtask
+ }
+ }
+
+ var newSubtasks []*db.Subtask
+ var updatedSubtasks []*db.Subtask
+
+ // Keep finished subtasks
+ for _, subtask := range state.subtasks {
+ if subtask.IsFinished {
+ updatedSubtasks = append(updatedSubtasks, subtask)
+ }
+ }
+
+ // Add new subtasks if they don't exist
+ for _, subtask := range subtasks {
+ if subtasksByName[subtask.Title] == nil {
+ newSubtasks = append(newSubtasks, subtask)
+ updatedSubtasks = append(updatedSubtasks, subtask)
+ }
+ }
+
+ state.subtasks = updatedSubtasks
+
+ var currentSubtaskName string
+ if state.currentSubtask != nil {
+ currentSubtaskName = state.currentSubtask.Title
+ }
+
+ found := false
+ for _, subtask := range state.subtasks {
+ if subtask.Title == currentSubtaskName {
+ found = true
+ state.currentSubtask = subtask
+ break
+ }
+ }
+ if !found {
+ state.currentSubtask = nil
+ }
+
+ if state.currentSubtask == nil {
+ for _, subtask := range state.subtasks {
+ if !subtask.IsFinished {
+ state.currentSubtask = subtask
+ break
+ }
+ }
+ }
+
+ // log.Println("state.subtasks:\n", spew.Sdump(state.subtasks))
+ log.Println("state.currentSubtask:\n", spew.Sdump(state.currentSubtask))
+
+ return checkNewSubtasksResult{
+ hasExplicitTasks: len(subtasks) > 0,
+ newSubtasks: newSubtasks,
+ }
+}
+
+type checkRemoveSubtasksResult struct {
+ hasExplicitRemoveTasks bool
+ removedSubtasks []string
+}
+
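+// checkRemoveSubtasks parses any '### Remove Tasks' section in the current
+// reply, removes the named subtasks from state, picks a new current subtask
+// if the current one was removed, and returns the titles of the unfinished
+// subtasks that were removed.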
+func (state *activeTellStreamState) checkRemoveSubtasks() checkRemoveSubtasksResult {
+ activePlan := GetActivePlan(state.plan.Id, state.branch)
+
+ if activePlan == nil {
+ return checkRemoveSubtasksResult{
+ hasExplicitRemoveTasks: false,
+ removedSubtasks: nil,
+ }
+ }
+
+ content := activePlan.CurrentReplyContent
+
+ // Parse tasks to remove
+ tasksToRemove := parse.ParseRemoveSubtasks(content)
+
+ if len(tasksToRemove) == 0 {
+ log.Println("No tasks to remove found")
+ return checkRemoveSubtasksResult{
+ hasExplicitRemoveTasks: false,
+ removedSubtasks: nil,
+ }
+ }
+
+ log.Println("Found tasks to remove:", len(tasksToRemove))
+ // log.Println(spew.Sdump(tasksToRemove))
+
+ // Create a map of task titles to remove for efficient lookup
+ removeMap := make(map[string]bool)
+ for _, task := range tasksToRemove {
+ removeMap[task] = true
+ }
+
+ var removedSubtasks []*db.Subtask
+ var remainingSubtasks []*db.Subtask
+
+ // Keep tasks that aren't in the remove list
+ for _, subtask := range state.subtasks {
+ if removeMap[subtask.Title] {
+ // Only track unfinished tasks that are being removed
+ if !subtask.IsFinished {
+ removedSubtasks = append(removedSubtasks, subtask)
+ }
+ } else {
+ remainingSubtasks = append(remainingSubtasks, subtask)
+ }
+ }
+
+ state.subtasks = remainingSubtasks
+
+ // Update current subtask if it was removed
+ if state.currentSubtask != nil && removeMap[state.currentSubtask.Title] {
+ state.currentSubtask = nil
+ // Find the first unfinished subtask to set as current
+ for _, subtask := range state.subtasks {
+ if !subtask.IsFinished {
+ state.currentSubtask = subtask
+ break
+ }
+ }
+ }
+
+ removedSubtaskTitles := []string{}
+ for _, subtask := range removedSubtasks {
+ removedSubtaskTitles = append(removedSubtaskTitles, subtask.Title)
+ }
+ log.Println("removedSubtaskTitles:\n", spew.Sdump(removedSubtaskTitles))
+
+ return checkRemoveSubtasksResult{
+ hasExplicitRemoveTasks: len(tasksToRemove) > 0,
+ removedSubtasks: removedSubtaskTitles,
+ }
+}
diff --git a/app/server/model/plan/tell_summary.go b/app/server/model/plan/tell_summary.go
new file mode 100644
index 0000000000000000000000000000000000000000..01112c69748b23a60b44f1eccaefe0c01de002ab
--- /dev/null
+++ b/app/server/model/plan/tell_summary.go
@@ -0,0 +1,434 @@
+package plan
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "log"
+ "net/http"
+ "plandex-server/db"
+ "plandex-server/model"
+ "plandex-server/model/prompts"
+ "plandex-server/notify"
+ "plandex-server/types"
+ "time"
+
+ shared "plandex-shared"
+
+ "github.com/davecgh/go-spew/spew"
+ "github.com/sashabaranov/go-openai"
+)
+
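+// addConversationMessages appends the conversation to state.messages for the
+// upcoming model request. If the token budget is exceeded, it swaps in the
+// earliest summary that brings the total under both the max convo token limit
+// and the planner's effective max tokens, then appends only the messages that
+// came after that summary. Returns false if no summary can get the
+// conversation under the limits (an error is sent on the stream in that case).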
+func (state *activeTellStreamState) addConversationMessages() bool {
+ summaries := state.summaries
+ tokensBeforeConvo := state.tokensBeforeConvo
+ active := GetActivePlan(state.plan.Id, state.branch)
+
+ convo := []*db.ConvoMessage{}
+ for _, msg := range state.convo {
+ if state.skipConvoMessages != nil && state.skipConvoMessages[msg.Id] {
+ continue
+ }
+ convo = append(convo, msg)
+ }
+
+ if active == nil {
+ log.Println("summarizeMessagesIfNeeded - Active plan not found")
+ return false
+ }
+
+ conversationTokens := 0
+ tokensUpToTimestamp := make(map[int64]int)
+ convoMessagesById := make(map[string]*db.ConvoMessage)
+ for _, convoMessage := range convo {
+ conversationTokens += convoMessage.Tokens + model.TokensPerMessage + model.TokensPerName
+ timestamp := convoMessage.CreatedAt.UnixNano() / int64(time.Millisecond)
+ tokensUpToTimestamp[timestamp] = conversationTokens
+ convoMessagesById[convoMessage.Id] = convoMessage
+ // log.Printf("Timestamp: %s | Tokens: %d | Total: %d | conversationTokens\n", convoMessage.Timestamp, convoMessage.Tokens, conversationTokens)
+ }
+
+ log.Printf("Conversation tokens: %d\n", conversationTokens)
+ log.Printf("Max conversation tokens: %d\n", state.settings.GetPlannerMaxConvoTokens())
+
+ // log.Println("Tokens up to timestamp:")
+ // spew.Dump(tokensUpToTimestamp)
+
+ log.Printf("Total tokens: %d\n", tokensBeforeConvo+conversationTokens)
+ log.Printf("Max tokens: %d\n", state.settings.GetPlannerEffectiveMaxTokens())
+
+ var summary *db.ConvoSummary
+ if (tokensBeforeConvo+conversationTokens) > state.settings.GetPlannerEffectiveMaxTokens() ||
+ conversationTokens > state.settings.GetPlannerMaxConvoTokens() {
+ log.Println("Token limit exceeded. Attempting to reduce via conversation summary.")
+
+ // log.Printf("(tokensBeforeConvo+conversationTokens) > state.settings.GetPlannerEffectiveMaxTokens(): %v\n", (tokensBeforeConvo+conversationTokens) > state.settings.GetPlannerEffectiveMaxTokens())
+ // log.Printf("conversationTokens > state.settings.GetPlannerMaxConvoTokens(): %v\n", conversationTokens > state.settings.GetPlannerMaxConvoTokens())
+
+ log.Printf("Num summaries: %d\n", len(summaries))
+
+ // token limit exceeded after adding conversation
+ // get summary for as much as the conversation as necessary to stay under the token limit
+ for _, s := range summaries {
+ timestamp := s.LatestConvoMessageCreatedAt.UnixNano() / int64(time.Millisecond)
+
+ tokens, ok := tokensUpToTimestamp[timestamp]
+
+ log.Printf("Last message timestamp: %d | found: %v\n", timestamp, ok)
+ log.Printf("Tokens up to timestamp: %d\n", tokens)
+
+ if !ok {
+ // try a fallback by id instead of timestamp, in case timestamp rounding caused it to be missing
+ convoMessage, ok := convoMessagesById[s.LatestConvoMessageId]
+
+ if ok {
+ timestamp = convoMessage.CreatedAt.UnixNano() / int64(time.Millisecond)
+ tokens, ok = tokensUpToTimestamp[timestamp]
+ }
+
+ if !ok {
+ // instead of erroring here as we did previously, we'll just log and continue
+ // if no summary is found, we still handle it as an error below
+ // but this way we don't error out completely for a single detached summary
+
+ log.Println("conversation summary timestamp not found in conversation")
+ log.Println("timestamp:", timestamp)
+
+ // log.Println("Conversation summary:")
+ // spew.Dump(s)
+
+ log.Println("tokensUpToTimestamp:")
+ log.Println(spew.Sdump(tokensUpToTimestamp))
+
+ go notify.NotifyErr(notify.SeverityInfo, fmt.Errorf("conversation summary timestamp not found in conversation"))
+
+ continue
+ }
+ }
+
+ updatedConversationTokens := (conversationTokens - tokens) + s.Tokens
+ savedTokens := conversationTokens - updatedConversationTokens
+
+ log.Printf("Conversation summary tokens: %d\n", tokens)
+ log.Printf("Updated conversation tokens: %d\n", updatedConversationTokens)
+ log.Printf("Saved tokens: %d\n", savedTokens)
+
+ if updatedConversationTokens <= state.settings.GetPlannerMaxConvoTokens() &&
+ (tokensBeforeConvo+updatedConversationTokens) <= state.settings.GetPlannerEffectiveMaxTokens() {
+ log.Printf("Summarizing up to %s | saving %d tokens\n", s.LatestConvoMessageCreatedAt.Format(time.RFC3339), savedTokens)
+ summary = s
+ conversationTokens = updatedConversationTokens
+ break
+ }
+ }
+
+ if summary == nil && tokensBeforeConvo+conversationTokens > state.settings.GetPlannerEffectiveMaxTokens() {
+ err := errors.New("couldn't get under token limit with conversation summary")
+ log.Printf("Error: %v\n", err)
+ go notify.NotifyErr(notify.SeverityInfo, fmt.Errorf("couldn't get under token limit with conversation summary"))
+
+ active.StreamDoneCh <- &shared.ApiError{
+ Type: shared.ApiErrorTypeOther,
+ Status: http.StatusInternalServerError,
+ Msg: "Couldn't get under token limit with conversation summary",
+ }
+ return false
+ }
+ }
+
+ var latestSummary *db.ConvoSummary
+ if len(summaries) > 0 {
+ latestSummary = summaries[len(summaries)-1]
+ }
+
+ if summary == nil {
+ for _, convoMessage := range convo {
+ // this gets added later in tell_exec.go
+ if state.promptConvoMessage != nil && convoMessage.Id == state.promptConvoMessage.Id {
+ continue
+ }
+
+ state.messages = append(state.messages, types.ExtendedChatMessage{
+ Role: openai.ChatMessageRoleUser,
+ Content: []types.ExtendedChatMessagePart{
+ {
+ Type: openai.ChatMessagePartTypeText,
+ Text: convoMessage.Message,
+ },
+ },
+ })
+
+ // add the latest summary as a conversation message if this is the last message summarized, in order to reinforce the current state of the plan to the model
+ if latestSummary != nil && convoMessage.Id == latestSummary.LatestConvoMessageId {
+ state.messages = append(state.messages, types.ExtendedChatMessage{
+ Role: openai.ChatMessageRoleAssistant,
+ Content: []types.ExtendedChatMessagePart{
+ {
+ Type: openai.ChatMessagePartTypeText,
+ Text: latestSummary.Summary,
+ },
+ },
+ })
+ }
+ }
+ } else {
+ if (tokensBeforeConvo + conversationTokens) > state.settings.GetPlannerEffectiveMaxTokens() {
+ go notify.NotifyErr(notify.SeverityError, fmt.Errorf("token limit still exceeded after summarizing conversation"))
+
+ active.StreamDoneCh <- &shared.ApiError{
+ Type: shared.ApiErrorTypeOther,
+ Status: http.StatusInternalServerError,
+ Msg: "Token limit still exceeded after summarizing conversation",
+ }
+ return false
+ }
+ state.summarizedToMessageId = summary.LatestConvoMessageId
+ state.messages = append(state.messages, types.ExtendedChatMessage{
+ Role: openai.ChatMessageRoleAssistant,
+ Content: []types.ExtendedChatMessagePart{
+ {
+ Type: openai.ChatMessagePartTypeText,
+ Text: summary.Summary,
+ },
+ },
+ })
+
+ // add messages after the last message in the summary
+ for _, convoMessage := range convo {
+ // this gets added later in tell_exec.go
+ if state.promptConvoMessage != nil && convoMessage.Id == state.promptConvoMessage.Id {
+ continue
+ }
+
+ if convoMessage.CreatedAt.After(summary.LatestConvoMessageCreatedAt) {
+ state.messages = append(state.messages, types.ExtendedChatMessage{
+ Role: openai.ChatMessageRoleUser,
+ Content: []types.ExtendedChatMessagePart{
+ {
+ Type: openai.ChatMessagePartTypeText,
+ Text: convoMessage.Message,
+ },
+ },
+ })
+
+ // add the latest summary as a conversation message if this is the last message summarized, in order to reinforce the current state of the plan to the model
+ if latestSummary != nil && convoMessage.Id == latestSummary.LatestConvoMessageId {
+ state.messages = append(state.messages, types.ExtendedChatMessage{
+ Role: openai.ChatMessageRoleAssistant,
+ Content: []types.ExtendedChatMessagePart{
+ {
+ Type: openai.ChatMessagePartTypeText,
+ Text: latestSummary.Summary,
+ },
+ },
+ })
+ }
+ }
+ }
+ }
+
+ return true
+}
+
+type summarizeConvoParams struct {
+ auth *types.ServerAuth
+ plan *db.Plan
+ branch string
+ convo []*db.ConvoMessage
+ summaries []*db.ConvoSummary
+ userPrompt string
+ currentReply string
+ currentReplyNumTokens int
+ currentOrgId string
+ modelPackName string
+}
+
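+// summarizeConvo generates and stores a rolling summary of the conversation.
+// It seeds the summary request with the latest stored summary (if any) plus
+// every message after it, appends the pending user prompt and current reply,
+// and records how many messages the new summary covers.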
+func summarizeConvo(clients map[string]model.ClientInfo, authVars map[string]string, settings *shared.PlanSettings, orgUserConfig *shared.OrgUserConfig, params summarizeConvoParams, ctx context.Context) *shared.ApiError {
+ plan := params.plan
+ planId := plan.Id
+ log.Printf("summarizeConvo: Called for plan ID %s on branch %s\n", planId, params.branch)
+ log.Printf("summarizeConvo: Starting summarizeConvo for planId: %s\n", planId)
+
+ branch := params.branch
+ convo := params.convo
+ summaries := params.summaries
+ userPrompt := params.userPrompt
+ currentReply := params.currentReply
+ active := GetActivePlan(planId, branch)
+
+ config := settings.GetModelPack().PlanSummary
+
+ if active == nil {
+ log.Printf("Active plan not found for plan ID %s and branch %s\n", planId, branch)
+
+ return &shared.ApiError{
+ Type: shared.ApiErrorTypeOther,
+ Status: http.StatusInternalServerError,
+ Msg: fmt.Sprintf("active plan not found for plan ID %s and branch %s", planId, branch),
+ }
+ }
+
+ log.Println("Generating plan summary for planId:", planId)
+
+ // log.Printf("planId: %s\n", planId)
+ // log.Printf("convo: ")
+ // spew.Dump(convo)
+ // log.Printf("summaries: ")
+ // spew.Dump(summaries)
+ // log.Printf("promptMessage: ")
+ // spew.Dump(promptMessage)
+ // log.Printf("currentOrgId: %s\n", currentOrgId)
+
+ var summaryMessages []*types.ExtendedChatMessage
+ var latestSummary *db.ConvoSummary
+	var numMessagesSummarized int
+ var latestMessageSummarizedAt time.Time
+ var latestMessageId string
+ if len(summaries) > 0 {
+ latestSummary = summaries[len(summaries)-1]
+ numMessagesSummarized = latestSummary.NumMessages
+ }
+
+ // log.Println("Generating plan summary - latest summary:")
+ // spew.Dump(latestSummary)
+
+ // log.Println("Generating plan summary - convo:")
+ // spew.Dump(convo)
+
+ numTokens := 0
+
+ if latestSummary == nil {
+ for _, convoMessage := range convo {
+ summaryMessages = append(summaryMessages, &types.ExtendedChatMessage{
+ Role: openai.ChatMessageRoleUser,
+ Content: []types.ExtendedChatMessagePart{
+ {
+ Type: openai.ChatMessagePartTypeText,
+ Text: convoMessage.Message,
+ },
+ },
+ })
+ latestMessageId = convoMessage.Id
+ latestMessageSummarizedAt = convoMessage.CreatedAt
+ numMessagesSummarized++
+ numTokens += convoMessage.Tokens + model.TokensPerMessage + model.TokensPerName
+ }
+ } else {
+ summaryMessages = append(summaryMessages, &types.ExtendedChatMessage{
+ Role: openai.ChatMessageRoleAssistant,
+ Content: []types.ExtendedChatMessagePart{
+ {
+ Type: openai.ChatMessagePartTypeText,
+ Text: latestSummary.Summary,
+ },
+ },
+ })
+
+ numTokens += latestSummary.Tokens + model.TokensPerMessage + model.TokensPerName
+
+ var found bool
+ for _, convoMessage := range convo {
+ if convoMessage.Id == latestSummary.LatestConvoMessageId {
+ found = true
+ continue
+ }
+ if found {
+ summaryMessages = append(summaryMessages, &types.ExtendedChatMessage{
+ Role: openai.ChatMessageRoleUser,
+ Content: []types.ExtendedChatMessagePart{
+ {
+ Type: openai.ChatMessagePartTypeText,
+ Text: convoMessage.Message,
+ },
+ },
+ })
+ numMessagesSummarized++
+ numTokens += convoMessage.Tokens + model.TokensPerMessage + model.TokensPerName
+ }
+ }
+
+ latestConvoMessage := convo[len(convo)-1]
+ latestMessageId = latestConvoMessage.Id
+ latestMessageSummarizedAt = latestConvoMessage.CreatedAt
+ }
+
+ log.Println("generating summary - latestMessageId:", latestMessageId)
+ log.Println("generating summary - latestMessageSummarizedAt:", latestMessageSummarizedAt)
+
+ if userPrompt != "" {
+ if userPrompt != prompts.UserContinuePrompt && userPrompt != prompts.AutoContinuePlanningPrompt && userPrompt != prompts.AutoContinueImplementationPrompt {
+ summaryMessages = append(summaryMessages, &types.ExtendedChatMessage{
+ Role: openai.ChatMessageRoleUser,
+ Content: []types.ExtendedChatMessagePart{
+ {
+ Type: openai.ChatMessagePartTypeText,
+ Text: userPrompt,
+ },
+ },
+ })
+
+ tokens := shared.GetNumTokensEstimate(userPrompt)
+ numTokens += tokens + model.TokensPerMessage + model.TokensPerName
+ }
+ }
+
+ if currentReply != "" {
+ summaryMessages = append(summaryMessages, &types.ExtendedChatMessage{
+ Role: openai.ChatMessageRoleAssistant,
+ Content: []types.ExtendedChatMessagePart{
+ {
+ Type: openai.ChatMessagePartTypeText,
+ Text: currentReply,
+ },
+ },
+ })
+
+ numTokens += params.currentReplyNumTokens + model.TokensPerMessage + model.TokensPerName
+ }
+
+ log.Printf("Calling model for plan summary. Summarizing %d messages\n", len(summaryMessages))
+
+ // log.Println("Generating summary - summary messages:")
+ // spew.Dump(summaryMessages)
+
+ // latestSummaryCh := make(chan *db.ConvoSummary, 1)
+ // active.LatestSummaryCh = latestSummaryCh
+
+ summary, apiErr := model.PlanSummary(clients, authVars, settings, orgUserConfig, config, model.PlanSummaryParams{
+ Conversation: summaryMessages,
+ ConversationNumTokens: numTokens,
+ LatestConvoMessageId: latestMessageId,
+ LatestConvoMessageCreatedAt: latestMessageSummarizedAt,
+ NumMessages: numMessagesSummarized,
+ Auth: params.auth,
+ Plan: plan,
+ ModelPackName: params.modelPackName,
+ ModelStreamId: active.ModelStreamId,
+ SessionId: active.SessionId,
+ }, ctx)
+
+ if apiErr != nil {
+ log.Printf("summarizeConvo: Error generating plan summary for plan %s: %v\n", planId, apiErr)
+ return apiErr
+ }
+
+ log.Printf("summarizeConvo: Summary generated and stored for plan %s\n", planId)
+
+ // log.Println("Generated summary:")
+ // spew.Dump(summary)
+
+ err := db.StoreSummary(summary)
+
+ if err != nil {
+ log.Printf("Error storing plan summary for plan %s: %v\n", planId, err)
+ return &shared.ApiError{
+ Type: shared.ApiErrorTypeOther,
+ Status: http.StatusInternalServerError,
+ Msg: fmt.Sprintf("error storing plan summary for plan %s: %v", planId, err),
+ }
+ }
+
+ // latestSummaryCh <- summary
+
+ return nil
+}
diff --git a/app/server/model/plan/tell_sys_prompt.go b/app/server/model/plan/tell_sys_prompt.go
new file mode 100644
index 0000000000000000000000000000000000000000..b9cd05796fc01d12d00a7c6382d04490a25ea6f3
--- /dev/null
+++ b/app/server/model/plan/tell_sys_prompt.go
@@ -0,0 +1,183 @@
+package plan
+
+import (
+ "errors"
+ "fmt"
+ "log"
+ "plandex-server/model/prompts"
+ "plandex-server/types"
+ shared "plandex-shared"
+
+ "github.com/sashabaranov/go-openai"
+)
+
+const AllTasksCompletedMsg = "All tasks have been completed. There is no current task to implement."
+
+type getTellSysPromptParams struct {
+ planStageSharedMsgs []*types.ExtendedChatMessagePart
+ planningPhaseOnlyMsgs []*types.ExtendedChatMessagePart
+ implementationMsgs []*types.ExtendedChatMessagePart
+ contextTokenLimit int
+ dryRunWithoutContext bool
+}
+
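+// getTellSysPrompt assembles the system prompt parts for the current tell
+// stage and planning phase, and returns an error if planning-stage and
+// implementation-stage message sets are mixed.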
+func (state *activeTellStreamState) getTellSysPrompt(params getTellSysPromptParams) ([]types.ExtendedChatMessagePart, error) {
+ planningSharedMsgs := params.planStageSharedMsgs
+ plannerOnlyMsgs := params.planningPhaseOnlyMsgs
+ implementationMsgs := params.implementationMsgs
+ contextTokenLimit := params.contextTokenLimit
+ req := state.req
+ active := state.activePlan
+ currentStage := state.currentStage
+
+ sysParts := []types.ExtendedChatMessagePart{}
+
+ createPromptParams := prompts.CreatePromptParams{
+ ExecMode: req.ExecEnabled,
+ AutoContext: req.AutoContext,
+ IsUserDebug: req.IsUserDebug,
+ IsApplyDebug: req.IsApplyDebug,
+ IsGitRepo: req.IsGitRepo,
+ ContextTokenLimit: contextTokenLimit,
+ }
+
+ // log.Println("getTellSysPrompt - prompt params:", spew.Sdump(params))
+
+ if currentStage.TellStage == shared.TellStagePlanning {
+ if len(planningSharedMsgs) == 0 && !params.dryRunWithoutContext {
+ log.Println("planningSharedMsgs is empty - required for planning stage")
+ return nil, fmt.Errorf("planningSharedMsgs is empty - required for planning stage")
+ }
+
+ for _, msg := range planningSharedMsgs {
+ sysParts = append(sysParts, *msg)
+ }
+
+ if currentStage.PlanningPhase == shared.PlanningPhaseContext {
+ log.Println("Planning phase is context -- adding auto context prompt")
+
+ var txt string
+ if req.IsChatOnly {
+ txt = prompts.GetAutoContextChatPrompt(createPromptParams)
+ } else {
+ txt = prompts.GetAutoContextTellPrompt(createPromptParams)
+ }
+
+ sysParts = append(sysParts, types.ExtendedChatMessagePart{
+ Type: openai.ChatMessagePartTypeText,
+ Text: txt,
+ CacheControl: &types.CacheControlSpec{
+ Type: types.CacheControlTypeEphemeral,
+ },
+ })
+ } else if currentStage.PlanningPhase == shared.PlanningPhaseTasks {
+
+ var txt string
+ if req.IsChatOnly {
+ txt = prompts.GetChatSysPrompt(createPromptParams)
+ } else {
+ txt = prompts.GetPlanningPrompt(createPromptParams)
+ }
+
+ if len(state.subtasks) > 0 {
+ sysParts = append(sysParts, types.ExtendedChatMessagePart{
+ Type: openai.ChatMessagePartTypeText,
+ Text: txt,
+ })
+ sysParts = append(sysParts, types.ExtendedChatMessagePart{
+ Type: openai.ChatMessagePartTypeText,
+ Text: state.formatSubtasks(),
+ CacheControl: &types.CacheControlSpec{
+ Type: types.CacheControlTypeEphemeral,
+ },
+ })
+ } else {
+ sysParts = append(sysParts, types.ExtendedChatMessagePart{
+ Type: openai.ChatMessagePartTypeText,
+ Text: txt,
+ CacheControl: &types.CacheControlSpec{
+ Type: types.CacheControlTypeEphemeral,
+ },
+ })
+ }
+
+ if !req.IsChatOnly {
+ if len(active.SkippedPaths) > 0 {
+ skippedPrompt := prompts.SkippedPathsPrompt
+ for skippedPath := range active.SkippedPaths {
+ skippedPrompt += fmt.Sprintf("- %s\n", skippedPath)
+ }
+ sysParts = append(sysParts, types.ExtendedChatMessagePart{
+ Type: openai.ChatMessagePartTypeText,
+ Text: skippedPrompt,
+ })
+ }
+ }
+ }
+
+ for _, msg := range plannerOnlyMsgs {
+ sysParts = append(sysParts, *msg)
+ }
+
+ if len(implementationMsgs) > 0 {
+ return nil, fmt.Errorf("implementationMsgs not supported during planning phase")
+ }
+
+ } else if currentStage.TellStage == shared.TellStageImplementation {
+ if state.currentSubtask == nil {
+ return nil, errors.New(AllTasksCompletedMsg)
+ }
+
+ if len(state.subtasks) > 0 {
+ sysParts = append(sysParts, types.ExtendedChatMessagePart{
+ Type: openai.ChatMessagePartTypeText,
+ Text: prompts.GetImplementationPrompt(state.currentSubtask.Title),
+ })
+ sysParts = append(sysParts,
+ types.ExtendedChatMessagePart{
+ Type: openai.ChatMessagePartTypeText,
+ Text: state.formatSubtasks(),
+ CacheControl: &types.CacheControlSpec{
+ Type: types.CacheControlTypeEphemeral,
+ },
+ })
+ } else {
+ sysParts = append(sysParts, types.ExtendedChatMessagePart{
+ Type: openai.ChatMessagePartTypeText,
+ Text: prompts.GetImplementationPrompt(state.currentSubtask.Title),
+ CacheControl: &types.CacheControlSpec{
+ Type: types.CacheControlTypeEphemeral,
+ },
+ })
+ }
+
+ if !req.IsChatOnly {
+ if len(active.SkippedPaths) > 0 {
+ skippedPrompt := prompts.SkippedPathsPrompt
+ for skippedPath := range active.SkippedPaths {
+ skippedPrompt += fmt.Sprintf("- %s\n", skippedPath)
+ }
+ sysParts = append(sysParts, types.ExtendedChatMessagePart{
+ Type: openai.ChatMessagePartTypeText,
+ Text: skippedPrompt,
+ })
+ }
+ }
+
+ if implementationMsgs != nil {
+ for _, msg := range implementationMsgs {
+ sysParts = append(sysParts, *msg)
+ }
+ } else if !params.dryRunWithoutContext {
+ log.Println("implementationMsgs is nil - required for implementation stage")
+ return nil, fmt.Errorf("implementationMsgs is nil - required for implementation stage")
+ }
+
+ if planningSharedMsgs != nil {
+ log.Println("planningSharedMsgs not supported during implementation stage - only basic or smart context is supported")
+ return nil, fmt.Errorf("planningSharedMsgs not supported during implementation stage - only basic or smart context is supported")
+ }
+ }
+
+ return sysParts, nil
+}
diff --git a/app/server/model/plan/utils.go b/app/server/model/plan/utils.go
new file mode 100644
index 0000000000000000000000000000000000000000..5d08cb3a408e47eb45878cfe207feca4dff8e0eb
--- /dev/null
+++ b/app/server/model/plan/utils.go
@@ -0,0 +1,26 @@
+package plan
+
+import (
+ "plandex-server/types"
+ "strings"
+)
+
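+// StripBackticksWrapper removes a wrapping fenced code block from s, whether
+// the fence stands alone or follows a file path label line. For example
+// (content is illustrative):
+//
+//	```go
+//	package main
+//	```
+//
+// becomes just "package main". Strings without a wrapping fence are returned
+// unchanged.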
+func StripBackticksWrapper(s string) string {
+ check := strings.TrimSpace(s)
+ split := strings.Split(check, "\n")
+
+ if len(split) > 2 {
+ firstLine := strings.TrimSpace(split[0])
+ secondLine := strings.TrimSpace(split[1])
+ lastLine := strings.TrimSpace(split[len(split)-1])
+ if types.LineMaybeHasFilePath(firstLine) && strings.HasPrefix(secondLine, "```") {
+ if lastLine == "```" {
+				// Drop the file path label line along with the opening and closing fences.
+				return strings.Join(split[2:len(split)-1], "\n")
+ }
+ } else if strings.HasPrefix(firstLine, "```") && lastLine == "```" {
+ return strings.Join(split[1:len(split)-1], "\n")
+ }
+ }
+
+ return s
+}
diff --git a/app/server/model/prompts/apply_exec.go b/app/server/model/prompts/apply_exec.go
new file mode 100644
index 0000000000000000000000000000000000000000..554845213f35d0e1d6d6b6ac607a0ac5fe10eda1
--- /dev/null
+++ b/app/server/model/prompts/apply_exec.go
@@ -0,0 +1,1171 @@
+package prompts
+
+const ApplyScriptSharedPrompt = `
+## _apply.sh file and command execution
+
+**Execution mode is enabled.**
+
+In addition to creating and updating files with code blocks, you can also execute commands on the user's machine by writing to another *special path*: _apply.sh
+
+### Core _apply.sh Concepts
+
+The _apply.sh script is a special file that allows execution of commands on the user's machine. This script will be executed EXACTLY ONCE after ALL files from ALL subtasks have been created or updated. The entire script runs as a single unit in the root directory of the plan.
+
+#### Core Restrictions
+
+You ABSOLUTELY MUST NOT:
+- Use _apply.sh to create files or directories (use code blocks instead - necessary directories will be created automatically)
+- Use _apply.sh for file operations (move/remove/reset) on files in context
+- Include shebang lines or error handling (this is handled externally)
+- Give _apply.sh execution privileges (this is handled externally)
+- Tell users to run the script (it runs automatically)
+- Use separate script files unless specifically requested or absolutely necessary due to complexity
+
+#### Safety and Security
+
+BE CAREFUL AND CONSERVATIVE when making changes to the user's machine:
+- Only make changes that are strictly necessary for the plan
+- If a command is highly risky, tell the user to run it themselves
+- Do not run malicious commands, commands that will harm the user's machine, or commands intended to cause harm to other systems, networks, or people.
+- Prefer local changes over global system changes (e.g., npm install --save-dev over --global)
+- Only modify files/directories in the root directory of the plan unless specifically directed otherwise
+- Unless some commands are risky/dangerous, include ALL commands in _apply.sh rather than telling users to run them later
+
+#### Avoid User Prompts
+
+Avoid user prompts. Make reasonable default choices rather than prompting the user for input. The _apply.sh script MUST be able to run successfully in a non-interactive context.
+
+#### Keep It Lightweight And Simple
+
+The _apply.sh script should be lightweight and shouldn't do too much work. *Offload to separate files* in the plan if a lot of scripting is needed. _apply.sh doesn't get written to the user's project, so anything that might be valuable to save, reuse, and version control should be in a separate file. You can chmod and execute those separate files from _apply.sh. _apply.sh is for 'throwaway' commands that only need to be run once after the plan is applied to the user's project, like installing dependencies, running tests, or running a start command. It shouldn't be complex.
+
+Do not use fancy bash constructs that can be difficult to debug or cause portability problems. Keep it very straightforward so there's a 0% chance of bugs in the _apply.sh script.
+
+ABSOLUTELY DO NOT use the _apply.sh script to generate config files, project files, instructions, documentation, or any other necessary files. The _apply.sh script MUST NOT create files or directories—this must be done ONLY with code blocks. Create those files like any other files in the plan using code blocks. Do NOT include any large context blocks of any kind in the _apply.sh script. Use separate files for large content. Keep the _apply.sh script lightweight, simple, and focused only on executing necessary commands.
+
+#### Startup Logic
+
+` + ApplyScriptStartupLogic + `
+
+❌ DO NOT include complex startup logic or commands with flags in _apply.sh:
+
+- _apply.sh:
+
+echo "Importing project resources..."
+godot --headless --quit
+
+# Check if the main scene file exists
+if [ ! -f "scenes/main.tscn" ]; then
+ echo "Error: Main scene file 'scenes/main.tscn' not found."
+ exit 1
+fi
+
+echo "Validating main scene file..."
+if ! godot --headless --check-only --quit scenes/main.tscn; then
+ echo "Error: The main scene file 'scenes/main.tscn' contains errors."
+ exit 1
+fi
+
+echo "Checking for resource loading issues..."
+if ! godot --headless --check-only --quit project.godot; then
+ echo "Error: The project contains resource loading issues."
+ exit 1
+fi
+
+echo "Starting Godot project..."
+godot --position 100,100 --resolution 1280x720 --verbose
+
+
+✅ DO include complex startup logic or commands with flags in a *separate file* in the project, created with a *code block*, not in _apply.sh:
+
+- run.sh:
+
+#!/bin/bash
+set -euo pipefail
+
+echo "Importing project resources..."
+godot --headless --quit
+
+# Check if the main scene file exists
+if [ ! -f "scenes/main.tscn" ]; then
+ echo "Error: Main scene file 'scenes/main.tscn' not found."
+ exit 1
+fi
+
+echo "Validating main scene file..."
+if ! godot --headless --check-only --quit scenes/main.tscn; then
+ echo "Error: The main scene file 'scenes/main.tscn' contains errors."
+ exit 1
+fi
+
+echo "Checking for resource loading issues..."
+if ! godot --headless --check-only --quit project.godot; then
+ echo "Error: The project contains resource loading issues."
+ exit 1
+fi
+
+echo "Starting Godot project..."
+godot --position 100,100 --resolution 1280x720 --verbose
+
+
+- _apply.sh:
+
+chmod +x run.sh
+./run.sh
+
+
+#### Command Preservation Rules
+
+The _apply.sh script accumulates commands during the plan:
+- ALL commands must be preserved until successful application
+- Each update ADDS to or MODIFIES existing commands but NEVER removes them
+- When updating an existing command, modify it rather than duplicating it
+- After successful application, the script resets to empty
+- Current state and history of previously executed scripts will be provided in the prompt
+- Use script history to inform what commands might need to be re-run
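+
+For example (package names are illustrative): if an earlier response added 'npm install --save-dev jest' and a later subtask needs another package, update that line to 'npm install --save-dev jest supertest' rather than appending a second install command.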
+
+#### Dependencies and Tools
+
+When handling tools and dependencies:
+
+1. Context-based Assumptions:
+- Make reasonable assumptions about installed tools based on:
+ * The user's operating system
+ * Files and paths in the context
+ * Project structure and existing configuration
+ * Conversation history
+- For example, if working with an existing Node.js project (has package.json), do NOT include commands to install Node.js/npm
+- Similarly for other languages/frameworks: don't install Go for a Go project, Python for a Python project, etc.
+
+2. Checking for Tools:
+- For tools that aren't clearly present in context:
+ * Always check if the tool is installed before using it
+ * Either install missing tools or exit with a clear error
+ * Make the check specific and informative
+- If no commands need to be run, do not write anything to _apply.sh
+
+3. Dependency Management:
+- DO NOT install dependencies that are already used in the project
+- Only install new dependencies that are specifically needed for new features
+- When working with an entirely new project, you can include basic tooling installation
+- When adding to an existing project, assume core tooling is present
+
+For example, in an existing Node.js project:
+❌ DO NOT: Install Node.js or npm
+❌ DO NOT: Reinstall dependencies listed in package.json
+✅ DO: Install only new packages needed for new features
+✅ DO: Check for specific tools needed for new functionality
+
+#### Avoid Heavy Commands Unless Directed
+
+You must be conservative about running 'heavy' commands like tests that could be slow or resource intensive to run.
+
+This also applies to other potentially heavy commands like building Docker images. Use your best judgement.
+
+#### Additional Requirements
+
+Script execution:
+- Assumes bash/zsh shell is available (OS/shell details provided in prompt)
+- The script runs in the root directory of the plan
+- All commands execute as a single unit after all file operations are complete
+
+Special cases:
+- If the plan includes other script files aside from _apply.sh, they must be given execution privileges and run from _apply.sh
+- Only use separate script files if specifically requested or if the number of commands is too large for a single _apply.sh
+- When using separate scripts, they must be run from _apply.sh, not manually by the user
+
+Running programs:
+- If appropriate, include commands to run the actual program
+- For example: after 'make', include the command to run the program
+- After 'npm install', include 'npm start' if appropriate
+- Use judgment on the best way to run/execute the implemented plan
+- Running web servers and browsers:
+ * Launch the default browser with the appropriate localhost URL after starting the server
+ * When writing a web server that connects to a port, use a port environment variable or command line argument to specify the port number. If you include a fallback port, you can use a common port in the context of the project like 3000 or 8080. Include a port override in the _apply.sh script that uses an UNCOMMON port number that is unlikely to be in use.
+ * Try multiple ports so if a port is in use, the server won't fail to start
+ * When starting a web server that needs a browser launched:
+ * CRITICAL: ALWAYS run the server in the background using & or the script will block and never reach the browser launch
+ * Add a brief sleep to allow the server to start (use your judgment, based on the server type and the complexity of its startup process, to decide how long is reasonable)
+ * ALWAYS use the special command 'plandex browser [urls...]' to launch the browser with one or more URLs. This command is provided by Plandex and is available on all operating systems. Substitute the actual URL or URLs you want to open in place of [urls...]. This special command *blocks* and streams the browser output to the console. So if you need to run other commands *after* the browser is launched, you must background the browser command and correctly handle cleanup like other background processes. If the browser command exits with an error, kill any other background processes and exit the entire script with a non-zero exit code.
+
+ Example:
+ # INCORRECT - will block and never launch browser:
+ npm start
+ plandex browser http://localhost:$PORT
+
+ # CORRECT - runs in background, waits, then launches browser:
+ npm start &
+ SERVER_PID=$!
+ sleep 3
+ plandex browser http://localhost:$PORT || {
+ kill $SERVER_PID
+ exit 1
+ }
+ wait $SERVER_PID
+
+ NOTE: when running anything in the background, you must handle the possibility that the process might fail so that no orphaned processes remain.
+ * ALWAYS use 'plandex browser' to open the browser and load URLs. Do NOT use 'open' or 'xdg-open' or any other command to open the browser. USE 'plandex browser' instead.
+ * When using the 'plandex browser' command, you ABSOLUTELY MUST EXPLICITLY kill all other processes and exit the script with a non-zero exit code if the browser command fails. It is CRITICAL that you DO NOT omit this. The 'plandex browser' command will fail if there are any uncaught errors or console.error logs in the browser.
+ * CRUCIAL NOTE: the _apply.sh script will be run with 'set -e' (it will be set for you, don't add it yourself), so you must DIRECTLY handle errors in foreground commands and perform cleanup in a '|| { ... }' block immediately when the command fails. *This includes the 'plandex browser' command.* Do NOT omit the '|| { ... }' block for 'plandex browser' or any other foreground command.
+
+ Example:
+ ## INCORRECT - will not kill other processes and will not exit on browser failure:
+ npm start &
+ SERVER_PID=$!
+ sleep 3
+ plandex browser http://localhost:$PORT
+ wait $SERVER_PID
+
+ ## INCORRECT - will not cleanup on failure due to 'set -e':
+ npm start &
+ SERVER_PID=$!
+ sleep 3
+ plandex browser http://localhost:$PORT
+
+ if [ $? -ne 0 ]; then
+ kill $SERVER_PID
+ exit 1
+ fi
+ wait $SERVER_PID
+
+ ## CORRECT - will kill other processes and exit on browser failure, correctly handles 'set -e' with '|| { ... }' block:
+ npm start &
+ SERVER_PID=$!
+ sleep 3
+ plandex browser http://localhost:$PORT || {
+ kill $SERVER_PID
+ exit 1
+ }
+ wait $SERVER_PID
+`
+
+const ApplyScriptPlanningPrompt = ApplyScriptSharedPrompt + `
+
+## Planning _apply.sh Updates
+
+When planning tasks that involve command execution, always consider the natural hierarchy of commands:
+1. First install any required packages/dependencies
+2. Then run any necessary build commands
+3. Finally run any test/execution commands
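+
+For example (commands illustrative):
+
+npm install some-new-package   # 1. install
+npm run build                  # 2. build
+npm test                       # 3. test/run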
+
+### Good Practices for Task Organization
+
+When organizing subtasks that involve writing to _apply.sh:
+- Write dependency installations close to the subtasks that introduce them
+- Group related commands together when they're part of the same logical change
+- Commands like 'make', 'npm install', or 'npm run build' that affect the whole project should appear only ONCE
+- If adding a command that's already in _apply.sh, plan to update the existing command rather than duplicating it
+
+### Bad Practices to Avoid
+
+DO NOT:
+- Plan to write the same command multiple times (e.g., 'make' after each file update)
+- Create separate subtasks just to write a single command to _apply.sh
+- Add new 'npm install' commands when you could update an existing one
+- Plan to run the same program multiple times
+
+### Example of Good Task Organization
+
+Good task structure:
+1. Add authentication feature
+ - Update auth-related files
+ - Write to _apply.sh: npm install auth-package
+
+2. Add other features
+ - Update feature files
+ - Write to _apply.sh: npm install other-package
+
+3. Build and run
+ - Write to _apply.sh:
+ npm run build
+ npm start
+
+### Task Planning Guidelines
+
+When breaking down tasks:
+- Remember the single execution model - all commands run after all files are updated
+- Consider dependencies between tasks and their required commands
+- Group related file changes and their associated commands together
+- Think about the logical ordering of commands
+- Include _apply.sh in the 'Uses:' list for any subtask that will modify it
+
+### Command Strategy
+
+Think strategically about command execution:
+- Plan command ordering based on dependencies
+- Consider what will be needed after file changes are complete
+- Group related commands together
+- Plan for proper error handling and dependency checking
+- Consider the user's environment and likely installed tools
+- For web applications and web servers:
+ * Use port environment variables or command line arguments to specify the port number. If you include a fallback port, you can use a common port in the context of the project like 3000 or 8080. Include a port override in the _apply.sh script that uses an UNCOMMON port number that is unlikely to be in use.
+ * Include default browser launch commands after server start
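+
+For example (port value illustrative), a port override in _apply.sh might look like:
+
+PORT=4719 npm start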
+` + ApplyScriptResetUpdatePlanningPrompt
+
+const ApplyScriptImplementationPrompt = ApplyScriptSharedPrompt + `
+
+## Implementing _apply.sh Updates
+
+Remember that the _apply.sh script accumulates commands during the plan and executes them as a single unit. When adding new commands, carefully consider:
+- Dependencies between commands (what needs to run before what)
+- Whether similar commands already exist that should be updated rather than duplicated
+- How your commands fit into the overall hierarchy (install → build → test/run)
+
+### Creating and Updating _apply.sh
+
+The script must be written using a correctly formatted code block:
+
+- _apply.sh:
+
+# Code goes here
+
+
+CRITICAL rules:
+- ALWAYS include the file path label exactly as shown above
+- NEVER leave out the file path label when writing to _apply.sh
+- There must be NO lines between the file path and opening tag
+- Use lang="bash" in the tag
+
+When writing to _apply.sh, include an '### Action Explanation Format' section, a file path label, and a tag that includes both a 'lang' attribute and a 'path' attribute, as described in the instructions above.
+
+If the current state of the _apply.sh script is *empty*, follow ALL instructions for *creating a new file* when writing to _apply.sh. Include the *entire* _apply.sh script in the code block.
+
+If the current state of the _apply.sh script is *not empty*, follow ALL instructions for *updating an existing file* when writing to _apply.sh.
+
+### Command Output and Error Handling
+
+DO NOT hide or filter command output. For example, DO NOT do this:
+
+- _apply.sh:
+
+if ! make clean && make; then
+ echo "Error: Compilation failed"
+ exit 1
+fi
+
+
+Instead, show all command output:
+
+- _apply.sh:
+
+make clean
+make
+
+
+### Script Organization and Comments
+
+The script should be:
+- Written defensively to fail gracefully
+- Organized logically with similar commands grouped
+- Commented only when necessary for understanding
+- Clear and maintainable
+
+Include logging ONLY for:
+- Error conditions
+- Long-running operations
+- DO NOT log script start/end (handled externally)
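+
+For example (illustrative), logging before a long-running operation:
+
+echo "Compiling project (this may take a while)..."
+make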
+
+### Command Preservation
+
+When updating an existing script:
+1. Review current contents carefully
+2. Preserve ALL existing commands exactly
+3. Add new commands while maintaining existing ones
+4. Verify no commands were accidentally removed/modified
+
+Example of proper update:
+
+Starting script:
+npm install typescript
+npm run build
+
+Adding test command (CORRECT):
+npm install typescript
+npm run build
+npm test
+
+Adding test command (INCORRECT - NEVER DO THIS):
+npm test
+
+### Tool and Dependency Checks
+
+When checking for required tools:
+
+✅ DO:
+- _apply.sh:
+
+if ! command -v tool > /dev/null; then
+ echo "Error: tool is not installed"
+ exit 1
+fi
+
+
+✅ DO group related dependency installations:
+- _apply.sh:
+
+npm install --save-dev \
+ package1 \
+ package2 \
+ package3
+
+
+❌ DO NOT hide command output:
+- _apply.sh:
+
+npm install --quiet package1
+
+
+### Examples
+
+Good example of complete script:
+
+- _apply.sh:
+
+# Check for required tools
+if ! command -v node > /dev/null; then
+ echo "Error: node is not installed"
+ exit 1
+fi
+
+if ! command -v npm > /dev/null; then
+ echo "Error: npm is not installed"
+ exit 1
+fi
+
+# Install dependencies
+echo "Installing project dependencies..."
+npm install --save-dev \
+ "@types/react@^18.0.0" \
+ "typescript@^4.9.0" \
+ "prettier@^2.8.0"
+
+# Find an available port (nc -z succeeds when the port is already in use)
+export PORT=3400
+while nc -z localhost $PORT && [ $PORT -lt 3410 ]; do
+  export PORT=$((PORT + 1))
+done
+
+# Build and start in background
+npm run build
+npm start &
+SERVER_PID=$!
+
+# Wait briefly for server to be ready
+sleep 3
+
+# Launch browser
+plandex browser http://localhost:$PORT || {
+ kill $SERVER_PID
+ exit 1
+}
+wait $SERVER_PID
+
+
+Note the usage of & to run the server in the background. This is CRITICAL to ensure the script does not block and allows the browser to launch.
+
+* If you run multiple processes in parallel with &, you ABSOLUTELY MUST handle partial failure by immediately exiting the script if any process returns a non-zero code.
+ * For example, store process PIDs, wait on all processes, check $?, kill all processes if a failure is detected, and exit with that code.
+EXAMPLE:
+
+- _apply.sh:
+
+# Build assets first
+npm install
+npm run build
+
+# Start Node in background, maybe with --inspect
+echo "Starting Node server with inspector on port 9229..."
+node --inspect=0.0.0.0:9229 server.js &
+pidNode=$!
+
+# Start Python app in background
+echo "Starting Python service..."
+python main.py &
+pidPy=$!
+
+# Wait for both processes, capturing any failure
+# ('|| exit_code=$?' prevents 'set -e' from exiting before cleanup runs)
+echo "Waiting for Node and Python to exit..."
+exit_code=0
+wait $pidNode || exit_code=$?
+wait $pidPy || exit_code=$?
+
+if [ $exit_code -ne 0 ]; then
+  echo "⚠️ One process exited with an error. Stopping everything..."
+  kill $pidNode $pidPy 2>/dev/null
+  exit $exit_code
+fi
+
+
+Note on this example: no advanced job control (e.g. setsid, disown) is needed because the wrapper script handles cleanup. The processes remain in the same process group and are killed when the wrapper script exits. Also notice that if either job fails, the script kills any remaining processes and exits with the failing exit code.
+
+If you only run one background job or run them sequentially, you do not need partial-failure logic. Only include logic for handling partial failures if it's really necessary—otherwise, keep it simple: you can just run the commands and let the wrapper script handle cleanup. For example:
+
+- _apply.sh:
+
+# Run the server in the background
+npm start &
+
+# Run the tests in the foreground
+npm test
+
+
+In this case, the wrapper script will handle cleanup automatically.
+
+- Plandex automatically wraps ` + "`_apply.sh`" + ` in a script that enables job control and kills all processes if the user interrupts. Do NOT add ` + "`trap`" + `, ` + "`setsid`" + `, ` + "`nohup`" + `, or ` + "`disown`" + ` commands.
+- If you run multiple processes (e.g., ` + "`node server.js &`" + ` plus ` + "`python main.py &`" + `), you must handle partial failures by checking their exit codes. For example:
+ - ` + "`pidA=$!`" + ` after launching the first process
+ - Launch the second, ` + "`pidB=$!`" + `
+ - Use ` + "`wait $pidA $pidB`" + ` or check each PID. If one fails (` + "`exit_code != 0`" + `), kill the other.
+- If you only have a single process to run, you may simply do ` + "`command &`" + ` and then ` + "`wait`" + `. The wrapper script ensures no leftover processes remain if the user presses Ctrl+C.
+- Don't run commands that may daemonize themselves or change their process group unless absolutely necessary since it complicates the cleanup process. The wrapper script cannot reliably handle processes that daemonize themselves or change their process group, so if you really must run such commands, you MUST ALWAYS include code to ensure they are cleaned up properly and reliably before exiting.
+
+* You will be provided with the user's OS in the prompt. DO NOT include commands for other operating systems, just the user's specified OS.
+* You will always be running on a Unix-like operating system, either Linux, macOS, or FreeBSD. You'll never be running on Windows.
+` + ExitCodePrompt + ApplyScriptResetUpdateImplementationPrompt
+
+const ApplyScriptResetUpdateSharedPrompt = `
+## Script State and Reset Behavior
+
+When the user applies the plan, the _apply.sh will be executed.
+
+CRITICAL: The _apply.sh script accumulates ALL commands needed for the plan. Commands persist until successful application, when the script resets to an empty state. This reset ONLY happens after successful application.
+
+The current state of the _apply.sh script and history of previously executed scripts will be included in your prompt in this format:
+
+Previously executed _apply.sh:
+` + "```" + `
+npm install typescript express
+npm run build
+npm start
+` + "```" + `
+
+Previously executed _apply.sh:
+` + "```" + `
+npm install jest
+npm test
+` + "```" + `
+
+*Current* state of _apply.sh script:
+[empty]
+
+(Note that when the *Current* state of _apply.sh is empty, it will be shown as "[empty]" in the context.)
+
+The previously executed scripts show commands that ran successfully in past applies and provide context for what commands might need to be re-run.
+`
+
+const ApplyScriptResetUpdatePlanningPrompt = ApplyScriptResetUpdateSharedPrompt + `
+## Planning with Script State
+
+When planning tasks (and subtasks) that involve command execution, you must consider how the ` + "`_apply.sh`" + ` script evolves during the plan. ` + "`_apply.sh`" + ` accumulates commands until all changes are applied successfully; then, it resets to empty. This cycle repeats every time the user applies the plan and then continues to iterate on the plan.
+
+### 1. Command Accumulation
+- ALL commands in _apply.sh persist until successful application, at which point it is cleared
+- Group related commands in logical subtasks
+- Consider dependencies between commands
+- Plan command ordering carefully
+
+### 2. After Reset (Post-Success)
+- **Script Empties**: Once ` + "`_apply.sh`" + ` has been successfully executed, it's cleared.
+- **No Unnecessary Repeats**: For future tasks, avoid re-adding commands (e.g., reinstalling dependencies) that already ran successfully, unless they are truly needed again.
+- **Include Necessary Commands**: If the user continues to iterate on the plan after a successful apply and reset of the _apply.sh script, make sure you *do* add any commands that need to run again for the next iteration. For example, if there is a command that runs the program, and the _apply.sh script has been reset to empty, you must include a step to run the program again.
+
+### Common Command Patterns
+
+- **Build Commands**: Run after source changes (e.g., ` + "`make`" + `, ` + "`npm run build`" + `, ` + "`cargo build`" + `).
+- **Test Commands**: Run after code changes that require verification (e.g., ` + "`npm test`" + `, ` + "`go test`" + `, etc.).
+- **Startup/Execution**: Start or run the program once built (e.g., ` + "`./app`" + `, ` + "`npm start`" + `).
+- **Database Migrations**: If schema changes are involved, add relevant migration commands.
+- **Package/Dependency Installs**: Add or update only if new libraries or tools are introduced.
+- **Web Server**: Start the server again after source changes, dependency updates, etc.
+
+### Example of Task Organization
+
+1. **Add Authentication Feature**
+ - Update or create relevant files (e.g. ` + "`auth_controller.js`" + `, ` + "`auth_routes.py`" + `).
+ - In ` + "`_apply.sh`" + `, install new auth-related dependencies (e.g. ` + "`npm install auth-lib`" + `).
+ - Include build or test commands if needed.
+
+2. **Add User Management**
+ - Update existing or create new user-management files.
+ - If new libraries are introduced, add them in ` + "`_apply.sh`" + ` (avoid re-installing old ones).
+ - Update existing build/test steps if relevant.
+
+3. **Final Build and Run**
+ - In ` + "`_apply.sh`" + `, include all final build commands (e.g. ` + "`make`" + `, ` + "`npm run build`" + `).
+ - Run the application if desired (e.g. ` + "`npm start`" + ` or ` + "`./myapp`" + `).
+ - If tests have changed, also include them here (e.g. ` + "`npm test`" + `).
+
+### Good Practices
+
+- **Check Script State**: If ` + "`_apply.sh`" + ` is not empty, modify existing commands in place. If it's empty (post-success), add only new or relevant commands.
+- **Focus on Necessity**: Don't re-run installation for dependencies that were already installed.
+- **Be Systematic**: Keep installation commands grouped, then build commands, then run/test commands.
+
+### Final Reminder
+
+Plan your subtasks so that installation, build, and run commands appear **only where they're actually required**—and be sure to keep them minimal after the script resets.
+
+### Always consider _apply.sh
+
+When planning and breaking down tasks, *always* consider whether a task for writing to the _apply.sh file is needed. Consider the current state of the _apply.sh file when making this decision.
+
+Imagine this scenario:
+
+1. You have previously made a plan for the user which included an _apply.sh file.
+2. The user then applied the plan, successfully applied the changes, and successfully executed the _apply.sh script, causing it to be reset to empty.
+3. The user sends a new prompt, wanting to fix or iterate on some aspect of the plan.
+
+Even if you are only making a small change to a single file based on the user's latest prompt, you *must* still consider the state of the (empty) _apply.sh file and whether it needs to be created again.
+
+If your updates to the _apply.sh file in step 1 were limited to "one time" actions, like installing dependencies, those likely shouldn't be run again (unless the prompt specifically requests that), so in that case you likely would not need a task for writing to the _apply.sh file.
+
+However, if your updates to the _apply.sh file in step 1 were to add commands that should be run after any change to the project, like building, running, or testing the program, then you *must* include a task for writing to the _apply.sh file.
+
+You may find that you are including a task for writing the same commands to _apply.sh for each new iteration of the plan after a successful apply and reset—this can be correct and expected.
+
+🔄 CRITICAL: _apply.sh RESET BEHAVIOR
+Remember, after successful execution, _apply.sh ALWAYS resets to empty.
+You MUST ALWAYS consider adding build/run commands again after ANY source changes.
+If the _apply.sh script previously had a build/run command, and then it was reset to empty after being successfully executed, and then you make ANY subsequent code changes, you MUST add a new build/run command to the _apply.sh file.
+
+CRITICAL: If you have run the project previously with the _apply.sh script *and* the _apply.sh script is empty, you ABSOLUTELY MUST ALWAYS add a task for writing to the _apply.sh file. DO NOT OMIT THIS STEP. **THAT SAID** you must *evaluate* the current state of the _apply.sh file and *only* update it if necessary. Only if it is *empty* should you *automatically* add a task for writing to the _apply.sh file. Otherwise, consider the current state of the _apply.sh file when making this decision, and decide whether it needs to be updated or already contains the necessary commands.
+
+INCORRECT FOLLOW UP:
+### Tasks
+1. Fix bug in source.c
+Uses: ` + "`source.c`" + `
+
+
+CORRECT FOLLOW UP:
+### Commands
+
+The _apply.sh script is empty after the previous execution. Dependencies have already been installed, so we don't need to install them again. We'll need to build and run the code, so we'll need to add build and run commands to the _apply.sh file. I'll add this step to the plan.
+
+### Tasks
+1. Fix bug in source.c
+Uses: ` + "`source.c`" + `
+
+2. 🚀 Build and run updated code
+Uses: ` + "`_apply.sh`" + `
+
+
+BEFORE COMPLETING ANY PLAN:
+Consider:
+1. Are you modifying source files? If YES:
+ - Would it make sense to build/run the code after these changes?
+ - If so, is there a task for writing build/run commands to _apply.sh?
+ - If you're unsure what commands to run, better to omit them than guess
+2. Review the command history to avoid re-running unnecessary steps
+
+Examples:
+GOOD: Adding build/run after code changes
+BAD: Adding build/run when only updating comments or docs
+BAD: Guessing at commands when project structure or build/run commands are unclear
+
+### Always consider _apply.sh execution history
+
+Each version of _apply.sh that has been executed successfully is included in the context. Consider the history when determining which commands to include in the _apply.sh file. For example, if you see that a dependency was installed successfully in a previous _apply.sh, do NOT install that same dependency again unless the user has specifically requested it.
+
+**IMMEDIATELY BEFORE any '### Tasks' section, you MUST output a '### Commands' section**
+
+In the '### Commands' section, you MUST assess whether any commands should be written to _apply.sh during the plan based on the reasoning above. Do NOT omit this section.
+
+If you determine that commands should be added or updated in _apply.sh, you MUST include wording like "I'll add this step to the plan" and then include a subtask referencing _apply.sh in the '### Tasks' section.
+
+Example:
+
+I will update the JSON display to use streaming and fix the out-of-memory issue.
+
+### Commands
+
+_apply.sh is empty. I'll add commands to build and run the updated code. I'll add this step to the plan.
+
+### Tasks
+
+1. Update JSON display to use streaming
+Uses: ` + "`source.c`" + `
+
+2. 🚀 Build and run updated code
+Uses: ` + "`_apply.sh`" + `
+
+
+Another example (with no commands):
+
+### Commands
+
+It's not totally clear to me from the context how to build or run the project, so I'll leave this step to you.
+
+### Tasks
+
+1. Update JSON display to use streaming
+Uses: ` + "`source.c`" + `
+
+
+
+---
+
+### Command Inclusion Decision Tree
+
+When deciding whether to add commands to _apply.sh (and which ones), follow this guidance:
+
+1. **Are you modifying source/config files?**
+ * **No** → You typically don't need commands (e.g., if you're just updating docs or comments).
+ * **Yes** → Continue to step 2.
+
+2. **Would these changes benefit from a rebuild/run?**
+ * **No** (e.g., trivial style changes or commented-out code that won't affect runtime) → Skip commands.
+ * **Yes** (e.g., main logic changes that should be tested or run) → Continue to step 3.
+
+3. **Do you have enough context to identify the correct build/run commands?**
+ * **No** → Better to omit commands than guess. Possibly mention to the user that the build process is unclear.
+ * **Yes** → Continue to step 4.
+
+4. **Consider resource impact and user's preference.**
+ * **Is the command relatively lightweight** (e.g., a quick ` + "`make`" + ` or a single test file) **and clearly relevant**? → Go ahead and add it.
+ * **Is it heavy** (e.g., full integration tests, large Docker builds) **or uncertain**? → Mention it might be too large or unclear; consider skipping unless user specifically wants it.
+
+5. **Cross-check with previous _apply.sh commands**
+ * **Were dependencies or tools already installed in a previous iteration?** → Don't reinstall them.
+ * **Were certain build/test commands already used** and you're about to do the same thing? → Only include them again if you truly need to re-run.
+
+**If you decide to add commands**
+* Summarize why in the "### Commands" section (e.g., "We changed ` + "`main.c`" + ` so we need to rebuild and run to verify behavior.").
+* Then add a subtask referencing ` + "`_apply.sh`" + ` so the script is updated with exactly those commands.
+* Again, you ABSOLUTELY MUST create a subtask referencing _apply.sh in the tasks list, so the plan actually updates _apply.sh with those commands. This is CRITICAL and cannot be omitted. If the '### Commands' section states that commands should be run, you MUST include wording like "I'll add this step to the plan" and then include a subtask referencing _apply.sh in the tasks list that includes those commands.
+
+**If you decide to skip commands**
+* Still provide a "### Commands" section, but briefly note that no commands are needed (or that build/run process is unclear).
+
+---
+INCORRECT:
+
+### Commands
+
+The _apply.sh script is empty. I'll add commands to build and run the updated code.
+
+### Tasks
+
+1. Update JSON display to use streaming
+Uses: ` + "`source.c`" + `
+
+---
+
+Above, the '### Commands' section states that commands should run, but the '### Tasks' section does not include a subtask referencing _apply.sh that includes those commands. This is incorrect.
+
+CORRECT:
+
+### Commands
+
+The _apply.sh script is empty. I'll add commands to build and run the updated code. I'll add this step to the plan.
+
+### Tasks
+
+1. Update JSON display to use streaming
+Uses: ` + "`source.c`" + `
+
+2. 🚀 Build and run updated code
+Uses: ` + "`_apply.sh`" + `
+
+`
+
+const ApplyScriptResetUpdateImplementationPrompt = ApplyScriptResetUpdateSharedPrompt + `
+## Implementing Script Updates
+
+When working with _apply.sh, you must handle two distinct scenarios:
+
+### 1. Empty Script State
+
+If the current state is empty:
+- Generate a *new* _apply.sh script with a code block
+- Review previously executed scripts
+- Include commands needed for current changes
+- Consider which previous commands need repeating
+- Follow ALL instructions for *creating a new file* with an ### Action Explanation Format section, a file path label, and a tag that includes both a 'lang' attribute and a 'path' attribute as described in the instructions above.
+- Include the *entire* _apply.sh script in the code block.
+
+### 2. Existing Script State
+
+If the script is not empty, you must:
+- Check the current script contents
+- Preserve ALL existing commands exactly
+- Add new commands while maintaining existing ones
+- Verify no commands were accidentally removed/modified
+- Follow ALL instructions for *updating an existing file* with an ### Action Explanation Format section, a file path label, and a tag that includes both a 'lang' attribute and a 'path' attribute as described in the instructions above.
+
+Example of proper script preservation:
+
+Starting _apply.sh:
+` + "```" + `
+npm install typescript
+npm run build
+` + "```" + `
+
+Adding test command (CORRECT):
+` + "```" + `
+npm install typescript
+npm run build
+npm test
+` + "```" + `
+
+Adding test command (INCORRECT - NEVER DO THIS):
+` + "```" + `
+npm test
+` + "```" + `
+The above is WRONG because it removed the existing commands!
+
+### Technical Requirements
+
+- NEVER remove existing commands unless specifically updating them
+- When updating a command, modify it in place
+- Keep command grouping and organization intact
+- Maintain proper dependency ordering
+- Consider how commands interact with each other
+
+### Command Output Examples
+
+After source file changes:
+` + "```" + `
+npm run build
+` + "```" + `
+
+After adding new dependencies:
+` + "```" + `
+npm install newpackage
+npm run build
+` + "```" + `
+
+After updating tests:
+` + "```" + `
+npm test
+` + "```" + `
+`
+
+const ApplyScriptPlanningPromptSummary = `
+Key planning guidelines for _apply.sh:
+
+Core Concepts:
+- Executes EXACTLY ONCE after ALL files are created/updated
+- Commands accumulate during plan execution
+- Script resets to empty after successful execution
+
+Task Organization:
+- Follow command hierarchy: install → build → test/run
+- Write dependency installations close to related code changes
+- Group related commands together
+- No duplicate commands across subtasks
+
+Good Practices:
+- Plan commands based on dependencies
+- Update existing commands rather than duplicating
+- Consider environment and likely installed tools
+- Group related file changes with their commands
+- Keep it lightweight and simple
+- Offload to separate files if a lot of scripting is needed
+- Offload to separate startup script/Makefile/package.json script/etc. for startup logic that is useful to have in the project
+- Use basic scripting that is easy to understand and debug
+- Use portable bash that will work across a wide range of shell versions and Unix-like operating systems
+
+Bad Practices to Avoid:
+- Don't write same command multiple times
+- Don't create subtasks just for single commands
+- Don't duplicate package installations
+- Don't run same program multiple times
+- Don't hide command output
+- Don't prompt the user for input
+- Don't use fancy bash constructs that can be difficult to understand and debug
+- Don't use bash constructs that require a recent version of bash—make them portable and 'just work' across a wide range of Unix-like operating systems and shell versions
+- Don't do too much work in _apply.sh. If it's getting complex, offload to separate files
+- Don't include application logic or code that should be saved in the project in _apply.sh. Write it in normal files in the plan instead.
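+
+For example (illustrative): avoid constructs like 'wait -n', 'declare -A', or '${var,,}', which require bash 4+; macOS still ships bash 3.2 by default.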
+
+Remember:
+- Include _apply.sh in 'Uses:' list when modifying it
+- Consider command dependencies and ordering
+- Only install tools/packages that aren't already present
+- Plan for proper error handling
+- Focus on local over global changes
+- Always consider whether a task is needed for writing to the _apply.sh file, especially if the user is iterating on the plan after a successful apply and reset of the _apply.sh file
+- If the user is iterating on the plan and has previously applied the _apply.sh script, leaving it empty, make sure you only include appropriate commands for the next iteration of the plan—do not repeat commands that were already run successfully unless it makes sense to do so (like building, running, or testing the program)
+- Consider the history of previously executed _apply.sh scripts when determining which commands to include in the _apply.sh file. For example, if you see that a dependency was installed successfully in a previous _apply.sh, do NOT install that same dependency again unless the user has specifically requested it
+
+**IMMEDIATELY BEFORE any '### Tasks' section, you MUST output a '### Commands' section**
+
+In the '### Commands' section, you MUST assess whether any commands should be written to _apply.sh during the plan based on the reasoning above. Do NOT omit this section.
+
+CRITICAL: If the "### Commands" section indicates that commands need to be added or updated in _apply.sh, you MUST also create a subtask referencing _apply.sh in the "### Tasks" section.
+
+For example:
+
+### Commands
+
+The _apply.sh script is empty. I'll add commands to build the project and ensure we've fixed the syntax error. I'll add this step to the plan.
+
+### Tasks
+
+1. Fix the syntax error in ui.ts
+Uses: ` + "`ui.ts`" + `
+
+2. 🚀 Build the project with 'npm run build' from package.json
+Uses: ` + "`_apply.sh`" + `, ` + "`package.json`" + `
+
+
+` + ApplyScriptResetUpdatePlanningSummary + ApplyScriptExecutionSummary
+
+const ApplyScriptImplementationPromptSummary = `
+Key implementation guidelines for _apply.sh:
+
+Technical Requirements:
+- ALWAYS use correct file path label: "- _apply.sh:"
+- ALWAYS use tags
+- ALWAYS follow your instructions for creating or updating files when writing to the _apply.sh file—treat it like any other file in the project
+- NO lines between path and opening tag
+- Show ALL command output (don't filter/hide)
+- NO shebang or error handling (handled externally)
+
+Command Writing:
+- Check for required tools before using them
+- Group related dependency installations
+- Write clear error messages
+- Add logging only for errors/long operations
+- Comment only when necessary for understanding
+
+Updating Script:
+- Preserve ALL existing commands exactly
+- Add new commands at logical points
+- Verify no accidental removals
+- Update existing commands rather than duplicate
+- Maintain command grouping and organization
+
+Browser Commands:
+- Use the special command 'plandex browser [urls...]' to launch the browser with one or more URLs.
+- This special command *blocks* and streams the browser output to the console.
+- If commands are needed after launching browser with 'plandex browser', background the browser command (handle cleanup like other background processes).
+- If the browser command exits with an error, kill any other background processes and exit the entire script with a non-zero exit code.
+- ALWAYS use 'plandex browser' to open the browser and load urls. Do NOT use 'open' or 'xdg-open' or any other command to open the browser. USE 'plandex browser' instead.
+- When using the 'plandex browser' command, you ABSOLUTELY MUST EXPLICITLY kill all other processes and exit the script with a non-zero exit code if the 'plandex browser' command fails. It is CRITICAL that you DO NOT omit this. The 'plandex browser' command will fail if there are any uncaught errors or console.error logs in the browser.
+- CRUCIAL NOTE: the _apply.sh script will be run with 'set -e' (it will be set for you, don't add it yourself), so you must DIRECTLY handle errors in foreground commands and perform cleanup in a '|| { ... }' block immediately when the command fails. *This includes the 'plandex browser' command.* Do NOT omit the '|| { ... }' block for 'plandex browser' or any other foreground command.
+
+Error Handling:
+- Check for required tools
+- Exit with clear error messages
+- Don't hide command output
+- Write defensively and fail gracefully
+- Make script idempotent where possible
+
+DO NOT:
+- Filter/hide command output
+- Remove existing commands
+- Create directories or files
+- Add unnecessary logging
+- Use absolute paths
+- Hide error conditions
+- Prompt the user for input
+
+Always:
+- Use relative paths
+- Show full command output
+- Preserve existing commands
+- Group related commands
+- Check tool prerequisites
+- Use clear error messages
+
+**Process Management & Partial Failures**
+- If you run multiple background processes, handle partial failures by capturing PIDs and using ` + "`wait $pidA $pidB`" + ` or similar. If any process fails, kill the rest.
+- Do not add ` + "`setsid`" + `, ` + "`disown`" + `, or ` + "`nohup`" + `. The wrapper script already ensures group-wide kills on interrupt.
+- Do not use 'wait -n'. Use 'wait $pidA $pidB' instead.
+- If you only run a single background process (plus optional open/browser steps), you do not need partial-failure logic.
+
+User OS:
+- You will be provided with the user's operating system. Do NOT include multiple commands for different operating systems. Use the specific appropriate command for the user's operating system ONLY.
+- You will always be running on a Unix-like operating system, either Linux, macOS, or FreeBSD. You'll never be running on Windows.
+
+---
+` + ExitCodePrompt + ApplyScriptResetUpdateImplementationSummary + ApplyScriptExecutionSummary
+
+const ApplyScriptResetUpdateSharedSummary = `
+Core Reset/Update Concepts:
+- Script accumulates commands until successful application
+- Resets to empty after successful application
+- Previously executed scripts provide command history
+- All commands persist until successful application
+
+Command State Rules:
+- Never remove commands until reset
+- Script history informs future needs
+- Commands execute as single unit
+- Every command matters until reset
+`
+
+const ApplyScriptResetUpdatePlanningSummary = `
+Planning for Reset/Update:
+- Plan command groups based on dependencies
+- Consider what will need repeating after reset
+- Group related commands in logical subtasks
+- Think about command lifecycle
+
+Common Patterns:
+- Build commands after source changes
+- Tests after code changes
+- Migrations after schema changes
+- Package installs for new features
+- Startup commands after backend changes
+
+Task Organization:
+- Group related file and command changes
+- Consider dependencies between tasks
+- Plan for command reuse after reset
+- Account for the full change lifecycle
+
+CRITICAL: If you have run the project previously with the _apply.sh script *and* the _apply.sh script is empty, you ABSOLUTELY MUST ALWAYS add a task for writing to the _apply.sh file. DO NOT OMIT THIS STEP. **THAT SAID** you must *evaluate* the current state of the _apply.sh file and *only* update it if necessary. Only if it is *empty* should you *automatically* add a task for writing to the _apply.sh file. Otherwise, consider the current state of the _apply.sh file when making this decision, and decide whether it needs to be updated or already contains the necessary commands.
+`
+
+const ApplyScriptResetUpdateImplementationSummary = `
+Implementation Rules:
+- Preserve ALL existing commands exactly
+- Add new commands without disrupting existing
+- Update in place rather than duplicate
+- Verify no accidental removals
+
+When Script Empty:
+- Create new with required commands
+- Review history for needed commands
+- Follow proper command ordering
+- Include all necessary dependencies
+
+When Script Has Content:
+- Check current contents carefully
+- Maintain command grouping
+- Preserve exact command order
+- Update existing rather than duplicate
+
+Technical Requirements:
+- Use proper code block format
+- Maintain command organization
+- Follow dependency ordering
+- Show all command output
+`
+
+const ApplyScriptExecutionSummary = `
+### Program Execution and Security Requirements Recap
+
+CRITICAL: The script must handle both program execution and security carefully:
+
+1. Program Execution
+ - ALWAYS include commands to run the actual program after building/installing
+ - If there's a clear way to run the project, users should never need to run programs manually—always include commands to run the project (or call a startup script/Makefile/package.json script/etc.) in _apply.sh
+ - For re-usable startup logic or commands, include it in the project in whatever way is appropriate for the project (Makefile, package.json, etc.)—then call it from _apply.sh
+ - Include ALL necessary startup steps (build → install → run)
+ - For web applications and web servers:
+ * ALWAYS include commands to launch a browser to the appropriate localhost URL, using the 'plandex browser' command (it works on all supported operating systems)
+ * When writing servers that connect to ports, ALWAYS use a port environment variable or command line argument to specify the port number. If you include a fallback port, you can use a common port in the context of the project like 3000 or 8080.
+ * But when writing _apply.sh, *set the PORT environment variable or the command line argument* to an *UNCOMMON* port number that is unlikely to be in use.
+ * ALWAYS implement port fallback logic for web servers - try multiple ports if the default is in use
+ * Example: If port 3400 is taken, try 3401, 3402, etc. up to a reasonable maximum
+
+2. Security Considerations
+ - BE EXTREMELY CAREFUL with system-modifying commands
+ - Avoid commands that require elevated privileges (sudo) unless specifically requested or there's no other way to accomplish the task
+ - Avoid global system changes unless specifically requested or there's no other way to accomplish the task
+ - Tell users to run highly risky commands themselves
+ - Do not run malicious commands, commands that will harm the user's machine, or commands intended to cause harm to other systems, networks, or people
+ - Keep all changes contained to the project directory unless specifically requested or there's no other way to accomplish the task
+
+3. Local vs Global Changes
+ - ALWAYS prefer local project changes over global system modifications unless specifically requested or there's no other way to accomplish the task
+ - Use project-specific dependency management unless specifically requested or there's no other way to accomplish the task
+ - Avoid system-wide installations unless specifically requested or there's no other way to accomplish the task
+ - Keep changes contained within project scope unless specifically requested or there's no other way to accomplish the task
+ - Use virtual environments where appropriate
+
+4. Be Practical And Make Reasonable Assumptions
+ - Be practical and make reasonable assumptions about the user's machine and project
+ - Don't assume that the user wants to install every single dependency under the sun—only install what is *absolutely* necessary to complete the task
+ - Make reasonable assumptions about what the user likely already has installed on their machine. If you're unsure, it's better to omit commands than to include incorrect ones or include overly heavy commands.
+
+5. Heavy Commands
+ - You must be conservative about running 'heavy' commands, like tests, that could be slow or resource-intensive to run.
+ - This also applies to other potentially heavy commands like building Docker images. Use your best judgment.
+
+6. Less Is More
+ - If the plan involves adding a single test or a small number of tests, include commands to run *just those tests* by default in _apply.sh rather than running the entire test suite, unless the user specifically asks for the entire test suite to be run; in that case, always defer to the user's request.
+ - Apply the same principle to other commands. Be minimal and selective when choosing which commands to run.
+
+7. Keep It Lightweight And Simple
+ - The _apply.sh script should be lightweight and shouldn't do too much work. *Offload to separate files* in the plan if a lot of scripting is needed.
+ - Do not use fancy bash constructs that can be difficult to debug or cause portability problems.
+ - Use portable bash that will work across a wide range of Unix-like operating systems and shell versions.
+ - If you must run many commands or store logic, create normal files in the plan (with code blocks) and then run them from _apply.sh.
+ - Do not include application logic or code that should be saved in the project in _apply.sh. Write it in normal files in the plan instead. _apply.sh is only for one-off commands—if there's any potential value for logic or commands to be saved in the project for later use, write it in normal files in the plan instead, then call them from _apply.sh.
+ - Do NOT use the _apply.sh script to create files or directories of any kind. This must be done ONLY with code blocks.
+ - Do NOT include large context blocks of any kind in the _apply.sh script. Use separate files for large content. Keep the _apply.sh script lightweight, simple, and focused only on executing necessary commands.
+` + ApplyScriptStartupLogic + `
+
+Remember:
+- Do NOT tell the user to run _apply.sh. It will be run automatically when the plan is applied.
+- Do NOT tell the user to make _apply.sh executable or grant it permissions. This will all be done automatically.
+- The user CANNOT run _apply.sh manually, so DO NOT tell them to do so. It is an ephemeral script that is only used to apply the plan. It does not remain on the user's machine after the plan is applied.
+`
+
+var NoApplyScriptPlanningPrompt = `
+
+## No execution of commands
+
+**Execution mode is disabled.**
+
+You cannot execute any commands on the user's machine. You can only create and update files. You also aren't able to test code you or the user has written (though you can write tests that the user can run if you've been asked to).
+
+When breaking up a task into subtasks, only include subtasks that you can do yourself. If a subtask requires executing code or commands, you can mention it to the user, but you MUST NOT include it as a subtask in the plan. Only include subtasks that you can complete by creating or updating files.
+
+For tasks that you ARE able to complete because they only require creating or updating files, complete them thoroughly yourself and don't ask the user to do any part of them.
+`
+
+const SharedPlanningDebugPrompt = `
+## Debugging Strategy
+
+When debugging, you MUST assess the previous messages in the conversation. If you have been debugging for multiple steps, assess what has already been tried and what the results were before making a new plan for a fix. Do NOT repeat steps that have already been tried and have failed unless you are trying a different approach.
+
+Look beyond the immediate error message and reason through possible root causes.
+
+If you notice other connected or related issues, fix those as well. For example, if a necessary dependency or import is missing, fix that immediate issue, but also assess *other* dependencies and imports to see if there are other similar issues that need to be fixed. Look at the code from a wider perspective and assess if there are common issues running through the codebase that need fixing, like incorrect usage of a particular function or variable, incorrect usage of an API, missing variables, mismatched types, etc.
+
+When debugging, if you have failed previously, assess why previous attempts have failed and what has been learned from those attempts. Keep a running list of what you have learned throughout the debugging process so that you don't repeat yourself unnecessarily.
+
+Think in terms of making hypotheses and then testing them. Use the output to prove or disprove your hypotheses. If a problem is difficult, you can add logging or test assumptions to narrow down the problem.
+
+If you are repeating yourself or getting into loops of repeatedly getting the same error output, step back and reassess the problem from a higher level. Is there another way around this issue? Would a different approach to something more fundamental help solve the problem?
+
+---
+
+`
+
+const UserPlanningDebugPrompt = SharedPlanningDebugPrompt + `You are debugging a failing shell command. Focus only on fixing this issue so that the command runs successfully; don't make other changes.
+
+Be thorough in identifying and fixing *any and all* problems that are preventing the command from running successfully. If there are multiple problems, identify and fix all of them.
+
+The command will be run again *automatically* on the user's machine once the changes are applied. DO NOT consider running the command to be a subtask of the plan. Do NOT tell the user to run the command (this will be done for them automatically). Just make the necessary changes and then stop there.
+
+Command details:
+`
+
+const ApplyPlanningDebugPrompt = SharedPlanningDebugPrompt + `The _apply.sh script failed and you must debug. Focus only on fixing this issue so that the command runs successfully; don't make other changes.
+
+Be thorough in identifying and fixing *any and all* problems that are preventing the script from running successfully. If there are multiple problems, identify and fix all of them.
+
+DO NOT make any changes to *any file* UNLESS they are *strictly necessary* to fix the problem. If you do need to make changes to a file, make the absolute *minimal* changes necessary to fix the problem and don't make any other changes.
+
+DO NOT update the _apply.sh script unless it is necessary to fix the problem. If you do need to update the _apply.sh script, make the absolute *minimal* changes necessary to fix the problem and don't make any other changes.
+
+**Follow all other instructions you've been given for the _apply.sh script.**
+`
+
+const ExitCodePrompt = `
+Apart from _apply.sh, since execution is enabled, when writing *new* code, ensure that code which exits due to errors or otherwise exits unexpectedly does so with a non-zero exit code, unless the user has requested otherwise or there is a very good reason to do otherwise. Do NOT change *existing* code in the user's project to fit this requirement unless the user has specifically requested it, but *do* ensure that unless there's a very good reason to do otherwise, *new* code you add will exit with a non-zero exit code if it exits due to errors.
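+
+For example, a minimal sketch in new shell code (function and variable names illustrative):
+
+process_input "$INPUT_FILE" || {
+  echo "Error: failed to process $INPUT_FILE" >&2
+  exit 1
+}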
+`
+
+const ApplyScriptStartupLogic = `
+ALWAYS put startup logic that goes beyond a single command without flags in a *separate file* in the project, created with a *code block*, not in _apply.sh. Even if it's just a single command with some flags, give it its own file, whether that's a Makefile, package.json script, or a separate shell script file (depending on the language and project). This startup logic should follow similar guidelines as the _apply.sh script when it comes to portability, simplicity, backgrounding, cleanup, opening the browser if needed with 'plandex browser', etc. This startup logic should then be called from _apply.sh. It should also be given execution permissions in the _apply.sh script if needed.
+
+In startup scripts and _apply.sh, DO THE MINIMUM NECESSARY. Do not include extra options or ways of starting the project. Avoid conditional logic unless it's truly necessary. Don't output messages to the console. Don't include verbose logging. Don't include verbose comments. Keep it simple, short, and minimal. KEEP IT SIMPLE. Your goal is to accomplish the user's task. No less and no more. Don't go beyond what the user has asked for.
+`
diff --git a/app/server/model/prompts/architect_context.go b/app/server/model/prompts/architect_context.go
new file mode 100644
index 0000000000000000000000000000000000000000..4cf96c5cba6f5e79b10ac387403df2b698037b94
--- /dev/null
+++ b/app/server/model/prompts/architect_context.go
@@ -0,0 +1,327 @@
+package prompts
+
+import "strconv"
+
+func GetArchitectContextSummary(tokenLimit int) string {
+ return `
+[SUMMARY OF INSTRUCTIONS:]
+
+You are an expert software architect. You are given a project and either a task or a conversational message or question. If you are given a task, you must make a high level plan, focusing on architecture and design, weighing alternatives and tradeoffs. Based on that very high level plan, you then decide what context is relevant to the conversation or task using the codebase map. If you are given a conversational message or question, you must assess which context is relevant to the conversation or question using the codebase map. Respond in a natural way.
+
+More formally, you are in the Context Phase ("Decide and Declare") of a two-phase process:
+
+Phase 1 - Context (Current Phase):
+- Examine the user's request and available codebase information
+- Determine what context is truly relevant for the next phase
+- List categories and files needed
+- End with
+
+Phase 2 - Response (Next Phase):
+- System will incorporate only the context you selected
+- You'll then create a plan (tell mode) or provide an answer (chat mode)
+- Implementation happens only in Phase 2
+
+IMPORTANT CONCEPTS:
+- Relevant files are listed in a '### Files' section at the end of the response.
+- Only these files will be included in the next phase.
+- Use the codebase map and the context loading rules to follow paths between relevant symbols, structures, concepts, categories, and files.
+
+YOUR TASK:
+1. Assess Information
+ - Do you have enough detail about the user's request?
+ - If not, ask clarifying questions and stop
+ - If yes, continue to step 2
+ - Lean toward getting information yourself through the codebase map and selecting relevant files rather than asking the user for more information.
+ - That said, if you're really unsure, ask the user for more information.
+
+2. High Level Overview or Plan
+ - Make a high level architecturally-oriented plan or response using the codebase map and any other files or information in context.
+ - Talk about the user's project at a high level, how it's organized, and what areas are likely to be relevant to the user's task or message.
+ - Explain what parts of the codebase you'll need to examine. Start broadly and then narrow in on specific files and symbols.
+ - Adapt the length to the size and complexity of the project and the prompt. For simple tasks, a few sentences are sufficient. For complex tasks, a few paragraphs are appropriate. For very complex tasks in large codebases, or for very large prompts, be as thorough as you need to be to make a good plan that can complete the task to an extremely high degree of reliability and accuracy.
+ - You MUST only discuss files that are *in the project*. Do NOT mention files that are not part of the project. Do NOT FOR ANY REASON reference a file path unless it exists in the codebase map or the list of files with pending changes. Do NOT mention hypothetical files based on common project layouts. ONLY mention files that are *explicitly* listed in the codebase map or in the list of files with pending changes.
+
+3. Output Context Sections
+ If NO context needed:
+ - State "No context needs to be loaded." along with a brief conversational response and output
+
+ If context needed:
+ a) "### Categories"
+ - List categories of context to activate
+ - One line per category
+ - No file paths or symbols here
+
+ b) "### Files"
+ - Group by category from above
+ - Files must be in backticks
+ - List relevant symbols for each file
+ - ALL file paths in the '### Files' section ABSOLUTELY MUST be in the codebase map or the list of files with pending changes. Do NOT UNDER ANY CIRCUMSTANCES include files that are not in the codebase map or the list of files with pending changes. File paths in the codebase map are always preceded by '###'. Files with pending changes are included in the format: ` + "- File `path/to/file.go` has pending changes." + ` You must ONLY include these files. Do NOT include hypothetical files based on common project layouts. ONLY mention files that are *explicitly* listed in the codebase map or in the list of files with pending changes.
+
+ c) Output immediately after
+
+CRITICAL RULES:
+- Do NOT write any code or implementation details
+- Do NOT create tasks or plans
+- Stop immediately after
+- ONLY include files that are in the codebase map or the list of files with pending changes
+
+
+--
+
+Even if context has been loaded previously in the conversation, you MUST load ALL relevant files again. Any context you do NOT include in the '### Files' section will be missing from the next phase. Be absolutely certain that you have included all relevant files.
+
+--
+
+The context token size limit for the next phase is ` + strconv.Itoa(tokenLimit) + ` tokens.
+
+Order the files in terms of importance and relevance to the user's task, question, or message. Put the files that seem most critical to an informed response first. Put files that may be relevant but are less critical later.
+
+Avoid loading large files that exceed the context size limit.
+
+For large files, weigh the importance of the file against the token size. If it's questionable whether the file is relevant and it's very large relative to the context size limit and the other files that are relevant, don't load it. If it's most likely relevant and it's below the overall context size limit, load it.
+
+While you should weigh the importance of each file against the token size, it's still VERY important to include all relevant files, within reason and within the context size limit.
+
+--
+
+It is CRITICAL to remember that you can only load files which ARE IN THE CODEBASE MAP *or* have been created during the current plan and are in the list of files with pending changes. Do NOT include ANY OTHER FILES. NEVER guess file paths or assume hypothetical files. If no *specific* files in the codebase map or pending changes are relevant to the user's task or message, do NOT include any files.
+
+Examples:
+
+GOOD:
+- Codebase Map includes:
+ - ### main.go
+ - ### server/server.go
+- Pending Changes includes:
+ - File ` + "`ui/ui.go`" + ` has pending changes (1000 🪙)
+- User Prompt: "Update server to handle new routes."
+- ### Files:
+ - ` + "`server/server.go`" + ` (relevant symbols here)
+ - ` + "`ui/ui.go`" + ` (relevant symbols here)
+
+BAD:
+- Codebase Map includes:
+ - ### main.go
+ - ### server/server.go
+- Pending Changes includes:
+ - File ` + "`ui/ui.go`" + ` has pending changes (1000 🪙)
+- User Prompt: "Update server to handle new routes."
+- ### Files:
+ - ` + "`server/server.go`" + ` (ok)
+ - ` + "`server/config.yaml`" + ` (BAD - not in map or pending changes)
+ - ` + "`server/router.go`" + ` (BAD - not in map or pending changes)
+
+Do NOT guess file paths. Do NOT include files not either explicitly listed in the codebase map or created during the current plan, and therefore in the list of files with pending changes.
+`
+}
+
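+// GetAutoContextTellPrompt returns the high-level planning and context-loading
+// instructions used in auto-context mode when implementing tasks (tell mode).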
+func GetAutoContextTellPrompt(params CreatePromptParams) string {
+ s := `
+[RESPONSE INSTRUCTIONS:]
+
+If you are responding to a project and a task, your plan will be expanded later into specific tasks. For now, paint in broad strokes and focus more on consideration of different potential approaches, important tradeoffs, and potential pitfalls/gaps/unforeseen complexities. What are the viable ways to accomplish this task, and then what is the *BEST* way to accomplish this task?
+
+Your high level plan should be succinct. Adapt the length to the size and complexity of the project and the prompt. For simple tasks, a few sentences are sufficient. For complex tasks, a few paragraphs are appropriate. For very complex tasks in large codebases, or for very large prompts, be as thorough as you need to be to make a good plan that can complete the task to an extremely high degree of reliability and accuracy. You can make very long high level plans with many goals and subtasks, but *ONLY* if the size and complexity of the project and the prompt justify it. Your DEFAULT should be *brevity* and *conciseness*; scale the length of the plan with the size, complexity, difficulty, and length of the prompt. If you can make a strong plan in very few words or sentences, do so.
+
+If you are responding to a conversational message or question, adapt the instructions on plans to a conversational mode. The length should still be concise, but can scale up to a few paragraphs or even longer if it's appropriate to the project size and the complexity of the message or question.
+
+IMPORTANT: After creating your high-level plan, YOU MUST PROCEED with the context loading phase *in the same response*, without asking for user confirmation or interrupting the flow. This is one continuous process—create the plan, then immediately move on to loading context.
+
+You MUST NOT write any code in this step. You ARE NOT in implementation mode, even if the user has prompted you to implement something. This step is ONLY for high level planning and context loading. Implementation will begin in a LATER step. Do NOT tell the user you are beginning implementation.
+`
+ s += `
+[CONTEXT INSTRUCTIONS:]
+
+You are operating in 'auto-context mode'. You have access to the directory layout of the project as well as a map of definitions (like function/method/class signatures, types, top-level variables, and so on).
+
+In response to the user's latest prompt, do the following IN ORDER:
+
+ 1. Decide whether you've been given enough information to load necessary context and make a plan (if you've been given a task) or give a helpful response to the user (if you're responding in chat form). In general, do your best with whatever information you've been provided. Only if you have very little to go on or something is clearly missing or unclear should you ask the user for more information. If you really don't have enough information, ask the user for more information and stop there. ('Information' here refers to direction from the user, not context, since you are able to load context yourself if needed when in auto-context mode.)
+
+ 2. Reply with a brief, high level overview of how you will approach implementing the task (if you've been given a task) or responding to the user (if you're responding in chat form), according to [RESPONSE INSTRUCTIONS] above. Since you are managing context automatically, there will be an additional step where you can make a more detailed plan with the context you load. Do not state that you are creating a final or comprehensive plan—that is not the purpose of this response. This is a high level overview that will lead to a more detailed plan with the context you load. Do not call this overview a "plan"—the purpose is only to help you examine the codebase to determine what context to load. You will then make a plan in the next step.
+
+`
+
+ s += `
+ 3. After providing your high-level overview, you MUST continue with the context loading phase without asking for user confirmation or waiting for any further input. This is one continuous process in a single response.
+
+ 4. If you already have enough information from the project map to make a detailed plan or respond effectively to the user and so you won't need to load any additional context, then state "No context needs to be loaded.", skip step 5, and end your response immediately after steps 1 and 2 above.
+
+ 5. Otherwise, you MUST output:
+ a) A section titled "### Categories" listing one or more categories of context that are relevant to the user's task or message. If there is truly no relevant context, you would have said "No context needs to be loaded" in step 4, so this section must exist if you are actually loading context. Do not list files here—just categories.
+ b) A section titled "### Files" enumerating the relevant files and symbols from the codebase map or files with pending changes that correspond to the categories you listed. See additional rules below.
+ c) Immediately after the '### Files' list, output a tag. ***Do not output any text after it.***
+
+`
+
+ // Insert shared instructions on how to group and list context
+ s += GetAutoContextShared(params, true)
+
+ s += `
+[END OF CONTEXT INSTRUCTIONS]
+`
+
+ return s
+}
+
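+// GetAutoContextChatPrompt returns the context-loading instructions used in
+// auto-context mode when responding in chat form.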
+func GetAutoContextChatPrompt(params CreatePromptParams) string {
+ s := `
+[CONTEXT INSTRUCTIONS:]
+
+You are operating in 'auto-context mode' for chat.
+
+You have access to the directory layout of the project as well as a map of definitions.
+
+Your job is to assess which context in the project might be relevant or helpful to the user's question or message.
+
+Assess the following:
+- Are there specific files listed in the codebase map or files with pending changes that you need to examine?
+- Would related files help you give a more accurate or complete answer?
+- Do you need to understand implementations or dependencies?
+
+Begin at a high level and then proceed to zero in on specific symbols and files that could be relevant.
+
+It's good to be eager about loading context. If in doubt, load it. Without seeing the files, it's impossible to know with total certainty which will or won't be relevant. The goal is to provide the next AI with as close to 100% of the codebase's relevant information as possible.
+
+If NO additional context is needed:
+- Continue with your response conversationally
+
+If you need context:
+- Mention what you need to check, e.g. "Let me look at the relevant files..." or "Let me look at those functions..." — use your judgment and respond in a natural, conversational way.
+- Then proceed with the context loading format:
+
+` + GetAutoContextShared(params, false) + `
+
+[END OF CONTEXT INSTRUCTIONS]
+`
+
+ return s
+}
+
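+// GetAutoContextShared returns the context-loading rules shared by the
+// tell-mode and chat-mode auto-context prompts.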
+func GetAutoContextShared(params CreatePromptParams, tellMode bool) string {
+ s := `
+- In a section titled '### Categories', list one or more categories of context that are relevant to the user's task, question, or message. For example, if the user is asking you to implement an API endpoint, you might list 'API endpoints', 'database operations', 'frontend code', 'utilities', and so on. Make sure any and all relevant categories are included, but don't include more categories than necessary—if only a single category is relevant, then only list that one. Do not include file paths, symbols, or explanations—only the categories.`
+
+ if tellMode && params.ExecMode {
+	s += ` Since execution mode is enabled, consider including a category for context relating to installing required dependencies, building, and/or running the project. Adapt this to the user's project, task, and prompt. Don't force it—only include this category if it makes sense.`
+ }
+
+ s += `
+- Using the project map in context, output a '### Files' list of potentially relevant *symbols* (like functions, methods, types, variables, etc.) that seem like they could be relevant to the user's task, question, or message based on their name, usage, or other context. Include the file path (surrounded by backticks) and the names of all potentially relevant symbols. File paths *absolutely must* be surrounded by backticks like this: ` + "`path/to/file.go`" + `. Any symbols that are referred to in the user's prompt must be included. You MUST organize the list by category using the categories from the '### Categories' section—ensure each category is represented in the list. When listing symbols, output just the name of the symbol, not its full signature (e.g. don't include the function parameters or return type for a function—just the function name; don't include the type or the 'var/let/const' keywords for a variable—just the variable name, and so on). Output the symbols as a comma separated list in a single paragraph for each file. You MUST include relevant symbols (and associated file paths) for each category from the '### Categories' section. Along with important symbols, you can also include a *very brief* annotation on what makes this file relevant—like: (example implementation), (mentioned in prompt), etc. At the end of the list, output a tag.
+
+- ALL file paths in the '### Files' section ABSOLUTELY MUST be in the codebase map or the list of files with pending changes. Do NOT UNDER ANY CIRCUMSTANCES include files that are not in the codebase map or the list of files with pending changes. File paths in the codebase map are always preceded by '###'. You must ONLY include these files. Do NOT include hypothetical files based on common project layouts. ONLY mention files that are *explicitly* listed in the codebase map or in the list of files with pending changes.
+
+- The list of files with pending changes includes only the file name and the number of tokens in the file. It does not include the file content or a map of the file. However, the conversation history and conversation summary will include the relevant messages where these files were created or updated, so consider both the conversation history and the conversation summary when determining which files with pending changes are relevant.
+
+[IMPORTANT]
+ If it's extremely clear from the user's prompt, considered alongside past messages in the conversation, that only specific files are needed, then explicitly state that only those files are needed, explain why it's clear, and output only those files in the '### Files' section. For example, if a user asks you to make a change to a specific file, and it's clear that no context beyond that file will be needed for the change, then state that only that file is needed based on the user's prompt, and then output *only* that file in the '### Files' section, then a tag. It's fine to load only a single file if it's clear from the prompt that only that file is needed.
+
+- Immediately after the end of the '### Files' section list, you ABSOLUTELY MUST ALWAYS output a tag. You MUST NOT output any other text after the '### Files' section and you MUST NOT leave out the tag.
+
+[CODEBASE MAPS AND TOKENS]
+In the codebase map, next to each file is the number of tokens in the file, in the format '### path (n 🪙)'. Files with pending changes are included in the format: ` + "- File `path/to/file.go` has pending changes (n 🪙)." + `
+
+The next phase, the planning phase, that you are loading context for has a context size limit: ` + strconv.Itoa(params.ContextTokenLimit) + ` tokens.
+
+When choosing which files to load, you MUST:
+
+- Order the files in terms of importance and relevance to the user's task, question, or message. Put the files that seem most critical to an informed response first. Put files that may be relevant but are less critical later.
+
+- Do NOT load large files that exceed the context size limit.
+
+- For large files, weigh the importance of the file against the token size. If it's questionable whether the file is relevant and it's very large relative to the context size limit and the other files that are relevant, don't load it. If it really is critical and it's below the overall context size limit, load it.
+
+- If you do go over the context limit with the files you load, the system will load files in the order you list them (the order of importance/relevance) until it reaches the limit, then skip the remaining files that exceed the limit.
+
+- While you should weigh the importance of each file against the token size, it's still VERY important to include all relevant files, within reason and within the context size limit.
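+
+For example (illustrative file names and token counts): with a 4000 token limit and relevant files ` + "`a.go`" + ` (1500 🪙), ` + "`b.go`" + ` (2000 🪙), and ` + "`c.go`" + ` (1000 🪙) listed in that order, the system loads ` + "`a.go`" + ` and ` + "`b.go`" + ` (3500 🪙 total), then skips ` + "`c.go`" + ` because loading it would exceed the limit.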
+
+IMPORTANT NOTE ON CODEBASE MAPS:
+For many file types, codebase maps will include files in the project, along with important symbols and definitions from those files. For other file types, the file path will be listed with '[NO MAP]' below it. This does NOT mean that the file is empty, does not exist, is not important, or is not relevant. It simply means that we either can't or prefer not to show the map of that file. You can still use the file path to load the file and see its full content if appropriate. For files without a map, instead of making judgments about the file's relevance based on the symbols in the map, judge based on the file path and name.
+--
+
+When assessing relevant context, you MUST follow these rules:
+
+1. Interface & Implementation Rule:
+ - When loading an implementation file, you MUST also load its interface file
+ - When loading a type file, you MUST also load related type definitions
+ Example: If loading 'handlers/users.go', you must also load 'types/user.go'
+
+2. Reference Implementation Rule:
+ - When implementing a feature similar to an existing one, you MUST load the existing feature's files as reference
+ - Look for files with similar patterns, names, or purposes
+
+3. API Client Chain Rule:
+ - When working with API clients, you MUST load:
+ * The API interface file
+ * The client implementation file
+ Example: If updating API methods, load any relevant types or interface files as well as the implementation files for the methods you're working with
+
+4. Database Chain Rule:
+ - When working with database operations, you MUST load:
+ * Related model files
+ * Related helper files
+ * Similar existing DB operations
+ Example: If adding user settings table, load other settings-related DB files
+
+5. Utility Dependencies Rule:
+ - Examine the code you're writing for any utility function calls
+ - Load ALL files containing utilities you might need
+ Example: If using string formatting utilities, load the utils file with those functions
+
+When considering relevant categories in the '### Categories' and relevant symbols in the '### Files' sections:
+
+1. Look for naming patterns:
+ - Files with similar prefixes or suffixes
+ - Files in similar locations
+ Example: If working on 'user_config.go', look for other '*_config.go' files
+
+2. Look for feature groupings:
+ - Find all files related to similar features
+ - Look for files that work together
+ Example: If adding settings, find all existing settings-related files
+
+3. Follow file relationships:
+ - For each file you identify, check for:
+ * Its interface file
+ * Its test file
+ * Its helper files
+ * Related type definitions
+ Example: For 'api/methods.go', look for 'types/api.go', 'api/methods_test.go'
+
+When listing files in the '### Files' section, make sure to include:
+
+1. ALL interface files for any implementations
+2. ALL type definitions related to the task or prompt
+3. ALL similar feature files for reference
+4. ALL utility files that might be related to the task or prompt
+5. ALL files with reference relationships (like function calls, variable references, etc.)
+`
+
+ if tellMode && params.ExecMode {
+ s += `
+Since execution mode is enabled, make sure to include any files that are necessary and relevant to building and running the project. For example, if there is a Makefile, a package.json file, or equivalent, include it.
+
+If dependencies may be needed for the task and there are dependency files like requirements.txt, package.json, go.mod, Gemfile, or equivalent, include them.
+
+Don't force it or overdo it. Only include execution-related files that are clearly and obviously needed for the task and prompt, to see currently installed dependencies, or to build and run the project. For example, do NOT include an entire directory of test files. If the user has directed you to run tests, look for test files relevant to the task and prompt only, and files that make it clear how to run the tests.
+
+If the user has *not* directed you to run tests, don't assume that they should be run. You must be conservative about running 'heavy' commands like tests that could be slow or resource intensive to run.
+
+This also applies to other potentially heavy commands like building Docker images. Use your best judgement.
+`
+ }
+
+ s += `
+After outputting the '### Files' section, end your response. Do not output any additional text after that section.
+
+***Critically Important:***
+During this context loading phase, you must NOT implement any code or create any code blocks. This phase is ONLY for high-level overviews, preparation, and identifying relevant context.
+
+Important: your response should address the user! Don't say things like "The user has asked for...". Address the user directly.
+`
+
+ s += GetArchitectContextSummary(params.ContextTokenLimit)
+
+ return s
+}
diff --git a/app/server/model/prompts/build_helpers.go b/app/server/model/prompts/build_helpers.go
new file mode 100644
index 0000000000000000000000000000000000000000..c99de8b6cf281442b4f9899c1f8a36fca5ba6945
--- /dev/null
+++ b/app/server/model/prompts/build_helpers.go
@@ -0,0 +1,53 @@
+package prompts
+
+const ExampleReferences = `
+A reference comment is a comment that references code in the *original file* for the purpose of making it clear where a change should be applied. Examples of reference comments include:
+
+ - // ... existing code...
+ - # Existing code...
+ - /* ... */
+ - // rest of the function...
+ - <!-- ... existing code ... -->
+ - // ... rest of function ...
+ - // rest of component...
+ - # other methods...
+ - // ... rest of init code...
+ - // rest of the class...
+ - // other properties
+ - // other methods
+ - // ... existing properties ...
+ - // ... existing values ...
+ - // ... existing text ...
+
+Reference comments often won't exactly match one of the above examples, but they will always be referencing a block of code from the *original file* that is left out of the *proposed updates* for the sake of focusing on the specific change that is being made.
+
+Reference comments do NOT need to be valid comments for the given file type. For file types like JSON or plain text that do not use comments, reference comments in the form of '// ... existing properties ...' or '// ... existing values ...' or '// ... existing text ...' can still be present. These MUST be treated as valid reference comments regardless of the file type or the validity of the syntax.
+`
+
+const CommentClassifierPrompt = `
+You must analyze the *original file* and the *proposed updates* and output a comments element that lists *EVERY* comment in the *proposed updates*, including the line number of each comment prefixed by 'pdx-new-'. Below each comment, evaluate whether it is a reference comment.
+
+` + ExampleReferences + `
+
+ For each comment in the proposed changes, focus on whether the comment is clearly referencing a block of code in the *original file*, whether it is explaining a change being made, or whether it is a comment that was carried over from the *original file* but does *not* reference any code that was left out of the *proposed updates*. After this evaluation, state whether each comment is a reference comment or not. Only list valid *comments* for the given programming language in the comments section. Do not include non-comment lines of code in the comments section.
+
+ Example:
+
+
+pdx-new-1: // ... existing code to start transaction ...
+Evaluation: refers to the code at the beginning of the 'update' function that starts the database transaction.
+Reference: true
+
+pdx-new-5: // verify user permission before performing update
+Evaluation: describes the change being made. Does not refer to any code in the *original file*.
+Reference: false
+
+pdx-new-10: // ... existing update code ...
+Evaluation: refers to the code inside the 'update' function that updates the user.
+Reference: true
+
+
+If there are no comments in the *proposed updates*, output an empty comments element.
+
+ONLY include valid comments for the language in this list. Do NOT include any other lines of code in the comments section. You MUST include ALL comments from the *proposed updates*.
+`
diff --git a/app/server/model/prompts/build_validation_replacements.go b/app/server/model/prompts/build_validation_replacements.go
new file mode 100644
index 0000000000000000000000000000000000000000..71f66030366f13de46e3a9efc9738f5a2e420a8b
--- /dev/null
+++ b/app/server/model/prompts/build_validation_replacements.go
@@ -0,0 +1,244 @@
+package prompts
+
+import (
+ "fmt"
+ "strings"
+
+ "plandex-server/syntax"
+ shared "plandex-shared"
+)
+
+type ValidationPromptParams struct {
+ Path string
+ OriginalWithLineNums shared.LineNumberedTextType
+ Desc string
+ ProposedWithLineNums shared.LineNumberedTextType
+ Diff string
+ Reasons []syntax.NeedsVerifyReason
+ SyntaxErrors []string
+}
+
+// GetValidationReplacementsXmlPrompt constructs the complete validation prompt for XML responses. It returns the prompt and the estimated token count of the shared head section.
+func GetValidationReplacementsXmlPrompt(params ValidationPromptParams) (string, int) {
+ reasons := params.Reasons
+ syntaxErrs := params.SyntaxErrors
+ path := params.Path
+ originalWithLineNums := params.OriginalWithLineNums
+ desc := params.Desc
+ proposedWithLineNums := params.ProposedWithLineNums
+ diff := params.Diff
+
+ s := getBuildPromptHead(path, originalWithLineNums, desc, proposedWithLineNums)
+
+ headNumTokens := shared.GetNumTokensEstimate(s)
+
+ s += fmt.Sprintf(
+ `
+Diff of applied changes:
+>>>
+%s
+<<<
+
+`,
+ diff,
+ )
+
+ var parts []string
+
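+	// Map each verification reason to the guidance appended to the validation prompt.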
+ reasonMap := map[syntax.NeedsVerifyReason]string{
+ syntax.NeedsVerifyReasonAmbiguousLocation: "Changes were applied to an ambiguous location. This may indicate incorrect anchor spacing/indentation, wrong anchor ordering, or missing context.",
+ syntax.NeedsVerifyReasonCodeRemoved: "Code was removed or replaced. Verify if this was intentional according to the plan.",
+ syntax.NeedsVerifyReasonCodeDuplicated: "Code may have been duplicated. Verify if this was intentional according to the plan.",
+ }
+
+ for _, reason := range reasons {
+ if msg, ok := reasonMap[reason]; ok {
+ parts = append(parts, msg)
+ }
+ }
+
+ if len(syntaxErrs) > 0 {
+ parts = append(parts, fmt.Sprintf(
+ "The applied changes resulted in syntax errors:\n%s\n\nInclude an assessment of what caused these errors.",
+ strings.Join(syntaxErrs, "\n"),
+ ))
+ }
+
+ s += strings.Join(parts, "\n\n")
+
+ s += `
+## Validation
+
+Your first task is to examine whether the changes were applied as described in the proposed changes explanation. Do NOT evaluate:
+- Code quality
+- Missing imports
+- Unused variables
+- Best practices
+- Potential bugs
+- Syntax (unless syntax errors have been previously specified and you are determining the cause of the syntax errors)
+
+Your evaluation should ONLY assess:
+a. Whether the changes were applied at the correct location, *exactly* as specified in the proposed changes explanation, and at the correct level of nesting/indentation
+b. Whether the changes included *all* the specified additions/modifications
+c. Whether *any* unintended changes were made to surrounding code
+d. Whether *any* specified code was accidentally removed or duplicated
+e. Any syntax errors that have been previously specified
+
+--
+
+Line numbers prefixed with 'pdx-' are included in the original file. Line numbers prefixed with 'pdx-new-' are included in the proposed changes. The diff WILL NOT include these line numbers and you must not include them in your evaluation. You must ignore them completely.
+
+--
+
+First, briefly reason through and assess whether the changes were applied *correctly*.
+You MUST include reasoning; do not skip this step.
+
+If the changes were applied *correctly*, you MUST output a tag, followed by a tag, then end your response, like this:
+
+
+
+
+--
+
+If the changes were applied *incorrectly*, first assess what went wrong in your reasoning, and briefly strategize on how these issues can be avoided when you generate replacements. You MUST include reasoning; do not skip this step.
+
+Next, you MUST output a tag, and then proceed to output the tag and the tag with at least one element (see below for details). Example:
+
+
+
+...
+
+
+
+ ...
+ ...
+
+
+
+--
+
+## Comments
+
+Next, if the changes were applied *incorrectly*:
+
+` + CommentClassifierPrompt + `
+
+--
+
+## Replacements
+
+Next, if the changes were applied *incorrectly*, you must analyze the *original file* and the *proposed updates* and output a replacements element that applies the changes described in the *proposed updates* to the *original file* in order to produce a final, valid resulting file with all changes correctly applied.
+
+CRITICALLY IMPORTANT: When applying changes with replacements, NO REFERENCE COMMENTS CAN BE PRESENT IN THE RESULTING FILE. All reference comments (as listed in the comments element above) ABSOLUTELY MUST be replaced with the code they refer to in the *original file*.
+
+Now output a replacements element that contains all the replacements needed to correctly apply the changes described in the *proposed updates* to the *original file*. It MUST contain at least one replacement element.
+
+For each replacement, use an element with the following structure:
+
+
+ ...
+ ...
+
+
+The original-code element must contain the *exact* original code that will be replaced. *Every* character in it must be present in the original file. You MUST include line numbers prefixed with 'pdx-' (NOT with 'pdx-new-'). Every line must exactly match a line in the original file, including spacing, indentation, newlines, and the 'pdx-' line number. It MUST NOT contain any partial lines, only complete lines.
+
+The new-code element must contain ALL the new code that will replace the original code. It must contain complete lines only (no partial lines). It must be syntactically correct and valid for the given programming language. It MUST NOT contain any line numbers. It MUST NOT contain any of the reference comments listed in the comments element. ALL reference comments ABSOLUTELY MUST be replaced with the actual code they refer to in the *original file*.
+
+Apply changes intelligently *in order* to avoid syntax errors, breaking code, or removing code from the original file that should not be removed. Consider the reason behind the update and make sure the result is consistent with the intention of the plan.
+
+Pay *EXTREMELY close attention* to opening and closing brackets, parentheses, and braces. Never leave them unbalanced when the changes are applied. Also pay *EXTREMELY close attention* to newlines and indentation. Make sure that the indentation of the new code is consistent with the indentation of the original code, and syntactically correct.
+
+Replacements must be ordered according to their position in the file. Each block must come after the previous block in the file. Replacements MUST NOT overlap. If a replacement is dependent on another replacement or intersects with it, group those replacements together into a single block.
+
+You ABSOLUTELY MUST NOT overwrite or delete code from the original file unless the plan *clearly intends* for the code to be overwritten or removed. Do NOT replace a full section of code with only new code unless that is the clear intention of the plan. Instead, merge the original code and the proposed updates together intelligently according to the intention of the plan.
+
+--
+
+Example responses:
+
+1. Changes Applied Correctly:
+
+## Evaluate Diff
+The new function 'someFunction' was correctly added to the end of the file, with proper indentation and spacing.
+
+
+
+
+2. Changes Applied Incorrectly:
+
+## Evaluate Diff
+The new function 'someFunction' was incorrectly added to the end of the file - it was inserted with wrong indentation.
+
+
+
+
+pdx-new-42: // Update the user
+Evaluation: Describes the change being made. Not a reference.
+Reference: false
+
+pdx-new-44: // ... existing code ...
+Evaluation: Refers to code that initializes the database connection in the original file.
+Reference: true
+
+
+
+
+
+ pdx-42: func someFunction() {
+ pdx-43: connectToDatabase()
+ pdx-44: }
+
+
+ func someFunction() {
+ err := connectToDatabase()
+ if err != nil {
+ log.Printf("error: %v", err)
+ return
+ }
+ processData()
+ }
+
+
+
+
+IMPORTANT RULES:
+1. If your evaluation finds ANY issues, you MUST output the tag indicating the changes were applied incorrectly, followed by a comments element and a replacements element with at least one replacement.
+2. If your evaluation finds NO issues, you MUST output the tags indicating the changes were applied correctly and nothing else. Do NOT output comments or replacements if the changes were applied correctly.
+3. In replacements, every line in the original-code element MUST exactly match a line in the original file and MUST begin with the line number with a 'pdx-' prefix (NOT with a 'pdx-new-' prefix).
+4. In replacements, lines in the new-code element MUST NOT begin with a line number or prefix.
+5. Always include reasoning in a '## Evaluate Diff' section prior to outputting any of the tags above.
+
+--
+
+DO NOT FORGET TO INCLUDE THE ***'pdx-' PREFIXED*** LINE NUMBERS IN THE ORIGINAL-CODE ELEMENT.
+`
+
+ return s, headNumTokens
+}
+
+// getBuildPromptHead describes the original file and proposed changes
+func getBuildPromptHead(filePath string, preBuildStateWithLineNums shared.LineNumberedTextType, desc string, proposedWithLineNums shared.LineNumberedTextType) string {
+ return fmt.Sprintf(
+ `Path: %s
+
+Original file (with line nums prefixed with 'pdx-'):
+>>>
+%s
+<<<
+
+Proposed changes explanation:
+>>>
+%s
+<<<
+
+Proposed changes (with line nums prefixed with 'pdx-new-'):
+>>>
+%s
+<<<
+`,
+ filePath,
+ preBuildStateWithLineNums,
+ desc,
+ proposedWithLineNums,
+ )
+}
diff --git a/app/server/model/prompts/build_whole_file.go b/app/server/model/prompts/build_whole_file.go
new file mode 100644
index 0000000000000000000000000000000000000000..0b1283f2d98527eadebae9a554771b154b41e6e8
--- /dev/null
+++ b/app/server/model/prompts/build_whole_file.go
@@ -0,0 +1,52 @@
+package prompts
+
+import shared "plandex-shared"
+
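+// GetWholeFilePrompt builds the prompt that asks the model to output the entire
+// merged file. It returns the prompt and the estimated token count of its head section.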
+func GetWholeFilePrompt(filePath string, preBuildStateWithLineNums shared.LineNumberedTextType, changesWithLineNumsType shared.LineNumberedTextType, changesDesc string, comments string) (string, int) {
+ s := getBuildPromptHead(filePath, preBuildStateWithLineNums, changesDesc, changesWithLineNumsType)
+
+ headNumTokens := shared.GetNumTokensEstimate(s)
+
+ s += "## Comments\n\n"
+
+ if comments != "" {
+ s += comments + "\n\n"
+ } else {
+ s += CommentClassifierPrompt + "\n\n"
+ }
+
+ s += WholeFilePrompt
+
+ return s, headNumTokens
+}
+
+const WholeFilePrompt = `
+## Whole File
+
+Output the *entire merged file* with the *proposed updates* correctly applied. ALL reference comments will be replaced by the appropriate code from the *original file*. You will correctly merge the code from the *original file* with the *proposed updates* and output the entire file.
+
+ALL identified reference comments MUST be replaced by the appropriate code from the *original file*. You MUST correctly merge the code from the *original file* with the *proposed updates* and output the *entire* resulting file. The resulting file MUST NOT include any reference comments.
+
+The resulting file MUST be syntactically and semantically correct. All code structures must be properly balanced.
+
+The full resulting file should be output within an element, like this:
+
+
+ package main
+
+ import "logger"
+
+ function main() {
+ logger.info("Hello, world!");
+ exec()
+ }
+
+
+Do NOT include line numbers in the element. Do NOT include reference comments in the element. Output the ENTIRE file, no matter how long it is, with NO EXCEPTIONS. Include the resulting file *only* with no other text. Do NOT wrap the file output in triple backticks or any other formatting, except for the element tags.
+
+Do NOT include any additional text after the element. The output must end after the element's closing tag. DO NOT use the element's opening tag anywhere else in the output. ONLY use it to start the element.
+
+Do NOT UNDER ANY CIRCUMSTANCES *remove or change* any code that is not part of the changes in the *proposed updates*. ALL OTHER code from the *original file* must be reproduced *exactly* as it is in the *original file*. Do NOT remove comments, logging statements, commented out code, or anything else that is not part of the changes in the *proposed updates*. Your job is *only* to *apply* the changes in the *proposed updates* to the *original file*, not to make additional changes of *any kind*.
+
+The ABSOLUTE MOST IMPORTANT THING is to leave all existing code that is not DIRECTLY part of the changes in the *proposed updates* *exactly* as it is in the *original file*. Do NOT remove any code that is not part of the changes in the *proposed updates*. Do NOT include any reference comments in the output; replace them with the appropriate code from the *original file*. Be ABSOLUTELY CERTAIN you have not left anything out which belongs in the final result.
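+
+For example (illustrative), if the *original file* contains:
+
+  function main() {
+    connectDb();
+    startServer();
+  }
+
+and the *proposed updates* contain:
+
+  function main() {
+    // ... existing code ...
+    logger.info("ready");
+  }
+
+then the element must contain:
+
+  function main() {
+    connectDb();
+    startServer();
+    logger.info("ready");
+  }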
+`
diff --git a/app/server/model/prompts/chat.go b/app/server/model/prompts/chat.go
new file mode 100644
index 0000000000000000000000000000000000000000..d610b3ca40e5eed1adeaf92c557411b28a8dc6f6
--- /dev/null
+++ b/app/server/model/prompts/chat.go
@@ -0,0 +1,101 @@
+package prompts
+
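+// GetChatSysPrompt returns the system prompt for chat mode, adapted to whether
+// execution mode and auto-context are enabled.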
+func GetChatSysPrompt(params CreatePromptParams) string {
+ base := `
+[YOUR INSTRUCTIONS:]
+
+ You are a knowledgeable technical assistant helping users with Plandex, a tool for planning and implementing changes to codebases. Plandex allows developers to discuss changes, make plans, and implement updates to their code with AI assistance.`
+
+ modeSpecific := ``
+ if params.ExecMode {
+ modeSpecific += `
+You have execution mode enabled, which means you can discuss both file changes and tasks that require running commands. When discussing potential solutions:
+- You can suggest both file changes and command execution steps
+- Be clear about which parts require execution vs. file changes
+- Consider build processes, testing, and deployment when relevant
+- Be specific about what commands would need to be run`
+ } else {
+ modeSpecific += `
+Note that execution mode is not enabled, so while discussing potential solutions:
+- Focus on changes that can be made through file updates
+- If a solution would require running commands, mention that execution mode would be needed
+- You can still discuss build processes, testing, and deployment conceptually
+- Be clear when certain steps would require execution mode to be enabled`
+ }
+
+ contextHandling := ``
+ if params.AutoContext {
+
+ contextHandling = `
+Since context was just loaded (if needed) in the previous response:
+- Continue the conversation naturally using the context you now have access to`
+
+ } else {
+ contextHandling = `
+Context handling:
+- You'll work with the context explicitly provided by the user
+- If you need additional context, ask the user to provide it
+- Be specific about which files would be helpful to see
+- You can still reference any files already in context`
+ }
+
+ return base + modeSpecific + `
+
+You are currently in chat mode, which means you're having a natural technical conversation with the user. Many users start in chat mode to:
+- Explore and understand their codebase
+- Discuss potential changes before implementing them
+- Get explanations about code behavior
+- Debug issues and discuss solutions
+- Think through approaches before making a plan
+- Evaluate different implementation strategies
+- Understand best practices and potential pitfalls
+
+At any point, the user can transition to 'tell mode' to start making actual changes to files. Users often chat first to:
+- Clarify their goals before starting implementation
+- Get your input on different approaches
+- Better understand their codebase with your help
+- Work through technical decisions
+- Learn about relevant patterns and practices
+
+Best practices for technical discussion:
+- Focus on what the user has specifically asked about - don't suggest extra features or changes unless asked
+- Consider existing codebase structure and organization when discussing potential changes
+- When discussing libraries, focus on well-maintained, widely-used options with permissive licenses
+- Think about code organization - smaller, logically separated files are often better than large monolithic ones
+- Consider error handling, logging, and security best practices in your suggestions
+- Be thoughtful about where new code should be placed to maintain consistent codebase structure
+- Keep in mind that any suggested changes should work with the latest state of the codebase
+
+During chat mode:
+
+You can:
+- Engage in natural technical discussion about the code and context
+- Provide explanations and answer questions
+- Include code snippets when they help explain concepts
+- Reference and discuss files from the context
+- Help debug issues by examining code and suggesting fixes
+- Suggest approaches and discuss trade-offs
+- Discuss potential plans informally
+- Help evaluate different implementation strategies
+- Discuss best practices and potential pitfalls
+- Consider and explain implications of different approaches
+
+You cannot:
+- Create or modify any files
+- Output formal implementation code blocks
+- Make formal plans using conventions like "### Tasks"
+- Structure responses as if implementing changes` +
+ contextHandling + `
+
+When implementation is needed:
+- If the user wants to move forward with changes, remind them they can use 'tell mode' to start planning and implementing changes. If you use the exact phrase 'switch to tell mode', the user will be automatically given the option to switch, so use that exact phrase if it makes sense to give the user the option to switch based on their prompt and your response.
+- In tell mode, you'll help them plan and make actual changes to their codebase
+- The transition can happen at any point - users often chat first, then move to implementation when ready
+- When discussing potential implementations, consider what files would need to be created or updated
+
+Your responses should feel like a natural technical conversation while still being precise and helpful. Remember that many users are using chat mode as a precursor to making actual changes, so be thorough in your technical discussion while keeping things conversational.
+
+Users can switch between chat mode and tell mode at any point in a plan. A user might switch to chat mode in the middle of a plan's implementation in order to discuss the in-progress plan before proceeding. Even if you are in the middle of a plan, you MUST follow all the instructions above for chat mode and not attempt to write code or implement any tasks. You may receive a list of tasks that are in progress, including a 'current subtask'. You MUST NOT implement any tasks—only discuss them.
+
+`
+}
diff --git a/app/server/model/prompts/code_block_langs.go b/app/server/model/prompts/code_block_langs.go
new file mode 100644
index 0000000000000000000000000000000000000000..be03ccbba473da5fca31d285dd8572a9de952506
--- /dev/null
+++ b/app/server/model/prompts/code_block_langs.go
@@ -0,0 +1,167 @@
+package prompts
+
+const ValidLangIdentifiers = `
+abap
+abl
+abnf
+actionscript3
+ada
+agda
+ahk
+al
+alloy
+antlr
+apache
+apl
+applescript
+aql
+arduino
+armasm
+awk
+ballerina
+bash
+basic
+bibtex
+bicep
+blitzbasic
+bnf
+brainfuck
+c
+cpp
+csharp
+caddy
+capnp
+cassandra
+ceylon
+chapel
+clojure
+cmake
+cobol
+coffeescript
+common-lisp
+console
+coq
+crystal
+css
+cucumber
+cue
+cython
+d
+dart
+dax
+diff
+django
+dockerfile
+dtd
+dylan
+ebnf
+elixir
+elm
+erlang
+factor
+fennel
+fish
+forth
+fortran
+fsharp
+gawk
+gdscript
+gherkin
+gleam
+glsl
+gnuplot
+go
+graphql
+groff
+groovy
+handlebars
+hare
+haskell
+haxe
+hcl
+hlsl
+html
+http
+idris
+ini
+io
+java
+javascript
+jinja
+json
+jsx
+julia
+kotlin
+latex
+lisp
+llvm
+lua
+make
+markdown
+mathematica
+matlab
+meson
+mlir
+modula2
+mysql
+nasm
+nginx
+nim
+nix
+objc
+ocaml
+octave
+odin
+openscad
+org
+perl
+php
+plpgsql
+postscript
+powershell
+prolog
+promql
+protobuf
+prql
+python
+qml
+r
+racket
+raku
+reason
+rego
+restructuredtext
+rexx
+ruby
+rust
+sas
+sass
+scala
+scheme
+scss
+shell
+smalltalk
+solidity
+sparql
+sql
+swift
+systemverilog
+tcl
+terraform
+tex
+toml
+tsx
+turtle
+typescript
+vala
+vbnet
+verilog
+vhdl
+vim
+vue
+wgsl
+xml
+yaml
+zig
+zsh
+`
diff --git a/app/server/model/prompts/describe.go b/app/server/model/prompts/describe.go
new file mode 100644
index 0000000000000000000000000000000000000000..ade1e56cf5283c5af20e2c0c8d36bd1c1cd294d2
--- /dev/null
+++ b/app/server/model/prompts/describe.go
@@ -0,0 +1,30 @@
+package prompts
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+
+	"github.com/sashabaranov/go-openai"
+	"github.com/sashabaranov/go-openai/jsonschema"
+)
+
+const SysDescribeXml = `You are an AI parser. You turn an AI's plan for a programming task into a structured description. You MUST output a valid XML response that includes a <commitMsg> tag. The <commitMsg> tag should contain a good, succinct commit message for the changes proposed. Do not use XML attributes - put all data as tag content.
+
+Example response:
+<commitMsg>Add user authentication system with JWT support</commitMsg>`
+
+const SysDescribe = "You are an AI parser. You turn an AI's plan for a programming task into a structured description. You MUST call the 'describePlan' function with a valid JSON object that includes the 'commitMsg' key. 'commitMsg' should be a good, succinct commit message for the changes proposed. You must ALWAYS call the 'describePlan' function. Never call any other function."
+
+var DescribePlanFn = openai.FunctionDefinition{
+ Name: "describePlan",
+ Parameters: &jsonschema.Definition{
+ Type: jsonschema.Object,
+ Properties: map[string]jsonschema.Definition{
+ "commitMsg": {
+ Type: jsonschema.String,
+ },
+ },
+ Required: []string{"commitMsg"},
+ },
+}
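+
+// describePlanExample is a minimal usage sketch (illustrative, not one of Plandex's
+// own call sites). It shows one way DescribePlanFn could be passed to the go-openai
+// client and the commit message decoded from the function call arguments. The model
+// choice and error handling here are assumptions.
+func describePlanExample(client *openai.Client, planText string) (string, error) {
+	resp, err := client.CreateChatCompletion(context.Background(), openai.ChatCompletionRequest{
+		Model: openai.GPT4,
+		Messages: []openai.ChatCompletionMessage{
+			{Role: openai.ChatMessageRoleSystem, Content: SysDescribe},
+			{Role: openai.ChatMessageRoleUser, Content: planText},
+		},
+		Functions: []openai.FunctionDefinition{DescribePlanFn},
+		// Force the model to call describePlan rather than answering in prose.
+		FunctionCall: openai.FunctionCall{Name: DescribePlanFn.Name},
+	})
+	if err != nil {
+		return "", err
+	}
+	call := resp.Choices[0].Message.FunctionCall
+	if call == nil {
+		return "", fmt.Errorf("model did not call describePlan")
+	}
+	var args struct {
+		CommitMsg string `json:"commitMsg"`
+	}
+	if err := json.Unmarshal([]byte(call.Arguments), &args); err != nil {
+		return "", err
+	}
+	return args.CommitMsg, nil
+}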
+
+const SysPendingResults = "You are an AI commit message summarizer. You take a list of descriptions of pending changes and turn them into a succinct one-line summary of all the pending changes that makes for a good commit message title. Output ONLY this one-line title and nothing else."
+
diff --git a/app/server/model/prompts/exec_status.go b/app/server/model/prompts/exec_status.go
new file mode 100644
index 0000000000000000000000000000000000000000..d1fc1c6b24b3de1cf1197cf1348eda22c14df68a
--- /dev/null
+++ b/app/server/model/prompts/exec_status.go
@@ -0,0 +1,98 @@
+package prompts
+
+import (
+ "github.com/sashabaranov/go-openai"
+ "github.com/sashabaranov/go-openai/jsonschema"
+
+ shared "plandex-shared"
+)
+
+const SysExecStatusFinishedSubtaskXml = `You are tasked with evaluating a response generated by another AI (AI 1) that has been given a coding task to implement.
+
+Your goal is to determine whether the current task was fully implemented in the supplied message(s) from AI 1.
+
+To do this, you need to analyze the latest message from AI 1, and possibly previous messages, and then carefully decide based on the following criteria:
+
+First, examining any previous messages along with the current message, assess whether the current task was fully implemented when these messages are taken together. A task is only considered fully implemented if all necessary code changes for that task have been completed with no remaining todo placeholders or partial implementations.
+
+You MUST output a valid XML response with a root tag that contains two child tags:
+- <reasoning>: A brief explanation of whether the task was completed and why
+- <subtaskFinished>: Either "true" or "false" indicating if the task is done
+
+Do not use XML attributes - put all data as tag content.
+
+Example response:
+
+<reasoning>Task is complete - all required code changes implemented with no placeholders</reasoning>
+<subtaskFinished>true</subtaskFinished>
+`
+
+const SysExecStatusFinishedSubtask = `You are tasked with evaluating a response generated by another AI (AI 1) that has been given a coding task to implement.
+
+Your goal is to determine whether the current task was fully implemented in the supplied message(s) from AI 1.
+
+To do this, you need to analyze the latest message from AI 1, and possibly previous messages, and then carefully decide based on the following criteria:
+
+First, examining any previous messages along with the current message, assess whether the current task was fully implemented when these messages are taken together. A task is only considered fully implemented if all necessary code changes for that task have been completed with no remaining todo placeholders or partial implementations.
+
+You *must* call the didFinishSubtask function with a JSON object containing the keys 'reasoning' and 'subtaskFinished'.
+
+Set 'reasoning' to a string briefly and succinctly explaining whether the current task was or was not fully implemented, and why.
+
+If AI 1 has stated that the task has been completed, consider that in your reasoning and response, but also assess the actual implementation and whether it really did complete the task. Do NOT validate the code or assess the quality of the implementation; only assess whether each item in the task has been implemented (even if that implementation is not perfect). Only respond that a task is not finished if a significant step is missing—otherwise, respond that it is finished.
+
+The 'subtaskFinished' key is a boolean that indicates whether the current task has been fully implemented in the latest message from AI 1. If the current task has been fully implemented, 'subtaskFinished' must be true. If the current task has not been fully implemented or there are unexplained todo placeholders, 'subtaskFinished' must be false. If the task has been skipped because it is not necessary or was already implemented in an earlier step, 'subtaskFinished' must be true.
+
+You must always call 'didFinishSubtask'. Don't call any other function.`
+
+type GetExecStatusFinishedSubtaskParams struct {
+ UserPrompt string
+ CurrentSubtask string
+ CurrentMessage string
+ PreviousMessages []string
+ PreferredOutputFormat shared.ModelOutputFormat
+}
+
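+// GetExecStatusFinishedSubtask builds the evaluation prompt from the user prompt,
+// the current task, and AI 1's messages, choosing the XML or function-call variant
+// based on the preferred output format.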
+func GetExecStatusFinishedSubtask(params GetExecStatusFinishedSubtaskParams) string {
+ userPrompt := params.UserPrompt
+ currentSubtask := params.CurrentSubtask
+ currentMessage := params.CurrentMessage
+ previousMessages := params.PreviousMessages
+ preferredOutputFormat := params.PreferredOutputFormat
+
+ var s string
+ if preferredOutputFormat == shared.ModelOutputFormatXml {
+ s = SysExecStatusFinishedSubtaskXml
+ } else {
+ s = SysExecStatusFinishedSubtask
+ }
+
+ if userPrompt != "" {
+ s += "\n\n**Here is the user's prompt:**\n" + userPrompt
+ }
+ s += "\n\n**Here is the current task:**\n" + currentSubtask
+
+ for _, msg := range previousMessages {
+ s += "\n\n**Here is a previous message from AI 1 that was working on the same task:**\n" + msg
+ }
+
+ s += "\n\n**Here is the latest message from AI 1:**\n" + currentMessage
+
+ return s
+}
+
+var DidFinishSubtaskFn = openai.FunctionDefinition{
+ Name: "didFinishSubtask",
+ Parameters: &jsonschema.Definition{
+ Type: jsonschema.Object,
+ Properties: map[string]jsonschema.Definition{
+ "reasoning": {
+ Type: jsonschema.String,
+ },
+ "subtaskFinished": {
+ Type: jsonschema.Boolean,
+ },
+ },
+ Required: []string{"reasoning", "subtaskFinished"},
+ },
+}
diff --git a/app/server/model/prompts/explanation_format.go b/app/server/model/prompts/explanation_format.go
new file mode 100644
index 0000000000000000000000000000000000000000..d8e6d6ea613771775d08160dabb314a529913af3
--- /dev/null
+++ b/app/server/model/prompts/explanation_format.go
@@ -0,0 +1,932 @@
+package prompts
+
+const ChangeExplanationPrompt = `
+### Action Explanation Format
+
+#### 1. Updating an existing file in context
+
+Prior to any code block that is *updating* an existing file in context, you MUST explain the change in the following format EXACTLY:
+
+---
+**Updating ` + "`[file path]`" + `**
+Type: [type]
+Summary: [brief description, symbols/sections being changed]
+Replace: [lines to replace/remove]
+Context: [describe surrounding code that helps locate the change unambiguously]
+Preserve: [symbols/structures/sections to preserve when overwriting entire file]
+---
+
+OR if multiple changes are being made to the same file in a single subtask and a single code block, list each change independently like this:
+
+---
+**Updating ` + "`[file path]`" + `**
+Change 1.
+Type: [type]
+Summary: [brief description, symbols/sections being changed]
+Replace: [lines to replace/remove]
+Context: [describe surrounding code that helps locate the change unambiguously]
+
+Change 2.
+Type: [type]
+Summary: [brief description, symbols/sections being changed]
+Replace: [lines to replace/remove]
+Context: [describe surrounding code that helps locate the change unambiguously]
+
+... and so on for each change
+---
+
+Include a line break after the initial '**Updating ` + "`[file path]`" + `**' line as well as each of the following fields. Use the exact same spacing and formatting as shown in the above format and in the examples further down.
+
+The Type field MUST be exactly one of these values: 'add', 'prepend', 'append', 'replace', 'remove', or 'overwrite'.
+
+- add
+ - For inserting new code within the file *only*
+ - Only use if NO existing code is being changed or removed - otherwise use 'replace' or 'overwrite'
+ - If inserting code at the start of the file, use 'prepend' instead
+ - If inserting code at the end of the file, use 'append' instead
+- prepend
+ - For inserting new code at the start of the file *only*
+ - Only use if NO existing code is being changed or removed - otherwise use 'replace' or 'overwrite'
+- append
+ - For inserting new code at the end of the file *only*
+ - Only use if NO existing code is being changed or removed - otherwise use 'replace' or 'overwrite'
+- replace
+ - For replacing existing code within the file *only*
+ - Only use if existing code is being replaced by new code. If new code is being added but none is being replaced, use 'add', 'append', or 'prepend' instead
+ - If the entire file is being replaced, use 'overwrite' instead
+ - If existing code is being removed and nothing new is being added, use 'remove' instead
+- remove
+ - For removing existing code within the file *only*
+ - Only use if existing code is being removed. If new code is being added but none is being removed, use 'add', 'append', or 'prepend' instead
+ - If code is being removed and replaced with new code, use 'replace' instead
+- overwrite
+ - For replacing the entire file *only*
+ - Only use if the *entire file* is being replaced. If new code is being added but none is being replaced or removed, use 'add', 'append', or 'prepend' instead.
+
+
+For each Type, follow these validation rules:
+
+- For 'add':
+ - Summary MUST briefly describe the new code being added and where it will be inserted
+ - Context MUST describe the surrounding code structures that help locate where the new code will be inserted. The context MUST be *OUTSIDE* of the lines that are being added so that it 'anchors' the exact location of the change in the original file.
+ - Preserve field must be omitted
+ - Replace field must be omitted
+ - In the code block, include the anchors identified in the 'Context' field, collapsed with a reference comment if they span more than a few lines, that are immediately before and after the new code being added. Do NOT include large sections of code from the original file that are not being modified when using 'add'; include enough surrounding code to unambiguously locate the change in the original file, and no more.
+ - In the code block, DO NOT UNDER ANY CIRCUMSTANCES reproduce the entire original file with the new code added—that's not what 'add' is for. If you're reproducing the entire original file, use 'overwrite' instead.
+
+- For 'prepend':
+ - Summary MUST briefly describe the new code being prepended to the start of the file
+ - Context MUST identify the first *existing* code structure in the original file (which will NOT be modified) that the new code will be added before
+ - Preserve field must be omitted
+ - Replace field must be omitted
+ - Code block MUST include JUST the first existing code structure in the original file (which will NOT be modified), collapsed with a reference comment if it spans more than a few lines, immediately followed by the new code being prepended. Do NOT include large sections of code from the original file that are not being modified when using 'prepend'.
+ - In the code block, DO NOT UNDER ANY CIRCUMSTANCES reproduce the entire original file with the new code prepended—that's not what 'prepend' is for. If you're reproducing the entire original file, use 'overwrite' instead.
+
+- For 'append':
+ - Summary MUST briefly describe the new code being appended to the end of the file
+ - Context MUST identify the last *existing* code structure in the original file (which will NOT be modified) that the new code will be added after
+ - Preserve field must be omitted
+ - Replace field must be omitted
+ - Code block MUST include JUST the last existing code structure in the original file (which will NOT be modified), collapsed with a reference comment if it spans more than a few lines, immediately followed by the new code being appended. Do NOT include large sections of code from the original file that are not being modified when using 'append'.
+ - In the code block, DO NOT UNDER ANY CIRCUMSTANCES reproduce the entire original file with the new code appended—that's not what 'append' is for. If you're reproducing the entire original file, use 'overwrite' instead.
+
+- For 'replace':
+ - Summary MUST briefly describe the change
+ - Replace field MUST list lines in the original file that are being replaced. Use the exact format: 'lines [startLineNumber]-[endLineNumber]' — e.g. 'lines 10-20' or for a single line, 'line [lineNumber]' — e.g. 'line 10', or if multiple sections are being replaced, use 'lines [startLineNumber]-[endLineNumber], [startLineNumber]-[endLineNumber], ...' — e.g. 'lines 10-20, 30-40' (can also include single lines if desired, or a mix of single and multiple lines, e.g. 'line 10, lines 30-40') — DO NOT use any other format, or describe the lines in any other way.
+ - Context MUST describe the surrounding code structures that help locate what is being replaced. Context MUST be *OUTSIDE* of the lines that are being replaced so that it 'anchors' the exact location of the change in the original file.
+ - Preserve field must be omitted
+ - In the code block, include the anchors identified in the 'Context' field, collapsed with a reference comment if they span more than a few lines, that are immediately before and after the lines being replaced. Do NOT include large sections of code from the original file that are not being modified when using 'replace'; include enough surrounding code to unambiguously locate the change in the original file, and no more.
+ - Do NOT UNDER ANY CIRCUMSTANCES reproduce the entire original file with the new code added—that's not what 'replace' is for. If you're reproducing the entire original file, use 'overwrite' instead.
+
+- For 'remove':
+ - Summary MUST briefly describe the change
+ - Replace field MUST list lines in the original file that are being removed. Use the exact format: 'lines [startLineNumber]-[endLineNumber]' — e.g. 'lines 10-20' or for a single line, 'line [lineNumber]' — e.g. 'line 10', or if multiple sections are being removed, use 'lines [startLineNumber]-[endLineNumber], [startLineNumber]-[endLineNumber], ...' — e.g. 'lines 10-20, 30-40' (can also include single lines if desired, or a mix of single and multiple lines, e.g. 'line 10, lines 30-40') — DO NOT use any other format, or describe the lines in any other way.
+ - Context MUST describe the surrounding code structures that help locate what is being removed. Context MUST be *OUTSIDE* of the lines that are being removed so that it 'anchors' the exact location of the change in the original file.
+ - Preserve field must be omitted
+ - In the code block, include the anchors identified in the 'Context' field, collapsed with a reference comment if they span more than a few lines, that are immediately before and after the lines being removed. Do NOT include large sections of code from the original file that are not being modified when using 'remove'; include enough surrounding code to unambiguously locate the change in the original file, and no more.
+ - Do NOT UNDER ANY CIRCUMSTANCES reproduce the entire original file with the removed code omitted—that's not what 'remove' is for. If you're reproducing the entire original file, use 'overwrite' instead.
+
+- For 'overwrite':
+ - Summary MUST briefly describe the change and list the specific symbols/sections being changed or replaced
+ - Context field must be omitted
+ - Preserve MUST *exhaustively* list all symbols/sections in the original file that should be included in the final result. Do *NOT* say that you are 'preserving nothing' because you are overwriting the entire file—the point is to identify what, if anything, will be *kept the same* from the original file, even though you are overwriting the whole file. Only say that you're preserving nothing if *nothing* will be kept the same from the original file and the new file will be completely new. The point of this field is to ensure that the final result is a *complete* and *correct* replacement of the original file, and that no important code is omitted.
+ - Changes with 'overwrite' MUST NOT be combined with other changes in the same code block. An 'overwrite' change MUST be the ONLY change for the code block.
+
+In the Context, Summary, and Preserve fields, when listing code symbols, list them in a comma-separated list and surround them with backticks. For example, ` + "`foo`, `someFunc`, `someVar`" + `
+
+IMPORTANT: when listing code symbols or structures in the Context, Summary, and Preserve fields, you MUST include the name of the symbol or structure only, *not* the full signature (e.g. don't include the function parameters or return type for a function—just the function name; don't include the type or the 'var/let/const' keywords for a variable—just the variable name, and so on). DO NOT UNDER ANY CIRCUMSTANCES include full function signatures when listing functions. Include *only* the function name.
+
+For example, instead of ` + "`func (state *activeTellStreamState) genPlanDescription() (*db.ConvoMessageDescription, error)`" + `, you should use ` + "`genPlanDescription`" + `. Instead of ` + "`var foo int`" + `, you should use ` + "`foo`" + `.
+
+CRITICAL: The Context field MUST include symbols/structures that are NOT being modified in any way. They must be completely outside of and untouched by the change. They serve as anchors to locate where the change should occur in the file. The purpose is to clearly demonstrate which context immediately *surrounds* the change so that it can be included in the code block that updates the file.
+
+ INCORRECT - symbols in Context are part of the change:
+ Summary: Replace implementations of ` + "`foo`, `bar`, and `baz`" + `
+ Replace: lines 105-200
+ Context: Located between ` + "`foo`" + ` and ` + "`baz`" + ` # Wrong - these are being changed!
+
+ CORRECT - symbols in Context are outside the change:
+ Summary: Replace implementations of ` + "`foo`, `bar`, and `baz`" + `
+ Replace: lines 105-200
+ Context: Located between ` + "`setup`" + ` and ` + "`cleanup`" + ` functions # Correct - these aren't being changed
+
+Again, the point of the Context field is to identify *anchors* that exist completely *outside* of the bounds of the change in the original file. The Context field is NOT used to identify code that is being *modified* or *replaced* as part of the change, but rather the code immediately *surrounding* the change.
+
+The symbols/structure you mention in the Context field MUST ALSO be *immediately adjacent* to the change in the original file. Do NOT use symbols or structures that are further away from the change and have other code between them and the change.
+
+ALWAYS surround the symbols/structures you mention in the Context field with backticks. Do NOT leave them out.
+
+Furthermore, every symbol/structure you mention in the Context field ABSOLUTELY MUST be included in the code block that updates the file. Do NOT UNDER ANY CIRCUMSTANCES omit any of these symbols/structures from the code block. Use reference comments to avoid repeating code that is not changing.
+
+Keep the explanation as succinct as possible while still following all of the above rules.
+
+You ABSOLUTELY MUST use this template EXACTLY as described above. DO NOT CHANGE THE FORMATTING OR WORDING IN ANY WAY! DO NOT OMIT ANY FIELDS FROM THE EXPLANATION AS DESCRIBED ABOVE.
+
+Example explanations:
+
+**Updating ` + "`server/api/client.go`" + `**
+Type: add
+Summary: Add new ` + "`doRequest`" + ` method to ` + "`Client`" + ` struct after the constructor method
+Context: Located between ` + "`NewClient`" + ` constructor and ` + "`getUser`" + ` method
+
+**Updating ` + "`server/types/api.go`" + `**
+Type: replace
+Summary: Replace implementation of ` + "`extractName`" + ` function with new version using ` + "`xml.Decoder`" + `
+Replace: lines 8-15
+Context: Located between ` + "`validateName`" + ` and ` + "`formatName`" + ` functions
+
+**Updating ` + "`cli/cmd/update.go`" + `**
+Type: overwrite
+Summary: Replace implementations of ` + "`updateCmd`" + `, ` + "`runUpdate`" + `, and ` + "`validateUpdate`" + ` functions with new versions
+Preserve: ` + "`updateFlags`" + ` struct and ` + "`defaultTimeout`" + ` constant
+
+**Updating ` + "`server/config/init.go`" + `**
+Type: prepend
+Summary: Add new ` + "`validateConfig`" + ` function at start of file
+Context: Will be placed before the ` + "`init`" + ` function
+
+**Updating ` + "`server/models/user.go`" + `**
+Type: append
+Summary: Add new ` + "`cleanupUserData`" + ` function at end of file
+Context: Will be placed after the ` + "`validateUser`" + ` function
+
+**Updating ` + "`server/handlers/auth.go`" + `**
+Type: remove
+Summary: Remove unused ` + "`validateLegacyTokens`" + ` function and its helper ` + "`checkTokenFormat`" + `
+Replace: lines 25-85
+Context: Located between ` + "`parseAuthHeader`" + ` and ` + "`validateJWT`" + ` functions
+
+*
+
+If multiple changes are being made to the same file in a single subtask, you MUST ALWAYS combine them into a SINGLE code block. Do NOT use multiple code blocks for multiple changes to the same file.
+
+When writing the explanation for multiple changes that will be included in a single code block, list each change independently like this:
+
+**Updating + "server/handlers/auth.go" + **
+Change 1.
+ Type: remove
+ Summary: Remove unused ` + "`validateLegacyTokens`" + ` function and its helper ` + "`checkTokenFormat`" + `
+ Replace: lines 25-85
+ Context: Located between ` + "`parseAuthHeader`" + ` and ` + "`validateJWT`" + ` functions
+
+Change 2.
+ Type: append
+ Summary: Append just-removed ` + "`checkTokenFormat`" + ` function to the end of the file
+ Context: The last code structure is ` + "`finalizeAuth`" + ` function
+
+When outputting a compound explanation in the above format, it is CRITICAL that you still only output a SINGLE code block. Do NOT output multiple code blocks.
+
+*
+
+Again, ALL code structures/symbols that are mentioned in the Context field MUST be included as *anchors* in the code block that updates the file. If you are inserting new code between [structure 1] and [structure 2], then you MUST include both [structure 1] and [structure 2] as anchors in the code block that updates the file. Include *anchors* from the Context field so that the change is clearly positioned in the file between sections of code that are *not* being modified.
+
+At the same time, you MUST NOT reproduce large sections of code from the original file that are not changing. You MUST use reference comments "// ... existing code ..." to avoid reproducing large sections of code from the original file that are not changing.
+
+If you are using functions that are not being modified as anchors, then include the function signatures and closing braces, but use a reference comment for the function bodies. Here is an example:
+
+If your change description is:
+
+**Updating ` + "`server/api/users.go`" + `**
+Type: replace
+Summary: Replace implementation of ` + "`validateUser`" + ` function to add role and permission validation
+Replace: lines 10-20
+Context: Located between ` + "`parseUser`" + ` and ` + "`updateUser`" + ` functions
+
+Then your code block MUST look like:
+
+---
+// ... existing code ...
+
+func (api *API) parseUser(input []byte) (*User, error) {
+ // ... existing code ...
+}
+
+func (api *API) validateUser(user *User) error {
+ // Validate basic fields
+ if user.ID == "" {
+ return errors.New("user ID is required")
+ }
+ if user.Email == "" {
+ return errors.New("email is required")
+ }
+
+ // New validation for roles
+ if len(user.Roles) == 0 {
+ return errors.New("user must have at least one role")
+ }
+ for _, role := range user.Roles {
+ if !isValidRole(role) {
+ return fmt.Errorf("invalid role: %s", role)
+ }
+ }
+
+ // New validation for permissions
+ for _, permission := range user.Permissions {
+ if !isValidPermission(permission) {
+ return fmt.Errorf("invalid permission: %s", permission)
+ }
+ }
+
+ return nil
+}
+
+func (api *API) updateUser(user *User) error {
+ // ... existing code ...
+}
+
+// ... existing code ...
+---
+
+Notice how:
+- The anchor functions 'parseUser' and 'updateUser' are included with their full signatures
+- Their bodies are replaced with '// ... existing code ...' since they aren't being modified
+- The new 'validateUser' implementation is included in full since it's the actual change
+- The file starts and ends with '// ... existing code ...' comments since this change is in the middle of the file
+- No unrelated code from the original file is reproduced; only the anchors and the new implementation appear
+
+*
+
+❌ INCORRECT - Context symbols missing from code block:
+**Updating ` + "`sound.py`" + `**
+Type: add
+Summary: Add ` + "`debug_status`" + ` method to ` + "`Engine`" + ` class
+Context: Located in the ` + "`Engine`" + ` class, right after the ` + "`__init__`" + ` method and right before the ` + "`cleanup`" + ` method
+
+- sound.py:
+
+# ... existing code ...
+
+def debug_status(self):
+ """Print debug information about the sound engine state."""
+ print("Sound engine debug info")
+
+# ... existing code ...
+
+
+✅ CORRECT - Context symbols included in code block:
+**Updating ` + "`sound.py`" + `**
+Type: add
+Summary: Add ` + "`debug_status`" + ` method to ` + "`Engine`" + ` class
+Context: Located in the ` + "`Engine`" + ` class, after the ` + "`cleanup`" + ` method
+
+- sound.py:
+
+# ... existing code ...
+
+class Engine:
+ def __init__(self):
+ # ... existing code ...
+
+ def debug_status(self):
+ """Print debug information about the sound engine state."""
+ print("Sound engine debug info")
+
+ def cleanup(self):
+ # ... existing code ...
+
+# ... existing code ...
+
+
+*
+
+As you can see, in the correct example, every symbol/structure mentioned in the Context field is included in the code block, unambiguously locating the change.
+
+*
+
+If a file is being *updated* and the above explanation does *not* indicate that the file is being *overwritten* or that the change is being prepended to the *start* of the file, then the code block ABSOLUTELY ALWAYS MUST begin with an "... existing code ..." comment to account for all the code before the change. It is EXTREMELY IMPORTANT that you include this comment when it is needed—it must not be omitted.
+
+If a file is being *updated* and the above explanation does *not* indicate that the file is being *overwritten* or that the change is being appended to the *end* of the file, then the code block ABSOLUTELY ALWAYS MUST end with an "... existing code ..." comment to account for all the code after the change. It is EXTREMELY IMPORTANT that you include this comment when it is needed—it must not be omitted.
+
+Again, unless a file is being fully overwritten, or the change either starts at the *absolute start* of the file or ends at the *absolute end* of the file, IT IS ABSOLUTELY CRITICAL that the code block both BEGINS with an "... existing code ..." comment and ENDS with an "... existing code ..." comment.
+
+If a code block must begin with an "... existing code ..." comment according to the above rules, then there MUST NOT be any code before the initial "... existing code ..." comment.
+
+If a code block must end with an "... existing code ..." comment according to the above rules, then there MUST NOT be any code after the final "... existing code ..." comment.
+
+Again, if the change *does not* end at the *absolute end* of the file, then the LAST LINE of the code block MUST be an "... existing code ..." comment. Ending the code block like this:
+
+---
+// ... existing code ...
+
+func (a *Api) NewMethod() {
+ callExistingMethod()
+}
+
+func (a *Api) LoadContext(planId, branch string, req shared.LoadContextRequest) (*shared.LoadContextResponse, *shared.ApiError) {
+ // ... existing code ...
+}
+---
+
+is NOT CORRECT, because the last line is not an "... existing code ..." comment—it is rather the '}' closing bracket of the function. Instead, it must be:
+
+---
+// ... existing code ...
+
+func (a *Api) NewMethod() {
+ callExistingMethod()
+}
+
+func (a *Api) LoadContext(planId, branch string, req shared.LoadContextRequest) (*shared.LoadContextResponse, *shared.ApiError) {
+ // ... existing code ...
+}
+
+// ... existing code ...
+---
+
+Now the final line is an "... existing code ..." comment, which is correct.
+
+*
+
+If the explanation states that it will overwrite the entire file, then the code block that updates the file MUST include the ENTIRE file *with no reference or removal comments* and no necessary code omitted. Include *all* code from both the original file and the intended change merged together correctly. Do NOT omit any code from the original file unless the specific intention of the task is to replace or remove that code. Ensure that all symbols/sections mentioned in the 'Preserve' field are included in the code block that updates the file. *MAKE THE CODE BLOCK AS LONG AS NECESSARY TO INCLUDE THE **ENTIRE** FILE.* If the file is too long to fit within a single code block or a single response, *do not* use the 'overwrite' type. Use another type to make a more specific change.
+
+Do NOT overwrite the entire file for very large files that cannot fit within a single response.
+
+*
+
+If the explanation includes a 'Preserve' field, be absolutely certain that the corresponding code block does *not* remove or replace any of the code listed in the 'Preserve' field.
+
+---
+
+Example of an explanation that includes multiple changes to the same file, with a *single* code block:
+
+**Updating + "server/handlers/auth.go" + **
+Change 1.
+ Type: remove
+ Summary: Remove + "validateLegacyTokens" + and + "checkTokenFormat" + (original file lines 25-35).
+ Context: Located between + "parseAuthHeader" + and + "validateJWT" + functions
+Change 2.
+ Type: append
+ Summary: Append a new + "checkTokenFormatV2" + function at the end of the file
+ Context: The last code structure is + "finalizeAuth" + function
+
+- server/handlers/auth.go:
+
+// ... existing code ...
+
+func parseAuthHeader() {
+ // ... existing code ...
+}
+
+// Plandex: removed code
+
+func validateJWT() {
+ // ... existing code ...
+}
+
+func finalizeAuth() {
+ // ... existing code ...
+}
+
+func checkTokenFormatV2(header string) bool {
+ // new code for updated token checking
+ return header != ""
+}
+
+// ... existing code ...
+
+
+*
+
+Remember, when outputting a compound explanation in the above format, it is CRITICAL that you still only output a SINGLE code block.
+
+❌ INCORRECT - Including too much of the file with append
+
+**Updating ` + "`server/models/user.go`" + `**
+Type: append
+Summary: Add new ` + "`validateUserEmail`" + ` function at the end of file
+Context: Will be placed after the ` + "`isAdmin`" + ` function
+
+- server/models/user.go:
+
+package models
+
+import (
+ "errors"
+ "strings"
+)
+
+type User struct {
+ ID string
+ Name string
+ Email string
+ Role string
+}
+
+func NewUser(name, email string) *User {
+ return &User{
+ Name: name,
+ Email: email,
+ }
+}
+
+func (u *User) isAdmin() bool {
+ return u.Role == "admin"
+}
+
+func (u *User) validateUserEmail() error {
+ if u.Email == "" {
+ return errors.New("email cannot be empty")
+ }
+ if !strings.Contains(u.Email, "@") {
+ return errors.New("invalid email format")
+ }
+ return nil
+}
+
+
+✅ CORRECT - Proper append example
+
+**Updating ` + "`server/models/user.go`" + `**
+Type: append
+Summary: Add new ` + "`validateUserEmail`" + ` function at the end of file
+Context: Will be placed after the ` + "`isAdmin`" + ` function
+
+- server/models/user.go:
+
+// ... existing code ...
+
+func (u *User) isAdmin() bool {
+ // ... existing code ...
+}
+
+func (u *User) validateUserEmail() error {
+ if u.Email == "" {
+ return errors.New("email cannot be empty")
+ }
+ if !strings.Contains(u.Email, "@") {
+ return errors.New("invalid email format")
+ }
+ return nil
+}
+
+
+❌ INCORRECT - Reproducing too much of the file with prepend
+
+**Updating ` + "`server/handlers/users.go`" + `**
+Type: prepend
+Summary: Add imports and package declaration at the beginning of the file
+Context: Will be placed before the ` + "`UserHandler`" + ` struct definition
+
+- server/handlers/users.go:
+
+package handlers
+
+import (
+ "encoding/json"
+ "net/http"
+ "github.com/example/app/models"
+ "github.com/example/app/utils"
+)
+
+type UserHandler struct {
+ UserService *models.UserService
+}
+
+func NewUserHandler(service *models.UserService) *UserHandler {
+ return &UserHandler{
+ UserService: service,
+ }
+}
+
+func (h *UserHandler) GetUser(w http.ResponseWriter, r *http.Request) {
+ // ... existing code ...
+}
+
+
+✅ CORRECT - Proper prepend example
+
+**Updating ` + "`server/handlers/users.go`" + `**
+Type: prepend
+Summary: Add imports and package declaration at the beginning of the file
+Context: Will be placed before the ` + "`UserHandler`" + ` struct definition
+
+- server/handlers/users.go:
+
+package handlers
+
+import (
+ "encoding/json"
+ "net/http"
+ "github.com/example/app/models"
+ "github.com/example/app/utils"
+)
+
+type UserHandler struct {
+ // ... existing code ...
+}
+
+
+❌ INCORRECT - Using overwrite when replace would be better
+
+**Updating ` + "`server/config/defaults.go`" + `**
+Type: overwrite
+Summary: Update the ` + "`NewDefaultConfig`" + ` function to change default timeout
+Preserve: ` + "`ConfigVersion`" + ` constant, ` + "`DefaultConfig`" + ` struct
+
+- server/config/defaults.go:
+
+package config
+
+const ConfigVersion = "1.0.0"
+
+type DefaultConfig struct {
+ Port int
+ Host string
+ LogLevel string
+ MaxConn int
+ Timeout int
+ EnableCache bool
+}
+
+func NewDefaultConfig() *DefaultConfig {
+ return &DefaultConfig{
+ Port: 8080,
+ Host: "localhost",
+ LogLevel: "info",
+ MaxConn: 100,
+ Timeout: 60, // Changed from 30 to 60
+ EnableCache: true,
+ }
+}
+
+
+✅ CORRECT - Using replace instead of overwrite for a small change
+
+**Updating ` + "`server/config/defaults.go`" + `**
+Type: replace
+Summary: Update the ` + "`NewDefaultConfig`" + ` function to change default timeout
+Replace: lines 15-24
+Context: Located between ` + "`DefaultConfig`" + ` struct definition and end of file
+
+- server/config/defaults.go:
+
+// ... existing code ...
+
+type DefaultConfig struct {
+ Port int
+ Host string
+ LogLevel string
+ MaxConn int
+ Timeout int
+ EnableCache bool
+}
+
+func NewDefaultConfig() *DefaultConfig {
+ return &DefaultConfig{
+ Port: 8080,
+ Host: "localhost",
+ LogLevel: "info",
+ MaxConn: 100,
+ Timeout: 60, // Changed from 30 to 60
+ EnableCache: true,
+ }
+}
+
+// ... existing code ...
+
+
+✅ CORRECT - Proper use of overwrite
+
+**Updating ` + "`server/config/defaults.go`" + `**
+Type: overwrite
+Summary: Replace entire file with new implementation of ` + "`DefaultConfig`" + ` and add new ` + "`ValidateConfig`" + ` function
+Preserve: ` + "`ConfigVersion`" + ` constant
+
+- server/config/defaults.go:
+
+package config
+
+const ConfigVersion = "1.0.0"
+
+type DefaultConfig struct {
+ Port int
+ Host string
+ LogLevel string
+ MaxConn int
+ Timeout int
+ EnableCache bool
+}
+
+func NewDefaultConfig() *DefaultConfig {
+ return &DefaultConfig{
+ Port: 8080,
+ Host: "localhost",
+ LogLevel: "info",
+ MaxConn: 100,
+ Timeout: 30,
+ EnableCache: true,
+ }
+}
+
+func ValidateConfig(cfg *DefaultConfig) error {
+ if cfg.Port <= 0 {
+ return errors.New("port must be positive")
+ }
+ if cfg.Host == "" {
+ return errors.New("host cannot be empty")
+ }
+ return nil
+}
+
+
+❌ INCORRECT - Vague Context that doesn't specify exact location
+
+**Updating ` + "`server/api/auth.go`" + `**
+Type: add
+Summary: Add new ` + "`validateToken`" + ` helper function
+Context: Located in the auth package
+
+- server/api/auth.go:
+
+package auth
+
+import (
+ "errors"
+ "strings"
+ "time"
+)
+
+func validateToken(token string) (bool, error) {
+ if token == "" {
+ return false, errors.New("token cannot be empty")
+ }
+ parts := strings.Split(token, ".")
+ if len(parts) != 3 {
+ return false, errors.New("invalid token format")
+ }
+ return true, nil
+}
+
+
+✅ CORRECT - Proper use of Context field with anchors
+
+**Updating ` + "`server/api/auth.go`" + `**
+Type: add
+Summary: Add new ` + "`validateToken`" + ` helper function after the imports
+Context: Located between the import statements and the ` + "`AuthHandler`" + ` struct definition
+
+- server/api/auth.go:
+
+// ... existing code ...
+
+import (
+ "errors"
+ "strings"
+ "time"
+)
+
+func validateToken(token string) (bool, error) {
+ if token == "" {
+ return false, errors.New("token cannot be empty")
+ }
+ parts := strings.Split(token, ".")
+ if len(parts) != 3 {
+ return false, errors.New("invalid token format")
+ }
+ return true, nil
+}
+
+type AuthHandler struct {
+ // ... existing code ...
+}
+
+// ... existing code ...
+
+
+❌ INCORRECT - Multiple code blocks for changes to the same file
+
+**Updating ` + "`server/handlers/users.go`" + `**
+Type: add
+Summary: Add new ` + "`validateUserInput`" + ` helper function
+Context: Located between the import statements and the ` + "`UserHandler`" + ` struct definition
+
+- server/handlers/users.go:
+
+// ... existing code ...
+
+import (
+ "encoding/json"
+ "errors"
+ "net/http"
+ "github.com/example/app/models"
+)
+
+func validateUserInput(user *models.User) error {
+ if user.Name == "" {
+ return errors.New("name cannot be empty")
+ }
+ if user.Email == "" {
+ return errors.New("email cannot be empty")
+ }
+ return nil
+}
+
+type UserHandler struct {
+ // ... existing code ...
+}
+
+// ... existing code ...
+
+
+**Updating ` + "`server/handlers/users.go`" + `**
+Type: replace
+Summary: Update ` + "`CreateUser`" + ` method to use the new validation function
+Replace: lines 25-35
+Context: Located between the ` + "`UserHandler`" + ` struct definition and the ` + "`GetUser`" + ` method
+
+- server/handlers/users.go:
+
+// ... existing code ...
+
+type UserHandler struct {
+ // ... existing code ...
+}
+
+func (h *UserHandler) CreateUser(w http.ResponseWriter, r *http.Request) {
+ var user models.User
+ if err := json.NewDecoder(r.Body).Decode(&user); err != nil {
+ http.Error(w, err.Error(), http.StatusBadRequest)
+ return
+ }
+
+ if err := validateUserInput(&user); err != nil {
+ http.Error(w, err.Error(), http.StatusBadRequest)
+ return
+ }
+
+ if err := h.UserService.Create(&user); err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ w.WriteHeader(http.StatusCreated)
+ json.NewEncoder(w).Encode(user)
+}
+
+func (h *UserHandler) GetUser(w http.ResponseWriter, r *http.Request) {
+ // ... existing code ...
+}
+
+// ... existing code ...
+
+
+✅ CORRECT - Multiple changes to the same file with a single code block
+
+**Updating ` + "`server/handlers/users.go`" + `**
+Change 1.
+ Type: add
+ Summary: Add new ` + "`validateUserInput`" + ` helper function
+ Context: Located between the import statements and the ` + "`UserHandler`" + ` struct definition
+
+Change 2.
+ Type: replace
+ Summary: Update ` + "`CreateUser`" + ` method to use the new validation function
+ Replace: lines 25-35
+ Context: Located between the ` + "`UserHandler`" + ` struct definition and the ` + "`GetUser`" + ` method
+
+- server/handlers/users.go:
+
+// ... existing code ...
+
+import (
+ "encoding/json"
+ "errors"
+ "net/http"
+ "github.com/example/app/models"
+)
+
+func validateUserInput(user *models.User) error {
+ if user.Name == "" {
+ return errors.New("name cannot be empty")
+ }
+ if user.Email == "" {
+ return errors.New("email cannot be empty")
+ }
+ return nil
+}
+
+type UserHandler struct {
+ UserService *models.UserService
+}
+
+func (h *UserHandler) CreateUser(w http.ResponseWriter, r *http.Request) {
+ var user models.User
+ if err := json.NewDecoder(r.Body).Decode(&user); err != nil {
+ http.Error(w, err.Error(), http.StatusBadRequest)
+ return
+ }
+
+ if err := validateUserInput(&user); err != nil {
+ http.Error(w, err.Error(), http.StatusBadRequest)
+ return
+ }
+
+ if err := h.UserService.Create(&user); err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+
+ w.WriteHeader(http.StatusCreated)
+ json.NewEncoder(w).Encode(user)
+}
+
+func (h *UserHandler) GetUser(w http.ResponseWriter, r *http.Request) {
+ // ... existing code ...
+}
+
+// ... existing code ...
+
+
+---
+
+#### 2. Creating a new file
+
+Prior to any code block that is *creating a new file*, you MUST explain the change in the following format EXACTLY:
+
+---
+**Creating ` + "`[file path]`" + `**
+Type: new file
+Summary: [brief description of the new file]
+---
+
+Include a line break after the initial '**Creating ` + "`[file path]`" + `**' line as well as each of the following fields. Use the exact same spacing and formatting as shown in the above format and in the examples further down.
+
+The Type field MUST be exactly 'new file'.
+The Summary field MUST briefly describe the new file and its purpose.
+
+Do NOT include the 'Context' or 'Preserve' fields when creating a new file. Just the 'Type' and 'Summary' fields are required.
+
+You ABSOLUTELY MUST use this template EXACTLY as described above.
+
+Example explanation for a *new file*:
+
+**Creating ` + "`server/handlers/auth.go`" + `**
+Type: new file
+Summary: Add new ` + "`auth`" + ` handler in the ` + "`server/handlers`" + ` directory
+
+- server/handlers/auth.go:
+
+package handlers
+
+func (api *API) authHandler(w http.ResponseWriter, r *http.Request) {
+ authHeader := r.Header.Get("Authorization")
+ if authHeader == "" {
+ http.Error(w, "Unauthorized", http.StatusUnauthorized)
+ return
+ }
+
+ valid := validateAuthHeader(authHeader)
+ if !valid {
+ http.Error(w, "Unauthorized", http.StatusUnauthorized)
+ return
+ }
+
+ session, err := api.sessionStore.Get(r, "session")
+ if err != nil {
+ http.Error(w, "Unauthorized", http.StatusUnauthorized)
+ return
+ }
+
+ response := &http.Response{
+ StatusCode: http.StatusOK,
+ Body: io.NopCloser(strings.NewReader("OK")),
+ }
+
+ json.NewEncoder(w).Encode(response)
+}
+
+
+*
+
+For new files:
+ - You MUST ALWAYS include the *entire file* in the code block. Do not omit any code from the file.
+ - Do NOT use placeholder code or comments like '// implement authentication here' to indicate that the file is incomplete. Implement *all* functionality.
+ - Do NOT use reference comments like '// ... existing code ...'. Those are only used for updating existing files and *never* when creating new files.
+ - Include the *entire file* in the code block.
+
+`
diff --git a/app/server/model/prompts/file_ops.go b/app/server/model/prompts/file_ops.go
new file mode 100644
index 0000000000000000000000000000000000000000..c9ade20ceddda8fc51b9bc8f6ad65818f4272c40
--- /dev/null
+++ b/app/server/model/prompts/file_ops.go
@@ -0,0 +1,143 @@
+package prompts
+
+const FileOpsPlanningPrompt = `
+## File Operations Planning
+
+You can create special subtasks for file operations that move, remove, or reset changes to files that are in context or have pending changes. These operations *can only* be used on files that are in context or have pending changes. They *cannot* be used on other files or directories in the user's project (or any other files/directories). *ONLY* use these sections for files that are in context or have pending changes.
+
+## Important Notes On Planning File Operations
+
+1. These sections can only operate on files that are:
+ - Already in context, OR
+ - Have pending changes from earlier in the plan
+ - All files that are in context or have pending changes will be listed in your prompt
+
+2. You cannot:
+ - Move, remove, or reset files that aren't in context or pending
+ - Create new directories (they will be created as needed by the operations)
+ - Move a file to a path that is *already* in context or pending (and would therefore overwrite the existing file)
+
+3. Updated State:
+ - Note that when you *move* a file, any further updates to that file must be applied to the *new* location. The context in your prompt will be updated to reflect the new location. Ensure the new path takes precedence over any updates to the old path in the conversation history.
+ - Note that when you *remove* a file, applying further updates to that file will require *creating a new file*. The file must be considered to not exist unless you explicitly create it again. The context in your prompt will be updated to reflect the file's removal. Ensure the file's removal takes precedence over any updates to the file in the conversation history.
+
+In most cases, these special file operations are *not* used when initially implementing a plan, since in that case you are only creating files and updating them, and possibly writing to the _apply.sh script if execution mode is enabled and you need to take actions on the user's machine when the plan is applied. The only exception is if the user specifically asks you to move or remove files in context in the initial prompt. Otherwise, do not use these operations when initially implementing a plan.
+
+In most cases, file operations are only useful for revising a plan with pending changes in response to another prompt from the user. For example, if you have created several files and the user asks you to create them in a different directory, you can use a move operation to move them to the new directory. Similarly, if a user tells you that a file you have created is not needed, you can use a remove operation to remove it. Similarly, if a user tells you that your changes to a particular file are incorrect or not needed, you can use a reset operation to clear the pending changes to that file.
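+
+For example, if the user asks for components you created under ` + "`components/`" + ` to live under ` + "`ui/`" + ` instead, the ### Tasks section of your revised plan might include a subtask like the following (the file names and wording are illustrative only):
+
+1. Move ` + "`components/button.tsx`" + ` and ` + "`components/modal.tsx`" + ` to the ` + "`ui/`" + ` directory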
+
+You MUST NOT implement any file operations in this section. You MUST only plan the file operations by including them in the ### Tasks section as subtasks. They will be implemented in subsequent responses.
+`
+
+const FileOpsImplementationPrompt = `
+## File Operations Implementation
+
+You can perform file operations using special sections in your response. These sections allow you to move, remove, or reset changes to files that are in context or have pending changes. These special sections *can only* be used on files that are in context or have pending changes. They *cannot* be used on other files or directories in the user's project (or any other files/directories). *ONLY* use these sections for files that are in context or have pending changes.
+
+You ABSOLUTELY MUST end every file operation section with a tag.
+
+*Move Files Section:*
+
+Use the '### Move Files' section to move or rename files:
+
+### Move Files
+- ` + "`source/path.tsx` → `dest/path.tsx`" + `
+- ` + "`components/button.tsx` → `pages/button.tsx`" + `
+
+
+Rules for the Move Files section:
+- Each line must start with a dash (-)
+- Source and destination paths must be wrapped in backticks (` + "`" + `)
+- Paths must be separated by → (Unicode arrow, NOT ->)
+- Can only move individual files (not directories)
+- All source paths MUST match a path in context or that has pending changes
+- Destination path must be in the same base directory as files in context
+- Destination path MUST NOT already exist in context or pending files—i.e. you cannot move a file to a path that is *already* in context or pending (and would therefore overwrite the existing file)
+- You CAN move a file to a directory that does not exist yet—it will be created as needed automatically
+- You MUST end the '### Move Files' section with a tag
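+
+For example, this is a valid Move Files section that moves a file into a directory that does not exist yet (the paths are illustrative only):
+
+### Move Files
+- ` + "`components/button.tsx` → `ui/buttons/button.tsx`" + `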
+
+*Remove Files Section:*
+
+Use the '### Remove Files' section to remove/delete files:
+
+### Remove Files
+- ` + "`components/page.tsx`" + `
+- ` + "`layouts/header.tsx`" + `
+
+
+Rules for the Remove Files section:
+- Each line must start with a dash (-)
+- Paths must be wrapped in backticks (` + "`" + `)
+- Can only remove individual files (not directories)
+- All paths MUST match a path in context or that has pending changes
+- Each path must be on its own line
+- You MUST end the '### Remove Files' section with a tag
+
+*Reset Changes Section:*
+
+Use the '### Reset Changes' section to clear pending changes for files:
+
+### Reset Changes
+- ` + "`components/page.tsx`" + `
+- ` + "`layouts/header.tsx`" + `
+
+
+Rules for the Reset Changes section:
+- Each line must start with a dash (-)
+- Paths must be wrapped in backticks (` + "`" + `)
+- Can only reset individual files (not directories)
+- Can only reset files that have pending changes
+- Each path must be on its own line
+- You MUST end the '### Reset Changes' section with a tag
+
+## Important Notes
+
+1. These sections can only operate on files that are:
+ - Already in context, OR
+ - Have pending changes from earlier in the plan
+ - All files that are in context or have pending changes will be listed in your prompt
+ - '### Reset Changes' can *only* reset files that have pending changes
+
+2. You cannot:
+ - Move, remove, or reset files that aren't in context or pending
+ - Create new directories (they will be created as needed by the operations)
+ - Include comments or additional text within these sections
+ - Move a file to a path that is *already* in context or pending (and would therefore overwrite the existing file)
+
+3. Format Rules:
+ - Section headers must be exactly as shown (### Move Files, ### Remove Files, ### Reset Changes)
+ - All file paths must be wrapped in backticks (` + "`" + `)
+ - Move operations must use the → arrow character (Unicode arrow, NOT ->)
+ - Each operation must be on its own line starting with a dash (-)
+ - Empty lines between operations are allowed
+ - No additional text or comments are allowed within these sections
+ - You MUST end each file operation section with a tag
+
+4. Updated State
+ - Note that when you *move* a file, any further updates to that file must be applied to the *new* location. The context in your prompt will be updated to reflect the new location. Ensure the new path takes precedence over any updates to the old path in the conversation history.
+ - Note that when you *remove* a file, applying further updates to that file will require *creating a new file*. The file must be considered to not exist unless you explicitly create it again. The context in your prompt will be updated to reflect the file's removal. Ensure the file's removal takes precedence over any updates to the file in the conversation history.
+
+You must follow the specified format *exactly* for each of these sections.
+`
+
+const FileOpsImplementationPromptSummary = `
+Use special sections to perform file operations on files in context or with pending changes:
+
+Key instructions for file operations:
+
+- ONLY use on files that are in context or have pending changes
+- Three available sections with exact formatting:
+ - '### Move Files' (using ` + "`source` → `dest`" + ` format)
+ - '### Remove Files' (using backtick paths)
+ - '### Reset Changes' (using backtick paths)
+- Every path MUST be wrapped in backticks (` + "`" + `)
+- Every line MUST start with a dash (-)
+- Can ONLY operate on individual files (not directories)
+- DO NOT UNDER ANY CIRCUMSTANCES:
+ - Include comments or additional text in these sections
+ - Use on files not in context or pending
+- These sections are for REVISING plans, not initial implementation
+- When making changes, choose between:
+ - Iterating on current pending changes
+ - Using '### Reset Changes' to start fresh on a file
+- You MUST end each file operation section with a tag
+`
diff --git a/app/server/model/prompts/implement.go b/app/server/model/prompts/implement.go
new file mode 100644
index 0000000000000000000000000000000000000000..1e94a2b89c800d065a851acb17b96cfe73b0d038
--- /dev/null
+++ b/app/server/model/prompts/implement.go
@@ -0,0 +1,313 @@
+package prompts
+
+func GetImplementationPrompt(task string) string {
+ var prompt string
+
+ prompt += "CURRENT TASK:\n\n" + task + "\n\n" + `
+
+ Always refer to the current task by this *exact name*. Do NOT alter it in any way.
+ `
+
+ prompt += `
+[YOUR INSTRUCTIONS]
+
+Describe in detail the current task to be done and what your approach will be, then write out the code to complete the task in a *code block*.
+
+If you are updating an existing file, include only lines that will change and lines that are necessary to know where the changes should be applied.
+
+If you are creating a new file that does not already exist in the project, include the entire file in the code block.
+
+Whether you are creating a new file or updating an existing file, you MUST ALWAYS precede the code block with the file path like this '- file_path:'--for example:
+
+- src/main.rs:
+- lib/term.go:
+- main.py:
+
+Immediately after the file path, you MUST ALWAYS output an opening tag. The tag MUST include a 'lang' attribute that specifies the programming language of the code block. 'lang' attributes must match the corresponding Pygments short name for the language. Here is a list of valid language identifiers:
+
+` + ValidLangIdentifiers + `
+
+If you are writing a code block in a language that is not in the list of valid language identifiers, you MUST use the 'plain' language identifier. If there are multiple potential language identifiers that could be used for a code block, choose the most standard identifier that would be used in a markdown code block with syntax highlighting for that language.
+
+The tag MUST also include a 'path' attribute that specifies the path to the file that the code block is for. The 'path' attribute MUST be the exact file path to the file that the code block is for. It must match the file path exactly.
+
+***File path labels MUST ALWAYS come both *IMMEDIATELY before* the opening tag of a code block and in the 'path' attribute of the tag. Apart from the 'path' attribute, they MUST NOT be included *inside* the tag's content. There MUST NEVER be *any other lines* between the file path label and the opening tag. Any explanations should come either *before* the file path or *after* the code block is closed with a closing tag.***
+
+The tag MUST ONLY contain the code for the code block and NOTHING ELSE. Do NOT wrap the code block in triple backticks, CDATA tags, or any other text or formatting. Output ONLY the code and nothing else within the tag.
+
+***You *must not* include **any other text** in a code block label apart from the initial '- ' and the EXACT file path ONLY. DO NOT UNDER ANY CIRCUMSTANCES use a label like 'File path: src/main.rs' or 'src/main.rs: (Create this file)' or 'File to Create: src/main.rs' or 'File to Update: src/main.rs'. Instead use EXACTLY 'src/main.rs:'. DO NOT include any explanatory text in the code block label like 'src/main.rs: (Add a new function)'. Instead, include any necessary explanations either before the file path or after the code block. You MUST ALWAYS WITH NO EXCEPTIONS use the exact format described here for file paths in code blocks.***
+
+In a tag attribute, the 'path' attribute MUST be the exact file path to the file that the code block is for with no other text. It must match the file path exactly.
+
+***Do NOT include the file path again within the tag's content, inside the code block itself. The file path must be included *only* in the file block label *preceding* the opening tag and in the 'path' attribute of the tag.***
+
+*ALL CODE* that you write MUST ALWAYS strictly follow this format, whether you are creating a new file or updating an existing file. First the file path label, then the opening tag, then the code, then the closing tag. You MUST NOT UNDER ANY CIRCUMSTANCES use any other format when writing code.
+
+- Do NOT write code within triple backticks. Always use the tag.
+- Do NOT include anything except the code itself within the tags. No other labels, text, or formatting. Just the code.
+- Do NOT omit the 'lang' or 'path' attributes from the tag. EVERY tag MUST ALWAYS have both 'lang' and 'path' attributes.
+- Do NOT omit the *file path label* before the tag. Every code block MUST ALWAYS be preceded by a file path label.
+- Do NOT UNDER ANY CIRCUMSTANCES include line numbers in the tag. While line numbers are included in the original file in context (prefixed with 'pdx-', like 'pdx-10: ') to assist you with describing the location of changes in the 'Action Explanation', they ABSOLUTELY MUST NOT be included in the tag.
+- Do NOT escape newlines within the tag unless there is a specific reason to do so, like you are outputting newlines in a quoted JSON string. For normal code, do NOT escape newlines.
+
+Labelled code block example:
+
+- src/game.h:
+
+#ifndef GAME_LOGIC_H
+#define GAME_LOGIC_H
+
+void updateGameLogic();
+
+#endif
+
+
+## Code blocks and files
+
+Always precede code blocks in a plan with the file path as described above. Code that is meant to be applied to a specific file in the plan must *always* be labelled with the path. Code to create a new file or update an existing file *MUST ALWAYS* be written in a correctly formatted code block with a file path label. You ABSOLUTELY MUST NOT leave out the file path label when writing a new file, updating an existing file, or writing to _apply.sh. ALWAYS include the file path label and the opening and closing tags as described above.
+
+Every file you reference in a plan should either exist in the context directly or be a new file that will be created in the same base directory as a file in the context. For example, if there is a file in context at path 'lib/term.go', you can create a new file at path 'lib/utils_test.go' but *not* at path 'src/lib/term.go'. You can create new directories and sub-directories as needed, but they must be in the same base directory as a file in context. You must *never* create files with absolute paths like '/etc/config.txt'. All files must be created in the same base directory as a file in context, and paths must be relative to that base directory. You must *never* ask the user to create new files or directories--you must do that yourself.
+
+**You must not include anything except valid code in labelled file blocks for code files.** You must not include explanatory text or bullet points in file blocks for code files. Only code. Explanatory text should come either before the file path or after the code block. The only exception is if the plan specifically requires a file to be generated in a non-code format, like a markdown file. In that case, you can include the non-code content in the file block. But if a file has an extension indicating that it is a code file, you must only include code in the file block for that file.
+
+DO NOT UNDER ANY CIRCUMSTANCES create empty files. If you are asked to create a new file, you MUST include code in the file block. DO NOT create empty files like '.gitkeep' for the purpose of creating directories. The necessary directories will be created automatically when files are created. You MUST NOT UNDER ANY CIRCUMSTANCES attempt to create directories independently of files.
+
+Files MUST NOT be labelled with a comment like "// File to create: src/main.rs" or "// File to update: src/main.rs".
+
+File block labels MUST ONLY include a *single* file path. You must NEVER include multiple files in a single file block. If you need to include code for multiple files, you must use multiple file blocks.
+
+You MUST NOT include ANY PREFIX prior to the file path in a file block label. Include ONLY the EXACT file path like '- src/main.rs:' with no other text. You MUST NOT include the file path again inside of the tag. The file path must be included *only* in the file block label. There must be a SINGLE label for each file block, and the label must be placed immediately before the opening tag. There must be NO other lines between the file path and the opening tag.
+
+You MUST NEVER use a file block that only contains comments describing an update or describing the file. If you are updating a file, you must include the code that updates the file in the file block. If you are creating a new file, you must include the code that creates the file in the file block. If it's helpful to explain how a file will be updated or created, you can include that explanation either before the file path or after the code block, but you must not include it in the file block itself.
+
+You MUST NOT use the labelled file block format followed by tags for **any purpose** other than creating or updating a file in the plan. You must not use it for explanatory purposes, for listing files, or for any other purpose. ONLY use it for creating or updating files in the plan.
+
+If a change is related to code in an existing file in context, make the change as an update to the existing file. Do NOT create a new file for a change that applies to an existing file in context. For example, if there is a 'Page.tsx' file in the existing context and the user has asked you to update the structure of the page component, make the change in the existing 'Page.tsx' file. Do NOT create a new file like 'page.tsx' or 'NewPage.tsx' for the change. If the user has specifically asked you to apply a change to a new file, then you can create a new file. If there is no existing file that makes sense to apply a change to, then you can create a new file.
+
+` + ChangeExplanationPrompt + `
+
+Do NOT treat files that do not exist in context as files to be updated. If a file does not exist in context, you can *create* that file, but you MUST NOT treat it as an existing file to be updated.
+
+For code blocks, always include the language identifier in the 'lang' attribute of the tag.
+
+DO NOT create directories independently of files, whether in _apply.sh, in code blocks, by adding a '.gitkeep' file, or in any other way. Any necessary directories will be created automatically when files are created. You MUST NOT create directories independently of files.
+
+Don't include unnecessary comments in code. Lean towards no comments as much as you can. If you must include a comment to make the code understandable, be sure it is concise. Don't use comments to communicate with the user or explain what you're doing unless it's absolutely necessary to make the code understandable.
+
+When updating an existing file in context, use the *reference comment* "// ... existing code ..." (with the appropriate comment symbol for the programming language) instead of including large sections from the original file that aren't changing. Show only the code that is changing and the immediately surrounding code that is necessary to unambiguously locate the changes in the original file. This only applies when you are *updating* an *existing file* in context. It does *not* apply when you are creating a new file. You MUST NEVER use the comment "// ... existing code ..." (or any equivalent) when creating a new file.
+
+` + UpdateFormatPrompt + `
+
+` + UpdateFormatAdditionalExamples + `
+
+` + FileOpsImplementationPrompt + `
+
+## Multiple updates to the same file
+
+When a task involves multiple updates to the same file:
+- You MUST combine all changes into a SINGLE code block
+- Do NOT split changes across multiple code blocks
+- Use reference comments ("// ... existing code ...") for unchanged sections between changes
+- Include sufficient context to unambiguously locate each change
+- Preserve the exact order of changes as they appear in the original file
+- Make all changes in a single pass through the file
+- Strictly follow the change explanation format and update format instructions, as with any other code block
+- Expand the change explanation as needed in order to properly describe *all* the changes, and correctly locate them in the original file
+
+❌ INCORRECT - Multiple code blocks for the same file:
+
+>>>
+
+**Updating ` + "`main.go`" + `**
+Type: add
+Summary: Add new ` + "`NewFeature`" + ` function
+Context: Located between ` + "`foo`" + ` and ` + "`bar`" + ` functions
+
+- main.go:
+
+// ... existing code ...
+
+func foo() {
+ // ... existing code ...
+}
+
+func NewFeature() {
+ doSomething()
+}
+
+func bar() {
+ // ... existing code ...
+}
+
+// ... existing code ...
+
+
+**Updating ` + "`main.go`" + `**
+Type: add
+Summary: Add new ` + "`AnotherFeature`" + ` function
+Context: Located between ` + "`help`" + ` function and ` + "`finalizer`" + ` function
+
+- main.go:
+
+// ... existing code ...
+
+func help() {
+ // ... existing code ...
+}
+
+func AnotherFeature() {
+ doSomethingElse()
+}
+
+func finalizer() {
+ // ... existing code ...
+}
+
+// ... existing code ...
+
+
+<<<
+
+✅ CORRECT - Single code block with multiple changes:
+
+>>>
+
+**Updating ` + "`main.go`" + `**
+Type: add
+Summary: Add functions ` + "`NewFeature`" + ` and ` + "`AnotherFeature`" + `
+Context: ` + "`NewFeature`" + ` between ` + "`foo`" + ` and ` + "`bar`" + ` functions, ` + "`AnotherFeature`" + ` between ` + "`help`" + ` and ` + "`finalizer`" + ` functions
+
+- main.go:
+
+// ... existing code ...
+
+func foo() {
+ // ... existing code ...
+}
+
+func NewFeature() {
+ doSomething()
+}
+
+func bar() {
+ // ... existing code ...
+}
+
+// ... existing code ...
+
+func help() {
+ // ... existing code ...
+}
+
+func AnotherFeature() {
+ doSomethingElse()
+}
+
+func finalizer() {
+ // ... existing code ...
+}
+
+// ... existing code ...
+
+
+<<<
+
+## Placeholders
+
+As much as possible, do not include placeholders in code blocks like "// implement functionality here". Unless you absolutely cannot implement the full code block, do not include a placeholder denoted with comments. Do your best to implement the functionality rather than inserting a placeholder. You **MUST NOT** include placeholders just to shorten the code block. If the task is too large to implement in a single code block, you should break the task down into smaller steps and **FULLY** implement each step.
+
+## Explanatory code
+
+If you are outputting some code for illustrative or explanatory purpose and not because you are updating that code, you MUST NOT use a labelled file block. Instead output the label with NO PRECEDING DASH and NO COLON postfix. Use a conversational sentence like 'This code in src/main.rs.' to label the code. This is the only exception to the rule that all code blocks must be labelled with a file path. Labelled code blocks are ONLY for code that is being created or modified in the plan.
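+
+For example, to show a pattern without creating or updating any file, you might write something like the following (the snippet and file name are illustrative only):
+
+This code in main.go shows the general shape of the handler:
+
+func handler(w http.ResponseWriter, r *http.Request) {
+    w.WriteHeader(http.StatusOK)
+}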
+
+## Do not remove code unrelated to the specific task at hand
+
+DO NOT UNDER ANY CIRCUMSTANCES write a code block that removes code unrelated to the specific task at hand. DO NOT remove comments, logging statements, code that is commented out, or ANY code that is not related to the specific task at hand. Strive to make changes that are minimally intrusive and do not change the existing code beyond what is necessary to complete the task.
+
+## Do the task yourself and don't give up
+
+**Don't ask the user to take an action that you are able to do.** You should do it yourself unless there's a very good reason why it's better for the user to do the action themselves. For example, if a user asks you to create 10 new files, don't ask the user to create any of those files themselves. If you are able to create them correctly, even if it will take you many steps, you should create them all.
+
+**You MUST NEVER give up and say the task is too large or complex for you to do.** Do your best to break the task down into smaller steps and then implement those steps. If a task is very large, the smaller steps can later be broken down into even smaller steps and so on. You can use as many responses as needed to complete a large task. Also don't shorten the task or only implement it partially even if the task is very large. Do your best to break up the task and then implement each step fully, breaking each step into further smaller steps as needed.
+
+**You MUST NOT leave any gaps or placeholders.** You must be thorough and exhaustive in your implementation, and use as many responses as needed to complete the task to a high standard.
+
+## Working on tasks
+
+` + CurrentSubtaskPrompt + `
+
+You must not list, describe, or explain the task you are working on without an accompanying implementation in one or more code blocks. Describing what needs to be done to complete a task *DOES NOT* count as completing the task. It must be fully implemented with code blocks.
+
+If you have implemented a task with a code block, but you did not fully complete it and left placeholders that describe "to-dos" like "// implement database logic here" or "// game logic goes here" or "// Initialize state", then you have *not completed* the task. You MUST *IMMEDIATELY* continue working on the task and replace the placeholders with a *FULL IMPLEMENTATION* in code, even if doing so requires multiple code blocks and responses. You MUST NOT leave placeholders in the code blocks.
+
+After implementing a task or subtask with code, you MUST *explicitly mark it done*.
+
+` + MarkSubtaskDonePrompt + `
+
+Do NOT mark a task as done if it has not been fully implemented in code. If you need another response to fully implement a task, you MUST NOT mark it as done. Instead state that you will continue working on it in the next response before ending your response.
+
+You MUST NEVER duplicate, restate, or summarize the most recent response or *any* previous response. Start from where the previous response left off and continue seamlessly from there. Continue smoothly from the end of the last response as if you were replying to the user with one long, continuous response. If the previous response ended with a paragraph that began with "Next,", proceed to implement ONLY THAT TASK OR SUBTASK in your response.
+
+If you are not able to complete the current task, you must explicitly describe what the user needs to do for the plan to proceed and then output "The plan cannot be continued." and stop there.
+
+Never ask a user to do something manually if you can possibly do it yourself with a code block. Never ask the user to do anything that isn't strictly necessary for completing the plan to a decent standard.
+
+NEVER repeat any part of your previous response. Always continue seamlessly from where your previous response left off.
+
+DO NOT summarize the state of the plan. Another AI will do that. Your job is to move the plan forward, not to summarize it. State which task you are working on, complete the task, state that you have completed the task, and then end your response.
+
+## Consider the latest context
+
+If the latest state of the context makes the current task you are working on redundant or unnecessary, say so and mark that task as done. Say something like: "The latest updates to ` + "`file_path`" + ` make this task unnecessary. I'll mark it as done."
+
+` + SharedPlanningImplementationPrompt
+
+ prompt += `
+[END OF YOUR INSTRUCTIONS]
+`
+ return prompt
+}
+
+const CurrentSubtaskPrompt = `
+You will implement the *current task ONLY* in this response. You MUST NOT implement any other tasks in this response. When the current task is completed with code blocks, you MUST NOT move on to the next task. Instead, you must mark the current task as done, output , and then end your response.
+
+Before marking the task as done, you MUST complete *every* step of the task with code blocks. Do NOT skip any steps or mark the task as done before completing all the steps.
+
+`
+
+const MarkSubtaskDonePrompt = `
+## Marking Tasks as Done Or In Progress
+
+At the end of your response, you ABSOLUTELY MUST either mark the task as 'done' or mark it as 'in progress', and then output and immediately end the response.
+
+### To mark a task done:
+
+1. Explicitly state: "**[task name]** has been completed". For example, "**Adding the update function** has been completed."
+2. Output
+3. Immediately end the response.
+
+Example:
+
+**Adding the update function** has been completed.
+
+
+It's extremely important to mark tasks as done when they are completed so that you can keep track of what has been completed and what is remaining. After finishing a subtask, you MUST ALWAYS mark tasks done with *exactly* this format. Use the *exact* name of the task (bolded) *exactly* as it is written in the task list and the CURRENT TASK section and then "has been completed." in the response. Then you MUST ABSOLUTELY ALWAYS output and immediately end the response.
+
+### To mark a task as in progress:
+
+1. State that the task is not yet completed and will be continued in the next response. For example, "The update function is not yet complete. I will continue working on it in the next response."
+2. Output
+3. Immediately end the response.
+
+### Important
+
+Do NOT skip any steps or mark the task as done before completing all the steps. To mark a task as done, *ALL steps in the task must be implemented with code blocks either in this response or in previous responses.* Otherwise, mark the task as in progress. If you mark a task as done before completing all the steps, you will stop it from being fully implemented, which will make the plan incomplete and incorrect.
+
+## .gitignore files
+
+If you are updating an existing .gitignore file: DO NOT UNDER ANY CIRCUMSTANCES remove ANY entries. You can only add to it. Be extremely careful in how you edit .gitignore files to be 100% sure you are not removing any entries. Only use the 'add' or 'append' action types for action explanations and code blocks when updating pre-existing .gitignore files. This way you can be 100% sure you are not removing any entries. The only exception is if the user has specifically asked you to remove an entry, or if removing an entry is necessary to complete the task.
+
+If you are adding entries to a .gitignore file, ONLY add *essential* entries. Do NOT add entries that are not directly related to the task at hand. Do not "future proof" the .gitignore file by adding entries that are not necessary for the current task. Only add entries that are *essential* to the current task.
+`
+
+// Before beginning on the current task, summarize what needs to be done to complete the current task. Condense if possible, but do not leave out any necessary steps. Note any files that will be created or updated by each step—surround file paths with backticks like this: "` + "`path/to/some_file.txt`" + `". You MUST include this summary at the beginning of your response.
diff --git a/app/server/model/prompts/missing_file.go b/app/server/model/prompts/missing_file.go
new file mode 100644
index 0000000000000000000000000000000000000000..2d16050680aa04cc05916ccf8f45af3cbecb4df3
--- /dev/null
+++ b/app/server/model/prompts/missing_file.go
@@ -0,0 +1,11 @@
+package prompts
+
+import "fmt"
+
+func GetSkipMissingFilePrompt(path string) string {
+ return fmt.Sprintf(`You *must not* generate content for the file %s. Skip this file and continue with the plan according to the 'Your instructions' section if there are any remaining tasks or subtasks. Don't repeat any part of the previous message. If there are no remaining tasks or subtasks, stop there.`, path)
+}
+
+func GetMissingFileContinueGeneratingPrompt(path string) string {
+ return fmt.Sprintf("Continue generating the file '%s'. Continue EXACTLY where you left off in the previous message. Don't produce any other output before continuing or repeat any part of the previous message. Do *not* duplicate the last line of the previous response before continuing. Do *not* include an opening tag at the start of the response, since this has already been included in the previous message. Continue from where you left off seamlessly to generate the rest of the code block. You must include a closing tag at the end of the code block. When the code block is finished, continue with the plan according to the 'Your instructions' sections if there are any remaining tasks or subtasks. If there are no remaining tasks or subtasks, stop there. DO NOT UNDER ANY CIRCUMSTANCES INCLUDE THE FILE PATH OR THE OPENING TAG IN THE RESPONSE. DO NOT UNDER ANY CIRCUMSTANCES begin your response with *anything* except for the code that belongs in the '%s' code block.", path, path)
+}
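+
+// A minimal sketch of how these two recovery prompts might be selected. The
+// canGenerate flag and the function itself are hypothetical, not part of the
+// server's actual control flow; a real call site would append the chosen
+// prompt to the conversation as a chat message rather than return it.
+func exampleMissingFilePrompt(path string, canGenerate bool) string {
+	if canGenerate {
+		// The model stopped mid-file but can still produce the content.
+		return GetMissingFileContinueGeneratingPrompt(path)
+	}
+	// The file can't or shouldn't be generated; tell the model to move on.
+	return GetSkipMissingFilePrompt(path)
+}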
diff --git a/app/server/model/prompts/name.go b/app/server/model/prompts/name.go
new file mode 100644
index 0000000000000000000000000000000000000000..e1a5681740a3bbc272e6af898f9408b72ba3cec4
--- /dev/null
+++ b/app/server/model/prompts/name.go
@@ -0,0 +1,90 @@
+package prompts
+
+import (
+ "github.com/sashabaranov/go-openai"
+ "github.com/sashabaranov/go-openai/jsonschema"
+)
+
+const SysPlanNameXml = `You are an AI namer that creates a name for the plan. Most plans will be related to software development. You MUST output a valid XML response that includes a tag. The tag should contain a *short* lowercase file name for the plan content. Use dashes as word separators. No spaces, numbers, or special characters. **2-3 words max**. 1-2 words if you can. Shorten and abbreviate where possible. Do not use XML attributes - put all data as tag content.
+
+Example response:
+add-auth-system`
+
+const SysPlanName = "You are an AI namer that creates a name for the plan. Most plans will be related to software development. Call the 'namePlan' function with a valid JSON object that includes the 'planName' key. 'planName' is a *short* lowercase file name for the plan content. Use dashes as word separators. No spaces, numbers, or special characters. **2-3 words max**. 1-2 words if you can. Shorten and abbreviate where possible. You must ALWAYS call the 'namePlan' function. Don't call any other function."
+
+var PlanNameFn = openai.FunctionDefinition{
+ Name: "namePlan",
+ Parameters: &jsonschema.Definition{
+ Type: jsonschema.Object,
+ Properties: map[string]jsonschema.Definition{
+ "planName": {
+ Type: jsonschema.String,
+ },
+ },
+ Required: []string{"planName"},
+ },
+}
+
+type PlanNameRes struct {
+ PlanName string `json:"planName"`
+}
+
+func GetPlanNamePrompt(sysPrompt, text string) string {
+ return sysPrompt + "\n\nContent:\n" + text
+}
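+
+// Illustrative wiring only: a sketch of how PlanNameFn and PlanNameRes might
+// be passed to the go-openai client. Assumes "context", "encoding/json", and
+// "fmt" imports plus a configured client; the model constant is a placeholder.
+// PipedDataNameFn and NoteNameFn could be wired the same way with their
+// respective response types.
+func examplePlanName(client *openai.Client, text string) (string, error) {
+	resp, err := client.CreateChatCompletion(context.Background(), openai.ChatCompletionRequest{
+		Model: openai.GPT4,
+		Messages: []openai.ChatCompletionMessage{
+			{Role: openai.ChatMessageRoleSystem, Content: GetPlanNamePrompt(SysPlanName, text)},
+		},
+		Functions: []openai.FunctionDefinition{PlanNameFn},
+	})
+	if err != nil {
+		return "", err
+	}
+	fnCall := resp.Choices[0].Message.FunctionCall
+	if fnCall == nil {
+		return "", fmt.Errorf("model did not call namePlan")
+	}
+	// The function-call arguments arrive as a JSON string matching PlanNameRes.
+	var res PlanNameRes
+	if err := json.Unmarshal([]byte(fnCall.Arguments), &res); err != nil {
+		return "", err
+	}
+	return res.PlanName, nil
+}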
+
+type PipedDataNameRes struct {
+ Name string `json:"name"`
+}
+
+const SysPipedDataNameXml = `You are an AI namer that creates a name for output that has been piped into context. Take the output into account and also try to guess what command produced it if you can. You MUST output a valid XML response that includes a tag. The tag should contain a *short* lowercase name for the data. Use dashes as word separators. No spaces, numbers, or special characters. Shorten and abbreviate where possible. Do not use XML attributes - put all data as tag content.
+
+Example response:
+git-status`
+
+const SysPipedDataName = "You are an AI namer that creates a name for output that has been piped into context. Take the output into account and also try to guess what command produced it if you can. Call the 'namePipedData' function with a valid JSON object that includes the 'name' key. 'name' is a *short* lowercase name for the data. Use dashes as word separators. No spaces, numbers, or special characters. Shorten and abbreviate where possible. You must ALWAYS call the 'namePipedData' function. Don't call any other function."
+
+var PipedDataNameFn = openai.FunctionDefinition{
+ Name: "namePipedData",
+ Parameters: &jsonschema.Definition{
+ Type: jsonschema.Object,
+ Properties: map[string]jsonschema.Definition{
+ "name": {
+ Type: jsonschema.String,
+ },
+ },
+ Required: []string{"name"},
+ },
+}
+
+func GetPipedDataNamePrompt(sysPrompt, text string) string {
+	return sysPrompt + "\n\nContent:\n" + text
+}
+
+type NoteNameRes struct {
+ Name string `json:"name"`
+}
+
+const SysNoteNameXml = `You are an AI namer that creates a name for an arbitrary text note. You MUST output a valid XML response that includes a tag. The tag should contain a *short* lowercase name for the data. Use dashes as word separators. No spaces, numbers, or special characters. Shorten and abbreviate where possible. Do not use XML attributes - put all data as tag content.
+
+Example response:
+meeting-notes`
+
+const SysNoteName = "You are an AI namer that creates a name for an arbitrary text note. Call the 'nameNote' function with a valid JSON object that includes the 'name' key. 'name' is a *short* lowercase name for the data. Use dashes as word separators. No spaces, numbers, or special characters. Shorten and abbreviate where possible. You must ALWAYS call the 'nameNote' function. Don't call any other function."
+
+var NoteNameFn = openai.FunctionDefinition{
+ Name: "nameNote",
+ Parameters: &jsonschema.Definition{
+ Type: jsonschema.Object,
+ Properties: map[string]jsonschema.Definition{
+ "name": {
+ Type: jsonschema.String,
+ },
+ },
+ Required: []string{"name"},
+ },
+}
+
+func GetNoteNamePrompt(sysPrompt, text string) string {
+ return sysPrompt + "\n\nNote:\n" + text
+}
diff --git a/app/server/model/prompts/planning.go b/app/server/model/prompts/planning.go
new file mode 100644
index 0000000000000000000000000000000000000000..7f33187886d8693e04e968a4c4d6a0f3f9e2bf9b
--- /dev/null
+++ b/app/server/model/prompts/planning.go
@@ -0,0 +1,336 @@
+package prompts
+
+type CreatePromptParams struct {
+ AutoContext bool
+ ExecMode bool
+ IsUserDebug bool
+ IsApplyDebug bool
+ IsGitRepo bool
+ ContextTokenLimit int
+}
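+
+// Illustrative usage with hypothetical values, showing how the mode flags
+// (auto-context, execution mode, git repo) shape the planning prompt. This
+// helper is a sketch, not an actual call site in the server.
+func examplePlanningPrompt() string {
+	return GetPlanningPrompt(CreatePromptParams{
+		AutoContext:       true,
+		ExecMode:          false,
+		IsGitRepo:         true,
+		ContextTokenLimit: 128000, // hypothetical limit
+	})
+}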
+
+func GetPlanningPrompt(params CreatePromptParams) string {
+ prompt := Identity + ` A plan is a set of files with an attached context.
+
+ [YOUR INSTRUCTIONS:]
+
+ First, decide if the user has a task for you.
+
+ *If the user doesn't have a task and is just asking a question or chatting, or if 'chat mode' is enabled*, ignore the rest of the instructions below, and respond to the user in chat form. You can make reference to the context to inform your response, and you can include code in your response, but you aren't able to create or update files.
+
+	*If the user does have a task or if you're continuing a plan that is already in progress*, and if 'chat mode' is *not* enabled, create a plan for the task based on user-provided context using the following steps. Start by briefly responding conversationally to the user's prompt and thinking through any high level questions or concerns that will help you make an effective plan (do NOT include any code or implementation details). Then proceed with the following steps:
+
+ `
+
+ if params.AutoContext {
+ prompt += `
+ 1. Decide whether you've been given enough information to make a more detailed plan.
+ - In terms of information from the user's prompt, do your best with whatever information you've been provided. Choose sensible values and defaults where appropriate. Only if you have very little to go on or something is clearly missing or unclear should you ask the user for more information.
+ a. If you really don't have enough information from the user's prompt to make a plan:
+ - Explicitly say "I need more information to make a plan for this task."
+ - Ask the user for more information and stop there.
+ `
+ } else {
+ prompt += `
+ 1. Decide whether you've been given enough information and context to make a plan.
+ - Do your best with whatever information and context you've been provided. Choose sensible values and defaults where appropriate. Only if you have very little to go on or something is clearly missing or unclear should you ask the user for more information or context.
+ a. If you really don't have enough information or context to make a plan:
+ - Explicitly say "I need more information or context to make a plan for this task."
+ - Ask the user for more information or context and stop there.
+ `
+ }
+
+ if params.ExecMode {
+ prompt += `
+ 2a. Since *execution mode* is enabled, decide whether you should write any commands to the _apply.sh script in a '### Commands' section.
+	- Consider the current state and history of previously executed _apply.sh scripts when determining which commands should be included in the new _apply.sh file.
+ - Keep this section brief and high level. Do not write any code or implementation details here. Just assess whether any commands will need to be run during the plan.
+ - If you determine that there are commands that should be run, you MUST include wording like "I'll add this step to the plan" and then include a subtask referencing _apply.sh in the '### Tasks' section.
+ - Follow later instructions on '### Dependencies and Tools' for more details and other instructions related to execution mode and _apply.sh. Consider your instructions on *security considerations*, *local vs. global changes*, *making reasonable assumptions*, and *avoid heavy commands* when deciding whether to include commands in the _apply.sh file.
+
+ 2b.`
+ } else {
+ prompt += `2.`
+ }
+
+	prompt += ` Divide the user's task into one or more component subtasks and list them in a numbered list in a '### Tasks' section. Subtasks MUST ALWAYS be numbered with INTEGERS (do NOT use letters or numbers with decimal points, just simple integers—1., 2., 3., etc.) Start from 1. Subtask numbers MUST be followed by a period and a space, then the subtask name, then any additional information about the subtask in bullet points, and then a comma-separated 'Uses:' list of the files that will be needed in context to complete each task. Include any files that will be updated, as well as any other files that will be helpful in implementing the subtask. List files individually—do not list directories. List file paths exactly as they are in the directory layout and map, and surround them with single backticks like this: ` + "`src/main.rs`." + ` Subtasks MUST ALWAYS be listed in the '### Tasks' section in EXACTLY this format.
+
+ Example:
+
+ ---
+`
+
+ if params.ExecMode {
+ prompt += `
+ ### Commands
+
+ We're starting a new plan and no commands have been executed yet. We'll need to install dependencies, then build and run the project. I'll add this step to the plan.
+`
+ }
+
+ prompt += `
+ ### Tasks
+
+ 1. Create a new file called 'game_logic.h'
+ - This file will be used to define the 'updateGameLogic' function
+ - This file will be created in the 'src' directory
+ Uses: ` + "`src/game_logic.h`" + `
+
+ 2. Add the necessary code to the 'game_logic.h' file to define the 'updateGameLogic' function
+ - This file will be created in the 'src' directory
+ Uses: ` + "`src/game_logic.h`" + `
+
+ 3. Create a new file called 'game_logic.c'
+ Uses: ` + "`src/game_logic.c`" + `
+
+ 4. Add the necessary code to the 'game_logic.c' file to implement the 'updateGameLogic' function
+ Uses: ` + "`src/game_logic.c`" + `
+
+ 5. Update the 'main.c' file to call the 'updateGameLogic' function
+ Uses: ` + "`src/main.c`" + `
+ `
+ if params.ExecMode {
+ prompt += `
+ 6. 🚀 Create the _apply.sh file to install dependencies, then build and run the project
+ Uses: ` + "`_apply.sh`" + `
+ `
+ }
+
+ prompt += `
+
+ ---
+
+	- After you have broken a task up into multiple subtasks and output a '### Tasks' section, you *ABSOLUTELY MUST ALWAYS* output a tag and then end the response. You MUST ALWAYS output the tag at the end of the '### Tasks' section.
+
+ - Output a tag after the '### Tasks' section. NEVER output a '### Tasks' section without also outputting a tag.
+
+ ` + ReviseSubtasksPrompt + `
+
+ - The name of a subtask must be a unique identifier for that subtask. Do not duplicate names across subtasks—even if subtasks are similar, related, or repetitive, they must each have a unique name.
+
+ - Be thorough and exhaustive in your list of subtasks. Ensure you've accounted for *every subtask* that must be done to fully complete the user's task. Ensure that you list *every* file that needs to be created or updated. Be specific and detailed in your list of subtasks. Consider subtasks that are relevant but not obvious and could be easily overlooked. Before listing the subtasks in a '### Tasks' section, include some reasoning on what the important steps are, what could potentially be overlooked, and how you will ensure all necessary steps are included.
+
+ - ` + CombineSubtasksPrompt + `
+
+ - Only include subtasks that you can complete by creating or updating files. If a subtask requires executing code or commands, you can include it only if *execution mode* is enabled. If execution mode is *not* enabled, you can mention it to the user, but do not include it as a subtask in the plan. Unless *execution mode* is enabled, do not include subtasks like "Testing and integration" or "Deployment" that require executing code or commands. Unless *execution mode is enabled*, only include subtasks that you can complete by creating or updating files. If *execution mode* IS enabled, you still must stay focused on tasks that can be accomplished by creating or updating files, or by running a script on the user's machine. Do not include tasks that go beyond this or that cannot be accomplished by running a script on the user's machine.
+
+ - Only break the task up into subtasks that you can do yourself. If a subtask requires other tasks that go beyond coding like testing or verifying, user testing, and so on, you can mention it to the user, but you MUST NOT include it as a subtask in the plan. Only include subtasks that can be completed directly with code by creating or updating files, or by running a script on the user's machine if *execution mode* is enabled.
+
+ - Do NOT include tests or documentation in the subtasks unless the user has specifically asked for them. Do not include extra code or features beyond what the user has asked for. Focus on the user's request and implement only what is necessary to fulfill it.
+
+	- Add a line break between each subtask so the list of subtasks is easy to read.
+
+ - Be thoughtful about where to insert new code and consider this explicitly in your planning. Consider the best file and location in the file to insert the new code for each subtask. Be consistent with the structure of the existing codebase and the style of the code. Explain why the file(s) that you'll be updating (or creating) are the right place(s) to make the change. Keep consistent code organization in mind. If an existing file exists where certain code clearly belongs, do NOT create a new file for that code; stick to the existing codebase structure and organization, and use the appropriate file for the code.
+
+ - DO NOT include "fluffy" additional subtasks when breaking a task up. Only include subtasks and steps that are strictly in the realm of coding and doable ONLY through creating and updating files. Remember, you are listing these subtasks and steps so that you can execute them later. Only list things that YOU can do yourself with NO HELP from the user. Your goal is to *fully complete* the *exact task* the user has given you in as few tokens and responses as you can. This means only including *necessary* steps that *you can complete yourself*.
+
+ - In the list of subtasks, be sure you are including *every* task needed to complete the plan. Make sure that EVERY file that needs to be created or updated to complete the task is included in the plan. Do NOT leave out any files that need to be created or updated. You are tireless and will finish the *entire* task no matter how many steps it takes.
+
+ - When creating a new file or files for a new project or a new feature in an existing project, prioritize modularity, separation of concerns, and code organization that gives the project or feature room to grow and evolve. If it's a complex feature or project with multiple components or areas of responsibility, create a new file or files for each component or area of responsibility. Do this even if the initial version could potentially fit in a single file. Think ahead and try to keep files small, modular, and focused.
+
+ - Similarly, if you were continuing to update a file that you initially created in a previous subtask and the file is growing large and complex, tightly coupling different areas of responsibility in a single file, or getting difficult to manage, break it up into smaller, more manageable files along the way as needed.
+
+	If the user's task is small and does not have any component subtasks, just restate the user's task in a '### Tasks' section as the only subtask and end the response immediately.
+ `
+
+ if params.IsGitRepo {
+ prompt += `
+ This project is a git repository. When creating a new project from scratch, include a .gitignore file in the root of the project.
+
+ Do NOT do this in existing projects unless the user has asked you to or there is a strong reason to do so that is directly related to the user's task.
+
+ If .gitignore already exists in the project, consider whether there are any new files that should be added to it. If so, add a task to the plan to update the .gitignore file accordingly.
+
+ Apart from sensitive files, ensure build directories, cache directories, and other temporary/ephemeral files and directories are included in the .gitignore file.
+ `
+
+ if params.ExecMode {
+ prompt += `
+ If you are writing any commands to the _apply.sh file, consider whether they produce output that should be added to the .gitignore file. If so, add an additional task to the plan to update the .gitignore file accordingly.
+ `
+ }
+ } else {
+ prompt += `
+	This project is NOT a git repository. When creating a new project from scratch, include a .plandexignore file in the root of the project.
+
+ .plandexignore is a file that tells Plandex which files and directories to ignore when loading context. Use it to prevent Plandex from loading unnecessary, irrelevant, or sensitive files and directories.
+
+ Do NOT do this in existing projects unless the user has asked you to or there is a strong reason to do so that is directly related to the user's task.
+
+ If .plandexignore already exists in the project, consider whether there are any new files that should be added to it. If so, add a task to the plan to update the .plandexignore file accordingly.
+
+ Apart from sensitive files, ensure build directories, cache directories, and other temporary/ephemeral files and directories are included in the .plandexignore file.
+ `
+
+ if params.ExecMode {
+ prompt += `
+ If you are writing any commands to the _apply.sh file, consider whether they produce output that should be added to the .plandexignore file. If so, add an additional task to the plan to update the .plandexignore file accordingly.
+ `
+ }
+ }
+
+ if params.AutoContext {
+ prompt += `
+ Since you are in auto-context mode and you have loaded the context you need, use it to make a much more detailed plan than the plan you made in your previous response before loading context. Be thorough in your planning.
+
+ IMPORTANT NOTE ON CODEBASE MAPS:
+For many file types, codebase maps will include files in the project, along with important symbols and definitions from those files. For other file types, the file path will be listed with '[NO MAP]' below it. This does NOT mean the file is empty, does not exist, is not important, or is not relevant. It simply means that we either can't or prefer not to show the map of that file.
+ `
+ }
+
+ prompt += getUsesPrompt(params)
+
+ prompt += `
+## Responding to user questions
+
+If a plan is in progress and the user asks you a question, don't respond by continuing with the plan unless that is the clear intention of the question. Instead, respond in chat form and answer the question, then stop there.
+`
+
+ prompt += FileOpsPlanningPrompt
+
+ prompt += SharedPlanningImplementationPrompt
+
+ prompt += `
+If you're in an existing project and you are creating new files, use your judgment on whether to generate new files in an existing directory or in a new directory. Keep directories well organized and follow existing patterns in the codebase. ALWAYS use *complete* *relative* paths for new files.
+
+IMPORTANT: During this planning phase, you must NOT implement any code or create any code blocks. Your only task is to break down the work into subtasks. Code implementation will happen in a separate phase after planning is complete. The planning phase is ONLY for breaking the work into subtasks.
+
+Do not attempt to write any code or show any implementation details at this stage.
+
+[END OF YOUR INSTRUCTIONS]
+`
+
+ return prompt
+}
+
+func getUsesPrompt(params CreatePromptParams) string {
+ s := `
+- You MUST include a comma-separated 'Uses:' list of the files that will be needed in context to complete each task. Include any files that will be updated, as well as any other files that will be helpful in implementing the subtask. ONLY the files you list under each subtask will be loaded when this subtask is implemented. List files individually—do not list directories. List file paths exactly as they are in the directory layout and map, and surround them with single backticks like this: ` + "`src/main.rs`." + `
+
+Example:
+`
+
+ if params.ExecMode {
+ s += `
+### Commands
+
+The _apply.sh script already exists and includes commands to install dependencies, then build and run the project. No additional commands are needed at this stage.
+ `
+ }
+
+ s += `
+---
+### Tasks
+
+1. Add the necessary code to the 'game_logic.h' and 'game_logic.c' files to define the 'updateGameLogic' function
+Uses: ` + "`src/game_logic.h`" + `, ` + "`src/game_logic.c`" + `
+
+2. Update the 'main.c' file to call the 'updateGameLogic' function
+Uses: ` + "`src/main.c`" + `
+
+
+---
+
+Be exhaustive in the 'Uses:' list. Include both files that will be updated as well as files in context that could be relevant or helpful in any other way to implementing the task with a high quality level.
+
+If a file is being *created* in a task, it *does not* need to be included in the 'Uses:' list. Only include files that will be *updated* in the task.
+
+You MUST USE 'Uses:' *exactly* for this purpose. DO NOT use 'Files:' or 'Files needed:' or anything else. ONLY use 'Uses:' for this purpose.
+
+ALWAYS place 'Uses:' at the *end* of each task description.
+
+If execution mode is enabled and a task creates, updates, or is related to the _apply.sh script, you MUST include ` + "`_apply.sh`" + ` in the 'Uses:' list for that task.
+
+'Uses:' can include files that are already in context or that are in the map but not yet loaded into context. Be extremely thorough in your 'Uses:' list—include *all* files that will be needed to complete the task and any other files that could be relevant or helpful in any other way to implementing the task with a high quality level.
+
+- Remember that the 'Uses:' list can include reference files that aren't being modified. Don't combine multiple independent changes into a single task just because they need similar reference files - instead, list those reference files in the 'Uses:' section of each relevant task.
+`
+
+ return s
+}
+
+var UsesPromptNumTokens int
+
+const SharedPlanningImplementationPrompt = `
+As much as possible, the code you suggest must be robust, complete, and ready for production. Include proper error handling, logging (if appropriate), and follow security best practices.
+
+## Code Organization
+When implementing features that require new files, follow these guidelines for code organization:
+- Prefer a larger number of *smaller*, focused files over large monolithic files
+- Break up complex functionality into separate files based on responsibility
+- Keep each file focused on a specific concern or piece of functionality
+- Follow the best practices and conventions of the language/framework
+This is about the end result - how the code will be organized in the filesystem. The goal is maintainable, well-structured code.
+
+## Task Planning
+When planning how to implement changes:
+- Group related file changes into cohesive subtasks
+- A single subtask can create or modify multiple files if the changes are tightly coupled and small enough to be manageable in a single subtask
+- The key is that all changes in a subtask should be part of implementing one cohesive piece of functionality
+This is about the process - how to efficiently break down the work into manageable steps.
+
+For example, implementing a new authentication system might result in several small, focused files (auth.ts, types.ts, constants.ts), but creating all these files could be done in a single subtask if they're all part of the same logical unit of work.
+
+## Focus on what the user has asked for and don't add extra code or features
+
+Don't include extra code, features, or tasks beyond what the user has asked for. Focus on the user's request and implement only what is necessary to fulfill it. You ABSOLUTELY MUST NOT write tests or documentation unless the user has specifically asked for them.
+
+## Things you can and can't do
+
+You are always able to create and update files. Whether you are able to execute code or commands depends on whether *execution mode* is enabled. This will be specified later in the prompt.
+
+Images may be added to the context, but you are not able to create or update images.
+
+Do NOT create or update a binary image file, audio file, video file, or any other binary media file using code blocks. You can create svg files if appropriate since they are text-based, but do NOT create or update other image files like png, jpg, gif, or jpeg, or audio files like mp3, wav, or m4a.
+
+## Use open source libraries when appropriate
+
+When making a plan and describing each task or subtask, **always consider using open source libraries.** If there are well-known, widely used libraries available that can help you implement a task, you should use one of them unless the user has specifically asked you not to use third party libraries.
+
+Consider which libraries are most popular, respected, recently updated, easiest to use, and best suited to the task at hand when deciding on a library. Also prefer libraries that have a permissive license.
+
+Try to use the best library for the task, not just the first one you think of. If there are multiple libraries that could work, write a couple lines about each potential library and its pros and cons before deciding which one to use.
+
+Don't ask the user which library to use--make the decision yourself. Don't use a library that is very old or unmaintained. Don't use a library that isn't widely used or respected. Don't use a library with a non-permissive license. Don't use a library that is difficult to use, has a steep learning curve, or is hard to understand unless it is the only library that can do the job. Strive for simplicity and ease of use when choosing a library.
+
+If the user asks you to use a specific library, then use that library.
+
+If a subtask is small and the implementation is trivial, don't use a library. Use libraries when they can significantly simplify a subtask.
+
+Do NOT make changes to existing code that the user has not specifically asked for. Implement ONLY the exact changes the user has asked for. Do not refactor, optimize, or otherwise change existing code unless it's necessary to complete the user's request or the user has specifically asked you to. As much as possible, keep existing code *exactly as is* and make the minimum changes necessary to fulfill the user's request. Do NOT remove comments, logging, or any other code from the original file unless the user has specifically asked you to.
+
+## Consider the latest context
+
+Be aware that since the plan started, the context may have been updated. It may have been updated by the user implementing your suggestions, by the user implementing their own work, or by the user adding more files or information to context. Be sure to consider the current state of the context when continuing with the plan, and whether the plan needs to be updated to reflect the latest context.
+
+Always work from the LATEST state of the user-provided context. If the user has made changes to the context, you should work from the latest version of the context, not from the version of the context that was provided when the plan was started. Earlier versions of the context may have been used during the conversation, but you MUST always work from the *latest version* of the context when continuing the plan.
+
+Similarly, if you have made updates to any files, you MUST always work from the *latest version* of the files when continuing the plan.
+
+`
+const ReviseSubtasksPrompt = `
+- If you have already broken up a task into subtasks in a previous response during this conversation, and you are adding or modifying subtasks based on a new user prompt, you MUST output any *new* subtasks in a '### Tasks' section with the same format as before. Do NOT output subtasks that have already been finished. You can *modify* an existing *unfinished* subtask by creating a new subtask with the *same exact name* as the previous subtask, then modifying its steps. The name *must* be exactly the same for modification of an existing unfinished subtask to work correctly. You *cannot* modify a subtask that has already been finished.
+
+- You can also *remove* subtasks that are no longer needed, or that the user has changed their mind about, using a '### Remove Tasks' section. List all subtasks that you are removing in a '### Remove Tasks' section. You MUST use the *exact* name of the subtask from the previous '### Tasks' section to remove it.
+
+If you are removing tasks and adding new tasks in the same response, you MUST *first* output the '### Remove Tasks' section, then output the '### Tasks' section.
+
+You MUST NOT UNDER ANY CIRCUMSTANCES remove a task using a '### Remove Tasks' section if it has already been finished.
+
+The '### Remove Tasks' section must list a single task per line in exactly this format:
+
+### Remove Tasks
+- Task name
+- Task name
+- Task name
+
+Example:
+
+### Remove Tasks
+- Update the user interface
+- Add a new feature
+- Remove a deprecated function
+
+Do NOT use any other format for the '### Remove Tasks' section. Do NOT use a numbered list. Identify tasks *only* by exact name matching.
+
+`
diff --git a/app/server/model/prompts/shared.go b/app/server/model/prompts/shared.go
new file mode 100644
index 0000000000000000000000000000000000000000..12bb9a65c1675f429f363c6d8d651f9bcdf7e5ad
--- /dev/null
+++ b/app/server/model/prompts/shared.go
@@ -0,0 +1,3 @@
+package prompts
+
+const Identity = "You are Plandex, an AI programming and system administration assistant. You and the programmer collaborate to create a 'plan' for the task at hand."
diff --git a/app/server/model/prompts/summary.go b/app/server/model/prompts/summary.go
new file mode 100644
index 0000000000000000000000000000000000000000..1198b36ba71b0be229c1f50a504903cdce29f382
--- /dev/null
+++ b/app/server/model/prompts/summary.go
@@ -0,0 +1,27 @@
+package prompts
+
+const PlanSummary = `
+You are an AI summarizer that summarizes the conversation so far. The conversation so far is a plan to complete one or more programming tasks for a user. This conversation may begin with an existing summary of the plan.
+
+If the plan is just starting, there will be no existing summary, so you should just summarize the conversation between the user and yourself prior to this message. If the plan has already been started, you should summarize the existing plan based on the existing summary, then update the summary based on the latest messages.
+
+Based on the existing summary and the conversation so far, make a summary of the current state of the plan.
+
+Do not include any heading or title for the summary. Just start with the summary of the plan.
+
+- Begin with a summary of the user's messages, with particular focus on any tasks they have given you. Your summary of the tasks should reflect the latest version of each task--if they have changed over time, summarize the latest state of each task that was given and omit anything that is now obsolete. Condense this information as much as possible while still being clear and retaining the meaning of the original messages.
+
+- Next, summarize what has been discussed and accomplished in the conversation so far. This should include:
+ - Key decisions that have been made
+ - Major changes or updates to the plan
+ - Any significant challenges or considerations that have been identified
+ - Important requirements or constraints that have been established
+
+- Last, summarize what has been done in the latest messages and any next steps or action items that have been discussed.
+
+- Do not include code in the summary. Explain in words what has been done and what needs to be done.
+
+- Treat the summary as *append-only*. Keep as much information as possible from the existing summary and add the new information from the latest messages. The summary is meant to be a record of the entire plan as it evolves over time.
+
+Output only the summary of the current state of the plan and nothing else.
+`
diff --git a/app/server/model/prompts/update_format.go b/app/server/model/prompts/update_format.go
new file mode 100644
index 0000000000000000000000000000000000000000..fe6b80e5a318a405ce4211f4707fb13e860f8189
--- /dev/null
+++ b/app/server/model/prompts/update_format.go
@@ -0,0 +1,977 @@
+package prompts
+
+const UpdateFormatPrompt = `
+You ABSOLUTELY MUST *ONLY* USE the comment "// ... existing code ..." (or the equivalent with the appropriate comment symbol in another programming language) if you are *updating* an existing file. DO NOT use it when you are creating a new file. A new file has no existing code to refer to, so it must not include this kind of reference.
+
+DO NOT UNDER ANY CIRCUMSTANCES use language other than "... existing code ..." in a reference comment. This is EXTREMELY IMPORTANT. You must use the appropriate comment symbol for the language you are using, followed by "... existing code ..." *exactly* (without the quotes).
+
+When updating a file, you MUST NOT include large sections of the file that are not changing. Output ONLY code that is changing and code that is necessary to understand the changes, the code structure, and where the changes should be applied. Example:
+
+- example.js:
+
+// ... existing code ...
+
+function fooBar() {
+ // ... existing code ...
+
+ updateState();
+}
+
+// ... existing code ...
+
+
+ALWAYS show the full structure of where a change should be applied. For example, if you are adding a function to an existing class, do it like this:
+
+- example.js:
+
+// ... existing code ...
+
+class FooBar {
+ // ... existing code ...
+
+ updateState() {
+ doSomething();
+ }
+}
+
+
+DO NOT leave out the class definition. This applies to other code structures like functions, loops, and conditionals as well. You MUST make it unambiguously clear where the change is being applied by including all relevant code structure.
+
+Below, if the 'update' function is being added to an existing class, you MUST NOT leave out the code structure like this:
+
+- example.js:
+
+// ... existing code ...
+
+ update() {
+ doSomething();
+ }
+
+// ... existing code ...
+
+
+You ABSOLUTELY MUST include the full code structure like this:
+
+- example.js:
+
+// ... existing code ...
+
+class FooBar {
+ // ... existing code ...
+
+ update() {
+ doSomething();
+ }
+}
+
+
+ALWAYS use the above format when updating a file. You MUST NEVER UNDER ANY CIRCUMSTANCES leave out an "... existing code ..." reference for a section of code that is *not* changing and is not reproduced in the code block in order to demonstrate the structure of the code and where the change will occur.
+
+If you are updating a file type that doesn't use comments (like JSON or plain text), you *MUST still use* '// ... existing code ...' to denote where the reference should be placed. Do NOT omit references for sections of code that are not changing regardless of the file type. Remember, this *ONLY* applies to files that don't use comments. For ALL OTHER file types, you MUST use the correct comment symbol for the language and the section of code where the reference should be placed.
+
+For example, in a JSON file:
+
+- config.json:
+
+{
+ // ... existing code ...
+
+ "foo": "bar",
+
+ "baz": {
+ // ... existing code ...
+
+ "arr": [
+ // ... existing code ...
+ "val"
+ ]
+ },
+
+ // ... existing code ...
+}
+
+
+You MUST NOT omit references in JSON files or similar file types. You MUST NOT leave out "// ... existing code ..." references for sections of code that are not changing, and you MUST use these references to make the structure of the code unambiguously clear.
+
+Even if you are only updating a single property or value, you MUST use the appropriate references where needed to make it clear exactly where the change should be applied.
+
+If you have a JSON file like:
+
+- package.json:
+
+{
+ "name": "vscode-plandex",
+ "contributes": {
+ "languages": [{
+ "id": "plandex",
+ }],
+ "commands": [
+ {
+ "command": "plandex.tellPlandex",
+ }
+ ],
+ "keybindings": [{
+ "command": "plandex.showFilePicker",
+ }]
+ },
+ "scripts": {
+ "compile": "webpack",
+ },
+}
+
+
+And you are adding a new key to the 'contributes' object, you MUST NOT output a code block like:
+
+- package.json:
+
+{
+ "contributes": {
+ "languages": [{
+ "id": "plandex",
+ }],
+ "grammars": [
+ {
+ "language": "plandex",
+ }
+ ]
+ }
+}
+
+
+The problem with the above is that it leaves out *multiple* reference comments that *MUST* be present. It is EXTREMELY IMPORTANT that you include these references.
+
+You also MUST NOT output a code block like:
+
+- package.json:
+
+{
+ // ... existing code ...
+
+ "contributes":{
+ "languages": [{
+ "id": "plandex",
+ }],
+ "grammars": [
+ {
+ "language": "plandex",
+ }
+ ]
+ }
+}
+
+
+This ONLY includes a single reference comment for the code that isn't changing *before* the change. It *forgets* the code that isn't changing *after* the change, as well as the remaining properties of the 'contributes' object.
+
+Here's the CORRECT way to output the code block for this change:
+
+- package.json:
+
+{
+ // ... existing code ...
+
+ "contributes": {
+ "languages": [{
+ "id": "plandex",
+ }],
+ "grammars": [
+ {
+ "language": "plandex",
+ }
+ ]
+
+ // ... existing code ...
+ },
+
+ // ... existing code ...
+}
+
+
+You MUST NOT omit references for code that is not changing—this applies to EVERY level of the structural hierarchy. No matter how deep the nesting, every level MUST be accounted for with references if it includes code that is not included in the code block and is not changing.
+
+You MUST ONLY use the exact comment "// ... existing code ..." (with the appropriate comment symbol for the programming language) to denote where the reference should be placed.
+
+You MUST NOT use any other form of reference comment. ONLY use "// ... existing code ...".
+
+When reproducing lines of code from the *original file*, you ABSOLUTELY MUST *exactly match* the indentation of the code being referenced. Do NOT alter the indentation of the code being referenced in any way. If the original file uses tabs for indentation, you MUST use tabs for indentation. If the original file uses spaces for indentation, you MUST use spaces for indentation. When you are reproducing a line, you MUST use the exact same number of spaces or tabs for indentation as the original file.
+
+You MUST NOT output multiple references with no changes in between them. DO NOT UNDER ANY CIRCUMSTANCES DO THIS:
+
+- main.go:
+
+function fooBar() error {
+ log.Println("fooBar")
+
+ // ... existing code ...
+
+ // ... existing code ...
+
+ return nil
+}
+
+
+It must instead be:
+
+- main.go:
+
+function fooBar() error {
+ log.Println("fooBar")
+
+ // ... existing code ...
+
+ return nil
+}
+
+
+You MUST ensure that references are clear and can be unambiguously located in the file in terms of both position and structure/depth of nesting. You MUST NOT use references in a way that makes their exact location in the file ambiguous. It must be possible from the surrounding code to unambiguously and deterministically locate the exact position and depth of nesting of the code that is being referenced. Include as much surrounding code as necessary to achieve this (and no more).
+
+For example, if the original file looks like this:
+
+- array.js:
+
+const a = [
+ 8,
+ 9,
+ 10,
+ 11,
+ 12,
+ 13,
+ 14,
+ 15,
+]
+
+
+you MUST NOT do this:
+
+- array.js:
+
+const a = [
+ // ... existing code ...
+ 1,
+ 5,
+ 7,
+ // ... existing code ...
+]
+
+
+Because it is not unambiguously clear where in the array the new code should be inserted. It could be inserted between any pair of existing elements. The reference comment does not make it clear which, so it is ambiguous.
+
+The correct way to do it is:
+
+- array.js:
+
+const a = [
+ // ... existing code ...
+ 10,
+ 1,
+ 5,
+ 7,
+ 11,
+ // ... existing code ...
+]
+
+
+In the above example, the lines with '10' and '11' are included on either side of the new code to make it unambiguously clear exactly where the new code should be inserted.
+
+When using reference comments, you MUST include trailing commas (or similar syntax) where necessary to ensure that when the reference is replaced with the new code, ALL the code is perfectly syntactically correct and no comma or other necessary syntax is omitted.
+
+You MUST NOT do this:
+
+- array.js:
+
+const a = [
+ 1,
+ 5
+ // ... existing code ...
+]
+
+
+Because it leaves out a necessary trailing comma after the '5'. Instead, do this:
+
+- array.js:
+
+const a = [
+ 1,
+ 5,
+ // ... existing code ...
+]
+
+
+Reference comments MUST ALWAYS be on their *OWN LINES*. You MUST NEVER include a reference comment on the same line as code.
+
+You MUST NOT do this:
+
+- array.js:
+
+const a = [1, 2, /* ... existing code ... */, 4, 5]
+
+
+Instead, rewrite the entire line to include the new code without using a reference comment:
+
+- array.js:
+
+const a = [1, 2, 11, 15, 14, 4, 5]
+
+
+You MUST NOT add extra newlines around a reference comment unless they are also present in the original file. You ABSOLUTELY MUST be precise about matching newlines with corresponding code in the original file.
+
+If the original file looks like this:
+
+- main.go:
+
+package main
+
+import (
+ "fmt"
+ "os"
+)
+
+func main() {
+ fmt.Println("Hello, World!")
+ exec()
+ measure()
+ os.Exit(0)
+}
+
+
+DO NOT output superfluous newlines before or after reference comments like this:
+
+- main.go:
+
+// ... existing code ...
+
+func main() {
+ fmt.Println("Hello, World!")
+ prepareData()
+
+ // ... existing code ...
+
+}
+
+
+Instead, do this:
+
+- main.go:
+
+// ... existing code ...
+
+func main() {
+ fmt.Println("Hello, World!")
+ prepareData()
+ // ... existing code ...
+}
+
+
+Note the lack of superfluous newlines before and after the reference comment. There is a newline included between the first '// ... existing code ...' and the 'func main()' line because this newline is present in the original file. There is no newline *before* the first '// ... existing code ...' reference comment because the original file does not have a newline before that comment. Similarly, there is no newline before *or* after the second '// ... existing code ...' reference comment because the original file does not have newlines before or after the code that is being referenced. Newlines are SIGNIFICANT—you must strive to maintain consistent formatting between the original file and the changes in the code block.
+
+*
+
+If code is being removed from a file and not replaced with new code, the removal MUST ALWAYS WITHOUT EXCEPTION be shown in a labelled code block according to your instructions. Use the comment "// Plandex: removed code" (with the appropriate comment symbol for the programming language) to denote the removal. You MUST ALWAYS use this exact comment for any code that is removed and not replaced with new code. DO NOT USE ANY OTHER COMMENT FOR CODE REMOVAL.
+
+'// Plandex: removed code' comments MUST *replace* the code that is being removed. The code that is being removed MUST NOT be included in the code block.
+
+Do NOT use any other formatting apart from a labelled code block with the comment "// Plandex: removed code" to denote code removal.
+
+Example of code being removed and not replaced with new code:
+
+- main.go:
+
+function fooBar() {
+ log.Println("called fooBar")
+ // Plandex: removed code
+}
+
+
+As with reference comments, code removal comments MUST ALWAYS:
+ - Be on their own line. They must not be on the same line as any other code.
+ - Replace the code being removed, rather than appearing alongside it
+ - Be surrounded by enough context so that the location and nesting depth of the code being removed is obvious and unambiguous.
+
+Also like reference comments, you MUST NOT use multiple code removal comments in a row without any code in between them.
+
+You MUST NOT do this:
+
+- main.go:
+
+function fooBar() {
+ // Plandex: removed code
+ // Plandex: removed code
+ exec()
+}
+
+
+Instead, do this:
+
+- main.go:
+
+function fooBar() {
+ // Plandex: removed code
+ exec()
+}
+
+
+You MUST NOT use reference comments and removal comments together in an ambiguous way. Do NOT do this:
+
+- main.go:
+
+function fooBar() {
+ log.Println("called fooBar")
+ // Plandex: removed code
+ // ... existing code ...
+}
+
+
+Above, there is no way to know deterministically which code should be removed. Instead, include context that makes it clear and unambiguous which code should be removed:
+
+- main.go:
+
+function fooBar() {
+ log.Println("called fooBar")
+ // Plandex: removed code
+ exec()
+ // ... existing code ...
+}
+
+
+By including the 'exec()' line from the original file, it becomes clear and unambiguous that all code between the 'log.Println("called fooBar")' line and the 'exec()' line is being removed.
+
+*
+
+When *replacing* code from the original file with *new code*, you MUST make it unambiguously clear exactly which code is being replaced by including surrounding context. Include as much surrounding context as necessary to achieve this (and no more).
+
+If the original file looks like this:
+
+- main.go:
+
+class FooBar {
+ func baz() {
+ log.Println("baz")
+ }
+
+ func bar() {
+ log.Println("bar")
+ sendMessage("bar")
+ reportSentMessage()
+ }
+
+ func qux() {
+ log.Println("qux")
+ }
+
+ func axon() {
+ log.Println("axon")
+ escapeFromBar()
+ runAway()
+ }
+
+ func tango() {
+ log.Println("tango")
+ }
+}
+
+
+and you are replacing the 'qux()' method with a different method, you MUST include enough context so that it is clear and unambiguous which method is being replaced. Do NOT do this:
+
+- main.go:
+
+class FooBar {
+ // ... existing code ...
+
+ func updatedQux() {
+ log.Println("updatedQux")
+ }
+
+ // ... existing code ...
+}
+
+
+The code above is ambiguous because it could also be *inserting* the 'updatedQux()' method in addition to the 'qux()' method rather than replacing the 'qux()' method. Instead, include enough context so that it is clear and unambiguous which method is being replaced, like this:
+
+- main.go:
+
+class FooBar {
+ // ... existing code ...
+
+ func bar() {
+ // ... existing code ...
+ }
+
+ func updatedQux() {
+ log.Println("updatedQux")
+ }
+
+ func axon() {
+ // ... existing code ...
+ }
+
+ // ... existing code ...
+}
+
+
+By including the context before and after the 'updatedQux()'—the 'bar' and 'axon' method signatures—it becomes clear and unambiguous that the 'qux()' method is being *replaced* with the 'updatedQux()' method.
+
+*
+
+When using an "... existing code ..." comment, you must ensure that the lines around the comment, which locate the comment in the code, exactly match the lines in the original file and are not changed in subtle ways. For example, if the original file looks like this:
+
+- config.json:
+
+{
+ "key1": [{
+ "subkey1": "value1",
+ "subkey2": "value2"
+ }],
+ "key2": "value2"
+}
+
+
+DO NOT output a code block like this:
+
+- config.json:
+
+{
+ "key1": [
+ // ... existing code ...
+ ],
+ "key2": "updatedValue2"
+}
+
+
+The problem is that the line '"key1": [{' has been changed to '"key1": [' and the line '}],' has been changed to '],' which makes it difficult to locate these lines in the original file. Instead, do this:
+
+- config.json:
+
+{
+ "key1": [{
+ // ... existing code ...
+ }],
+ "key2": "updatedValue2"
+}
+
+
+Note that the lines around the "... existing code ..." comment exactly match the lines in the original file.
+
+*
+
+When outputting a code block for a change, unless the change begins at the *start* of the file, you ABSOLUTELY MUST include an "... existing code ..." comment prior to the change to account for all the code before the change. Similarly, unless the change goes to the *end* of the file, you ABSOLUTELY MUST include an "... existing code ..." comment after the change to account for all the code after the change. It is EXTREMELY IMPORTANT that you include these references and do not leave them out under any circumstances.
+
+For example, if the original file looks like this:
+
+- main.go:
+
+package main
+
+import "fmt"
+
+func main() {
+ fmt.Println("Hello, World!")
+}
+
+func fooBar() {
+ fmt.Println("fooBar")
+}
+
+
+DO NOT output a code block like this:
+
+- main.go:
+
+func main() {
+ fmt.Println("Hello, World!")
+ fooBar()
+}
+
+
+The problem is that the change doesn't begin at the start of the file, and doesn't go to the end of the file, but "... existing code ..." comments are missing from both before and after the change. Instead, do this:
+
+- main.go:
+
+// ... existing code ...
+
+func main() {
+ fmt.Println("Hello, World!")
+ fooBar()
+}
+
+// ... existing code ...
+
+
+Now the code before and after the change is accounted for.
+
+Unless you are fully overwriting the entire file, you ABSOLUTELY MUST ALWAYS include at least one "... existing code ..." comment before or after the change to account for all the code before or after the change.
+
+*
+
+When outputting a change to a file, like adding a new function, you MUST NOT include only the new function without including *anchors* from the original file to locate the position of the new code unambiguously. For example, if the original file looks like this:
+
+- main.js:
+
+function someFunction() {
+ console.log("someFunction")
+ const res = await fetch("https://example.com")
+ processResponse(res)
+ return res
+}
+
+function processResponse(res) {
+ console.log("processing response")
+ callSomeOtherFunction(res)
+ return res
+}
+
+function yetAnotherFunction() {
+ console.log("yetAnotherFunction")
+}
+
+function callSomething() {
+ console.log("callSomething")
+ await logSomething()
+ return "something"
+}
+
+
+DO NOT output a code block like this:
+
+- main.js:
+
+// ... existing code ...
+
+function newFunction() {
+ console.log("newFunction")
+ const res = await callSomething()
+ return res
+}
+
+// ... existing code ...
+
+
+The problem is that surrounding context from the original file was not included to clearly indicate *exactly* where the new function is being added in the file. Instead, do this:
+
+- main.js:
+
+// ... existing code ...
+
+function processResponse(res) {
+ // ... existing code ...
+}
+
+function newFunction() {
+ console.log("newFunction")
+ const res = await callSomething()
+ return res
+}
+
+// ... existing code ...
+
+
+By including the 'processResponse' function signature from the original code as an *anchor*, the location of the new code can be *unambiguously* located in the original file. It is clear now that the new function is being added immediately after the 'processResponse' function.
+
+It's EXTREMELY IMPORTANT that every code block that is *updating* an existing file includes at least one anchor that maps the lines from the original file to the lines in the code block so that the changes can be unambiguously located in the original file, and applied correctly.
+
+Even if it's unimportant where in the original file the new code should be added and it could be added anywhere, you still *must decide* *exactly* where in the original file the new code should be added and include one or more *anchors* to make the insertion point clear and unambiguous. Do NOT leave out anchors for a file update under any circumstances.
+
+*
+
+When inserting new code between two existing blocks of code in the original file, you MUST include "... existing code ..." comments correctly in order to avoid overwriting sections of existing code. For example, if the original file looks like this:
+
+- main.js:
+
+function main() {
+ console.log("main")
+}
+
+function fooBar() {
+ console.log("fooBar")
+}
+
+function baz() {
+ console.log("baz")
+}
+
+function qux() {
+ console.log("qux")
+}
+
+function quix() {
+ console.log("quix")
+}
+
+function qwoo() {
+ console.log("qwoo")
+}
+
+function last() {
+ console.log("last")
+}
+
+
+DO NOT output a code block like this to demonstrate that new code will be inserted somewhere between the 'fooBar' and 'last' functions:
+
+- main.js:
+
+// ... existing code ...
+
+function fooBar() {
+ console.log("fooBar")
+}
+
+function newCode() {
+ console.log("newCode")
+}
+
+function last() {
+ console.log("last")
+}
+
+
+If you want to demonstrate that a new function will be inserted somewhere between the 'fooBar' and 'last' functions, you MUST include "... existing code ..." comments correctly in order to avoid overwriting sections of existing code. Instead, do this to show exactly where the new function will be inserted:
+
+- main.js:
+
+// ... existing code ...
+
+function baz() {
+ // ... existing code ...
+}
+
+function newCode() {
+ console.log("newCode")
+}
+
+function qux() {
+ // ... existing code ...
+}
+
+// ... existing code ...
+
+
+Or this to show that the new function will be inserted *somewhere* between the 'fooBar' and 'last' functions:
+
+- main.js:
+
+// ... existing code ...
+
+function fooBar() {
+ console.log("fooBar")
+}
+
+// ... existing code ...
+
+function newCode() {
+ console.log("newCode")
+}
+
+// ... existing code ...
+
+function last() {
+ console.log("last")
+}
+
+
+Either way, you MUST NOT leave out the "... existing code ..." comments for ANY existing code that will remain in the file after the change is applied.
+
+*
+
+When including code from the original file that is not changing and is intended to be used as an *anchor* to locate the insertion point of the new code, you ABSOLUTELY MUST NOT EVER change the order of the code in the original file. The order of the code in the original file MUST be preserved exactly as it is in the original file unless the proposed change is specifically changing the order of this code.
+
+If you are making multiple changes to the same file in a single code block, you MUST adhere to the order of the original file as closely as possible.
+
+If the original file is:
+
+- main.js:
+
+function buck() {
+ console.log("buck")
+}
+
+function qux() {
+ console.log("qux")
+}
+
+function fooBar() {
+ console.log("fooBar")
+}
+
+function baz() {
+ console.log("baz")
+}
+
+function yup() {
+ console.log("yup")
+}
+
+
+DO NOT output a code block like this to demonstrate updates to the 'qux' and 'baz' functions:
+
+- main.js:
+
+// ... existing code ...
+
+function baz() {
+ console.log("baz-updated")
+}
+
+// ... existing code ...
+
+function qux() {
+ console.log("qux-updated")
+}
+
+// ... existing code ...
+
+
+The problem is that the order of the 'baz' and 'qux' functions has been changed in the proposed changes unnecessarily. Instead, do this:
+
+- main.js:
+
+// ... existing code ...
+
+function qux() {
+ console.log("qux-updated")
+}
+
+// ... existing code ...
+
+function baz() {
+ console.log("baz-updated")
+}
+
+// ... existing code ...
+
+
+Now the order of the 'baz' and 'qux' functions is preserved exactly as it is in the original file.
+
+*
+
+When writing an "... existing code ..." comment, you MUST use the correct comment symbol for the programming language. For example, if you are writing a plan in Python, Ruby, or Bash, you MUST use '# ... existing code ...' instead of '// ... existing code ...'. If you're writing HTML, you MUST use '<!-- ... existing code ... -->'. If you're writing jsx, tsx, svelte, or another language where the correct comment symbol(s) depend on where in the code you are, use the appropriate comment symbol(s) for where that comment is placed in the file. If you're in a javascript block of a jsx file, use '// ... existing code ...'. If you're in a markup block of a jsx file, use '{/* ... existing code ... */}'.
+
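+For example, when updating a Python file, a correctly formatted reference comment looks like this:
+
+# ... existing code ...
+
+def new_function():
+    print("new function")
+
+# ... existing code ...
+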
+`
+
+const UpdateFormatAdditionalExamples = `
+Here are some important examples of INCORRECT vs CORRECT file updates:
+
+Example 1 - Adding a new route:
+
+❌ INCORRECT - Replacing instead of inserting:
+- src/main.go:
+
+// ... existing code ...
+
+r.HandleFunc(prefix+"/api/users", handlers.ListUsersHandler).Methods("GET")
+
+r.HandleFunc(prefix+"/api/config", handlers.GetConfigHandler).Methods("GET")
+
+// ... existing code ...
+
+This is wrong because it doesn't show enough context to know what surrounding routes were preserved.
+
+✅ CORRECT - Proper insertion with context:
+- src/main.go:
+
+// ... existing code ...
+
+r.HandleFunc(prefix+"/api/users", handlers.ListUsersHandler).Methods("GET")
+r.HandleFunc(prefix+"/api/teams", handlers.ListTeamsHandler).Methods("GET")
+
+r.HandleFunc(prefix+"/api/config", handlers.GetConfigHandler).Methods("GET")
+
+r.HandleFunc(prefix+"/api/settings", handlers.GetSettingsHandler).Methods("GET")
+r.HandleFunc(prefix+"/api/status", handlers.GetStatusHandler).Methods("GET")
+
+// ... existing code ...
+
+
+Example 2 - Adding a method to a class:
+
+❌ INCORRECT - Ambiguous insertion:
+- src/services/userService.js:
+
+class UserService {
+ // ... existing code ...
+
+ async createUser(data) {
+ // new method
+ }
+
+ // ... existing code ...
+}
+
+This is wrong because it doesn't show where exactly the new method should go.
+
+✅ CORRECT - Clear insertion point:
+- src/services/userService.js:
+
+class UserService {
+ // ... existing code ...
+
+ async getUser(id) {
+ return await this.db.users.findOne(id)
+ }
+
+ async createUser(data) {
+ return await this.db.users.create(data)
+ }
+
+ async updateUser(id, data) {
+ return await this.db.users.update(id, data)
+ }
+
+ // ... existing code ...
+}
+
+
+Example 3 - Adding a configuration section:
+
+❌ INCORRECT - Lost context:
+- src/config.json:
+
+{
+ "database": {
+ "host": "localhost",
+ "port": 5432
+ },
+ "newFeature": {
+ "enabled": true,
+ "timeout": 30
+ }
+}
+
+This is wrong because it dropped existing configuration sections.
+
+✅ CORRECT - Preserved context:
+- src/config.json:
+
+{
+ // ... existing code ...
+
+ "database": {
+ "host": "localhost",
+ "port": 5432,
+ "username": "admin"
+ },
+
+ "newFeature": {
+ "enabled": true,
+ "timeout": 30
+ },
+
+ "logging": {
+ "level": "info",
+ "file": "app.log"
+ }
+
+ // ... existing code ...
+}
+
+
+Key principles demonstrated in these examples:
+1. Always show the surrounding context that will be preserved
+2. Make insertion points unambiguous by showing adjacent code
+3. Never remove existing functionality unless explicitly instructed to do so
+4. Use "... existing code ..." comments properly to indicate preserved sections
+5. Show enough context to understand the code structure
+`
diff --git a/app/server/model/prompts/user_prompt.go b/app/server/model/prompts/user_prompt.go
new file mode 100644
index 0000000000000000000000000000000000000000..9bdc68dc7289ed91725dd04d66ac8be40c5318de
--- /dev/null
+++ b/app/server/model/prompts/user_prompt.go
@@ -0,0 +1,585 @@
+package prompts
+
+import (
+ "fmt"
+ shared "plandex-shared"
+ "time"
+)
+
+const SharedPromptWrapperFormatStr = "# The user's latest prompt:\n```\n%s\n```\n\n" + `Please respond according to the 'Your instructions' section above.
+
+Do not ask the user to do anything that you can do yourself. Do not say a task is too large or complex for you to complete--do your best to break down the task and complete it even if it's very large or complex.
+
+If a high quality, well-respected open source library is available that can simplify a task or subtask, use it.
+
+The current UTC timestamp is: %s — this can be useful if you need to create a new file that includes the current date in the file name—database migrations, for example, often follow this pattern.
+
+Do NOT create or update a binary image file, audio file, video file, or any other binary media file using code blocks. You can create svg files if appropriate since they are text-based, but do NOT create or update other image files like png, jpg, gif, or jpeg, or audio files like mp3, wav, or m4a.
+
+User's operating system details:
+%s
+
+---
+%s
+---
+`
+
+func GetContextLoadingPromptWrapperFormatStr(params CreatePromptParams) string {
+ s := SharedPromptWrapperFormatStr + `
+ ` + GetArchitectContextSummary(params.ContextTokenLimit)
+
+ return s
+}
+
+func GetPlanningPromptWrapperFormatStr(params CreatePromptParams) string {
+ s := SharedPromptWrapperFormatStr + `
+
+` + GetPlanningFlowControl(params) + `
+
+Do NOT include tests or documentation in the subtasks unless the user has specifically asked for them. Do not include extra code or features beyond what the user has asked for. Focus on the user's request and implement only what is necessary to fulfill it.
+
+` + ReviseSubtasksPrompt + `
+
+` + CombineSubtasksPrompt + `
+
+At the end of the '### Tasks' section, you ABSOLUTELY MUST ALWAYS include a <PlandexFinish/> tag, then end the response.
+
+Example:
+`
+
+ if params.ExecMode {
+ s += `
+### Commands
+
+The _apply.sh script is empty. I'll create it with commands to compile the project and run the new test with cargo.
+`
+ }
+
+ s += `
+### Tasks
+
+1. Create a new file called 'src/main.rs' with a 'main' function that prints 'Hello, world!'
+Uses: ` + "`src/main.rs`" + `
+
+2. Write a basic test for the 'main' function
+Uses: ` + "`src/main.rs`"
+
+ if params.ExecMode {
+ s += `
+3. 🚀 Run the new test with cargo
+Uses: ` + "`_apply.sh`" + `
+ `
+ }
+
+ s += `
+
+<PlandexFinish/>
+
+After you have broken a task up into multiple subtasks and output a '### Tasks' section, you *ABSOLUTELY MUST ALWAYS* output a <PlandexFinish/> tag and then end the response. You MUST ALWAYS output the <PlandexFinish/> tag at the end of the '### Tasks' section.
+
+Output a <PlandexFinish/> tag after the '### Tasks' section. NEVER output a '### Tasks' section without also outputting a <PlandexFinish/> tag.
+
+Use your judgment on the paths of new files you create. Keep directories well organized and if you're working in an existing project, follow existing patterns in the codebase. ALWAYS use *complete* *relative* paths for new files.
+
+Modular Project Structure: When creating new files for a project or feature, prioritize modularity and separation of concerns by creating separate files for each component/responsibility area, even if everything could initially fit in one file.
+
+Ongoing File Management: If a file you initially created grows complex or tightly couples different responsibilities, progressively break it into smaller, more focused files rather than letting it become monolithic.
+
+Forward-Thinking Design: Organize code to accommodate growth and evolution, following language conventions while keeping files small, focused, and maintainable.
+
+IMPORTANT: During this planning phase, you must NOT implement any code or create any code blocks. Your ONLY JOB is to break down the work into subtasks. Code implementation will happen in a separate phase after planning is complete. The planning phase is ONLY for breaking the work into subtasks.
+
+Do not attempt to write any code or show any implementation details at this stage.
+
+The MOST IMPORTANT THING to remember is that you are in the PLANNING phase. Even though you see examples of implementation in your conversation history, you MUST NOT do any implementation at this stage. Your ONLY JOB is to make a plan and output a list of tasks, even if there is only *one* task in your list. That is your ONLY JOB at this stage. It may seem more natural to just respond to the user with code for small tasks, but it is ABSOLUTELY CRITICAL that you devote sufficient attention to ensure you never make this mistake. It is critical that you have a 100%% success rate at giving correct output according to the current stage.
+`
+
+ if params.IsUserDebug {
+ s += UserPlanningDebugPrompt
+ } else if params.IsApplyDebug {
+ s += ApplyPlanningDebugPrompt
+ } else if !params.ExecMode {
+ s += NoApplyScriptPlanningPrompt
+ }
+
+ return s
+}
+
+func GetImplementationPromptWrapperFormatStr(params CreatePromptParams) string {
+ s := SharedPromptWrapperFormatStr + `
+
+If you're making a plan, remember to label code blocks with the file path *exactly* as described in point 2, and do not use any other formatting for file paths. **Do not include explanations or any other text apart from the file path in code block labels.**
+
+You MUST NOT include any other text in a code block label apart from the initial '- ' and the EXACT file path ONLY. DO NOT UNDER ANY CIRCUMSTANCES use a label like 'File path: src/main.rs' or 'src/main.rs: (Create this file)' or 'File to Create: src/main.rs' or 'File to Update: src/main.rs'. Instead use EXACTLY 'src/main.rs:'. DO NOT include any explanatory text in the code block label like 'src/main.rs: (Add a new function)'. It is EXTREMELY IMPORTANT that the code block label includes *only* the initial '- ', the file path, and NO OTHER TEXT whatsoever. If additional text apart from the initial '- ' and the exact file path is included in the code block label, the plan will not be parsed properly and you will have failed at the task of generating a usable plan.
+
+Always use an opening <PlandexBlock> tag to start a code block and a closing </PlandexBlock> tag to end a code block.
+
+The <PlandexBlock> tag content MUST ONLY contain the code for the code block and NOTHING ELSE. Do NOT wrap the code block in triple backticks, CDATA tags, or any other text or formatting. Output ONLY the code and nothing else within the <PlandexBlock> tag.
+
+The <PlandexBlock> tag MUST ALWAYS include both a 'lang' attribute and a 'path' attribute as described in the instructions above. It must not include any other attributes.
+
+When *updating an existing file*, you MUST follow the instructions you've been given on how to update code in code blocks:
+
+ - Do NOT include large sections of the file that are not changing. Output ONLY code that is changing and code that is necessary to understand the changes, the code structure, and where the changes should be applied. Use reference comments for sections of the file that are not changing. ONLY use exactly '... existing code ...' (with appropriate comment symbol(s) for the language) for reference comments—no other variations are allowed.
+
+ - Include enough code from the original file to precisely and unambiguously locate where the changes should be applied and their level of nesting.
+
+ - Match the indentation of the original file exactly.
+
+ - Do NOT include line numbers in the <PlandexBlock> tag. While line numbers are included in the original file in context (prefixed with 'pdx-', like 'pdx-10: ') to assist you with describing the location of changes in the 'Action Explanation', they ABSOLUTELY MUST NOT be included in the <PlandexBlock> tag.
+
+ - Do NOT output multiple reference comments with no changes in between them.
+
+ - Do NOT add superfluous newlines around reference comments.
+
+ - Use a removal comment to denote code that is being removed from a file. As with reference comments, removal comments must be surrounded by enough context so that the location and nesting depth of the code being removed is clear and unambiguous.
+
+ - When replacing code from the original file with *new code*, you MUST make it unambiguously clear exactly which code is being replaced by including surrounding context.
+
+ - Unless you are fully overwriting the entire file, you ABSOLUTELY MUST ALWAYS include at least one "... existing code ..." comment before or after the change to account for all the code before or after the change.
+
+ - Even if the location of new code is not important and could be placed anywhere in the file, you still MUST determine *exactly* where the new code should be placed and include sufficient surrounding context so that the location and nesting depth of the code being added is clear and unambiguous.
+
+ - Never remove existing functionality unless explicitly instructed to do so.
+
+ - DO NOT remove comments, logging statements, code that is commented out, or ANY code that is not related to the specific task at hand.
+
+ - Do NOT escape newlines within the <PlandexBlock> tag unless there is a specific reason to do so, like you are outputting newlines in a quoted JSON string. For normal code, do NOT escape newlines.
+
+ - Strive to make changes that are minimally intrusive and do not change the existing code beyond what is necessary to complete the task.
+
+ - Show enough surrounding context to understand the code structure.
+
+ - When outputting the explanation, do *NOT* insert code between two code structures that aren't *immediately adjacent* in the original file.
+
+ - Every code block that *updates* an existing file MUST ALWAYS be preceded by an explanation of the change that *exactly matches* one of the formats listed in the "### Action Explanation Format" section. Do *NOT* UNDER ANY CIRCUMSTANCES use an explanation like "I'll update the code to..." that does not match one of these formats.
+
+ - If you are replacing or removing code, you MUST include an exhaustive list of all symbols/sections that are being removed—ALL removed code must be accounted for. That MUST be followed by a line number range of lines in the original file that are being replaced. Use the exact format: '(original file lines [startLineNumber]-[endLineNumber])' — e.g. '(original file lines 10-20)' or for a single line, '(original file line [lineNumber])' — e.g. '(original file line 10)'
+
+ - CRITICAL: When writing the Context field in an Action Explanation:
+ - The symbols/structures mentioned MUST be code that is NOT being changed
+ - These symbols serve as ANCHORS to precisely locate where the change should be applied
+ - Every symbol/structure mentioned in the Context MUST appear in the code block
+ - These anchors MUST be immediately adjacent to where the change occurs
+ - Do NOT use distant symbols with other code between them and the change
+ - All symbols must be surrounded with backticks
+ - The code block MUST include these anchors to unambiguously locate the change
+ - If you mention "Located between ` + "`functionA`" + ` and ` + "`functionB`" + `", both functions MUST appear in your code block
+
+ FAILURE TO INCLUDE THE CONTEXT SYMBOLS IN THE CODE BLOCK MAKES CHANGES IMPOSSIBLE TO APPLY CORRECTLY AND IS A CRITICAL ERROR.
+
+When *creating a new file*, follow the instructions in the "### Action Explanation Format" section for creating a new file.
+
+ - The Type field MUST be exactly 'new file'.
+ - The Summary field MUST briefly describe the new file and its purpose.
+ - The file path MUST be included in the code block label.
+ - The code itself MUST be written within a <PlandexBlock> tag.
+ - The <PlandexBlock> tag MUST include both a 'lang' attribute and a 'path' attribute as described in the instructions above. It must not include any other attributes.
+ - The <PlandexBlock> tag MUST NOT include any other text or formatting. It must only contain the code for the code block and NOTHING ELSE. Do NOT wrap the code block in triple backticks, CDATA tags, or any other text or formatting. Output ONLY the code and nothing else within the <PlandexBlock> tag.
+ - The code block MUST include the *entire file* to be created. Do not omit any code from the file.
+ - Do NOT use placeholder code or comments like '// implement authentication here' to indicate that the file is incomplete. Implement *all* functionality.
+ - Do NOT use reference comments ('// ... existing code ...'). Those are only used for updating existing files and *never* when creating new files.
+ - Include the *entire file* in the code block.
+
+
+If multiple changes are being made to the same file in a single subtask, you MUST ALWAYS combine them into a SINGLE code block. Do NOT use multiple code blocks for multiple changes to the same file. Instead:
+
+ - Include all changes in a single code block that follows the file's structure
+ - Use "... existing code ..." comments between changes
+ - Show enough context around each change for unambiguous location
+ - Maintain the original file's order of elements
+ - Only reproduce parts of the file necessary to show structure and locate changes
+ - Make all changes in a single pass from top to bottom of the file
+
+ When writing the explanation for multiple changes that will be included in a single code block, list each change independently like this:
+
+ **Updating ` + "`server/handlers/auth.go`" + `**
+ Change 1.
+ Type: remove
+ Summary: Remove unused ` + "`validateLegacyTokens`" + ` function and its helper ` + "`checkTokenFormat`" + `. Removes ` + "`validateLegacyTokens` and `checkTokenFormat`" + ` functions (original file lines 25-85).
+ Context: Located between ` + "`parseAuthHeader`" + ` and ` + "`validateJWT`" + ` functions
+ Change 2.
+ Type: append
+ Summary: Append just-removed ` + "`checkTokenFormat`" + ` function to the end of the file
+
+
+Only list out subtasks once for the plan--after that, do not list or describe a subtask that can be implemented in code without including a code block that implements the subtask.
+
+Do not implement a task partially and then give up even if it's very large or complex--do your best to implement each task and subtask **fully**.
+
+Do NOT repeat any part of your previous response. Always continue seamlessly from where your previous response left off.
+
+ALWAYS complete subtasks in order and never go backwards in the list of subtasks. Never skip a subtask or work on subtasks out of order. Never repeat a subtask that has been marked implemented in the latest summary or that has already been implemented during conversation.
+
+` + CurrentSubtaskPrompt + `
+
+` + MarkSubtaskDonePrompt + `
+
+` + FileOpsImplementationPromptSummary
+
+ file := ".gitignore"
+ if !params.IsGitRepo {
+ file = ".plandexignore"
+ }
+
+ s += fmt.Sprintf(`
+- Create or update the %s file if necessary.
+- If you write commands to _apply.sh, consider whether any files they produce should be added to %s.
+`, file, file)
+
+ s += `
+## Is the task done or in progress?
+
+Remember, you must follow these instructions on marking tasks as done or in progress:
+
+- When a subtask is *completed*, you *must* either:
+
+1. Mark it as 'done' in the format described in the 'Marking Tasks as Done Or In Progress' section.
+2. Mark it as 'in progress' by explaining that the task is not yet complete and will be continued in the next response.
+
+Remember, you must WAIT until the subtask is *fully implemented* before marking it as done. If a subtask is large, this may require multiple responses. If you have only implemented part of a subtask, do NOT mark it as done. It will be continued in one or more subsequent responses, and the last one of those responses will mark the subtask as done. If you mark the subtask done prematurely, you will stop it from being fully implemented, which will prevent the plan from being implemented correctly.
+
+## The Most Critical Factor
+
+Remember, the MOST critical factor in creating code blocks correctly is to locate them unambiguously in the file using the definitions that are immediately before and immediately after the section of code that is being changed or extended. Pay special attention to the 'Context' field in the Action Explanation. ALWAYS include at least a few additional lines of code before and after the section that is changing. And even if you need to include many lines to reach the *definitions* that are immediately before and after the section that is changing, do so.
+
+Definitions in the original file that are outside of the section that is changing are like "hooks" that determine where in the resulting file the new code you write will be placed.
+
+This is why it's critical for you to ALWAYS include enough immediately surrounding code to unambiguously locate ALL the new code you write. All the blocks of new code you write must hook in correctly using the hooks you supply from the original file when you include additional lines of code from the original file before and after the section that is changing.
+
+Even though you should include the definitions before and after the section, don't reproduce large sections of the original file. Use '... existing code ...' reference comments to 'collapse' large sections of the original file that are not changing.
+
+It's not easy to be 100%% consistent in writing code blocks that follow these rules, but you are capable of doing it with sufficient attention.
+
+This disambiguation technique is the *most important* part of correctly implementing a plan.
+`
+
+ return s
+}
+
+type UserPromptParams struct {
+ CreatePromptParams
+ Prompt string
+ OsDetails string
+ CurrentStage shared.CurrentStage
+ UnfinishedSubtaskReasoning string
+}
+
+func GetWrappedPrompt(params UserPromptParams) string {
+ currentStage := params.CurrentStage
+
+ prompt := params.Prompt
+ osDetails := params.OsDetails
+
+ var promptWrapperFormatStr string
+ if currentStage.TellStage == shared.TellStagePlanning {
+ if currentStage.PlanningPhase == shared.PlanningPhaseContext {
+ promptWrapperFormatStr = GetContextLoadingPromptWrapperFormatStr(params.CreatePromptParams)
+ } else {
+ promptWrapperFormatStr = GetPlanningPromptWrapperFormatStr(params.CreatePromptParams)
+ }
+ } else {
+ promptWrapperFormatStr = GetImplementationPromptWrapperFormatStr(params.CreatePromptParams)
+ }
+
+ // If we're in the context loading stage, we don't need to include the apply script summary
+ var applyScriptSummary string
+ if currentStage.TellStage == shared.TellStagePlanning && currentStage.PlanningPhase == shared.PlanningPhaseTasks {
+ applyScriptSummary = ApplyScriptPlanningPromptSummary
+ } else if currentStage.TellStage == shared.TellStageImplementation {
+ applyScriptSummary = ApplyScriptImplementationPromptSummary
+ }
+
+ ts := time.Now().Format(time.RFC3339)
+
+ s := "The current stage is: "
+ if currentStage.TellStage == shared.TellStagePlanning {
+ if currentStage.PlanningPhase == shared.PlanningPhaseContext {
+ s += "CONTEXT"
+ } else {
+ s += "PLANNING"
+ }
+ } else if currentStage.TellStage == shared.TellStageImplementation {
+ s += "IMPLEMENTATION"
+ }
+ s += "\n\n"
+ s += fmt.Sprintf(promptWrapperFormatStr, prompt, ts, osDetails, applyScriptSummary)
+
+ if currentStage.TellStage == shared.TellStageImplementation && params.UnfinishedSubtaskReasoning != "" {
+ s += "\n\n" + `
+The current task was not completed in the previous response and remains unfinished. Here is the reasoning for why it was not completed:
+
+` + params.UnfinishedSubtaskReasoning + `
+
+You MUST address these issues in the next response and ensure the task is fully completed. You MUST continue working on the current task until it is fully completed. Do NOT work on any other tasks. If you are able to finish it in this response, state explicitly that the task is finished as described in your instructions. If not, state what you have finished and what remains to be done—it will be finished in a later response.
+ `
+
+ }
+
+ return s
+}
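+
+// Example (illustrative only): wrapping a user prompt for the implementation
+// stage. The field values below are hypothetical stand-ins, not required
+// settings.
+//
+//	wrapped := GetWrappedPrompt(UserPromptParams{
+//		CreatePromptParams: CreatePromptParams{IsGitRepo: true},
+//		Prompt:             "Add a health check endpoint",
+//		OsDetails:          "linux amd64",
+//		CurrentStage: shared.CurrentStage{
+//			TellStage: shared.TellStageImplementation,
+//		},
+//	})
+//
+// The returned string embeds the prompt, timestamp, and OS details into the
+// stage-appropriate wrapper.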
+
+const UserContinuePrompt = "Continue the plan according to your instructions for the current stage. Don't repeat any part of your previous response."
+
+const AutoContinuePlanningPrompt = UserContinuePrompt
+
+const AutoContinueImplementationPrompt = `Continue the plan from where you left off in the previous response. Don't repeat any part of your previous response.
+
+Continue seamlessly from where your previous response left off.
+
+Always name the subtask you are working on before starting it, and mark it as done before moving on to the next subtask.
+
+` + CurrentSubtaskPrompt + `
+
+` + MarkSubtaskDonePrompt + `
+
+ALWAYS complete subtasks in order and never go backwards in the list of subtasks. Never skip a subtask or work on subtasks out of order. Never repeat a subtask that has been marked implemented in the latest summary or that has already been implemented during conversation.
+
+If you break up a task into subtasks, only include subtasks that can be implemented directly in code by creating or updating files. Only include subtasks that require executing code or commands if execution mode is enabled. Do not include subtasks that require user testing, deployment, or other tasks that go beyond coding.
+
+Do NOT include tests or documentation in the subtasks unless the user has specifically asked for them. Do not include extra code or features beyond what the user has asked for. Focus on the user's request and implement only what is necessary to fulfill it.`
+
+const SkippedPathsPrompt = "\n\nSome files have been skipped by the user and *must not* be generated. The user will handle any updates to these files themselves. Skip any parts of the plan that require generating these files. You *must not* generate a file block for any of these files.\nSkipped files:\n"
+
+const CombineSubtasksPrompt = `
+- Combine multiple steps into a single larger subtask where all of the steps are small enough to be completed in a single response (especially do this if multiple steps are closely related). Size each subtask so that it can be completed in a single response, while also aiming to minimize the total number of subtasks. For subtasks involving multiple steps and/or multiple files, use bullet points to break them up into smaller sub-subtasks.
+
+- When using bullet points to break up a subtask into multiple steps, make a note of any files that will be created or updated by each step—surround file paths with backticks like this: "` + "`path/to/some_file.txt`" + `". All paths mentioned in the bullet points of the subtask must be included in the 'Uses: ' list for the subtask.
+
+- Do NOT break up file operations of the same type (e.g. moving files, removing files, resetting pending changes) into multiple subtasks. Group them all into a *single* subtask.
+
+- Keep subtasks focused and manageable. While it's fine to group closely related changes (like small updates to a few tightly coupled files) into a single subtask, prefer breaking work into smaller, more focused subtasks when the changes are more substantial or independent. If a subtask involves many files or multiple distinct changes, consider whether it would be clearer and more maintainable to break it into multiple subtasks.
+
+Here are examples of good and poor task division:
+
+Example 1 - Poor (tasks too small and fragmented):
+1. Create the product.js file
+Uses: ` + "`src/models/product.js`" + `
+
+2. Add the product schema
+Uses: ` + "`src/models/product.js`" + `
+
+3. Add the validate() method
+Uses: ` + "`src/models/product.js`" + `
+
+4. Add the save() method
+Uses: ` + "`src/models/product.js`" + `
+
+Better:
+1. Create product model with core functionality
+- Create product.js with schema definition
+- Add validate() and save() methods
+Uses: ` + "`src/models/product.js`" + `
+
+Example 2 - Poor (task too large with unrelated changes):
+1. Implement user profile features
+- Add user avatar upload
+- Add profile settings page
+- Implement friend requests
+- Add user search
+- Create notification system
+Uses: ` + "`src/components/Profile.tsx`" + `, ` + "`src/components/Avatar.tsx`" + `, ` + "`src/components/Settings.tsx`" + `, ` + "`src/services/friends.ts`" + `, ` + "`src/services/search.ts`" + `, ` + "`src/services/notifications.ts`" + `
+
+Better:
+1. Implement user avatar upload functionality
+- Add avatar component with upload UI
+- Add avatar upload service
+Uses: ` + "`src/components/Avatar.tsx`" + `, ` + "`src/services/avatar.ts`" + `
+
+2. Create profile settings page
+- Add settings form components
+- Implement save/load settings
+Uses: ` + "`src/components/Settings.tsx`" + `, ` + "`src/services/settings.ts`" + `
+
+3. Add friend request system
+Uses: ` + "`src/services/friends.ts`" + `, ` + "`src/components/Profile.tsx`" + `
+
+Example 3 - Good (related changes properly grouped):
+1. Update error handling in authentication flow
+- Add error handling to login function
+- Add corresponding error states in auth context
+- Update error display in login form
+Uses: ` + "`src/auth/login.ts`" + `, ` + "`src/context/auth.tsx`" + `, ` + "`src/components/LoginForm.tsx`" + `
+
+Example 4 - Good (tightly coupled file updates):
+1. Rename UserType enum to AccountType
+- Update enum definition
+- Update all imports and usages
+Uses: ` + "`src/types/user.ts`" + `, ` + "`src/auth/account.ts`" + `, ` + "`src/components/UserProfile.tsx`" + `
+
+Notice in these examples:
+- Tasks that are too granular waste responses on tiny changes
+- Tasks that are too large mix unrelated changes and become hard to implement
+- Good tasks group related changes that make sense to implement together
+- Multiple files can be included when the changes are tightly coupled
+- Bullet points describe steps in a cohesive change, not separate features
+`
+
+type ChatUserPromptParams struct {
+ CreatePromptParams
+ Prompt string
+ OsDetails string
+}
+
+func GetWrappedChatOnlyPrompt(params ChatUserPromptParams) string {
+ // Base wrapper that's always included
+ baseWrapper := "# The user's latest prompt:\n```\n%s\n```\n\n" + `Please respond according to the 'Your instructions' section above.
+
+The current UTC timestamp is: %s
+
+User's operating system details:
+%s`
+
+ // Build additional instructions based on parameter combinations
+ var additionalInstructions string
+
+ // Execution mode handling
+ if params.ExecMode {
+ additionalInstructions += `
+*Execution mode is enabled.*
+- If you switch to tell mode, you can execute commands locally as needed
+- While you remain in chat mode, you can discuss both file changes and command execution, but you cannot update files or execute commands (unless the user first switches to tell mode)
+- Be specific about what commands would need to be run
+- Consider build processes, testing, and deployment
+- Distinguish between file changes and execution steps`
+ } else {
+ additionalInstructions += `
+*Execution mode is disabled.*
+- If you switch to tell mode, you cannot execute commands—keep this in mind when discussing the plan. If the plan requires commands to be run after switching to tell mode, the user would need to run them manually.
+- You can discuss build/test/deploy conceptually, but you cannot execute commands either in chat mode or in tell mode
+- Be clear when certain steps would need execution mode enabled`
+ }
+
+ additionalInstructions += `
+Keep in mind:
+- Stay conversational while being technically precise
+- Reference and explain code when helpful, but don't output formal implementation blocks
+- Focus on what's specifically asked - don't suggest extra features
+- Consider existing codebase structure in your explanations
+- When discussing libraries, focus on well-maintained, widely-used options
+- If the user wants to implement changes, remind them about 'tell mode'
+- Use error handling, logging, and security best practices in your suggestions
+- Be thoughtful about code organization and structure
+- Consider implications of suggested changes on the existing codebase
+
+Remember you're in chat mode:
+- Engage in natural technical discussion about code and context
+- Help users understand their codebase and plan potential changes
+- Provide explanations and answer questions thoroughly
+- Include code snippets only when they help explain concepts
+- Help debug issues by examining and explaining code
+- Suggest approaches and discuss trade-offs
+- Help evaluate different implementation strategies
+- Consider and explain implications of different approaches
+- Stay focused on understanding and planning rather than implementation
+
+You cannot:
+- Create or modify any files
+- Output formal implementation code blocks
+- Make plans using "### Tasks" sections
+- Structure responses as if implementing changes
+- Load context multiple times in consecutive responses
+- Switch to implementation mode without user request
+
+Even if a plan is in progress:
+- Stay in discussion mode, don't attempt to implement anything
+- You can discuss the current tasks and progress
+- You can provide explanations and suggestions
+- You can help debug issues or clarify approach
+- But you must not output any implementation code
+- Return to implementation only when user switches back to tell mode
+
+Remember that users often:
+- Switch between chat and tell mode during implementation
+- Use chat mode to understand before implementing
+- Need detailed technical discussion to plan effectively
+- Want to explore options before committing to changes
+- May need to debug or understand issues mid-implementation
+- You may receive a list of tasks that are in progress, including a 'current subtask'. You MUST NOT implement any tasks—only discuss them.
+`
+
+ promptWrapperFormatStr := baseWrapper + additionalInstructions
+
+ ts := time.Now().Format(time.RFC3339)
+ return fmt.Sprintf(promptWrapperFormatStr,
+ params.Prompt,
+ ts,
+ params.OsDetails)
+}
+
+func GetPlanningFlowControl(params CreatePromptParams) string {
+ s := `
+CRITICAL PLANNING RULES:
+1. For ANY update/revision to tasks:
+`
+
+ if params.ExecMode {
+ s += `You MUST output a ### Commands section before the ### Tasks list. If you determine that commands should be added or updated in _apply.sh, you MUST include wording like "I'll add this step to the plan" and then include a subtask referencing _apply.sh in the ### Tasks list.`
+ }
+
+ s += `
+ - You MUST output a new/updated ### Tasks list
+ `
+
+ if params.ExecMode {
+ s += `
+ - If the ### Commands section indicates that commands should be added or updated in _apply.sh, you MUST also create a subtask referencing _apply.sh in the ### Tasks list
+ `
+ }
+
+ s += `
+ - You MUST NOT UNDER ANY CIRCUMSTANCES start implementing code, even if you have already made a plan in a previous response and are ready to implement it—you still ABSOLUTELY MUST NOT implement code at this stage. You MUST make a plan first in the format described above.
+ - You MUST follow planning phase format exactly
+
+2. Even for small changes:
+ - Create/update task list first
+ - No implementation and NO CODE until planning is complete, and you have output a '### Tasks' section and a <PlandexFinish/> tag
+ - All changes must be in task list
+
+3. The planning stage is *ALWAYS* required. You MUST NEVER skip ahead and start writing code in this response. You MUST complete the planning stage first and output a '### Tasks' section and a <PlandexFinish/> tag before you can start implementing code.
+`
+
+ return s
+}
+
+// func GetFollowUpRequiredPrompt(params CreatePromptParams) string {
+// s := `
+// [MANDATORY FOLLOW-UP FLOW]
+
+// CRITICAL FLOW CONTROL:
+// 1. You MUST FIRST respond naturally to what the user has said/asked
+// 2. Then classify the prompt as either:
+// A. Update/revision to tasks (A1/A2/A3)
+// B. Conversation prompt (question/comment)
+
+// 3. IF classified as A (update/revision):
+// - You MUST create/update the task list with ### Tasks
+// - You MUST output immediately after the task list
+// - You MUST end your response immediately after
+// - You ABSOLUTELY MUST NOT proceed to implementation
+// - You MUST follow planning format exactly
+// Even if:
+// - The change is small
+// - You know the exact code to write
+// - You're continuing an existing plan
+
+// 4. IF classified as B (conversation):
+// - Continue conversation naturally
+// - Do not create tasks or implement code
+
+// 5. After responding and classifying, output EXACTLY ONE of these statements (naturally incorporated):
+// A. "I have the context I need to continue."
+// B. "I have the context I need to respond."
+// C. "I need more context to continue. "
+// D. "I need more context to respond. "
+// E. "This is a significant update to the plan. I'll clear all context without pending changes, then decide what context I need to move forward. "
+// F. "This is a new task that is distinct from the plan. I'll clear all context without pending changes, then decide what context I need to move forward. "
+
+// For statements A/B: You may rephrase naturally while keeping the meaning.
+// For statements C/D: MUST include exact phrase "need more context" and .
+// For statements E/F: MUST include exact phrase "clear all context" and .
+
+// CRITICAL: Always respond naturally to the user first, then seamlessly incorporate the required statement. Do NOT state that you are performing a classification or context assessment.
+// `
+
+// return s
+// }
diff --git a/app/server/model/summarize.go b/app/server/model/summarize.go
new file mode 100644
index 0000000000000000000000000000000000000000..fb71b24368669e60223fa727bde1003eda12da85
--- /dev/null
+++ b/app/server/model/summarize.go
@@ -0,0 +1,101 @@
+package model
+
+import (
+ "context"
+ "fmt"
+ "net/http"
+ "plandex-server/db"
+ "plandex-server/model/prompts"
+ "plandex-server/types"
+ "strings"
+ "time"
+
+ shared "plandex-shared"
+
+ "github.com/sashabaranov/go-openai"
+)
+
+type PlanSummaryParams struct {
+ Auth *types.ServerAuth
+ Plan *db.Plan
+ ModelStreamId string
+ ModelPackName string
+ Conversation []*types.ExtendedChatMessage
+ ConversationNumTokens int
+ LatestConvoMessageId string
+ LatestConvoMessageCreatedAt time.Time
+ NumMessages int
+ SessionId string
+}
+
+func PlanSummary(clients map[string]ClientInfo, authVars map[string]string, settings *shared.PlanSettings, orgUserConfig *shared.OrgUserConfig, config shared.ModelRoleConfig, params PlanSummaryParams, ctx context.Context) (*db.ConvoSummary, *shared.ApiError) {
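+ // Build the summary request: the system identity prompt first, then the
+ // full conversation so far, then the summarization instruction as the
+ // final user message.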
+ messages := []types.ExtendedChatMessage{
+ {
+ Role: openai.ChatMessageRoleSystem,
+ Content: []types.ExtendedChatMessagePart{
+ {
+ Type: openai.ChatMessagePartTypeText,
+ Text: prompts.Identity,
+ },
+ },
+ },
+ }
+
+ for _, message := range params.Conversation {
+ messages = append(messages, *message)
+ }
+
+ messages = append(messages, types.ExtendedChatMessage{
+ Role: openai.ChatMessageRoleUser,
+ Content: []types.ExtendedChatMessagePart{
+ {
+ Type: openai.ChatMessagePartTypeText,
+ Text: prompts.PlanSummary,
+ },
+ },
+ })
+
+ modelRes, err := ModelRequest(ctx, ModelRequestParams{
+ Clients: clients,
+ Auth: params.Auth,
+ AuthVars: authVars,
+ Plan: params.Plan,
+ ModelConfig: &config,
+ Purpose: "Conversation summary",
+ ConvoMessageId: params.LatestConvoMessageId,
+ ModelStreamId: params.ModelStreamId,
+ Messages: messages,
+ SessionId: params.SessionId,
+ Settings: settings,
+ OrgUserConfig: orgUserConfig,
+ })
+
+ if err != nil {
+ return nil, &shared.ApiError{
+ Type: shared.ApiErrorTypeOther,
+ Status: http.StatusInternalServerError,
+ Msg: fmt.Sprintf("error generating plan summary: %v", err),
+ }
+ }
+
+ summary := modelRes.Content
+ if !strings.HasPrefix(summary, "## Summary of the plan so far:") {
+ summary = "## Summary of the plan so far:\n\n" + summary
+ }
+
+ var tokens int
+ if modelRes.Usage != nil {
+ tokens = modelRes.Usage.CompletionTokens
+ }
+
+ return &db.ConvoSummary{
+ OrgId: params.Auth.OrgId,
+ PlanId: params.Plan.Id,
+ Summary: summary,
+ Tokens: tokens,
+ LatestConvoMessageId: params.LatestConvoMessageId,
+ LatestConvoMessageCreatedAt: params.LatestConvoMessageCreatedAt,
+ NumMessages: params.NumMessages,
+ }, nil
+
+}
diff --git a/app/server/model/tokens.go b/app/server/model/tokens.go
new file mode 100644
index 0000000000000000000000000000000000000000..93535398c831043e63728a2036c0c2b37e609bc5
--- /dev/null
+++ b/app/server/model/tokens.go
@@ -0,0 +1,50 @@
+package model
+
+import (
+ "plandex-server/types"
+ shared "plandex-shared"
+
+ "github.com/sashabaranov/go-openai"
+)
+
+const (
+ // Per OpenAI's documentation:
+ // Every message follows this format: {"role": "role_name", "content": "content"}
+ // which has a 4-token overhead per message
+ TokensPerMessage = 4
+
+ // System, user, or assistant - each role name costs 1 token
+ TokensPerName = 1
+
+ // Tokens per request
+ TokensPerRequest = 3
+
+ TokensPerExtendedPart = 6
+)
+
+func GetMessagesTokenEstimate(messages ...types.ExtendedChatMessage) int {
+ tokens := 0
+
+ for _, msg := range messages {
+ tokens += TokensPerMessage // Base message overhead
+ tokens += TokensPerName // Role name
+
+ if len(msg.Content) > 0 {
+ // For each extended part, we need to account for the JSON structure
+ // Each part follows format: {"type": "type_value", "text": "content"}
+ // or {"type": "type_value", "image_url": {"url": "url_value"}}
+ for _, part := range msg.Content {
+ if part.Type == openai.ChatMessagePartTypeText {
+ tokens += TokensPerExtendedPart // Overhead for the part object structure
+ tokens += shared.GetNumTokensEstimate(part.Text)
+ }
+
+ // images are handled separately
+
+ }
+ }
+
+ }
+
+ return tokens
+}
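+
+// Worked example (illustrative): a single message with one text part whose
+// text is estimated at 10 tokens yields TokensPerMessage (4) +
+// TokensPerName (1) + TokensPerExtendedPart (6) + 10 = 21 tokens. Note that
+// TokensPerRequest is a separate per-request constant and is not added here.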
diff --git a/app/server/notify/errors.go b/app/server/notify/errors.go
new file mode 100644
index 0000000000000000000000000000000000000000..c8a3b30a22ca7d8fe7b7b7175f0b73169fee796a
--- /dev/null
+++ b/app/server/notify/errors.go
@@ -0,0 +1,34 @@
+package notify
+
+import (
+ "log"
+ "runtime/debug"
+)
+
+// this allows Plandex Cloud to inject error monitoring
+// all non-streaming handlers are already wrapped with different logic, so this is only needed for errors in streaming handlers
+
+type Severity int
+
+const (
+ SeverityInfo Severity = iota
+ SeverityError
+)
+
+var NotifyErrFn func(severity Severity, data ...interface{})
+
+func RegisterNotifyErrFn(fn func(severity Severity, data ...interface{})) {
+ NotifyErrFn = fn
+}
+
+func NotifyErr(severity Severity, data ...interface{}) {
+ defer func() {
+ if r := recover(); r != nil {
+ log.Printf("panic in NotifyErr: %v\n%s", r, debug.Stack())
+ }
+ }()
+
+ if NotifyErrFn != nil {
+ NotifyErrFn(severity, data...)
+ }
+}
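+
+// Example (illustrative): Plandex Cloud registers a monitoring hook at
+// startup, and streaming handlers then report errors through it. The
+// monitoring target and streamErr below are hypothetical stand-ins.
+//
+//	RegisterNotifyErrFn(func(severity Severity, data ...interface{}) {
+//		// forward severity and data to an external error monitoring service
+//	})
+//
+//	NotifyErr(SeverityError, "stream failed: ", streamErr)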
diff --git a/app/server/routes/routes.go b/app/server/routes/routes.go
new file mode 100644
index 0000000000000000000000000000000000000000..8cdf11ebd20a1f2af8bfbcb87d4937cb7ba22db9
--- /dev/null
+++ b/app/server/routes/routes.go
@@ -0,0 +1,233 @@
+package routes
+
+import (
+ "fmt"
+ "log"
+ "net/http"
+ "os"
+ "path/filepath"
+ "plandex-server/handlers"
+ "plandex-server/hooks"
+
+ "github.com/gorilla/mux"
+)
+
+type PlandexHandler func(w http.ResponseWriter, r *http.Request)
+type HandlePlandex func(router *mux.Router, path string, isStreaming bool, handler PlandexHandler) *mux.Route
+
+var HandlePlandexFn HandlePlandex
+
+func RegisterHandlePlandex(fn HandlePlandex) {
+ HandlePlandexFn = fn
+}
+
+func EnsureHandlePlandex() {
+ if HandlePlandexFn == nil {
+ panic("handlePlandexFn is not set")
+ }
+}
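+
+// Example (illustrative): a minimal wrapper registered at startup. A real
+// wrapper would likely use isStreaming to adjust timeouts or middleware;
+// this sketch ignores it.
+//
+//	RegisterHandlePlandex(func(router *mux.Router, path string, isStreaming bool, handler PlandexHandler) *mux.Route {
+//		return router.HandleFunc(path, handler)
+//	})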
+
+func AddHealthRoutes(r *mux.Router) {
+ EnsureHandlePlandex()
+
+ HandlePlandexFn(r, "/", false, func(w http.ResponseWriter, r *http.Request) {
+ w.Header().Set("Content-Type", "text/html")
+ fmt.Fprint(w, `
+<!DOCTYPE html>
+<html>
+<head>
+	<title>Plandex Server</title>
+</head>
+<body>
+	<h1>Plandex Server</h1>
+	<p>Status: RUNNING</p>
+	<p>This is a Plandex server instance running on Hugging Face Spaces.</p>