Amiel committed
Commit 676fc08 · verified · 1 Parent(s): 44d050f

Upload folder using huggingface_hub

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. .dockerignore +6 -0
  2. .github/ISSUE_TEMPLATE/bug_report.yml +146 -0
  3. .github/ISSUE_TEMPLATE/feature_request.yml +53 -0
  4. .github/workflows/docker.yml +46 -0
  5. .github/workflows/ghcr.yml +60 -0
  6. .github/workflows/issue-translator.yml +15 -0
  7. .github/workflows/sync.yml +39 -0
  8. .gitignore +45 -0
  9. .node-version +1 -0
  10. .vscode/settings.json +7 -0
  11. Dockerfile +44 -0
  12. LICENSE +21 -0
  13. README.md +459 -0
  14. components.json +21 -0
  15. docker-compose.yml +12 -0
  16. docs/How-to-deploy-to-Cloudflare-Pages.md +14 -0
  17. docs/deep-research-api-doc.md +332 -0
  18. env.tpl +101 -0
  19. eslint.config.mjs +21 -0
  20. next.config.ts +158 -0
  21. package.json +114 -0
  22. pnpm-lock.yaml +0 -0
  23. postcss.config.mjs +7 -0
  24. public/logo.png +0 -0
  25. public/logo.svg +14 -0
  26. public/screenshots/main-interface.png +0 -0
  27. public/scripts/eruda.min.js +0 -0
  28. public/scripts/pdf.worker.min.mjs +0 -0
  29. src/app/api/ai/anthropic/[...slug]/route.ts +55 -0
  30. src/app/api/ai/azure/[...slug]/route.ts +56 -0
  31. src/app/api/ai/deepseek/[...slug]/route.ts +53 -0
  32. src/app/api/ai/google/[...slug]/route.ts +57 -0
  33. src/app/api/ai/mistral/[...slug]/route.ts +52 -0
  34. src/app/api/ai/ollama/[...slug]/route.ts +51 -0
  35. src/app/api/ai/openai/[...slug]/route.ts +52 -0
  36. src/app/api/ai/openaicompatible/[...slug]/route.ts +51 -0
  37. src/app/api/ai/openrouter/[...slug]/route.ts +54 -0
  38. src/app/api/ai/pollinations/[...slug]/route.ts +52 -0
  39. src/app/api/ai/xai/[...slug]/route.ts +52 -0
  40. src/app/api/crawler/route.ts +36 -0
  41. src/app/api/mcp/[...slug]/route.ts +91 -0
  42. src/app/api/mcp/route.ts +50 -0
  43. src/app/api/mcp/server.ts +404 -0
  44. src/app/api/search/bocha/[...slug]/route.ts +47 -0
  45. src/app/api/search/exa/[...slug]/route.ts +47 -0
  46. src/app/api/search/firecrawl/[...slug]/route.ts +48 -0
  47. src/app/api/search/searxng/[...slug]/route.ts +49 -0
  48. src/app/api/search/tavily/[...slug]/route.ts +47 -0
  49. src/app/api/sse/live/route.ts +105 -0
  50. src/app/api/sse/route.ts +111 -0
.dockerignore ADDED
@@ -0,0 +1,6 @@
+ .dockerignore
+ node_modules
+ README.md
+ .next
+ .git
+ out
.github/ISSUE_TEMPLATE/bug_report.yml ADDED
@@ -0,0 +1,146 @@
+ name: Bug report
+ description: Create a report to help us improve
+ title: "[Bug]: "
+ labels: ["bug"]
+
+ body:
+   - type: markdown
+     attributes:
+       value: "## Describe the bug"
+   - type: textarea
+     id: bug-description
+     attributes:
+       label: "Bug Description"
+       description: "A clear and concise description of what the bug is."
+       placeholder: "Explain the bug..."
+     validations:
+       required: true
+
+   - type: markdown
+     attributes:
+       value: "## To Reproduce"
+   - type: textarea
+     id: steps-to-reproduce
+     attributes:
+       label: "Steps to Reproduce"
+       description: "Steps to reproduce the behavior:"
+       placeholder: |
+         1. Go to '...'
+         2. Click on '....'
+         3. Scroll down to '....'
+         4. See error
+     validations:
+       required: true
+
+   - type: markdown
+     attributes:
+       value: "## Expected behavior"
+   - type: textarea
+     id: expected-behavior
+     attributes:
+       label: "Expected Behavior"
+       description: "A clear and concise description of what you expected to happen."
+       placeholder: "Describe what you expected to happen..."
+     validations:
+       required: true
+
+   - type: markdown
+     attributes:
+       value: "## Screenshots"
+   - type: textarea
+     id: screenshots
+     attributes:
+       label: "Screenshots"
+       description: "If applicable, add screenshots to help explain your problem."
+       placeholder: "Paste your screenshots here or write 'N/A' if not applicable..."
+     validations:
+       required: false
+
+   - type: markdown
+     attributes:
+       value: "## Deployment"
+   - type: checkboxes
+     id: deployment
+     attributes:
+       label: "Deployment Method"
+       description: "Please select the deployment method you are using."
+       options:
+         - label: "Docker"
+         - label: "Vercel"
+         - label: "Server"
+
+   - type: markdown
+     attributes:
+       value: "## Desktop (please complete the following information):"
+   - type: input
+     id: desktop-os
+     attributes:
+       label: "Desktop OS"
+       description: "Your desktop operating system."
+       placeholder: "e.g., Windows 10"
+     validations:
+       required: false
+   - type: input
+     id: desktop-browser
+     attributes:
+       label: "Desktop Browser"
+       description: "Your desktop browser."
+       placeholder: "e.g., Chrome, Safari"
+     validations:
+       required: false
+   - type: input
+     id: desktop-version
+     attributes:
+       label: "Desktop Browser Version"
+       description: "Version of your desktop browser."
+       placeholder: "e.g., 89.0"
+     validations:
+       required: false
+
+   - type: markdown
+     attributes:
+       value: "## Smartphone (please complete the following information):"
+   - type: input
+     id: smartphone-device
+     attributes:
+       label: "Smartphone Device"
+       description: "Your smartphone device."
+       placeholder: "e.g., iPhone X"
+     validations:
+       required: false
+   - type: input
+     id: smartphone-os
+     attributes:
+       label: "Smartphone OS"
+       description: "Your smartphone operating system."
+       placeholder: "e.g., iOS 14.4"
+     validations:
+       required: false
+   - type: input
+     id: smartphone-browser
+     attributes:
+       label: "Smartphone Browser"
+       description: "Your smartphone browser."
+       placeholder: "e.g., Safari"
+     validations:
+       required: false
+   - type: input
+     id: smartphone-version
+     attributes:
+       label: "Smartphone Browser Version"
+       description: "Version of your smartphone browser."
+       placeholder: "e.g., 14"
+     validations:
+       required: false
+
+   - type: markdown
+     attributes:
+       value: "## Additional Logs"
+   - type: textarea
+     id: additional-logs
+     attributes:
+       label: "Additional Logs"
+       description: "Add any logs about the problem here."
+       placeholder: "Paste any relevant logs here..."
+     validations:
+       required: false
.github/ISSUE_TEMPLATE/feature_request.yml ADDED
@@ -0,0 +1,53 @@
+ name: Feature request
+ description: Suggest an idea for this project
+ title: "[Feature Request]: "
+ labels: ["feature"]
+
+ body:
+   - type: markdown
+     attributes:
+       value: "## Is your feature request related to a problem? Please describe."
+   - type: textarea
+     id: problem-description
+     attributes:
+       label: Problem Description
+       description: "A clear and concise description of what the problem is. Example: I'm always frustrated when [...]"
+       placeholder: "Explain the problem you are facing..."
+     validations:
+       required: true
+
+   - type: markdown
+     attributes:
+       value: "## Describe the solution you'd like"
+   - type: textarea
+     id: desired-solution
+     attributes:
+       label: Solution Description
+       description: A clear and concise description of what you want to happen.
+       placeholder: "Describe the solution you'd like..."
+     validations:
+       required: true
+
+   - type: markdown
+     attributes:
+       value: "## Describe alternatives you've considered"
+   - type: textarea
+     id: alternatives-considered
+     attributes:
+       label: Alternatives Considered
+       description: A clear and concise description of any alternative solutions or features you've considered.
+       placeholder: "Describe any alternative solutions or features you've considered..."
+     validations:
+       required: false
+
+   - type: markdown
+     attributes:
+       value: "## Additional context"
+   - type: textarea
+     id: additional-context
+     attributes:
+       label: Additional Context
+       description: Add any other context or screenshots about the feature request here.
+       placeholder: "Add any other context or screenshots about the feature request here..."
+     validations:
+       required: false
.github/workflows/docker.yml ADDED
@@ -0,0 +1,46 @@
+ name: Publish Docker image
+
+ on:
+   push:
+     tags:
+       # Push events matching v*, such as v1.0, v20.15.10, etc. to trigger workflows
+       - "v*"
+
+ jobs:
+   push_to_registry:
+     name: Push Docker image to Docker Hub
+     runs-on: ubuntu-latest
+     steps:
+       - name: Check out the repo
+         uses: actions/checkout@v3
+       - name: Log in to Docker Hub
+         uses: docker/login-action@v2
+         with:
+           username: ${{ secrets.DOCKER_USERNAME }}
+           password: ${{ secrets.DOCKER_PASSWORD }}
+
+       - name: Extract metadata (tags, labels) for Docker
+         id: meta
+         uses: docker/metadata-action@v4
+         with:
+           images: xiangfa/deep-research
+           tags: |
+             type=raw,value=latest
+             type=ref,event=tag
+
+       - name: Set up QEMU
+         uses: docker/setup-qemu-action@v2
+
+       - name: Set up Docker Buildx
+         uses: docker/setup-buildx-action@v2
+
+       - name: Build and push Docker image
+         uses: docker/build-push-action@v4
+         with:
+           context: .
+           platforms: linux/amd64,linux/arm64
+           push: true
+           tags: ${{ steps.meta.outputs.tags }}
+           labels: ${{ steps.meta.outputs.labels }}
+           cache-from: type=gha
+           cache-to: type=gha,mode=max
.github/workflows/ghcr.yml ADDED
@@ -0,0 +1,60 @@
+ name: Create and publish a Docker image
+
+ # Configures this workflow to run every time a change is pushed to the branch called `release`.
+ on:
+   push:
+     tags:
+       # Push events matching v*, such as v1.0, v20.15.10, etc. to trigger workflows
+       - "v*"
+
+ # Defines two custom environment variables for the workflow. These are used for the Container registry domain, and a name for the Docker image that this workflow builds.
+ env:
+   REGISTRY: ghcr.io
+   IMAGE_NAME: ${{ github.repository }}
+
+ # There is a single job in this workflow. It's configured to run on the latest available version of Ubuntu.
+ jobs:
+   build-and-push-image:
+     runs-on: ubuntu-latest
+     # Sets the permissions granted to the `GITHUB_TOKEN` for the actions in this job.
+     permissions:
+       contents: read
+       packages: write
+       attestations: write
+       id-token: write
+     #
+     steps:
+       - name: Checkout repository
+         uses: actions/checkout@v4
+       # Uses the `docker/login-action` action to log in to the Container registry using the account and password that will publish the packages. Once published, the packages are scoped to the account defined here.
+       - name: Log in to the Container registry
+         uses: docker/login-action@v2
+         with:
+           registry: ${{ env.REGISTRY }}
+           username: ${{ github.actor }}
+           password: ${{ secrets.GITHUB_TOKEN }}
+       # This step uses [docker/metadata-action](https://github.com/docker/metadata-action#about) to extract tags and labels that will be applied to the specified image. The `id` "meta" allows the output of this step to be referenced in a subsequent step. The `images` value provides the base name for the tags and labels.
+       - name: Extract metadata (tags, labels) for Docker
+         id: meta
+         uses: docker/metadata-action@v5
+         with:
+           images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
+       # This step uses the `docker/build-push-action` action to build the image, based on your repository's `Dockerfile`. If the build succeeds, it pushes the image to GitHub Packages.
+       # It uses the `context` parameter to define the build's context as the set of files located in the specified path. For more information, see [Usage](https://github.com/docker/build-push-action#usage) in the README of the `docker/build-push-action` repository.
+       # It uses the `tags` and `labels` parameters to tag and label the image with the output from the "meta" step.
+       - name: Build and push Docker image
+         id: push
+         uses: docker/build-push-action@v4
+         with:
+           context: .
+           push: true
+           tags: ${{ steps.meta.outputs.tags }}
+           labels: ${{ steps.meta.outputs.labels }}
+
+       # This step generates an artifact attestation for the image, which is an unforgeable statement about where and how it was built. It increases supply chain security for people who consume the image. For more information, see [Using artifact attestations to establish provenance for builds](/actions/security-guides/using-artifact-attestations-to-establish-provenance-for-builds).
+       - name: Generate artifact attestation
+         uses: actions/attest-build-provenance@v2
+         with:
+           subject-name: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
+           subject-digest: ${{ steps.push.outputs.digest }}
+           push-to-registry: true
.github/workflows/issue-translator.yml ADDED
@@ -0,0 +1,15 @@
+ name: Issue Translator
+ on:
+   issue_comment:
+     types: [created]
+   issues:
+     types: [opened]
+
+ jobs:
+   build:
+     runs-on: ubuntu-latest
+     steps:
+       - uses: usthe/issues-translate-action@v2.7
+         with:
+           IS_MODIFY_TITLE: false
+           CUSTOM_BOT_NOTE: Bot detected the issue body's language is not English, translate it automatically.
.github/workflows/sync.yml ADDED
@@ -0,0 +1,39 @@
+ name: Upstream Sync
+
+ permissions:
+   contents: write
+
+ on:
+   schedule:
+     - cron: "0 4 * * *" # At 04:00, every day
+   workflow_dispatch:
+
+ jobs:
+   sync_latest_from_upstream:
+     name: Sync latest commits from upstream repo
+     runs-on: ubuntu-latest
+     if: ${{ github.event.repository.fork }}
+
+     steps:
+       # Step 1: run a standard checkout action
+       - name: Checkout target repo
+         uses: actions/checkout@v3
+
+       # Step 2: run the sync action
+       - name: Sync upstream changes
+         id: sync
+         uses: aormsby/Fork-Sync-With-Upstream-action@v3.4
+         with:
+           upstream_sync_repo: u14app/deep-research
+           upstream_sync_branch: main
+           target_sync_branch: main
+           target_repo_token: ${{ secrets.GITHUB_TOKEN }} # automatically generated, no need to set
+
+           # Set test_mode true to run tests instead of the true action!!
+           test_mode: false
+
+       - name: Sync check
+         if: failure()
+         run: |
+           echo "[Error] Due to a change in the workflow file of the upstream repository, GitHub has automatically suspended the scheduled automatic update. You need to manually sync your fork."
+           exit 1
.gitignore ADDED
@@ -0,0 +1,45 @@
+ # See https://help.github.com/articles/ignoring-files/ for more about ignoring files.
+
+ # dependencies
+ /node_modules
+ /.pnp
+ .pnp.*
+ .yarn/*
+ !.yarn/patches
+ !.yarn/plugins
+ !.yarn/releases
+ !.yarn/versions
+
+ # testing
+ /coverage
+
+ # next.js
+ /.next/
+ /out/
+
+ # production
+ /build
+
+ # misc
+ .DS_Store
+ *.pem
+
+ # debug
+ npm-debug.log*
+ yarn-debug.log*
+ yarn-error.log*
+ .pnpm-debug.log*
+
+ # env files (can opt-in for committing if needed)
+ .env*
+
+ # vercel
+ .vercel
+
+ # typescript
+ *.tsbuildinfo
+ next-env.d.ts
+
+ # Serwist
+ public/sw*
+ public/swe-worker*
.node-version ADDED
@@ -0,0 +1 @@
+ 18.18.0
.vscode/settings.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "css.lint.unknownAtRules": "ignore",
+   "i18n-ally.localesPaths": ["src/locales"],
+   "i18n-ally.keystyle": "nested",
+   "i18n-ally.displayLanguage": "en-US",
+   "i18n-ally.sourceLanguage": "en-US"
+ }
Dockerfile ADDED
@@ -0,0 +1,44 @@
+ FROM node:18-alpine AS base
+
+ # Install dependencies only when needed
+ FROM base AS deps
+ # Check https://github.com/nodejs/docker-node/tree/b4117f9333da4138b03a546ec926ef50a31506c3#nodealpine to understand why libc6-compat might be needed.
+ RUN apk add --no-cache libc6-compat
+
+ WORKDIR /app
+
+ # Install dependencies based on the preferred package manager
+ COPY package.json pnpm-lock.yaml ./
+ RUN yarn global add pnpm && pnpm install --frozen-lockfile
+
+ # Rebuild the source code only when needed
+ FROM base AS builder
+ WORKDIR /app
+ COPY --from=deps /app/node_modules ./node_modules
+ COPY . .
+
+ # Next.js collects completely anonymous telemetry data about general usage.
+ # Learn more here: https://nextjs.org/telemetry
+ # Uncomment the following line in case you want to disable telemetry during the build.
+ # ENV NEXT_TELEMETRY_DISABLED 1
+
+ RUN yarn run build:standalone
+
+ # Production image, copy all the files and run next
+ FROM base AS runner
+ WORKDIR /app
+
+ ENV NODE_ENV=production
+ ENV NEXT_PUBLIC_BUILD_MODE=standalone
+
+ # Automatically leverage output traces to reduce image size
+ # https://nextjs.org/docs/advanced-features/output-file-tracing
+ COPY --from=builder /app/.next/standalone ./
+ COPY --from=builder /app/.next/static ./.next/static
+ COPY --from=builder /app/public ./public
+
+ EXPOSE 3000
+
+ # server.js is created by next build from the standalone output
+ # https://nextjs.org/docs/pages/api-reference/next-config-js/output
+ CMD ["node", "server.js"]
LICENSE ADDED
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2024 u14app
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
README.md ADDED
@@ -0,0 +1,459 @@
+ <div align="center">
+ <h1>Deep Research</h1>
+
+ ![GitHub deployments](https://img.shields.io/github/deployments/u14app/gemini-next-chat/Production)
+ ![GitHub Release](https://img.shields.io/github/v/release/u14app/deep-research)
+ ![Docker Image Size](https://img.shields.io/docker/image-size/xiangfa/deep-research/latest)
+ ![Docker Pulls](https://img.shields.io/docker/pulls/xiangfa/deep-research)
+ [![License: MIT](https://img.shields.io/badge/License-MIT-default.svg)](https://opensource.org/licenses/MIT)
+
+ [![Gemini](https://img.shields.io/badge/Gemini-8E75B2?style=flat&logo=googlegemini&logoColor=white)](https://ai.google.dev/)
+ [![Next](https://img.shields.io/badge/Next.js-111111?style=flat&logo=nextdotjs&logoColor=white)](https://nextjs.org/)
+ [![Tailwind CSS](https://img.shields.io/badge/Tailwind%20CSS-06B6D4?style=flat&logo=tailwindcss&logoColor=white)](https://tailwindcss.com/)
+ [![shadcn/ui](https://img.shields.io/badge/shadcn/ui-111111?style=flat&logo=shadcnui&logoColor=white)](https://ui.shadcn.com/)
+
+ [![Vercel](https://img.shields.io/badge/Vercel-111111?style=flat&logo=vercel&logoColor=white)](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2Fu14app%2Fdeep-research&project-name=deep-research&repository-name=deep-research)
+ [![Cloudflare](https://img.shields.io/badge/Cloudflare-F69652?style=flat&logo=cloudflare&logoColor=white)](./docs/How-to-deploy-to-Cloudflare-Pages.md)
+ [![PWA](https://img.shields.io/badge/PWA-blue?style=flat&logo=pwa&logoColor=white)](https://research.u14.app/)
+
+ </div>
+
+ **Lightning-Fast Deep Research Report**
+
+ Deep Research uses a variety of powerful AI models to generate in-depth research reports in just a few minutes. It leverages advanced "Thinking" and "Task" models, combined with an internet connection, to provide fast and insightful analysis on a variety of topics. **Your privacy is paramount - all data is processed and stored locally.**
+
+ ## ✨ Features
+
+ - **Rapid Deep Research:** Generates comprehensive research reports in about 2 minutes, significantly accelerating your research process.
+ - **Multi-platform Support:** Supports rapid deployment to Vercel, Cloudflare and other platforms.
+ - **Powered by AI:** Utilizes advanced AI models for accurate and insightful analysis.
+ - **Privacy-Focused:** Your data remains private and secure, as all data is stored locally in your browser.
+ - **Support for Multi-LLM:** Supports a variety of mainstream large language models, including Gemini, OpenAI, Anthropic, Deepseek, Grok, Mistral, Azure OpenAI, any OpenAI Compatible LLMs, OpenRouter, Ollama, etc.
+ - **Support Web Search:** Supports search engines such as Searxng, Tavily, Firecrawl, Exa, Bocha, etc., allowing LLMs that do not support search to use the web search function more conveniently.
+ - **Thinking & Task Models:** Employs sophisticated "Thinking" and "Task" models to balance depth and speed, ensuring high-quality results quickly. Supports switching research models.
+ - **Support Further Research:** You can refine or adjust the research content at any stage of the project and re-run the research from that stage.
+ - **Local Knowledge Base:** Supports uploading and processing text, Office, PDF and other resource files to generate a local knowledge base.
+ - **Artifact:** Supports editing of research content, with two editing modes: WYSIWYM and Markdown. It is possible to adjust the reading level, article length and full text translation.
+ - **Knowledge Graph:** Supports one-click generation of a knowledge graph, giving you a systematic understanding of the report content.
+ - **Research History:** Supports saving research history, so you can review previous research results at any time and conduct in-depth research again.
+ - **Local & Server API Support:** Offers flexibility with both local and server-side API calling options to suit your needs.
+ - **Support for SaaS and MCP:** You can use this project as a deep research service (SaaS) through the SSE API, or use it in other AI services through the MCP service.
+ - **Support PWA:** With Progressive Web App (PWA) technology, you can use the project like a native application.
+ - **Support Multi-Key payload:** Supports a Multi-Key payload to improve API response efficiency.
+ - **Multi-language Support**: English, 简体中文, Español.
+ - **Built with Modern Technologies:** Developed using Next.js 15 and Shadcn UI, ensuring a modern, performant, and visually appealing user experience.
+ - **MIT Licensed:** Open-source and freely available for personal and commercial use under the MIT License.
+
+ ## 🎯 Roadmap
+
+ - [x] Support preservation of research history
+ - [x] Support editing final report and search results
+ - [x] Support for other LLM models
+ - [x] Support file upload and local knowledge base
+ - [x] Support SSE API and MCP server
+
+ ## 🚀 Getting Started
+
+ ### Use Free Gemini (recommended)
+
+ 1. Get a [Gemini API Key](https://aistudio.google.com/app/apikey)
+ 2. Deploy the project with one click; you can choose to deploy to Vercel or Cloudflare
+
+    [![Deploy with Vercel](https://vercel.com/button)](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2Fu14app%2Fdeep-research&project-name=deep-research&repository-name=deep-research)
+
+    Currently the project supports deployment to Cloudflare, but you need to follow [How to deploy to Cloudflare Pages](./docs/How-to-deploy-to-Cloudflare-Pages.md) to do it.
+
+ 3. Start using
+
+ ### Use Other LLM
+
+ 1. Deploy the project to Vercel or Cloudflare
+ 2. Set the LLM API key
+ 3. Set the LLM API base URL (optional)
+ 4. Start using
+
+ ## ⌨️ Development
+
+ Follow these steps to get Deep Research up and running in your local environment.
+
+ ### Prerequisites
+
+ - [Node.js](https://nodejs.org/) (version 18.18.0 or later recommended)
+ - [pnpm](https://pnpm.io/) or [npm](https://www.npmjs.com/) or [yarn](https://yarnpkg.com/)
+
+ ### Installation
+
+ 1. **Clone the repository:**
+
+    ```bash
+    git clone https://github.com/u14app/deep-research.git
+    cd deep-research
+    ```
+
+ 2. **Install dependencies:**
+
+    ```bash
+    pnpm install # or npm install or yarn install
+    ```
+
+ 3. **Set up Environment Variables:**
+
+    Copy `env.tpl` to `.env` (or create a `.env` file and write the variables into it).
+
+    ```bash
+    # For Development
+    cp env.tpl .env.local
+    # For Production
+    cp env.tpl .env
+    ```
+
+ 4. **Run the development server:**
+
+    ```bash
+    pnpm dev # or npm run dev or yarn dev
+    ```
+
+    Open your browser and visit [http://localhost:3000](http://localhost:3000) to access Deep Research.
+
+ ### Custom Model List
+
+ The project allows a custom model list, but it **only works in proxy mode**. Please add an environment variable named `NEXT_PUBLIC_MODEL_LIST` in the `.env` file or on the environment variables page.
+
+ Custom model lists use `,` to separate multiple models. If you want to disable a model, use the `-` symbol followed by the model name, i.e. `-existing-model-name`. To only allow the specified models to be available, use `-all,+new-model-name`.
+
+ ## 🚢 Deployment
+
+ ### Vercel
+
+ [![Deploy with Vercel](https://vercel.com/button)](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2Fu14app%2Fdeep-research&project-name=deep-research&repository-name=deep-research)
+
+ ### Cloudflare
+
+ Currently the project supports deployment to Cloudflare, but you need to follow [How to deploy to Cloudflare Pages](./docs/How-to-deploy-to-Cloudflare-Pages.md) to do it.
+
+ ### Docker
+
+ > The Docker version needs to be 20 or above, otherwise it will report that the image cannot be found.
+
+ > ⚠️ Note: Most of the time, the Docker image will lag behind the latest release by 1 to 2 days, so the "update exists" prompt will continue to appear after deployment, which is normal.
+
+ ```bash
+ docker pull xiangfa/deep-research:latest
+ docker run -d --name deep-research -p 3333:3000 xiangfa/deep-research
+ ```
+
+ You can also specify additional environment variables:
+
+ ```bash
+ docker run -d --name deep-research \
+   -p 3333:3000 \
+   -e ACCESS_PASSWORD=your-password \
+   -e GOOGLE_GENERATIVE_AI_API_KEY=AIzaSy... \
+   xiangfa/deep-research
+ ```
+
+ or build your own docker image:
+
+ ```bash
+ docker build -t deep-research .
+ docker run -d --name deep-research -p 3333:3000 deep-research
+ ```
+
+ If you need to specify other environment variables, please add `-e key=value` to the above command to specify them.
+
+ Deploy using `docker-compose.yml`:
+
+ ```yaml
+ version: '3.9'
+ services:
+   deep-research:
+     image: xiangfa/deep-research
+     container_name: deep-research
+     environment:
+       - ACCESS_PASSWORD=your-password
+       - GOOGLE_GENERATIVE_AI_API_KEY=AIzaSy...
+     ports:
+       - 3333:3000
+ ```
+
+ or build your own docker compose:
+
+ ```bash
+ docker compose -f docker-compose.yml build
+ ```
+
+ ### Static Deployment
+
+ You can also build a static page version directly, and then upload all files in the `out` directory to any website service that supports static pages, such as GitHub Pages, Cloudflare, Vercel, etc.
+
+ ```bash
+ pnpm build:export
+ ```
+
+ ## ⚙️ Configuration
+
+ As mentioned in the "Getting Started" section, Deep Research utilizes environment variables for server-side API configuration.
+
+ Please refer to the file [env.tpl](./env.tpl) for all available environment variables.
+
+ **Important Notes on Environment Variables:**
+
+ - **Privacy Reminder:** These environment variables are primarily used for **server-side API calls**. When using the **local API mode**, no API keys or server-side configurations are needed, further enhancing your privacy.
+
+ - **Multi-key Support:** Multiple keys are supported; separate each key with `,`, e.g. `key1,key2,key3`.
+
+ - **Security Setting:** By setting `ACCESS_PASSWORD`, you can better protect the security of the server API.
+
+ - **Make variables effective:** After adding or modifying environment variables, please redeploy the project for the changes to take effect.
+
+ ## 📄 API documentation
+
+ Currently the project supports two forms of API: Server-Sent Events (SSE) and Model Context Protocol (MCP).
+
+ ### Server-Sent Events API
+
+ The Deep Research API provides a real-time interface for initiating and monitoring complex research tasks.
+
+ It is recommended to use the API via `@microsoft/fetch-event-source`. To get the final report, listen to the `message` event; the data will be returned in the form of a text stream.
+
+ #### POST method
+
+ Endpoint: `/api/sse`
+
+ Method: `POST`
+
+ Body:
+
+ ```typescript
+ interface SSEConfig {
+   // Research topic
+   query: string;
+   // AI provider, possible values include: google, openai, anthropic, deepseek, xai, mistral, azure, openrouter, openaicompatible, pollinations, ollama
+   provider: string;
+   // Thinking model id
+   thinkingModel: string;
+   // Task model id
+   taskModel: string;
+   // Search provider, possible values include: model, tavily, firecrawl, exa, bocha, searxng
+   searchProvider: string;
+   // Response Language, also affects the search language. (optional)
+   language?: string;
+   // Maximum number of search results. Default, `5` (optional)
+   maxResult?: number;
+   // Whether to include content-related images in the final report. Default, `true`. (optional)
+   enableCitationImage?: boolean;
+   // Whether to include citation links in search results and final reports. Default, `true`. (optional)
+   enableReferences?: boolean;
+ }
+ ```
+
+ Headers:
+
+ ```typescript
+ interface Headers {
+   "Content-Type": "application/json";
+   // If you set an access password
+   // Authorization: "Bearer YOUR_ACCESS_PASSWORD";
+ }
+ ```
+
+ See the detailed [API documentation](./docs/deep-research-api-doc.md).
+
+ #### GET method
+
+ This is an interesting implementation. You can watch the whole process of deep research directly through the URL, just like watching a video.
+
+ You can access the deep research report via the following link:
+
+ ```text
+ http://localhost:3000/api/sse/live?query=AI+trends+for+this+year&provider=pollinations&thinkingModel=openai&taskModel=openai-fast&searchProvider=searxng
+ ```
+
+ Query Params:
+
+ ```typescript
+ // The parameters are the same as the POST parameters
+ interface QueryParams extends SSEConfig {
+   // If you set the `ACCESS_PASSWORD` environment variable, this parameter is required
+   password?: string;
+ }
+ ```
+
+ ### Model Context Protocol (MCP) Server
+
+ Currently supports `StreamableHTTP` and `SSE` Server Transport.
+
+ StreamableHTTP server endpoint: `/api/mcp`, transport type: `streamable-http`
+
+ SSE server endpoint: `/api/mcp/sse`, transport type: `sse`
+
+ ```json
+ {
+   "mcpServers": {
+     "deep-research": {
+       "url": "http://127.0.0.1:3000/api/mcp",
+       "transportType": "streamable-http",
+       "timeout": 600
+     }
+   }
+ }
+ ```
+
+ **Note:** Since deep research takes a long time to execute, you need to set a longer timeout to avoid interrupting the research.
+
+ If your server sets `ACCESS_PASSWORD`, the MCP service will be protected and you need to add additional headers parameters:
+
+ ```json
+ {
+   "mcpServers": {
+     "deep-research": {
+       "url": "http://127.0.0.1:3000/api/mcp",
+       "transportType": "streamable-http",
+       "timeout": 600,
+       "headers": {
+         "Authorization": "Bearer YOUR_ACCESS_PASSWORD"
+       }
+     }
+   }
+ }
+ ```
+
+ **Enabling the MCP service requires setting the following global environment variables:**
+
+ ```bash
+ # MCP Server AI provider
+ # Possible values include: google, openai, anthropic, deepseek, xai, mistral, azure, openrouter, openaicompatible, pollinations, ollama
+ MCP_AI_PROVIDER=google
+ # MCP Server search provider. Default, `model`
+ # Possible values include: model, tavily, firecrawl, exa, bocha, searxng
+ MCP_SEARCH_PROVIDER=tavily
+ # MCP Server thinking model id, the core model used in deep research.
+ MCP_THINKING_MODEL=gemini-2.0-flash-thinking-exp
+ # MCP Server task model id, used for secondary tasks, high output models are recommended.
+ MCP_TASK_MODEL=gemini-2.0-flash-exp
+ ```
+
+ **Note:** To ensure that the MCP service can be used normally, you need to set the environment variables of the corresponding model and search engine. For specific environment variable parameters, please refer to [env.tpl](./env.tpl).
+
+ ## 🪄 How it works
+
+ 1. **Research topic**
+
+    - Input research topic
+    - Use local research resources (optional)
+    - Start thinking (or rethinking)
+
+ 2. **Propose your ideas**
+
+    - The system asks questions
+    - Answer system questions (optional)
+    - Write a research plan (or rewrite the research plan)
+    - The system outputs the research plan
+    - Start in-depth research (or re-research)
+    - The system generates SERP queries
+
+ 3. **Information collection**
+
+    - Initial research
+      - Retrieve local research resources based on SERP queries
+      - Collect information from the Internet based on SERP queries
+    - In-depth research (this process can be repeated)
+      - Propose research suggestions (optional)
+      - Start a new round of information collection (the process is the same as the initial research)
+
+ 4. **Generate Final Report**
+
+    - Make a writing request (optional)
+    - Summarize all research materials into a comprehensive Markdown report
+    - Regenerate research report (optional)
+
+ ```mermaid
+ flowchart TB
+     A[Research Topic]:::start
+
+     subgraph Propose[Propose your ideas]
+         B1[System asks questions]:::process
+         B2[System outputs the research plan]:::process
+         B3[System generates SERP queries]:::process
+         B1 --> B2
+         B2 --> B3
+     end
+
+     subgraph Collect[Information collection]
+         C1[Initial research]:::collection
+         C1a[Retrieve local research resources based on SERP queries]:::collection
+         C1b[Collect information from the Internet based on SERP queries]:::collection
+         C2[In-depth research]:::recursive
+         Refine{More in-depth research needed?}:::decision
+
+         C1 --> C1a
+         C1 --> C1b
+         C1a --> C2
+         C1b --> C2
+         C2 --> Refine
+         Refine -->|Yes| C2
+     end
+
+     Report[Generate Final Report]:::output
+
+     A --> Propose
+     B3 --> C1
+
+     %% Connect the exit from the loop/subgraph to the final report
+     Refine -->|No| Report
+
+     %% Styling
+     classDef start fill:#7bed9f,stroke:#2ed573,color:black
+     classDef process fill:#70a1ff,stroke:#1e90ff,color:black
+     classDef recursive fill:#ffa502,stroke:#ff7f50,color:black
+     classDef output fill:#ff4757,stroke:#ff6b81,color:black
+     classDef collection fill:#a8e6cf,stroke:#3b7a57,color:black
+     classDef decision fill:#c8d6e5,stroke:#8395a7,color:black
+
+     class A start
+     class B1,B2,B3 process
+     class C1,C1a,C1b collection
+     class C2 recursive
+     class Refine decision
+     class Report output
+ ```
+
+ ## 🙋 FAQs
+
+ **Why does my Ollama or SearXNG not work properly and display the error `TypeError: Failed to fetch`?**
+
+ If your request triggers a `CORS` error due to browser security restrictions, you need to configure Ollama or SearXNG to allow cross-origin requests. You can also consider using the server proxy mode, where the backend server makes the requests, which effectively avoids cross-origin issues.
+
+ ## 🛡️ Privacy
+
+ Deep Research is designed with your privacy in mind. **All research data and generated reports are stored locally on your machine.** We do not collect or transmit any of your research data to external servers (unless you are explicitly using server-side API calls, in which case data is sent to the API through your configured proxy, if any). Your privacy is our priority.
+
+ ## 🙏 Acknowledgements
+
+ - [Next.js](https://nextjs.org/) - The React framework for building performant web applications.
+ - [Shadcn UI](https://ui.shadcn.com/) - Beautifully designed components that helped streamline the UI development.
+ - [AI SDKs](https://sdk.vercel.ai) - Powering the intelligent research capabilities of Deep Research.
+ - [Deep Research](https://github.com/dzhng/deep-research) - Thanks to the project `dzhng/deep-research` for inspiration.
+
+ ## 🤝 Contributing
+
+ We welcome contributions to Deep Research! If you have ideas for improvements, bug fixes, or new features, please feel free to:
+
+ 1. Fork the repository.
+ 2. Create a new branch for your feature or bug fix.
+ 3. Make your changes and commit them.
+ 4. Submit a pull request.
+
+ For major changes, please open an issue first to discuss your proposed changes.
+
+ ## ✉️ Contact
+
+ If you have any questions, suggestions, or feedback, please create a new [issue](https://github.com/u14app/deep-research/issues).
+
+ ## 🌟 Star History
+
+ [![Star History Chart](https://api.star-history.com/svg?repos=u14app/deep-research&type=Date)](https://www.star-history.com/#u14app/deep-research&Date)
+
+ ## 📝 License
+
+ Deep Research is released under the [MIT License](LICENSE). This license allows for free use, modification, and distribution for both commercial and non-commercial purposes.
components.json ADDED
@@ -0,0 +1,21 @@
+ {
+   "$schema": "https://ui.shadcn.com/schema.json",
+   "style": "new-york",
+   "rsc": true,
+   "tsx": true,
+   "tailwind": {
+     "config": "tailwind.config.ts",
+     "css": "src/app/globals.css",
+     "baseColor": "slate",
+     "cssVariables": true,
+     "prefix": ""
+   },
+   "aliases": {
+     "components": "@/components",
+     "utils": "@/utils/style",
+     "ui": "@/components/ui",
+     "lib": "@/lib",
+     "hooks": "@/hooks"
+   },
+   "iconLibrary": "lucide"
+ }
docker-compose.yml ADDED
@@ -0,0 +1,12 @@
+ version: "3.9"
+ services:
+   deep-research:
+     build:
+       context: .
+       dockerfile: Dockerfile
+     image: deep-research
+     container_name: deep-research
+     env_file:
+       - .env
+     ports:
+       - "3333:3000"
docs/How-to-deploy-to-Cloudflare-Pages.md ADDED
@@ -0,0 +1,14 @@
+ ### How to deploy your project to Cloudflare Pages
+
+ 1. Log in to the Cloudflare dashboard and select your account.
+ 2. Select Compute (Workers) > Create > Pages.
+ 3. Click Connect to Git > deep-research > Begin Setup
+ 4. Framework preset > `Next.js` > Set Environment Variables (optional) > Save and Deploy
+ 5. Deploy > Redeploy
+
+ ### Set a custom domain name (optional)
+
+ Since the `pages.dev` domain name cannot be accessed normally in some countries, you can solve this problem by setting a custom domain name.
+
+ 1. Select Compute (Workers) > Your project.
+ 2. Select Custom Domains > Set up a custom domain.
docs/deep-research-api-doc.md ADDED
@@ -0,0 +1,332 @@
+ # Deep Research API Documentation
+
+ ## Overview
+
+ The Deep Research API provides a real-time interface for initiating and monitoring complex research tasks. Leveraging Server-Sent Events (SSE), it delivers updates, information, messages, progress, and errors as they occur, allowing clients to receive continuous streams of data without polling.
+
+ ## Protocol
+
+ This API uses **Server-Sent Events (SSE)** over HTTP. Clients should establish an HTTP connection and keep it open to receive a stream of events from the server.
+
+ ## Data Format
+
+ All data sent via SSE adheres to the following structure:
+
+ ```text
+ event: EventName
+ data: JSON_String
+
+ ```
+
+ - `event`: Specifies the type of event being sent (e.g., `infor`, `message`, `reasoning`, `progress`, `error`).
+ - `data`: A string containing a JSON object relevant to the event type.
+ - A double newline (`\n\n`) signifies the end of an event block.
+
+ ## API config
+
+ It is recommended to use the API via `@microsoft/fetch-event-source`.
+
+ Endpoint: `/api/sse`
+
+ Method: `POST`
+
+ Body:
+
+ ```typescript
+ interface Config {
+   // Research topic
+   query: string;
+   // AI provider, possible values include: google, openai, anthropic, deepseek, xai, mistral, azure, openrouter, openaicompatible, pollinations, ollama
+   provider: string;
+   // Thinking model id
+   thinkingModel: string;
+   // Task model id
+   taskModel: string;
+   // Search provider, possible values include: model, tavily, firecrawl, exa, bocha, searxng
+   searchProvider: string;
+   // Response Language, also affects the search language. (optional)
+   language?: string;
+   // Maximum number of search results. Default, `5` (optional)
+   maxResult?: number;
+   // Whether to include content-related images in the final report. Default, `true`. (optional)
+   enableCitationImage?: boolean;
+   // Whether to include citation links in search results and final reports. Default, `true`. (optional)
+   enableReferences?: boolean;
+ }
+ ```
+
+ Headers:
+
+ ```typescript
+ interface Headers {
+   "Content-Type": "application/json";
+   // If you set an access password
+   // Authorization: "Bearer YOUR_ACCESS_PASSWORD";
+ }
+ ```
+
+ For specific usage parameter forms, see the [example code](#client-code-example).
+
+ ## Response Events
+
+ The API streams data as a series of events. Each event has a type (`event`) and associated data (`data`).
+
+ ### General Structure
+
+ ```text
+ event: [event_type]
+ data: [JSON_payload]
+
+ ```
+
+ ### Event Types
+
+ The following event types are supported:
+
+ - `infor`
+ - `message`
+ - `reasoning`
+ - `progress`
+ - `error`
+
+ ---
+
+ ### `infor` Event
+
+ Sent at the beginning of the stream (or upon specific requests) to provide initial information about the API instance or the research session.
+
+ **Description:** Provides basic information about the running API instance.
+
+ **Data Structure (`data` field):** A JSON string representing the following structure:
+
+ | Parameter | Type   | Description         |
+ | :-------- | :----- | :------------------ |
+ | `name`    | string | Project name        |
+ | `version` | string | Current API version |
+
+ ```typescript
+ interface InforEvent {
+   // Project name
+   name: string;
+   // Current API version
+   version: string;
+ }
+ ```
+
+ **Example:**
+
+ ```text
+ event: infor
+ data: {"name":"deep-research","version":"0.1.0"}
+
+ ```
+
+ ---
+
+ ### `message` Event
+
+ Used to send text content of deep research to the client.
+
+ **Description:** Delivers textual messages during the research process.
+
+ **Data Structure (`data` field):** A JSON string representing the following structure:
+
+ | Parameter | Type   | Description                             | Notes                                                  |
+ | :-------- | :----- | :-------------------------------------- | :----------------------------------------------------- |
+ | `type`    | string | Type of the message content             | Currently only `"text"` is supported.                  |
+ | `text`    | string | The message content (Markdown format).  | Optional for future types, but required for `"text"`.  |
+
+ ```typescript
+ interface MessageEvent {
+   // Message type, currently only "text" is supported
+   type: "text";
+   // Textual data
+   text?: string;
+ }
+ ```
+
+ **Example:**
+
+ ```text
+ event: message
+ data: {"type":"text","text":"This is a **markdown** string."}
+
+ ```
+
+ ---
+
+ ### `reasoning` Event
+
+ Used to send the thinking content of deep research to the client. Some thinking models support outputting their thinking process.
+
+ **Description:** Delivers the model's reasoning content during the research process.
+
+ **Data Structure (`data` field):** A JSON string representing the following structure:
+
+ | Parameter | Type   | Description                               | Notes                                  |
+ | :-------- | :----- | :---------------------------------------- | :------------------------------------- |
+ | `type`    | string | Type of the reasoning content             | Currently only `"text"` is supported.  |
+ | `text`    | string | The reasoning content (Markdown format).  | Required for `"text"`.                 |
+
+ ```typescript
+ interface ReasoningEvent {
+   // Reasoning type, currently only "text" is supported
+   type: "text";
+   // Textual data
+   text: string;
+ }
+ ```
+
+ **Example:**
+
+ ```text
+ event: reasoning
+ data: {"type":"text","text":"Output thinking process"}
+
+ ```
+
+ ---
+
+ ### `progress` Event
+
+ Communicates the current step and status of the research task execution. This is crucial for providing real-time feedback on the process flow.
+
+ **Description:** Indicates the progress of the research task, including the current step and its status (start or end).
+
+ **Data Structure (`data` field):** A JSON string representing the following structure:
+
+ | Parameter | Type                                                                            | Description                                                                                   | Notes                                                                               |
+ | :-------- | :------------------------------------------------------------------------------ | :-------------------------------------------------------------------------------------------- | :---------------------------------------------------------------------------------- |
+ | `step`    | "report-plan" \| "serp-query" \| "task-list" \| "search-task" \| "final-report" | The identifier of the current step in the research process.                                  | See "Possible `step` Values" below.                                                 |
+ | `status`  | "start" \| "end"                                                                | The status of the current step.                                                               | Indicates if the step is starting or ending. See "Possible `status` Values" below.  |
+ | `name`    | string                                                                          | A descriptive name for the specific instance of the step (e.g., for a specific search task). | Included only when `step` is `"search-task"`.                                       |
+ | `data`    | any                                                                             | Optional data relevant to the step's outcome or details.                                     | May be included when `status` is `"end"`. The content varies by step.               |
+
+ ```typescript
+ interface ProgressEvent {
+   // Current step
+   step:
+     | "report-plan"
+     | "serp-query"
+     | "task-list"
+     | "search-task"
+     | "final-report";
+   // Status of the step
+   status: "start" | "end";
+   // Name of the specific task (e.g., search query)
+   name?: string;
+   // Data related to the step's outcome or details
+   data?: any;
+ }
+ ```
+
+ **Possible `step` Values:**
+
+ - `report-plan`: The system is generating or processing the overall report plan.
+ - `serp-query`: The system is performing a Search Engine Results Page (SERP) query.
+ - `task-list`: The system is generating or processing a list of specific research tasks.
+ - `search-task`: The system is executing a specific search task. This step includes the `name` parameter.
+ - `final-report`: The system is compiling or finalizing the comprehensive research report.
+
+ **Possible `status` Values:**
+
+ - `start`: Indicates that the specified `step` has just begun.
+ - `end`: Indicates that the specified `step` has just finished.
+
+ **Example:**
+
+ ```text
+ event: progress
+ data: {"step":"search-task","status":"start","name":"AI trends for this year"}
+
+ event: progress
+ data: {"step":"search-task","status":"end","name":"AI trends for this year","data":{"results_count": 15}}
+
+ ```
+
+ ---
+
+ ### `error` Event
+
+ Sent when an error occurs during the research process that prevents the task from completing successfully or requires user attention.
+
+ **Description:** Signals that an error has occurred.
+
+ **Data Structure (`data` field):** A JSON string typically containing information about the error. A common structure is:
+
+ | Parameter | Type   | Description                                | Notes |
+ | :-------- | :----- | :----------------------------------------- | :---- |
+ | `message` | string | A human-readable description of the error. |       |
+
+ ```typescript
+ interface ErrorEvent {
+   // A human-readable description of the error.
+   message: string;
+ }
+ ```
+
+ **Example:**
+
+ ```text
+ event: error
+ data: {"message":"Invalid query parameters."}
+
+ ```
+
+ ---
+
+ ## Error Handling
+
+ Clients should always listen for the `error` event. Upon receiving an `error` event, the client should typically display the error message to the user and may consider the current research task terminated unless otherwise specified by the API's behavior.
+
+ ## Client Code Example
+
+ This example demonstrates how to connect to the SSE endpoint using `@microsoft/fetch-event-source` and listen for the defined event types, specifically focusing on accumulating `message` events into the final report.
+
+ ```typescript
+ import { fetchEventSource } from "@microsoft/fetch-event-source";
+
+ const ctrl = new AbortController();
+
+ let report = "";
+ fetchEventSource("/api/sse", {
+   method: "POST",
+   headers: {
+     "Content-Type": "application/json",
+     // If you set an access password
+     // Authorization: "Bearer YOUR_ACCESS_PASSWORD",
+   },
+   body: JSON.stringify({
+     query: "AI trends for this year",
+     provider: "google",
+     thinkingModel: "gemini-2.0-flash-thinking-exp",
+     taskModel: "gemini-2.0-flash-exp",
+     searchProvider: "model",
+     language: "en-US",
+     maxResult: 5,
+     enableCitationImage: true,
+     enableReferences: true,
+   }),
+   signal: ctrl.signal,
+   onmessage(msg) {
+     const msgData = JSON.parse(msg.data);
+     if (msg.event === "message") {
+       if (msgData.type === "text") {
+         report += msgData.text;
+       }
+     } else if (msg.event === "progress") {
+       console.log(
+         `[${msgData.step}]: ${msgData.name ? `${msgData.name} ` : ""}${
+           msgData.status
+         }`
+       );
+       if (msgData.data) console.log(msgData.data);
+     } else if (msg.event === "error") {
+       throw new Error(msgData.message);
+     }
+   },
+   onclose() {
+     console.log(report);
+   },
+ });
+ ```
env.tpl ADDED
@@ -0,0 +1,101 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # (Optional) Server API Access Password for enhanced security
2
+ ACCESS_PASSWORD=
3
+
4
+ # (Optional) Server-side Gemini API Key (Required for server API calls)
5
+ GOOGLE_GENERATIVE_AI_API_KEY=
6
+ # (Optional) Server-side Gemini API Proxy URL. Default, `https://generativelanguage.googleapis.com`
7
+ GOOGLE_GENERATIVE_AI_API_BASE_URL=
8
+ # (Deprecated) Server-side Gemini API Proxy URL. Default, `https://generativelanguage.googleapis.com`
9
+ API_PROXY_BASE_URL=
10
+
11
+ # (Optional) Server-side OpenRouter API Key (Required for server API calls)
12
+ OPENROUTER_API_KEY=
13
+ # (Optional) Server-side OpenRouter API Proxy URL. Default, `https://openrouter.ai`
14
+ OPENROUTER_API_BASE_URL=
15
+
16
+ # (Optional) Server-side OpenAI API Key (Required for server API calls)
17
+ OPENAI_API_KEY=
18
+ # (Optional) Server-side OpenAI API Proxy URL. Default, `https://api.openai.com`
19
+ OPENAI_API_BASE_URL=
20
+
21
+ # (Optional) Server-side Anthropic API Key (Required for server API calls)
22
+ ANTHROPIC_API_KEY=
23
+ # (Optional) Server-side Anthropic API Proxy URL. Default, `https://api.anthropic.com`
24
+ ANTHROPIC_API_BASE_URL=
25
+
26
+ # (Optional) Server-side DeepSeek API Key (Required for server API calls)
27
+ DEEPSEEK_API_KEY=
28
+ # (Optional) Server-side DeepSeek API Proxy URL. Default, `https://api.deepseek.com`
29
+ DEEPSEEK_API_BASE_URL=
30
+
31
+ # (Optional) Server-side XAI API Key (Required for server API calls)
32
+ XAI_API_KEY=
33
+ # (Optional) Server-side XAI API Proxy URL. Default, `https://api.x.ai`
34
+ XAI_API_BASE_URL=
35
+
36
+ # (Optional) Server-side Mistral API Key (Required for server API calls)
37
+ MISTRAL_API_KEY=
38
+ # (Optional) Server-side Mistral API Proxy URL. Default, `https://api.mistral.ai`
39
+ MISTRAL_API_BASE_URL=
40
+
41
+ # (Optional) Server-side Azure API Key (Required for server API calls)
42
+ AZURE_API_KEY=
43
+ # (Optional) Server-side Azure Resource Name. The resource name is used in the assembled URL: `https://{AZURE_RESOURCE_NAME}.openai.azure.com/openai/deployments`
44
+ AZURE_RESOURCE_NAME=
45
+
46
+ # (Optional) Server-side OpenAI-compatible API Key (Required for server API calls)
47
+ OPENAI_COMPATIBLE_API_KEY=
48
+ # (Optional) Server-side OpenAI-compatible API Proxy URL.
49
+ OPENAI_COMPATIBLE_API_BASE_URL=
50
+
51
+ # (Optional) Server-side pollinations.ai API Proxy URL. Default, `https://text.pollinations.ai/openai`
52
+ POLLINATIONS_API_BASE_URL=
53
+
54
+ # (Optional) Server-side Ollama API Proxy URL. Default, `http://0.0.0.0:11434`
55
+ OLLAMA_API_BASE_URL=
56
+
57
+ # (Optional) Server-side Tavily API Key (Required for server API calls)
58
+ TAVILY_API_KEY=
59
+ # (Optional) Server-side Tavily API Proxy URL. Default, `https://api.tavily.com`
60
+ TAVILY_API_BASE_URL=
61
+
62
+ # (Optional) Server-side Firecrawl API Key (Required for server API calls)
63
+ FIRECRAWL_API_KEY=
64
+ # (Optional) Server-side Firecrawl API Proxy URL. Default, `https://api.firecrawl.dev`
65
+ FIRECRAWL_API_BASE_URL=
66
+
67
+ # (Optional) Server-side Exa API Key (Required for server API calls)
68
+ EXA_API_KEY=
69
+ # (Optional) Server-side Exa API Proxy URL. Default, `https://api.exa.ai`
70
+ EXA_API_BASE_URL=
71
+
72
+ # (Optional) Server-side Bocha API Key (Required for server API calls)
73
+ BOCHA_API_KEY=
74
+ # (Optional) Server-side Bocha API Proxy URL. Default, `https://api.bochaai.com`
75
+ BOCHA_API_BASE_URL=
76
+
77
+ # (Optional) Server-side Searxng API Proxy URL. Default, `http://0.0.0.0:8080`
78
+ SEARXNG_API_BASE_URL=
79
+
80
+ # (Optional) MCP Server AI provider
81
+ # Possible values include: google, openai, anthropic, deepseek, xai, mistral, azure, openrouter, openaicompatible, pollinations, ollama
82
+ MCP_AI_PROVIDER=
83
+ # (Optional) MCP Server search provider. Default, `model`
84
+ # Possible values include: model, tavily, firecrawl, exa, bocha, searxng
85
+ MCP_SEARCH_PROVIDER=
86
+ # (Optional) MCP Server thinking model id, the core model used in deep research.
87
+ MCP_THINKING_MODEL=
88
+ # (Optional) MCP Server task model id, used for secondary tasks; models with high output limits are recommended.
89
+ MCP_TASK_MODEL=
90
+
91
+ # (Optional) Disable server-side AI provider usage permissions
92
+ # Possible values include: google, openai, anthropic, deepseek, xai, mistral, azure, openrouter, openaicompatible, pollinations, ollama
93
+ NEXT_PUBLIC_DISABLED_AI_PROVIDER=
94
+ # (Optional) Disable server-side search provider usage permissions
95
+ # Possible values include: model, tavily, firecrawl, exa, bocha, searxng
96
+ NEXT_PUBLIC_DISABLED_SEARCH_PROVIDER=
97
+ # (Optional) Customize the model list, add or delete models
98
+ NEXT_PUBLIC_MODEL_LIST=
99
+
100
+ # (Optional) Injected script code can be used for statistics or error tracking.
101
+ HEAD_SCRIPTS=
eslint.config.mjs ADDED
@@ -0,0 +1,21 @@
1
+ import { dirname } from "path";
2
+ import { fileURLToPath } from "url";
3
+ import { FlatCompat } from "@eslint/eslintrc";
4
+
5
+ const __filename = fileURLToPath(import.meta.url);
6
+ const __dirname = dirname(__filename);
7
+
8
+ const compat = new FlatCompat({
9
+ baseDirectory: __dirname,
10
+ });
11
+
12
+ const eslintConfig = [
13
+ ...compat.extends("next/core-web-vitals", "next/typescript"),
14
+ {
15
+ rules: {
16
+ "@typescript-eslint/no-explicit-any": "off",
17
+ },
18
+ },
19
+ ];
20
+
21
+ export default eslintConfig;
next.config.ts ADDED
@@ -0,0 +1,158 @@
1
+ import type { NextConfig } from "next";
2
+ import withSerwistInit from "@serwist/next";
3
+ import { PHASE_PRODUCTION_BUILD } from "next/constants.js";
4
+ import pkg from "./package.json";
5
+
6
+ const BUILD_MODE = process.env.NEXT_PUBLIC_BUILD_MODE;
7
+ // AI provider API base url
8
+ const API_PROXY_BASE_URL = process.env.API_PROXY_BASE_URL || "";
9
+ const GOOGLE_GENERATIVE_AI_API_BASE_URL =
10
+ process.env.GOOGLE_GENERATIVE_AI_API_BASE_URL ||
11
+ "https://generativelanguage.googleapis.com";
12
+ const OPENROUTER_API_BASE_URL =
13
+ process.env.OPENROUTER_API_BASE_URL || "https://openrouter.ai/api";
14
+ const OPENAI_API_BASE_URL =
15
+ process.env.OPENAI_API_BASE_URL || "https://api.openai.com";
16
+ const ANTHROPIC_API_BASE_URL =
17
+ process.env.ANTHROPIC_API_BASE_URL || "https://api.anthropic.com";
18
+ const DEEPSEEK_API_BASE_URL =
19
+ process.env.DEEPSEEK_API_BASE_URL || "https://api.deepseek.com";
20
+ const XAI_API_BASE_URL = process.env.XAI_API_BASE_URL || "https://api.x.ai";
21
+ const MISTRAL_API_BASE_URL =
22
+ process.env.MISTRAL_API_BASE_URL || "https://api.mistral.ai";
23
+ const AZURE_API_BASE_URL = `https://${process.env.AZURE_RESOURCE_NAME}.openai.azure.com/openai/deployments`;
24
+ const OPENAI_COMPATIBLE_API_BASE_URL =
25
+ process.env.OPENAI_COMPATIBLE_API_BASE_URL || "";
26
+ const POLLINATIONS_API_BASE_URL =
27
+ process.env.POLLINATIONS_API_BASE_URL ||
28
+ "https://text.pollinations.ai/openai";
29
+ const OLLAMA_API_BASE_URL =
30
+ process.env.OLLAMA_API_BASE_URL || "http://0.0.0.0:11434";
31
+ // Search provider API base url
32
+ const TAVILY_API_BASE_URL =
33
+ process.env.TAVILY_API_BASE_URL || "https://api.tavily.com";
34
+ const FIRECRAWL_API_BASE_URL =
35
+ process.env.FIRECRAWL_API_BASE_URL || "https://api.firecrawl.dev";
36
+ const EXA_API_BASE_URL = process.env.EXA_API_BASE_URL || "https://api.exa.ai";
37
+ const BOCHA_API_BASE_URL =
38
+ process.env.BOCHA_API_BASE_URL || "https://api.bochaai.com";
39
+ const SEARXNG_API_BASE_URL =
40
+ process.env.SEARXNG_API_BASE_URL || "http://0.0.0.0:8080";
41
+
42
+ export default async function Config(phase: string) {
43
+ const nextConfig: NextConfig = {
44
+ /* config options here */
45
+ experimental: {
46
+ reactCompiler: true,
47
+ },
48
+ env: {
49
+ NEXT_PUBLIC_VERSION: pkg.version,
50
+ },
51
+ transpilePackages: ["pdfjs-dist", "mermaid"],
52
+ };
53
+
54
+ if (BUILD_MODE === "export") {
55
+ nextConfig.output = "export";
56
+ // Only used for static deployment, the default deployment directory is the root directory
57
+ nextConfig.basePath = "";
58
+ // Statically exporting a Next.js application via `next export` disables API routes and middleware.
59
+ nextConfig.webpack = (config) => {
60
+ config.module.rules.push({
61
+ test: /src\/app\/api/,
62
+ loader: "ignore-loader",
63
+ });
64
+ config.module.rules.push({
65
+ test: /src\/middleware/,
66
+ loader: "ignore-loader",
67
+ });
68
+ return config;
69
+ };
70
+ } else if (BUILD_MODE === "standalone") {
71
+ nextConfig.output = "standalone";
72
+ } else {
73
+ nextConfig.rewrites = async () => {
74
+ return [
75
+ {
76
+ source: "/api/ai/google/:path*",
77
+ destination: `${
78
+ GOOGLE_GENERATIVE_AI_API_BASE_URL || API_PROXY_BASE_URL
79
+ }/:path*`,
80
+ },
81
+ {
82
+ source: "/api/ai/openrouter/:path*",
83
+ destination: `${OPENROUTER_API_BASE_URL}/:path*`,
84
+ },
85
+ {
86
+ source: "/api/ai/openai/:path*",
87
+ destination: `${OPENAI_API_BASE_URL}/:path*`,
88
+ },
89
+ {
90
+ source: "/api/ai/anthropic/:path*",
91
+ destination: `${ANTHROPIC_API_BASE_URL}/:path*`,
92
+ },
93
+ {
94
+ source: "/api/ai/deepseek/:path*",
95
+ destination: `${DEEPSEEK_API_BASE_URL}/:path*`,
96
+ },
97
+ {
98
+ source: "/api/ai/xai/:path*",
99
+ destination: `${XAI_API_BASE_URL}/:path*`,
100
+ },
101
+ {
102
+ source: "/api/ai/mistral/:path*",
103
+ destination: `${MISTRAL_API_BASE_URL}/:path*`,
104
+ },
105
+ {
106
+ source: "/api/ai/azure/:path*",
107
+ destination: `${AZURE_API_BASE_URL}/:path*`,
108
+ },
109
+ {
110
+ source: "/api/ai/openaicompatible/:path*",
111
+ destination: `${OPENAI_COMPATIBLE_API_BASE_URL}/:path*`,
112
+ },
113
+ {
114
+ source: "/api/ai/pollinations/:path*",
115
+ destination: `${POLLINATIONS_API_BASE_URL}/:path*`,
116
+ },
117
+ {
118
+ source: "/api/ai/ollama/:path*",
119
+ destination: `${OLLAMA_API_BASE_URL}/:path*`,
120
+ },
121
+ {
122
+ source: "/api/search/tavily/:path*",
123
+ destination: `${TAVILY_API_BASE_URL}/:path*`,
124
+ },
125
+ {
126
+ source: "/api/search/firecrawl/:path*",
127
+ destination: `${FIRECRAWL_API_BASE_URL}/:path*`,
128
+ },
129
+ {
130
+ source: "/api/search/exa/:path*",
131
+ destination: `${EXA_API_BASE_URL}/:path*`,
132
+ },
133
+ {
134
+ source: "/api/search/bocha/:path*",
135
+ destination: `${BOCHA_API_BASE_URL}/:path*`,
136
+ },
137
+ {
138
+ source: "/api/search/searxng/:path*",
139
+ destination: `${SEARXNG_API_BASE_URL}/:path*`,
140
+ },
141
+ ];
142
+ };
143
+ }
144
+
145
+ if (phase === PHASE_PRODUCTION_BUILD) {
146
+ const withSerwist = withSerwistInit({
147
+ // Note: This is only an example. If you use Pages Router,
148
+ // use something else that works, such as "service-worker/index.ts".
149
+ swSrc: "src/app/sw.ts",
150
+ swDest: "public/sw.js",
151
+ register: false,
152
+ });
153
+
154
+ return withSerwist(nextConfig);
155
+ }
156
+
157
+ return nextConfig;
158
+ }
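With these rewrites active (the default build mode, i.e. neither `export` nor `standalone`), browser requests to the same-origin `/api/...` paths are forwarded to the configured upstream hosts. A minimal client-side sketch, assuming the default `OPENAI_API_BASE_URL`, where `/api/ai/openai/v1/models` is proxied to `https://api.openai.com/v1/models`:

```typescript
// Hypothetical call that relies on the `/api/ai/openai/:path*` rewrite above.
async function listOpenAIModels(apiKey: string) {
  const res = await fetch("/api/ai/openai/v1/models", {
    headers: { Authorization: `Bearer ${apiKey}` },
  });
  return res.json();
}
```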
package.json ADDED
@@ -0,0 +1,114 @@
1
+ {
2
+ "name": "deep-research",
3
+ "description": "Use any LLMs (Large Language Models) for Deep Research. Support SSE API and MCP server.",
4
+ "version": "0.9.18",
5
+ "license": "MIT",
6
+ "repository": {
7
+ "url": "https://github.com/u14app/deep-research"
8
+ },
9
+ "bugs": "https://github.com/u14app/deep-research/issues",
10
+ "private": true,
11
+ "scripts": {
12
+ "dev": "next dev --turbopack",
13
+ "build": "next build",
14
+ "build:standalone": "cross-env NEXT_PUBLIC_BUILD_MODE=standalone next build",
15
+ "build:export": "cross-env NEXT_PUBLIC_BUILD_MODE=export next build",
16
+ "start": "next start",
17
+ "lint": "next lint"
18
+ },
19
+ "dependencies": {
20
+ "@ai-sdk/anthropic": "^1.2.10",
21
+ "@ai-sdk/azure": "^1.3.23",
22
+ "@ai-sdk/deepseek": "^0.2.13",
23
+ "@ai-sdk/google": "^1.2.14",
24
+ "@ai-sdk/mistral": "^1.2.8",
25
+ "@ai-sdk/openai": "^1.3.21",
26
+ "@ai-sdk/openai-compatible": "^0.2.14",
27
+ "@ai-sdk/ui-utils": "^1.2.9",
28
+ "@ai-sdk/xai": "^1.2.15",
29
+ "@hookform/resolvers": "^4.1.2",
30
+ "@openrouter/ai-sdk-provider": "^0.4.5",
31
+ "@radix-ui/react-accordion": "^1.2.3",
32
+ "@radix-ui/react-dialog": "^1.1.6",
33
+ "@radix-ui/react-dropdown-menu": "^2.1.6",
34
+ "@radix-ui/react-label": "^2.1.2",
35
+ "@radix-ui/react-popover": "^1.1.6",
36
+ "@radix-ui/react-scroll-area": "^1.2.6",
37
+ "@radix-ui/react-select": "^2.1.6",
38
+ "@radix-ui/react-separator": "^1.1.2",
39
+ "@radix-ui/react-slider": "^1.2.3",
40
+ "@radix-ui/react-slot": "^1.1.2",
41
+ "@radix-ui/react-tabs": "^1.1.3",
42
+ "@radix-ui/react-tooltip": "^1.1.8",
43
+ "@serwist/next": "^9.0.14",
44
+ "@xiangfa/mdeditor": "^0.2.3",
45
+ "@zip.js/zip.js": "^2.7.60",
46
+ "ai": "^4.3.12",
47
+ "class-variance-authority": "^0.7.1",
48
+ "clsx": "^2.1.1",
49
+ "copy-to-clipboard": "^3.3.3",
50
+ "dayjs": "^1.11.13",
51
+ "file-saver": "^2.0.5",
52
+ "fuse.js": "^7.1.0",
53
+ "i18next": "^24.2.3",
54
+ "i18next-browser-languagedetector": "^8.0.4",
55
+ "i18next-resources-to-backend": "^1.2.1",
56
+ "katex": "^0.16.22",
57
+ "localforage": "^1.10.0",
58
+ "lucide-react": "^0.475.0",
59
+ "marked": "^15.0.12",
60
+ "mermaid": "^11.6.0",
61
+ "nanoid": "^5.1.5",
62
+ "next": "^15.3.1",
63
+ "next-themes": "^0.4.4",
64
+ "ollama-ai-provider": "^1.2.0",
65
+ "p-limit": "^6.2.0",
66
+ "pdfjs-dist": "5.1.91",
67
+ "radash": "^12.1.0",
68
+ "react": "^19.1.0",
69
+ "react-dom": "^19.1.0",
70
+ "react-hook-form": "^7.54.2",
71
+ "react-i18next": "^15.4.1",
72
+ "react-markdown": "^10.1.0",
73
+ "react-resizable-panels": "^3.0.1",
74
+ "react-use-pwa-install": "^1.0.3",
75
+ "react-zoom-pan-pinch": "^3.7.0",
76
+ "rehype-highlight": "^7.0.2",
77
+ "rehype-katex": "^7.0.1",
78
+ "rehype-raw": "^7.0.0",
79
+ "remark-breaks": "^4.0.0",
80
+ "remark-gfm": "^4.0.1",
81
+ "remark-math": "^6.0.0",
82
+ "sonner": "^2.0.1",
83
+ "tailwind-merge": "^3.0.2",
84
+ "tailwindcss-animate": "^1.0.7",
85
+ "ts-md5": "^1.3.1",
86
+ "unist-util-visit": "^5.0.0",
87
+ "zod": "^3.24.2",
88
+ "zod-to-json-schema": "^3.24.3",
89
+ "zustand": "^5.0.3"
90
+ },
91
+ "devDependencies": {
92
+ "@eslint/eslintrc": "^3",
93
+ "@tailwindcss/typography": "^0.5.16",
94
+ "@types/file-saver": "^2.0.7",
95
+ "@types/hast": "^3.0.4",
96
+ "@types/jsdom": "^21.1.7",
97
+ "@types/node": "^20",
98
+ "@types/react": "^19",
99
+ "@types/react-dom": "^19",
100
+ "babel-plugin-react-compiler": "19.1.0-rc.1",
101
+ "cross-env": "^7.0.3",
102
+ "eslint": "^9",
103
+ "eslint-config-next": "15.1.7",
104
+ "ignore-loader": "^0.1.2",
105
+ "postcss": "^8",
106
+ "serwist": "^9.0.14",
107
+ "tailwindcss": "^3.4.1",
108
+ "typescript": "^5"
109
+ },
110
+ "engines": {
111
+ "npm": ">= 9.8.0",
112
+ "node": ">= 18.18.0"
113
+ }
114
+ }
pnpm-lock.yaml ADDED
The diff for this file is too large to render. See raw diff
 
postcss.config.mjs ADDED
@@ -0,0 +1,7 @@
1
+ const config = {
2
+ plugins: {
3
+ tailwindcss: {},
4
+ },
5
+ };
6
+
7
+ export default config;
public/logo.png ADDED
public/logo.svg ADDED
public/screenshots/main-interface.png ADDED
public/scripts/eruda.min.js ADDED
The diff for this file is too large to render. See raw diff
 
public/scripts/pdf.worker.min.mjs ADDED
The diff for this file is too large to render. See raw diff
 
src/app/api/ai/anthropic/[...slug]/route.ts ADDED
@@ -0,0 +1,55 @@
1
+ import { NextResponse, type NextRequest } from "next/server";
2
+ import { ANTHROPIC_BASE_URL } from "@/constants/urls";
3
+
4
+ export const runtime = "edge";
5
+ export const preferredRegion = [
6
+ "cle1",
7
+ "iad1",
8
+ "pdx1",
9
+ "sfo1",
10
+ "sin1",
11
+ "syd1",
12
+ "hnd1",
13
+ "kix1",
14
+ ];
15
+
16
+ const API_PROXY_BASE_URL =
17
+ process.env.ANTHROPIC_API_BASE_URL || ANTHROPIC_BASE_URL;
18
+
19
+ async function handler(req: NextRequest) {
20
+ let body;
21
+ if (req.method.toUpperCase() !== "GET") {
22
+ body = await req.json();
23
+ }
24
+ const searchParams = req.nextUrl.searchParams;
25
+ const path = searchParams.getAll("slug");
26
+ searchParams.delete("slug");
27
+ const params = searchParams.toString();
28
+
29
+ try {
30
+ let url = `${API_PROXY_BASE_URL}/${decodeURIComponent(path.join("/"))}`;
31
+ if (params) url += `?${params}`;
32
+ const payload: RequestInit = {
33
+ method: req.method,
34
+ headers: {
35
+ "Content-Type": req.headers.get("Content-Type") || "application/json",
36
+ "x-api-key": req.headers.get("x-api-key") || "",
37
+ "anthropic-version":
38
+ req.headers.get("anthropic-version") || "2023-06-01",
39
+ },
40
+ };
41
+ if (body) payload.body = JSON.stringify(body);
42
+ const response = await fetch(url, payload);
43
+ return new NextResponse(response.body, response);
44
+ } catch (error) {
45
+ if (error instanceof Error) {
46
+ console.error(error);
47
+ return NextResponse.json(
48
+ { code: 500, message: error.message },
49
+ { status: 500 }
50
+ );
51
+ }
52
+ }
53
+ }
54
+
55
+ export { handler as GET, handler as POST, handler as PUT, handler as DELETE };
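Each of these `[...slug]` routes follows the same proxy pattern: the slug segments become the upstream path, the query string is forwarded, and only the provider-specific auth headers are passed through. A sketch of calling this Anthropic proxy, assuming Anthropic's `v1/messages` endpoint and an illustrative model id:

```typescript
// Hypothetical request through the Anthropic proxy route; the handler forwards
// "x-api-key" and defaults "anthropic-version" to 2023-06-01 when omitted.
async function askClaude(apiKey: string) {
  const res = await fetch("/api/ai/anthropic/v1/messages", {
    method: "POST",
    headers: { "Content-Type": "application/json", "x-api-key": apiKey },
    body: JSON.stringify({
      model: "claude-3-5-sonnet-latest", // assumption: any valid Anthropic model id
      max_tokens: 256,
      messages: [{ role: "user", content: "Hello" }],
    }),
  });
  return res.json();
}
```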
src/app/api/ai/azure/[...slug]/route.ts ADDED
@@ -0,0 +1,56 @@
1
+ import { NextResponse, type NextRequest } from "next/server";
2
+
3
+ export const runtime = "edge";
4
+ export const preferredRegion = [
5
+ "cle1",
6
+ "iad1",
7
+ "pdx1",
8
+ "sfo1",
9
+ "sin1",
10
+ "syd1",
11
+ "hnd1",
12
+ "kix1",
13
+ ];
14
+
15
+ const API_PROXY_BASE_URL = `https://${process.env.AZURE_RESOURCE_NAME}.openai.azure.com/openai/deployments`;
16
+ const API_VERSION = process.env.AZURE_API_VERSION || "";
17
+
18
+ async function handler(req: NextRequest) {
19
+ let body;
20
+ if (req.method.toUpperCase() !== "GET") {
21
+ body = await req.json();
22
+ }
23
+ const searchParams = req.nextUrl.searchParams;
24
+ const path = searchParams.getAll("slug");
25
+ searchParams.delete("slug");
26
+ if (API_VERSION) searchParams.append("api-version", API_VERSION);
27
+ const params = searchParams.toString();
28
+
29
+ try {
30
+ if (API_PROXY_BASE_URL === "") {
31
+ throw new Error("API base url is missing.");
32
+ }
33
+ let url = `${API_PROXY_BASE_URL}/${decodeURIComponent(path.join("/"))}`;
34
+ if (params) url += `?${params}`;
35
+ const payload: RequestInit = {
36
+ method: req.method,
37
+ headers: {
38
+ "Content-Type": req.headers.get("Content-Type") || "application/json",
39
+ Authorization: req.headers.get("Authorization") || "",
40
+ },
41
+ };
42
+ if (body) payload.body = JSON.stringify(body);
43
+ const response = await fetch(url, payload);
44
+ return new NextResponse(response.body, response);
45
+ } catch (error) {
46
+ if (error instanceof Error) {
47
+ console.error(error);
48
+ return NextResponse.json(
49
+ { code: 500, message: error.message },
50
+ { status: 500 }
51
+ );
52
+ }
53
+ }
54
+ }
55
+
56
+ export { handler as GET, handler as POST, handler as PUT, handler as DELETE };
src/app/api/ai/deepseek/[...slug]/route.ts ADDED
@@ -0,0 +1,53 @@
1
+ import { NextResponse, type NextRequest } from "next/server";
2
+ import { DEEPSEEK_BASE_URL } from "@/constants/urls";
3
+
4
+ export const runtime = "edge";
5
+ export const preferredRegion = [
6
+ "cle1",
7
+ "iad1",
8
+ "pdx1",
9
+ "sfo1",
10
+ "sin1",
11
+ "syd1",
12
+ "hnd1",
13
+ "kix1",
14
+ ];
15
+
16
+ const API_PROXY_BASE_URL =
17
+ process.env.DEEPSEEK_API_BASE_URL || DEEPSEEK_BASE_URL;
18
+
19
+ async function handler(req: NextRequest) {
20
+ let body;
21
+ if (req.method.toUpperCase() !== "GET") {
22
+ body = await req.json();
23
+ }
24
+ const searchParams = req.nextUrl.searchParams;
25
+ const path = searchParams.getAll("slug");
26
+ searchParams.delete("slug");
27
+ const params = searchParams.toString();
28
+
29
+ try {
30
+ let url = `${API_PROXY_BASE_URL}/${decodeURIComponent(path.join("/"))}`;
31
+ if (params) url += `?${params}`;
32
+ const payload: RequestInit = {
33
+ method: req.method,
34
+ headers: {
35
+ "Content-Type": req.headers.get("Content-Type") || "application/json",
36
+ Authorization: req.headers.get("Authorization") || "",
37
+ },
38
+ };
39
+ if (body) payload.body = JSON.stringify(body);
40
+ const response = await fetch(url, payload);
41
+ return new NextResponse(response.body, response);
42
+ } catch (error) {
43
+ if (error instanceof Error) {
44
+ console.error(error);
45
+ return NextResponse.json(
46
+ { code: 500, message: error.message },
47
+ { status: 500 }
48
+ );
49
+ }
50
+ }
51
+ }
52
+
53
+ export { handler as GET, handler as POST, handler as PUT, handler as DELETE };
src/app/api/ai/google/[...slug]/route.ts ADDED
@@ -0,0 +1,57 @@
1
+ import { NextResponse, type NextRequest } from "next/server";
2
+ import { GEMINI_BASE_URL } from "@/constants/urls";
3
+
4
+ export const runtime = "edge";
5
+ export const preferredRegion = [
6
+ "cle1",
7
+ "iad1",
8
+ "pdx1",
9
+ "sfo1",
10
+ "sin1",
11
+ "syd1",
12
+ "hnd1",
13
+ "kix1",
14
+ ];
15
+
16
+ const API_PROXY_BASE_URL =
17
+ process.env.API_PROXY_BASE_URL ||
18
+ process.env.GOOGLE_GENERATIVE_AI_API_BASE_URL ||
19
+ GEMINI_BASE_URL;
20
+
21
+ async function handler(req: NextRequest) {
22
+ let body;
23
+ if (req.method.toUpperCase() !== "GET") {
24
+ body = await req.json();
25
+ }
26
+ const searchParams = req.nextUrl.searchParams;
27
+ const path = searchParams.getAll("slug");
28
+ searchParams.delete("slug");
29
+ const params = searchParams.toString();
30
+
31
+ try {
32
+ let url = `${API_PROXY_BASE_URL}/${decodeURIComponent(path.join("/"))}`;
33
+ if (params) url += `?${params}`;
34
+ const payload: RequestInit = {
35
+ method: req.method,
36
+ headers: {
37
+ "Content-Type": req.headers.get("Content-Type") || "application/json",
38
+ "x-goog-api-client":
39
+ req.headers.get("x-goog-api-client") || "genai-js/0.24.0",
40
+ "x-goog-api-key": req.headers.get("x-goog-api-key") || "",
41
+ },
42
+ };
43
+ if (body) payload.body = JSON.stringify(body);
44
+ const response = await fetch(url, payload);
45
+ return new NextResponse(response.body, response);
46
+ } catch (error) {
47
+ if (error instanceof Error) {
48
+ console.error(error);
49
+ return NextResponse.json(
50
+ { code: 500, message: error.message },
51
+ { status: 500 }
52
+ );
53
+ }
54
+ }
55
+ }
56
+
57
+ export { handler as GET, handler as POST, handler as PUT, handler as DELETE };
src/app/api/ai/mistral/[...slug]/route.ts ADDED
@@ -0,0 +1,52 @@
1
+ import { NextResponse, type NextRequest } from "next/server";
2
+ import { MISTRAL_BASE_URL } from "@/constants/urls";
3
+
4
+ export const runtime = "edge";
5
+ export const preferredRegion = [
6
+ "cle1",
7
+ "iad1",
8
+ "pdx1",
9
+ "sfo1",
10
+ "sin1",
11
+ "syd1",
12
+ "hnd1",
13
+ "kix1",
14
+ ];
15
+
16
+ const API_PROXY_BASE_URL = process.env.MISTRAL_API_BASE_URL || MISTRAL_BASE_URL;
17
+
18
+ async function handler(req: NextRequest) {
19
+ let body;
20
+ if (req.method.toUpperCase() !== "GET") {
21
+ body = await req.json();
22
+ }
23
+ const searchParams = req.nextUrl.searchParams;
24
+ const path = searchParams.getAll("slug");
25
+ searchParams.delete("slug");
26
+ const params = searchParams.toString();
27
+
28
+ try {
29
+ let url = `${API_PROXY_BASE_URL}/${decodeURIComponent(path.join("/"))}`;
30
+ if (params) url += `?${params}`;
31
+ const payload: RequestInit = {
32
+ method: req.method,
33
+ headers: {
34
+ "Content-Type": req.headers.get("Content-Type") || "application/json",
35
+ Authorization: req.headers.get("Authorization") || "",
36
+ },
37
+ };
38
+ if (body) payload.body = JSON.stringify(body);
39
+ const response = await fetch(url, payload);
40
+ return new NextResponse(response.body, response);
41
+ } catch (error) {
42
+ if (error instanceof Error) {
43
+ console.error(error);
44
+ return NextResponse.json(
45
+ { code: 500, message: error.message },
46
+ { status: 500 }
47
+ );
48
+ }
49
+ }
50
+ }
51
+
52
+ export { handler as GET, handler as POST, handler as PUT, handler as DELETE };
src/app/api/ai/ollama/[...slug]/route.ts ADDED
@@ -0,0 +1,51 @@
1
+ import { NextResponse, type NextRequest } from "next/server";
2
+ import { OLLAMA_BASE_URL } from "@/constants/urls";
3
+
4
+ export const runtime = "edge";
5
+ export const preferredRegion = [
6
+ "cle1",
7
+ "iad1",
8
+ "pdx1",
9
+ "sfo1",
10
+ "sin1",
11
+ "syd1",
12
+ "hnd1",
13
+ "kix1",
14
+ ];
15
+
16
+ const API_PROXY_BASE_URL = process.env.OLLAMA_API_BASE_URL || OLLAMA_BASE_URL;
17
+
18
+ async function handler(req: NextRequest) {
19
+ let body;
20
+ if (req.method.toUpperCase() !== "GET") {
21
+ body = await req.json();
22
+ }
23
+ const searchParams = req.nextUrl.searchParams;
24
+ const path = searchParams.getAll("slug");
25
+ searchParams.delete("slug");
26
+ const params = searchParams.toString();
27
+
28
+ try {
29
+ let url = `${API_PROXY_BASE_URL}/${decodeURIComponent(path.join("/"))}`;
30
+ if (params) url += `?${params}`;
31
+ const payload: RequestInit = {
32
+ method: req.method,
33
+ headers: {
34
+ "Content-Type": req.headers.get("Content-Type") || "application/json",
35
+ },
36
+ };
37
+ if (body) payload.body = JSON.stringify(body);
38
+ const response = await fetch(url, payload);
39
+ return new NextResponse(response.body, response);
40
+ } catch (error) {
41
+ if (error instanceof Error) {
42
+ console.error(error);
43
+ return NextResponse.json(
44
+ { code: 500, message: error.message },
45
+ { status: 500 }
46
+ );
47
+ }
48
+ }
49
+ }
50
+
51
+ export { handler as GET, handler as POST, handler as PUT, handler as DELETE };
src/app/api/ai/openai/[...slug]/route.ts ADDED
@@ -0,0 +1,52 @@
1
+ import { NextResponse, type NextRequest } from "next/server";
2
+ import { OPENAI_BASE_URL } from "@/constants/urls";
3
+
4
+ export const runtime = "edge";
5
+ export const preferredRegion = [
6
+ "cle1",
7
+ "iad1",
8
+ "pdx1",
9
+ "sfo1",
10
+ "sin1",
11
+ "syd1",
12
+ "hnd1",
13
+ "kix1",
14
+ ];
15
+
16
+ const API_PROXY_BASE_URL = process.env.OPENAI_API_BASE_URL || OPENAI_BASE_URL;
17
+
18
+ async function handler(req: NextRequest) {
19
+ let body;
20
+ if (req.method.toUpperCase() !== "GET") {
21
+ body = await req.json();
22
+ }
23
+ const searchParams = req.nextUrl.searchParams;
24
+ const path = searchParams.getAll("slug");
25
+ searchParams.delete("slug");
26
+ const params = searchParams.toString();
27
+
28
+ try {
29
+ let url = `${API_PROXY_BASE_URL}/${decodeURIComponent(path.join("/"))}`;
30
+ if (params) url += `?${params}`;
31
+ const payload: RequestInit = {
32
+ method: req.method,
33
+ headers: {
34
+ "Content-Type": req.headers.get("Content-Type") || "application/json",
35
+ Authorization: req.headers.get("Authorization") || "",
36
+ },
37
+ };
38
+ if (body) payload.body = JSON.stringify(body);
39
+ const response = await fetch(url, payload);
40
+ return new NextResponse(response.body, response);
41
+ } catch (error) {
42
+ if (error instanceof Error) {
43
+ console.error(error);
44
+ return NextResponse.json(
45
+ { code: 500, message: error.message },
46
+ { status: 500 }
47
+ );
48
+ }
49
+ }
50
+ }
51
+
52
+ export { handler as GET, handler as POST, handler as PUT, handler as DELETE };
src/app/api/ai/openaicompatible/[...slug]/route.ts ADDED
@@ -0,0 +1,51 @@
1
+ import { NextResponse, type NextRequest } from "next/server";
2
+
3
+ export const runtime = "edge";
4
+ export const preferredRegion = [
5
+ "cle1",
6
+ "iad1",
7
+ "pdx1",
8
+ "sfo1",
9
+ "sin1",
10
+ "syd1",
11
+ "hnd1",
12
+ "kix1",
13
+ ];
14
+
15
+ const API_PROXY_BASE_URL = process.env.OPENAI_COMPATIBLE_API_BASE_URL || "";
16
+
17
+ async function handler(req: NextRequest) {
18
+ let body;
19
+ if (req.method.toUpperCase() !== "GET") {
20
+ body = await req.json();
21
+ }
22
+ const searchParams = req.nextUrl.searchParams;
23
+ const path = searchParams.getAll("slug");
24
+ searchParams.delete("slug");
25
+ const params = searchParams.toString();
26
+
27
+ try {
28
+ let url = `${API_PROXY_BASE_URL}/${decodeURIComponent(path.join("/"))}`;
29
+ if (params) url += `?${params}`;
30
+ const payload: RequestInit = {
31
+ method: req.method,
32
+ headers: {
33
+ "Content-Type": req.headers.get("Content-Type") || "application/json",
34
+ Authorization: req.headers.get("Authorization") || "",
35
+ },
36
+ };
37
+ if (body) payload.body = JSON.stringify(body);
38
+ const response = await fetch(url, payload);
39
+ return new NextResponse(response.body, response);
40
+ } catch (error) {
41
+ if (error instanceof Error) {
42
+ console.error(error);
43
+ return NextResponse.json(
44
+ { code: 500, message: error.message },
45
+ { status: 500 }
46
+ );
47
+ }
48
+ }
49
+ }
50
+
51
+ export { handler as GET, handler as POST, handler as PUT, handler as DELETE };
src/app/api/ai/openrouter/[...slug]/route.ts ADDED
@@ -0,0 +1,54 @@
1
+ import { NextResponse, type NextRequest } from "next/server";
2
+ import { OPENROUTER_BASE_URL } from "@/constants/urls";
3
+
4
+ export const runtime = "edge";
5
+ export const preferredRegion = [
6
+ "cle1",
7
+ "iad1",
8
+ "pdx1",
9
+ "sfo1",
10
+ "sin1",
11
+ "syd1",
12
+ "hnd1",
13
+ "kix1",
14
+ ];
15
+
16
+ const API_PROXY_BASE_URL =
17
+ process.env.OPENROUTER_API_BASE_URL || OPENROUTER_BASE_URL;
18
+
19
+ async function handler(req: NextRequest) {
20
+ let body;
21
+ if (req.method.toUpperCase() !== "GET") {
22
+ body = await req.json();
23
+ }
24
+ const searchParams = req.nextUrl.searchParams;
25
+ const path = searchParams.getAll("slug");
26
+ searchParams.delete("slug");
27
+ const params = searchParams.toString();
28
+
29
+ try {
30
+ let url = `${API_PROXY_BASE_URL}/api/${decodeURIComponent(path.join("/"))}`;
31
+ if (params) url += `?${params}`;
32
+ console.log(url);
33
+ const payload: RequestInit = {
34
+ method: req.method,
35
+ headers: {
36
+ "Content-Type": req.headers.get("Content-Type") || "application/json",
37
+ Authorization: req.headers.get("Authorization") || "",
38
+ },
39
+ };
40
+ if (body) payload.body = JSON.stringify(body);
41
+ const response = await fetch(url, payload);
42
+ return new NextResponse(response.body, response);
43
+ } catch (error) {
44
+ if (error instanceof Error) {
45
+ console.error(error);
46
+ return NextResponse.json(
47
+ { code: 500, message: error.message },
48
+ { status: 500 }
49
+ );
50
+ }
51
+ }
52
+ }
53
+
54
+ export { handler as GET, handler as POST, handler as PUT, handler as DELETE };
src/app/api/ai/pollinations/[...slug]/route.ts ADDED
@@ -0,0 +1,52 @@
1
+ import { NextResponse, type NextRequest } from "next/server";
2
+ import { POLLINATIONS_BASE_URL } from "@/constants/urls";
3
+
4
+ export const runtime = "edge";
5
+ export const preferredRegion = [
6
+ "cle1",
7
+ "iad1",
8
+ "pdx1",
9
+ "sfo1",
10
+ "sin1",
11
+ "syd1",
12
+ "hnd1",
13
+ "kix1",
14
+ ];
15
+
16
+ const API_PROXY_BASE_URL =
17
+ process.env.POLLINATIONS_API_BASE_URL || POLLINATIONS_BASE_URL;
18
+
19
+ async function handler(req: NextRequest) {
20
+ let body;
21
+ if (req.method.toUpperCase() !== "GET") {
22
+ body = await req.json();
23
+ }
24
+ const searchParams = req.nextUrl.searchParams;
25
+ const path = searchParams.getAll("slug");
26
+ searchParams.delete("slug");
27
+ const params = searchParams.toString();
28
+
29
+ try {
30
+ let url = `${API_PROXY_BASE_URL}/${decodeURIComponent(path.join("/"))}`;
31
+ if (params) url += `?${params}`;
32
+ const payload: RequestInit = {
33
+ method: req.method,
34
+ headers: {
35
+ "Content-Type": req.headers.get("Content-Type") || "application/json",
36
+ },
37
+ };
38
+ if (body) payload.body = JSON.stringify(body);
39
+ const response = await fetch(url, payload);
40
+ return new NextResponse(response.body, response);
41
+ } catch (error) {
42
+ if (error instanceof Error) {
43
+ console.error(error);
44
+ return NextResponse.json(
45
+ { code: 500, message: error.message },
46
+ { status: 500 }
47
+ );
48
+ }
49
+ }
50
+ }
51
+
52
+ export { handler as GET, handler as POST, handler as PUT, handler as DELETE };
src/app/api/ai/xai/[...slug]/route.ts ADDED
@@ -0,0 +1,52 @@
1
+ import { NextResponse, type NextRequest } from "next/server";
2
+ import { XAI_BASE_URL } from "@/constants/urls";
3
+
4
+ export const runtime = "edge";
5
+ export const preferredRegion = [
6
+ "cle1",
7
+ "iad1",
8
+ "pdx1",
9
+ "sfo1",
10
+ "sin1",
11
+ "syd1",
12
+ "hnd1",
13
+ "kix1",
14
+ ];
15
+
16
+ const API_PROXY_BASE_URL = process.env.XAI_API_BASE_URL || XAI_BASE_URL;
17
+
18
+ async function handler(req: NextRequest) {
19
+ let body;
20
+ if (req.method.toUpperCase() !== "GET") {
21
+ body = await req.json();
22
+ }
23
+ const searchParams = req.nextUrl.searchParams;
24
+ const path = searchParams.getAll("slug");
25
+ searchParams.delete("slug");
26
+ const params = searchParams.toString();
27
+
28
+ try {
29
+ let url = `${API_PROXY_BASE_URL}/${decodeURIComponent(path.join("/"))}`;
30
+ if (params) url += `?${params}`;
31
+ const payload: RequestInit = {
32
+ method: req.method,
33
+ headers: {
34
+ "Content-Type": req.headers.get("Content-Type") || "application/json",
35
+ Authorization: req.headers.get("Authorization") || "",
36
+ },
37
+ };
38
+ if (body) payload.body = JSON.stringify(body);
39
+ const response = await fetch(url, payload);
40
+ return new NextResponse(response.body, response);
41
+ } catch (error) {
42
+ if (error instanceof Error) {
43
+ console.error(error);
44
+ return NextResponse.json(
45
+ { code: 500, message: error.message },
46
+ { status: 500 }
47
+ );
48
+ }
49
+ }
50
+ }
51
+
52
+ export { handler as GET, handler as POST, handler as PUT, handler as DELETE };
src/app/api/crawler/route.ts ADDED
@@ -0,0 +1,36 @@
1
+ import { NextResponse, type NextRequest } from "next/server";
2
+
3
+ export const runtime = "edge";
4
+ export const preferredRegion = [
5
+ "cle1",
6
+ "iad1",
7
+ "pdx1",
8
+ "sfo1",
9
+ "sin1",
10
+ "syd1",
11
+ "hnd1",
12
+ "kix1",
13
+ ];
14
+
15
+ export async function POST(req: NextRequest) {
16
+ try {
17
+ const { url } = await req.json();
18
+ if (!url) throw new Error("Missing parameters!");
19
+ const response = await fetch(url, { next: { revalidate: 60 } });
20
+ const result = await response.text();
21
+
22
+ const titleRegex = /<title>(.*?)<\/title>/i;
23
+ const titleMatch = result.match(titleRegex);
24
+ const title = titleMatch ? titleMatch[1].trim() : "";
25
+
26
+ return NextResponse.json({ url, title, content: result });
27
+ } catch (error) {
28
+ if (error instanceof Error) {
29
+ console.error(error);
30
+ return NextResponse.json(
31
+ { code: 500, message: error.message },
32
+ { status: 500 }
33
+ );
34
+ }
35
+ }
36
+ }
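A sketch of using this crawler endpoint from the client: POST a JSON body with a `url` field and read back the extracted page title plus the raw HTML in `content` (field names taken from the handler above):

```typescript
// Fetch a web page through the /api/crawler route and inspect the extracted title.
async function crawl(url: string) {
  const res = await fetch("/api/crawler", {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ url }),
  });
  const { title, content } = await res.json();
  return { title, html: content };
}
```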
src/app/api/mcp/[...slug]/route.ts ADDED
@@ -0,0 +1,91 @@
1
+ import { NextRequest, NextResponse } from "next/server";
2
+ import { SSEServerTransport } from "@/libs/mcp-server/sse";
3
+ import { initMcpServer } from "../server";
4
+
5
+ export const runtime = "edge";
6
+ export const dynamic = "force-dynamic";
7
+ export const preferredRegion = [
8
+ "cle1",
9
+ "iad1",
10
+ "pdx1",
11
+ "sfo1",
12
+ "sin1",
13
+ "syd1",
14
+ "hnd1",
15
+ "kix1",
16
+ ];
17
+
18
+ // In-memory storage for active transport sessions.
19
+ // In a production app, consider a more robust distributed cache or state store.
20
+ const activeTransports = new Map<string, SSEServerTransport>();
21
+
22
+ // The API route path clients will POST messages to
23
+ const POST_ENDPOINT_PATH = "/api/mcp/sse/messages"; // This must match your POST API route path
24
+
25
+ export async function GET(): Promise<NextResponse> {
26
+ // Create an MCP server
27
+ const server = initMcpServer();
28
+
29
+ // Create a new transport instance for this session
30
+ const transport = new SSEServerTransport({
31
+ endpoint: POST_ENDPOINT_PATH,
32
+ });
33
+ const sessionId = transport.sessionId;
34
+
35
+ // Store the transport instance keyed by session ID
36
+ activeTransports.set(sessionId, transport);
37
+
38
+ transport.onerror = (error) => {
39
+ return NextResponse.json(
40
+ { code: 500, message: error.message },
41
+ { status: 500 }
42
+ );
43
+ };
44
+
45
+ transport.onclose = () => {
46
+ activeTransports.delete(sessionId); // Clean up the instance
47
+ transport.close();
48
+ server.close();
49
+ };
50
+
51
+ await server.connect(transport);
52
+ // Call the transport method to handle the GET request and return the SSE response
53
+ const response = await transport.handleGetRequest();
54
+ return new NextResponse(response.body, response);
55
+ }
56
+
57
+ export async function POST(req: NextRequest): Promise<NextResponse> {
58
+ // Extract the session ID from the query parameter sent by the client
59
+ const sessionId = req.nextUrl.searchParams.get("sessionId");
60
+
61
+ if (!sessionId) {
62
+ return new NextResponse(
63
+ JSON.stringify({
64
+ jsonrpc: "2.0",
65
+ error: { code: -32600, message: "Missing sessionId query parameter" },
66
+ id: null,
67
+ }),
68
+ { status: 400, headers: { "Content-Type": "application/json" } }
69
+ );
70
+ }
71
+
72
+ // Find the corresponding transport instance
73
+ const transport = activeTransports.get(sessionId);
74
+
75
+ if (!transport) {
76
+ // Session not found or already closed
77
+ console.warn(`Received POST for unknown session ID: ${sessionId}`);
78
+ return new NextResponse(
79
+ JSON.stringify({
80
+ jsonrpc: "2.0",
81
+ error: { code: -32001, message: "Session not found" },
82
+ id: null,
83
+ }),
84
+ { status: 404, headers: { "Content-Type": "application/json" } }
85
+ );
86
+ }
87
+
88
+ // Delegate the POST message handling to the specific transport instance
89
+ const response = await transport.handlePostMessage(req);
90
+ return new NextResponse(response.body, response);
91
+ }
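The intended flow for this legacy SSE transport is: the client opens the event stream with a GET request, the transport announces the message endpoint together with a `sessionId` over that stream, and subsequent JSON-RPC messages are POSTed to `/api/mcp/sse/messages?sessionId=<id>`. A rough sketch of the follow-up POST, assuming the session id has already been read from the SSE stream:

```typescript
// Hypothetical follow-up POST for an established MCP SSE session.
async function sendMcpMessage(sessionId: string, message: object) {
  await fetch(`/api/mcp/sse/messages?sessionId=${sessionId}`, {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify(message), // a JSON-RPC 2.0 request or notification
  });
}
```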
src/app/api/mcp/route.ts ADDED
@@ -0,0 +1,50 @@
1
+ import { NextResponse, type NextRequest } from "next/server";
2
+ import { StreamableHTTPServerTransport } from "@/libs/mcp-server/streamableHttp";
3
+ import { initMcpServer } from "./server";
4
+
5
+ export const runtime = "edge";
6
+ export const dynamic = "force-dynamic";
7
+ export const preferredRegion = [
8
+ "cle1",
9
+ "iad1",
10
+ "pdx1",
11
+ "sfo1",
12
+ "sin1",
13
+ "syd1",
14
+ "hnd1",
15
+ "kix1",
16
+ ];
17
+
18
+ export async function POST(req: NextRequest) {
19
+ try {
20
+ const server = initMcpServer();
21
+ const transport: StreamableHTTPServerTransport =
22
+ new StreamableHTTPServerTransport({
23
+ sessionIdGenerator: undefined,
24
+ });
25
+
26
+ transport.onclose = () => {
27
+ transport.close();
28
+ server.close();
29
+ };
30
+
31
+ transport.onerror = (err) => {
32
+ return NextResponse.json(
33
+ { code: 500, message: err.message },
34
+ { status: 500 }
35
+ );
36
+ };
37
+
38
+ await server.connect(transport);
39
+ const response = await transport.handleRequest(req);
40
+ return new NextResponse(response.body, response);
41
+ } catch (error) {
42
+ if (error instanceof Error) {
43
+ console.error(error);
44
+ return NextResponse.json(
45
+ { code: 500, message: error.message },
46
+ { status: 500 }
47
+ );
48
+ }
49
+ }
50
+ }
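Because this endpoint uses the stateless Streamable HTTP transport, each JSON-RPC request can be sent as a plain POST. A rough sketch listing the server's tools; the `Accept` header covering both JSON and SSE is an assumption about what the transport negotiates:

```typescript
// Hypothetical tools/list call against the /api/mcp endpoint.
async function listMcpTools() {
  const res = await fetch("/api/mcp", {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      Accept: "application/json, text/event-stream",
    },
    body: JSON.stringify({ jsonrpc: "2.0", id: 1, method: "tools/list" }),
  });
  return res.json();
}
```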
src/app/api/mcp/server.ts ADDED
@@ -0,0 +1,404 @@
1
+ import { z } from "zod";
2
+ import { McpServer } from "@/libs/mcp-server/mcp";
3
+ import DeepResearch from "@/utils/deep-research";
4
+ import { multiApiKeyPolling } from "@/utils/model";
5
+ import {
6
+ getAIProviderBaseURL,
7
+ getAIProviderApiKey,
8
+ getSearchProviderBaseURL,
9
+ getSearchProviderApiKey,
10
+ } from "../utils";
11
+
12
+ const AI_PROVIDER = process.env.MCP_AI_PROVIDER || "";
13
+ const SEARCH_PROVIDER = process.env.MCP_SEARCH_PROVIDER || "model";
14
+ const THINKING_MODEL = process.env.MCP_THINKING_MODEL || "";
15
+ const TASK_MODEL = process.env.MCP_TASK_MODEL || "";
16
+
17
+ function initDeepResearchServer({
18
+ language,
19
+ maxResult,
20
+ }: {
21
+ language?: string;
22
+ maxResult?: number;
23
+ }) {
24
+ const deepResearch = new DeepResearch({
25
+ language,
26
+ AIProvider: {
27
+ baseURL: getAIProviderBaseURL(AI_PROVIDER),
28
+ apiKey: multiApiKeyPolling(getAIProviderApiKey(AI_PROVIDER)),
29
+ provider: AI_PROVIDER,
30
+ thinkingModel: THINKING_MODEL,
31
+ taskModel: TASK_MODEL,
32
+ },
33
+ searchProvider: {
34
+ baseURL: getSearchProviderBaseURL(SEARCH_PROVIDER),
35
+ apiKey: multiApiKeyPolling(getSearchProviderApiKey(SEARCH_PROVIDER)),
36
+ provider: SEARCH_PROVIDER,
37
+ maxResult,
38
+ },
39
+ onMessage: (event, data) => {
40
+ if (event === "progress") {
41
+ console.log(
42
+ `[${data.step}]: ${data.name ? `"${data.name}" ` : ""}${data.status}`
43
+ );
44
+ if (data.status === "end" && data.data) {
45
+ console.log(data.data);
46
+ }
47
+ } else if (event === "error") {
48
+ console.error(data.message);
49
+ throw new Error(data.message);
50
+ }
51
+ },
52
+ });
53
+
54
+ return deepResearch;
55
+ }
56
+
57
+ export function initMcpServer() {
58
+ const deepResearchToolDescription =
59
+ "Start deep research on any question, obtain and organize information through search engines, and generate research report.";
60
+ const writeResearchPlanDescription =
61
+ "Generate research plan based on user query.";
62
+ const generateSERPQueryDescription =
63
+ "Generate a list of data collection tasks based on the research plan.";
64
+ const searchTaskDescription =
65
+ "Run the search tasks generated from the research plan and collect information for each query.";
66
+ const writeFinalReportDescription =
67
+ "Write a final research report based on the research plan and the results of the information collection tasks.";
68
+
69
+ const server = new McpServer(
70
+ {
71
+ name: "deep-research",
72
+ version: "0.1.0",
73
+ },
74
+ {
75
+ capabilities: {
76
+ tools: {
77
+ "deep-research": {
78
+ description: deepResearchToolDescription,
79
+ },
80
+ "write-research-plan": {
81
+ description: writeResearchPlanDescription,
82
+ },
83
+ "generate-SERP-query": {
84
+ description: generateSERPQueryDescription,
85
+ },
86
+ "search-task": {
87
+ description: searchTaskDescription,
88
+ },
89
+ "write-final-report": {
90
+ description: writeFinalReportDescription,
91
+ },
92
+ },
93
+ },
94
+ }
95
+ );
96
+
97
+ server.tool(
98
+ "deep-research",
99
+ deepResearchToolDescription,
100
+ {
101
+ query: z.string().describe("The topic for deep research."),
102
+ language: z
103
+ .string()
104
+ .optional()
105
+ .describe("The final report text language."),
106
+ maxResult: z
107
+ .number()
108
+ .optional()
109
+ .default(5)
110
+ .describe("Maximum number of search results."),
111
+ enableCitationImage: z
112
+ .boolean()
113
+ .default(true)
114
+ .optional()
115
+ .describe(
116
+ "Whether to include content-related images in the final report."
117
+ ),
118
+ enableReferences: z
119
+ .boolean()
120
+ .default(true)
121
+ .optional()
122
+ .describe(
123
+ "Whether to include citation links in search results and final reports."
124
+ ),
125
+ },
126
+ async (
127
+ { query, language, maxResult, enableCitationImage, enableReferences },
128
+ { signal }
129
+ ) => {
130
+ signal.addEventListener("abort", () => {
131
+ throw new Error("The client closed unexpectedly!");
132
+ });
133
+
134
+ try {
135
+ const deepResearch = initDeepResearchServer({
136
+ language,
137
+ maxResult,
138
+ });
139
+ const result = await deepResearch.start(
140
+ query,
141
+ enableCitationImage,
142
+ enableReferences
143
+ );
144
+ return {
145
+ content: [{ type: "text", text: JSON.stringify(result) }],
146
+ };
147
+ } catch (error) {
148
+ return {
149
+ isError: true,
150
+ content: [
151
+ {
152
+ type: "text",
153
+ text: `Error: ${
154
+ error instanceof Error ? error.message : "Unknown error"
155
+ }`,
156
+ },
157
+ ],
158
+ };
159
+ }
160
+ }
161
+ );
162
+
163
+ server.tool(
164
+ "write-research-plan",
165
+ writeResearchPlanDescription,
166
+ {
167
+ query: z.string().describe("The topic for deep research."),
168
+ language: z.string().optional().describe("The response Language."),
169
+ },
170
+ async ({ query, language }, { signal }) => {
171
+ signal.addEventListener("abort", () => {
172
+ throw new Error("The client closed unexpectedly!");
173
+ });
174
+
175
+ try {
176
+ const deepResearch = initDeepResearchServer({ language });
177
+ const result = await deepResearch.writeReportPlan(query);
178
+ return {
179
+ content: [
180
+ { type: "text", text: JSON.stringify({ reportPlan: result }) },
181
+ ],
182
+ };
183
+ } catch (error) {
184
+ return {
185
+ isError: true,
186
+ content: [
187
+ {
188
+ type: "text",
189
+ text: `Error: ${
190
+ error instanceof Error ? error.message : "Unknown error"
191
+ }`,
192
+ },
193
+ ],
194
+ };
195
+ }
196
+ }
197
+ );
198
+
199
+ server.tool(
200
+ "generate-SERP-query",
201
+ generateSERPQueryDescription,
202
+ {
203
+ plan: z.string().describe("Research plan for deep research."),
204
+ language: z.string().optional().describe("The response Language."),
205
+ },
206
+ async ({ plan, language }, { signal }) => {
207
+ signal.addEventListener("abort", () => {
208
+ throw new Error("The client closed unexpectedly!");
209
+ });
210
+
211
+ try {
212
+ const deepResearch = initDeepResearchServer({ language });
213
+ const result = await deepResearch.generateSERPQuery(plan);
214
+ return {
215
+ content: [{ type: "text", text: JSON.stringify(result) }],
216
+ };
217
+ } catch (error) {
218
+ return {
219
+ isError: true,
220
+ content: [
221
+ {
222
+ type: "text",
223
+ text: `Error: ${
224
+ error instanceof Error ? error.message : "Unknown error"
225
+ }`,
226
+ },
227
+ ],
228
+ };
229
+ }
230
+ }
231
+ );
232
+
233
+ server.tool(
234
+ "search-task",
235
+ searchTaskDescription,
236
+ {
237
+ tasks: z
238
+ .array(
239
+ z.object({
240
+ query: z.string().describe("Information to be queried."),
241
+ researchGoal: z.string().describe("The goal of this query task."),
242
+ })
243
+ )
244
+ .describe("Information Collection Task List."),
245
+ language: z.string().optional().describe("The response Language."),
246
+ maxResult: z
247
+ .number()
248
+ .optional()
249
+ .default(5)
250
+ .describe("Maximum number of search results."),
251
+ enableReferences: z
252
+ .boolean()
253
+ .default(true)
254
+ .optional()
255
+ .describe(
256
+ "Whether to include citation links in search results and final reports."
257
+ ),
258
+ },
259
+ async (
260
+ { tasks, language, maxResult, enableReferences = true },
261
+ { signal }
262
+ ) => {
263
+ signal.addEventListener("abort", () => {
264
+ throw new Error("The client closed unexpectedly!");
265
+ });
266
+
267
+ try {
268
+ const deepResearch = initDeepResearchServer({ language, maxResult });
269
+ const result = await deepResearch.runSearchTask(
270
+ tasks,
271
+ enableReferences
272
+ );
273
+ return {
274
+ content: [{ type: "text", text: JSON.stringify(result) }],
275
+ };
276
+ } catch (error) {
277
+ return {
278
+ isError: true,
279
+ content: [
280
+ {
281
+ type: "text",
282
+ text: `Error: ${
283
+ error instanceof Error ? error.message : "Unknown error"
284
+ }`,
285
+ },
286
+ ],
287
+ };
288
+ }
289
+ }
290
+ );
291
+
292
+ server.tool(
293
+ "write-final-report",
294
+ writeFinalReportDescription,
295
+ {
296
+ plan: z.string().describe("Research plan for deep research."),
297
+ tasks: z
298
+ .array(
299
+ z.object({
300
+ query: z.string().describe("Information to be queried."),
301
+ researchGoal: z.string().describe("The goal of this query task."),
302
+ learning: z
303
+ .string()
304
+ .describe(
305
+ "Knowledge learned while performing information gathering tasks."
306
+ ),
307
+ sources: z
308
+ .array(
309
+ z.object({
310
+ url: z.string().describe("Web link."),
311
+ title: z.string().optional().describe("Page title."),
312
+ })
313
+ )
314
+ .optional()
315
+ .describe(
316
+ "Web page information that was queried when performing information collection tasks."
317
+ ),
318
+ images: z
319
+ .array(
320
+ z.object({
321
+ url: z.string().describe("Image link."),
322
+ description: z
323
+ .string()
324
+ .optional()
325
+ .describe("Image Description."),
326
+ })
327
+ )
328
+ .optional()
329
+ .describe(
330
+ "Image resources obtained when performing information collection tasks."
331
+ ),
332
+ })
333
+ )
334
+ .describe(
335
+ "The data information collected during the execution of the query task."
336
+ ),
337
+ language: z
338
+ .string()
339
+ .optional()
340
+ .describe("The final report text language."),
341
+ maxResult: z
342
+ .number()
343
+ .optional()
344
+ .default(5)
345
+ .describe("Maximum number of search results."),
346
+ enableCitationImage: z
347
+ .boolean()
348
+ .default(true)
349
+ .optional()
350
+ .describe(
351
+ "Whether to include content-related images in the final report."
352
+ ),
353
+ enableReferences: z
354
+ .boolean()
355
+ .default(true)
356
+ .optional()
357
+ .describe(
358
+ "Whether to include citation links in search results and final reports."
359
+ ),
360
+ },
361
+ async (
362
+ {
363
+ plan,
364
+ tasks,
365
+ language,
366
+ maxResult,
367
+ enableCitationImage = true,
368
+ enableReferences = true,
369
+ },
370
+ { signal }
371
+ ) => {
372
+ signal.addEventListener("abort", () => {
373
+ throw new Error("The client closed unexpectedly!");
374
+ });
375
+
376
+ try {
377
+ const deepResearch = initDeepResearchServer({ language, maxResult });
378
+ const result = await deepResearch.writeFinalReport(
379
+ plan,
380
+ tasks,
381
+ enableCitationImage,
382
+ enableReferences
383
+ );
384
+ return {
385
+ content: [{ type: "text", text: JSON.stringify(result) }],
386
+ };
387
+ } catch (error) {
388
+ return {
389
+ isError: true,
390
+ content: [
391
+ {
392
+ type: "text",
393
+ text: `Error: ${
394
+ error instanceof Error ? error.message : "Unknown error"
395
+ }`,
396
+ },
397
+ ],
398
+ };
399
+ }
400
+ }
401
+ );
402
+
403
+ return server;
404
+ }
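For reference, the arguments accepted by the `deep-research` tool mirror the zod schema above; only `query` is required. A minimal example payload (all values illustrative):

```typescript
// Example arguments for a "deep-research" tool call.
const deepResearchArgs = {
  query: "AI trends for this year", // required research topic
  language: "en-US",                // optional report language
  maxResult: 5,                     // optional, defaults to 5
  enableCitationImage: true,        // optional, defaults to true
  enableReferences: true,           // optional, defaults to true
};
```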
src/app/api/search/bocha/[...slug]/route.ts ADDED
@@ -0,0 +1,47 @@
1
+ import { NextResponse, type NextRequest } from "next/server";
2
+ import { BOCHA_BASE_URL } from "@/constants/urls";
3
+
4
+ export const runtime = "edge";
5
+ export const preferredRegion = [
6
+ "cle1",
7
+ "iad1",
8
+ "pdx1",
9
+ "sfo1",
10
+ "sin1",
11
+ "syd1",
12
+ "hnd1",
13
+ "kix1",
14
+ ];
15
+
16
+ const API_PROXY_BASE_URL = process.env.BOCHA_API_BASE_URL || BOCHA_BASE_URL;
17
+
18
+ export async function POST(req: NextRequest) {
19
+ const body = await req.json();
20
+ const searchParams = req.nextUrl.searchParams;
21
+ const path = searchParams.getAll("slug");
22
+ searchParams.delete("slug");
23
+ const params = searchParams.toString();
24
+
25
+ try {
26
+ let url = `${API_PROXY_BASE_URL}/${decodeURIComponent(path.join("/"))}`;
27
+ if (params) url += `?${params}`;
28
+ const payload: RequestInit = {
29
+ method: req.method,
30
+ headers: {
31
+ "Content-Type": req.headers.get("Content-Type") || "application/json",
32
+ Authorization: req.headers.get("Authorization") || "",
33
+ },
34
+ body: JSON.stringify(body),
35
+ };
36
+ const response = await fetch(url, payload);
37
+ return new NextResponse(response.body, response);
38
+ } catch (error) {
39
+ if (error instanceof Error) {
40
+ console.error(error);
41
+ return NextResponse.json(
42
+ { code: 500, message: error.message },
43
+ { status: 500 }
44
+ );
45
+ }
46
+ }
47
+ }
src/app/api/search/exa/[...slug]/route.ts ADDED
@@ -0,0 +1,47 @@
1
+ import { NextResponse, type NextRequest } from "next/server";
2
+ import { EXA_BASE_URL } from "@/constants/urls";
3
+
4
+ export const runtime = "edge";
5
+ export const preferredRegion = [
6
+ "cle1",
7
+ "iad1",
8
+ "pdx1",
9
+ "sfo1",
10
+ "sin1",
11
+ "syd1",
12
+ "hnd1",
13
+ "kix1",
14
+ ];
15
+
16
+ const API_PROXY_BASE_URL = process.env.EXA_API_BASE_URL || EXA_BASE_URL;
17
+
18
+ export async function POST(req: NextRequest) {
19
+ const body = await req.json();
20
+ const searchParams = req.nextUrl.searchParams;
21
+ const path = searchParams.getAll("slug");
22
+ searchParams.delete("slug");
23
+ const params = searchParams.toString();
24
+
25
+ try {
26
+ let url = `${API_PROXY_BASE_URL}/${decodeURIComponent(path.join("/"))}`;
27
+ if (params) url += `?${params}`;
28
+ const payload: RequestInit = {
29
+ method: req.method,
30
+ headers: {
31
+ "Content-Type": req.headers.get("Content-Type") || "application/json",
32
+ Authorization: req.headers.get("Authorization") || "",
33
+ },
34
+ body: JSON.stringify(body),
35
+ };
36
+ const response = await fetch(url, payload);
37
+ return new NextResponse(response.body, response);
38
+ } catch (error) {
39
+ if (error instanceof Error) {
40
+ console.error(error);
41
+ return NextResponse.json(
42
+ { code: 500, message: error.message },
43
+ { status: 500 }
44
+ );
45
+ }
46
+ }
47
+ }
src/app/api/search/firecrawl/[...slug]/route.ts ADDED
@@ -0,0 +1,48 @@
1
+ import { NextResponse, type NextRequest } from "next/server";
2
+ import { FIRECRAWL_BASE_URL } from "@/constants/urls";
3
+
4
+ export const runtime = "edge";
5
+ export const preferredRegion = [
6
+ "cle1",
7
+ "iad1",
8
+ "pdx1",
9
+ "sfo1",
10
+ "sin1",
11
+ "syd1",
12
+ "hnd1",
13
+ "kix1",
14
+ ];
15
+
16
+ const API_PROXY_BASE_URL =
17
+ process.env.FIRECRAWL_API_BASE_URL || FIRECRAWL_BASE_URL;
18
+
19
+ export async function POST(req: NextRequest) {
20
+ const body = await req.json();
21
+ const searchParams = req.nextUrl.searchParams;
22
+ const path = searchParams.getAll("slug");
23
+ searchParams.delete("slug");
24
+ const params = searchParams.toString();
25
+
26
+ try {
27
+ let url = `${API_PROXY_BASE_URL}/${decodeURIComponent(path.join("/"))}`;
28
+ if (params) url += `?${params}`;
29
+ const payload: RequestInit = {
30
+ method: req.method,
31
+ headers: {
32
+ "Content-Type": req.headers.get("Content-Type") || "application/json",
33
+ Authorization: req.headers.get("Authorization") || "",
34
+ },
35
+ body: JSON.stringify(body),
36
+ };
37
+ const response = await fetch(url, payload);
38
+ return new NextResponse(response.body, response);
39
+ } catch (error) {
40
+ if (error instanceof Error) {
41
+ console.error(error);
42
+ return NextResponse.json(
43
+ { code: 500, message: error.message },
44
+ { status: 500 }
45
+ );
46
+ }
47
+ }
48
+ }
src/app/api/search/searxng/[...slug]/route.ts ADDED
@@ -0,0 +1,49 @@
1
+ import { NextResponse, type NextRequest } from "next/server";
2
+ import { SEARXNG_BASE_URL } from "@/constants/urls";
3
+
4
+ export const runtime = "edge";
5
+ export const preferredRegion = [
6
+ "cle1",
7
+ "iad1",
8
+ "pdx1",
9
+ "sfo1",
10
+ "sin1",
11
+ "syd1",
12
+ "hnd1",
13
+ "kix1",
14
+ ];
15
+
16
+ const API_PROXY_BASE_URL = process.env.SEARXNG_API_BASE_URL || SEARXNG_BASE_URL;
17
+
18
+ export async function POST(req: NextRequest) {
19
+ let body;
20
+ if (req.method.toUpperCase() !== "GET") {
21
+ body = await req.json();
22
+ }
23
+ const searchParams = req.nextUrl.searchParams;
24
+ const path = searchParams.getAll("slug");
25
+ searchParams.delete("slug");
26
+ const params = searchParams.toString();
27
+
28
+ try {
29
+ let url = `${API_PROXY_BASE_URL}/${decodeURIComponent(path.join("/"))}`;
30
+ if (params) url += `?${params}`;
31
+ const payload: RequestInit = {
32
+ method: req.method,
33
+ headers: {
34
+ "Content-Type": req.headers.get("Content-Type") || "application/json",
35
+ },
36
+ };
37
+ if (body) payload.body = JSON.stringify(body);
38
+ const response = await fetch(url, payload);
39
+ return new NextResponse(response.body, response);
40
+ } catch (error) {
41
+ if (error instanceof Error) {
42
+ console.error(error);
43
+ return NextResponse.json(
44
+ { code: 500, message: error.message },
45
+ { status: 500 }
46
+ );
47
+ }
48
+ }
49
+ }
src/app/api/search/tavily/[...slug]/route.ts ADDED
@@ -0,0 +1,47 @@
+ import { NextResponse, type NextRequest } from "next/server";
+ import { TAVILY_BASE_URL } from "@/constants/urls";
+
+ export const runtime = "edge";
+ export const preferredRegion = [
+   "cle1",
+   "iad1",
+   "pdx1",
+   "sfo1",
+   "sin1",
+   "syd1",
+   "hnd1",
+   "kix1",
+ ];
+
+ const API_PROXY_BASE_URL = process.env.TAVILY_API_BASE_URL || TAVILY_BASE_URL;
+
+ export async function POST(req: NextRequest) {
+   const body = await req.json();
+   const searchParams = req.nextUrl.searchParams;
+   const path = searchParams.getAll("slug");
+   searchParams.delete("slug");
+   const params = searchParams.toString();
+
+   try {
+     let url = `${API_PROXY_BASE_URL}/${decodeURIComponent(path.join("/"))}`;
+     if (params) url += `?${params}`;
+     const payload: RequestInit = {
+       method: req.method,
+       headers: {
+         "Content-Type": req.headers.get("Content-Type") || "application/json",
+         Authorization: req.headers.get("Authorization") || "",
+       },
+       body: JSON.stringify(body),
+     };
+     const response = await fetch(url, payload);
+     return new NextResponse(response.body, response);
+   } catch (error) {
+     if (error instanceof Error) {
+       console.error(error);
+       return NextResponse.json(
+         { code: 500, message: error.message },
+         { status: 500 }
+       );
+     }
+   }
+ }
src/app/api/sse/live/route.ts ADDED
@@ -0,0 +1,105 @@
+ import { NextResponse, type NextRequest } from "next/server";
+ import DeepResearch from "@/utils/deep-research";
+ import { multiApiKeyPolling } from "@/utils/model";
+ import {
+   getAIProviderBaseURL,
+   getAIProviderApiKey,
+   getSearchProviderBaseURL,
+   getSearchProviderApiKey,
+ } from "../../utils";
+
+ export const runtime = "edge";
+ export const dynamic = "force-dynamic";
+ export const preferredRegion = [
+   "cle1",
+   "iad1",
+   "pdx1",
+   "sfo1",
+   "sin1",
+   "syd1",
+   "hnd1",
+   "kix1",
+ ];
+
+ export async function GET(req: NextRequest) {
+   function getValueFromSearchParams(key: string) {
+     return req.nextUrl.searchParams.get(key);
+   }
+   const query = getValueFromSearchParams("query") || "";
+   const provider = getValueFromSearchParams("provider") || "";
+   const thinkingModel = getValueFromSearchParams("thinkingModel") || "";
+   const taskModel = getValueFromSearchParams("taskModel") || "";
+   const searchProvider = getValueFromSearchParams("searchProvider") || "";
+   const language = getValueFromSearchParams("language") || "";
+   const maxResult = Number(getValueFromSearchParams("maxResult")) || 5;
+   // Both options default to true and are disabled only when the query
+   // parameter is explicitly "false", matching the defaults of the POST route.
+   const enableCitationImage =
+     getValueFromSearchParams("enableCitationImage") !== "false";
+   const enableReferences =
+     getValueFromSearchParams("enableReferences") !== "false";
+
+   const encoder = new TextEncoder();
+   const readableStream = new ReadableStream({
+     start: async (controller) => {
+       console.log("Client connected");
+
+       // close() throws if the stream is already closed, so guard all closes.
+       let closed = false;
+       const safeClose = () => {
+         if (!closed) {
+           closed = true;
+           controller.close();
+         }
+       };
+
+       req.signal.addEventListener("abort", () => {
+         console.log("Client disconnected");
+         safeClose();
+       });
+
+       const deepResearch = new DeepResearch({
+         language,
+         AIProvider: {
+           baseURL: getAIProviderBaseURL(provider),
+           apiKey: multiApiKeyPolling(getAIProviderApiKey(provider)),
+           provider,
+           thinkingModel,
+           taskModel,
+         },
+         searchProvider: {
+           baseURL: getSearchProviderBaseURL(searchProvider),
+           apiKey: multiApiKeyPolling(getSearchProviderApiKey(searchProvider)),
+           provider: searchProvider,
+           maxResult,
+         },
+         onMessage: (event, data) => {
+           if (event === "message") {
+             // Stream the report text to the client as it is generated.
+             controller.enqueue(encoder.encode(data.text));
+           } else if (event === "progress") {
+             console.log(
+               `[${data.step}]: ${data.name ? `"${data.name}" ` : ""}${
+                 data.status
+               }`
+             );
+             if (data.step === "final-report" && data.status === "end") {
+               safeClose();
+             }
+           } else if (event === "error") {
+             console.error(data);
+             safeClose();
+           }
+         },
+       });
+
+       try {
+         await deepResearch.start(query, enableCitationImage, enableReferences);
+       } catch (err) {
+         throw new Error(err instanceof Error ? err.message : "Unknown error");
+       }
+       safeClose();
+     },
+   });
+
+   return new NextResponse(readableStream, {
+     headers: {
+       "Content-Type": "text/event-stream; charset=utf-8",
+       "Cache-Control": "no-cache, no-transform",
+       Connection: "keep-alive",
+       "X-Accel-Buffering": "no",
+       "Access-Control-Allow-Origin": "*",
+     },
+   });
+ }
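The GET route above streams the report text itself (the `message` events) rather than framed SSE records, even though it advertises `text/event-stream`. A sketch of a consumer that simply decodes the body as a text stream; the provider, model, and search-provider values below are placeholders for whatever is configured in your deployment, not values defined in this diff.

```ts
// Hypothetical consumer for GET /api/sse/live (sketch only).
async function runLiveResearch(query: string): Promise<string> {
  const params = new URLSearchParams({
    query,
    provider: "google",              // illustrative provider/model choices
    thinkingModel: "gemini-2.0-flash",
    taskModel: "gemini-2.0-flash",
    searchProvider: "tavily",
    language: "en-US",
  });
  const res = await fetch(`/api/sse/live?${params}`);
  if (!res.ok || !res.body) throw new Error(`Request failed: ${res.status}`);

  // The handler enqueues raw report text, so decode the body incrementally.
  const reader = res.body.pipeThrough(new TextDecoderStream()).getReader();
  let report = "";
  for (;;) {
    const { done, value } = await reader.read();
    if (done) break;
    report += value;
  }
  return report;
}
```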
src/app/api/sse/route.ts ADDED
@@ -0,0 +1,111 @@
+ import { NextResponse, type NextRequest } from "next/server";
+ import DeepResearch from "@/utils/deep-research";
+ import { multiApiKeyPolling } from "@/utils/model";
+ import {
+   getAIProviderBaseURL,
+   getAIProviderApiKey,
+   getSearchProviderBaseURL,
+   getSearchProviderApiKey,
+ } from "../utils";
+
+ export const runtime = "edge";
+ export const dynamic = "force-dynamic";
+ export const preferredRegion = [
+   "cle1",
+   "iad1",
+   "pdx1",
+   "sfo1",
+   "sin1",
+   "syd1",
+   "hnd1",
+   "kix1",
+ ];
+
+ export async function POST(req: NextRequest) {
+   const {
+     query,
+     provider,
+     thinkingModel,
+     taskModel,
+     searchProvider,
+     language,
+     maxResult,
+     enableCitationImage = true,
+     enableReferences = true,
+   } = await req.json();
+
+   const encoder = new TextEncoder();
+   const readableStream = new ReadableStream({
+     start: async (controller) => {
+       console.log("Client connected");
+
+       // close() throws if the stream is already closed, so guard all closes.
+       let closed = false;
+       const safeClose = () => {
+         if (!closed) {
+           closed = true;
+           controller.close();
+         }
+       };
+
+       controller.enqueue(
+         encoder.encode(
+           `event: infor\ndata: ${JSON.stringify({
+             name: "deep-research",
+             version: "0.1.0",
+           })}\n\n`
+         )
+       );
+
+       const deepResearch = new DeepResearch({
+         language,
+         AIProvider: {
+           baseURL: getAIProviderBaseURL(provider),
+           apiKey: multiApiKeyPolling(getAIProviderApiKey(provider)),
+           provider,
+           thinkingModel,
+           taskModel,
+         },
+         searchProvider: {
+           baseURL: getSearchProviderBaseURL(searchProvider),
+           apiKey: multiApiKeyPolling(getSearchProviderApiKey(searchProvider)),
+           provider: searchProvider,
+           maxResult,
+         },
+         onMessage: (event, data) => {
+           if (closed) return;
+           // Forward the event to the client before (possibly) closing the stream.
+           controller.enqueue(
+             encoder.encode(`event: ${event}\ndata: ${JSON.stringify(data)}\n\n`)
+           );
+           if (event === "progress") {
+             console.log(
+               `[${data.step}]: ${data.name ? `"${data.name}" ` : ""}${
+                 data.status
+               }`
+             );
+             if (data.step === "final-report" && data.status === "end") {
+               safeClose();
+             }
+           } else if (event === "error") {
+             console.error(data);
+             safeClose();
+           } else {
+             console.warn(`Unknown event: ${event}`);
+           }
+         },
+       });
+
+       req.signal.addEventListener("abort", () => {
+         safeClose();
+       });
+
+       try {
+         await deepResearch.start(query, enableCitationImage, enableReferences);
+       } catch (err) {
+         throw new Error(err instanceof Error ? err.message : "Unknown error");
+       }
+       safeClose();
+     },
+   });
+
+   return new NextResponse(readableStream, {
+     headers: {
+       "Content-Type": "text/event-stream; charset=utf-8",
+       "Cache-Control": "no-cache, no-transform",
+       Connection: "keep-alive",
+       "X-Accel-Buffering": "no",
+       "Access-Control-Allow-Origin": "*",
+     },
+   });
+ }
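Because this endpoint is a POST, the browser's `EventSource` cannot be used directly; a client has to issue a `fetch` and split the streamed body into SSE frames itself. A minimal sketch, assuming the request body mirrors the fields destructured at the top of the handler:

```ts
// Hypothetical consumer for POST /api/sse (sketch only). Frames are separated
// by a blank line and carry "event:" and "data:" fields, as emitted above.
async function runResearch(body: Record<string, unknown>) {
  const res = await fetch("/api/sse", {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify(body),
  });
  if (!res.ok || !res.body) throw new Error(`Request failed: ${res.status}`);

  const reader = res.body.pipeThrough(new TextDecoderStream()).getReader();
  let buffer = "";
  for (;;) {
    const { done, value } = await reader.read();
    if (done) break;
    buffer += value;
    const frames = buffer.split("\n\n");
    buffer = frames.pop() ?? ""; // keep any incomplete frame for the next chunk
    for (const frame of frames) {
      const lines = frame.split("\n");
      const event = lines.find((l) => l.startsWith("event: "))?.slice(7);
      const data = lines.find((l) => l.startsWith("data: "))?.slice(6);
      if (event && data) console.log(event, JSON.parse(data));
    }
  }
}
```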