This view is limited to 50 files because it contains too many changes.
.gitattributes CHANGED
@@ -1 +1,35 @@
- Animated_Logo_Video_Ready.gif filter=lfs diff=lfs merge=lfs -text
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
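Every pattern above routes matching files through Git LFS, which is what the Hub expects for large binary artifacts. As a quick sanity check, a minimal sketch of testing a path against such patterns (stdlib only; note that `fnmatch` only approximates git's attribute matching and does not handle the `saved_model/**/*` form):

```python
from fnmatch import fnmatch

# Parse ".gitattributes"-style lines and report whether a path would be
# LFS-tracked. Minimal sketch: good enough for the simple "*.ext" rules above.
def lfs_patterns(text: str) -> list[str]:
    return [line.split()[0] for line in text.splitlines()
            if "filter=lfs" in line.split()]

rules = lfs_patterns("*.safetensors filter=lfs diff=lfs merge=lfs -text")
print(any(fnmatch("model.safetensors", p) for p in rules))  # True
```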
.gitignore CHANGED
@@ -1,3 +1,5 @@
+ .gradio/
+
  # Byte-compiled / optimized / DLL files
  __pycache__/
  *.py[cod]
@@ -19,18 +21,16 @@ lib64/
  parts/
  sdist/
  var/
+ wheels/
+ share/python-wheels/
  *.egg-info/
  .installed.cfg
  *.egg
  MANIFEST

- # Virtual environments
- venv/
- env/
- ENV/
- .venv/
-
  # PyInstaller
+ # Usually these files are written by a python script from a template
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
  *.manifest
  *.spec

@@ -48,34 +48,115 @@ htmlcov/
  nosetests.xml
  coverage.xml
  *.cover
+ *.py,cover
  .hypothesis/
  .pytest_cache/
+ cover/
+
+ # Translations
+ *.mo
+ *.pot
+
+ # Django stuff:
+ *.log
+ local_settings.py
+ db.sqlite3
+ db.sqlite3-journal
+
+ # Flask stuff:
+ instance/
+ .webassets-cache
+
+ # Scrapy stuff:
+ .scrapy
+
+ # Sphinx documentation
+ docs/_build/
+
+ # PyBuilder
+ .pybuilder/
+ target/

  # Jupyter Notebook
  .ipynb_checkpoints

+ # IPython
+ profile_default/
+ ipython_config.py
+
  # pyenv
- .python-version
+ # For a library or package, you might want to ignore these files since the code is
+ # intended to run in multiple environments; otherwise, check them in:
+ # .python-version
+
+ # pipenv
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
+ # install all needed dependencies.
+ #Pipfile.lock
+
+ # poetry
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
+ # commonly ignored for libraries.
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+ #poetry.lock
+
+ # pdm
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+ #pdm.lock
+ # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+ # in version control.
+ # https://pdm.fming.dev/#use-with-ide
+ .pdm.toml
+
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+ __pypackages__/
+
+ # Celery stuff
+ celerybeat-schedule
+ celerybeat.pid
+
+ # SageMath parsed files
+ *.sage.py
+
+ # Environments
+ .env
+ .venv
+ env/
+ venv/
+ ENV/
+ env.bak/
+ venv.bak/
+
+ # Spyder project settings
+ .spyderproject
+ .spyproject
+
+ # Rope project settings
+ .ropeproject
+
+ # mkdocs documentation
+ /site

  # mypy
  .mypy_cache/
  .dmypy.json
+ dmypy.json

  # Pyre type checker
  .pyre/

- # Gradio cache
- log/
- logs/
-
- # System files
- .DS_Store
- Thumbs.db
+ # pytype static type analyzer
+ .pytype/

- # Lock files
- uv.lock
- poetry.lock
- Pipfile.lock
+ # Cython debug symbols
+ cython_debug/

- # VSCode
- .vscode/
+ # PyCharm
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
+ #.idea/
.pre-commit-config.yaml ADDED
@@ -0,0 +1,32 @@
+ repos:
+   - repo: https://github.com/pre-commit/pre-commit-hooks
+     rev: v5.0.0
+     hooks:
+       - id: check-executables-have-shebangs
+       - id: check-json
+       - id: check-merge-conflict
+       - id: check-shebang-scripts-are-executable
+       - id: check-toml
+       - id: check-yaml
+       - id: end-of-file-fixer
+       - id: mixed-line-ending
+         args: ["--fix=lf"]
+       - id: requirements-txt-fixer
+       - id: trailing-whitespace
+   - repo: https://github.com/astral-sh/ruff-pre-commit
+     rev: v0.8.6
+     hooks:
+       - id: ruff
+         args: ["--fix"]
+   - repo: https://github.com/pre-commit/mirrors-mypy
+     rev: v1.14.1
+     hooks:
+       - id: mypy
+         args: ["--ignore-missing-imports"]
+         additional_dependencies:
+           [
+             "types-python-slugify",
+             "types-requests",
+             "types-PyYAML",
+             "types-pytz",
+           ]
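With this config in place, the hooks run automatically on each commit once installed. A minimal sketch of the one-time setup, assuming `pre-commit` itself is already installed (`pip install pre-commit`):

```python
import subprocess

# Install the git hook, then run every configured hook against the whole
# tree once -- the usual first step after adding .pre-commit-config.yaml.
subprocess.run(["pre-commit", "install"], check=True)
subprocess.run(["pre-commit", "run", "--all-files"], check=True)
```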
.python-version ADDED
@@ -0,0 +1 @@
+ 3.10
.vscode/extensions.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "recommendations": [
+     "ms-python.python",
+     "charliermarsh.ruff",
+     "streetsidesoftware.code-spell-checker",
+     "tamasfe.even-better-toml"
+   ]
+ }
.vscode/settings.json ADDED
@@ -0,0 +1,16 @@
+ {
+   "editor.formatOnSave": true,
+   "files.insertFinalNewline": false,
+   "[python]": {
+     "editor.defaultFormatter": "charliermarsh.ruff",
+     "editor.formatOnType": true,
+     "editor.codeActionsOnSave": {
+       "source.fixAll.ruff": "explicit"
+     }
+   },
+   "[jupyter]": {
+     "files.insertFinalNewline": false
+   },
+   "notebook.output.scrolling": true,
+   "notebook.formatOnSave.enabled": true
+ }
README.md CHANGED
@@ -4,359 +4,10 @@ emoji: 🏢
  colorFrom: indigo
  colorTo: indigo
  sdk: gradio
- sdk_version: 5.36.2
+ sdk_version: 5.23.3
  app_file: app.py
  pinned: false
  disable_embedding: true
  ---

- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
-
- # AnyCoder - AI Code Generator
-
- AnyCoder is an AI-powered code generator that helps you create applications by describing them in plain English. It supports multiple AI models and can generate HTML/CSS/JavaScript code for web applications.
-
- ## Features
-
- - **Multi-Model Support**: Choose from various AI models including DeepSeek, ERNIE-4.5-VL, MiniMax, and Qwen
- - **Image-to-Code**: Upload UI design images and get corresponding HTML/CSS code (ERNIE-4.5-VL model)
- - **Image Text Extraction**: Upload images and extract text using OCR for processing
- - **Website Redesign**: Enter a website URL to extract content and redesign it with modern, responsive layouts
- - **Live Preview**: See your generated code in action with the built-in sandbox
- - **Web Search Integration**: Enable real-time web search to get the latest information and best practices
- - **Chat History**: Keep track of your conversations and generated code
- - **Quick Examples**: Pre-built examples to get you started quickly
- - **🚀 One-Click Deployment**: Deploy your generated applications directly to Hugging Face Spaces
-
- ## Installation
-
- 1. Clone the repository:
- ```bash
- git clone <repository-url>
- cd anycoder
- ```
-
- 2. Install dependencies:
- ```bash
- pip install -r requirements.txt
- ```
-
- 3. Set up environment variables:
- ```bash
- export HF_TOKEN="your_huggingface_token"
- export TAVILY_API_KEY="your_tavily_api_key"  # Optional, for web search feature
- ```
-
- ## Usage
-
- 1. Run the application:
- ```bash
- python app.py
- ```
-
- 2. Open your browser and navigate to the provided URL
-
- 3. Describe your application in the text input field
-
- 4. Optionally:
-    - Upload a UI design image (for ERNIE-4.5-VL model)
-    - Enable web search to get the latest information
-    - Choose a different AI model
-
- 5. Click "Generate" to create your code
-
- 6. View the generated code in the Code Editor tab or see it in action in the Live Preview tab
-
- 7. **Deploy to Space**: Enter a title and click "🚀 Deploy to Space" to publish your application
-
- ## 🚀 Deployment Feature
-
- AnyCoder now includes one-click deployment to Hugging Face Spaces! This feature allows you to:
-
- ### How to Deploy
-
- 1. **Login**: Click the "Sign in with Hugging Face" button in the sidebar
- 2. **Authorize Permissions**: When the authorization page appears, make sure to grant ALL the requested permissions:
-    - ✅ **read-repos** - Read access to repositories
-    - ✅ **write-repos** - Write access to create repositories
-    - ✅ **manage-repos** - Manage repository settings
- 3. **Complete Authorization**: Click "Authorize" to complete the login
- 4. **Generate Code**: Generate some HTML code using the AI
- 5. **Enter Title**: In the sidebar, enter a title for your space (e.g., "My Todo App")
- 6. **Deploy**: Click the "🚀 Deploy to Space" button
- 7. **Share**: Get a shareable URL for your deployed application
-
- **Important**: You must grant ALL three permissions during the OAuth authorization process. If you only grant partial permissions, deployment will fail.
-
- **Note**: You need to be logged in with your Hugging Face account to deploy. This ensures that:
- - Deployments are created under your own account namespace
- - You can manage and update your spaces from your Hugging Face dashboard
- - Each deployment gets a unique URL under your username
-
- **Technical Note**: The deployment uses your personal OAuth token to create spaces under your account, ensuring full security and ownership of your deployed applications.
-
- ### Troubleshooting Deployment Issues
-
- If you encounter permission errors during deployment:
-
- 1. **Check Permissions**: Make sure you granted all three required permissions during login
- 2. **Logout and Login Again**: Click logout and sign in again, ensuring all permissions are granted
- 3. **Account Status**: Verify your Hugging Face account allows repository creation
- 4. **Network Issues**: Check your internet connection and try again
- 5. **Contact Support**: If issues persist, contact Hugging Face support
-
- ### What Gets Deployed
-
- - **Complete HTML Application**: Your generated code wrapped in a professional template
- - **Responsive Design**: Mobile-friendly layout with modern styling
- - **Project Documentation**: README with project details and prompts used
- - **Live URL**: Publicly accessible URL that anyone can visit
-
- ### Deployment Benefits
-
- - **Instant Publishing**: No need to set up hosting or domains
- - **Shareable**: Get a public URL to share with others
- - **Professional**: Clean, branded presentation of your work
- - **Version Control**: Each deployment creates a new space with timestamp
- - **Free Hosting**: Hosted on Hugging Face's infrastructure
-
- ### Example Deployment
-
- ```
- Title: "My Weather Dashboard"
- Generated Code: <div>Weather app HTML...</div>
- Result: https://huggingface.co/spaces/my-weather-dashboard-1234567890
- ```
-
- The deployed space will include:
- - Your application with professional styling
- - A header with your title and AnyCoder branding
- - A footer with attribution
- - A README documenting the project
-
- ## Web Search Feature
-
- The web search feature uses Tavily to provide real-time information when generating code. To enable this feature:
-
- 1. Get a free Tavily API key from [Tavily Platform](https://tavily.com/)
- 2. Set the `TAVILY_API_KEY` environment variable
- 3. Toggle the "🔍 Enable Web Search" checkbox in the sidebar
-
- When enabled, the AI will search the web for the latest information, best practices, and technologies related to your request.
-
- ## Image Text Extraction
-
- The application supports extracting text from images using OCR (Optical Character Recognition). This feature allows you to:
-
- 1. Upload image files (JPG, PNG, BMP, TIFF, GIF, WebP) through the file input
- 2. Automatically extract text from the images using Tesseract OCR
- 3. Include the extracted text in your prompts for code generation
-
- ### Setting up OCR
-
- To use the image text extraction feature, you need to install Tesseract OCR on your system. See `install_tesseract.md` for detailed installation instructions.
-
- **Example usage:**
- - Upload an image containing text (like a screenshot, document, or handwritten notes)
- - The application will extract the text and include it in your prompt
- - You can then ask the AI to process, summarize, or work with the extracted text
-
- ## Website Redesign Feature
-
- The website redesign feature allows you to extract content from existing websites and generate modern, responsive redesigns. This feature:
-
- 1. **Extracts Website Content**: Automatically scrapes the target website to extract:
-    - Page title and meta description
-    - Navigation menu structure
-    - Main content sections
-    - Images and their descriptions
-    - Overall page structure and purpose
-
- 2. **Generates Modern Redesigns**: Creates improved versions with:
-    - Modern, responsive layouts
-    - Enhanced user experience
-    - Better accessibility
-    - Mobile-first design principles
-    - Current design trends and best practices
-
- ### How to Use Website Redesign
-
- 1. **Enter a Website URL**: In the "🌐 Website URL (for redesign)" field, enter the URL of the website you want to redesign
-    - Example: `https://example.com`
-    - The URL can be with or without `https://`
-
- 2. **Add Custom Requirements**: Optionally describe specific improvements you want:
-    - "Make it more modern and minimalist"
-    - "Add a dark mode toggle"
-    - "Improve the mobile layout"
-    - "Use a different color scheme"
-
- 3. **Enable Web Search**: Toggle the web search feature to get the latest design trends and best practices
-
- 4. **Generate**: Click "Generate" to create your redesigned website
-
- ### Example Usage
-
- ```
- URL: https://example.com
- Description: Redesign this website with a modern, minimalist approach. Use a clean typography and improve the mobile experience.
- ```
-
- The AI will analyze the original website content and create a completely redesigned version that maintains the core functionality while providing a better user experience.
-
- ### Supported Websites
-
- The feature works with most public websites, including:
- - Business websites
- - Portfolio sites
- - Blog platforms
- - E-commerce sites
- - Landing pages
- - Documentation sites
-
- **Note**: Some websites may block automated access or require JavaScript to load content. In such cases, the extraction may be limited.
-
- ## Available Models
-
- - **DeepSeek V3**: Advanced code generation model
- - **DeepSeek R1**: Specialized for code generation tasks
- - **ERNIE-4.5-VL**: Multimodal model with image support
- - **MiniMax M1**: General-purpose AI model
- - **Qwen3-235B-A22B**: Large language model for code generation
-
- ## Environment Variables
-
- - `HF_TOKEN`: Your Hugging Face API token (required)
- - `TAVILY_API_KEY`: Your Tavily API key (optional, for web search)
-
- ## License
-
- [Add your license information here]
-
- ## Project Structure
-
- ```
- anycoder/
- ├── app.py           # Main application (everything included)
- ├── app.css          # Basic styling
- ├── pyproject.toml   # Dependencies
- └── README.md        # This file
- ```
-
- ## Setup
-
- 1. Set your Hugging Face API token:
- ```bash
- export HF_TOKEN="your_huggingface_token"
- ```
-
- 2. Install dependencies:
- ```bash
- uv sync
- ```
-
- 3. Run the application:
- ```bash
- uv run python app.py
- ```
-
- ## Usage
-
- 1. **Sign in with your Hugging Face account** using the login button at the top left.
- 2. Enter your application requirements in the text area
- 3. Click "send" to generate code
- 4. View the generated code in the code drawer
- 5. See the live preview in the sandbox area
- 6. Use example cards for quick prompts
-
- ## Code Example
-
- ```python
- import os
- from huggingface_hub import InferenceClient
-
- client = InferenceClient(
-     provider="novita",
-     api_key=os.environ["HF_TOKEN"],
-     bill_to="huggingface"
- )
-
- completion = client.chat.completions.create(
-     model="deepseek-ai/DeepSeek-V3-0324",
-     messages=[
-         {
-             "role": "user",
-             "content": "Create a simple todo app"
-         }
-     ],
- )
- ```
-
- ## Architecture
-
- The application uses:
- - **Gradio**: For the web interface
- - **Hugging Face Hub**: For model inference
- - **ModelScope Studio**: For UI components
- - **OAuth Login**: Requires users to sign in with Hugging Face for code generation
- - **Streaming**: For real-time code generation
-
- # Hugging Face Coder
-
- A Gradio-based application that uses Hugging Face models to generate code based on user requirements. The app supports both text-only and multimodal (text + image) code generation.
-
- ## Features
-
- - **Multiple Model Support**: DeepSeek V3, DeepSeek R1, and ERNIE-4.5-VL
- - **Multimodal Input**: Upload images to help describe your requirements
- - **Real-time Code Generation**: Stream responses from the models
- - **Live Preview**: See your generated code in action with the built-in sandbox
- - **History Management**: Keep track of your previous generations
- - **Example Templates**: Quick-start with predefined application templates
-
- ## Setup
-
- 1. Install dependencies:
- ```bash
- pip install -r requirements.txt
- ```
-
- 2. Set your Hugging Face API token as an environment variable:
- ```bash
- export HF_TOKEN="your_huggingface_token"
- ```
-
- 3. Run the application:
- ```bash
- python app.py
- ```
-
- ## Usage
-
- 1. **Text-only Generation**: Simply type your requirements in the text area
- 2. **Multimodal Generation**: Upload an image and describe what you want to create
- 3. **Model Selection**: Switch between different models using the model selector
- 4. **Examples**: Use the provided example templates to get started quickly
-
- ## Supported Models
-
- - **DeepSeek V3**: General code generation
- - **DeepSeek R1**: Advanced code generation
- - **ERNIE-4.5-VL**: Multimodal code generation with image understanding
-
- ## Environment Variables
-
- - `HF_TOKEN`: Your Hugging Face API token (required)
-
- ## Examples
-
- - Todo App
- - Calculator
- - Weather Dashboard
- - Chat Interface
- - E-commerce Product Card
- - Login Form
- - Dashboard Layout
- - Data Table
- - Image Gallery
- - UI from Image (multimodal)
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
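For reference, the one-click deployment flow described in the removed README maps onto the public `huggingface_hub` API roughly as follows. This is a hypothetical sketch: the repo naming scheme, token plumbing, and the choice of the `static` Space SDK are assumptions, since the Space's actual deployment code is not visible in this view.

```python
from huggingface_hub import HfApi

def deploy_html_to_space(html: str, title: str, oauth_token: str, username: str) -> str:
    # Create (or reuse) a static Space under the user's namespace and push
    # the generated page as index.html. Naming scheme is an assumption.
    repo_id = f"{username}/{title.lower().replace(' ', '-')}"
    api = HfApi(token=oauth_token)
    api.create_repo(repo_id=repo_id, repo_type="space", space_sdk="static", exist_ok=True)
    api.upload_file(
        path_or_fileobj=html.encode("utf-8"),
        path_in_repo="index.html",
        repo_id=repo_id,
        repo_type="space",
    )
    return f"https://huggingface.co/spaces/{repo_id}"
```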
app.py CHANGED
@@ -1,1183 +1,54 @@
- import os
- import re
- from http import HTTPStatus
- from typing import Dict, List, Optional, Tuple
- import base64
- import mimetypes
- import PyPDF2
- import docx
- import cv2
- import numpy as np
- from PIL import Image
- import pytesseract
- import requests
- from urllib.parse import urlparse, urljoin
- from bs4 import BeautifulSoup
- import html2text
- import json
- import time
-
  import gradio as gr
- from huggingface_hub import InferenceClient
- from tavily import TavilyClient
-
- # Search/Replace Constants
- SEARCH_START = "<<<<<<< SEARCH"
- DIVIDER = "======="
- REPLACE_END = ">>>>>>> REPLACE"
-
- # Configuration
- SystemPrompt = """ONLY USE HTML, CSS AND JAVASCRIPT. If you want to use ICON make sure to import the library first. Try to create the best UI possible by using only HTML, CSS and JAVASCRIPT. MAKE IT RESPONSIVE USING TAILWINDCSS. Use as much as you can TailwindCSS for the CSS, if you can't do something with TailwindCSS, then use custom CSS (make sure to import <script src="https://cdn.tailwindcss.com"></script> in the head). Also, try to ellaborate as much as you can, to create something unique. ALWAYS GIVE THE RESPONSE INTO A SINGLE HTML FILE
-
- For website redesign tasks:
- - Use the provided original HTML code as the starting point for redesign
- - Preserve all original content, structure, and functionality
- - Keep the same semantic HTML structure but enhance the styling
- - Reuse all original images and their URLs from the HTML code
- - Create a modern, responsive design with improved typography and spacing
- - Use modern CSS frameworks and design patterns
- - Ensure accessibility and mobile responsiveness
- - Maintain the same navigation and user flow
- - Enhance the visual design while keeping the original layout structure
-
- If an image is provided, analyze it and use the visual information to better understand the user's requirements.
-
- Always respond with code that can be executed or rendered directly.
-
- Always output only the HTML code inside a ```html ... ``` code block, and do not include any explanations or extra text."""
-
- # System prompt with search capability
- SystemPromptWithSearch = """ONLY USE HTML, CSS AND JAVASCRIPT. If you want to use ICON make sure to import the library first. Try to create the best UI possible by using only HTML, CSS and JAVASCRIPT. MAKE IT RESPONSIVE USING TAILWINDCSS. Use as much as you can TailwindCSS for the CSS, if you can't do something with TailwindCSS, then use custom CSS (make sure to import <script src="https://cdn.tailwindcss.com"></script> in the head). Also, try to ellaborate as much as you can, to create something unique. ALWAYS GIVE THE RESPONSE INTO A SINGLE HTML FILE
-
- You have access to real-time web search. When needed, use web search to find the latest information, best practices, or specific technologies.
-
- For website redesign tasks:
- - Use the provided original HTML code as the starting point for redesign
- - Preserve all original content, structure, and functionality
- - Keep the same semantic HTML structure but enhance the styling
- - Reuse all original images and their URLs from the HTML code
- - Use web search to find current design trends and best practices for the specific type of website
- - Create a modern, responsive design with improved typography and spacing
- - Use modern CSS frameworks and design patterns
- - Ensure accessibility and mobile responsiveness
- - Maintain the same navigation and user flow
- - Enhance the visual design while keeping the original layout structure
-
- If an image is provided, analyze it and use the visual information to better understand the user's requirements.
-
- Always respond with code that can be executed or rendered directly.
-
- Always output only the HTML code inside a ```html ... ``` code block, and do not include any explanations or extra text."""
-
- # Follow-up system prompt for modifying existing HTML files
- FollowUpSystemPrompt = f"""You are an expert web developer modifying an existing HTML file.
- The user wants to apply changes based on their request.
- You MUST output ONLY the changes required using the following SEARCH/REPLACE block format. Do NOT output the entire file.
- Explain the changes briefly *before* the blocks if necessary, but the code changes THEMSELVES MUST be within the blocks.
- Format Rules:
- 1. Start with {SEARCH_START}
- 2. Provide the exact lines from the current code that need to be replaced.
- 3. Use {DIVIDER} to separate the search block from the replacement.
- 4. Provide the new lines that should replace the original lines.
- 5. End with {REPLACE_END}
- 6. You can use multiple SEARCH/REPLACE blocks if changes are needed in different parts of the file.
- 7. To insert code, use an empty SEARCH block (only {SEARCH_START} and {DIVIDER} on their lines) if inserting at the very beginning, otherwise provide the line *before* the insertion point in the SEARCH block and include that line plus the new lines in the REPLACE block.
- 8. To delete code, provide the lines to delete in the SEARCH block and leave the REPLACE block empty (only {DIVIDER} and {REPLACE_END} on their lines).
- 9. IMPORTANT: The SEARCH block must *exactly* match the current code, including indentation and whitespace.
- Example Modifying Code:
- ```
- Some explanation...
- {SEARCH_START}
- <h1>Old Title</h1>
- {DIVIDER}
- <h1>New Title</h1>
- {REPLACE_END}
- {SEARCH_START}
- </body>
- {DIVIDER}
- <script>console.log("Added script");</script>
- </body>
- {REPLACE_END}
- ```
- Example Deleting Code:
- ```
- Removing the paragraph...
- {SEARCH_START}
- <p>This paragraph will be deleted.</p>
- {DIVIDER}
- {REPLACE_END}
- ```"""
-
- # Available models
- AVAILABLE_MODELS = [
-     {
-         "name": "Moonshot Kimi-K2",
-         "id": "moonshotai/Kimi-K2-Instruct",
-         "description": "Moonshot AI Kimi-K2-Instruct model for code generation and general tasks"
-     },
-     {
-         "name": "DeepSeek V3",
-         "id": "deepseek-ai/DeepSeek-V3-0324",
-         "description": "DeepSeek V3 model for code generation"
-     },
-     {
-         "name": "DeepSeek R1",
-         "id": "deepseek-ai/DeepSeek-R1-0528",
-         "description": "DeepSeek R1 model for code generation"
-     },
-     {
-         "name": "ERNIE-4.5-VL",
-         "id": "baidu/ERNIE-4.5-VL-424B-A47B-Base-PT",
-         "description": "ERNIE-4.5-VL model for multimodal code generation with image support"
-     },
-     {
-         "name": "MiniMax M1",
-         "id": "MiniMaxAI/MiniMax-M1-80k",
-         "description": "MiniMax M1 model for code generation and general tasks"
-     },
-     {
-         "name": "Qwen3-235B-A22B",
-         "id": "Qwen/Qwen3-235B-A22B",
-         "description": "Qwen3-235B-A22B model for code generation and general tasks"
-     },
-     {
-         "name": "SmolLM3-3B",
-         "id": "HuggingFaceTB/SmolLM3-3B",
-         "description": "SmolLM3-3B model for code generation and general tasks"
-     },
-     {
-         "name": "GLM-4.1V-9B-Thinking",
-         "id": "THUDM/GLM-4.1V-9B-Thinking",
-         "description": "GLM-4.1V-9B-Thinking model for multimodal code generation with image support"
-     }
- ]
-
- DEMO_LIST = [
-     {
-         "title": "Todo App",
-         "description": "Create a simple todo application with add, delete, and mark as complete functionality"
-     },
-     {
-         "title": "Calculator",
-         "description": "Build a basic calculator with addition, subtraction, multiplication, and division"
-     },
-     {
-         "title": "Weather Dashboard",
-         "description": "Create a weather dashboard that displays current weather information"
-     },
-     {
-         "title": "Chat Interface",
-         "description": "Build a chat interface with message history and user input"
-     },
-     {
-         "title": "E-commerce Product Card",
-         "description": "Create a product card component for an e-commerce website"
-     },
-     {
-         "title": "Login Form",
-         "description": "Build a responsive login form with validation"
-     },
-     {
-         "title": "Dashboard Layout",
-         "description": "Create a dashboard layout with sidebar navigation and main content area"
-     },
-     {
-         "title": "Data Table",
-         "description": "Build a data table with sorting and filtering capabilities"
-     },
-     {
-         "title": "Image Gallery",
-         "description": "Create an image gallery with lightbox functionality and responsive grid layout"
-     },
-     {
-         "title": "UI from Image",
-         "description": "Upload an image of a UI design and I'll generate the HTML/CSS code for it"
-     },
-     {
-         "title": "Extract Text from Image",
-         "description": "Upload an image containing text and I'll extract and process the text content"
-     },
-     {
-         "title": "Website Redesign",
-         "description": "Enter a website URL to extract its content and redesign it with a modern, responsive layout"
-     },
-     {
-         "title": "Modify HTML",
-         "description": "After generating HTML, ask me to modify it with specific changes using search/replace format"
-     },
-     {
-         "title": "Search/Replace Example",
-         "description": "Generate HTML first, then ask: 'Change the title to My New Title' or 'Add a blue background to the body'"
-     }
- ]
-
- # HF Inference Client
- HF_TOKEN = os.getenv('HF_TOKEN')
- client = InferenceClient(
-     provider="auto",
-     api_key=HF_TOKEN,
-     bill_to="huggingface"
- )
-
- # Type definitions
- History = List[Tuple[str, str]]
- Messages = List[Dict[str, str]]
-
- # Tavily Search Client
- TAVILY_API_KEY = os.getenv('TAVILY_API_KEY')
- tavily_client = None
- if TAVILY_API_KEY:
-     try:
-         tavily_client = TavilyClient(api_key=TAVILY_API_KEY)
-     except Exception as e:
-         print(f"Failed to initialize Tavily client: {e}")
-         tavily_client = None
-
- def history_to_messages(history: History, system: str) -> Messages:
-     messages = [{'role': 'system', 'content': system}]
-     for h in history:
-         # Handle multimodal content in history
-         user_content = h[0]
-         if isinstance(user_content, list):
-             # Extract text from multimodal content
-             text_content = ""
-             for item in user_content:
-                 if isinstance(item, dict) and item.get("type") == "text":
-                     text_content += item.get("text", "")
-             user_content = text_content if text_content else str(user_content)
-
-         messages.append({'role': 'user', 'content': user_content})
-         messages.append({'role': 'assistant', 'content': h[1]})
-     return messages
-
- def messages_to_history(messages: Messages) -> Tuple[str, History]:
-     assert messages[0]['role'] == 'system'
-     history = []
-     for q, r in zip(messages[1::2], messages[2::2]):
-         # Extract text content from multimodal messages for history
-         user_content = q['content']
-         if isinstance(user_content, list):
-             text_content = ""
-             for item in user_content:
-                 if isinstance(item, dict) and item.get("type") == "text":
-                     text_content += item.get("text", "")
-             user_content = text_content if text_content else str(user_content)
-
-         history.append([user_content, r['content']])
-     return history
-
- def history_to_chatbot_messages(history: History) -> List[Dict[str, str]]:
-     """Convert history tuples to chatbot message format"""
-     messages = []
-     for user_msg, assistant_msg in history:
-         # Handle multimodal content
-         if isinstance(user_msg, list):
-             text_content = ""
-             for item in user_msg:
-                 if isinstance(item, dict) and item.get("type") == "text":
-                     text_content += item.get("text", "")
-             user_msg = text_content if text_content else str(user_msg)
-
-         messages.append({"role": "user", "content": user_msg})
-         messages.append({"role": "assistant", "content": assistant_msg})
-     return messages
-
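A quick round-trip check of the history helpers above (hypothetical values; a minimal sketch assuming the functions are importable as defined):

```python
# history_to_messages prepends the system prompt; messages_to_history drops it
# again and re-pairs the user/assistant turns.
h = [("Make a todo app", "```html\n<div>...</div>\n```")]
msgs = history_to_messages(h, "You are a web developer.")
assert msgs[0] == {'role': 'system', 'content': 'You are a web developer.'}
assert messages_to_history(msgs) == [["Make a todo app", "```html\n<div>...</div>\n```"]]
```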
- def remove_code_block(text):
-     # Try to match code blocks with language markers
-     patterns = [
-         r'```(?:html|HTML)\n([\s\S]+?)\n```', # Match ```html or ```HTML
-         r'```\n([\s\S]+?)\n```', # Match code blocks without language markers
-         r'```([\s\S]+?)```' # Match code blocks without line breaks
-     ]
-     for pattern in patterns:
-         match = re.search(pattern, text, re.DOTALL)
-         if match:
-             extracted = match.group(1).strip()
-             return extracted
-     # If no code block is found, check if the entire text is HTML
-     if text.strip().startswith('<!DOCTYPE html>') or text.strip().startswith('<html') or text.strip().startswith('<'):
-         return text.strip()
-     return text.strip()
-
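`remove_code_block` in action, as a minimal sketch:

```python
# Fenced blocks are unwrapped; bare HTML passes through unchanged.
print(remove_code_block("```html\n<h1>Hi</h1>\n```"))  # <h1>Hi</h1>
print(remove_code_block("<h1>Hi</h1>"))                # <h1>Hi</h1>
```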
- def history_render(history: History):
-     return gr.update(visible=True), history
-
- def clear_history():
-     return [], [], None, "" # Empty lists for both tuple format and chatbot messages, None for file, empty string for website URL
-
- def update_image_input_visibility(model):
-     """Update image input visibility based on selected model"""
-     is_ernie_vl = model.get("id") == "baidu/ERNIE-4.5-VL-424B-A47B-Base-PT"
-     is_glm_vl = model.get("id") == "THUDM/GLM-4.1V-9B-Thinking"
-     return gr.update(visible=is_ernie_vl or is_glm_vl)
-
- def process_image_for_model(image):
-     """Convert image to base64 for model input"""
-     if image is None:
-         return None
-
-     # Convert numpy array to PIL Image if needed
-     import io
-     import base64
-     import numpy as np
-     from PIL import Image
-
-     # Handle numpy array from Gradio
-     if isinstance(image, np.ndarray):
-         image = Image.fromarray(image)
-
-     buffer = io.BytesIO()
-     image.save(buffer, format='PNG')
-     img_str = base64.b64encode(buffer.getvalue()).decode()
-     return f"data:image/png;base64,{img_str}"
-
- def create_multimodal_message(text, image=None):
-     """Create a multimodal message with text and optional image"""
-     if image is None:
-         return {"role": "user", "content": text}
-
-     content = [
-         {
-             "type": "text",
-             "text": text
-         },
-         {
-             "type": "image_url",
-             "image_url": {
-                 "url": process_image_for_model(image)
-             }
-         }
-     ]
-
-     return {"role": "user", "content": content}
-
- def apply_search_replace_changes(original_html: str, changes_text: str) -> str:
-     """Apply search/replace changes to HTML content"""
-     if not changes_text.strip():
-         return original_html
-
-     # Split the changes text into individual search/replace blocks
-     blocks = []
-     current_block = ""
-     lines = changes_text.split('\n')
-
-     for line in lines:
-         if line.strip() == SEARCH_START:
-             if current_block.strip():
-                 blocks.append(current_block.strip())
-             current_block = line + '\n'
-         elif line.strip() == REPLACE_END:
-             current_block += line + '\n'
-             blocks.append(current_block.strip())
-             current_block = ""
-         else:
-             current_block += line + '\n'
-
-     if current_block.strip():
-         blocks.append(current_block.strip())
-
-     modified_html = original_html
-
-     for block in blocks:
-         if not block.strip():
-             continue
-
-         # Parse the search/replace block
-         lines = block.split('\n')
-         search_lines = []
-         replace_lines = []
-         in_search = False
-         in_replace = False
-
-         for line in lines:
-             if line.strip() == SEARCH_START:
-                 in_search = True
-                 in_replace = False
-             elif line.strip() == DIVIDER:
-                 in_search = False
-                 in_replace = True
-             elif line.strip() == REPLACE_END:
-                 in_replace = False
-             elif in_search:
-                 search_lines.append(line)
-             elif in_replace:
-                 replace_lines.append(line)
-
-         # Apply the search/replace
-         if search_lines:
-             search_text = '\n'.join(search_lines).strip()
-             replace_text = '\n'.join(replace_lines).strip()
-
-             if search_text in modified_html:
-                 modified_html = modified_html.replace(search_text, replace_text)
-             else:
-                 print(f"Warning: Search text not found in HTML: {search_text[:100]}...")
-
-     return modified_html
-
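The SEARCH/REPLACE protocol from `FollowUpSystemPrompt` and its applier above can be exercised end to end like this (hypothetical values; names are the module constants defined earlier):

```python
original = "<h1>Old Title</h1>"
changes = f"""{SEARCH_START}
<h1>Old Title</h1>
{DIVIDER}
<h1>New Title</h1>
{REPLACE_END}"""
assert apply_search_replace_changes(original, changes) == "<h1>New Title</h1>"
```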
- # Updated for faster Tavily search and closer prompt usage
- # Uses 'advanced' search_depth and auto_parameters=True for speed and relevance
-
- def perform_web_search(query: str, max_results: int = 5, include_domains=None, exclude_domains=None) -> str:
-     """Perform web search using Tavily with default parameters"""
-     if not tavily_client:
-         return "Web search is not available. Please set the TAVILY_API_KEY environment variable."
-
-     try:
-         # Use Tavily defaults with advanced search depth for better results
-         search_params = {
-             "search_depth": "advanced",
-             "max_results": min(max(1, max_results), 20)
-         }
-         if include_domains is not None:
-             search_params["include_domains"] = include_domains
-         if exclude_domains is not None:
-             search_params["exclude_domains"] = exclude_domains

-         response = tavily_client.search(query, **search_params)
-
-         search_results = []
-         for result in response.get('results', []):
-             title = result.get('title', 'No title')
-             url = result.get('url', 'No URL')
-             content = result.get('content', 'No content')
-             search_results.append(f"Title: {title}\nURL: {url}\nContent: {content}\n")
-
-         if search_results:
-             return "Web Search Results:\n\n" + "\n---\n".join(search_results)
-         else:
-             return "No search results found."
-
-     except Exception as e:
-         return f"Search error: {str(e)}"
-
- def enhance_query_with_search(query: str, enable_search: bool) -> str:
-     """Enhance the query with web search results if search is enabled"""
-     if not enable_search or not tavily_client:
-         return query
-
-     # Perform search to get relevant information
-     search_results = perform_web_search(query)
-
-     # Combine original query with search results
-     enhanced_query = f"""Original Query: {query}
-
- {search_results}
-
- Please use the search results above to help create the requested application with the most up-to-date information and best practices."""
-
-     return enhanced_query
-
- def send_to_sandbox(code):
-     # Add a wrapper to inject necessary permissions and ensure full HTML
-     wrapped_code = f"""
-     <!DOCTYPE html>
-     <html>
-     <head>
-         <meta charset=\"UTF-8\">
-         <meta name=\"viewport\" content=\"width=device-width, initial-scale=1.0\">
-         <script>
-             // Safe localStorage polyfill
-             const safeStorage = {{
-                 _data: {{}},
-                 getItem: function(key) {{ return this._data[key] || null; }},
-                 setItem: function(key, value) {{ this._data[key] = value; }},
-                 removeItem: function(key) {{ delete this._data[key]; }},
-                 clear: function() {{ this._data = {{}}; }}
-             }};
-             Object.defineProperty(window, 'localStorage', {{
-                 value: safeStorage,
-                 writable: false
-             }});
-             window.onerror = function(message, source, lineno, colno, error) {{
-                 console.error('Error:', message);
-             }};
-         </script>
-     </head>
-     <body>
-         {code}
-     </body>
-     </html>
-     """
-     encoded_html = base64.b64encode(wrapped_code.encode('utf-8')).decode('utf-8')
-     data_uri = f"data:text/html;charset=utf-8;base64,{encoded_html}"
-     iframe = f'<iframe src="{data_uri}" width="100%" height="920px" sandbox="allow-scripts allow-same-origin allow-forms allow-popups allow-modals allow-presentation" allow="display-capture"></iframe>'
-     return iframe
-
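The sandbox preview works by inlining the entire wrapped page as a base64 `data:` URI inside an iframe, so nothing is ever written to disk. The encoding step in isolation:

```python
import base64

# Same encoding send_to_sandbox applies to the wrapped HTML document.
html = "<h1>hello</h1>"
uri = "data:text/html;charset=utf-8;base64," + base64.b64encode(html.encode("utf-8")).decode("utf-8")
print(uri)  # data:text/html;charset=utf-8;base64,PGgxPmhlbGxvPC9oMT4=
```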
507
- def demo_card_click(e: gr.EventData):
508
- try:
509
- # Get the index from the event data
510
- if hasattr(e, '_data') and e._data:
511
- # Try different ways to get the index
512
- if 'index' in e._data:
513
- index = e._data['index']
514
- elif 'component' in e._data and 'index' in e._data['component']:
515
- index = e._data['component']['index']
516
- elif 'target' in e._data and 'index' in e._data['target']:
517
- index = e._data['target']['index']
518
- else:
519
- # If we can't get the index, try to extract it from the card data
520
- index = 0
521
- else:
522
- index = 0
523
-
524
- # Ensure index is within bounds
525
- if index >= len(DEMO_LIST):
526
- index = 0
527
-
528
- return DEMO_LIST[index]['description']
529
- except (KeyError, IndexError, AttributeError) as e:
530
- # Return the first demo description as fallback
531
- return DEMO_LIST[0]['description']
532
-
533
- def extract_text_from_image(image_path):
534
- """Extract text from image using OCR"""
535
- try:
536
- # Check if tesseract is available
537
- try:
538
- pytesseract.get_tesseract_version()
539
- except Exception:
540
- return "Error: Tesseract OCR is not installed. Please install Tesseract to extract text from images. See install_tesseract.md for instructions."
541
-
542
- # Read image using OpenCV
543
- image = cv2.imread(image_path)
544
- if image is None:
545
- return "Error: Could not read image file"
546
-
547
- # Convert to RGB (OpenCV uses BGR)
548
- image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
549
-
550
- # Preprocess image for better OCR results
551
- # Convert to grayscale
552
- gray = cv2.cvtColor(image_rgb, cv2.COLOR_RGB2GRAY)
553
-
554
- # Apply thresholding to get binary image
555
- _, binary = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
556
-
557
- # Extract text using pytesseract
558
- text = pytesseract.image_to_string(binary, config='--psm 6')
559
-
560
- return text.strip() if text.strip() else "No text found in image"
561
-
562
- except Exception as e:
563
- return f"Error extracting text from image: {e}"
564
-
565
- def extract_text_from_file(file_path):
566
- if not file_path:
567
- return ""
568
- mime, _ = mimetypes.guess_type(file_path)
569
- ext = os.path.splitext(file_path)[1].lower()
570
- try:
571
- if ext == ".pdf":
572
- with open(file_path, "rb") as f:
573
- reader = PyPDF2.PdfReader(f)
574
- return "\n".join(page.extract_text() or "" for page in reader.pages)
575
- elif ext in [".txt", ".md"]:
576
- with open(file_path, "r", encoding="utf-8") as f:
577
- return f.read()
578
- elif ext == ".csv":
579
- with open(file_path, "r", encoding="utf-8") as f:
580
- return f.read()
581
- elif ext == ".docx":
582
- doc = docx.Document(file_path)
583
- return "\n".join([para.text for para in doc.paragraphs])
584
- elif ext.lower() in [".jpg", ".jpeg", ".png", ".bmp", ".tiff", ".tif", ".gif", ".webp"]:
585
- return extract_text_from_image(file_path)
586
- else:
587
- return ""
588
- except Exception as e:
589
- return f"Error extracting text: {e}"
590
-
591
- def extract_website_content(url: str) -> str:
592
- """Extract HTML code and content from a website URL"""
593
- try:
594
- # Validate URL
595
- parsed_url = urlparse(url)
596
- if not parsed_url.scheme:
597
- url = "https://" + url
598
- parsed_url = urlparse(url)
599
-
600
- if not parsed_url.netloc:
601
- return "Error: Invalid URL provided"
602
-
603
- # Set comprehensive headers to mimic a real browser request
604
- headers = {
605
- 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
606
- 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
607
- 'Accept-Language': 'en-US,en;q=0.9',
608
- 'Accept-Encoding': 'gzip, deflate, br',
609
- 'DNT': '1',
610
- 'Connection': 'keep-alive',
611
- 'Upgrade-Insecure-Requests': '1',
612
- 'Sec-Fetch-Dest': 'document',
613
- 'Sec-Fetch-Mode': 'navigate',
614
- 'Sec-Fetch-Site': 'none',
615
- 'Sec-Fetch-User': '?1',
616
- 'Cache-Control': 'max-age=0'
617
- }
618
-
619
- # Create a session to maintain cookies and handle redirects
620
- session = requests.Session()
621
- session.headers.update(headers)
622
-
623
- # Make the request with retry logic
624
- max_retries = 3
625
- for attempt in range(max_retries):
626
- try:
627
- response = session.get(url, timeout=15, allow_redirects=True)
628
- response.raise_for_status()
629
- break
630
- except requests.exceptions.HTTPError as e:
631
- if e.response.status_code == 403 and attempt < max_retries - 1:
632
- # Try with different User-Agent on 403
633
- session.headers['User-Agent'] = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36'
634
- continue
635
- else:
636
- raise
637
-
638
- # Get the raw HTML content with proper encoding
639
- try:
640
- # Try to get the content with automatic encoding detection
641
- response.encoding = response.apparent_encoding
642
- raw_html = response.text
643
- except:
644
- # Fallback to UTF-8 if encoding detection fails
645
- raw_html = response.content.decode('utf-8', errors='ignore')
646
-
647
- # Debug: Check if we got valid HTML
648
- if not raw_html.strip().startswith('<!DOCTYPE') and not raw_html.strip().startswith('<html'):
649
- print(f"Warning: Response doesn't look like HTML. First 200 chars: {raw_html[:200]}")
650
- print(f"Response headers: {dict(response.headers)}")
651
- print(f"Response encoding: {response.encoding}")
652
- print(f"Apparent encoding: {response.apparent_encoding}")
653
-
654
- # Try alternative approaches
655
- try:
656
- raw_html = response.content.decode('latin-1', errors='ignore')
657
- print("Tried latin-1 decoding")
658
- except:
659
- try:
660
- raw_html = response.content.decode('utf-8', errors='ignore')
661
- print("Tried UTF-8 decoding")
662
- except:
663
- raw_html = response.content.decode('cp1252', errors='ignore')
664
- print("Tried cp1252 decoding")
665
-
666
- # Parse HTML content for analysis
667
- soup = BeautifulSoup(raw_html, 'html.parser')
668
-
669
- # Check if this is a JavaScript-heavy site
670
- script_tags = soup.find_all('script')
671
- if len(script_tags) > 10:
672
- print(f"Warning: This site has {len(script_tags)} script tags - it may be a JavaScript-heavy site")
673
- print("The content might be loaded dynamically and not available in the initial HTML")
674
-
675
- # Extract title
676
- title = soup.find('title')
677
- title_text = title.get_text().strip() if title else "No title found"
678
-
679
- # Extract meta description
680
- meta_desc = soup.find('meta', attrs={'name': 'description'})
681
- description = meta_desc.get('content', '') if meta_desc else ""
682
-
683
- # Extract main content areas for analysis
684
- content_sections = []
685
- main_selectors = [
686
- 'main', 'article', '.content', '.main-content', '.post-content',
687
- '#content', '#main', '.entry-content', '.post-body'
688
- ]
689
-
690
- for selector in main_selectors:
691
- elements = soup.select(selector)
692
- for element in elements:
693
- text = element.get_text().strip()
694
- if len(text) > 100: # Only include substantial content
695
- content_sections.append(text)
696
-
697
- # Extract navigation links for analysis
698
- nav_links = []
699
- nav_elements = soup.find_all(['nav', 'header'])
700
- for nav in nav_elements:
701
- links = nav.find_all('a')
702
- for link in links:
703
- link_text = link.get_text().strip()
704
- link_href = link.get('href', '')
705
- if link_text and link_href:
706
- nav_links.append(f"{link_text}: {link_href}")
707
-
708
- # Extract and fix image URLs in the HTML
709
- img_elements = soup.find_all('img')
710
- for img in img_elements:
711
- src = img.get('src', '')
712
- if src:
713
- # Handle different URL formats
714
- if src.startswith('//'):
715
- # Protocol-relative URL
716
- absolute_src = 'https:' + src
717
- img['src'] = absolute_src
718
- elif src.startswith('/'):
719
- # Root-relative URL
720
- absolute_src = urljoin(url, src)
721
- img['src'] = absolute_src
722
- elif not src.startswith(('http://', 'https://')):
723
- # Relative URL
724
- absolute_src = urljoin(url, src)
725
- img['src'] = absolute_src
726
- # If it's already absolute, keep it as is
727
-
728
- # Also check for data-src (lazy loading) and other common attributes
729
- data_src = img.get('data-src', '')
730
- if data_src and not src:
731
- # Use data-src if src is empty
732
- if data_src.startswith('//'):
733
- absolute_data_src = 'https:' + data_src
734
- img['src'] = absolute_data_src
735
- elif data_src.startswith('/'):
736
- absolute_data_src = urljoin(url, data_src)
737
- img['src'] = absolute_data_src
738
- elif not data_src.startswith(('http://', 'https://')):
739
- absolute_data_src = urljoin(url, data_src)
740
- img['src'] = absolute_data_src
741
- else:
742
- img['src'] = data_src
743
-
744
- # Also fix background image URLs in style attributes
745
- elements_with_style = soup.find_all(attrs={'style': True})
746
- for element in elements_with_style:
747
- style_attr = element.get('style', '')
748
- # Find and replace relative URLs in background-image
749
- import re
750
- bg_pattern = r'background-image:\s*url\(["\']?([^"\']+)["\']?\)'
751
- matches = re.findall(bg_pattern, style_attr, re.IGNORECASE)
752
- for match in matches:
753
- if match:
754
- if match.startswith('//'):
755
- absolute_bg = 'https:' + match
756
- style_attr = style_attr.replace(match, absolute_bg)
757
- elif match.startswith('/'):
758
- absolute_bg = urljoin(url, match)
759
- style_attr = style_attr.replace(match, absolute_bg)
760
- elif not match.startswith(('http://', 'https://')):
761
- absolute_bg = urljoin(url, match)
762
- style_attr = style_attr.replace(match, absolute_bg)
763
- element['style'] = style_attr
764
-
765
- # Fix background images in <style> tags
766
- style_elements = soup.find_all('style')
767
- for style in style_elements:
768
- if style.string:
769
- style_content = style.string
770
- # Find and replace relative URLs in background-image
771
- bg_pattern = r'background-image:\s*url\(["\']?([^"\']+)["\']?\)'
772
- matches = re.findall(bg_pattern, style_content, re.IGNORECASE)
773
- for match in matches:
774
- if match:
775
- if match.startswith('//'):
776
- absolute_bg = 'https:' + match
777
- style_content = style_content.replace(match, absolute_bg)
778
- elif match.startswith('/'):
779
- absolute_bg = urljoin(url, match)
780
- style_content = style_content.replace(match, absolute_bg)
781
- elif not match.startswith(('http://', 'https://')):
782
- absolute_bg = urljoin(url, match)
783
- style_content = style_content.replace(match, absolute_bg)
784
- style.string = style_content
785
-
786
- # Extract images for analysis (after fixing URLs)
787
- images = []
788
- img_elements = soup.find_all('img')
789
- for img in img_elements:
790
- src = img.get('src', '')
791
- alt = img.get('alt', '')
792
- if src:
793
- images.append({'src': src, 'alt': alt})
794
-
795
- # Debug: Print some image URLs to see what we're getting
796
- print(f"Found {len(images)} images:")
797
- for i, img in enumerate(images[:5]): # Show first 5 images
798
- print(f" {i+1}. {img['alt'] or 'No alt'} - {img['src']}")
799
-
800
- # Test a few image URLs to see if they're accessible
801
- def test_image_url(img_url):
802
- try:
803
- test_response = requests.head(img_url, timeout=5, allow_redirects=True)
804
- return test_response.status_code == 200
805
- except:
806
- return False
807
-
808
- # Test first few images
809
- working_images = []
810
- for img in images[:10]: # Test first 10 images
811
- if test_image_url(img['src']):
812
- working_images.append(img)
813
- else:
814
- print(f" ❌ Broken image: {img['src']}")
815
-
816
- print(f"Working images: {len(working_images)} out of {len(images)}")
817
-
818
- # Get the modified HTML with absolute URLs
819
- modified_html = str(soup)
820
-
821
- # Clean and format the HTML for better readability
822
- # Remove unnecessary whitespace and comments
823
- import re
824
- cleaned_html = re.sub(r'<!--.*?-->', '', modified_html, flags=re.DOTALL) # Remove HTML comments
825
- cleaned_html = re.sub(r'\s+', ' ', cleaned_html) # Normalize whitespace
826
- cleaned_html = re.sub(r'>\s+<', '><', cleaned_html) # Remove whitespace between tags
827
-
828
- # Limit HTML size to avoid token limits (keep first 15000 chars)
829
- if len(cleaned_html) > 15000:
830
- cleaned_html = cleaned_html[:15000] + "\n<!-- ... HTML truncated for length ... -->"
831
-
832
- # Check if we got any meaningful content
833
- if not title_text or title_text == "No title found":
834
- title_text = url.split('/')[-1] or url.split('/')[-2] or "Website"
835
-
836
- # If we couldn't extract any meaningful content, provide a fallback
837
- if len(cleaned_html.strip()) < 100:
838
- website_content = f"""
839
- WEBSITE REDESIGN - EXTRACTION FAILED
840
- ====================================
841
-
842
- URL: {url}
843
- Title: {title_text}
844
-
845
- ERROR: Could not extract meaningful HTML content from this website. This could be due to:
846
- 1. The website uses heavy JavaScript to load content dynamically
847
- 2. The website has anti-bot protection
848
- 3. The website requires authentication
849
- 4. The website is using advanced compression or encoding
850
-
851
- FALLBACK APPROACH:
852
- Please create a modern, responsive website design for a {title_text.lower()} website. Since I couldn't extract the original content, you can:
853
-
854
- 1. Create a typical layout for this type of website
855
- 2. Use placeholder content that would be appropriate
856
- 3. Include modern design elements and responsive features
857
- 4. Use a clean, professional design with good typography
858
- 5. Make it mobile-friendly and accessible
859
-
860
- The website appears to be: {title_text}
861
- """
862
- return website_content.strip()
863
-
864
- # Compile the extracted content with the actual HTML code
865
- website_content = f"""
866
- WEBSITE REDESIGN - ORIGINAL HTML CODE
867
- =====================================
868
-
869
- URL: {url}
870
- Title: {title_text}
871
- Description: {description}
872
-
873
- PAGE ANALYSIS:
874
- - This appears to be a {title_text.lower()} website
875
- - Contains {len(content_sections)} main content sections
876
- - Has {len(nav_links)} navigation links
877
- - Includes {len(images)} images
878
-
879
- IMAGES FOUND (use these exact URLs in your redesign):
880
- {chr(10).join([f"• {img['alt'] or 'Image'} - {img['src']}" for img in working_images[:20]]) if working_images else "No working images found"}
881
-
882
- ALL IMAGES (including potentially broken ones):
883
- {chr(10).join([f"• {img['alt'] or 'Image'} - {img['src']}" for img in images[:20]]) if images else "No images found"}
884
-
885
- ORIGINAL HTML CODE (use this as the base for redesign):
886
- ```html
887
- {cleaned_html}
888
- ```
889
-
890
- REDESIGN INSTRUCTIONS:
891
- Please redesign this website with a modern, responsive layout while:
892
- 1. Preserving all the original content and structure
893
- 2. Maintaining the same navigation and functionality
894
- 3. Using the original images and their URLs (listed above)
895
- 4. Creating a modern, clean design with improved typography and spacing
896
- 5. Making it fully responsive for mobile devices
897
- 6. Using modern CSS frameworks and best practices
898
- 7. Keeping the same semantic structure but with enhanced styling
899
-
900
- IMPORTANT: All image URLs in the HTML code above have been converted to absolute URLs and are ready to use. Make sure to preserve these exact image URLs in your redesigned version.
901
-
902
- The HTML code above contains the complete original website structure with all images properly linked. Use it as your starting point and create a modernized version.
903
- """
904
-
905
- return website_content.strip()
906
-
907
- except requests.exceptions.HTTPError as e:
908
- if e.response.status_code == 403:
909
- return f"Error: Website blocked access (403 Forbidden). This website may have anti-bot protection. Try a different website or provide a description of what you want to build instead."
910
- elif e.response.status_code == 404:
911
- return f"Error: Website not found (404). Please check the URL and try again."
912
- elif e.response.status_code >= 500:
913
- return f"Error: Website server error ({e.response.status_code}). Please try again later."
914
- else:
915
- return f"Error accessing website: HTTP {e.response.status_code} - {str(e)}"
916
- except requests.exceptions.Timeout:
917
- return "Error: Request timed out. The website may be slow or unavailable."
918
- except requests.exceptions.ConnectionError:
919
- return "Error: Could not connect to the website. Please check your internet connection and the URL."
920
- except requests.exceptions.RequestException as e:
921
- return f"Error accessing website: {str(e)}"
922
- except Exception as e:
923
- return f"Error extracting website content: {str(e)}"
924
-
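The branch logic being removed above normalizes every relative reference against the page URL. A minimal sketch of the same `urljoin`-based normalization, with illustrative URLs (none taken from a real site):

```python
from urllib.parse import urljoin

base = "https://example.com/blog/post"  # hypothetical page URL

for ref in ("//cdn.example.com/a.png", "/img/b.png", "img/c.png"):
    if ref.startswith("//"):
        absolute = "https:" + ref          # protocol-relative reference
    elif not ref.startswith(("http://", "https://")):
        absolute = urljoin(base, ref)      # root-relative or relative path
    else:
        absolute = ref                     # already absolute
    print(absolute)
# -> https://cdn.example.com/a.png
# -> https://example.com/img/b.png
# -> https://example.com/blog/img/c.png
```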
925
- def generation_code(query: Optional[str], image: Optional[gr.Image], file: Optional[str], website_url: Optional[str], _setting: Dict[str, str], _history: Optional[History], _current_model: Dict, enable_search: bool = False):
926
- if query is None:
927
- query = ''
928
- if _history is None:
929
- _history = []
930
-
931
- # Check if there's existing HTML content in history to determine if this is a modification request
932
- has_existing_html = False
933
- if _history:
934
- # Check the last assistant message for HTML content
935
- last_assistant_msg = _history[-1][1] if len(_history) > 0 else ""
936
- if '<!DOCTYPE html>' in last_assistant_msg or '<html' in last_assistant_msg:
937
- has_existing_html = True
938
-
939
- # Choose system prompt based on context
940
- if has_existing_html:
941
- # Use follow-up prompt for modifying existing HTML
942
- system_prompt = FollowUpSystemPrompt
943
- else:
944
- # Use regular prompt for new generation
945
- system_prompt = SystemPromptWithSearch if enable_search else _setting['system']
946
-
947
- messages = history_to_messages(_history, system_prompt)
948
-
949
- # Extract file text and append to query if file is present
950
- file_text = ""
951
- if file:
952
- file_text = extract_text_from_file(file)
953
- if file_text:
954
- file_text = file_text[:5000] # Limit to 5000 chars for prompt size
955
- query = f"{query}\n\n[Reference file content below]\n{file_text}"
956
-
957
- # Extract website content and append to query if website URL is present
958
- website_text = ""
959
- if website_url and website_url.strip():
960
- website_text = extract_website_content(website_url.strip())
961
- if website_text and not website_text.startswith("Error"):
962
- website_text = website_text[:8000] # Limit to 8000 chars for prompt size
963
- query = f"{query}\n\n[Website content to redesign below]\n{website_text}"
964
- elif website_text.startswith("Error"):
965
- # Provide helpful guidance when website extraction fails
966
- fallback_guidance = """
967
- Since I couldn't extract the website content, please provide additional details about what you'd like to build:
968
-
969
- 1. What type of website is this? (e.g., e-commerce, blog, portfolio, dashboard)
970
- 2. What are the main features you want?
971
- 3. What's the target audience?
972
- 4. Any specific design preferences? (colors, style, layout)
973
-
974
- This will help me create a better design for you."""
975
- query = f"{query}\n\n[Error extracting website: {website_text}]{fallback_guidance}"
976
-
977
- # Enhance query with search if enabled
978
- enhanced_query = enhance_query_with_search(query, enable_search)
979
-
980
- if image is not None:
981
- messages.append(create_multimodal_message(enhanced_query, image))
982
- else:
983
- messages.append({'role': 'user', 'content': enhanced_query})
984
- try:
985
- completion = client.chat.completions.create(
986
- model=_current_model["id"],
987
- messages=messages,
988
- stream=True,
989
- max_tokens=5000
990
- )
991
- content = ""
992
- for chunk in completion:
993
- if chunk.choices[0].delta.content:
994
- content += chunk.choices[0].delta.content
995
- clean_code = remove_code_block(content)
996
- search_status = " (with web search)" if enable_search and tavily_client else ""
997
- yield {
998
- code_output: clean_code,
999
- history_output: history_to_chatbot_messages(_history),
1000
- }
1001
- # Handle response based on whether this is a modification or new generation
1002
- if has_existing_html:
1003
- # Apply search/replace changes to existing HTML
1004
- last_html = _history[-1][1] if _history else ""
1005
- modified_html = apply_search_replace_changes(last_html, remove_code_block(content))
1006
- clean_html = remove_code_block(modified_html)
1007
-
1008
- # Update history with the cleaned HTML
1009
- _history = messages_to_history(messages + [{
1010
- 'role': 'assistant',
1011
- 'content': clean_html
1012
- }])
1013
-
1014
- yield {
1015
- code_output: clean_html,
1016
- history: _history,
1017
- sandbox: send_to_sandbox(clean_html),
1018
- history_output: history_to_chatbot_messages(_history),
1019
- }
1020
- else:
1021
- # Regular generation - use the content as is
1022
- _history = messages_to_history(messages + [{
1023
- 'role': 'assistant',
1024
- 'content': content
1025
- }])
1026
- yield {
1027
- code_output: remove_code_block(content),
1028
- history: _history,
1029
- sandbox: send_to_sandbox(remove_code_block(content)),
1030
- history_output: history_to_chatbot_messages(_history),
1031
- }
1032
- except Exception as e:
1033
- error_message = f"Error: {str(e)}"
1034
- yield {
1035
- code_output: error_message,
1036
- history_output: history_to_chatbot_messages(_history),
1037
- }
1038
-
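The streaming loop in the handler above follows the standard OpenAI-compatible pattern of accumulating delta chunks. A self-contained sketch, assuming an OpenAI-compatible client and a placeholder model id:

```python
from openai import OpenAI

client = OpenAI()  # assumes OPENAI_API_KEY is set; any compatible client works
completion = client.chat.completions.create(
    model="gpt-4o-mini",  # placeholder model id
    messages=[{"role": "user", "content": "Build a landing page"}],
    stream=True,
    max_tokens=5000,
)
content = ""
for chunk in completion:
    delta = chunk.choices[0].delta.content
    if delta:
        content += delta  # accumulate partial tokens as they arrive
```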
1039
- # Main application
1040
- with gr.Blocks(
1041
- theme=gr.themes.Base(
1042
- primary_hue="blue",
1043
- secondary_hue="gray",
1044
- neutral_hue="gray",
1045
- font=gr.themes.GoogleFont("Inter"),
1046
- font_mono=gr.themes.GoogleFont("JetBrains Mono"),
1047
- text_size=gr.themes.sizes.text_md,
1048
- spacing_size=gr.themes.sizes.spacing_md,
1049
- radius_size=gr.themes.sizes.radius_md
1050
- ),
1051
- title="AnyCoder - AI Code Generator"
1052
- ) as demo:
1053
- history = gr.State([])
1054
- setting = gr.State({
1055
- "system": SystemPrompt,
1056
- })
1057
- current_model = gr.State(AVAILABLE_MODELS[1]) # DeepSeek V3
1058
- open_panel = gr.State(None)
1059
-
1060
- with gr.Sidebar():
1061
- gr.Markdown("# AnyCoder")
1062
- gr.Markdown("*AI-Powered Code Generator*")
1063
-
1064
- gr.Markdown("---") # Separator
1065
-
1066
- # Main input section
1067
- input = gr.Textbox(
1068
- label="What would you like to build?",
1069
- placeholder="Describe your application...",
1070
- lines=3
1071
- )
1072
-
1073
- # URL input for website redesign
1074
- website_url_input = gr.Textbox(
1075
- label="Website URL for redesign",
1076
- placeholder="https://example.com",
1077
- lines=1,
1078
- visible=True
1079
- )
1080
-
1081
- # File upload (minimal)
1082
- file_input = gr.File(
1083
- label="Reference file",
1084
- file_types=[".pdf", ".txt", ".md", ".csv", ".docx", ".jpg", ".jpeg", ".png", ".bmp", ".tiff", ".tif", ".gif", ".webp"],
1085
- visible=True
1086
- )
1087
-
1088
- # Image input (only for ERNIE model)
1089
- image_input = gr.Image(
1090
- label="UI design image",
1091
- visible=False
1092
- )
1093
-
1094
- # Action buttons
1095
- with gr.Row():
1096
- btn = gr.Button("Generate", variant="primary", size="lg", scale=2)
1097
- clear_btn = gr.Button("Clear", variant="secondary", size="sm", scale=1)
1098
-
1099
- # Search toggle (minimal)
1100
- search_toggle = gr.Checkbox(
1101
- label="🔍 Web search",
1102
- value=False
1103
- )
1104
-
1105
- # Model selection (minimal)
1106
- model_dropdown = gr.Dropdown(
1107
- choices=[model['name'] for model in AVAILABLE_MODELS],
1108
- value=AVAILABLE_MODELS[1]['name'], # DeepSeek V3
1109
- label="Model"
1110
- )
1111
-
1112
- # Quick examples (minimal)
1113
- gr.Markdown("**Quick start**")
1114
- with gr.Column():
1115
- for i, demo_item in enumerate(DEMO_LIST[:3]):
1116
- demo_card = gr.Button(
1117
- value=demo_item['title'],
1118
- variant="secondary",
1119
- size="sm"
1120
- )
1121
- demo_card.click(
1122
- fn=lambda idx=i: gr.update(value=DEMO_LIST[idx]['description']),
1123
- outputs=input
1124
- )
1125
-
1126
- # Status indicators (minimal)
1127
- if not tavily_client:
1128
- gr.Markdown("⚠️ Web search unavailable")
1129
- else:
1130
- gr.Markdown("✅ Web search available")
1131
-
1132
- # Hidden elements for functionality
1133
- model_display = gr.Markdown(f"**Model:** {AVAILABLE_MODELS[1]['name']}", visible=False) # DeepSeek V3
1134
-
1135
- def on_model_change(model_name):
1136
- for m in AVAILABLE_MODELS:
1137
- if m['name'] == model_name:
1138
- return m, f"**Model:** {m['name']}", update_image_input_visibility(m)
1139
- return AVAILABLE_MODELS[1], f"**Model:** {AVAILABLE_MODELS[1]['name']}", update_image_input_visibility(AVAILABLE_MODELS[1]) # DeepSeek V3 fallback
1140
-
1141
- def save_prompt(input):
1142
- return {setting: {"system": input}}
1143
-
1144
- model_dropdown.change(
1145
- on_model_change,
1146
- inputs=model_dropdown,
1147
- outputs=[current_model, model_display, image_input]
1148
- )
1149
-
1150
- # System prompt (collapsed by default)
1151
- with gr.Accordion("Advanced", open=False):
1152
- systemPromptInput = gr.Textbox(
1153
- value=SystemPrompt,
1154
- label="System prompt",
1155
- lines=5
1156
- )
1157
- save_prompt_btn = gr.Button("Save", variant="primary", size="sm")
1158
- save_prompt_btn.click(save_prompt, inputs=systemPromptInput, outputs=setting)
1159
-
1160
- with gr.Column():
1161
- with gr.Tabs():
1162
- with gr.Tab("Code"):
1163
- code_output = gr.Code(
1164
- language="html",
1165
- lines=25,
1166
- interactive=False,
1167
- label="Generated code"
1168
- )
1169
- with gr.Tab("Preview"):
1170
- sandbox = gr.HTML(label="Live preview")
1171
- with gr.Tab("History"):
1172
- history_output = gr.Chatbot(show_label=False, height=400, type="messages")
1173
-
1174
- # Event handlers
1175
- btn.click(
1176
- generation_code,
1177
- inputs=[input, image_input, file_input, website_url_input, setting, history, current_model, search_toggle],
1178
- outputs=[code_output, history, sandbox, history_output]
1179
  )
1180
- clear_btn.click(clear_history, outputs=[history, history_output, file_input, website_url_input])
1181
 
1182
  if __name__ == "__main__":
1183
- demo.queue(default_concurrency_limit=20).launch(ssr_mode=True, mcp_server=True)
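A note on the pattern in the removed `generation_code`: it streams UI updates by yielding dicts keyed by output components, which Gradio routes to the matching components. Stripped to its essentials (component names here are illustrative):

```python
import time
import gradio as gr

with gr.Blocks() as sketch:
    code_box = gr.Textbox(label="Code")
    status = gr.Textbox(label="Status")
    run = gr.Button("Run")

    def stream():
        # A generator handler; each yielded dict updates only the keyed components.
        for i in range(3):
            time.sleep(0.1)
            yield {code_box: f"chunk {i}", status: "streaming..."}
        yield {code_box: "done", status: "finished"}

    run.click(stream, outputs=[code_box, status])

if __name__ == "__main__":
    sketch.launch()
```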
 
1
+ from app_huggingface import demo as demo_huggingface
2
+ from app_gemini_coder import demo as demo_gemini
3
+ from utils import get_app
 
4
  import gradio as gr
5
 
6
+ # Create mapping of providers to their code snippets
7
+ PROVIDER_SNIPPETS = {
8
+ "Hugging Face": """
9
+ import gradio as gr
10
+ import ai_gradio
11
+ gr.load(
12
+ name='huggingface:deepseek-ai/DeepSeek-R1',
13
+ src=ai_gradio.registry,
14
+ coder=True,
15
+ provider="together"
16
+ ).launch()""",
17
+ "Gemini Coder": """
18
+ import gradio as gr
19
+ import ai_gradio
20
+ gr.load(
21
+ name='gemini:gemini-2.5-pro-exp-03-25',
22
+ src=ai_gradio.registry,
23
+ coder=True,
24
+ provider="together"
25
+ ).launch()
26
+ """,
27
+ }
28
+ # Create mapping of providers to their demos
29
+ PROVIDERS = {
30
+ "Hugging Face": demo_huggingface,
31
+ "Gemini Coder": demo_gemini,
32
+ }
33
+
34
+ # Modified get_app implementation
35
+ demo = gr.Blocks()
36
+ with demo:
37
+
38
+ provider_dropdown = gr.Dropdown(choices=list(PROVIDERS.keys()), value="Hugging Face", label="Select code snippet")
39
+ code_display = gr.Code(label="Provider Code Snippet", language="python", value=PROVIDER_SNIPPETS["Hugging Face"])
40
+
41
+ def update_code(provider):
42
+ return PROVIDER_SNIPPETS.get(provider, "Code snippet not available")
43
+
44
+ provider_dropdown.change(fn=update_code, inputs=[provider_dropdown], outputs=[code_display])
45
+
46
+ selected_demo = get_app(
47
+ models=list(PROVIDERS.keys()),
48
+ default_model="Hugging Face",
49
+ src=PROVIDERS,
50
+ dropdown_label="Select Provider",
51
  )
 
52
 
53
  if __name__ == "__main__":
54
+ demo.queue(api_open=False).launch(show_api=False)
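`get_app` comes from a `utils` module that is not part of this diff, so only its call signature is visible. Judging from the call sites (a model list, a default, a `src` registry or demo mapping, and a dropdown label), a plausible minimal shape is sketched below; every detail of the body is an inference, not the actual implementation:

```python
# Speculative reconstruction of utils.get_app, inferred from call sites only.
import gradio as gr

def get_app(models, default_model, src, dropdown_label="Select Model", **kwargs):
    with gr.Blocks() as app:
        dropdown = gr.Dropdown(choices=models, value=default_model, label=dropdown_label)

        @gr.render(inputs=dropdown)
        def render_model(model):
            # For registry sources this would defer to gr.load; a dict of demos
            # (as in app.py above) would instead render the selected demo.
            gr.load(name=model, src=src, **kwargs)

    return app
```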
app_allenai.py ADDED
@@ -0,0 +1,67 @@
1
+ import gradio as gr
2
+ from gradio_client import Client
3
+
4
+ MODELS = {"OLMo-2-1124-13B-Instruct": "akhaliq/olmo-anychat", "Llama-3.1-Tulu-3-8B": "akhaliq/allen-test"}
5
+
6
+
7
+ def create_chat_fn(client):
8
+ def chat(message, history):
9
+ response = client.predict(
10
+ message=message,
11
+ system_prompt="You are a helpful AI assistant.",
12
+ temperature=0.7,
13
+ max_new_tokens=1024,
14
+ top_k=40,
15
+ repetition_penalty=1.1,
16
+ top_p=0.95,
17
+ api_name="/chat",
18
+ )
19
+ return response
20
+
21
+ return chat
22
+
23
+
24
+ def set_client_for_session(model_name, request: gr.Request):
25
+ headers = {}
26
+ if request and hasattr(request, "request") and hasattr(request.request, "headers"):
27
+ x_ip_token = request.request.headers.get("x-ip-token")
28
+ if x_ip_token:
29
+ headers["X-IP-Token"] = x_ip_token
30
+
31
+ return Client(MODELS[model_name], headers=headers)
32
+
33
+
34
+ def safe_chat_fn(message, history, client):
35
+ if client is None:
36
+ return "Error: Client not initialized. Please refresh the page."
37
+ return create_chat_fn(client)(message, history)
38
+
39
+
40
+ with gr.Blocks() as demo:
41
+ client = gr.State()
42
+
43
+ model_dropdown = gr.Dropdown(
44
+ choices=list(MODELS.keys()), value="OLMo-2-1124-13B-Instruct", label="Select Model", interactive=True
45
+ )
46
+
47
+ chat_interface = gr.ChatInterface(fn=safe_chat_fn, additional_inputs=[client])
48
+
49
+ # Update client when model changes
50
+ def update_model(model_name, request):
51
+ return set_client_for_session(model_name, request)
52
+
53
+ model_dropdown.change(
54
+ fn=update_model,
55
+ inputs=[model_dropdown],
56
+ outputs=[client],
57
+ )
58
+
59
+ # Initialize client on page load
60
+ demo.load(
61
+ fn=set_client_for_session,
62
+ inputs=gr.State("OLMo-2-1124-13B-Instruct"),
63
+ outputs=client,
64
+ )
65
+
66
+ if __name__ == "__main__":
67
+ demo.launch()
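The `x-ip-token` forwarding above lets the downstream Space attribute requests to the original visitor rather than to this proxy app. The same endpoint can be exercised directly with `gradio_client`, using the call shape from the file itself:

```python
from gradio_client import Client

client = Client("akhaliq/olmo-anychat")  # Space name taken from MODELS above
reply = client.predict(
    message="Hello!",
    system_prompt="You are a helpful AI assistant.",
    temperature=0.7,
    max_new_tokens=1024,
    top_k=40,
    repetition_penalty=1.1,
    top_p=0.95,
    api_name="/chat",
)
print(reply)
```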
app_cerebras.py ADDED
@@ -0,0 +1,19 @@
1
+ import os
2
+
3
+ import cerebras_gradio
4
+
5
+ from utils import get_app
6
+
7
+ demo = get_app(
8
+ models=[
9
+ "llama3.1-8b",
10
+ "llama3.1-70b",
11
+ "llama3.1-405b",
12
+ ],
13
+ default_model="llama3.1-70b",
14
+ src=cerebras_gradio.registry,
15
+ accept_token=not os.getenv("CEREBRAS_API_KEY"),
16
+ )
17
+
18
+ if __name__ == "__main__":
19
+ demo.launch()
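The `accept_token=not os.getenv(...)` idiom here recurs throughout these provider files: when the provider's key is absent from the environment, the UI prompts the visitor for their own token instead. In isolation:

```python
import os

# True (so the UI asks for a token) only when the env var is unset or empty.
accept_token = not os.getenv("CEREBRAS_API_KEY")
```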
app_claude.py ADDED
@@ -0,0 +1,21 @@
1
+ import os
2
+
3
+ import anthropic_gradio
4
+
5
+ from utils import get_app
6
+
7
+ demo = get_app(
8
+ models=[
9
+ "claude-3-5-sonnet-20241022",
10
+ "claude-3-5-haiku-20241022",
11
+ "claude-3-opus-20240229",
12
+ "claude-3-sonnet-20240229",
13
+ "claude-3-haiku-20240307",
14
+ ],
15
+ default_model="claude-3-5-sonnet-20241022",
16
+ src=anthropic_gradio.registry,
17
+ accept_token=not os.getenv("ANTHROPIC_API_KEY"),
18
+ )
19
+
20
+ if __name__ == "__main__":
21
+ demo.launch()
app_cohere.py ADDED
@@ -0,0 +1,21 @@
1
+ import os
2
+
3
+ import cohere_gradio
4
+
5
+ from utils import get_app
6
+
7
+ demo = get_app(
8
+ models=[
9
+ "command-r",
10
+ "command-r-08-2024",
11
+ "command-r-plus",
12
+ "command-r-plus-08-2024",
13
+ "command-r7b-12-2024",
14
+ ],
15
+ default_model="command-r7b-12-2024",
16
+ src=cohere_gradio.registry,
17
+ accept_token=not os.getenv("COHERE_API_KEY"),
18
+ )
19
+
20
+ if __name__ == "__main__":
21
+ demo.launch()
app_compare.py ADDED
@@ -0,0 +1,210 @@
1
+ import os
2
+
3
+ import google.generativeai as genai
4
+ import gradio as gr
5
+ import openai
6
+ from anthropic import Anthropic
7
+ from openai import OpenAI # Add explicit OpenAI import
8
+
9
+
10
+ def get_all_models():
11
+ """Get all available models from the registries."""
12
+ return [
13
+ "SambaNova: Meta-Llama-3.2-1B-Instruct",
14
+ "SambaNova: Meta-Llama-3.2-3B-Instruct",
15
+ "SambaNova: Llama-3.2-11B-Vision-Instruct",
16
+ "SambaNova: Llama-3.2-90B-Vision-Instruct",
17
+ "SambaNova: Meta-Llama-3.1-8B-Instruct",
18
+ "SambaNova: Meta-Llama-3.1-70B-Instruct",
19
+ "SambaNova: Meta-Llama-3.1-405B-Instruct",
20
+ "Hyperbolic: Qwen/Qwen2.5-Coder-32B-Instruct",
21
+ "Hyperbolic: meta-llama/Llama-3.2-3B-Instruct",
22
+ "Hyperbolic: meta-llama/Meta-Llama-3.1-8B-Instruct",
23
+ "Hyperbolic: meta-llama/Meta-Llama-3.1-70B-Instruct",
24
+ "Hyperbolic: meta-llama/Meta-Llama-3-70B-Instruct",
25
+ "Hyperbolic: NousResearch/Hermes-3-Llama-3.1-70B",
26
+ "Hyperbolic: Qwen/Qwen2.5-72B-Instruct",
27
+ "Hyperbolic: deepseek-ai/DeepSeek-V2.5",
28
+ "Hyperbolic: meta-llama/Meta-Llama-3.1-405B-Instruct",
29
+ ]
30
+
31
+
32
+ def generate_discussion_prompt(original_question: str, previous_responses: list[str]) -> str:
33
+ """Generate a prompt for models to discuss and build upon previous
34
+ responses.
35
+ """
36
+ prompt = f"""You are participating in a multi-AI discussion about this question: "{original_question}"
37
+
38
+ Previous responses from other AI models:
39
+ {chr(10).join(f"- {response}" for response in previous_responses)}
40
+
41
+ Please provide your perspective while:
42
+ 1. Acknowledging key insights from previous responses
43
+ 2. Adding any missing important points
44
+ 3. Respectfully noting if you disagree with anything and explaining why
45
+ 4. Building towards a complete answer
46
+
47
+ Keep your response focused and concise (max 3-4 paragraphs)."""
48
+ return prompt
49
+
50
+
51
+ def generate_consensus_prompt(original_question: str, discussion_history: list[str]) -> str:
52
+ """Generate a prompt for final consensus building."""
53
+ return f"""Review this multi-AI discussion about: "{original_question}"
54
+
55
+ Discussion history:
56
+ {chr(10).join(discussion_history)}
57
+
58
+ As a final synthesizer, please:
59
+ 1. Identify the key points where all models agreed
60
+ 2. Explain how any disagreements were resolved
61
+ 3. Present a clear, unified answer that represents our collective best understanding
62
+ 4. Note any remaining uncertainties or caveats
63
+
64
+ Keep the final consensus concise but complete."""
65
+
66
+
67
+ def chat_with_openai(model: str, messages: list[dict], api_key: str | None) -> str:
68
+ import openai
69
+
70
+ client = openai.OpenAI(api_key=api_key)
71
+ response = client.chat.completions.create(model=model, messages=messages)
72
+ return response.choices[0].message.content
73
+
74
+
75
+ def chat_with_anthropic(messages: list[dict], api_key: str | None) -> str:
76
+ """Chat with Anthropic's Claude model."""
77
+ client = Anthropic(api_key=api_key)
78
+ response = client.messages.create(model="claude-3-sonnet-20240229", messages=messages, max_tokens=1024)
79
+ return response.content[0].text
80
+
81
+
82
+ def chat_with_gemini(messages: list[dict], api_key: str | None) -> str:
83
+ """Chat with Gemini Pro model."""
84
+ genai.configure(api_key=api_key)
85
+ model = genai.GenerativeModel("gemini-pro")
86
+
87
+ # Convert messages to Gemini format
88
+ gemini_messages = []
89
+ for msg in messages:
90
+ role = "user" if msg["role"] == "user" else "model"
91
+ gemini_messages.append({"role": role, "parts": [msg["content"]]})
92
+
93
+ response = model.generate_content([m["parts"][0] for m in gemini_messages])
94
+ return response.text
95
+
96
+
97
+ def chat_with_sambanova(
98
+ messages: list[dict], api_key: str | None, model_name: str = "Llama-3.2-90B-Vision-Instruct"
99
+ ) -> str:
100
+ """Chat with SambaNova's models using their OpenAI-compatible API."""
101
+ client = openai.OpenAI(
102
+ api_key=api_key,
103
+ base_url="https://api.sambanova.ai/v1",
104
+ )
105
+
106
+ response = client.chat.completions.create(
107
+ model=model_name,
108
+ messages=messages,
109
+ temperature=0.1,
110
+ top_p=0.1, # Use the specific model name passed in
111
+ )
112
+ return response.choices[0].message.content
113
+
114
+
115
+ def chat_with_hyperbolic(
116
+ messages: list[dict], api_key: str | None, model_name: str = "Qwen/Qwen2.5-Coder-32B-Instruct"
117
+ ) -> str:
118
+ """Chat with Hyperbolic's models using their OpenAI-compatible API."""
119
+ client = OpenAI(api_key=api_key, base_url="https://api.hyperbolic.xyz/v1")
120
+
121
+ # Add system message to the start of the messages list
122
+ full_messages = [
123
+ {"role": "system", "content": "You are a helpful assistant. Be descriptive and clear."},
124
+ *messages,
125
+ ]
126
+
127
+ response = client.chat.completions.create(
128
+ model=model_name, # Use the specific model name passed in
129
+ messages=full_messages,
130
+ temperature=0.7,
131
+ max_tokens=1024,
132
+ )
133
+ return response.choices[0].message.content
134
+
135
+
136
+ def multi_model_consensus(
137
+ question: str, selected_models: list[str], rounds: int = 3, progress: gr.Progress = gr.Progress()
138
+ ) -> list[tuple[str, str]]:
139
+ if not selected_models:
140
+ raise gr.Error("Please select at least one model to chat with.")
141
+
142
+ chat_history = []
143
+ progress(0, desc="Getting responses from all models...")
144
+
145
+ # Get responses from all models in parallel
146
+ for i, model in enumerate(selected_models):
147
+ provider, model_name = model.split(": ", 1)
148
+ progress((i + 1) / len(selected_models), desc=f"Getting response from {model}...")
149
+
150
+ try:
151
+ if provider == "Anthropic":
152
+ api_key = os.getenv("ANTHROPIC_API_KEY")
153
+ response = chat_with_anthropic(messages=[{"role": "user", "content": question}], api_key=api_key)
154
+ elif provider == "SambaNova":
155
+ api_key = os.getenv("SAMBANOVA_API_KEY")
156
+ response = chat_with_sambanova(
157
+ messages=[
158
+ {"role": "system", "content": "You are a helpful assistant"},
159
+ {"role": "user", "content": question},
160
+ ],
161
+ api_key=api_key,
162
+ model_name=model_name,
163
+ )
164
+ elif provider == "Hyperbolic":
165
+ api_key = os.getenv("HYPERBOLIC_API_KEY")
166
+ response = chat_with_hyperbolic(
167
+ messages=[{"role": "user", "content": question}],
168
+ api_key=api_key,
169
+ model_name=model_name,
170
+ )
171
+ else: # Gemini
172
+ api_key = os.getenv("GEMINI_API_KEY")
173
+ response = chat_with_gemini(messages=[{"role": "user", "content": question}], api_key=api_key)
174
+
175
+ chat_history.append((model, response))
176
+ except Exception as e:
177
+ chat_history.append((model, f"Error: {e!s}"))
178
+
179
+ progress(1.0, desc="Done!")
180
+ return chat_history
181
+
182
+
183
+ with gr.Blocks() as demo:
184
+ gr.Markdown("# Model Response Comparison")
185
+ gr.Markdown("""Select multiple models to compare their responses""")
186
+
187
+ with gr.Row():
188
+ with gr.Column():
189
+ model_selector = gr.Dropdown(
190
+ choices=get_all_models(),
191
+ multiselect=True,
192
+ label="Select Models",
193
+ info="Choose models to compare",
194
+ value=["SambaNova: Llama-3.2-90B-Vision-Instruct", "Hyperbolic: Qwen/Qwen2.5-Coder-32B-Instruct"],
195
+ )
196
+
197
+ chatbot = gr.Chatbot(height=600, label="Model Responses")
198
+ msg = gr.Textbox(label="Prompt", placeholder="Ask a question to compare model responses...")
199
+
200
+ def respond(message, selected_models):
201
+ chat_history = multi_model_consensus(message, selected_models, rounds=1)
202
+ return chat_history
203
+
204
+ msg.submit(respond, [msg, model_selector], [chatbot])
205
+
206
+ for fn in demo.fns.values():
207
+ fn.api_name = False
208
+
209
+ if __name__ == "__main__":
210
+ demo.launch()
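All routing in `multi_model_consensus` hangs on the `"Provider: model"` labels returned by `get_all_models`; `split(": ", 1)` splits once, so slashes and extra colons in the model id survive intact:

```python
label = "Hyperbolic: Qwen/Qwen2.5-Coder-32B-Instruct"
provider, model_name = label.split(": ", 1)
assert provider == "Hyperbolic"
assert model_name == "Qwen/Qwen2.5-Coder-32B-Instruct"
```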
app_crew.py ADDED
@@ -0,0 +1,8 @@
1
+ import ai_gradio
2
+ import gradio as gr
3
+
4
+ demo = gr.load(
5
+ name="crewai:gpt-4-turbo",
6
+ crew_type="article", # or 'support'
7
+ src=ai_gradio.registry,
8
+ )
app_deepseek.py ADDED
@@ -0,0 +1,23 @@
1
+ import ai_gradio
2
+
3
+ from utils_ai_gradio import get_app
4
+
5
+ # Get the DeepSeek models but keep their full names for loading
6
+ DEEPSEEK_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("deepseek:")]
7
+
8
+ # Create display names without the prefix
9
+ DEEPSEEK_MODELS_DISPLAY = [k.replace("deepseek:", "") for k in DEEPSEEK_MODELS_FULL]
10
+
11
+
12
+ # Create and launch the interface using get_app utility
13
+ demo = get_app(
14
+ models=DEEPSEEK_MODELS_FULL, # Use the full names with prefix
15
+ default_model=DEEPSEEK_MODELS_FULL[-1],
16
+ dropdown_label="Select DeepSeek Model",
17
+ choices=DEEPSEEK_MODELS_DISPLAY, # Display names without prefix
18
+ fill_height=True,
19
+ coder=True,
20
+ )
21
+
22
+ if __name__ == "__main__":
23
+ demo.launch()
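The prefix-filter pattern above repeats in most provider files of this diff. In generic form (the sample registry stands in for `ai_gradio.registry`, whose actual keys depend on the installed version):

```python
registry = {"deepseek:deepseek-chat": None, "groq:llama-3.1-8b": None}  # illustrative
prefix = "deepseek:"
full_names = [k for k in registry if k.startswith(prefix)]       # used for loading
display_names = [k.replace(prefix, "") for k in full_names]      # shown in dropdown
```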
app_experimental.py ADDED
@@ -0,0 +1,300 @@
1
+ import os
2
+ import random
3
+
4
+ import google.generativeai as genai
5
+ import gradio as gr
6
+ import openai
7
+ from anthropic import Anthropic
8
+ from openai import OpenAI # Add explicit OpenAI import
9
+
10
+
11
+ def get_all_models():
12
+ """Get all available models from the registries."""
13
+ return [
14
+ "SambaNova: Meta-Llama-3.2-1B-Instruct",
15
+ "SambaNova: Meta-Llama-3.2-3B-Instruct",
16
+ "SambaNova: Llama-3.2-11B-Vision-Instruct",
17
+ "SambaNova: Llama-3.2-90B-Vision-Instruct",
18
+ "SambaNova: Meta-Llama-3.1-8B-Instruct",
19
+ "SambaNova: Meta-Llama-3.1-70B-Instruct",
20
+ "SambaNova: Meta-Llama-3.1-405B-Instruct",
21
+ "Hyperbolic: Qwen/Qwen2.5-Coder-32B-Instruct",
22
+ "Hyperbolic: meta-llama/Llama-3.2-3B-Instruct",
23
+ "Hyperbolic: meta-llama/Meta-Llama-3.1-8B-Instruct",
24
+ "Hyperbolic: meta-llama/Meta-Llama-3.1-70B-Instruct",
25
+ "Hyperbolic: meta-llama/Meta-Llama-3-70B-Instruct",
26
+ "Hyperbolic: NousResearch/Hermes-3-Llama-3.1-70B",
27
+ "Hyperbolic: Qwen/Qwen2.5-72B-Instruct",
28
+ "Hyperbolic: deepseek-ai/DeepSeek-V2.5",
29
+ "Hyperbolic: meta-llama/Meta-Llama-3.1-405B-Instruct",
30
+ ]
31
+
32
+
33
+ def generate_discussion_prompt(original_question: str, previous_responses: list[str]) -> str:
34
+ """Generate a prompt for models to discuss and build upon previous
35
+ responses.
36
+ """
37
+ prompt = f"""You are participating in a multi-AI discussion about this question: "{original_question}"
38
+
39
+ Previous responses from other AI models:
40
+ {chr(10).join(f"- {response}" for response in previous_responses)}
41
+
42
+ Please provide your perspective while:
43
+ 1. Acknowledging key insights from previous responses
44
+ 2. Adding any missing important points
45
+ 3. Respectfully noting if you disagree with anything and explaining why
46
+ 4. Building towards a complete answer
47
+
48
+ Keep your response focused and concise (max 3-4 paragraphs)."""
49
+ return prompt
50
+
51
+
52
+ def generate_consensus_prompt(original_question: str, discussion_history: list[str]) -> str:
53
+ """Generate a prompt for final consensus building."""
54
+ return f"""Review this multi-AI discussion about: "{original_question}"
55
+
56
+ Discussion history:
57
+ {chr(10).join(discussion_history)}
58
+
59
+ As a final synthesizer, please:
60
+ 1. Identify the key points where all models agreed
61
+ 2. Explain how any disagreements were resolved
62
+ 3. Present a clear, unified answer that represents our collective best understanding
63
+ 4. Note any remaining uncertainties or caveats
64
+
65
+ Keep the final consensus concise but complete."""
66
+
67
+
68
+ def chat_with_openai(model: str, messages: list[dict], api_key: str | None) -> str:
69
+ import openai
70
+
71
+ client = openai.OpenAI(api_key=api_key)
72
+ response = client.chat.completions.create(model=model, messages=messages)
73
+ return response.choices[0].message.content
74
+
75
+
76
+ def chat_with_anthropic(messages: list[dict], api_key: str | None) -> str:
77
+ """Chat with Anthropic's Claude model."""
78
+ client = Anthropic(api_key=api_key)
79
+ response = client.messages.create(model="claude-3-sonnet-20240229", messages=messages, max_tokens=1024)
80
+ return response.content[0].text
81
+
82
+
83
+ def chat_with_gemini(messages: list[dict], api_key: str | None) -> str:
84
+ """Chat with Gemini Pro model."""
85
+ genai.configure(api_key=api_key)
86
+ model = genai.GenerativeModel("gemini-pro")
87
+
88
+ # Convert messages to Gemini format
89
+ gemini_messages = []
90
+ for msg in messages:
91
+ role = "user" if msg["role"] == "user" else "model"
92
+ gemini_messages.append({"role": role, "parts": [msg["content"]]})
93
+
94
+ response = model.generate_content([m["parts"][0] for m in gemini_messages])
95
+ return response.text
96
+
97
+
98
+ def chat_with_sambanova(
99
+ messages: list[dict], api_key: str | None, model_name: str = "Llama-3.2-90B-Vision-Instruct"
100
+ ) -> str:
101
+ """Chat with SambaNova's models using their OpenAI-compatible API."""
102
+ client = openai.OpenAI(
103
+ api_key=api_key,
104
+ base_url="https://api.sambanova.ai/v1",
105
+ )
106
+
107
+ response = client.chat.completions.create(
108
+ model=model_name,
109
+ messages=messages,
110
+ temperature=0.1,
111
+ top_p=0.1, # Use the specific model name passed in
112
+ )
113
+ return response.choices[0].message.content
114
+
115
+
116
+ def chat_with_hyperbolic(
117
+ messages: list[dict], api_key: str | None, model_name: str = "Qwen/Qwen2.5-Coder-32B-Instruct"
118
+ ) -> str:
119
+ """Chat with Hyperbolic's models using their OpenAI-compatible API."""
120
+ client = OpenAI(api_key=api_key, base_url="https://api.hyperbolic.xyz/v1")
121
+
122
+ # Add system message to the start of the messages list
123
+ full_messages = [
124
+ {"role": "system", "content": "You are a helpful assistant. Be descriptive and clear."},
125
+ *messages,
126
+ ]
127
+
128
+ response = client.chat.completions.create(
129
+ model=model_name, # Use the specific model name passed in
130
+ messages=full_messages,
131
+ temperature=0.7,
132
+ max_tokens=1024,
133
+ )
134
+ return response.choices[0].message.content
135
+
136
+
137
+ def multi_model_consensus(
138
+ question: str, selected_models: list[str], rounds: int = 3, progress: gr.Progress = gr.Progress()
139
+ ) -> list[tuple[str, str]]:
140
+ if not selected_models:
141
+ raise gr.Error("Please select at least one model to chat with.")
142
+
143
+ chat_history = []
144
+ discussion_history = []
145
+
146
+ # Initial responses
147
+ progress(0, desc="Getting initial responses...")
148
+ initial_responses = []
149
+ for i, model in enumerate(selected_models):
150
+ provider, model_name = model.split(": ", 1)
151
+
152
+ try:
153
+ if provider == "Anthropic":
154
+ api_key = os.getenv("ANTHROPIC_API_KEY")
155
+ response = chat_with_anthropic(messages=[{"role": "user", "content": question}], api_key=api_key)
156
+ elif provider == "SambaNova":
157
+ api_key = os.getenv("SAMBANOVA_API_KEY")
158
+ response = chat_with_sambanova(
159
+ messages=[
160
+ {"role": "system", "content": "You are a helpful assistant"},
161
+ {"role": "user", "content": question},
162
+ ],
163
+ api_key=api_key,
164
+ )
165
+ elif provider == "Hyperbolic": # Add Hyperbolic case
166
+ api_key = os.getenv("HYPERBOLIC_API_KEY")
167
+ response = chat_with_hyperbolic(messages=[{"role": "user", "content": question}], api_key=api_key)
168
+ else: # Gemini
169
+ api_key = os.getenv("GEMINI_API_KEY")
170
+ response = chat_with_gemini(messages=[{"role": "user", "content": question}], api_key=api_key)
171
+
172
+ initial_responses.append(f"{model}: {response}")
173
+ discussion_history.append(f"Initial response from {model}:\n{response}")
174
+ chat_history.append((f"Initial response from {model}", response))
175
+ except Exception as e:
176
+ chat_history.append((f"Error from {model}", str(e)))
177
+
178
+ # Discussion rounds
179
+ for round_num in range(rounds):
180
+ progress((round_num + 1) / (rounds + 2), desc=f"Discussion round {round_num + 1}...")
181
+ round_responses = []
182
+
183
+ random.shuffle(selected_models) # Randomize order each round
184
+ for model in selected_models:
185
+ provider, model_name = model.split(": ", 1)
186
+
187
+ try:
188
+ discussion_prompt = generate_discussion_prompt(question, discussion_history)
189
+ if provider == "Anthropic":
190
+ api_key = os.getenv("ANTHROPIC_API_KEY")
191
+ response = chat_with_anthropic(
192
+ messages=[{"role": "user", "content": discussion_prompt}], api_key=api_key
193
+ )
194
+ elif provider == "SambaNova":
195
+ api_key = os.getenv("SAMBANOVA_API_KEY")
196
+ response = chat_with_sambanova(
197
+ messages=[
198
+ {"role": "system", "content": "You are a helpful assistant"},
199
+ {"role": "user", "content": discussion_prompt},
200
+ ],
201
+ api_key=api_key,
202
+ )
203
+ elif provider == "Hyperbolic": # Add Hyperbolic case
204
+ api_key = os.getenv("HYPERBOLIC_API_KEY")
205
+ response = chat_with_hyperbolic(
206
+ messages=[{"role": "user", "content": discussion_prompt}], api_key=api_key
207
+ )
208
+ else: # Gemini
209
+ api_key = os.getenv("GEMINI_API_KEY")
210
+ response = chat_with_gemini(
211
+ messages=[{"role": "user", "content": discussion_prompt}], api_key=api_key
212
+ )
213
+
214
+ round_responses.append(f"{model}: {response}")
215
+ discussion_history.append(f"Round {round_num + 1} - {model}:\n{response}")
216
+ chat_history.append((f"Round {round_num + 1} - {model}", response))
217
+ except Exception as e:
218
+ chat_history.append((f"Error from {model} in round {round_num + 1}", str(e)))
219
+
220
+ # Final consensus
221
+ progress(0.9, desc="Building final consensus...")
222
+ model = selected_models[0]
223
+ provider, model_name = model.split(": ", 1)
224
+
225
+ try:
226
+ consensus_prompt = generate_consensus_prompt(question, discussion_history)
227
+ if provider == "Anthropic":
228
+ api_key = os.getenv("ANTHROPIC_API_KEY")
229
+ final_consensus = chat_with_anthropic(
230
+ messages=[{"role": "user", "content": consensus_prompt}], api_key=api_key
231
+ )
232
+ elif provider == "SambaNova":
233
+ api_key = os.getenv("SAMBANOVA_API_KEY")
234
+ final_consensus = chat_with_sambanova(
235
+ messages=[
236
+ {"role": "system", "content": "You are a helpful assistant"},
237
+ {"role": "user", "content": consensus_prompt},
238
+ ],
239
+ api_key=api_key,
240
+ )
241
+ elif provider == "Hyperbolic": # Add Hyperbolic case
242
+ api_key = os.getenv("HYPERBOLIC_API_KEY")
243
+ final_consensus = chat_with_hyperbolic(
244
+ messages=[{"role": "user", "content": consensus_prompt}], api_key=api_key
245
+ )
246
+ else: # Gemini
247
+ api_key = os.getenv("GEMINI_API_KEY")
248
+ final_consensus = chat_with_gemini(
249
+ messages=[{"role": "user", "content": consensus_prompt}], api_key=api_key
250
+ )
251
+ except Exception as e:
252
+ final_consensus = f"Error getting consensus from {model}: {e!s}"
253
+
254
+ chat_history.append(("Final Consensus", final_consensus))
255
+
256
+ progress(1.0, desc="Done!")
257
+ return chat_history
258
+
259
+
260
+ with gr.Blocks() as demo:
261
+ gr.Markdown("# Experimental Multi-Model Consensus Chat")
262
+ gr.Markdown(
263
+ """Select multiple models to collaborate on answering your question.
264
+ The models will discuss with each other and attempt to reach a consensus.
265
+ Maximum 3 models can be selected at once."""
266
+ )
267
+
268
+ with gr.Row():
269
+ with gr.Column():
270
+ model_selector = gr.Dropdown(
271
+ choices=get_all_models(),
272
+ multiselect=True,
273
+ label="Select Models (max 3)",
274
+ info="Choose up to 3 models to participate in the discussion",
275
+ value=["SambaNova: Llama-3.2-90B-Vision-Instruct", "Hyperbolic: Qwen/Qwen2.5-Coder-32B-Instruct"],
276
+ max_choices=3,
277
+ )
278
+ rounds_slider = gr.Slider(
279
+ minimum=1,
280
+ maximum=2,
281
+ value=1,
282
+ step=1,
283
+ label="Discussion Rounds",
284
+ info="Number of rounds of discussion between models",
285
+ )
286
+
287
+ chatbot = gr.Chatbot(height=600, label="Multi-Model Discussion")
288
+ msg = gr.Textbox(label="Your Question", placeholder="Ask a question for the models to discuss...")
289
+
290
+ def respond(message, selected_models, rounds):
291
+ chat_history = multi_model_consensus(message, selected_models, rounds)
292
+ return chat_history
293
+
294
+ msg.submit(respond, [msg, model_selector, rounds_slider], [chatbot], api_name="consensus_chat")
295
+
296
+ for fn in demo.fns.values():
297
+ fn.api_name = False
298
+
299
+ if __name__ == "__main__":
300
+ demo.launch()
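`generate_discussion_prompt` is plain string templating, so it can be sanity-checked without any API keys; the sample inputs below are made up:

```python
prompt = generate_discussion_prompt(
    original_question="What limits LLM context length?",
    previous_responses=[
        "Attention cost grows quadratically with sequence length.",
        "Positional encodings generalize poorly past training length.",
    ],
)
print(prompt)  # each prior response is rendered as a "- ..." bullet
```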
app_fal.py ADDED
@@ -0,0 +1,16 @@
1
+ import fal_gradio
2
+
3
+ from utils import get_app
4
+
5
+ demo = get_app(
6
+ models=[
7
+ "fal-ai/ltx-video",
8
+ "fal-ai/ltx-video/image-to-video",
9
+ "fal-ai/luma-photon",
10
+ ],
11
+ default_model="fal-ai/luma-photon",
12
+ src=fal_gradio.registry,
13
+ )
14
+
15
+ if __name__ == "__main__":
16
+ demo.launch()
app_fireworks.py ADDED
@@ -0,0 +1,19 @@
1
+ import os
2
+
3
+ import fireworks_gradio
4
+
5
+ from utils import get_app
6
+
7
+ demo = get_app(
8
+ models=[
9
+ "f1-preview",
10
+ "f1-mini-preview",
11
+ "llama-v3p3-70b-instruct",
12
+ ],
13
+ default_model="llama-v3p3-70b-instruct",
14
+ src=fireworks_gradio.registry,
15
+ accept_token=not os.getenv("FIREWORKS_API_KEY"),
16
+ )
17
+
18
+ if __name__ == "__main__":
19
+ demo.launch()
app_gemini.py ADDED
@@ -0,0 +1,22 @@
1
+ import ai_gradio
2
+
3
+ from utils_ai_gradio import get_app
4
+
5
+ # Get the Gemini models but keep their full names for loading
6
+ GEMINI_MODELS_FULL = [k for k in ai_gradio.registry if k.startswith("gemini:")]
7
+
8
+ # Create display names without the prefix
9
+ GEMINI_MODELS_DISPLAY = [k.replace("gemini:", "") for k in GEMINI_MODELS_FULL]
10
+
11
+ # Create and launch the interface using get_app utility
12
+ demo = get_app(
13
+ models=GEMINI_MODELS_FULL, # Use the full names with prefix
14
+ default_model=GEMINI_MODELS_FULL[-1],
15
+ dropdown_label="Select Gemini Model",
16
+ choices=GEMINI_MODELS_DISPLAY, # Display names without prefix
17
+ src=ai_gradio.registry,
18
+ fill_height=True,
19
+ )
20
+
21
+ if __name__ == "__main__":
22
+ demo.launch()
app_gemini_camera.py ADDED
@@ -0,0 +1,23 @@
1
+ import ai_gradio
2
+
3
+ from utils_ai_gradio import get_app
4
+
5
+ # Get the Gemini models but keep their full names for loading
6
+ GEMINI_MODELS_FULL = [k for k in ai_gradio.registry if k.startswith("gemini:")]
7
+
8
+ # Create display names without the prefix
9
+ GEMINI_MODELS_DISPLAY = [k.replace("gemini:", "") for k in GEMINI_MODELS_FULL]
10
+
11
+ # Create and launch the interface using get_app utility
12
+ demo = get_app(
13
+ models=GEMINI_MODELS_FULL, # Use the full names with prefix
14
+ default_model=GEMINI_MODELS_FULL[-2],
15
+ dropdown_label="Select Gemini Model",
16
+ choices=GEMINI_MODELS_DISPLAY, # Display names without prefix
17
+ src=ai_gradio.registry,
18
+ camera=True,
19
+ fill_height=True,
20
+ )
21
+
22
+ if __name__ == "__main__":
23
+ demo.launch()
app_gemini_coder.py ADDED
@@ -0,0 +1,23 @@
1
+ import ai_gradio
2
+
3
+ from utils_ai_gradio import get_app
4
+
5
+ # Get the Gemini models but keep their full names for loading
6
+ GEMINI_MODELS_FULL = [k for k in ai_gradio.registry if k.startswith("gemini:")]
7
+
8
+ # Create display names without the prefix
9
+ GEMINI_MODELS_DISPLAY = [k.replace("gemini:", "") for k in GEMINI_MODELS_FULL]
10
+
11
+ # Create and launch the interface using get_app utility
12
+ demo = get_app(
13
+ models=GEMINI_MODELS_FULL, # Use the full names with prefix
14
+ default_model=GEMINI_MODELS_FULL[0],
15
+ dropdown_label="Select Gemini Model",
16
+ choices=GEMINI_MODELS_DISPLAY, # Display names without prefix
17
+ src=ai_gradio.registry,
18
+ fill_height=True,
19
+ coder=True,
20
+ )
21
+
22
+ if __name__ == "__main__":
23
+ demo.launch()
app_gemini_voice.py ADDED
@@ -0,0 +1,23 @@
1
+ import ai_gradio
2
+
3
+ from utils_ai_gradio import get_app
4
+
5
+ # Get the Gemini models but keep their full names for loading
6
+ GEMINI_MODELS_FULL = [k for k in ai_gradio.registry if k.startswith("gemini:")]
7
+
8
+ # Create display names without the prefix
9
+ GEMINI_MODELS_DISPLAY = [k.replace("gemini:", "") for k in GEMINI_MODELS_FULL]
10
+
11
+ # Create and launch the interface using get_app utility
12
+ demo = get_app(
13
+ models=GEMINI_MODELS_FULL, # Use the full names with prefix
14
+ default_model=GEMINI_MODELS_FULL[-2],
15
+ dropdown_label="Select Gemini Model",
16
+ choices=GEMINI_MODELS_DISPLAY, # Display names without prefix
17
+ src=ai_gradio.registry,
18
+ enable_voice=True,
19
+ fill_height=True,
20
+ )
21
+
22
+ if __name__ == "__main__":
23
+ demo.launch()
app_groq.py ADDED
@@ -0,0 +1,21 @@
1
+ import ai_gradio
2
+
3
+ from utils_ai_gradio import get_app
4
+
5
+ # Get the Groq models from the registry
6
+ GROQ_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("groq:")]
7
+
8
+ # Create display names without the prefix
9
+ GROQ_MODELS_DISPLAY = [k.replace("groq:", "") for k in GROQ_MODELS_FULL]
10
+
11
+ demo = get_app(
12
+ models=GROQ_MODELS_FULL,
13
+ default_model=GROQ_MODELS_FULL[-2],
14
+ src=ai_gradio.registry,
15
+ dropdown_label="Select Groq Model",
16
+ choices=GROQ_MODELS_DISPLAY,
17
+ fill_height=True,
18
+ )
19
+
20
+ if __name__ == "__main__":
21
+ demo.launch()
app_groq_coder.py ADDED
@@ -0,0 +1,23 @@
1
+ import ai_gradio
2
+
3
+ from utils_ai_gradio import get_app
4
+
5
+ # Get the Groq models but keep their full names for loading
6
+ GROQ_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("groq:")]
7
+
8
+ # Create display names without the prefix
9
+ GROQ_MODELS_DISPLAY = [k.replace("groq:", "") for k in GROQ_MODELS_FULL]
10
+
11
+ # Create and launch the interface using get_app utility
12
+ demo = get_app(
13
+ models=GROQ_MODELS_FULL, # Use the full names with prefix
14
+ default_model=GROQ_MODELS_FULL[-1],
15
+ dropdown_label="Select Groq Model",
16
+ choices=GROQ_MODELS_DISPLAY, # Display names without prefix
17
+ fill_height=True,
18
+ coder=True,
19
+ )
20
+
21
+
22
+ if __name__ == "__main__":
23
+ demo.launch()
app_hf.py ADDED
@@ -0,0 +1,17 @@
1
+ from utils import get_app
2
+
3
+ demo = get_app(
4
+ models=[
5
+ "microsoft/Phi-3.5-mini-instruct",
6
+ "HuggingFaceTB/SmolLM2-1.7B-Instruct",
7
+ "google/gemma-2-2b-it",
8
+ "openai-community/gpt2",
9
+ "microsoft/phi-2",
10
+ "TinyLlama/TinyLlama-1.1B-Chat-v1.0",
11
+ ],
12
+ default_model="HuggingFaceTB/SmolLM2-1.7B-Instruct",
13
+ src="models",
14
+ )
15
+
16
+ if __name__ == "__main__":
17
+ demo.launch()
app_huggingface.py ADDED
@@ -0,0 +1,22 @@
1
+ import ai_gradio
2
+
3
+ from utils_ai_gradio import get_app
4
+
5
+ # Get the Hugging Face models but keep their full names for loading
6
+ HUGGINGFACE_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("huggingface:")]
7
+
8
+ # Create display names without the prefix
9
+ HUGGINGFACE_MODELS_DISPLAY = [k.replace("huggingface:", "") for k in HUGGINGFACE_MODELS_FULL]
10
+
11
+
12
+ # Create and launch the interface using get_app utility
13
+ demo = get_app(
14
+ models=HUGGINGFACE_MODELS_FULL, # Use the full names with prefix
15
+ default_model=HUGGINGFACE_MODELS_FULL[0],
16
+ dropdown_label="Select Huggingface Model",
17
+ choices=HUGGINGFACE_MODELS_DISPLAY, # Display names without prefix
18
+ fill_height=True,
19
+ coder=True,
20
+ provider="fireworks-ai",
21
+ bill_to="huggingface"
22
+ )
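Unlike its sibling files, `app_huggingface.py` ends without a launch guard, consistent with its role as an import target for `app.py` above. If standalone use were wanted, the usual guard would apply:

```python
if __name__ == "__main__":
    demo.launch()
```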
app_hyperbolic.py ADDED
@@ -0,0 +1,19 @@
1
+ import ai_gradio
2
+
3
+ from utils_ai_gradio import get_app
4
+
5
+ # Get the hyperbolic models but keep their full names for loading
6
+ HYPERBOLIC_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("hyperbolic:")]
7
+
8
+ # Create display names without the prefix
9
+ HYPERBOLIC_MODELS_DISPLAY = [k.replace("hyperbolic:", "") for k in HYPERBOLIC_MODELS_FULL]
10
+
11
+
12
+ # Create and launch the interface using get_app utility
13
+ demo = get_app(
14
+ models=HYPERBOLIC_MODELS_FULL, # Use the full names with prefix
15
+ default_model=HYPERBOLIC_MODELS_FULL[-2],
16
+ dropdown_label="Select Hyperbolic Model",
17
+ choices=HYPERBOLIC_MODELS_DISPLAY, # Display names without prefix
18
+ fill_height=True,
19
+ )
app_hyperbolic_coder.py ADDED
@@ -0,0 +1,20 @@
1
+ import ai_gradio
2
+
3
+ from utils_ai_gradio import get_app
4
+
5
+ # Get the hyperbolic models but keep their full names for loading
6
+ HYPERBOLIC_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("hyperbolic:")]
7
+
8
+ # Create display names without the prefix
9
+ HYPERBOLIC_MODELS_DISPLAY = [k.replace("hyperbolic:", "") for k in HYPERBOLIC_MODELS_FULL]
10
+
11
+
12
+ # Create and launch the interface using get_app utility
13
+ demo = get_app(
14
+ models=HYPERBOLIC_MODELS_FULL, # Use the full names with prefix
15
+ default_model=HYPERBOLIC_MODELS_FULL[-2],
16
+ dropdown_label="Select Hyperbolic Model",
17
+ choices=HYPERBOLIC_MODELS_DISPLAY, # Display names without prefix
18
+ fill_height=True,
19
+ coder=True,
20
+ )
app_langchain.py ADDED
@@ -0,0 +1,23 @@
1
+ import ai_gradio
2
+
3
+ from utils_ai_gradio import get_app
4
+
5
+ # Get the hyperbolic models but keep their full names for loading
6
+ LANGCHAIN_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("langchain:")]
7
+
8
+ # Create display names without the prefix
9
+ LANGCHAIN_MODELS_DISPLAY = [k.replace("langchain:", "") for k in LANGCHAIN_MODELS_FULL]
10
+
11
+
12
+ # Create and launch the interface using get_app utility
13
+ demo = get_app(
14
+ models=LANGCHAIN_MODELS_FULL, # Use the full names with prefix
15
+ default_model=LANGCHAIN_MODELS_FULL[0],
16
+ dropdown_label="Select Langchain Model",
17
+ choices=LANGCHAIN_MODELS_DISPLAY, # Display names without prefix
18
+ fill_height=True,
19
+ )
20
+
21
+ if __name__ == "__main__":
22
+ demo.launch()
23
+
app_lumaai.py ADDED
@@ -0,0 +1,7 @@
1
+ import gradio as gr
2
+ import lumaai_gradio
3
+
4
+ demo = gr.load(
5
+ name="dream-machine",
6
+ src=lumaai_gradio.registry,
7
+ )
app_marco_o1.py ADDED
@@ -0,0 +1,12 @@
1
+ import gradio as gr
2
+ import spaces
3
+ import transformers_gradio
4
+
5
+ demo = gr.load(name="AIDC-AI/Marco-o1", src=transformers_gradio.registry)
6
+ demo.fn = spaces.GPU()(demo.fn)
7
+
8
+ for fn in demo.fns.values():
9
+ fn.api_name = False
10
+
11
+ if __name__ == "__main__":
12
+ demo.launch()
app_meta.py ADDED
@@ -0,0 +1,6 @@
1
+ import gradio as gr
2
+
3
+ demo = gr.load("models/meta-llama/Llama-3.3-70B-Instruct")
4
+
5
+ if __name__ == "__main__":
6
+ demo.launch()
app_mindsearch.py ADDED
@@ -0,0 +1,12 @@
1
+ import gradio as gr
2
+
3
+ # Load the Gradio space
4
+ demo = gr.load(name="internlm/MindSearch", src="spaces")
5
+
6
+ # Disable API access for all functions
7
+ if hasattr(demo, "fns"):
8
+ for fn in demo.fns.values():
9
+ fn.api_name = False
10
+
11
+ if __name__ == "__main__":
12
+ demo.launch()
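Blanking `fn.api_name` after `gr.load`, as above, keeps the mirrored Space usable in the browser while hiding its endpoints from this app's API page; the `hasattr` check makes the loop safe for demos that expose no functions. A reusable form of the same idiom:

```python
def hide_api(demo):
    # Hide every endpoint of a loaded demo from this app's API surface.
    if hasattr(demo, "fns"):
        for fn in demo.fns.values():
            fn.api_name = False
    return demo
```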
app_minimax.py ADDED
@@ -0,0 +1,22 @@
1
+ import ai_gradio
2
+
3
+ from utils_ai_gradio import get_app
4
+
5
+ # Get the hyperbolic models but keep their full names for loading
6
+ MINIMAX_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("minimax:")]
7
+
8
+ # Create display names without the prefix
9
+ MINIMAX_MODELS_DISPLAY = [k.replace("minimax:", "") for k in MINIMAX_MODELS_FULL]
10
+
11
+
12
+ # Create and launch the interface using get_app utility
13
+ demo = get_app(
14
+ models=MINIMAX_MODELS_FULL, # Use the full names with prefix
15
+ default_model=MINIMAX_MODELS_FULL[0],
16
+ dropdown_label="Select Minimax Model",
17
+ choices=MINIMAX_MODELS_DISPLAY, # Display names without prefix
18
+ fill_height=True,
19
+ )
20
+
21
+ if __name__ == "__main__":
22
+ demo.launch()
app_minimax_coder.py ADDED
@@ -0,0 +1,23 @@
1
+ import ai_gradio
2
+
3
+ from utils_ai_gradio import get_app
4
+
5
+ # Get the hyperbolic models but keep their full names for loading
6
+ MINIMAX_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("minimax:")]
7
+
8
+ # Create display names without the prefix
9
+ MINIMAX_MODELS_DISPLAY = [k.replace("minimax:", "") for k in MINIMAX_MODELS_FULL]
10
+
11
+
12
+ # Create and launch the interface using get_app utility
13
+ demo = get_app(
14
+ models=MINIMAX_MODELS_FULL, # Use the full names with prefix
15
+ default_model=MINIMAX_MODELS_FULL[0],
16
+ dropdown_label="Select Minimax Model",
17
+ choices=MINIMAX_MODELS_DISPLAY, # Display names without prefix
18
+ fill_height=True,
19
+ coder=True
20
+ )
21
+
22
+ if __name__ == "__main__":
23
+ demo.launch()
app_mistral.py ADDED
@@ -0,0 +1,23 @@
1
+ import ai_gradio
2
+
3
+ from utils_ai_gradio import get_app
4
+
5
+ # Get the mistral models but keep their full names for loading
6
+ MISTRAL_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("mistral:")]
7
+
8
+ # Create display names without the prefix
9
+ MISTRAL_MODELS_DISPLAY = [k.replace("mistral:", "") for k in MISTRAL_MODELS_FULL]
10
+
11
+
12
+ # Create and launch the interface using get_app utility
13
+ demo = get_app(
14
+ models=MISTRAL_MODELS_FULL, # Use the full names with prefix
15
+ default_model=MISTRAL_MODELS_FULL[5],
16
+ dropdown_label="Select Mistral Model",
17
+ choices=MISTRAL_MODELS_DISPLAY, # Display names without prefix
18
+ fill_height=True,
19
+ coder=True
20
+ )
21
+
22
+ if __name__ == "__main__":
23
+ demo.launch()
app_moondream.py ADDED
@@ -0,0 +1,13 @@
1
+ import gradio as gr
2
+
3
+ # Load the Gradio space
4
+ demo = gr.load(name="akhaliq/moondream", src="spaces")
5
+
6
+
7
+ # Disable API access for all functions
8
+ if hasattr(demo, "fns"):
9
+ for fn in demo.fns.values():
10
+ fn.api_name = False
11
+
12
+ if __name__ == "__main__":
13
+ demo.launch()
app_nvidia.py ADDED
@@ -0,0 +1,22 @@
1
+ import ai_gradio
2
+
3
+ from utils_ai_gradio import get_app
4
+
5
+ # Get the nvidia models but keep their full names for loading
6
+ NVIDIA_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("nvidia:")]
7
+
8
+ # Create display names without the prefix
9
+ NVIDIA_MODELS_DISPLAY = [k.replace("nvidia:", "") for k in NVIDIA_MODELS_FULL]
10
+
11
+
12
+ # Create and launch the interface using get_app utility
13
+ demo = get_app(
14
+ models=NVIDIA_MODELS_FULL, # Use the full names with prefix
15
+ default_model=NVIDIA_MODELS_FULL[0],
16
+ dropdown_label="Select Nvidia Model",
17
+ choices=NVIDIA_MODELS_DISPLAY, # Display names without prefix
18
+ fill_height=True,
19
+ )
20
+
21
+ if __name__ == "__main__":
22
+ demo.launch()
app_nvidia_coder.py ADDED
@@ -0,0 +1,23 @@
1
+ import ai_gradio
2
+
3
+ from utils_ai_gradio import get_app
4
+
5
+ # Get the nvidia models but keep their full names for loading
6
+ NVIDIA_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("nvidia:")]
7
+
8
+ # Create display names without the prefix
9
+ NVIDIA_MODELS_DISPLAY = [k.replace("nvidia:", "") for k in NVIDIA_MODELS_FULL]
10
+
11
+
12
+ # Create and launch the interface using get_app utility
13
+ demo = get_app(
14
+ models=NVIDIA_MODELS_FULL, # Use the full names with prefix
15
+ default_model=NVIDIA_MODELS_FULL[-1],
16
+ dropdown_label="Select Nvidia Model",
17
+ choices=NVIDIA_MODELS_DISPLAY, # Display names without prefix
18
+ fill_height=True,
19
+ coder=True
20
+ )
21
+
22
+ if __name__ == "__main__":
23
+ demo.launch()
app_omini.py ADDED
@@ -0,0 +1,10 @@
+ import gradio as gr
+
+ # Load the Gradio space
+ demo = gr.load(name="Yuanshi/OminiControl", src="spaces")
+
+
+ # Disable API access for all functions
+ if hasattr(demo, "fns"):
+     for fn in demo.fns.values():
+         fn.api_name = False
app_openai.py ADDED
@@ -0,0 +1,21 @@
+ import ai_gradio
+
+ from utils_ai_gradio import get_app
+
+ # Get the OpenAI models but keep their full names for loading
+ OPENAI_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("openai:")]
+
+ # Create display names without the prefix
+ OPENAI_MODELS_DISPLAY = [k.replace("openai:", "") for k in OPENAI_MODELS_FULL]
+
+ # Create and launch the interface using get_app utility
+ demo = get_app(
+     models=OPENAI_MODELS_FULL,  # Use the full names with prefix
+     default_model=OPENAI_MODELS_FULL[-1],
+     dropdown_label="Select OpenAI Model",
+     choices=OPENAI_MODELS_DISPLAY,  # Display names without prefix
+     fill_height=True,
+ )
+
+ if __name__ == "__main__":
+     demo.launch()
app_openai_coder.py ADDED
@@ -0,0 +1,22 @@
+ import ai_gradio
+
+ from utils_ai_gradio import get_app
+
+ # Get the OpenAI models but keep their full names for loading
+ OPENAI_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("openai:")]
+
+ # Create display names without the prefix
+ OPENAI_MODELS_DISPLAY = [k.replace("openai:", "") for k in OPENAI_MODELS_FULL]
+
+ # Create and launch the interface using get_app utility
+ demo = get_app(
+     models=OPENAI_MODELS_FULL,  # Use the full names with prefix
+     default_model=OPENAI_MODELS_FULL[-1],
+     dropdown_label="Select OpenAI Model",
+     choices=OPENAI_MODELS_DISPLAY,  # Display names without prefix
+     fill_height=True,
+     coder=True,
+ )
+
+ if __name__ == "__main__":
+     demo.launch()
app_openai_voice.py ADDED
@@ -0,0 +1,23 @@
+ import os
+
+ import openai_gradio
+
+ from utils import get_app
+
+ demo = get_app(
+     models=[
+         "gpt-4o-realtime-preview",
+         "gpt-4o-realtime-preview-2024-12-17",
+         "gpt-4o-realtime-preview-2024-10-01",
+         "gpt-4o-mini-realtime-preview",
+         "gpt-4o-mini-realtime-preview-2024-12-17",
+     ],
+     default_model="gpt-4o-mini-realtime-preview-2024-12-17",
+     src=openai_gradio.registry,
+     accept_token=not os.getenv("OPENAI_API_KEY"),
+     twilio_sid=os.getenv("TWILIO_SID_OPENAI"),
+     twilio_token=os.getenv("TWILIO_AUTH_OPENAI"),
+ )
+
+ if __name__ == "__main__":
+     demo.launch()
app_openrouter.py ADDED
@@ -0,0 +1,22 @@
+ import ai_gradio
+
+ from utils_ai_gradio import get_app
+
+ # Get the OpenRouter models but keep their full names for loading
+ OPENROUTER_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("openrouter:")]
+
+ # Create display names without the prefix
+ OPENROUTER_MODELS_DISPLAY = [k.replace("openrouter:", "") for k in OPENROUTER_MODELS_FULL]
+
+ # Create and launch the interface using get_app utility
+ demo = get_app(
+     models=OPENROUTER_MODELS_FULL,  # Use the full names with prefix
+     default_model=OPENROUTER_MODELS_FULL[-1],
+     dropdown_label="Select OpenRouter Model",
+     choices=OPENROUTER_MODELS_DISPLAY,  # Display names without prefix
+     fill_height=True,
+     coder=True,
+ )
+
+ if __name__ == "__main__":
+     demo.launch()
app_paligemma.py ADDED
@@ -0,0 +1,78 @@
+ import gradio as gr
+ from gradio_client import Client, handle_file
+
+ MODELS = {"Paligemma-10B": "akhaliq/paligemma2-10b-ft-docci-448"}
+
+
+ def create_chat_fn(client, system_prompt, temperature, max_tokens, top_k, rep_penalty, top_p):
+     def chat(message, history):
+         text = message.get("text", "")
+         files = message.get("files", [])
+         processed_files = [handle_file(f) for f in files]
+
+         response = client.predict(
+             message={"text": text, "files": processed_files},
+             system_prompt=system_prompt,
+             temperature=temperature,
+             max_new_tokens=max_tokens,
+             top_k=top_k,
+             repetition_penalty=rep_penalty,
+             top_p=top_p,
+             api_name="/chat",
+         )
+         return response
+
+     return chat
+
+
+ def set_client_for_session(model_name, request: gr.Request):
+     headers = {}
+     if request and hasattr(request, "headers"):
+         x_ip_token = request.headers.get("x-ip-token")
+         if x_ip_token:
+             headers["X-IP-Token"] = x_ip_token
+
+     return Client(MODELS[model_name], headers=headers)
+
+
+ def safe_chat_fn(message, history, client, system_prompt, temperature, max_tokens, top_k, rep_penalty, top_p):
+     if client is None:
+         return "Error: Client not initialized. Please refresh the page."
+     try:
+         return create_chat_fn(client, system_prompt, temperature, max_tokens, top_k, rep_penalty, top_p)(
+             message, history
+         )
+     except Exception as e:
+         print(f"Error during chat: {e!s}")
+         return f"Error during chat: {e!s}"
+
+
+ with gr.Blocks() as demo:
+     client = gr.State()
+
+     with gr.Accordion("Advanced Settings", open=False):
+         system_prompt = gr.Textbox(value="You are a helpful AI assistant.", label="System Prompt")
+         with gr.Row():
+             temperature = gr.Slider(minimum=0.0, maximum=2.0, value=0.7, label="Temperature")
+             top_p = gr.Slider(minimum=0.0, maximum=1.0, value=0.95, label="Top P")
+         with gr.Row():
+             top_k = gr.Slider(minimum=1, maximum=100, value=40, step=1, label="Top K")
+             rep_penalty = gr.Slider(minimum=1.0, maximum=2.0, value=1.1, label="Repetition Penalty")
+         max_tokens = gr.Slider(minimum=64, maximum=4096, value=1024, step=64, label="Max Tokens")
+
+     chat_interface = gr.ChatInterface(
+         fn=safe_chat_fn,
+         additional_inputs=[client, system_prompt, temperature, max_tokens, top_k, rep_penalty, top_p],
+         multimodal=True,
+     )
+
+     # Initialize client on page load with default model
+     demo.load(fn=set_client_for_session, inputs=[gr.State("Paligemma-10B")], outputs=[client])  # Using default model
+
+ # Move the API access check here, after demo is defined
+ if hasattr(demo, "fns"):
+     for fn in demo.fns.values():
+         fn.api_name = False
+
+ if __name__ == "__main__":
+     demo.launch()
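The /chat call that create_chat_fn wraps can also be exercised standalone with gradio_client, which makes the payload shape easier to see. Below is a minimal sketch reusing the Space name and parameter values from the file above; "photo.jpg" and the prompt text are illustrative placeholders, not files from this repo.

# Standalone sketch of the same /chat call made by create_chat_fn above.
from gradio_client import Client, handle_file

client = Client("akhaliq/paligemma2-10b-ft-docci-448")
response = client.predict(
    message={"text": "Describe this image.", "files": [handle_file("photo.jpg")]},
    system_prompt="You are a helpful AI assistant.",
    temperature=0.7,
    max_new_tokens=1024,
    top_k=40,
    repetition_penalty=1.1,
    top_p=0.95,
    api_name="/chat",
)
print(response)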
app_perplexity.py ADDED
@@ -0,0 +1,23 @@
+ import os
+
+ import perplexity_gradio
+
+ from utils import get_app
+
+ demo = get_app(
+     models=[
+         "llama-3.1-sonar-large-128k-online",
+         "llama-3.1-sonar-small-128k-online",
+         "llama-3.1-sonar-huge-128k-online",
+         "llama-3.1-sonar-small-128k-chat",
+         "llama-3.1-sonar-large-128k-chat",
+         "llama-3.1-8b-instruct",
+         "llama-3.1-70b-instruct",
+     ],
+     default_model="llama-3.1-sonar-huge-128k-online",
+     src=perplexity_gradio.registry,
+     accept_token=not os.getenv("PERPLEXITY_API_KEY"),
+ )
+
+ if __name__ == "__main__":
+     demo.launch()
app_playai.py ADDED
@@ -0,0 +1,10 @@
+ import gradio as gr
+ import playai_gradio
+
+ demo = gr.load(
+     name="PlayDialog",
+     src=playai_gradio.registry,
+ )
+
+ for fn in demo.fns.values():
+     fn.api_name = False
app_qwen.py ADDED
@@ -0,0 +1,19 @@
+ import ai_gradio
+
+ from utils_ai_gradio import get_app
+
+ # Get the qwen models but keep their full names for loading
+ QWEN_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("qwen:")]
+
+ # Create display names without the prefix
+ QWEN_MODELS_DISPLAY = [k.replace("qwen:", "") for k in QWEN_MODELS_FULL]
+
+
+ # Create and launch the interface using get_app utility
+ demo = get_app(
+     models=QWEN_MODELS_FULL,  # Use the full names with prefix
+     default_model=QWEN_MODELS_FULL[-1],
+     dropdown_label="Select Qwen Model",
+     choices=QWEN_MODELS_DISPLAY,  # Display names without prefix
+     fill_height=True,
+ )
app_qwen_coder.py ADDED
@@ -0,0 +1,20 @@
+ import ai_gradio
+
+ from utils_ai_gradio import get_app
+
+ # Get the qwen models but keep their full names for loading
+ QWEN_MODELS_FULL = [k for k in ai_gradio.registry.keys() if k.startswith("qwen:")]
+
+ # Create display names without the prefix
+ QWEN_MODELS_DISPLAY = [k.replace("qwen:", "") for k in QWEN_MODELS_FULL]
+
+
+ # Create and launch the interface using get_app utility
+ demo = get_app(
+     models=QWEN_MODELS_FULL,  # Use the full names with prefix
+     default_model=QWEN_MODELS_FULL[-1],
+     dropdown_label="Select Qwen Model",
+     choices=QWEN_MODELS_DISPLAY,  # Display names without prefix
+     fill_height=True,
+     coder=True,
+ )
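Note that app_qwen.py and app_qwen_coder.py (like app_omini.py and app_playai.py earlier) build demo at module level without a __main__ launch guard, which suggests they are imported by a top-level aggregator rather than run directly. A hypothetical aggregator (none is shown in this diff) could mount each module's demo as a tab:

# Hypothetical aggregator (not part of this diff): re-render each module's
# `demo` Blocks object as a tab in one combined app.
import gradio as gr

from app_qwen import demo as qwen_demo
from app_qwen_coder import demo as qwen_coder_demo

with gr.Blocks() as demo:
    with gr.Tab("Qwen"):
        qwen_demo.render()
    with gr.Tab("Qwen Coder"):
        qwen_coder_demo.render()

if __name__ == "__main__":
    demo.launch()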
app_replicate.py ADDED
@@ -0,0 +1,18 @@
+ import replicate_gradio
+
+ from utils import get_app
+
+ demo = get_app(
+     models=[
+         "black-forest-labs/flux-depth-pro",
+         "black-forest-labs/flux-canny-pro",
+         "black-forest-labs/flux-fill-pro",
+         "black-forest-labs/flux-depth-dev",
+         "tencent/hunyuan-video:140176772be3b423d14fdaf5403e6d4e38b85646ccad0c3fd2ed07c211f0cad1",
+     ],
+     default_model="tencent/hunyuan-video:140176772be3b423d14fdaf5403e6d4e38b85646ccad0c3fd2ed07c211f0cad1",
+     src=replicate_gradio.registry,
+ )
+
+ if __name__ == "__main__":
+     demo.launch()
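One detail worth flagging: the hunyuan-video entry pins an exact build with Replicate's owner/name:version-hash form, while the flux entries float on whatever version is latest. For comparison, the same pinned reference with the plain replicate client (the prompt value is illustrative):

# The "owner/name:version" form pins an exact model build; omitting the
# hash tracks the latest published version. Illustrative replicate call,
# not part of this diff.
import replicate

output = replicate.run(
    "tencent/hunyuan-video:140176772be3b423d14fdaf5403e6d4e38b85646ccad0c3fd2ed07c211f0cad1",
    input={"prompt": "a cat walking in the rain"},
)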