jbilcke-hf HF Staff commited on
Commit
f26b729
·
1 Parent(s): ef8ee9e

small bug fixes

Browse files
DEPLOYMENT.md CHANGED
@@ -17,15 +17,24 @@ source .python_venv/bin/activate
17
  python3 -m pip install --no-cache-dir --upgrade -r requirements.txt
18
  ```
19
 
20
- ### Deployment to production
21
 
22
- To deploy the aitube2 api to production:
 
 
 
 
23
 
24
- $ git push space main
25
 
26
- To deploy the aitube2 client to production, simply run:
 
 
27
 
28
- $ flutter run web
 
 
 
29
 
30
  and upload the assets to:
31
 
 
17
  python3 -m pip install --no-cache-dir --upgrade -r requirements.txt
18
  ```
19
 
20
+ ### Local testing
21
 
22
+ First you need to build the app:
23
+
24
+ $ flutter build web
25
+
26
+ Then run the server.
27
 
28
+ See paragraph "Running the gateway scheduler"
29
 
30
+ ### Deployment to production
31
+
32
+ To deploy the aitube2 api to production:
33
 
34
+ $ flutter build web
35
+ $ git add .
36
+ $ git commit -m "<description>"
37
+ $ git push public main
38
 
39
  and upload the assets to:
40
 
PROMPT_CONTEXT.md CHANGED
@@ -1,3 +1,10 @@
1
- For some context: our app is an AI video generation platform, where people use the frontend app (written in Flutter) to virtually "search for video (the video synopsis and their content are generated on the fly using AI). This uses a custom API written in Python, with a WebSockets communication.
 
 
 
 
 
 
 
2
 
3
  Task to perform: <fill your demand here>
 
1
+ For some context:
2
+
3
+ our app is an AI video generation platform, where people use the frontend app (written in Flutter) to virtually "search" for video (the video synopsis and their content are generated on the fly using AI).
4
+
5
+ Note that this uses a custom API written in Python, with a WebSockets communication.
6
+
7
+ To go back to the Flutter app: when the user opens a thumbnail card after doing such a generative AI search, it opens a full view for a video (with a player, title, description, chat section etc).
8
+
9
 
10
  Task to perform: <fill your demand here>
api_core.py CHANGED
@@ -298,16 +298,16 @@ class VideoGenerationAPI:
298
  """Generate a single search result using HF text generation"""
299
  prompt = f"""# Instruction
300
  Your response MUST be a YAML object containing a title, description, and tags, consistent with what we can find on a video sharing platform.
301
- Format your YAML response with only those fields: "title" (single string of a short sentence), "description" (single string of a few sentences to describe the visuals), and "tags" (array of strings). Do not add any other field.
302
- The description is a prompt for a generative AI, so please describe the visual elements of the scene in details, including: camera angle and focus, people's appearance, their age, actions, precise look, clothing, the location characteristics, lighting, action, objects, weather.
303
  Make the result unique and different from previous search results. ONLY RETURN YAML AND WITH ENGLISH CONTENT, NOT CHINESE - DO NOT ADD ANY OTHER COMMENT!
304
 
305
  # Context
306
  This is attempt {attempt_count} at generating search result number {search_count}.
307
 
308
  # Input
309
- Describe the video for this theme: "{query}".
310
- Don't forget to repeat singular elements about the characters, location.. in your description.
311
 
312
  # Output
313
 
@@ -321,7 +321,7 @@ title: \""""
321
  lambda: self.inference_client.text_generation(
322
  prompt,
323
  model=TEXT_MODEL,
324
- max_new_tokens=300,
325
  temperature=0.6
326
  )
327
  )
@@ -368,8 +368,12 @@ title: \""""
368
  'description': description,
369
  'thumbnailUrl': thumbnail,
370
  'videoUrl': '',
 
 
371
  'isLatent': True,
 
372
  'useFixedSeed': "webcam" in description.lower(),
 
373
  'seed': generate_seed(),
374
  'views': 0,
375
  'tags': tags
@@ -389,7 +393,7 @@ title: \""""
389
  lambda: self.inference_client.text_to_image(
390
  prompt=image_prompt,
391
  model=IMAGE_MODEL,
392
- width=1024,
393
  height=512
394
  )
395
  )
 
298
  """Generate a single search result using HF text generation"""
299
  prompt = f"""# Instruction
300
  Your response MUST be a YAML object containing a title, description, and tags, consistent with what we can find on a video sharing platform.
301
+ Format your YAML response with only those fields: "title" (single string of a short sentence), "description" (single string of a few sentences to describe the visuals: characters, age, gender, action, location, lighting, country, costume, time, weather, textures, color palette), and "tags" (array of strings). Do not add any other field.
302
+ The description is a prompt for a generative AI, so please describe the visual elements of the scene in details, including: camera angle and focus, people's appearance, their age, actions, precise look, clothing, the location characteristics, lighting, action, objects, weather, texture, color palette. Write as if you were describing the scene to a photographer.
303
  Make the result unique and different from previous search results. ONLY RETURN YAML AND WITH ENGLISH CONTENT, NOT CHINESE - DO NOT ADD ANY OTHER COMMENT!
304
 
305
  # Context
306
  This is attempt {attempt_count} at generating search result number {search_count}.
307
 
308
  # Input
309
+ Describe the appearance of a video scene for this theme: "{query}".
310
+ Don't use bullet points or titles/prefixes in your description. Just describe it in plain natural language.
311
 
312
  # Output
313
 
 
321
  lambda: self.inference_client.text_generation(
322
  prompt,
323
  model=TEXT_MODEL,
324
+ max_new_tokens=330,
325
  temperature=0.6
326
  )
327
  )
 
368
  'description': description,
369
  'thumbnailUrl': thumbnail,
370
  'videoUrl': '',
371
+
372
+ # not really used yet, maybe one day if we pre-generate or store content
373
  'isLatent': True,
374
+
375
  'useFixedSeed': "webcam" in description.lower(),
376
+
377
  'seed': generate_seed(),
378
  'views': 0,
379
  'tags': tags
 
393
  lambda: self.inference_client.text_to_image(
394
  prompt=image_prompt,
395
  model=IMAGE_MODEL,
396
+ width=768,
397
  height=512
398
  )
399
  )
build/web/assets/assets/config/aitube_low.yaml CHANGED
@@ -1,6 +1,7 @@
1
  ui:
2
  product_name: AiTube
3
-
 
4
  render_queue:
5
  # how many clips should be stored in advance
6
  buffer_size: 3
 
1
  ui:
2
  product_name: AiTube
3
+ showChatInVideoView: false
4
+
5
  render_queue:
6
  # how many clips should be stored in advance
7
  buffer_size: 3
build/web/assets/assets/config/default.yaml CHANGED
@@ -1,5 +1,6 @@
1
  ui:
2
  product_name: AppName
 
3
 
4
  render_queue:
5
  # how many clips should be stored in advance
 
1
  ui:
2
  product_name: AppName
3
+ showChatInVideoView: true
4
 
5
  render_queue:
6
  # how many clips should be stored in advance
build/web/assets/fonts/MaterialIcons-Regular.otf CHANGED
Binary files a/build/web/assets/fonts/MaterialIcons-Regular.otf and b/build/web/assets/fonts/MaterialIcons-Regular.otf differ
 
build/web/flutter_bootstrap.js CHANGED
@@ -39,6 +39,6 @@ _flutter.buildConfig = {"engineRevision":"382be0028d370607f76215a9be322e5514b263
39
 
40
  _flutter.loader.load({
41
  serviceWorkerSettings: {
42
- serviceWorkerVersion: "1946662225"
43
  }
44
  });
 
39
 
40
  _flutter.loader.load({
41
  serviceWorkerSettings: {
42
+ serviceWorkerVersion: "3310482292"
43
  }
44
  });
build/web/flutter_service_worker.js CHANGED
@@ -3,11 +3,11 @@ const MANIFEST = 'flutter-app-manifest';
3
  const TEMP = 'flutter-temp-cache';
4
  const CACHE_NAME = 'flutter-app-cache';
5
 
6
- const RESOURCES = {"flutter_bootstrap.js": "73af8f030b3b05ff5d0df3ad67a75121",
7
  "version.json": "b5eaae4fc120710a3c35125322173615",
8
  "index.html": "f34c56fffc6b38f62412a5db2315dec8",
9
  "/": "f34c56fffc6b38f62412a5db2315dec8",
10
- "main.dart.js": "0085961ce9c20b57a5ddbe570d5024a2",
11
  "flutter.js": "83d881c1dbb6d6bcd6b42e274605b69c",
12
  "favicon.png": "5dcef449791fa27946b3d35ad8803796",
13
  "icons/Icon-192.png": "ac9a721a12bbc803b44f645561ecb1e1",
@@ -22,12 +22,12 @@ const RESOURCES = {"flutter_bootstrap.js": "73af8f030b3b05ff5d0df3ad67a75121",
22
  "assets/packages/cupertino_icons/assets/CupertinoIcons.ttf": "33b7d9392238c04c131b6ce224e13711",
23
  "assets/shaders/ink_sparkle.frag": "ecc85a2e95f5e9f53123dcaf8cb9b6ce",
24
  "assets/AssetManifest.bin": "5894fe5676e62dc22403a833f2313e43",
25
- "assets/fonts/MaterialIcons-Regular.otf": "ad353551d900c9d287176103e249f13e",
26
  "assets/assets/config/private.yaml": "97a9ec367206bea5dce64faf94b66332",
27
  "assets/assets/config/README.md": "07a87720dd00dd1ca98c9d6884440e31",
28
  "assets/assets/config/aitube_high.yaml": "c030f221344557ecf05aeef30f224502",
29
- "assets/assets/config/default.yaml": "d3d68c3d543051e4e24c1694731f219c",
30
- "assets/assets/config/aitube_low.yaml": "4933e2a3345cf1143ec043d074a6444d",
31
  "canvaskit/skwasm.js": "ea559890a088fe28b4ddf70e17e60052",
32
  "canvaskit/skwasm.js.symbols": "9fe690d47b904d72c7d020bd303adf16",
33
  "canvaskit/canvaskit.js.symbols": "27361387bc24144b46a745f1afe92b50",
 
3
  const TEMP = 'flutter-temp-cache';
4
  const CACHE_NAME = 'flutter-app-cache';
5
 
6
+ const RESOURCES = {"flutter_bootstrap.js": "ad3dd4404e2156ccb1734ec07b249cd5",
7
  "version.json": "b5eaae4fc120710a3c35125322173615",
8
  "index.html": "f34c56fffc6b38f62412a5db2315dec8",
9
  "/": "f34c56fffc6b38f62412a5db2315dec8",
10
+ "main.dart.js": "92b9f48aec1daf04b5f8ee31dec89bf9",
11
  "flutter.js": "83d881c1dbb6d6bcd6b42e274605b69c",
12
  "favicon.png": "5dcef449791fa27946b3d35ad8803796",
13
  "icons/Icon-192.png": "ac9a721a12bbc803b44f645561ecb1e1",
 
22
  "assets/packages/cupertino_icons/assets/CupertinoIcons.ttf": "33b7d9392238c04c131b6ce224e13711",
23
  "assets/shaders/ink_sparkle.frag": "ecc85a2e95f5e9f53123dcaf8cb9b6ce",
24
  "assets/AssetManifest.bin": "5894fe5676e62dc22403a833f2313e43",
25
+ "assets/fonts/MaterialIcons-Regular.otf": "f7c7cc97f118137db94cf3e17143bf62",
26
  "assets/assets/config/private.yaml": "97a9ec367206bea5dce64faf94b66332",
27
  "assets/assets/config/README.md": "07a87720dd00dd1ca98c9d6884440e31",
28
  "assets/assets/config/aitube_high.yaml": "c030f221344557ecf05aeef30f224502",
29
+ "assets/assets/config/default.yaml": "e98187e5a53a8b0d8bf2c5cd9dd0a365",
30
+ "assets/assets/config/aitube_low.yaml": "45750c9b2982d06eff21bd302ed0f8c4",
31
  "canvaskit/skwasm.js": "ea559890a088fe28b4ddf70e17e60052",
32
  "canvaskit/skwasm.js.symbols": "9fe690d47b904d72c7d020bd303adf16",
33
  "canvaskit/canvaskit.js.symbols": "27361387bc24144b46a745f1afe92b50",
build/web/main.dart.js CHANGED
The diff for this file is too large to render. See raw diff
 
lib/config/config.dart CHANGED
@@ -63,6 +63,9 @@ class Configuration {
63
 
64
  String get uiProductName =>
65
  _config['ui']['product_name'];
 
 
 
66
 
67
  // how many clips should be stored in advance
68
  int get renderQueueBufferSize =>
 
63
 
64
  String get uiProductName =>
65
  _config['ui']['product_name'];
66
+
67
+ bool get showChatInVideoView =>
68
+ _config['ui']['showChatInVideoView'] ?? true;
69
 
70
  // how many clips should be stored in advance
71
  int get renderQueueBufferSize =>
lib/screens/settings_screen.dart CHANGED
@@ -115,7 +115,7 @@ class _SettingsScreenState extends State<SettingsScreen> {
115
  ),
116
  ),
117
  const SizedBox(height: 16),
118
- // Cache Card (existing code)
119
  Card(
120
  child: Padding(
121
  padding: const EdgeInsets.all(16),
 
115
  ),
116
  ),
117
  const SizedBox(height: 16),
118
+ // Cache Card (existing code)
119
  Card(
120
  child: Padding(
121
  padding: const EdgeInsets.all(16),
lib/screens/video_screen.dart CHANGED
@@ -2,6 +2,7 @@
2
  import 'package:aitube2/widgets/chat_widget.dart';
3
  import 'package:aitube2/widgets/search_box.dart';
4
  import 'package:flutter/material.dart';
 
5
  import '../models/video_result.dart';
6
  import '../services/websocket_api_service.dart';
7
  import '../services/cache_service.dart';
@@ -165,27 +166,30 @@ class _VideoScreenState extends State<VideoScreen> {
165
  Expanded(
166
  child: _buildMainContent(),
167
  ),
168
- const SizedBox(width: 16),
169
- Padding(
170
- padding: const EdgeInsets.only(right: 16),
171
- child: ChatWidget(videoId: widget.video.id),
172
- ),
 
 
173
  ],
174
  )
175
  : Column(
176
  children: [
177
  _buildMainContent(),
178
- const SizedBox(height: 16),
179
- // Modified this part
180
- Expanded(
181
- child: Padding(
182
- padding: const EdgeInsets.symmetric(horizontal: 16),
183
- child: ChatWidget(
184
- videoId: widget.video.id,
185
- isCompact: true,
 
186
  ),
187
  ),
188
- ),
189
  ],
190
  ),
191
  ),
 
2
  import 'package:aitube2/widgets/chat_widget.dart';
3
  import 'package:aitube2/widgets/search_box.dart';
4
  import 'package:flutter/material.dart';
5
+ import '../config/config.dart';
6
  import '../models/video_result.dart';
7
  import '../services/websocket_api_service.dart';
8
  import '../services/cache_service.dart';
 
166
  Expanded(
167
  child: _buildMainContent(),
168
  ),
169
+ if (Configuration.instance.showChatInVideoView) ...[
170
+ const SizedBox(width: 16),
171
+ Padding(
172
+ padding: const EdgeInsets.only(right: 16),
173
+ child: ChatWidget(videoId: widget.video.id),
174
+ ),
175
+ ],
176
  ],
177
  )
178
  : Column(
179
  children: [
180
  _buildMainContent(),
181
+ if (Configuration.instance.showChatInVideoView) ...[
182
+ const SizedBox(height: 16),
183
+ Expanded(
184
+ child: Padding(
185
+ padding: const EdgeInsets.symmetric(horizontal: 16),
186
+ child: ChatWidget(
187
+ videoId: widget.video.id,
188
+ isCompact: true,
189
+ ),
190
  ),
191
  ),
192
+ ],
193
  ],
194
  ),
195
  ),
lib/services/websocket_api_service.dart CHANGED
@@ -54,6 +54,15 @@ class WebSocketApiService {
54
  // Get the current host and protocol from the browser window
55
  final location = Uri.base;
56
  final protocol = location.scheme == 'https' ? 'wss' : 'ws';
 
 
 
 
 
 
 
 
 
57
  final url = '$protocol://${location.host}/ws';
58
  debugPrint('WebSocketApiService: Using dynamic WebSocket URL: $url');
59
  return url;
@@ -195,10 +204,27 @@ class WebSocketApiService {
195
 
196
  // First check if server is in maintenance mode by making an HTTP request to the status endpoint
197
  try {
198
- // Determine HTTP URL based on WebSocket URL
199
- final wsUri = Uri.parse(_wsUrl);
200
- final protocol = wsUri.scheme == 'wss' ? 'https' : 'http';
201
- final httpUrl = '$protocol://${wsUri.authority}/api/status';
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
202
 
203
  // Use conditional import to handle platform differences
204
  if (kIsWeb) {
 
54
  // Get the current host and protocol from the browser window
55
  final location = Uri.base;
56
  final protocol = location.scheme == 'https' ? 'wss' : 'ws';
57
+
58
+ // For localhost, explicitly include port 8080
59
+ if (location.host == 'localhost' || location.host.startsWith('localhost:')) {
60
+ final url = '$protocol://localhost:8080/ws';
61
+ debugPrint('WebSocketApiService: Using localhost:8080 WebSocket URL: $url');
62
+ return url;
63
+ }
64
+
65
+ // For other hosts, include the original port number in the URL
66
  final url = '$protocol://${location.host}/ws';
67
  debugPrint('WebSocketApiService: Using dynamic WebSocket URL: $url');
68
  return url;
 
204
 
205
  // First check if server is in maintenance mode by making an HTTP request to the status endpoint
206
  try {
207
+ // Determine HTTP URL based on WebSocket URL and current location
208
+ String httpUrl;
209
+ if (kIsWeb) {
210
+ // In web, use the current location with api/status appended
211
+ final location = Uri.base;
212
+ final protocol = location.scheme;
213
+
214
+ // For localhost, explicitly include port 8080
215
+ if (location.host == 'localhost' || location.host.startsWith('localhost:')) {
216
+ httpUrl = '$protocol://localhost:8080/api/status';
217
+ } else {
218
+ httpUrl = '$protocol://${location.host}/api/status';
219
+ }
220
+ } else {
221
+ // For non-web, derive from WebSocket URL
222
+ final wsUri = Uri.parse(_wsUrl);
223
+ final protocol = wsUri.scheme == 'wss' ? 'https' : 'http';
224
+ httpUrl = '$protocol://${wsUri.authority}/api/status';
225
+ }
226
+
227
+ debugPrint('WebSocketApiService: Checking maintenance status at: $httpUrl');
228
 
229
  // Use conditional import to handle platform differences
230
  if (kIsWeb) {
lib/widgets/video_card.dart CHANGED
@@ -25,7 +25,9 @@ class VideoCard extends StatelessWidget {
25
  ),
26
  SizedBox(height: 8),
27
  Text(
28
- 'Generating preview...',
 
 
29
  style: TextStyle(
30
  color: AiTubeColors.onSurfaceVariant,
31
  fontSize: 12,
@@ -108,39 +110,32 @@ class VideoCard extends StatelessWidget {
108
  fit: StackFit.expand,
109
  children: [
110
  _buildThumbnail(),
111
- if (video.isLatent)
112
- Positioned(
113
- right: 8,
114
- top: 8,
115
- child: Container(
116
- padding: const EdgeInsets.symmetric(
117
- horizontal: 8,
118
- vertical: 4,
119
- ),
120
- decoration: BoxDecoration(
121
- color: Colors.black.withOpacity(0.7),
122
- borderRadius: BorderRadius.circular(4),
123
- ),
124
- child: const Row(
125
- mainAxisSize: MainAxisSize.min,
126
- children: [
127
- Icon(
128
- Icons.ac_unit,
129
- size: 16,
130
  color: AiTubeColors.onBackground,
 
131
  ),
132
- SizedBox(width: 4),
133
- Text(
134
- 'Latent',
135
- style: TextStyle(
136
- color: AiTubeColors.onBackground,
137
- fontSize: 12,
138
- ),
139
- ),
140
- ],
141
- ),
142
  ),
143
  ),
 
144
  ],
145
  ),
146
  ),
 
25
  ),
26
  SizedBox(height: 8),
27
  Text(
28
+ // 'Generating preview...',
29
+ // thumbnail generation
30
+ '(TODO: thumbnails)',
31
  style: TextStyle(
32
  color: AiTubeColors.onSurfaceVariant,
33
  fontSize: 12,
 
110
  fit: StackFit.expand,
111
  children: [
112
  _buildThumbnail(),
113
+ Positioned(
114
+ right: 8,
115
+ top: 8,
116
+ child: Container(
117
+ padding: const EdgeInsets.symmetric(
118
+ horizontal: 8,
119
+ vertical: 4,
120
+ ),
121
+ decoration: BoxDecoration(
122
+ color: Colors.black.withOpacity(0.7),
123
+ borderRadius: BorderRadius.circular(4),
124
+ ),
125
+ child: const Row(
126
+ mainAxisSize: MainAxisSize.min,
127
+ children: [
128
+ Text(
129
+ 'LTX Video',
130
+ style: TextStyle(
 
131
  color: AiTubeColors.onBackground,
132
+ fontSize: 12,
133
  ),
134
+ ),
135
+ ],
 
 
 
 
 
 
 
 
136
  ),
137
  ),
138
+ ),
139
  ],
140
  ),
141
  ),