jbilcke-hf HF Staff commited on
Commit
31fe0d9
·
1 Parent(s): 47e79c3

testing a fix for the status badge + default LLM

Browse files
.gitignore CHANGED
@@ -1,7 +1,7 @@
1
  # secret codes
2
  lib/config/secrets.dart
3
  run_locally_with_secrets.sh
4
-
5
  # Python venv
6
  .python_venv/
7
 
 
1
  # secret codes
2
  lib/config/secrets.dart
3
  run_locally_with_secrets.sh
4
+
5
  # Python venv
6
  .python_venv/
7
 
DEPLOYMENT.md CHANGED
@@ -57,6 +57,10 @@ curl https://api.endpoints.huggingface.cloud/v2/endpoint/<YOUR_ACCOUNT_NAME> -X
57
 
58
  #### Running the gateway scheduler
59
 
 
 
 
 
60
  ```bash
61
  # load the environment
62
  # (if you haven't done it already for this shell session)
@@ -73,11 +77,12 @@ PRODUCT_NAME="TikSlop" \
73
  VIDEO_ROUND_ROBIN_SERVER_4="https://<USE YOUR OWN SERVER>.endpoints.huggingface.cloud" \
74
  VIDEO_ROUND_ROBIN_SERVER_5="https://<USE YOUR OWN SERVER>.endpoints.huggingface.cloud" \
75
  VIDEO_ROUND_ROBIN_SERVER_6="https://<USE YOUR OWN SERVER>.endpoints.huggingface.cloud" \
76
- HF_IMAGE_MODEL="https://<USE YOUR OWN SERVER>.endpoints.huggingface.cloud" \
77
  HF_TEXT_MODEL="https://<USE YOUR OWN SERVER>.endpoints.huggingface.cloud" \
78
  python3 api.py
79
  ```
80
 
 
81
  ### Run the client (web)
82
 
83
  ```bash
 
57
 
58
  #### Running the gateway scheduler
59
 
60
+ For convenience, the script `build_and_run.sh` tries to run a `run_locally_with_secrets.sh` file that launches the Python backend.
61
+
62
+ Here is an example:
63
+
64
  ```bash
65
  # load the environment
66
  # (if you haven't done it already for this shell session)
 
77
  VIDEO_ROUND_ROBIN_SERVER_4="https://<USE YOUR OWN SERVER>.endpoints.huggingface.cloud" \
78
  VIDEO_ROUND_ROBIN_SERVER_5="https://<USE YOUR OWN SERVER>.endpoints.huggingface.cloud" \
79
  VIDEO_ROUND_ROBIN_SERVER_6="https://<USE YOUR OWN SERVER>.endpoints.huggingface.cloud" \
80
+ HF_IMAGE_MODEL="https://<USE YOUR OWN SERVER>.endpoints.huggingface.cloud" \
81
  HF_TEXT_MODEL="https://<USE YOUR OWN SERVER>.endpoints.huggingface.cloud" \
82
  python3 api.py
83
  ```
84
 
85
+
86
  ### Run the client (web)
87
 
88
  ```bash
api_config.py CHANGED
@@ -35,10 +35,6 @@ VIDEO_ROUND_ROBIN_ENDPOINT_URLS = filtered_urls[:MAX_NODES]
35
 
36
  HF_TOKEN = os.environ.get('HF_TOKEN')
37
 
38
- # Control whether to allow fallback to server's HF API key for LLM calls
39
- # (Video generation will always use the server's HF token)
40
- ALLOW_USING_SERVER_HF_API_KEY_FOR_LLM_CALLS = os.environ.get('ALLOW_USING_SERVER_HF_API_KEY_FOR_LLM_CALLS', 'false').lower() in ('true', 'yes', '1', 't')
41
-
42
  # use the same secret token as you used to secure your BASE_SPACE_NAME spaces
43
  SECRET_TOKEN = os.environ.get('SECRET_TOKEN')
44
 
 
35
 
36
  HF_TOKEN = os.environ.get('HF_TOKEN')
37
 
 
 
 
 
38
  # use the same secret token as you used to secure your BASE_SPACE_NAME spaces
39
  SECRET_TOKEN = os.environ.get('SECRET_TOKEN')
40
 
api_core.py CHANGED
@@ -223,7 +223,7 @@ class VideoGenerationAPI:
223
  Priority order for API keys:
224
  1. Provider-specific API key (if provided)
225
  2. User's HF token (if provided)
226
- 3. Server's HF token (only if ALLOW_USING_SERVER_HF_API_KEY_FOR_LLM_CALLS is true)
227
  4. Raise exception if no valid key is available
228
  """
229
  if not llm_config:
@@ -252,16 +252,16 @@ class VideoGenerationAPI:
252
 
253
  hf_provider = provider_mapping.get(provider)
254
 
255
- # Handle built-in provider first (uses server's HF token and default model)
256
  if provider == 'builtin':
257
- if ALLOW_USING_SERVER_HF_API_KEY_FOR_LLM_CALLS and HF_TOKEN:
258
  # Use server's default model from HF_TEXT_MODEL
259
  return InferenceClient(
260
  model=TEXT_MODEL if TEXT_MODEL else model,
261
  token=HF_TOKEN
262
  )
263
  else:
264
- raise ValueError("Built-in provider is not available. Server is not configured to allow fallback to server API key.")
265
 
266
  # Priority 1: Use provider-specific API key if available
267
  if api_key and hf_provider:
@@ -284,15 +284,8 @@ class VideoGenerationAPI:
284
  token=hf_token
285
  )
286
 
287
- # Priority 3: Use server's HF token only if explicitly allowed
288
- if ALLOW_USING_SERVER_HF_API_KEY_FOR_LLM_CALLS and HF_TOKEN:
289
- logger.warning(f"Using server's HF token for {provider} model {model} - no user API key provided")
290
- return InferenceClient(
291
- model=model,
292
- token=HF_TOKEN
293
- )
294
-
295
  # No valid API key available
 
296
  if provider == 'huggingface':
297
  raise ValueError("No API key provided. Please provide your Hugging Face API key.")
298
  else:
@@ -303,16 +296,8 @@ class VideoGenerationAPI:
303
  raise
304
  except Exception as e:
305
  logger.error(f"Error creating InferenceClient with config {llm_config}: {e}")
306
- # For other errors, fallback to default client only if server token is allowed
307
- if ALLOW_USING_SERVER_HF_API_KEY_FOR_LLM_CALLS:
308
- return self.inference_client
309
- else:
310
- raise
311
-
312
- except Exception as e:
313
- logger.error(f"Error creating InferenceClient with config {llm_config}: {e}")
314
- # Fallback to default client
315
- return self.inference_client
316
 
317
  async def _generate_text(self, prompt: str, llm_config: Optional[dict] = None,
318
  max_new_tokens: int = 200, temperature: float = 0.7,
@@ -621,7 +606,7 @@ title: \""""
621
  'thumbnailUrl': '',
622
  'videoUrl': '',
623
  'isLatent': True,
624
- 'useFixedSeed': "query" in description.lower(),
625
  'seed': generate_seed(),
626
  'views': 0,
627
  'tags': []
 
223
  Priority order for API keys:
224
  1. Provider-specific API key (if provided)
225
  2. User's HF token (if provided)
226
+ 3. Server's HF token (only for built-in provider)
227
  4. Raise exception if no valid key is available
228
  """
229
  if not llm_config:
 
252
 
253
  hf_provider = provider_mapping.get(provider)
254
 
255
+ # Handle built-in provider first (always uses server's HF token and default model)
256
  if provider == 'builtin':
257
+ if HF_TOKEN:
258
  # Use server's default model from HF_TEXT_MODEL
259
  return InferenceClient(
260
  model=TEXT_MODEL if TEXT_MODEL else model,
261
  token=HF_TOKEN
262
  )
263
  else:
264
+ raise ValueError("Built-in provider is not available. Server HF_TOKEN is not configured.")
265
 
266
  # Priority 1: Use provider-specific API key if available
267
  if api_key and hf_provider:
 
284
  token=hf_token
285
  )
286
 
 
 
 
 
 
 
 
 
287
  # No valid API key available
288
+ # Note: Server's HF token is NEVER used for inference providers
289
  if provider == 'huggingface':
290
  raise ValueError("No API key provided. Please provide your Hugging Face API key.")
291
  else:
 
296
  raise
297
  except Exception as e:
298
  logger.error(f"Error creating InferenceClient with config {llm_config}: {e}")
299
+ # Re-raise all other exceptions
300
+ raise
 
 
 
 
 
 
 
 
301
 
302
  async def _generate_text(self, prompt: str, llm_config: Optional[dict] = None,
303
  max_new_tokens: int = 200, temperature: float = 0.7,
 
606
  'thumbnailUrl': '',
607
  'videoUrl': '',
608
  'isLatent': True,
609
+ 'useFixedSeed': "query" in query.lower(),
610
  'seed': generate_seed(),
611
  'views': 0,
612
  'tags': []
build/web/flutter_bootstrap.js CHANGED
@@ -38,6 +38,6 @@ _flutter.buildConfig = {"engineRevision":"1c9c20e7c3dd48c66f400a24d48ea806b4ab31
38
 
39
  _flutter.loader.load({
40
  serviceWorkerSettings: {
41
- serviceWorkerVersion: "2889703515"
42
  }
43
  });
 
38
 
39
  _flutter.loader.load({
40
  serviceWorkerSettings: {
41
+ serviceWorkerVersion: "2693113717"
42
  }
43
  });
build/web/flutter_service_worker.js CHANGED
@@ -3,12 +3,12 @@ const MANIFEST = 'flutter-app-manifest';
3
  const TEMP = 'flutter-temp-cache';
4
  const CACHE_NAME = 'flutter-app-cache';
5
 
6
- const RESOURCES = {"flutter_bootstrap.js": "c4149ebfe81a914993dabffbdb6fd86e",
7
  "version.json": "68350cac7987de2728345c72918dd067",
8
  "tikslop.png": "570e1db759046e2d224fef729983634e",
9
  "index.html": "3a7029b3672560e7938aab6fa4d30a46",
10
  "/": "3a7029b3672560e7938aab6fa4d30a46",
11
- "main.dart.js": "289dec6beee3b5a37f9fc410580f04c3",
12
  "tikslop.svg": "26140ba0d153b213b122bc6ebcc17f6c",
13
  "flutter.js": "888483df48293866f9f41d3d9274a779",
14
  "favicon.png": "c8a183c516004e648a7bac7497c89b97",
 
3
  const TEMP = 'flutter-temp-cache';
4
  const CACHE_NAME = 'flutter-app-cache';
5
 
6
+ const RESOURCES = {"flutter_bootstrap.js": "4e3f72d0d30bbd04184daf8bbd46ea55",
7
  "version.json": "68350cac7987de2728345c72918dd067",
8
  "tikslop.png": "570e1db759046e2d224fef729983634e",
9
  "index.html": "3a7029b3672560e7938aab6fa4d30a46",
10
  "/": "3a7029b3672560e7938aab6fa4d30a46",
11
+ "main.dart.js": "b586f88a7fb24fe005b0482de36b3822",
12
  "tikslop.svg": "26140ba0d153b213b122bc6ebcc17f6c",
13
  "flutter.js": "888483df48293866f9f41d3d9274a779",
14
  "favicon.png": "c8a183c516004e648a7bac7497c89b97",
build/web/index.html CHANGED
@@ -156,7 +156,7 @@
156
  </script>
157
 
158
  <!-- Add version parameter for cache busting -->
159
- <script src="flutter_bootstrap.js?v=1753291318" async></script>
160
 
161
  <!-- Add cache busting script -->
162
  <script>
 
156
  </script>
157
 
158
  <!-- Add version parameter for cache busting -->
159
+ <script src="flutter_bootstrap.js?v=1753357418" async></script>
160
 
161
  <!-- Add cache busting script -->
162
  <script>
build/web/main.dart.js CHANGED
@@ -93152,6 +93152,8 @@ case 7:o.a=null
93152
  case 3:o.eq(B.hk)
93153
  o.w=0
93154
  o.k1=null
 
 
93155
  s=9
93156
  return A.p(o.jd(),$async$xd)
93157
  case 9:return A.y(null,r)
 
93152
  case 3:o.eq(B.hk)
93153
  o.w=0
93154
  o.k1=null
93155
+ o.cy="anon"
93156
+ o.db.E(0,"anon")
93157
  s=9
93158
  return A.p(o.jd(),$async$xd)
93159
  case 9:return A.y(null,r)
build_and_run.sh ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ ./build.sh
2
+ ./run_locally_with_secrets.sh
lib/services/websocket_api_service.dart CHANGED
@@ -463,6 +463,10 @@ class WebSocketApiService {
463
  _reconnectAttempts = 0;
464
  _connectionId = null; // Clear connection ID to force a new one
465
 
 
 
 
 
466
  // Connect again
467
  await connect();
468
  }
 
463
  _reconnectAttempts = 0;
464
  _connectionId = null; // Clear connection ID to force a new one
465
 
466
+ // Reset user role to anon until we get the new role from server
467
+ _userRole = 'anon';
468
+ _userRoleController.add(_userRole);
469
+
470
  // Connect again
471
  await connect();
472
  }