jbilcke-hf HF Staff commited on
Commit
233cefb
·
1 Parent(s): 7f178fb

improving the model selection

Browse files
assets/config/curated_models.yaml ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Curated list of models known to work well with #tikslop
2
+ models:
3
+ - model_id: deepseek-ai/DeepSeek-R1-0528-Qwen3-8B
4
+ display_name: DeepSeek R1 Qwen3 8B (0528)
5
+ num_of_parameters: 8B
6
+
7
+ - model_id: mistralai/Mistral-Small-3.2-24B-Instruct-2506
8
+ display_name: Mistral Small 3.2 24B (Instruct, 2506)
9
+ num_of_parameters: 24B
10
+
11
+ - model_id: HuggingFaceTB/SmolLM3-3B
12
+ display_name: SmolLM3 3B
13
+ num_of_parameters: 3B
14
+
15
+ - model_id: Qwen/Qwen3-0.6B
16
+ display_name: Qwen3 0.6B
17
+ num_of_parameters: 0.6B
18
+
19
+ - model_id: google/gemma-3n-E2B-it
20
+ display_name: Gemma 3n E2B IT (Instruct)
21
+ num_of_parameters: 2B
22
+
23
+ - model_id: google/gemma-3n-E4B-it
24
+ display_name: Gemma 3n E4B IT (Instruct)
25
+ num_of_parameters: 4B
build/web/assets/AssetManifest.bin CHANGED
@@ -1 +1 @@
1
-
 
1
+
build/web/assets/AssetManifest.bin.json CHANGED
@@ -1 +1 @@
1
- "DQgHFGFzc2V0cy9hZHMvUkVBRE1FLm1kDAENAQcFYXNzZXQHFGFzc2V0cy9hZHMvUkVBRE1FLm1kBxZhc3NldHMvYWRzL2xlcm9ib3QuZ2lmDAENAQcFYXNzZXQHFmFzc2V0cy9hZHMvbGVyb2JvdC5naWYHGWFzc2V0cy9hZHMvc21vbGFnZW50cy5naWYMAQ0BBwVhc3NldAcZYXNzZXRzL2Fkcy9zbW9sYWdlbnRzLmdpZgcXYXNzZXRzL2NvbmZpZy9SRUFETUUubWQMAQ0BBwVhc3NldAcXYXNzZXRzL2NvbmZpZy9SRUFETUUubWQHGWFzc2V0cy9jb25maWcvY3VzdG9tLnlhbWwMAQ0BBwVhc3NldAcZYXNzZXRzL2NvbmZpZy9jdXN0b20ueWFtbAcaYXNzZXRzL2NvbmZpZy9kZWZhdWx0LnlhbWwMAQ0BBwVhc3NldAcaYXNzZXRzL2NvbmZpZy9kZWZhdWx0LnlhbWwHGmFzc2V0cy9jb25maWcvdGlrc2xvcC55YW1sDAENAQcFYXNzZXQHGmFzc2V0cy9jb25maWcvdGlrc2xvcC55YW1sBzJwYWNrYWdlcy9jdXBlcnRpbm9faWNvbnMvYXNzZXRzL0N1cGVydGlub0ljb25zLnR0ZgwBDQEHBWFzc2V0BzJwYWNrYWdlcy9jdXBlcnRpbm9faWNvbnMvYXNzZXRzL0N1cGVydGlub0ljb25zLnR0Zg=="
 
1
+ "DQkHFGFzc2V0cy9hZHMvUkVBRE1FLm1kDAENAQcFYXNzZXQHFGFzc2V0cy9hZHMvUkVBRE1FLm1kBxZhc3NldHMvYWRzL2xlcm9ib3QuZ2lmDAENAQcFYXNzZXQHFmFzc2V0cy9hZHMvbGVyb2JvdC5naWYHGWFzc2V0cy9hZHMvc21vbGFnZW50cy5naWYMAQ0BBwVhc3NldAcZYXNzZXRzL2Fkcy9zbW9sYWdlbnRzLmdpZgcXYXNzZXRzL2NvbmZpZy9SRUFETUUubWQMAQ0BBwVhc3NldAcXYXNzZXRzL2NvbmZpZy9SRUFETUUubWQHIWFzc2V0cy9jb25maWcvY3VyYXRlZF9tb2RlbHMueWFtbAwBDQEHBWFzc2V0ByFhc3NldHMvY29uZmlnL2N1cmF0ZWRfbW9kZWxzLnlhbWwHGWFzc2V0cy9jb25maWcvY3VzdG9tLnlhbWwMAQ0BBwVhc3NldAcZYXNzZXRzL2NvbmZpZy9jdXN0b20ueWFtbAcaYXNzZXRzL2NvbmZpZy9kZWZhdWx0LnlhbWwMAQ0BBwVhc3NldAcaYXNzZXRzL2NvbmZpZy9kZWZhdWx0LnlhbWwHGmFzc2V0cy9jb25maWcvdGlrc2xvcC55YW1sDAENAQcFYXNzZXQHGmFzc2V0cy9jb25maWcvdGlrc2xvcC55YW1sBzJwYWNrYWdlcy9jdXBlcnRpbm9faWNvbnMvYXNzZXRzL0N1cGVydGlub0ljb25zLnR0ZgwBDQEHBWFzc2V0BzJwYWNrYWdlcy9jdXBlcnRpbm9faWNvbnMvYXNzZXRzL0N1cGVydGlub0ljb25zLnR0Zg=="
build/web/assets/AssetManifest.json CHANGED
@@ -1 +1 @@
1
- {"assets/ads/README.md":["assets/ads/README.md"],"assets/ads/lerobot.gif":["assets/ads/lerobot.gif"],"assets/ads/smolagents.gif":["assets/ads/smolagents.gif"],"assets/config/README.md":["assets/config/README.md"],"assets/config/custom.yaml":["assets/config/custom.yaml"],"assets/config/default.yaml":["assets/config/default.yaml"],"assets/config/tikslop.yaml":["assets/config/tikslop.yaml"],"packages/cupertino_icons/assets/CupertinoIcons.ttf":["packages/cupertino_icons/assets/CupertinoIcons.ttf"]}
 
1
+ {"assets/ads/README.md":["assets/ads/README.md"],"assets/ads/lerobot.gif":["assets/ads/lerobot.gif"],"assets/ads/smolagents.gif":["assets/ads/smolagents.gif"],"assets/config/README.md":["assets/config/README.md"],"assets/config/curated_models.yaml":["assets/config/curated_models.yaml"],"assets/config/custom.yaml":["assets/config/custom.yaml"],"assets/config/default.yaml":["assets/config/default.yaml"],"assets/config/tikslop.yaml":["assets/config/tikslop.yaml"],"packages/cupertino_icons/assets/CupertinoIcons.ttf":["packages/cupertino_icons/assets/CupertinoIcons.ttf"]}
build/web/assets/assets/config/curated_models.yaml ADDED
@@ -0,0 +1,79 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Curated list of models known to work well with #tikslop
2
+ models:
3
+
4
+ #- model_id: HuggingFaceTB/SmolLM3-3B
5
+ # display_name: SmolLM3 3B
6
+ # num_of_parameters: 3B
7
+
8
+ - model_id: Qwen/Qwen2.5-0.5B-Instruct
9
+ display_name: Qwen2.5 0.5B
10
+ num_of_parameters: 0.5B
11
+
12
+ - model_id: Qwen/Qwen3-0.6B
13
+ display_name: Qwen3 0.6B
14
+ num_of_parameters: 0.6B
15
+
16
+ - model_id: meta-llama/Llama-3.2-1B-Instruct
17
+ display_name: Llama 3.2 1B
18
+ num_of_parameters: 1B
19
+
20
+ - model_id: Unbabel/Tower-Plus-2B
21
+ display_name: Tower Plus 2B
22
+ num_of_parameters: 2B
23
+
24
+ - model_id: microsoft/phi-2
25
+ display_name: Phi 2
26
+ num_of_parameters: 2.7B
27
+
28
+ - model_id: microsoft/Phi-3-mini-128k-instruct
29
+ display_name: Phi 3 mini (128k)
30
+ num_of_parameters: 4B
31
+
32
+ - model_id: google/gemma-3-4b-it
33
+ display_name: Gemma 3 4B
34
+ num_of_parameters: 4B
35
+
36
+ - model_id: Qwen/Qwen3-4B-Base
37
+ display_name: Qwen3 4B
38
+ num_of_parameters: 4B
39
+
40
+ - model_id: Qwen/Qwen3-32B
41
+ display_name: Qwen3 32B
42
+ num_of_parameters: 32B
43
+
44
+ - model_id: deepseek-ai/DeepSeek-R1-0528-Qwen3-8B
45
+ display_name: DeepSeek R1 Qwen3 8B (0528)
46
+ num_of_parameters: 8B
47
+
48
+ - model_id: meta-llama/Llama-4-Scout-17B-16E-Instruct
49
+ display_name: Llama 4 Scout 17B
50
+ num_of_parameters: 17B
51
+
52
+ - model_id: mistralai/Mistral-Small-3.2-24B-Instruct-2506
53
+ display_name: Mistral Small 3.2 24B
54
+ num_of_parameters: 24B
55
+
56
+ - model_id: Qwen/Qwen3-235B-A22B-Instruct-2507
57
+ display_name: Qwen3 235B A22B
58
+ num_of_parameters: 235B
59
+
60
+ - model_id: deepseek-ai/DeepSeek-V3-0324
61
+ display_name: DeepSeek V3
62
+ num_of_parameters: 685B
63
+
64
+
65
+ # Gemma 3n models are not available on the Inference Providers yet
66
+ #- model_id: google/gemma-3n-E2B-it
67
+ # display_name: Gemma 3n E2B IT (Instruct)
68
+ #
69
+ # # While the raw parameter count of this model is 6B, the architecture design allows the model to be run with a memory footprint comparable to a traditional 2B model by offloading low-utilization matrices from the accelerator.
70
+ # #num_of_parameters: 6B
71
+ # num_of_parameters: 2B
72
+ #
73
+ #- model_id: google/gemma-3n-E4B-it
74
+ # display_name: Gemma 3n E4B IT (Instruct)
75
+ #
76
+ # # While the raw parameter count of this model is 8B, the architecture design allows the model to be run with a memory footprint comparable to a traditional 4B model by offloading low-utilization matrices from the accelerator.
77
+ # #num_of_parameters: 8B
78
+ # num_of_parameters: 4B
79
+
build/web/assets/fonts/MaterialIcons-Regular.otf CHANGED
Binary files a/build/web/assets/fonts/MaterialIcons-Regular.otf and b/build/web/assets/fonts/MaterialIcons-Regular.otf differ
 
build/web/flutter_bootstrap.js CHANGED
@@ -38,6 +38,6 @@ _flutter.buildConfig = {"engineRevision":"1c9c20e7c3dd48c66f400a24d48ea806b4ab31
38
 
39
  _flutter.loader.load({
40
  serviceWorkerSettings: {
41
- serviceWorkerVersion: "573687876"
42
  }
43
  });
 
38
 
39
  _flutter.loader.load({
40
  serviceWorkerSettings: {
41
+ serviceWorkerVersion: "1152030249"
42
  }
43
  });
build/web/flutter_service_worker.js CHANGED
@@ -3,12 +3,12 @@ const MANIFEST = 'flutter-app-manifest';
3
  const TEMP = 'flutter-temp-cache';
4
  const CACHE_NAME = 'flutter-app-cache';
5
 
6
- const RESOURCES = {"flutter_bootstrap.js": "581857f4e7740fdd5901e95f9d2a7bc3",
7
  "version.json": "68350cac7987de2728345c72918dd067",
8
  "tikslop.png": "570e1db759046e2d224fef729983634e",
9
  "index.html": "3a7029b3672560e7938aab6fa4d30a46",
10
  "/": "3a7029b3672560e7938aab6fa4d30a46",
11
- "main.dart.js": "d0545221a5e9d6dd47253d8149064a25",
12
  "tikslop.svg": "26140ba0d153b213b122bc6ebcc17f6c",
13
  "flutter.js": "888483df48293866f9f41d3d9274a779",
14
  "favicon.png": "c8a183c516004e648a7bac7497c89b97",
@@ -17,17 +17,18 @@ const RESOURCES = {"flutter_bootstrap.js": "581857f4e7740fdd5901e95f9d2a7bc3",
17
  "icons/Icon-maskable-512.png": "8682b581a7dab984ef4f9b7f21976a64",
18
  "icons/Icon-512.png": "8682b581a7dab984ef4f9b7f21976a64",
19
  "manifest.json": "c0904388ddaba6a9bd572a80f79a8dcc",
20
- "assets/AssetManifest.json": "7c3f24a308a466794e1c04bd7b46567e",
21
  "assets/NOTICES": "90391d860ca53976ec1b643b3e6286b7",
22
  "assets/FontManifest.json": "dc3d03800ccca4601324923c0b1d6d57",
23
- "assets/AssetManifest.bin.json": "b4f8d70a60cc7fe6916c636377e8d4bc",
24
  "assets/packages/cupertino_icons/assets/CupertinoIcons.ttf": "33b7d9392238c04c131b6ce224e13711",
25
  "assets/shaders/ink_sparkle.frag": "ecc85a2e95f5e9f53123dcaf8cb9b6ce",
26
- "assets/AssetManifest.bin": "afdc174fb4cb8a6401bd2328a67e184c",
27
- "assets/fonts/MaterialIcons-Regular.otf": "06b86454c633cc9510ad85ddc0523a91",
28
  "assets/assets/ads/smolagents.gif": "45338af5a4d440b707d02f364be8195c",
29
  "assets/assets/ads/README.md": "1959fb6b85a966348396f2f0f9c3f32a",
30
  "assets/assets/ads/lerobot.gif": "0f90b2fc4d15eefb5572363724d6d925",
 
31
  "assets/assets/config/README.md": "07a87720dd00dd1ca98c9d6884440e31",
32
  "assets/assets/config/custom.yaml": "52bd30aa4d8b980626a5eb02d0871c01",
33
  "assets/assets/config/default.yaml": "9ca1d05d06721c2b6f6382a1ba40af48",
 
3
  const TEMP = 'flutter-temp-cache';
4
  const CACHE_NAME = 'flutter-app-cache';
5
 
6
+ const RESOURCES = {"flutter_bootstrap.js": "4293255503dbf79f2be0ef00f170c7b9",
7
  "version.json": "68350cac7987de2728345c72918dd067",
8
  "tikslop.png": "570e1db759046e2d224fef729983634e",
9
  "index.html": "3a7029b3672560e7938aab6fa4d30a46",
10
  "/": "3a7029b3672560e7938aab6fa4d30a46",
11
+ "main.dart.js": "c53f6a142be0eaf2a83400fd9070161f",
12
  "tikslop.svg": "26140ba0d153b213b122bc6ebcc17f6c",
13
  "flutter.js": "888483df48293866f9f41d3d9274a779",
14
  "favicon.png": "c8a183c516004e648a7bac7497c89b97",
 
17
  "icons/Icon-maskable-512.png": "8682b581a7dab984ef4f9b7f21976a64",
18
  "icons/Icon-512.png": "8682b581a7dab984ef4f9b7f21976a64",
19
  "manifest.json": "c0904388ddaba6a9bd572a80f79a8dcc",
20
+ "assets/AssetManifest.json": "42eb9cb8cfb5d55547c6e8e355ddcfac",
21
  "assets/NOTICES": "90391d860ca53976ec1b643b3e6286b7",
22
  "assets/FontManifest.json": "dc3d03800ccca4601324923c0b1d6d57",
23
+ "assets/AssetManifest.bin.json": "d21861b1d7161e95775d5c89361428d5",
24
  "assets/packages/cupertino_icons/assets/CupertinoIcons.ttf": "33b7d9392238c04c131b6ce224e13711",
25
  "assets/shaders/ink_sparkle.frag": "ecc85a2e95f5e9f53123dcaf8cb9b6ce",
26
+ "assets/AssetManifest.bin": "3311f7126492dcfa7fb84733f178441e",
27
+ "assets/fonts/MaterialIcons-Regular.otf": "adb729127dd99d9fd52c132e18ef0b8f",
28
  "assets/assets/ads/smolagents.gif": "45338af5a4d440b707d02f364be8195c",
29
  "assets/assets/ads/README.md": "1959fb6b85a966348396f2f0f9c3f32a",
30
  "assets/assets/ads/lerobot.gif": "0f90b2fc4d15eefb5572363724d6d925",
31
+ "assets/assets/config/curated_models.yaml": "244e9a0acd25da220c9d49cc53c8e358",
32
  "assets/assets/config/README.md": "07a87720dd00dd1ca98c9d6884440e31",
33
  "assets/assets/config/custom.yaml": "52bd30aa4d8b980626a5eb02d0871c01",
34
  "assets/assets/config/default.yaml": "9ca1d05d06721c2b6f6382a1ba40af48",
build/web/index.html CHANGED
@@ -156,7 +156,7 @@
156
  </script>
157
 
158
  <!-- Add version parameter for cache busting -->
159
- <script src="flutter_bootstrap.js?v=1753208400" async></script>
160
 
161
  <!-- Add cache busting script -->
162
  <script>
 
156
  </script>
157
 
158
  <!-- Add version parameter for cache busting -->
159
+ <script src="flutter_bootstrap.js?v=1753272934" async></script>
160
 
161
  <!-- Add cache busting script -->
162
  <script>
build/web/main.dart.js CHANGED
The diff for this file is too large to render. See raw diff
 
lib/models/curated_model.dart ADDED
@@ -0,0 +1,116 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import 'package:flutter/services.dart';
import 'package:yaml/yaml.dart';

/// Represents a curated LLM model with metadata loaded from
/// `assets/config/curated_models.yaml`.
class CuratedModel {
  /// Hugging Face model identifier, e.g. 'Qwen/Qwen3-0.6B'.
  final String modelId;

  /// Human-readable name shown in the model picker.
  final String displayName;

  /// Parameter count as a string, e.g. '0.6B', '24B'.
  final String numOfParameters;

  const CuratedModel({
    required this.modelId,
    required this.displayName,
    required this.numOfParameters,
  });

  /// Speed category derived from the parameter count (in billions).
  ///
  /// Boundaries are intentionally inclusive up to 4B and exclusive above,
  /// so 4B is 'Fast' while 8B is 'Slow', 17B 'Slower' and 32B 'Slowest'.
  String get speedCategory {
    final paramValue = _parseParameters(numOfParameters);

    if (paramValue <= 1) return 'Fastest';
    if (paramValue <= 2) return 'Faster';
    if (paramValue <= 4) return 'Fast';
    if (paramValue < 8) return 'Normal';
    if (paramValue < 17) return 'Slow';
    if (paramValue < 32) return 'Slower';
    return 'Slowest';
  }

  // Visual counterpart of [speedCategory]; '❓' for an unknown category
  // (cannot happen today, but keeps the getter total).
  static const Map<String, String> _speedEmojis = {
    'Fastest': '🚀',
    'Faster': '⚡',
    'Fast': '🏃',
    'Normal': '🚶',
    'Slow': '🐌',
    'Slower': '🐢',
    'Slowest': '🦥',
  };

  /// Emoji for visual representation of [speedCategory].
  String get speedEmoji => _speedEmojis[speedCategory] ?? '❓';

  /// Parses a parameter-count string into billions.
  ///
  /// Accepts 'B' (billions), 'M' (millions) and 'K' (thousands) suffixes,
  /// case-insensitively and with surrounding whitespace:
  /// '8B' -> 8.0, '500M' -> 0.5. Returns 0.0 when unparseable, which sorts
  /// the model first rather than crashing the list.
  static double _parseParameters(String params) {
    var text = params.trim().toUpperCase();
    var scale = 1.0;
    if (text.endsWith('B')) {
      text = text.substring(0, text.length - 1);
    } else if (text.endsWith('M')) {
      scale = 1e-3;
      text = text.substring(0, text.length - 1);
    } else if (text.endsWith('K')) {
      scale = 1e-6;
      text = text.substring(0, text.length - 1);
    }
    return (double.tryParse(text.trim()) ?? 0.0) * scale;
  }

  /// Creates a model from one YAML mapping entry.
  ///
  /// Throws a [TypeError] if a required key is missing or not a string.
  factory CuratedModel.fromYaml(YamlMap yaml) {
    return CuratedModel(
      modelId: yaml['model_id'] as String,
      displayName: yaml['display_name'] as String,
      numOfParameters: yaml['num_of_parameters'] as String,
    );
  }

  /// Loads all curated models from the bundled YAML asset.
  ///
  /// Returns the models sorted by parameter count (smallest first).
  /// Falls back to [_defaultModels] if the asset is missing or malformed.
  static Future<List<CuratedModel>> loadFromAssets() async {
    try {
      final yamlString =
          await rootBundle.loadString('assets/config/curated_models.yaml');
      final yamlData = loadYaml(yamlString);

      final models = <CuratedModel>[];
      if (yamlData['models'] != null) {
        for (final modelYaml in yamlData['models']) {
          models.add(CuratedModel.fromYaml(modelYaml));
        }
      }

      // Sort by parameter count (smallest first).
      models.sort((a, b) => _parseParameters(a.numOfParameters)
          .compareTo(_parseParameters(b.numOfParameters)));

      return models;
    } catch (e) {
      // Best-effort fallback: never let a broken asset take down the UI.
      return _defaultModels;
    }
  }

  /// Default models in case asset loading fails.
  static const List<CuratedModel> _defaultModels = [
    CuratedModel(
      modelId: 'meta-llama/Llama-3.2-3B-Instruct',
      displayName: 'Llama 3.2 3B Instruct',
      numOfParameters: '3B',
    ),
    CuratedModel(
      modelId: 'HuggingFaceTB/SmolLM3-3B',
      displayName: 'SmolLM3 3B',
      numOfParameters: '3B',
    ),
  ];

  // Identity is the model id alone; display name and size are metadata.
  @override
  bool operator ==(Object other) =>
      identical(this, other) ||
      other is CuratedModel &&
          runtimeType == other.runtimeType &&
          modelId == other.modelId;

  @override
  int get hashCode => modelId.hashCode;

  @override
  String toString() =>
      'CuratedModel(modelId: $modelId, displayName: $displayName, parameters: $numOfParameters)';
}
lib/models/llm_provider.dart ADDED
@@ -0,0 +1,146 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/// LLM Provider configuration for Hugging Face supported providers.
class LLMProvider {
  /// Stable identifier used for persistence and lookup, e.g. 'groq'.
  final String id;

  /// Human-readable name shown in the provider dropdown.
  final String name;

  /// The provider's own OpenAI-compatible endpoint; null for 'built-in'.
  final String? apiBaseUrl;

  /// The Hugging Face router endpoint for this provider.
  final String routerBaseUrl;

  /// Whether a Hugging Face API key can be used instead of a provider key.
  final bool supportsHuggingFaceKey;

  /// Runtime availability flag (may be flipped via [copyWith]).
  final bool isAvailable;

  const LLMProvider({
    required this.id,
    required this.name,
    this.apiBaseUrl,
    required this.routerBaseUrl,
    this.supportsHuggingFaceKey = true,
    this.isAvailable = true,
  });

  /// Returns a copy of this provider with updated availability.
  LLMProvider copyWith({bool? isAvailable}) {
    return LLMProvider(
      id: id,
      name: name,
      apiBaseUrl: apiBaseUrl,
      routerBaseUrl: routerBaseUrl,
      supportsHuggingFaceKey: supportsHuggingFaceKey,
      isAvailable: isAvailable ?? this.isAvailable,
    );
  }

  /// The label to show next to the API-key input for this provider.
  String get apiKeyLabel {
    if (!supportsHuggingFaceKey) {
      return '$name API Key';
    }
    return 'Hugging Face API Key';
  }

  /// List of all supported providers based on HF documentation.
  static const List<LLMProvider> supportedProviders = [
    LLMProvider(
      id: 'built-in',
      name: 'Built-in (free, slow)',
      routerBaseUrl: '',
      supportsHuggingFaceKey: false,
    ),
    LLMProvider(
      id: 'cerebras',
      name: 'Cerebras',
      apiBaseUrl: 'https://api.cerebras.ai/v1',
      routerBaseUrl: 'https://router.huggingface.co/cerebras/v1',
    ),
    LLMProvider(
      id: 'cohere',
      name: 'Cohere',
      apiBaseUrl: 'https://api.cohere.com/compatibility/v1',
      routerBaseUrl: 'https://router.huggingface.co/cohere/v1',
    ),
    LLMProvider(
      id: 'fal-ai',
      name: 'Fal AI',
      apiBaseUrl: 'https://api.fal.ai/v1',
      routerBaseUrl: 'https://router.huggingface.co/fal-ai/v1',
    ),
    LLMProvider(
      id: 'featherless',
      name: 'Featherless AI',
      apiBaseUrl: 'https://api.featherless.ai/v1',
      routerBaseUrl: 'https://router.huggingface.co/featherless/v1',
    ),
    LLMProvider(
      id: 'fireworks',
      name: 'Fireworks',
      apiBaseUrl: 'https://api.fireworks.ai/inference/v1',
      routerBaseUrl: 'https://router.huggingface.co/fireworks/v1',
    ),
    LLMProvider(
      id: 'groq',
      name: 'Groq',
      apiBaseUrl: 'https://api.groq.com/openai/v1',
      routerBaseUrl: 'https://router.huggingface.co/groq/v1',
    ),
    LLMProvider(
      id: 'hf-inference',
      name: 'HF Inference',
      apiBaseUrl: 'https://api-inference.huggingface.co/v1',
      routerBaseUrl: 'https://router.huggingface.co/hf-inference/v1',
    ),
    LLMProvider(
      id: 'hyperbolic',
      name: 'Hyperbolic',
      apiBaseUrl: 'https://api.hyperbolic.xyz/v1',
      routerBaseUrl: 'https://router.huggingface.co/hyperbolic/v1',
    ),
    LLMProvider(
      id: 'nebius',
      name: 'Nebius',
      apiBaseUrl: 'https://api.studio.nebius.ai/v1',
      routerBaseUrl: 'https://router.huggingface.co/nebius/v1',
    ),
    LLMProvider(
      id: 'novita',
      name: 'Novita',
      apiBaseUrl: 'https://api.novita.ai/v3/openai',
      routerBaseUrl: 'https://router.huggingface.co/novita/v1',
    ),
    LLMProvider(
      id: 'nscale',
      name: 'Nscale',
      apiBaseUrl: 'https://inference.api.nscale.com/v1',
      routerBaseUrl: 'https://router.huggingface.co/nscale/v1',
    ),
    LLMProvider(
      id: 'replicate',
      name: 'Replicate',
      apiBaseUrl: 'https://api.replicate.com/v1',
      routerBaseUrl: 'https://router.huggingface.co/replicate/v1',
    ),
    LLMProvider(
      id: 'sambanova',
      name: 'SambaNova',
      apiBaseUrl: 'https://api.sambanova.ai/v1',
      routerBaseUrl: 'https://router.huggingface.co/sambanova/v1',
    ),
    LLMProvider(
      id: 'together',
      name: 'Together',
      apiBaseUrl: 'https://api.together.xyz/v1',
      routerBaseUrl: 'https://router.huggingface.co/together/v1',
    ),
  ];

  /// Returns the provider with the given [id], or null if unknown.
  ///
  /// A plain scan instead of `firstWhere` + catch: lookup failure is an
  /// expected outcome, not an exceptional one.
  static LLMProvider? getById(String id) {
    for (final provider in supportedProviders) {
      if (provider.id == id) return provider;
    }
    return null;
  }

  /// The default provider ('built-in', first in [supportedProviders]).
  static LLMProvider get defaultProvider {
    return supportedProviders.first;
  }
}
lib/screens/settings_screen.dart CHANGED
@@ -1,6 +1,9 @@
1
  import 'package:flutter/material.dart';
2
  import '../services/settings_service.dart';
3
  import '../services/websocket_api_service.dart';
 
 
 
4
  import '../theme/colors.dart';
5
 
6
  class SettingsScreen extends StatefulWidget {
@@ -15,11 +18,20 @@ class _SettingsScreenState extends State<SettingsScreen> {
15
  final _negativePromptController = TextEditingController();
16
  final _hfApiKeyController = TextEditingController();
17
  final _llmApiKeyController = TextEditingController();
 
18
  final _settingsService = SettingsService();
 
19
  bool _showSceneDebugInfo = false;
20
  bool _enableSimulation = true;
21
- String _selectedLlmProvider = 'openai';
22
- String _selectedLlmModel = 'gpt-4';
 
 
 
 
 
 
 
23
 
24
  @override
25
  void initState() {
@@ -31,16 +43,28 @@ class _SettingsScreenState extends State<SettingsScreen> {
31
  _showSceneDebugInfo = _settingsService.showSceneDebugInfo;
32
  _enableSimulation = _settingsService.enableSimulation;
33
 
34
- // Auto-select built-in provider if no HF API key
35
  if (_settingsService.huggingfaceApiKey.isEmpty) {
36
- _selectedLlmProvider = 'builtin';
37
- _selectedLlmModel = 'default';
 
38
  // Save the auto-selected values
39
- _settingsService.setLlmProvider('builtin');
40
- _settingsService.setLlmModel('default');
41
  } else {
42
  _selectedLlmProvider = _settingsService.llmProvider;
43
  _selectedLlmModel = _settingsService.llmModel;
 
 
 
 
 
 
 
 
 
 
 
44
  }
45
  }
46
 
@@ -50,8 +74,102 @@ class _SettingsScreenState extends State<SettingsScreen> {
50
  _negativePromptController.dispose();
51
  _hfApiKeyController.dispose();
52
  _llmApiKeyController.dispose();
 
53
  super.dispose();
54
  }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
55
 
56
  @override
57
  Widget build(BuildContext context) {
@@ -82,7 +200,7 @@ class _SettingsScreenState extends State<SettingsScreen> {
82
  controller: _hfApiKeyController,
83
  decoration: const InputDecoration(
84
  labelText: 'Hugging Face API Key',
85
- helperText: 'Your HF token for API access and higher-resolution rendering',
86
  helperMaxLines: 2,
87
  ),
88
  obscureText: true,
@@ -90,16 +208,24 @@ class _SettingsScreenState extends State<SettingsScreen> {
90
  await _settingsService.setHuggingfaceApiKey(value);
91
 
92
  // Auto-select built-in provider if API key is removed
93
- if (value.isEmpty && _selectedLlmProvider != 'builtin') {
94
  setState(() {
95
- _selectedLlmProvider = 'builtin';
96
- _selectedLlmModel = 'default';
 
 
 
 
97
  });
98
- await _settingsService.setLlmProvider('builtin');
99
- await _settingsService.setLlmModel('default');
100
  } else if (value.isNotEmpty) {
101
  // Trigger rebuild to enable/disable fields
102
  setState(() {});
 
 
 
 
103
  }
104
 
105
  // Show a snackbar to indicate the API key was saved
@@ -147,105 +273,318 @@ class _SettingsScreenState extends State<SettingsScreen> {
147
  },
148
  ),
149
  const SizedBox(height: 16),
 
150
  DropdownButtonFormField<String>(
151
  decoration: InputDecoration(
152
- labelText: 'LLM Provider',
153
- helperText: _hfApiKeyController.text.isEmpty
154
- ? 'Enter HF API key to unlock providers'
155
- : 'Select your preferred LLM provider',
 
 
 
 
 
 
 
 
 
 
 
 
156
  ),
157
- initialValue: _selectedLlmProvider,
158
- onChanged: _hfApiKeyController.text.isEmpty ? null : (String? newValue) {
159
- if (newValue != null) {
160
- // Prevent selecting non-builtin providers without HF API key
161
- if (_hfApiKeyController.text.isEmpty && newValue != 'builtin') {
162
- ScaffoldMessenger.of(context).showSnackBar(
163
- const SnackBar(
164
- content: Text('Please provide a Hugging Face API key to use external providers'),
165
- backgroundColor: Colors.orange,
166
- ),
167
- );
168
- return;
169
- }
170
  setState(() {
171
- _selectedLlmProvider = newValue;
172
- // Reset model when provider changes
173
- if (newValue == 'builtin') {
174
- _selectedLlmModel = 'default';
175
- } else {
176
- _selectedLlmModel = _getModelsForProvider(newValue).first.value!;
177
- }
178
  });
179
- _settingsService.setLlmProvider(newValue);
180
- _settingsService.setLlmModel(_selectedLlmModel);
181
- }
182
- },
183
- items: const [
184
- DropdownMenuItem(
185
- value: 'builtin',
186
- child: Text('Built-in (free, slow)'),
187
- ),
188
- DropdownMenuItem(
189
- value: 'openai',
190
- child: Text('OpenAI'),
191
- ),
192
- DropdownMenuItem(
193
- value: 'anthropic',
194
- child: Text('Anthropic'),
195
- ),
196
- DropdownMenuItem(
197
- value: 'google',
198
- child: Text('Google'),
199
- ),
200
- DropdownMenuItem(
201
- value: 'cohere',
202
- child: Text('Cohere'),
203
- ),
204
- DropdownMenuItem(
205
- value: 'together',
206
- child: Text('Together AI'),
207
- ),
208
- DropdownMenuItem(
209
- value: 'huggingface',
210
- child: Text('Hugging Face'),
211
- ),
212
- ],
213
- ),
214
- const SizedBox(height: 16),
215
- DropdownButtonFormField<String>(
216
- decoration: InputDecoration(
217
- labelText: 'LLM Model',
218
- helperText: _hfApiKeyController.text.isEmpty
219
- ? 'Using default built-in model'
220
- : 'Select the model to use',
221
- ),
222
- initialValue: _selectedLlmModel,
223
- onChanged: _hfApiKeyController.text.isEmpty ? null : (String? newValue) {
224
- if (newValue != null) {
225
  setState(() {
226
- _selectedLlmModel = newValue;
 
 
 
 
 
 
 
227
  });
228
- _settingsService.setLlmModel(newValue);
 
 
 
 
 
229
  }
230
  },
231
- items: _getModelsForProvider(_selectedLlmProvider),
232
- ),
233
- const SizedBox(height: 16),
234
- TextField(
235
- controller: _llmApiKeyController,
236
- decoration: InputDecoration(
237
- labelText: _getLlmApiKeyLabel(),
238
- helperText: _hfApiKeyController.text.isEmpty
239
- ? 'Enter HF API key above to enable provider options'
240
- : 'Optional - will use your HF API key if not provided',
241
- helperMaxLines: 2,
242
- ),
243
- obscureText: true,
244
- enabled: _hfApiKeyController.text.isNotEmpty,
245
- onChanged: (value) async {
246
- await _settingsService.setLlmApiKey(value);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
247
  },
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
248
  ),
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
249
  ],
250
  ),
251
  ),
@@ -384,74 +723,5 @@ class _SettingsScreenState extends State<SettingsScreen> {
384
  );
385
  }
386
 
387
- List<DropdownMenuItem<String>> _getModelsForProvider(String provider) {
388
- switch (provider) {
389
- case 'builtin':
390
- return const [
391
- DropdownMenuItem(value: 'default', child: Text('Default Model')),
392
- ];
393
- case 'openai':
394
- return const [
395
- DropdownMenuItem(value: 'gpt-4', child: Text('GPT-4')),
396
- DropdownMenuItem(value: 'gpt-4-turbo', child: Text('GPT-4 Turbo')),
397
- DropdownMenuItem(value: 'gpt-3.5-turbo', child: Text('GPT-3.5 Turbo')),
398
- ];
399
- case 'anthropic':
400
- return const [
401
- DropdownMenuItem(value: 'claude-3-opus', child: Text('Claude 3 Opus')),
402
- DropdownMenuItem(value: 'claude-3-sonnet', child: Text('Claude 3 Sonnet')),
403
- DropdownMenuItem(value: 'claude-3-haiku', child: Text('Claude 3 Haiku')),
404
- ];
405
- case 'google':
406
- return const [
407
- DropdownMenuItem(value: 'gemini-1.5-pro', child: Text('Gemini 1.5 Pro')),
408
- DropdownMenuItem(value: 'gemini-1.5-flash', child: Text('Gemini 1.5 Flash')),
409
- DropdownMenuItem(value: 'gemini-pro', child: Text('Gemini Pro')),
410
- ];
411
- case 'cohere':
412
- return const [
413
- DropdownMenuItem(value: 'command-r-plus', child: Text('Command R Plus')),
414
- DropdownMenuItem(value: 'command-r', child: Text('Command R')),
415
- DropdownMenuItem(value: 'command', child: Text('Command')),
416
- ];
417
- case 'together':
418
- return const [
419
- DropdownMenuItem(value: 'meta-llama/Llama-3.2-3B-Instruct', child: Text('Llama 3.2 3B')),
420
- DropdownMenuItem(value: 'mistralai/Mixtral-8x7B-Instruct-v0.1', child: Text('Mixtral 8x7B')),
421
- DropdownMenuItem(value: 'deepseek-ai/deepseek-coder', child: Text('DeepSeek Coder')),
422
- ];
423
- case 'huggingface':
424
- return const [
425
- DropdownMenuItem(value: 'HuggingFaceTB/SmolLM3-3B', child: Text('SmolLM3 3B')),
426
- DropdownMenuItem(value: 'meta-llama/Llama-3.2-3B-Instruct', child: Text('Llama 3.2 3B')),
427
- DropdownMenuItem(value: 'microsoft/Phi-3-mini-4k-instruct', child: Text('Phi-3 Mini')),
428
- ];
429
- default:
430
- return const [
431
- DropdownMenuItem(value: 'default', child: Text('Default Model')),
432
- ];
433
- }
434
- }
435
-
436
- String _getLlmApiKeyLabel() {
437
- switch (_selectedLlmProvider) {
438
- case 'builtin':
439
- return 'API Key (Not required for built-in)';
440
- case 'openai':
441
- return 'OpenAI API Key';
442
- case 'anthropic':
443
- return 'Anthropic API Key';
444
- case 'google':
445
- return 'Google AI API Key';
446
- case 'cohere':
447
- return 'Cohere API Key';
448
- case 'together':
449
- return 'Together AI API Key';
450
- case 'huggingface':
451
- return 'Hugging Face API Key';
452
- default:
453
- return 'API Key';
454
- }
455
- }
456
 
457
  }
 
1
  import 'package:flutter/material.dart';
2
  import '../services/settings_service.dart';
3
  import '../services/websocket_api_service.dart';
4
+ import '../services/model_availability_service.dart';
5
+ import '../models/llm_provider.dart';
6
+ import '../models/curated_model.dart';
7
  import '../theme/colors.dart';
8
 
9
  class SettingsScreen extends StatefulWidget {
 
18
  final _negativePromptController = TextEditingController();
19
  final _hfApiKeyController = TextEditingController();
20
  final _llmApiKeyController = TextEditingController();
21
+ final _modelNameController = TextEditingController();
22
  final _settingsService = SettingsService();
23
+ final _availabilityService = ModelAvailabilityService();
24
  bool _showSceneDebugInfo = false;
25
  bool _enableSimulation = true;
26
+ String _selectedLlmProvider = 'built-in';
27
+ String _selectedLlmModel = 'meta-llama/Llama-3.2-3B-Instruct';
28
+ LLMProvider? _currentProvider;
29
+ List<LLMProvider> _availableProviders = LLMProvider.supportedProviders.where((p) => p.id != 'built-in').toList();
30
+ List<CuratedModel> _curatedModels = [];
31
+ CuratedModel? _selectedCuratedModel;
32
+ bool _isCheckingAvailability = false;
33
+ bool _isLoadingModels = true;
34
+ bool _isBuiltInModelSelected = true;
35
 
36
  @override
37
  void initState() {
 
43
  _showSceneDebugInfo = _settingsService.showSceneDebugInfo;
44
  _enableSimulation = _settingsService.enableSimulation;
45
 
46
+ // Auto-select built-in model if no HF API key
47
  if (_settingsService.huggingfaceApiKey.isEmpty) {
48
+ _selectedLlmProvider = 'built-in';
49
+ _selectedLlmModel = 'built-in';
50
+ _isBuiltInModelSelected = true;
51
  // Save the auto-selected values
52
+ _settingsService.setLlmProvider('built-in');
53
+ _settingsService.setLlmModel('built-in');
54
  } else {
55
  _selectedLlmProvider = _settingsService.llmProvider;
56
  _selectedLlmModel = _settingsService.llmModel;
57
+ _isBuiltInModelSelected = _selectedLlmModel == 'built-in';
58
+ }
59
+ _currentProvider = _isBuiltInModelSelected ? null : LLMProvider.getById(_selectedLlmProvider);
60
+ _modelNameController.text = _selectedLlmModel;
61
+
62
+ // Load curated models
63
+ _loadCuratedModels();
64
+
65
+ // Check model availability on startup
66
+ if (!_isBuiltInModelSelected) {
67
+ _checkModelAvailability();
68
  }
69
  }
70
 
 
74
  _negativePromptController.dispose();
75
  _hfApiKeyController.dispose();
76
  _llmApiKeyController.dispose();
77
+ _modelNameController.dispose();
78
  super.dispose();
79
  }
80
+
81
+ Future<void> _loadCuratedModels() async {
82
+ try {
83
+ final models = await CuratedModel.loadFromAssets();
84
+ setState(() {
85
+ _curatedModels = models;
86
+ _isLoadingModels = false;
87
+
88
+ // Find the currently selected model in the curated list
89
+ if (!_isBuiltInModelSelected) {
90
+ try {
91
+ _selectedCuratedModel = _curatedModels.firstWhere(
92
+ (model) => model.modelId == _selectedLlmModel,
93
+ );
94
+ } catch (e) {
95
+ // If current model not found in curated list, use first available
96
+ _selectedCuratedModel = _curatedModels.isNotEmpty ? _curatedModels.first : null;
97
+ }
98
+ } else {
99
+ _selectedCuratedModel = null;
100
+ }
101
+ });
102
+ } catch (e) {
103
+ setState(() {
104
+ _isLoadingModels = false;
105
+ });
106
+ }
107
+ }
108
+
109
+ Future<void> _checkModelAvailability() async {
110
+ if (!mounted || _selectedLlmModel.isEmpty) return;
111
+
112
+ setState(() {
113
+ _isCheckingAvailability = true;
114
+ });
115
+
116
+ try {
117
+ final availability = await _availabilityService.getModelAvailability(
118
+ _selectedLlmModel,
119
+ );
120
+ if (availability != null && mounted) {
121
+ final compatibleProviders = _availabilityService.getCompatibleProviders(
122
+ _selectedLlmModel,
123
+ );
124
+
125
+ // Update provider availability
126
+ final updatedProviders = LLMProvider.supportedProviders.map((provider) {
127
+ if (provider.id == 'built-in') {
128
+ return provider; // Built-in is always available
129
+ }
130
+ final isCompatible = compatibleProviders.contains(provider.id);
131
+ return provider.copyWith(isAvailable: isCompatible);
132
+ }).toList();
133
+
134
+ setState(() {
135
+ _availableProviders = updatedProviders;
136
+ _isCheckingAvailability = false;
137
+ });
138
+
139
+ // Auto-switch provider if current one is not compatible
140
+ if (!compatibleProviders.contains(_selectedLlmProvider) && !_isBuiltInModelSelected) {
141
+ if (compatibleProviders.isNotEmpty) {
142
+ // Switch to first compatible provider
143
+ setState(() {
144
+ _selectedLlmProvider = compatibleProviders.first;
145
+ _currentProvider = LLMProvider.getById(_selectedLlmProvider);
146
+ });
147
+ await _settingsService.setLlmProvider(_selectedLlmProvider);
148
+ } else {
149
+ // No compatible providers, switch to built-in model
150
+ setState(() {
151
+ _selectedLlmProvider = 'built-in';
152
+ _selectedLlmModel = 'built-in';
153
+ _isBuiltInModelSelected = true;
154
+ _selectedCuratedModel = null;
155
+ _currentProvider = null;
156
+ });
157
+ await _settingsService.setLlmProvider('built-in');
158
+ await _settingsService.setLlmModel('built-in');
159
+ }
160
+ }
161
+ }
162
+ } catch (e) {
163
+ if (mounted) {
164
+ setState(() {
165
+ _isCheckingAvailability = false;
166
+ });
167
+ ScaffoldMessenger.of(context).showSnackBar(
168
+ SnackBar(content: Text('Failed to check model availability: $e')),
169
+ );
170
+ }
171
+ }
172
+ }
173
 
174
  @override
175
  Widget build(BuildContext context) {
 
200
  controller: _hfApiKeyController,
201
  decoration: const InputDecoration(
202
  labelText: 'Hugging Face API Key',
203
+ helperText: 'Providing a HF API key allows you to select faster or better LLMs (billed to your account)',
204
  helperMaxLines: 2,
205
  ),
206
  obscureText: true,
 
208
  await _settingsService.setHuggingfaceApiKey(value);
209
 
210
  // Auto-select built-in provider if API key is removed
211
+ if (value.isEmpty && !_isBuiltInModelSelected) {
212
  setState(() {
213
+ _selectedLlmProvider = 'built-in';
214
+ _selectedLlmModel = 'built-in';
215
+ _isBuiltInModelSelected = true;
216
+ _currentProvider = null;
217
+ _selectedCuratedModel = null;
218
+ _modelNameController.text = _selectedLlmModel;
219
  });
220
+ await _settingsService.setLlmProvider('built-in');
221
+ await _settingsService.setLlmModel('built-in');
222
  } else if (value.isNotEmpty) {
223
  // Trigger rebuild to enable/disable fields
224
  setState(() {});
225
+ // Check model availability when HF key is provided
226
+ if (!_isBuiltInModelSelected) {
227
+ _checkModelAvailability();
228
+ }
229
  }
230
 
231
  // Show a snackbar to indicate the API key was saved
 
273
  },
274
  ),
275
  const SizedBox(height: 16),
276
+ // Model selection dropdown
277
  DropdownButtonFormField<String>(
278
  decoration: InputDecoration(
279
+ labelText: 'Model',
280
+ helperText: _isBuiltInModelSelected
281
+ ? 'The built-in model is free, but shared among users and may be out of capacity sometimes'
282
+ : _hfApiKeyController.text.isEmpty
283
+ ? 'Enter HF API key to select models'
284
+ : _isCheckingAvailability
285
+ ? 'Checking model availability...'
286
+ : 'Select a curated model optimized for #tikslop',
287
+ helperMaxLines: 2,
288
+ suffixIcon: _isCheckingAvailability || _isLoadingModels
289
+ ? const SizedBox(
290
+ width: 16,
291
+ height: 16,
292
+ child: CircularProgressIndicator(strokeWidth: 2),
293
+ )
294
+ : null,
295
  ),
296
+ value: _isBuiltInModelSelected ? 'built-in' : _selectedCuratedModel?.modelId,
297
+ onChanged: (String? newValue) async {
298
+ if (newValue == 'built-in') {
 
 
 
 
 
 
 
 
 
 
299
  setState(() {
300
+ _isBuiltInModelSelected = true;
301
+ _selectedLlmModel = 'built-in';
302
+ _selectedLlmProvider = 'built-in';
303
+ _selectedCuratedModel = null;
304
+ _currentProvider = null;
 
 
305
  });
306
+ await _settingsService.setLlmModel('built-in');
307
+ await _settingsService.setLlmProvider('built-in');
308
+ } else if (newValue != null) {
309
+ final newModel = _curatedModels.firstWhere(
310
+ (model) => model.modelId == newValue,
311
+ );
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
312
  setState(() {
313
+ _isBuiltInModelSelected = false;
314
+ _selectedCuratedModel = newModel;
315
+ _selectedLlmModel = newModel.modelId;
316
+ // Reset to first available provider if we had built-in selected
317
+ if (_selectedLlmProvider == 'built-in') {
318
+ _selectedLlmProvider = _availableProviders.isNotEmpty ? _availableProviders.first.id : 'hf-inference';
319
+ _currentProvider = LLMProvider.getById(_selectedLlmProvider);
320
+ }
321
  });
322
+ await _settingsService.setLlmModel(newModel.modelId);
323
+ if (_selectedLlmProvider != 'built-in') {
324
+ await _settingsService.setLlmProvider(_selectedLlmProvider);
325
+ }
326
+ // Check availability after model change
327
+ _checkModelAvailability();
328
  }
329
  },
330
+ selectedItemBuilder: (BuildContext context) {
331
+ final allItems = <String>['built-in', ..._curatedModels.map((m) => m.modelId)];
332
+ return allItems.map((itemValue) {
333
+ if (itemValue == 'built-in') {
334
+ return const Row(
335
+ children: [
336
+ Text('🏠'),
337
+ SizedBox(width: 8),
338
+ Expanded(
339
+ child: Text(
340
+ 'Built-in (default, free)',
341
+ style: TextStyle(
342
+ fontWeight: FontWeight.w500,
343
+ ),
344
+ overflow: TextOverflow.ellipsis,
345
+ ),
346
+ ),
347
+ ],
348
+ );
349
+ } else {
350
+ final model = _curatedModels.firstWhere((m) => m.modelId == itemValue);
351
+ return Row(
352
+ children: [
353
+ Text(model.speedEmoji),
354
+ const SizedBox(width: 8),
355
+ Expanded(
356
+ child: Text(
357
+ model.displayName,
358
+ style: const TextStyle(
359
+ fontWeight: FontWeight.w500,
360
+ ),
361
+ overflow: TextOverflow.ellipsis,
362
+ ),
363
+ ),
364
+ ],
365
+ );
366
+ }
367
+ }).toList();
368
  },
369
+ items: _hfApiKeyController.text.isNotEmpty
370
+ ? [
371
+ // Scenario 1: HF API key provided - show all models including built-in
372
+ const DropdownMenuItem<String>(
373
+ value: 'built-in',
374
+ child: Column(
375
+ crossAxisAlignment: CrossAxisAlignment.start,
376
+ mainAxisSize: MainAxisSize.min,
377
+ children: [
378
+ Row(
379
+ children: [
380
+ Text('🏠'),
381
+ SizedBox(width: 8),
382
+ Expanded(
383
+ child: Text(
384
+ 'Built-in (default, free)',
385
+ style: TextStyle(
386
+ fontWeight: FontWeight.w500,
387
+ ),
388
+ ),
389
+ ),
390
+ ],
391
+ ),
392
+ Padding(
393
+ padding: EdgeInsets.only(left: 24),
394
+ child: Text(
395
+ 'Slow and unreliable',
396
+ style: TextStyle(
397
+ fontSize: 12,
398
+ color: Colors.grey,
399
+ ),
400
+ ),
401
+ ),
402
+ ],
403
+ ),
404
+ ),
405
+ ..._curatedModels.map((model) {
406
+ return DropdownMenuItem<String>(
407
+ value: model.modelId,
408
+ child: Column(
409
+ crossAxisAlignment: CrossAxisAlignment.start,
410
+ mainAxisSize: MainAxisSize.min,
411
+ children: [
412
+ Row(
413
+ children: [
414
+ Text(model.speedEmoji),
415
+ const SizedBox(width: 8),
416
+ Expanded(
417
+ child: Text(
418
+ model.displayName,
419
+ style: const TextStyle(
420
+ fontWeight: FontWeight.w500,
421
+ ),
422
+ ),
423
+ ),
424
+ ],
425
+ ),
426
+ Padding(
427
+ padding: const EdgeInsets.only(left: 24),
428
+ child: Text(
429
+ '${model.numOfParameters} • ${model.speedCategory}',
430
+ style: TextStyle(
431
+ fontSize: 12,
432
+ color: Colors.grey[600],
433
+ ),
434
+ ),
435
+ ),
436
+ ],
437
+ ),
438
+ );
439
+ }),
440
+ ]
441
+ : [
442
+ // Scenario 2: No HF API key - only show built-in and disabled message
443
+ const DropdownMenuItem<String>(
444
+ value: 'built-in',
445
+ child: Column(
446
+ crossAxisAlignment: CrossAxisAlignment.start,
447
+ mainAxisSize: MainAxisSize.min,
448
+ children: [
449
+ Row(
450
+ children: [
451
+ Text('🏠'),
452
+ SizedBox(width: 8),
453
+ Expanded(
454
+ child: Text(
455
+ 'Built-in (default, free)',
456
+ style: TextStyle(
457
+ fontWeight: FontWeight.w500,
458
+ ),
459
+ ),
460
+ ),
461
+ ],
462
+ ),
463
+ Padding(
464
+ padding: EdgeInsets.only(left: 24),
465
+ child: Text(
466
+ 'Slow and unreliable',
467
+ style: TextStyle(
468
+ fontSize: 12,
469
+ color: Colors.grey,
470
+ ),
471
+ ),
472
+ ),
473
+ ],
474
+ ),
475
+ ),
476
+ const DropdownMenuItem<String>(
477
+ value: null,
478
+ enabled: false,
479
+ child: Padding(
480
+ padding: EdgeInsets.symmetric(vertical: 8.0),
481
+ child: Text(
482
+ 'To use other models you need a HF API key',
483
+ style: TextStyle(
484
+ color: Colors.grey,
485
+ fontStyle: FontStyle.italic,
486
+ ),
487
+ ),
488
+ ),
489
+ ),
490
+ ],
491
  ),
492
+ if (!_isBuiltInModelSelected) ...[
493
+ const SizedBox(height: 16),
494
+ DropdownButtonFormField<String>(
495
+ decoration: InputDecoration(
496
+ labelText: 'LLM Provider',
497
+ helperText: _hfApiKeyController.text.isEmpty
498
+ ? 'Enter HF API key to unlock providers'
499
+ : _isCheckingAvailability
500
+ ? 'Checking model availability...'
501
+ : 'Select from available providers for this model',
502
+ helperMaxLines: 2,
503
+ ),
504
+ value: _selectedLlmProvider == 'built-in' ? null : _selectedLlmProvider,
505
+ onChanged: _hfApiKeyController.text.isEmpty ? null : (String? newValue) {
506
+ if (newValue != null) {
507
+ // Check if provider is available for this model
508
+ final provider = _availableProviders.firstWhere(
509
+ (p) => p.id == newValue,
510
+ orElse: () => _availableProviders.first,
511
+ );
512
+
513
+ if (!provider.isAvailable) {
514
+ ScaffoldMessenger.of(context).showSnackBar(
515
+ SnackBar(
516
+ content: Text('${provider.name} does not support this model'),
517
+ backgroundColor: Colors.orange,
518
+ ),
519
+ );
520
+ return;
521
+ }
522
+
523
+ setState(() {
524
+ _selectedLlmProvider = newValue;
525
+ _currentProvider = provider;
526
+ });
527
+ _settingsService.setLlmProvider(newValue);
528
+ }
529
+ },
530
+ items: _availableProviders.map((provider) {
531
+ final isAvailable = provider.isAvailable;
532
+ return DropdownMenuItem(
533
+ value: provider.id,
534
+ enabled: isAvailable,
535
+ child: Row(
536
+ children: [
537
+ Expanded(
538
+ child: Text(
539
+ provider.name,
540
+ style: TextStyle(
541
+ color: isAvailable ? null : Colors.grey,
542
+ ),
543
+ ),
544
+ ),
545
+ if (!isAvailable)
546
+ const Icon(
547
+ Icons.lock,
548
+ size: 16,
549
+ color: Colors.grey,
550
+ ),
551
+ ],
552
+ ),
553
+ );
554
+ }).toList(),
555
+ ),
556
+ ],
557
+ /*
558
+ The Hugging Face Inference Providers allow the user to either use their HF API key,
559
+ which will bill them automatically on the HF account, or pass a provider-specific
560
+ API key, which will bill them on their provider account.
561
+
562
+ This is a nice feature, but for now let's just use the transparent/automatic billing.
563
+
564
+ So I've disabled this whole section:
565
+
566
+ if (!_isBuiltInModelSelected) ...[
567
+ const SizedBox(height: 16),
568
+ TextField(
569
+ controller: _llmApiKeyController,
570
+ decoration: InputDecoration(
571
+ labelText: _currentProvider?.apiKeyLabel ?? 'API Key',
572
+ helperText: _hfApiKeyController.text.isEmpty
573
+ ? 'Enter HF API key above to enable provider options'
574
+ : _currentProvider?.supportsHuggingFaceKey == true
575
+ ? 'Your HF API key will be automatically used for this provider'
576
+ : 'Optional - provider-specific API key',
577
+ helperMaxLines: 2,
578
+ ),
579
+ obscureText: true,
580
+ enabled: _hfApiKeyController.text.isNotEmpty &&
581
+ _currentProvider?.supportsHuggingFaceKey == false,
582
+ onChanged: (value) async {
583
+ await _settingsService.setLlmApiKey(value);
584
+ },
585
+ ),
586
+ ],
587
+ */
588
  ],
589
  ),
590
  ),
 
723
  );
724
  }
725
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
726
 
727
  }
lib/services/model_availability_service.dart ADDED
@@ -0,0 +1,239 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import 'dart:convert';
2
+ import 'package:http/http.dart' as http;
3
+
4
+ /// Represents the availability status of a model for a specific provider
5
+ class ModelProviderAvailability {
6
+ final String providerId;
7
+ final String status; // 'live' or 'staging'
8
+ final String task; // e.g., 'conversational'
9
+ final String? mappedLLMProviderId; // Our internal provider ID
10
+
11
+ const ModelProviderAvailability({
12
+ required this.providerId,
13
+ required this.status,
14
+ required this.task,
15
+ this.mappedLLMProviderId,
16
+ });
17
+
18
+ bool get isLive => status == 'live';
19
+
20
+ factory ModelProviderAvailability.fromJson(
21
+ String hfProviderId,
22
+ Map<String, dynamic> json,
23
+ ) {
24
+ return ModelProviderAvailability(
25
+ providerId: json['providerId'] ?? hfProviderId,
26
+ status: json['status'] ?? 'unknown',
27
+ task: json['task'] ?? 'unknown',
28
+ );
29
+ }
30
+ }
31
+
32
+ /// Cached model availability information
33
+ class ModelAvailabilityCache {
34
+ final String modelId;
35
+ final List<ModelProviderAvailability> providers;
36
+ final DateTime lastUpdated;
37
+
38
+ const ModelAvailabilityCache({
39
+ required this.modelId,
40
+ required this.providers,
41
+ required this.lastUpdated,
42
+ });
43
+
44
+ bool get isExpired {
45
+ final now = DateTime.now();
46
+ final difference = now.difference(lastUpdated);
47
+ return difference.inSeconds > 30; // 30 seconds cache duration
48
+ }
49
+
50
+ List<ModelProviderAvailability> get liveProviders =>
51
+ providers.where((p) => p.isLive).toList();
52
+ }
53
+
54
+ /// Service for querying and caching model availability from Hugging Face API
55
+ class ModelAvailabilityService {
56
+ static const String _baseUrl = 'https://huggingface.co/api/models';
57
+
58
+ // Cache for model availability data
59
+ final Map<String, ModelAvailabilityCache> _cache = {};
60
+
61
+ /// Mapping from HF provider IDs to our internal LLM provider IDs
62
+ static const Map<String, String> _providerMapping = {
63
+ 'cerebras': 'cerebras',
64
+ 'cohere': 'cohere',
65
+ 'fal-ai': 'fal-ai',
66
+ 'featherless': 'featherless',
67
+ 'fireworks': 'fireworks',
68
+ 'groq': 'groq',
69
+ 'hf-inference': 'hf-inference',
70
+ 'hyperbolic': 'hyperbolic',
71
+ 'nebius': 'nebius',
72
+ 'novita': 'novita',
73
+ 'nscale': 'nscale',
74
+ 'replicate': 'replicate',
75
+ 'sambanova': 'sambanova',
76
+ 'together': 'together',
77
+ };
78
+
79
+ /// Get model availability, using cache if available and not expired
80
+ Future<ModelAvailabilityCache?> getModelAvailability(String modelId) async {
81
+ // Check cache first
82
+ final cached = _cache[modelId];
83
+ if (cached != null && !cached.isExpired) {
84
+ return cached;
85
+ }
86
+
87
+ // Fetch fresh data from API
88
+ try {
89
+ final availability = await _fetchModelAvailability(modelId);
90
+ if (availability != null) {
91
+ _cache[modelId] = availability;
92
+ }
93
+ return availability;
94
+ } catch (e) {
95
+ // If API call fails and we have cached data, return it even if expired
96
+ if (cached != null) {
97
+ return cached;
98
+ }
99
+ rethrow;
100
+ }
101
+ }
102
+
103
+ /// Fetch model availability from Hugging Face API
104
+ Future<ModelAvailabilityCache?> _fetchModelAvailability(
105
+ String modelId,
106
+ ) async {
107
+ final url = '$_baseUrl/$modelId?expand[]=inferenceProviderMapping';
108
+
109
+ try {
110
+ final response = await http.get(
111
+ Uri.parse(url),
112
+ headers: {'Accept': 'application/json', 'User-Agent': '#tikslop-App/1.0'},
113
+ );
114
+
115
+ if (response.statusCode == 200) {
116
+ final data = json.decode(response.body) as Map<String, dynamic>;
117
+ return _parseModelAvailability(modelId, data);
118
+ } else if (response.statusCode == 404) {
119
+ // Model not found, return empty availability
120
+ return ModelAvailabilityCache(
121
+ modelId: modelId,
122
+ providers: [],
123
+ lastUpdated: DateTime.now(),
124
+ );
125
+ } else {
126
+ throw ModelAvailabilityException(
127
+ 'Failed to fetch model availability: HTTP ${response.statusCode}',
128
+ );
129
+ }
130
+ } catch (e) {
131
+ if (e is ModelAvailabilityException) {
132
+ rethrow;
133
+ }
134
+ throw ModelAvailabilityException('Network error: $e');
135
+ }
136
+ }
137
+
138
+ /// Parse the API response into ModelAvailabilityCache
139
+ ModelAvailabilityCache _parseModelAvailability(
140
+ String modelId,
141
+ Map<String, dynamic> data,
142
+ ) {
143
+ final providers = <ModelProviderAvailability>[];
144
+
145
+ final inferenceMapping =
146
+ data['inferenceProviderMapping'] as Map<String, dynamic>?;
147
+ if (inferenceMapping != null) {
148
+ for (final entry in inferenceMapping.entries) {
149
+ final hfProviderId = entry.key;
150
+ final providerData = entry.value as Map<String, dynamic>;
151
+
152
+ final availability = ModelProviderAvailability.fromJson(
153
+ hfProviderId,
154
+ providerData,
155
+ );
156
+
157
+ // Map HF provider ID to our internal provider ID
158
+ final mappedProviderId = _providerMapping[hfProviderId];
159
+ if (mappedProviderId != null) {
160
+ providers.add(
161
+ ModelProviderAvailability(
162
+ providerId: availability.providerId,
163
+ status: availability.status,
164
+ task: availability.task,
165
+ mappedLLMProviderId: mappedProviderId,
166
+ ),
167
+ );
168
+ } else {
169
+ // Keep unmapped providers for potential future use
170
+ providers.add(availability);
171
+ }
172
+ }
173
+ }
174
+
175
+ return ModelAvailabilityCache(
176
+ modelId: modelId,
177
+ providers: providers,
178
+ lastUpdated: DateTime.now(),
179
+ );
180
+ }
181
+
182
+ /// Get list of compatible LLM providers for a model
183
+ List<String> getCompatibleProviders(String modelId) {
184
+ final cached = _cache[modelId];
185
+ if (cached == null) {
186
+ return [];
187
+ }
188
+
189
+ return cached.liveProviders
190
+ .where((p) => p.mappedLLMProviderId != null)
191
+ .map((p) => p.mappedLLMProviderId!)
192
+ .toList();
193
+ }
194
+
195
+ /// Check if a specific provider supports a model
196
+ bool isProviderCompatible(String modelId, String llmProviderId) {
197
+ final compatibleProviders = getCompatibleProviders(modelId);
198
+ return compatibleProviders.contains(llmProviderId);
199
+ }
200
+
201
+ /// Get the provider-specific model name for a given model and provider
202
+ String? getProviderSpecificModelName(String modelId, String llmProviderId) {
203
+ final cached = _cache[modelId];
204
+ if (cached == null) {
205
+ return null;
206
+ }
207
+
208
+ final providerAvailability = cached.liveProviders
209
+ .where((p) => p.mappedLLMProviderId == llmProviderId)
210
+ .firstOrNull;
211
+
212
+ return providerAvailability?.providerId;
213
+ }
214
+
215
+ /// Clear cache for a specific model
216
+ void clearCache(String modelId) {
217
+ _cache.remove(modelId);
218
+ }
219
+
220
+ /// Clear all cached data
221
+ void clearAllCache() {
222
+ _cache.clear();
223
+ }
224
+
225
+ /// Get cache status for debugging
226
+ Map<String, bool> getCacheStatus() {
227
+ return _cache.map((key, value) => MapEntry(key, !value.isExpired));
228
+ }
229
+ }
230
+
231
+ /// Exception thrown when model availability operations fail
232
+ class ModelAvailabilityException implements Exception {
233
+ final String message;
234
+
235
+ ModelAvailabilityException(this.message);
236
+
237
+ @override
238
+ String toString() => 'ModelAvailabilityException: $message';
239
+ }
lib/services/settings_service.dart CHANGED
@@ -61,14 +61,14 @@ class SettingsService {
61
  _settingsController.add(null);
62
  }
63
 
64
- String get llmProvider => _prefs.getString(_llmProviderKey) ?? 'openai';
65
 
66
  Future<void> setLlmProvider(String provider) async {
67
  await _prefs.setString(_llmProviderKey, provider);
68
  _settingsController.add(null);
69
  }
70
 
71
- String get llmModel => _prefs.getString(_llmModelKey) ?? 'gpt-4';
72
 
73
  Future<void> setLlmModel(String model) async {
74
  await _prefs.setString(_llmModelKey, model);
 
61
  _settingsController.add(null);
62
  }
63
 
64
+ String get llmProvider => _prefs.getString(_llmProviderKey) ?? 'built-in';
65
 
66
  Future<void> setLlmProvider(String provider) async {
67
  await _prefs.setString(_llmProviderKey, provider);
68
  _settingsController.add(null);
69
  }
70
 
71
+ String get llmModel => _prefs.getString(_llmModelKey) ?? 'meta-llama/Llama-3.2-3B-Instruct';
72
 
73
  Future<void> setLlmModel(String model) async {
74
  await _prefs.setString(_llmModelKey, model);