Upload folder using huggingface_hub
- config.json +10 -4
- generation_config.json +1 -1
- tokenizer.json +0 -0
- tokenizer_config.json +1 -0
config.json
CHANGED
@@ -1,6 +1,6 @@
 {
   "_attn_implementation_autoset": true,
-  "_name_or_path": "/
+  "_name_or_path": "livekit/turn-detector",
   "architectures": [
     "LlamaForCausalLM"
   ],
@@ -26,7 +26,13 @@
   "rope_scaling": null,
   "rope_theta": 100000,
   "tie_word_embeddings": true,
-  "transformers_version": "4.
+  "transformers_version": "4.49.0",
   "use_cache": true,
-  "vocab_size": 49154
-}
+  "vocab_size": 49154,
+  "transformers.js_config": {
+    "kv_cache_dtype": {
+      "q4f16": "float16",
+      "fp16": "float16"
+    }
+  }
+}
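The new transformers.js_config block maps the q4f16 and fp16 dtypes to a float16 KV cache for in-browser inference. Below is a minimal sketch of how a client might load the model under those settings, assuming the @huggingface/transformers package and its text-generation pipeline; the dtype value and model id come from this repo, while the prompt and generation options are illustrative.

// Sketch: loading livekit/turn-detector with transformers.js (assumed API).
import { pipeline } from "@huggingface/transformers";

// dtype: "q4f16" selects the q4f16 weights; per the config above, the
// runtime is expected to keep the KV cache in float16 for this dtype.
const generator = await pipeline("text-generation", "livekit/turn-detector", {
  dtype: "q4f16",
});

// Prompt format follows the chat template in tokenizer_config.json.
const out = await generator("<|im_start|><|user|>are you there?<|im_end|>", {
  max_new_tokens: 1,
});
console.log(out);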
generation_config.json
CHANGED
@@ -2,5 +2,5 @@
   "_from_model_config": true,
   "bos_token_id": 0,
   "eos_token_id": 0,
-  "transformers_version": "4.
+  "transformers_version": "4.49.0"
 }
tokenizer.json
CHANGED
The diff for this file is too large to render.
tokenizer_config.json
CHANGED
@@ -164,6 +164,7 @@
   "chat_template": "{% for message in messages %}{{'<|im_start|>' + '<|' + message['role'] + '|>' + message['content'] + '<|im_end|>'}}{% endfor %}",
   "clean_up_tokenization_spaces": false,
   "eos_token": "<|endoftext|>",
+  "extra_special_tokens": {},
   "model_max_length": 8192,
   "pad_token": "<|endoftext|>",
   "tokenizer_class": "GPT2Tokenizer",
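The chat_template above wraps each message as <|im_start|><|role|>content<|im_end|>, with no trailing generation prompt. A minimal sketch of rendering it, assuming transformers.js' AutoTokenizer and its apply_chat_template method; the message content is illustrative.

// Sketch: rendering the chat template from tokenizer_config.json (assumed API).
import { AutoTokenizer } from "@huggingface/transformers";

const tokenizer = await AutoTokenizer.from_pretrained("livekit/turn-detector");

const messages = [
  { role: "user", content: "are you there?" }, // illustrative content
];

// With tokenize: false the template is rendered to a string, e.g.
// "<|im_start|><|user|>are you there?<|im_end|>"
const text = tokenizer.apply_chat_template(messages, { tokenize: false });
console.log(text);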