Spaces: Build error
Update app.py
app.py CHANGED
@@ -20,6 +20,7 @@ from transformers import (
     TextIteratorStreamer,
     Qwen2VLForConditionalGeneration,
     AutoProcessor,
+    AutoConfig,
 )
 from transformers.image_utils import load_image
 
@@ -48,11 +49,20 @@ def progress_bar_html(label: str) -> str:
     </style>
     '''
 
-# TEXT MODEL - Use Napoleon 4B
+# TEXT MODEL - Use Napoleon 4B with a modified configuration
 model_id = "baconnier/Napoleon_4B_V0.0"
+
+# Load the configuration
+config = AutoConfig.from_pretrained(model_id)
+
+# Manually add the vocab_size attribute if it is missing
+if not hasattr(config, "vocab_size"):
+    config.vocab_size = 32000  # Standard value for Llama-based models
+
 tokenizer = AutoTokenizer.from_pretrained(model_id)
 model = AutoModelForCausalLM.from_pretrained(
     model_id,
+    config=config,
     device_map="auto",
     torch_dtype=torch.bfloat16,
 )
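
For context, a minimal, self-contained sketch of the loading path this commit ends up with, including the vocab_size workaround. It assumes the Space's requirements provide torch, transformers, and accelerate (needed for device_map="auto"), and the 32000 fallback is only the usual Llama-family vocabulary size, not a value confirmed for this checkpoint:

import torch
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer

model_id = "baconnier/Napoleon_4B_V0.0"

# Load the config first so any missing attribute can be patched before the
# model weights are instantiated.
config = AutoConfig.from_pretrained(model_id)

# Workaround for a config exported without vocab_size: fall back to the usual
# Llama-family vocabulary size (assumed value, not verified for this checkpoint).
if not hasattr(config, "vocab_size"):
    config.vocab_size = 32000

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    config=config,                # use the patched config
    device_map="auto",            # requires accelerate; places layers on available devices
    torch_dtype=torch.bfloat16,   # bf16 weights; needs bf16-capable hardware
)

The same hasattr guard pattern applies to any other attribute that downstream code expects but the published config omits.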