Spaces: Running on Zero
fix: change model
Browse files
app.py
CHANGED
@@ -3,14 +3,17 @@ import spaces
 import torch
 from transformers import AutoTokenizer, AutoModelForCausalLM
 
+MODEL_NAME = "speakleash/Bielik-11B-v2.3-Instruct-GGUF"
+MODEL_FILE = "Bielik-11B-v2.3-Instruct.Q4_K_M.gguf"
+
 
 @spaces.GPU
 def test():
     device = torch.device("cuda")
-    tokenizer = AutoTokenizer.from_pretrained(
+    tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
     model = AutoModelForCausalLM.from_pretrained(
-
-        model_file=
+        MODEL_NAME,
+        model_file=MODEL_FILE,
         model_type="mistral", gpu_layers=50, hf=True).to(device)
 
     inputs = tokenizer("Cześć Bielik, jak się masz?", return_tensors="pt").to(device)