Commit: 3a28db6
Parent: 382e610
Message: Initial
app.py CHANGED
@@ -4,14 +4,14 @@ import threading
 
 # Third-party imports
 import gradio as gr
-from peft import PeftModel
 from transformers import AutoTokenizer, AutoModelForCausalLM, TextIteratorStreamer
+from peft import PeftModel
 
 HF_TOKEN = os.getenv("HF_TOKEN")
 
 tokenizer = AutoTokenizer.from_pretrained(
     "bunyaminergen/Qwen2.5-Coder-1.5B-Instruct-Reasoning",
-
+    token=HF_TOKEN,
     trust_remote_code=True
 )
 
@@ -19,12 +19,16 @@ base_model = AutoModelForCausalLM.from_pretrained(
     "Qwen/Qwen2.5-Coder-1.5B-Instruct",
     device_map="auto",
     torch_dtype="auto",
-
+    token=HF_TOKEN
 )
+
+base_model.resize_token_embeddings(len(tokenizer))
+
+# 4️⃣ Load the PEFT adapter
 model = PeftModel.from_pretrained(
     base_model,
     "bunyaminergen/Qwen2.5-Coder-1.5B-Instruct-Reasoning",
-
+    token=HF_TOKEN
 )
 model.eval()
 
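Since app.py also imports threading and TextIteratorStreamer, the tokenizer and the PEFT-wrapped model loaded above are presumably consumed by a streaming generation loop that feeds the Gradio UI. Below is a minimal sketch of that pattern; it assumes the `tokenizer` and `model` objects from the diff, and the function name `stream_reply`, the prompt handling, and `max_new_tokens=256` are illustrative choices, not part of this commit.

# Sketch only: assumes `tokenizer` and `model` are the objects created in app.py above.
import threading

from transformers import TextIteratorStreamer


def stream_reply(prompt: str):
    # Tokenize the prompt and move the tensors to the model's device.
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)

    # model.generate() blocks, so run it in a background thread and
    # read decoded text from the streamer as it becomes available.
    generation_kwargs = dict(**inputs, streamer=streamer, max_new_tokens=256)
    thread = threading.Thread(target=model.generate, kwargs=generation_kwargs)
    thread.start()

    partial = ""
    for new_text in streamer:
        partial += new_text
        yield partial  # each partial string can be rendered incrementally

    thread.join()

In Gradio, a generator function like this can be used directly as an event callback, so the output textbox updates as tokens arrive rather than waiting for the full completion.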