bunyaminergen committed on
Commit 3a28db6 · 1 Parent(s): 382e610
Files changed (1)
  1. app.py +8 -4
app.py CHANGED
@@ -4,14 +4,14 @@ import threading
 
 # Third-party imports
 import gradio as gr
-from peft import PeftModel
 from transformers import AutoTokenizer, AutoModelForCausalLM, TextIteratorStreamer
+from peft import PeftModel
 
 HF_TOKEN = os.getenv("HF_TOKEN")
 
 tokenizer = AutoTokenizer.from_pretrained(
     "bunyaminergen/Qwen2.5-Coder-1.5B-Instruct-Reasoning",
-    use_auth_token=HF_TOKEN,
+    token=HF_TOKEN,
     trust_remote_code=True
 )
 
@@ -19,12 +19,16 @@ base_model = AutoModelForCausalLM.from_pretrained(
     "Qwen/Qwen2.5-Coder-1.5B-Instruct",
     device_map="auto",
     torch_dtype="auto",
-    use_auth_token=HF_TOKEN
+    token=HF_TOKEN
 )
 
+base_model.resize_token_embeddings(len(tokenizer))
+
+# 4️⃣ Load the PEFT adapter
 model = PeftModel.from_pretrained(
     base_model,
     "bunyaminergen/Qwen2.5-Coder-1.5B-Instruct-Reasoning",
-    use_auth_token=HF_TOKEN
+    token=HF_TOKEN
 )
 model.eval()
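
Context note: app.py also imports threading, gradio, and TextIteratorStreamer, and the tokenizer/model pair loaded above is what drives streaming generation. A minimal sketch of that wiring follows; the handler name, chat-template call, and max_new_tokens value are assumptions, not code from this commit.

import threading

from transformers import TextIteratorStreamer


def stream_reply(prompt: str):
    # Hypothetical handler; `tokenizer` and `model` are the objects loaded above.
    messages = [{"role": "user", "content": prompt}]
    input_ids = tokenizer.apply_chat_template(
        messages, add_generation_prompt=True, return_tensors="pt"
    ).to(model.device)

    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
    generation_kwargs = dict(input_ids=input_ids, streamer=streamer, max_new_tokens=512)

    # generate() blocks, so it runs in a background thread while tokens are yielded here.
    threading.Thread(target=model.generate, kwargs=generation_kwargs).start()

    partial = ""
    for new_text in streamer:
        partial += new_text
        yield partial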