Mansuriya Brij committed on
Commit
7c0fc12
·
1 Parent(s): 679c32f

💡 Add DeepSeek model + code generation

Browse files
Files changed (1) hide show
  1. app.py +38 -3
app.py CHANGED
@@ -1,7 +1,42 @@
1
  import gradio as gr
 
 
 
2
 
3
def greet(name):
    """Build the salutation string shown in the demo UI."""
    salutation = f"Hello, {name}! πŸ‘‹"
    return salutation
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# Minimal Gradio wiring: one text box in, one text box out, backed by greet().
demo = gr.Interface(
    fn=greet,
    inputs="text",
    outputs="text",
)
demo.launch()
 
1
import gradio as gr
import torch
from peft import PeftModel
from transformers import AutoTokenizer, AutoModelForCausalLM

# πŸ”Ή Model setup: frozen DeepSeek-Coder base plus a LoRA adapter on top.
BASE_MODEL_ID = "deepseek-ai/deepseek-coder-1.3b-base"
LORA_MODEL_ID = "brijmansuriya/deepseek-lora"  # fine-tuned LoRA adapter repo

# Tokenizer is loaded from the adapter repo so any tokens added during
# fine-tuning stay in sync with the adapter weights.
tokenizer = AutoTokenizer.from_pretrained(LORA_MODEL_ID, trust_remote_code=True)

# fp16 weights with device_map="auto" lets Accelerate place the 1.3B model
# across whatever GPU/CPU memory is available.
base_model = AutoModelForCausalLM.from_pretrained(
    BASE_MODEL_ID,
    device_map="auto",
    torch_dtype=torch.float16,
    trust_remote_code=True,
)

# πŸ”Ή Attach the LoRA adapter; `model` is what the rest of the app calls.
model = PeftModel.from_pretrained(base_model, LORA_MODEL_ID)
20
+
21
+ # πŸ”Ή Define the function
22
+ def generate_code(prompt):
23
+ inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
24
+ outputs = model.generate(
25
+ **inputs,
26
+ max_new_tokens=200,
27
+ temperature=0.7,
28
+ do_sample=True,
29
+ top_p=0.95,
30
+ )
31
+ return tokenizer.decode(outputs[0], skip_special_tokens=True)
32
# πŸ”Ή Gradio UI: prompt in, generated code out.
prompt_box = gr.Textbox(label="Enter your coding prompt")
result_box = gr.Textbox(label="Generated Code")

demo = gr.Interface(
    fn=generate_code,
    inputs=prompt_box,
    outputs=result_box,
    title="πŸ€– DeepSeek Code Generator (LoRA)",
    description="This app uses DeepSeek-Coder with Brijbhai's fine-tuned LoRA model to generate code from natural language prompts.",
)

demo.launch()