import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM
from peft import PeftModel
import torch

# 🔹 Load tokenizer and base model
base_model_id = "deepseek-ai/deepseek-coder-1.3b-base"
lora_model_id = "brijmansuriya/deepseek-lora"  # ✅ Your LoRA fine-tuned model repo

tokenizer = AutoTokenizer.from_pretrained(lora_model_id, trust_remote_code=True)
base_model = AutoModelForCausalLM.from_pretrained(
    base_model_id,
    device_map="auto",
    torch_dtype=torch.float16,
    trust_remote_code=True,
)

# 🔹 Load LoRA adapter
model = PeftModel.from_pretrained(base_model, lora_model_id)

# 🔹 Define the generation function
def generate_code(prompt):
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    outputs = model.generate(
        **inputs,
        max_new_tokens=200,
        temperature=0.7,
        do_sample=True,
        top_p=0.95,
    )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)

# 🔹 Gradio UI
demo = gr.Interface(
    fn=generate_code,
    inputs=gr.Textbox(label="Enter your coding prompt"),
    outputs=gr.Textbox(label="Generated Code"),
    title="🤖 DeepSeek Code Generator (LoRA)",
    description="This app uses DeepSeek-Coder with Brijbhai's fine-tuned LoRA model to generate code from natural language prompts.",
)

demo.launch()
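
# 🔹 Example: querying the running app programmatically — a minimal sketch
# using gradio_client (installed alongside gradio). The URL assumes the
# default local launch address; for a hosted Hugging Face Space you would
# pass the Space id ("user/space-name") instead. Left commented out because
# demo.launch() blocks until the server stops.
#
#   from gradio_client import Client
#
#   client = Client("http://127.0.0.1:7860/")
#   result = client.predict(
#       "Write a Python function that reverses a string.",
#       api_name="/predict",  # default endpoint name for gr.Interface
#   )
#   print(result)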