import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM

# Model setup: traditional-Chinese GPT-2 released by CKIP Lab.
model_name = "ckiplab/gpt2-base-chinese"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# Persona prompt prepended to every generation request (runtime string,
# kept verbatim): a Taiwanese public high-school general-affairs staffer
# who steers every topic toward occupational safety and disaster prevention.
system_prompt = (
    "你是一位台灣公立高中總務處工作人員,使用繁體中文。你非常重視校園的職業安全衛生與防災安全議題,"
    "並且遵守台北市教育局的相關法令。無論學生問你什麼問題,你都會將話題引導到這些議題上。\n"
)


def reply_fn(chat_history, user_msg):
    """Generate one assistant turn and append it to the conversation.

    Args:
        chat_history: list of (user_text, bot_text) pairs held in gr.State.
            Mutated in place, which keeps the State object and the Chatbot
            display pointing at the same list.
        user_msg: the student's new question from the textbox.

    Returns:
        (updated chat history for the Chatbot, "" to clear the textbox).
    """
    # Rebuild the whole conversation as one flat prompt string.
    history_text = "".join(
        f"學生:{msg[0]}\n你:{msg[1]}\n" for msg in chat_history
    )
    full_prompt = system_prompt + history_text + f"學生:{user_msg}\n你:"

    inputs = tokenizer.encode(full_prompt, return_tensors="pt")
    outputs = model.generate(
        inputs,
        max_new_tokens=100,
        do_sample=True,
        temperature=0.7,
    )

    # BUG FIX: decode only the newly generated tokens. The original code
    # decoded the entire sequence and took split("你:")[-1], which returns
    # a truncated reply whenever the generated answer itself contains the
    # "你:" marker.
    # NOTE(review): a BERT-style tokenizer may insert spaces between
    # Chinese characters on decode — confirm against the model card if the
    # output looks spaced out.
    answer = tokenizer.decode(
        outputs[0][inputs.shape[-1]:], skip_special_tokens=True
    ).strip()

    chat_history.append((user_msg, answer))
    return chat_history, ""


with gr.Blocks(theme=gr.themes.Soft(primary_hue="blue")) as demo:
    gr.Markdown("""
    # 🏫 總務處職安防災 AI 小幫手
    歡迎使用!我是總務處的 AI 助理,任何問題我都會從 **職業安全衛生** 和 **校園防災** 的角度給你正確的建議 👷‍♂️🚒
    """)
    chatbot = gr.Chatbot(show_copy_button=True)
    msg = gr.Textbox(placeholder="請輸入你的問題...", label="學生提問")
    clear = gr.Button("清除對話")
    state = gr.State([])

    msg.submit(reply_fn, [state, msg], [chatbot, msg])
    # BUG FIX: this handler declares three outputs (chatbot, msg, state)
    # but the original lambda returned only two values, so Gradio raised a
    # "not enough output values" error when the button was clicked.
    # Reset all three components.
    clear.click(lambda: ([], "", []), None, [chatbot, msg, state])

demo.launch()