import gradio as gr
from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM

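# Pretrained Chinese sentiment classifier used to spot negative moods.
# Note: this checkpoint is a sentiment model and may emit coarse labels
# (e.g. positive/negative) rather than the fine-grained emotion words used
# as keys in SAFE_EMOTIONS below; unmatched labels fall through to the
# GPT-2 reply.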
emotion_classifier = pipeline(
    "text-classification",
    model="IDEA-CCNL/Erlangshen-Roberta-110M-Sentiment",
    return_all_scores=False
)

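# Pre-written comforting replies keyed by Chinese emotion words
# ("angry", "gloomy", "feeling unwell", "sad").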
SAFE_EMOTIONS = {
    "生气": "当你生气的时候,可以试着深呼吸哦,放松一下自己。",
    "郁闷": "听起来你有点郁闷,不如试试做些你喜欢的事情,让心情变好一点。",
    "难受": "觉得难受的时候,记得告诉家人或老师,他们一定会帮助你的。",
    "难过": "难过的时候可以试试和朋友聊聊,或者做点轻松的活动,让自己慢慢好起来。"
}

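# Chinese GPT-2 checkpoint used as a generative fallback for open-ended chat.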
tokenizer = AutoTokenizer.from_pretrained("uer/gpt2-chinese-cluecorpussmall")
model = AutoModelForCausalLM.from_pretrained("uer/gpt2-chinese-cluecorpussmall")

def generate_response(user_input, history):
    """Classify the user's emotion and return a canned reply when it matches;
    otherwise fall back to GPT-2 generation.

    `history` is supplied by gr.ChatInterface and is not used here."""
    if not user_input:
        return "请跟我说说你的心情哦!"

    # Detect the emotion of the incoming message.
    emotion_result = emotion_classifier(user_input)[0]
    detected_emotion = emotion_result["label"]
    print("检测到情绪:", detected_emotion, ",置信度:", emotion_result["score"])

    # Prefer the pre-written, child-safe reply when the label matches.
    if detected_emotion in SAFE_EMOTIONS:
        return SAFE_EMOTIONS[detected_emotion]

    # Otherwise generate a free-form reply with GPT-2.
    try:
        # Some Chinese GPT-2 checkpoints ship a BERT-style tokenizer whose
        # eos_token may be unset, so guard against None here (assumption).
        eos_token = tokenizer.eos_token or ""
        inputs = tokenizer.encode(user_input + eos_token, return_tensors="pt")
        reply_ids = model.generate(
            inputs,
            max_length=100,
            pad_token_id=tokenizer.eos_token_id or tokenizer.pad_token_id,
            no_repeat_ngram_size=2
        )
        # Decode only the newly generated tokens, skipping the echoed prompt.
        response_text = tokenizer.decode(reply_ids[:, inputs.shape[-1]:][0], skip_special_tokens=True)
        return response_text
    except Exception as e:
        print("生成回复时发生异常:", e)
        return "抱歉,我现在有点小问题,能再跟我说说吗?"

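# Gradio chat UI: ChatInterface passes (message, history) to the callback.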
demo = gr.ChatInterface(
    fn=generate_response,
    title="儿童情绪安抚助手",
    description="跟我聊聊天,如果你觉得生气、郁闷或难受,我会帮你放松心情哦!"
)

demo.launch()