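"""Gradio app for a children's emotion-soothing chatbot (儿童情绪安抚助手).

It classifies the child's message with a Chinese sentiment model and either
returns a canned comforting reply or falls back to a Chinese GPT-2 model for
open-ended conversation.
"""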
import gradio as gr
from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
# Initialize the Chinese emotion-detection model
# (make sure this checkpoint is suitable for Chinese emotion classification)
emotion_classifier = pipeline(
    "text-classification",
    model="IDEA-CCNL/Erlangshen-Roberta-110M-Sentiment",
    return_all_scores=False
)
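# Note: a text-classification pipeline returns a list of dicts such as
# [{"label": "...", "score": 0.99}]. The label strings come from the
# checkpoint's config, so verify they match the SAFE_EMOTIONS keys below.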
# Emotions that call for comforting the child, mapped to canned replies
SAFE_EMOTIONS = {
    "生气": "当你生气的时候,可以试着深呼吸哦,放松一下自己。",
    "郁闷": "听起来你有点郁闷,不如试试做些你喜欢的事情,让心情变好一点。",
    "难受": "觉得难受的时候,记得告诉家人或老师,他们一定会帮助你的。",
    "难过": "难过的时候可以试试和朋友聊聊,或者做点轻松的活动,让自己慢慢好起来。"
}
# Initialize the Chinese dialogue-generation model (a Chinese GPT-2 checkpoint)
tokenizer = AutoTokenizer.from_pretrained("uer/gpt2-chinese-cluecorpussmall")
model = AutoModelForCausalLM.from_pretrained("uer/gpt2-chinese-cluecorpussmall")
def generate_response(user_input, history):
    # Ask for input if the message is empty
    if not user_input:
        return "请跟我说说你的心情哦!"
    # Detect the child's emotion
    emotion_result = emotion_classifier(user_input)[0]
    detected_emotion = emotion_result["label"]
    print("Detected emotion:", detected_emotion, ", confidence:", emotion_result["score"])
    # If the detected emotion is one of the negative ones, return a comforting reply directly
    if detected_emotion in SAFE_EMOTIONS:
        return SAFE_EMOTIONS[detected_emotion]
    # Otherwise, generate a reply with the dialogue model
    try:
        # Some Chinese GPT-2 checkpoints ship a BERT-style tokenizer that has no
        # eos_token; fall back to an empty string / the pad token to avoid errors.
        eos = tokenizer.eos_token or ""
        inputs = tokenizer.encode(user_input + eos, return_tensors="pt")
        reply_ids = model.generate(
            inputs,
            max_length=100,
            pad_token_id=tokenizer.eos_token_id or tokenizer.pad_token_id,
            no_repeat_ngram_size=2
        )
        # Decode only the newly generated tokens, skipping the prompt
        response_text = tokenizer.decode(reply_ids[:, inputs.shape[-1]:][0], skip_special_tokens=True)
        return response_text
    except Exception as e:
        print("Exception while generating a reply:", e)
        return "抱歉,我现在有点小问题,能再跟我说说吗?"
# Build the Gradio chat interface
demo = gr.ChatInterface(
    fn=generate_response,
    title="儿童情绪安抚助手",
    description="跟我聊聊天,如果你觉得生气、郁闷或难受,我会帮你放松心情哦!"
)
demo.launch()
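# Optional local sanity check, bypassing the web UI (hypothetical example input):
#   print(generate_response("我今天有点难过", []))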