nurqoneah committed
Commit 8d6e3ff · 1 Parent(s): 73fa8d4

Create app.py

Files changed (1)
  1. app.py +112 -0
app.py ADDED
@@ -0,0 +1,112 @@
+ import os
+ import streamlit as st
+ from langchain_community.llms import HuggingFaceHub
+ from langchain_huggingface import HuggingFaceEmbeddings
+ from langchain_community.vectorstores import Chroma
+ from langchain.memory import ConversationBufferMemory
+ from langchain.chains import ConversationalRetrievalChain
+ from langchain.prompts import PromptTemplate
+
+ # Environment: fail fast if the token is missing; assigning None to
+ # os.environ would raise a TypeError anyway.
+ hf_token = os.getenv("HUGGINGFACEHUB_API_TOKEN")
+ if not hf_token:
+     raise RuntimeError("HUGGINGFACEHUB_API_TOKEN is not set")
+ os.environ["HUGGINGFACEHUB_API_TOKEN"] = hf_token
+ EMBEDDING_MODEL_NAME = "sentence-transformers/all-MiniLM-L6-v2"
+
+ def get_response(question):
+     """Run the conversational chain and strip boilerplate from the answer."""
+     result = st.session_state.conversational_chain({"question": question})
+     response_text = result.get("answer", "Maaf, saya tidak mengetahui jawaban itu.")
+
+     # Keep only the text after the "Answer:" marker, in case the model echoed the prompt
+     if "Answer:" in response_text:
+         response_text = response_text.split("Answer:")[1].strip()
+     return response_text
+
+
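+ # Vector store: a Chroma index persisted under ./data, queried with the same
+ # MiniLM embeddings it was built with (the index itself is not part of this commit).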
+ @st.cache_resource
+ def setup_vectorstore():
+     persist_directory = "./data"
+     embeddings = HuggingFaceEmbeddings(model_name=EMBEDDING_MODEL_NAME)
+     return Chroma(persist_directory=persist_directory, embedding_function=embeddings)
+
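+ # Retrieval-augmented chat: HuggingFaceHub LLM + similarity retriever + buffer memory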
+ def chat_chain(vectorstore):
+     hf_hub_llm = HuggingFaceHub(
+         repo_id="SeaLLMs/SeaLLMs-v3-7B-Chat",
+         model_kwargs={"temperature": 1, "max_new_tokens": 1024},
+     )
+
+     # The "stuff" combine-docs step injects the retrieved documents through a
+     # {context} slot; without it the chain fails prompt validation.
+     prompt_template = """
+ You are an assistant specialized in women's health. Use the retrieved documents to answer the user's question.
+ If you don't know the answer or the information is not in the documents, reply with: "I'm sorry, I don't know."
+
+ Context:
+ {context}
+
+ Chat History:
+ {chat_history}
+
+ Question:
+ {question}
+
+ Answer:"""
+     prompt = PromptTemplate(
+         input_variables=["context", "chat_history", "question"],
+         template=prompt_template,
+     )
+
+     # Retrieve the two most similar chunks for each question
+     retriever = vectorstore.as_retriever(
+         search_type="similarity",
+         search_kwargs={"k": 2}
+     )
+
+     # Verbatim conversation buffer, exposed to the prompt as {chat_history}
+     memory = ConversationBufferMemory(
+         output_key="answer",
+         memory_key="chat_history",
+         return_messages=True
+     )
+
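+     # The chain condenses the follow-up question against the chat history,
+     # retrieves documents for the standalone question, then "stuffs" them
+     # into the prompt above as {context}.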
+     chain = ConversationalRetrievalChain.from_llm(
+         llm=hf_hub_llm,
+         retriever=retriever,
+         chain_type="stuff",
+         memory=memory,
+         verbose=True,
+         combine_docs_chain_kwargs={"prompt": prompt},
+     )
+     return chain
+
+ # Streamlit App
+ st.set_page_config(
+     page_title="Asisten Kesehatan Wanita",
+     page_icon="💊",
+     layout="centered"
+ )
+
+ st.title("💊 Asisten Kesehatan Wanita")
+
+ if "chat_history" not in st.session_state:
+     st.session_state.chat_history = []
+
+ if "vectorstore" not in st.session_state:
+     st.session_state.vectorstore = setup_vectorstore()
+
+ if "conversational_chain" not in st.session_state:
+     st.session_state.conversational_chain = chat_chain(st.session_state.vectorstore)
+
+ # Display Chat History
+ for message in st.session_state.chat_history:
+     with st.chat_message(message["role"]):
+         st.markdown(message["content"])
+
+ # User Input
+ user_input = st.chat_input("Tanyakan sesuatu...")
+
+ if user_input:
+     st.session_state.chat_history.append({"role": "user", "content": user_input})
+
+     with st.chat_message("user"):
+         st.markdown(user_input)
+
+     with st.chat_message("assistant"):
+         # Route through get_response so the "Answer:" cleanup is applied
+         assistant_response = get_response(user_input)
+         st.markdown(assistant_response)
+         st.session_state.chat_history.append({"role": "assistant", "content": assistant_response})
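
The app assumes a Chroma index already persisted under ./data; this commit does not build one. A minimal ingestion sketch, assuming the source documents are plain-text files under a hypothetical ./docs directory (the script name, paths, glob, and chunk sizes below are illustrative, not part of this commit):

# build_index.py: hypothetical helper, not part of this commit
from langchain_community.document_loaders import DirectoryLoader, TextLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_huggingface import HuggingFaceEmbeddings
from langchain_community.vectorstores import Chroma

# Load every .txt file under ./docs (path and glob are assumptions)
docs = DirectoryLoader("./docs", glob="**/*.txt", loader_cls=TextLoader).load()

# Split into overlapping chunks; the sizes are illustrative defaults
chunks = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100).split_documents(docs)

# Must match EMBEDDING_MODEL_NAME in app.py so the index and queries agree
embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")

# Write the index to ./data, where setup_vectorstore() expects it;
# Chroma persists automatically when persist_directory is set
Chroma.from_documents(chunks, embeddings, persist_directory="./data")

Run it once, then start the app with streamlit run app.py (with HUGGINGFACEHUB_API_TOKEN set).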