mMonika committed on
Commit
e795ee7
·
verified ·
1 Parent(s): 4f22f6e

Create app.py

Files changed (1)
  1. app.py +171 -0
app.py ADDED
@@ -0,0 +1,171 @@
+ # from langgraph.graph import Graph
+ # from langchain_groq import ChatGroq
+ # llm = langchain_groq(model="llama3-70b-8192")
+ # llm.invoke("hi how are you")
+ import streamlit as st
+ import os
+ import base64
+ from dotenv import load_dotenv
+ from langchain_groq import ChatGroq
+ from langchain.chains import LLMMathChain, LLMChain
+ from langchain.prompts import PromptTemplate
+ from langchain_community.utilities import WikipediaAPIWrapper
+ from langchain.agents.agent_types import AgentType
+ from langchain.agents import Tool, initialize_agent
+ from langchain_community.callbacks.streamlit import StreamlitCallbackHandler
+ from groq import Groq
+
+ load_dotenv()
+ groq_api_key = os.getenv("GROQ_API_KEY")
+
+ if not groq_api_key:
+     st.error("Groq API Key not found in .env file")
+     st.stop()
+
+ st.set_page_config(page_title="Medical Bot", page_icon="👨‍🔬")
+ st.title("Medical Bot")
+ llm_text = ChatGroq(model="gemma2-9b-it", groq_api_key=groq_api_key)
+ llm_image = ChatGroq(model="llama-3.2-90b-vision-preview", groq_api_key=groq_api_key)
+
+ wikipedia_wrapper = WikipediaAPIWrapper()
+ wikipedia_tool = Tool(
+     name="Wikipedia",
+     func=wikipedia_wrapper.run,
+     description="A tool for searching Wikipedia to find information on the topics mentioned."
+ )
+ math_chain = LLMMathChain.from_llm(llm=llm_text)
+ calculator = Tool(
+     name="Calculator",
+     func=math_chain.run,
+     description="A tool for solving mathematical problems. Provide only the mathematical expressions."
+ )
+
+ prompt = """
+ You are a mathematical problem-solving assistant tasked with helping users solve their questions. Arrive at the solution logically, providing a clear and step-by-step explanation. Present your response in a structured point-wise format for better understanding.
+ Question: {question}
+ Answer:
+ """
+
+ prompt_template = PromptTemplate(
+     input_variables=["question"],
+     template=prompt
+ )
+ # Chain the reasoning prompt with the text LLM for text questions
+ chain = LLMChain(llm=llm_text, prompt=prompt_template)
+
+ reasoning_tool = Tool(
+     name="Reasoning Tool",
+     func=chain.run,
+     description="A tool for answering logic-based and reasoning questions."
+ )
+
+ # Initialize the agent for text questions
+ assistant_agent_text = initialize_agent(
+     tools=[wikipedia_tool, calculator, reasoning_tool],
+     llm=llm_text,
+     agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
+     verbose=False,
+     handle_parsing_errors=True
+ )
+
+ if "messages" not in st.session_state:
+     st.session_state["messages"] = [
+         {"role": "assistant", "content": "Welcome! I am your Assistant. How can I help you today?"}
+     ]
+
+ for msg in st.session_state.messages:
+     if msg["role"] == "user" and "image" in msg:
+         st.chat_message(msg["role"]).write(msg["content"])
+         st.image(msg["image"], caption="Uploaded Image", use_column_width=True)
+     else:
+         st.chat_message(msg["role"]).write(msg["content"])
+
+ st.sidebar.header("Navigation")
+ if st.sidebar.button("Text Question"):
+     st.session_state["section"] = "text"
+ if st.sidebar.button("Image Question"):
+     st.session_state["section"] = "image"
+
+ if "section" not in st.session_state:
+     st.session_state["section"] = "text"
+
+ def clean_response(response):
+     if "```" in response:
+         response = response.split("```")[1].strip()
+     return response
+
+ if st.session_state["section"] == "text":
+     st.header("Text Question")
+     st.write("Please enter your question below, and I will provide a detailed description of the problem and suggest a solution for it.")
+     question = st.text_area("Your Question:")
+     if st.button("Get Answer"):
+         if question:
+             with st.spinner("Generating response..."):
+                 st.session_state.messages.append({"role": "user", "content": question})
+                 st.chat_message("user").write(question)
+
+                 st_cb = StreamlitCallbackHandler(st.container(), expand_new_thoughts=False)
+                 try:
+                     response = assistant_agent_text.run(st.session_state.messages, callbacks=[st_cb])
+                     cleaned_response = clean_response(response)
+                     st.session_state.messages.append({"role": "assistant", "content": cleaned_response})
+                     st.write("### Response:")
+                     st.success(cleaned_response)
+                 except ValueError as e:
+                     st.error(f"An error occurred: {e}")
+         else:
+             st.warning("Please enter a question to get an answer.")
+
+ elif st.session_state["section"] == "image":
+     st.header("Image Question")
+     st.write("Please enter your question below and upload the medical image. I will provide a detailed description of the problem and suggest a solution for it.")
+     question = st.text_area("Your Question:", "Example: What is the patient suffering from?")
+     uploaded_file = st.file_uploader("Upload an image", type=["jpg", "jpeg", "png"])
+
+     if st.button("Get Answer"):
+         if question and uploaded_file is not None:
+             with st.spinner("Generating response..."):
+                 image_data = uploaded_file.read()
+                 image_data_url = f"data:image/jpeg;base64,{base64.b64encode(image_data).decode()}"
+                 st.session_state.messages.append({"role": "user", "content": question, "image": image_data})
+                 st.chat_message("user").write(question)
+                 st.image(image_data, caption="Uploaded Image", use_column_width=True)
+
+                 client = Groq(api_key=groq_api_key)
+
+                 messages = [
+                     {
+                         "role": "user",
+                         "content": [
+                             {
+                                 "type": "text",
+                                 "text": question
+                             },
+                             {
+                                 "type": "image_url",
+                                 "image_url": {
+                                     "url": image_data_url
+                                 }
+                             }
+                         ]
+                     }
+                 ]
+                 try:
+                     completion = client.chat.completions.create(
+                         model="llama-3.2-90b-vision-preview",
+                         messages=messages,
+                         temperature=1,
+                         max_tokens=1024,
+                         top_p=1,
+                         stream=False,
+                         stop=None,
+                     )
+                     response = completion.choices[0].message.content
+                     cleaned_response = clean_response(response)
+                     st.session_state.messages.append({"role": "assistant", "content": cleaned_response})
+                     st.write("### Response:")
+                     st.success(cleaned_response)
+                 except Exception as e:
+                     st.error(f"An error occurred: {e}")
+         else:
+             st.warning("Please enter a question and upload an image to get an answer.")