Haseeb-001 committed on
Commit
cbcefdb
Β·
verified Β·
1 Parent(s): da612b2

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +100 -64
app.py CHANGED
@@ -37,85 +37,121 @@ index = faiss.IndexFlatL2(embedding_dim)
37
  def preprocess_query(query):
38
  tokens = query.lower().split()
39
  epilepsy_keywords = ["seizure", "epilepsy", "convulsion", "neurology", "brain activity"]
40
- healthcare_keywords = ["headache", "fever", "blood pressure", "diabetes", "cough", "flu", "nutrition", "mental health", "pain", "legs", "body pain"] # Added "pain" and "legs" and "body pain"
41
 
42
  is_epilepsy_related = any(k in tokens for k in epilepsy_keywords)
43
- is_healthcare_related = any(k in tokens for k in healthcare_keywords) and not is_epilepsy_related
44
 
45
- return tokens, is_epilepsy_related, is_healthcare_related
46
 
47
  # Function to Generate Response with Chat History
48
  def generate_response(user_query, chat_history):
49
- tokens, is_epilepsy_related, is_healthcare_related = preprocess_query(user_query)
50
-
51
- # Greeting Responses
52
- greetings = ["hello", "hi", "hey"]
53
- if any(word in tokens for word in greetings):
54
- return "πŸ‘‹ Hello! How can I assist you today?"
55
-
56
- # If Healthcare Related but Not Epilepsy - Provide General Wellness Tips
57
- if is_healthcare_related:
58
- general_health_tips = (
59
- "For general health and well-being:\n"
60
- "- πŸ’§ Stay hydrated by drinking plenty of water throughout the day.\n"
61
- "- 🍎 Maintain a balanced diet rich in fruits, vegetables, and whole grains.\n"
62
- "- πŸšΆβ€β™€οΈ Incorporate regular physical activity into your daily routine.\n"
63
- "- 😴 Ensure you get adequate sleep to allow your body to rest and recover.\n"
64
- "- 🧘 Practice stress-reducing activities such as deep breathing or meditation.\n"
65
- "- 🩺 **Important:** These tips are for general wellness. Always consult a healthcare professional for any specific health concerns or before making significant changes to your health regimen."
66
- )
67
- return (
68
- f"**NeuroGuard:** 🩺 It sounds like your question '{user_query}' is about general health. While I specialize in epilepsy, here are some general wellness tips that might be helpful:\n\n"
69
- f"{general_health_tips}"
70
- )
71
-
72
- # If Not Healthcare or Epilepsy Related
73
- if not is_epilepsy_related:
74
- return (
75
- f"**NeuroGuard:** πŸ’‘ Your query '{user_query}' doesn't seem to be directly related to epilepsy or general healthcare. \n\n"
76
- "For general information:\n"
77
- "- πŸ“š Always seek information from reliable sources and consult experts when needed."
78
- )
79
-
80
- # Try Getting Medical Insights from PubMedBERT
81
  try:
82
- pubmedbert_embeddings = pubmedbert_pipeline(user_query)
83
- embedding_mean = np.mean(pubmedbert_embeddings[0], axis=0)
84
- index.add(np.array([embedding_mean]))
85
- pubmedbert_insights = "**PubMedBERT Analysis:** βœ… Query is relevant to epilepsy research."
86
- except Exception as e:
87
- pubmedbert_insights = f"⚠️ Error during PubMedBERT analysis: {e}"
88
-
89
- # Use LLaMA for Final Response Generation with Chat History Context
90
- try:
91
- prompt_history = ""
92
- if chat_history:
93
- prompt_history += "**Chat History:**\n"
94
- for message in chat_history:
95
- prompt_history += f"{message['role'].capitalize()}: {message['content']}\n"
96
- prompt_history += "\n"
97
-
98
- prompt = f"""
99
- {prompt_history}
100
- **User Query:** {user_query}
101
- **Instructions:** Provide a concise, structured, and human-friendly response specifically about epilepsy or seizures, considering the conversation history if available.
102
  """
103
-
104
- chat_completion = client.chat.completions.create(
105
- messages=[{"role": "user", "content": prompt}],
106
  model="llama-3.3-70b-versatile",
107
  stream=False,
108
  )
109
- model_response = chat_completion.choices[0].message.content.strip()
 
 
 
110
  except Exception as e:
111
- model_response = f"⚠️ Error generating response with LLaMA: {e}"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
112
 
113
- return f"**NeuroGuard:** βœ… **Analysis:**\n{pubmedbert_insights}\n\n**Response:**\n{model_response}"
114
 
115
  # Streamlit UI Setup
116
- st.set_page_config(page_title="NeuroGuard: Epilepsy Chatbot", layout="wide")
117
- st.title("🧠 NeuroGuard: Epilepsy & Seizure Chatbot")
118
- st.write("πŸ’¬ Ask me anything about epilepsy and seizures. I remember our conversation!")
119
 
120
  # Initialize Chat History in Session State
121
  if "chat_history" not in st.session_state:
 
37
def preprocess_query(query):
    """Tokenize a user query and flag whether it mentions epilepsy.

    Args:
        query: Raw user query string.

    Returns:
        tuple[list[str], bool]: The lowercased whitespace tokens of the
        query, and True if any epilepsy keyword or phrase occurs in it.
    """
    normalized = query.lower()
    tokens = normalized.split()
    epilepsy_keywords = ["seizure", "epilepsy", "convulsion", "neurology", "brain activity"]

    # Single-word keywords are matched against individual tokens.
    # Multi-word phrases (e.g. "brain activity") can never equal a single
    # token produced by split(), so match them as substrings of the
    # normalized query instead — otherwise they are dead entries.
    is_epilepsy_related = any(
        (keyword in normalized) if " " in keyword else (keyword in tokens)
        for keyword in epilepsy_keywords
    )

    return tokens, is_epilepsy_related
44
 
45
  # Function to Generate Response with Chat History
46
def _build_history_prompt(chat_history):
    """Render prior chat messages as a text preamble for an LLM prompt.

    Returns "" when there is no history, otherwise a "**Chat History:**"
    header followed by one "Role: content" line per message and a blank line.
    """
    if not chat_history:
        return ""
    lines = ["**Chat History:**"]
    for message in chat_history:
        lines.append(f"{message['role'].capitalize()}: {message['content']}")
    return "\n".join(lines) + "\n\n"


def _pubmedbert_analysis(query, success_message):
    """Embed *query* with PubMedBERT, add it to the FAISS index, and return
    a user-facing analysis line (or an error line on failure).

    Side effect: grows the module-level FAISS `index` by one vector.
    """
    try:
        embeddings = pubmedbert_pipeline(query)
        # Mean-pool the token embeddings into a single query vector.
        embedding_mean = np.mean(embeddings[0], axis=0)
        index.add(np.array([embedding_mean]))
        return success_message
    except Exception as e:
        return f"⚠️ Error during PubMedBERT analysis: {e}"


def _llama_chat(prompt):
    """Send a single-turn prompt to LLaMA and return the stripped reply,
    or an error string if the API call fails."""
    try:
        completion = client.chat.completions.create(
            messages=[{"role": "user", "content": prompt}],
            model="llama-3.3-70b-versatile",
            stream=False,
        )
        return completion.choices[0].message.content.strip()
    except Exception as e:
        return f"⚠️ Error generating response with LLaMA: {e}"


def generate_response(user_query, chat_history):
    """Produce the chatbot's reply for *user_query*.

    Pipeline: (1) silently grammar-correct the query via LLaMA, falling back
    to the raw query on any failure; (2) short-circuit greetings; (3) run a
    PubMedBERT analysis (epilepsy-focused or general-health wording); (4) ask
    LLaMA for the final answer with the chat history as context.

    Args:
        user_query: Raw text entered by the user.
        chat_history: Sequence of {"role": str, "content": str} dicts
            (may be empty/None).

    Returns:
        str: A formatted "**NeuroGuard:** ..." markdown response.
    """
    # Grammatical correction using LLaMA (hidden from the user).
    try:
        correction_prompt = f"""
Correct the following user query for grammar and spelling errors, but keep the original intent intact.
Do not add or remove any information, just fix the grammar.
User Query: {user_query}
Corrected Query:
"""
        grammar_completion = client.chat.completions.create(
            messages=[{"role": "user", "content": correction_prompt}],
            model="llama-3.3-70b-versatile",
            stream=False,
        )
        # An empty correction falls back to the original query.
        corrected_query = grammar_completion.choices[0].message.content.strip() or user_query
    except Exception as e:
        corrected_query = user_query  # Fallback to original query if correction fails
        print(f"⚠️ Grammar correction error: {e}")  # Optional: log for debugging

    tokens, is_epilepsy_related = preprocess_query(corrected_query)

    # Greeting short-circuit.
    greetings = {"hello", "hi", "hey"}
    if any(word in greetings for word in tokens):
        return "👋 Hello! How can I assist you today?"

    # Branch only on the wording; the mechanics (PubMedBERT analysis +
    # LLaMA completion with history) are identical for both paths.
    if is_epilepsy_related:
        analysis = _pubmedbert_analysis(
            corrected_query,
            "**PubMedBERT Analysis:** ✅ Query is relevant to epilepsy research.",
        )
        instructions = (
            "Provide a concise, structured, and human-friendly response "
            "specifically about epilepsy or seizures, considering the "
            "conversation history if available."
        )
    else:
        analysis = _pubmedbert_analysis(
            corrected_query,
            "**PubMedBERT Analysis:** PubMed analysis performed for health-related context.",
        )
        instructions = (
            "Provide a concise, structured, and human-friendly response to "
            "the general health query, considering the conversation history "
            "if available. If the query is clearly not health-related, "
            "respond generally."
        )

    # NOTE: the prompt must contain only prompt text — previously a stray
    # Python comment was embedded inside the f-string and sent to the model.
    final_prompt = f"""
{_build_history_prompt(chat_history)}
**User Query:** {corrected_query}
**Instructions:** {instructions}
"""
    model_response = _llama_chat(final_prompt)

    return f"**NeuroGuard:** ✅ **Analysis:**\n{analysis}\n\n**Response:**\n{model_response}"
149
 
 
150
 
151
# Streamlit page chrome: browser-tab title + wide layout, the on-page
# header, and a one-line usage hint shown above the chat area.
st.set_page_config(page_title="NeuroGuard: Epilepsy & Health Chatbot", layout="wide")
st.title("🧠 NeuroGuard: Epilepsy & Health Chatbot")
st.write("💬 Ask me anything about epilepsy, seizures, and general health. I remember our conversation!")
155
 
156
  # Initialize Chat History in Session State
157
  if "chat_history" not in st.session_state: