Extract the string content from the LLM response object before passing it to `format_response_for_user`, and ensure it is a dictionary.
app.py CHANGED
```diff
@@ -110,11 +110,16 @@ def on_submit(symptoms_text, history):
     # Call LLM
     response = llm.complete(prompt)
     raw = response
+    # Extract text from CompletionResponse if needed
+    if hasattr(raw, "text"):
+        raw = raw.text
+    elif hasattr(raw, "content"):
+        raw = raw.content
+    # Now ensure it's a dict
     if isinstance(raw, str):
         try:
             raw = json.loads(raw)
         except Exception:
-            # If not JSON, wrap in a dict or handle gracefully
             raw = {"diagnoses": [], "confidences": [], "follow_up": raw}
     assistant_msg = format_response_for_user(raw)
     history = history + [{"role": "assistant", "content": assistant_msg}]
```