Update app.py
app.py CHANGED
@@ -75,11 +75,15 @@ def load_sample_phrase():
     return [save_temp_image(sample["frontal"]), sample["phrase"]]
 
 def generate_report(frontal_path, lateral_path, indication, technique, comparison,
-
+                    prior_frontal_path, prior_lateral_path, prior_report, grounding):
     """Generate radiology report with authentication check"""
     if not MODEL_STATE["authenticated"]:
         return "⚠️ Please authenticate with your Hugging Face token first!"
 
+    # Check that mandatory image paths are provided.
+    if not frontal_path or not lateral_path:
+        return "❌ Please upload both the frontal and lateral images for the current study."
+
     try:
         current_frontal = Image.open(frontal_path)
         current_lateral = Image.open(lateral_path)
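Note on the new guard in generate_report: Gradio image inputs hand the handler None (or an empty path) when nothing has been uploaded, so a plain truthiness check catches the missing-upload case before Image.open can raise. A minimal sketch of the same pattern in isolation, with a hypothetical helper name that is not part of the app:

from PIL import Image

def load_study_images(frontal_path, lateral_path):
    """Hypothetical helper mirroring the guard added above: return the two
    required views, or an error string the UI can display."""
    if not frontal_path or not lateral_path:
        # Same fail-fast behaviour as generate_report's new check.
        return None, "❌ Please upload both the frontal and lateral images."
    return (Image.open(frontal_path), Image.open(lateral_path)), None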
@@ -117,6 +121,10 @@ def ground_phrase(frontal_path, phrase):
     if not MODEL_STATE["authenticated"]:
         return "⚠️ Please authenticate with your Hugging Face token first!"
 
+
+    if not frontal_path:
+        return "❌ Please upload the frontal image for phrase grounding."
+
     try:
         frontal = Image.open(frontal_path)
         processed = MODEL_STATE["processor"].format_and_preprocess_phrase_grounding_input(
@@ -125,6 +133,9 @@ def ground_phrase(frontal_path, phrase):
         return_tensors="pt"
     ).to("cpu")
 
+
+    processed.pop("image_sizes", None)
+
     outputs = MODEL_STATE["model"].generate(
         **processed,
         max_new_tokens=150,
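On processed.pop("image_sizes", None): the processor output appears to carry an image_sizes entry that model.generate() does not accept as a keyword argument, so it is stripped before **-unpacking (this reading of the change is an assumption; only the pop itself comes from the diff). A small, model-independent sketch of that pattern:

def strip_unsupported_keys(processed, unsupported=("image_sizes",)):
    """Illustrative: remove processor outputs that generate() would reject.
    Works on any dict-like object (e.g. a transformers BatchFeature)."""
    for key in unsupported:
        processed.pop(key, None)  # no-op if the key is absent
    return processed

# Usage sketch:
# outputs = model.generate(**strip_unsupported_keys(processed), max_new_tokens=150)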
@@ -187,12 +198,12 @@ with gr.Blocks(title="MAIRA-2 Medical Assistant") as demo:
     sample_btn.click(
         load_sample_findings,
         outputs=[frontal, lateral, indication, technique, comparison,
-
+                 prior_frontal, prior_lateral, prior_report, grounding]
     )
     generate_btn.click(
         generate_report,
         inputs=[frontal, lateral, indication, technique, comparison,
-
+                prior_frontal, prior_lateral, prior_report, grounding],
         outputs=report_output
     )
 
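The wiring change in the Blocks section works because click() passes the components listed in inputs to the handler positionally, in list order, so the four new components have to appear both in the inputs list and in generate_report's signature, in matching order. A self-contained toy example of that mechanism (components and handler below are invented for illustration):

import gradio as gr

def echo(current, prior):
    # Receives values in the same order as the inputs list below.
    return f"current={current!r}, prior={prior!r}"

with gr.Blocks() as toy:
    current = gr.Textbox(label="Current study")
    prior = gr.Textbox(label="Prior study")
    out = gr.Textbox(label="Result")
    btn = gr.Button("Run")
    # inputs are matched to echo's parameters by position, not by name.
    btn.click(echo, inputs=[current, prior], outputs=out)

# toy.launch()  # uncomment to try it locally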