title change
README.md CHANGED
@@ -1,13 +1,13 @@
 ---
 title: T-BOD
-emoji:
+emoji: 🤖
 colorFrom: indigo
 colorTo: purple
 sdk: gradio
 sdk_version: 3.29.0
 app_file: app.py
 pinned: false
-license:
+license: unknown
 ---
 
 Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py CHANGED
@@ -57,7 +57,7 @@ def query_image(input_img, query, binarize, eval_threshold, crop_mode, crop_pct)
 
 # Gradio interface
 description = """
-Gradio demo for an object detection architecture, introduced in my
+Gradio demo for an object detection architecture, introduced in my bachelor thesis (link will be added).
 \n\n
 You can use this architecture to detect objects using textual queries. To use it, simply upload an image and enter any query you want.
 It can be a single word or a sentence. The model is trained to recognize only 80 categories from the COCO Detection 2017 dataset.
@@ -107,7 +107,7 @@ demo = gr.Interface(
 gr.Radio(["center", "squash", "border"], value='center', label='crop_mode'), gr.Slider(0.7, 1, value=0.9, step=0.01)],
 #outputs="image",
 outputs=gr.Image(type='numpy', label='output').style(height=600, width=600),
-title="Text-
+title="Text-Based Object Detection",
 description=description,
 examples=[
 ["examples/img1.jpeg", "Find a person.", True, 0.45],
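
For context, the following is a minimal, self-contained sketch of how the gr.Interface pieces visible in these hunks fit together under Gradio 3.29. It is not the repository's actual app.py: the query_image body, the input component labels, and the extra example values are assumptions filled in for illustration; only the fragments quoted in the diff above come from the repository.

# Hypothetical sketch assembled around the hunks above (Gradio 3.29.x).
import gradio as gr

def query_image(input_img, query, binarize, eval_threshold, crop_mode, crop_pct):
    # Placeholder body: the real app runs text-conditioned detection and
    # returns the image with predicted boxes drawn on it.
    return input_img

description = """
Gradio demo for an object detection architecture, introduced in my bachelor thesis (link will be added).
You can use this architecture to detect objects using textual queries.
"""

demo = gr.Interface(
    fn=query_image,
    inputs=[
        gr.Image(type="numpy", label="input image"),      # assumed label
        gr.Textbox(label="query"),                         # assumed label
        gr.Checkbox(value=True, label="binarize"),         # assumed default
        gr.Slider(0, 1, value=0.45, step=0.01, label="eval_threshold"),  # assumed range
        gr.Radio(["center", "squash", "border"], value="center", label="crop_mode"),
        gr.Slider(0.7, 1, value=0.9, step=0.01, label="crop_pct"),
    ],
    outputs=gr.Image(type="numpy", label="output").style(height=600, width=600),
    title="Text-Based Object Detection",
    description=description,
    # Example row padded to one value per input component (last two are assumptions).
    examples=[["examples/img1.jpeg", "Find a person.", True, 0.45, "center", 0.9]],
)

demo.launch()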