Commit cf1cddb · Nathan Slaughter committed
Parent(s): b8d2f65

improve prompt and interface
- app/interface.py +3 -4
- app/pipeline.py +3 -2
app/interface.py CHANGED

@@ -47,14 +47,14 @@ def create_interface():
     with gr.Blocks() as interface:
         gr.Markdown("# Flashcard Studio")
         gr.Markdown(
-            "Make flashcards from uploaded files or directly input text. Choose
+            "Make flashcards from uploaded files or directly input text. Choose CSV or JSON output format."
         )
         with gr.Tab("Upload File"):
             with gr.Row():
                 with gr.Column():
                     file_input = gr.File(
                         label="Upload a File",
-                        file_types=['.
+                        file_types=['.txt', '.md']
                     )
                     format_selector = gr.Radio(
                         choices=["CSV", "JSON"],
@@ -106,8 +106,7 @@ def create_interface():
             """
             ---
             **Notes:**
-            - Supported file types: `.
-            - Ensure that the input text is clear and well-structured for optimal flashcard extraction.
+            - Supported file types: `.txt`, `.md`.
             """
         )
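For readers who want to try the updated interface, a minimal, self-contained sketch of roughly what the changed block looks like in context follows. The widget names and layout are taken from the diff; the format selector's label and the trailing launch() call are assumptions, since they are not shown above.

import gradio as gr

def create_interface():
    with gr.Blocks() as interface:
        gr.Markdown("# Flashcard Studio")
        gr.Markdown(
            "Make flashcards from uploaded files or directly input text. Choose CSV or JSON output format."
        )
        with gr.Tab("Upload File"):
            with gr.Row():
                with gr.Column():
                    file_input = gr.File(
                        label="Upload a File",
                        file_types=['.txt', '.md']  # restrict uploads to plain-text and Markdown files
                    )
                    format_selector = gr.Radio(
                        choices=["CSV", "JSON"],
                        label="Output Format"  # label is an assumption; the diff only shows the choices
                    )
    return interface

if __name__ == "__main__":
    create_interface().launch()  # assumption: the app launches the Blocks object this way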
app/pipeline.py CHANGED

@@ -10,7 +10,7 @@ class Pipeline:
     def __init__(self, model_name: str = "Qwen/Qwen2.5-7B-Instruct"):
         self.torch_pipe = pipeline(
             "text-generation",
-
+            model_name,
             torch_dtype="auto",
             device_map="auto"
         )
@@ -20,6 +20,8 @@ class Pipeline:
         {"role": "system", "content": """You are an expert flashcard creator.
 - You ALWAYS include a single knowledge item per flashcard.
 - You ALWAYS respond in valid JSON format.
+- You ALWAYS make flashcards accurate and comprehensive.
+- If the text includes code snippets, you consider snippets a knowledge item testing the user's understanding of how to write the code and what it does.
 
 Format responses like the example below.
 
@@ -51,4 +53,3 @@ class Pipeline:
             return torch.device("mps")
         else:
             return torch.device("cpu")
-
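To see how the two pipeline.py changes fit together, here is a minimal sketch of the constructor and device helper under stated assumptions: passing model_name as the second positional argument of transformers.pipeline is exactly what the "+ model_name," line adds, while the method name _detect_device and its CUDA branch are assumptions, since the diff only shows the mps/cpu tail of that helper.

import torch
from transformers import pipeline

class Pipeline:
    def __init__(self, model_name: str = "Qwen/Qwen2.5-7B-Instruct"):
        # Pass model_name through to the pipeline factory so the
        # configured model is the one that actually gets loaded.
        self.torch_pipe = pipeline(
            "text-generation",
            model_name,
            torch_dtype="auto",
            device_map="auto"
        )

    def _detect_device(self) -> torch.device:
        # Assumed helper name and CUDA branch; only the mps/cpu
        # branches appear in the diff above.
        if torch.cuda.is_available():
            return torch.device("cuda")
        elif torch.backends.mps.is_available():
            return torch.device("mps")
        else:
            return torch.device("cpu")

Note that with device_map="auto" the pipeline already handles model placement, so the device helper is presumably used elsewhere in the class.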