Spaces:
Paused
Paused
File size: 4,184 Bytes
c8f429c f1c33ac 18952ad c8f429c fd9ea07 c8f429c 7475b39 e8b2dd0 f1c33ac c8f429c f1c33ac c8f429c |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 |
import logging
import sys

import gradio as gr

from llm import *
from utils import *
from presets import *
from overwrites import *
# Send all log output to stdout so it is visible in the hosting console.
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))

# Replace llama-index's chunk compaction with the project override
# (compact_text_chunks comes in via the star import from overwrites).
PromptHelper.compact_text_chunks = compact_text_chunks

with gr.Blocks(css="") as demo:
    with gr.Box():
        gr.Markdown("<h1 style='font-size: 48px; text-align: center;'>πͺ WizardLM Doc Chat π</h1>")
        gr.Markdown("<h3 style='text-align: center;'>π¦ Local LLM Do It For U π¦</h3>")

    # Per-session conversation state (history lists consumed by chat_ai).
    chat_context = gr.State([])
    new_google_chat_context = gr.State([])

    with gr.Tab("Upload"):
        with gr.Row():
            with gr.Column():
                index_type = gr.Dropdown(choices=["GPTVectorStoreIndex"], label="index_type", value="GPTVectorStoreIndex")
                upload_file = gr.Files(label="upload_file .txt, .pdf, .epub")
                new_index_name = gr.Textbox(placeholder="new_index_name: ", show_label=False).style(container=False)
                construct_btn = gr.Button("βοΈ Index", variant="primary")
        with gr.Row():
            with gr.Column():
                # Indexing hyper-parameters forwarded verbatim to construct_index.
                with gr.Row():
                    max_input_size = gr.Slider(256, 4096, 4096, step=1, label="max_input_size", interactive=True, show_label=True)
                    num_outputs = gr.Slider(256, 4096, 512, step=1, label="num_outputs", interactive=True, show_label=True)
                with gr.Row():
                    max_chunk_overlap = gr.Slider(0, 100, 20, step=1, label="max_chunk_overlap", interactive=True, show_label=True)
                    chunk_size_limit = gr.Slider(0, 4096, 0, step=1, label="chunk_size_limit", interactive=True, show_label=True)
                with gr.Row():
                    embedding_limit = gr.Slider(0, 100, 0, step=1, label="embedding_limit", interactive=True, show_label=True)
                    separator = gr.Textbox(show_label=False, label="separator", placeholder=",", value="", interactive=True)
                with gr.Row():
                    # Start locked; lock_params toggles interactivity per index_type.
                    num_children = gr.Slider(2, 100, 10, step=1, label="num_children", interactive=False, show_label=True)
                    max_keywords_per_chunk = gr.Slider(1, 100, 10, step=1, label="max_keywords_per_chunk", interactive=False, show_label=True)

    with gr.Tab("Search"):
        with gr.Row():
            with gr.Column(scale=3):
                search_options_checkbox = gr.CheckboxGroup(label="APIs", choices=["π Google", "π‘ Porch", "Your API Here"])
                # Template pickers.  The original script wired tmpl_select /
                # refine_select change-handlers without ever creating the
                # components (NameError at import time); these minimal
                # dropdowns restore the wiring.  TODO: populate choices from
                # the template tables in presets.
                tmpl_select = gr.Dropdown(choices=["Default"], value="Default", label="prompt_tmpl", interactive=True)
                refine_select = gr.Dropdown(choices=["Default"], value="Default", label="refine_tmpl", interactive=True)
                chatbot = gr.Chatbot()
                with gr.Row():
                    with gr.Column(min_width=50, scale=1):
                        chat_empty_btn = gr.Button("π§Ή", variant="secondary")
                    with gr.Column(scale=12):
                        chat_input = gr.Textbox(show_label=False, placeholder="Enter text...").style(container=False)
                    with gr.Column(min_width=50, scale=1):
                        chat_submit_btn = gr.Button("π", variant="primary")

    # Gradio event inputs must be components, not plain Python values; the
    # original passed a bare list / str here.  gr.State carries the values.
    # refine_tmpl, sim_k and chat_tone were referenced but never defined in
    # the original -- the defaults below are guesses; TODO confirm against
    # chat_ai's signature in llm/utils.
    index_select = gr.State([])
    prompt_tmpl = gr.State("Default")
    refine_tmpl = gr.State("Default")
    sim_k = gr.State(1)
    chat_tone = gr.State(0)

    # Shared input list for both ways of sending a message.
    chat_inputs = [index_select, chat_input, prompt_tmpl, refine_tmpl, sim_k,
                   chat_tone, chat_context, chatbot, search_options_checkbox]
    chat_input.submit(chat_ai, chat_inputs, [chat_context, chatbot])
    chat_input.submit(reset_textbox, [], [chat_input])
    chat_submit_btn.click(chat_ai, chat_inputs, [chat_context, chatbot])
    chat_submit_btn.click(reset_textbox, [], [chat_input])
    chat_empty_btn.click(lambda: ([], []), None, [chat_context, chatbot])
    tmpl_select.change(change_prompt_tmpl, [tmpl_select], [prompt_tmpl])
    refine_select.change(change_refine_tmpl, [refine_select], [refine_tmpl])
    index_type.change(lock_params, [index_type], [num_children, max_keywords_per_chunk])
    construct_btn.click(construct_index,
                        [upload_file, new_index_name, index_type, max_input_size,
                         num_outputs, max_chunk_overlap, chunk_size_limit,
                         embedding_limit, separator, num_children],
                        [index_select])

if __name__ == "__main__":
    demo.title = "WizardLM Doc Chat"
    demo.queue().launch()
|