Bhaskar2611 committed on
Commit
0491281
·
verified ·
1 Parent(s): 76997ee

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +116 -20
app.py CHANGED
@@ -1,12 +1,73 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
 
3
 
4
- """
5
- For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
6
- """
7
- client = InferenceClient("Qwen/Qwen2.5-Coder-32B-Instruct")
 
 
 
 
8
 
9
- def respond(message, history: list[tuple[str, str]]):
 
 
 
 
10
  system_message = "You are a friendly Chatbot. If the user query is product-related, provide structured product recommendations based on intent and relevance."
11
  max_tokens = 2048
12
  temperature = 0.7
@@ -20,33 +81,68 @@ def respond(message, history: list[tuple[str, str]]):
20
  if val[1]:
21
  messages.append({"role": "assistant", "content": val[1]})
22
 
23
- # Append product recommendation prompt if the user query is relevant
24
- product_prompt = ("Given a user's search query, recommend the most relevant products from the catalog. "
25
- "Consider synonyms, user intent, and semantic meaning rather than just keyword matching. "
26
- "If the query is vague, infer potential needs based on common shopping behavior. "
27
- "Provide a ranked list of product recommendations with a short explanation for each suggestion. "
28
- "Ensure the recommendations are diverse and cover multiple relevant categories if applicable. "
29
- f"Now, based on the user query: '{message}', generate a well-structured product recommendation list.")
30
-
31
- messages.append({"role": "user", "content": product_prompt})
32
 
33
  response = ""
34
-
35
- for message in client.chat_completion(
36
  messages,
37
  max_tokens=max_tokens,
38
  stream=True,
39
  temperature=temperature,
40
  top_p=top_p,
41
  ):
42
- token = message.choices[0].delta.content
43
  response += token
44
  yield response
45
 
 
 
 
 
 
 
 
 
 
 
 
46
  """
47
- For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
48
- """
49
- demo = gr.ChatInterface(respond)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
50
 
51
  if __name__ == "__main__":
52
  demo.launch()
 
1
+ # import gradio as gr
2
+ # from huggingface_hub import InferenceClient
3
+
4
+ # """
5
+ # For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
6
+ # """
7
+ # client = InferenceClient("Qwen/Qwen2.5-Coder-32B-Instruct")
8
+
9
+ # def respond(message, history: list[tuple[str, str]]):
10
+ # system_message = "You are a friendly Chatbot. If the user query is product-related, provide structured product recommendations based on intent and relevance."
11
+ # max_tokens = 2048
12
+ # temperature = 0.7
13
+ # top_p = 0.95
14
+
15
+ # messages = [{"role": "system", "content": system_message}]
16
+
17
+ # for val in history:
18
+ # if val[0]:
19
+ # messages.append({"role": "user", "content": val[0]})
20
+ # if val[1]:
21
+ # messages.append({"role": "assistant", "content": val[1]})
22
+
23
+ # # Append product recommendation prompt if the user query is relevant
24
+ # product_prompt = ("Given a user's search query, recommend the most relevant products from the catalog. "
25
+ # "Consider synonyms, user intent, and semantic meaning rather than just keyword matching. "
26
+ # "If the query is vague, infer potential needs based on common shopping behavior. "
27
+ # "Provide a ranked list of product recommendations with a short explanation for each suggestion. "
28
+ # "Ensure the recommendations are diverse and cover multiple relevant categories if applicable. "
29
+ # f"Now, based on the user query: '{message}', generate a well-structured product recommendation list.")
30
+
31
+ # messages.append({"role": "user", "content": product_prompt})
32
+
33
+ # response = ""
34
+
35
+ # for message in client.chat_completion(
36
+ # messages,
37
+ # max_tokens=max_tokens,
38
+ # stream=True,
39
+ # temperature=temperature,
40
+ # top_p=top_p,
41
+ # ):
42
+ # token = message.choices[0].delta.content
43
+ # response += token
44
+ # yield response
45
+
46
+ # """
47
+ # For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
48
+ # """
49
+ # demo = gr.ChatInterface(respond)
50
+
51
+ # if __name__ == "__main__":
52
+ # demo.launch()
53
  import gradio as gr
54
  from huggingface_hub import InferenceClient
55
+ import tempfile
56
 
57
# Shared Hugging Face Inference API clients: one chat model for product
# recommendations, one speech-to-text model for transcribing voice queries.
# NOTE(review): both calls go over the network — requires a valid HF token
# in the environment.
chat_client = InferenceClient("Qwen/Qwen2.5-Coder-32B-Instruct")
stt_client = InferenceClient("openai/whisper-large-v3")  # Whisper model for STT
60
+
61
def transcribe_audio(audio_file):
    """Transcribe a recorded audio file to text via the Whisper STT client.

    Args:
        audio_file: Filesystem path to the recorded audio.

    Returns:
        The transcription result from the Inference API.
    """
    with open(audio_file, "rb") as recording:
        raw_audio = recording.read()
    return stt_client.automatic_speech_recognition(raw_audio)
65
 
66
def respond(message, history: list[tuple[str, str]], audio=None):
    """Stream a product-recommendation reply for a text or voice query.

    Args:
        message: The user's typed query (superseded when *audio* is given).
        history: Prior (user, assistant) message pairs for context.
        audio: Optional path to a recorded audio file; when present it is
            transcribed and used as the query instead of *message*.

    Yields:
        The cumulative assistant response as tokens stream in.
    """
    # Voice input takes precedence over typed text.
    if audio:
        message = transcribe_audio(audio)

    system_message = "You are a friendly Chatbot. If the user query is product-related, provide structured product recommendations based on intent and relevance."
    max_tokens = 2048
    temperature = 0.7
    top_p = 0.95

    messages = [{"role": "system", "content": system_message}]

    # Replay prior turns so the model keeps conversational context.
    for val in history:
        if val[0]:
            messages.append({"role": "user", "content": val[0]})
        if val[1]:
            messages.append({"role": "assistant", "content": val[1]})

    product_prompt = ("Given a user's search query, recommend relevant products...")
    messages.append({"role": "user", "content": f"{product_prompt}\n\nUser query: {message}"})

    response = ""
    for chunk in chat_client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = chunk.choices[0].delta.content
        # The final streamed chunk can carry a None delta; skip it so the
        # concatenation below never raises TypeError.
        if token:
            response += token
            yield response
98
 
99
# Page-level CSS injected into gr.Blocks below: gradient backdrop for the
# whole app plus a white card treatment for the microphone widget
# (matched via elem_classes="audio-input").
css = """
.gradio-container {
    background: linear-gradient(135deg, #f5f7fa 0%, #c3cfe2 100%);
    min-height: 100vh;
}
.audio-input {
    background: white !important;
    border-radius: 10px !important;
    padding: 20px !important;
}
"""
111
+
112
with gr.Blocks(css=css) as demo:
    gr.Markdown("# Smart Product Assistant 🎤🛒")
    with gr.Row():
        with gr.Column(scale=2):
            chatbot = gr.Chatbot(height=600)
        with gr.Column(scale=1):
            gr.Markdown("## Input Methods")
            with gr.Tab("Text Input"):
                text_input = gr.Textbox(placeholder="Type your query here...", label="Text Input")
            with gr.Tab("Voice Input"):
                audio_input = gr.Audio(
                    sources="microphone",
                    type="filepath",
                    label="Speak your query",
                    elem_classes="audio-input"
                )
            submit_btn = gr.Button("Submit", variant="primary")

    def add_user_turn(text, audio, history):
        """Resolve the active input (voice wins over text) and append a
        pending user turn to the chat; clears both input widgets."""
        history = history or []
        query = transcribe_audio(audio) if audio else (text or "")
        if not query:
            return "", None, history
        return "", None, history + [[query, None]]

    def stream_bot_turn(history):
        """Stream the assistant reply into the last pending chat pair,
        adapting respond()'s plain-string stream to Chatbot's pair format."""
        if not history or history[-1][1] is not None:
            yield history
            return
        user_msg = history[-1][0]
        for partial in respond(user_msg, history[:-1]):
            history[-1][1] = partial
            yield history

    # Two-step wiring: first record the user turn (and clear inputs),
    # then stream the model's answer into the chat window.
    submit_btn.click(
        add_user_turn,
        [text_input, audio_input, chatbot],
        [text_input, audio_input, chatbot],
    ).then(
        stream_bot_turn,
        chatbot,
        chatbot,
    )
146
 
147
if __name__ == "__main__":
    # Launch the Gradio server when executed as a script.
    demo.launch()