Nymbo committed (verified)
Commit 70d58c7 · Parent(s): 45b3867

Update app.py

Files changed (1):
  1. app.py (+90, -77)

app.py CHANGED
@@ -10,27 +10,34 @@ ACCESS_TOKEN = os.getenv("HF_TOKEN")
 print("Access token loaded.")
 
 # Function to encode image to base64
-def encode_image(image):
-    if image is None:
+def encode_image(image_path):
+    if not image_path:
+        print("No image path provided")
         return None
 
-    # Convert to PIL Image if needed
-    if not isinstance(image, Image.Image):
-        try:
-            image = Image.open(image)
-        except Exception as e:
-            print(f"Error opening image: {e}")
-            return None
-
-    # Convert to RGB if image has an alpha channel (RGBA)
-    if image.mode == 'RGBA':
-        image = image.convert('RGB')
-
-    # Encode to base64
-    buffered = io.BytesIO()
-    image.save(buffered, format="JPEG")
-    img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")
-    return img_str
+    try:
+        print(f"Encoding image from path: {image_path}")
+
+        # If it's already a PIL Image
+        if isinstance(image_path, Image.Image):
+            image = image_path
+        else:
+            # Try to open the image file
+            image = Image.open(image_path)
+
+        # Convert to RGB if image has an alpha channel (RGBA)
+        if image.mode == 'RGBA':
+            image = image.convert('RGB')
+
+        # Encode to base64
+        buffered = io.BytesIO()
+        image.save(buffered, format="JPEG")
+        img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")
+        print("Image encoded successfully")
+        return img_str
+    except Exception as e:
+        print(f"Error encoding image: {e}")
+        return None
 
 def respond(
     message,
@@ -221,20 +228,18 @@ with gr.Blocks(theme="Nymbo/Nymbo_Theme") as demo:
     )
     print("Chatbot interface created.")
 
-    with gr.Row():
-        # Multimodal textbox for messages (combines text and file uploads)
-        msg = gr.MultimodalTextbox(
-            placeholder="Type a message or upload images...",
-            show_label=False,
-            container=False,
-            scale=12,
-            file_types=["image"],
-            file_count="multiple",
-            sources=["upload"]
-        )
-
-        # Send button for messages
-        submit_btn = gr.Button("Send", variant="primary")
+    # Multimodal textbox for messages (combines text and file uploads)
+    msg = gr.MultimodalTextbox(
+        placeholder="Type a message or upload images...",
+        show_label=False,
+        container=False,
+        scale=12,
+        file_types=["image"],
+        file_count="multiple",
+        sources=["upload"]
+    )
+
+    # Note: We're removing the separate submit button since MultimodalTextbox has its own
 
     # Create accordion for settings
     with gr.Accordion("Settings", open=False):
@@ -396,54 +401,79 @@ with gr.Blocks(theme="Nymbo/Nymbo_Theme") as demo:
 
     # Function for the chat interface
    def user(user_message, history):
+        # Debug logging for troubleshooting
+        print(f"User message received: {user_message}")
+
         # Skip if message is empty (no text and no files)
-        if (not user_message["text"] or user_message["text"].strip() == "") and not user_message["files"]:
+        if not user_message or (not user_message.get("text") and not user_message.get("files")):
+            print("Empty message, skipping")
             return history
 
-        # Process images and text into a display message
-        display_message = ""
+        # Prepare multimodal message format
+        text_content = user_message.get("text", "").strip()
+        files = user_message.get("files", [])
 
-        # Add text if present
-        if user_message["text"] and user_message["text"].strip() != "":
-            display_message += user_message["text"]
+        print(f"Text content: {text_content}")
+        print(f"Files: {files}")
 
-        # Add image references if present
-        file_displays = []
-        for file in user_message["files"]:
-            file_displays.append(file)
+        # If both text and files are empty, skip
+        if not text_content and not files:
+            print("No content to display")
+            return history
 
-        # Return updated history with display message
-        if file_displays:
-            return history + [[(display_message, file_displays), None]]
+        # Process multimodal content
+        if files:
+            # For multimodal messages with files
+            for file_path in files:
+                print(f"Processing file: {file_path}")
+                if not file_path:
+                    continue
+
+                # Add a combined message with text and file
+                history.append([(text_content, file_path), None])
+                # Reset text content for subsequent files if there are multiple
+                text_content = ""
+
+            return history
         else:
-            return history + [[display_message, None]]
+            # For text-only messages
+            history.append([text_content, None])
+            return history
 
-    # Define chat interface
+    # Define bot response function
     def bot(history, system_msg, max_tokens, temperature, top_p, freq_penalty, seed, provider, api_key, custom_model, search_term, selected_model):
-        # Extract the last user message
+        # Check if history is valid
         if not history or len(history) == 0:
+            print("No history to process")
             return history
 
+        # Extract the last user message
         user_message = history[-1][0]
+        print(f"Processing user message: {user_message}")
 
-        # Determine if the message is multimodal or text-only
-        is_multimodal = False
-        text_parts = ""
+        # Determine if the message contains images
+        text_content = ""
         image_files = []
 
-        # Process text and images from the message
         if isinstance(user_message, tuple):
-            text_parts = user_message[0]
-            image_files = user_message[1]
-            is_multimodal = True
+            # Tuple format: (text, image_path)
+            text_content = user_message[0] if user_message[0] else ""
+            # Handle both single image path and list of paths
+            if isinstance(user_message[1], list):
+                image_files = user_message[1]
+            else:
+                image_files = [user_message[1]]
+            print(f"Multimodal message detected - Text: {text_content}, Images: {image_files}")
         else:
-            text_parts = user_message
+            # Text-only message
+            text_content = user_message
+            print(f"Text-only message detected: {text_content}")
 
         # Process message through respond function
         history[-1][1] = ""
         for response in respond(
-            text_parts, # Text part
-            image_files if is_multimodal else None, # Image part
+            text_content,
+            image_files if image_files else None,
             history[:-1],
             system_msg,
             max_tokens,
@@ -460,7 +490,7 @@ with gr.Blocks(theme="Nymbo/Nymbo_Theme") as demo:
             history[-1][1] = response
             yield history
 
-    # Event handlers
+    # Event handlers - only using the MultimodalTextbox's built-in submit functionality
     msg.submit(
         user,
         [msg, chatbot],
@@ -478,23 +508,6 @@ with gr.Blocks(theme="Nymbo/Nymbo_Theme") as demo:
         [msg]
     )
 
-    submit_btn.click(
-        user,
-        [msg, chatbot],
-        [chatbot],
-        queue=False
-    ).then(
-        bot,
-        [chatbot, system_message_box, max_tokens_slider, temperature_slider, top_p_slider,
-         frequency_penalty_slider, seed_slider, provider_radio, byok_textbox, custom_model_box,
-         model_search_box, featured_model_radio],
-        [chatbot]
-    ).then(
-        lambda: {"text": "", "files": []}, # Clear inputs after submission
-        None,
-        [msg]
-    )
-
     # Connect the model filter to update the radio choices
     model_search_box.change(
         fn=filter_models,
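
A note on how the reworked encode_image() is typically consumed: the base64 string it returns is meant to be wrapped in a data: URI inside an OpenAI-style multimodal chat message. The payload-building code lives in respond() and is not part of this diff, so the helper below is only a hedged sketch; build_user_message and its exact dict layout are assumptions for illustration, not code from app.py.

# Hypothetical sketch (not from app.py): embedding encode_image() output in an
# OpenAI-style chat message. Assumes encode_image() from app.py is in scope and
# returns a base64-encoded JPEG string, or None on failure.
def build_user_message(text_content, image_paths):
    content = []
    if text_content:
        content.append({"type": "text", "text": text_content})
    for path in image_paths or []:
        encoded = encode_image(path)
        if encoded:
            content.append({
                "type": "image_url",
                "image_url": {"url": f"data:image/jpeg;base64,{encoded}"},
            })
    return {"role": "user", "content": content}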
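
The other half of the commit, replacing the separate Send button with the MultimodalTextbox's built-in submit event, can be reproduced in a minimal standalone app. The sketch below is illustrative only: component and handler names are placeholders, the bot simply echoes, and it assumes the same dict-valued MultimodalTextbox payload ({"text": ..., "files": [...]}) and pair-style Chatbot history that the diff above relies on.

import gradio as gr

# Minimal sketch of the submit -> user -> bot -> clear chain used above,
# with no separate Send button. Names here are placeholders, not app.py's.
def add_user_message(msg, history):
    # msg is a dict like {"text": "...", "files": [...]} from MultimodalTextbox
    return history + [[msg.get("text", ""), None]]

def answer(history):
    # Placeholder bot: echo the last user turn instead of calling a model
    history[-1][1] = "echo: " + str(history[-1][0])
    return history

with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    box = gr.MultimodalTextbox(
        placeholder="Type a message or upload images...",
        file_types=["image"],
        file_count="multiple",
        sources=["upload"],
    )
    # The textbox's own submit event drives the whole chain
    box.submit(add_user_message, [box, chatbot], [chatbot], queue=False).then(
        answer, [chatbot], [chatbot]
    ).then(lambda: {"text": "", "files": []}, None, [box])  # clear input afterwards

if __name__ == "__main__":
    demo.launch()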