baxin committed
Commit d2dfd31 · Parent: 93aca94

fix ui issue

Files changed (1): chat_column.py (+4 -0)
chat_column.py CHANGED
@@ -17,6 +17,7 @@ def render_chat_column(st, llm_client, model_option, max_tokens, BASE_PROMPT):
             st.markdown(message["content"])
 
     # --- Chat Input at the bottom ---
+    # Use a simple approach - just place the input after all messages
     if prompt := st.chat_input("Enter topic to generate JSON for Veo3..."):
         if len(prompt.strip()) == 0:
             st.warning("Please enter a topic.", icon="⚠️")
@@ -84,6 +85,9 @@ def render_chat_column(st, llm_client, model_option, max_tokens, BASE_PROMPT):
                 # Update existing assistant message if needed
                 st.session_state.messages[-1]['content'] = full_response
 
+                # Rerun to ensure proper layout
+                st.rerun()
+
             except Exception as e:
                 st.error(
                     f"Error during LLM response generation: {str(e)}", icon="🚨")