refactor: move LLM max input length to global config
The LLM max input length configuration was moved from the session state in app.py to the GlobalConfig class in global_config.py. This centralizes the configuration and improves maintainability by keeping hardcoded values out of the application logic.
- app.py +1 -14
- global_config.py +1 -0
app.py
CHANGED
@@ -172,19 +172,6 @@ with st.sidebar:
         horizontal=True
     )
 
-    # --- LLM Max Input Length Config ---
-    # Only set session state if not already set by user
-    if 'llm_max_input_length' not in st.session_state:
-        st.session_state['llm_max_input_length'] = 400
-    llm_max_input_length = st.sidebar.number_input(
-        'Max LLM Input Length (characters)',
-        min_value=100,
-        max_value=20000,
-        step=100,
-        key='llm_max_input_length',
-        help='Maximum number of characters allowed for the LLM input prompt.'
-    )
-
     if RUN_IN_OFFLINE_MODE:
         llm_provider_to_use = st.text_input(
             label='2: Enter Ollama model name to use (e.g., mistral:v0.2):',
@@ -304,7 +291,7 @@ def set_up_chat_ui():
 
     if prompt := st.chat_input(
         placeholder=APP_TEXT['chat_placeholder'],
-        max_chars=st.session_state['llm_max_input_length'],
+        max_chars=GlobalConfig.LLM_MODEL_MAX_INPUT_LENGTH,
         accept_file=True,
         file_type=['pdf', ],
     ):
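For context, a minimal sketch of the updated call site after this change. The import line and the APP_TEXT stand-in are assumptions for a self-contained example; in the real app.py, APP_TEXT is the app's string table and GlobalConfig is presumably already imported:

import streamlit as st

from global_config import GlobalConfig  # assumed import path within this repo

APP_TEXT = {'chat_placeholder': 'What would you like to present?'}  # stand-in for the app's string table


def set_up_chat_ui():
    # The 400-character session-state default is gone; the limit now comes
    # from a single class attribute shared across the app.
    if prompt := st.chat_input(
        placeholder=APP_TEXT['chat_placeholder'],
        max_chars=GlobalConfig.LLM_MODEL_MAX_INPUT_LENGTH,
        accept_file=True,  # file upload in chat input requires a recent Streamlit release
        file_type=['pdf', ],
    ):
        # With accept_file=True, the submitted value carries both text and files
        st.write(prompt.text if hasattr(prompt, 'text') else prompt)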
global_config.py
CHANGED
@@ -97,6 +97,7 @@ class GlobalConfig:
     DEFAULT_MODEL_INDEX = int(os.environ.get('DEFAULT_MODEL_INDEX', '4'))
     LLM_MODEL_TEMPERATURE = 0.2
     MAX_PAGE_COUNT = 50
+    LLM_MODEL_MAX_INPUT_LENGTH = 20000  # characters
 
     LOG_LEVEL = 'DEBUG'
     COUNT_TOKENS = False
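One side effect worth noting: the removed sidebar widget defaulted to 400 characters and let users raise the limit to at most 20,000, while the new constant pins the cap at that former maximum for everyone. If runtime tuning is ever wanted again, the class already has a precedent for environment-variable overrides in DEFAULT_MODEL_INDEX. A hypothetical sketch following that pattern, not part of this commit:

import os


class GlobalConfig:
    # Hypothetical variant: same 20,000-character default, but overridable
    # via an environment variable, mirroring DEFAULT_MODEL_INDEX above.
    LLM_MODEL_MAX_INPUT_LENGTH = int(
        os.environ.get('LLM_MODEL_MAX_INPUT_LENGTH', '20000')
    )  # characters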