rsrini7 committed
Commit 3b3664d · 1 Parent(s): ebb3b12

OpenRouter with OpenAI API using LangChain SDK

Files changed (2):
  1. app.py +17 -23
  2. helpers/llm_helper.py +14 -33
app.py CHANGED
@@ -345,30 +345,24 @@ def set_up_chat_ui():
         )
         return
 
-        if provider == GlobalConfig.PROVIDER_OPENROUTER:
-            # OpenRouter returns a function, not a LangChain LLM. Call it directly.
-            response_json = llm(formatted_template)
-            # Extract the text from the OpenAI-compatible response
-            try:
-                response = response_json["choices"][0]["message"]["content"]
-            except Exception as ex:
-                handle_error(f"Failed to parse OpenRouter response: {ex}\nRaw response: {response_json}", True)
-                return
-        else:
-            for chunk in llm.stream(formatted_template):
-                if isinstance(chunk, str):
-                    response += chunk
+        for chunk in llm.stream(formatted_template):
+            if isinstance(chunk, str):
+                response += chunk
+            else:
+                content = getattr(chunk, 'content', None)
+                if content is not None:
+                    response += content
                 else:
-                    response += chunk.content  # AIMessageChunk
-
-                # Update the progress bar with an approx progress percentage
-                progress_bar.progress(
-                    min(
-                        len(response) / gcfg.get_max_output_tokens(llm_provider_to_use),
-                        0.95
-                    ),
-                    text='Streaming content...this might take a while...'
-                )
+                    response += str(chunk)
+
+            # Update the progress bar with an approx progress percentage
+            progress_bar.progress(
+                min(
+                    len(response) / gcfg.get_max_output_tokens(llm_provider_to_use),
+                    0.95
+                ),
+                text='Streaming content...this might take a while...'
+            )
     except (httpx.ConnectError, requests.exceptions.ConnectionError):
         handle_error(
             'A connection error occurred while streaming content from the LLM endpoint.'
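
Note on the streaming loop above: LangChain's llm.stream() yields plain strings for BaseLLM (text-completion) models but AIMessageChunk objects for BaseChatModel chat models, and get_langchain_llm() can return either kind, hence the getattr probe. A minimal standalone sketch of the same accumulation pattern, assuming langchain-openai is installed and OPENROUTER_API_KEY is set; the model id and prompt are placeholders, not from this commit:

    import os

    from langchain_openai import ChatOpenAI

    # Placeholder model id; any OpenRouter-routed model streams the same way.
    llm = ChatOpenAI(
        base_url='https://openrouter.ai/api/v1',
        openai_api_key=os.environ['OPENROUTER_API_KEY'],
        model_name='openrouter/auto',
        streaming=True,
    )

    response = ''
    for chunk in llm.stream('Say hello in one sentence.'):
        if isinstance(chunk, str):
            # BaseLLM text-completion models stream raw strings.
            response += chunk
        else:
            # Chat models stream AIMessageChunk objects; fall back to str()
            # for any chunk type without a .content attribute.
            content = getattr(chunk, 'content', None)
            response += content if content is not None else str(chunk)

    print(response)

The 0.95 cap on the progress bar is a heuristic: len(response) counts characters while get_max_output_tokens() counts tokens, so the ratio only approximates completion and should not reach 1.0 before streaming actually ends.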
helpers/llm_helper.py CHANGED
@@ -11,7 +11,8 @@ import requests
 from requests.adapters import HTTPAdapter
 from urllib3.util import Retry
 from langchain_core.language_models import BaseLLM, BaseChatModel
-
+from langchain_openai import ChatOpenAI
+import os
 
 sys.path.append('..')
 
@@ -189,39 +190,19 @@ def get_langchain_llm(
         )
 
     if provider == GlobalConfig.PROVIDER_OPENROUTER:
+        # Use langchain-openai's ChatOpenAI for OpenRouter
         logger.debug('Getting LLM via OpenRouter: %s', model)
-        OPENROUTER_API_URL = "https://openrouter.ai/api/v1/chat/completions"
-        OPENROUTER_API_KEY = api_key
-        import os
-        import requests
-
-        def openrouter_completion(prompt, model=model, api_key=OPENROUTER_API_KEY):
-            headers = {
-                "Authorization": f"Bearer {api_key}",
-                "Content-Type": "application/json",
-            }
-            # Optionally add analytics headers if available
-            site_url = os.getenv("OPENROUTER_SITE_URL")
-            app_name = os.getenv("OPENROUTER_SITE_NAME")
-            if site_url:
-                headers["HTTP-Referer"] = site_url
-            if app_name:
-                headers["X-Title"] = app_name
-            data = {
-                "model": model,
-                "messages": [
-                    {"role": "system", "content": "You are a helpful assistant summarizing technical support information. Provide a concise summary or key action points based on the provided context."},
-                    {"role": "user", "content": prompt},
-                ]
-            }
-            response = requests.post(
-                url=OPENROUTER_API_URL,
-                headers=headers,
-                json=data
-            )
-            response.raise_for_status()
-            return response.json()
-        return openrouter_completion
+        openrouter_api_key = api_key or os.environ.get("OPENROUTER_API_KEY")
+        base_url = "https://openrouter.ai/api/v1"
+        # NOTE: model should be passed as model_name
+        return ChatOpenAI(
+            base_url=base_url,
+            openai_api_key=openrouter_api_key,
+            model_name=model,
+            temperature=GlobalConfig.LLM_MODEL_TEMPERATURE,
+            max_tokens=max_new_tokens,
+            streaming=True,
+        )
 
     if provider == GlobalConfig.PROVIDER_COHERE:
         from langchain_cohere.llms import Cohere
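
One behavioral difference worth flagging: the old openrouter_completion() attached OpenRouter's optional analytics headers (HTTP-Referer from OPENROUTER_SITE_URL, X-Title from OPENROUTER_SITE_NAME), and the new ChatOpenAI path drops them. If they are still wanted, ChatOpenAI accepts a default_headers mapping that is forwarded to the underlying OpenAI client. A hedged sketch, reusing the old code's environment variable names; the model id is a placeholder:

    import os

    from langchain_openai import ChatOpenAI

    # Rebuild the optional analytics headers the old requests-based code sent.
    headers = {}
    if os.getenv('OPENROUTER_SITE_URL'):
        headers['HTTP-Referer'] = os.getenv('OPENROUTER_SITE_URL')
    if os.getenv('OPENROUTER_SITE_NAME'):
        headers['X-Title'] = os.getenv('OPENROUTER_SITE_NAME')

    llm = ChatOpenAI(
        base_url='https://openrouter.ai/api/v1',
        openai_api_key=os.environ.get('OPENROUTER_API_KEY'),
        model_name='openrouter/auto',  # placeholder model id
        streaming=True,
        default_headers=headers or None,
    )

    # Non-streaming smoke test; .invoke() returns an AIMessage.
    print(llm.invoke('Reply with OK.').content)

Also note that the old code's hard-coded system prompt ("You are a helpful assistant summarizing technical support information...") disappears with this change; prompting now comes entirely from the caller's formatted_template, which is the more appropriate behavior for a general-purpose helper.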