Update tools.py
tools.py (CHANGED)
@@ -22,6 +22,7 @@ from langchain_community.tools.wikipedia.tool import WikipediaQueryRun
 from langchain_google_community import SpeechToTextLoader
 from langchain_community.tools import YouTubeSearchTool
 from youtube_transcript_api import YouTubeTranscriptApi
+from langchain_community.document_loaders.blob_loaders.youtube_audio import YoutubeAudioLoader
 from langchain_community.tools.file_management.read import ReadFileTool
 from langchain.chains.summarize import load_summarize_chain
 from langchain.prompts import PromptTemplate
@@ -122,7 +123,7 @@ def search_and_extract(query: str) -> list[dict]:


 @tool
-def aggregate_information(results: list[str], query: str) -> str:
+def aggregate_information(query: str, results: list[str]) -> str:
     """
     Processes a list of unstructured text chunks (e.g., search results) and produces a concise, query-specific summary.

@@ -144,9 +145,9 @@ def aggregate_information(results: list[str], query: str) -> str:
     # Convert to LangChain Document objects
     docs = [Document(page_content=chunk) for chunk in results]

-    llm = ChatOpenAI(model="gpt-4o-mini", temperature=0
+    llm = ChatOpenAI(model="gpt-4o-mini", temperature=0)

-    # Map Prompt — Summarize each document in light of the query
+    # 🔍 Map Prompt — Summarize each document in light of the query
     map_prompt = PromptTemplate.from_template(
         "You are analyzing a search result in the context of the question: '{query}'.\n\n"
         "Search result:\n{text}\n\n"
@@ -158,13 +159,12 @@ def aggregate_information(results: list[str], query: str) -> str:
         "Relevant Summary:"
     )

-    # Combine Prompt — Aggregate the summaries to one final answer
+    # 🧠 Combine Prompt — Aggregate the summaries to one final answer
     combine_prompt = PromptTemplate.from_template(
-        "You are aggregating information to answer the following question: '{query}'.\n\n"
+        "You are aggregating information to provide context to answer the following question: '{query}'.\n\n"
         "Here are the summaries from filtered search results:\n{text}\n\n"
-        "
-        "
-        "Final Answer:"
+        "Use the provided summaries to construct a context that directly supports the query without answering it.\n"
+        "Context:"
     )

     chain = load_summarize_chain(
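Putting the changed hunks together, the updated tool looks roughly like the sketch below. Only the lines visible in this diff are the author's: the import locations for tool, Document, and ChatOpenAI, the trimmed docstring, the omitted middle of the map prompt, and everything passed to load_summarize_chain (assumed here to be a standard map_reduce setup wired to the two prompts) are assumptions, not the repository's exact code.

from langchain_core.tools import tool            # assumed import location
from langchain_core.documents import Document    # assumed import location
from langchain_openai import ChatOpenAI          # assumed import location
from langchain.chains.summarize import load_summarize_chain
from langchain.prompts import PromptTemplate


@tool
def aggregate_information(query: str, results: list[str]) -> str:
    """
    Processes a list of unstructured text chunks (e.g., search results) and
    produces a concise, query-specific summary.
    """
    # Convert to LangChain Document objects
    docs = [Document(page_content=chunk) for chunk in results]

    llm = ChatOpenAI(model="gpt-4o-mini", temperature=0)

    # Map step: summarize each search result in light of the query
    # (the diff omits the lines between the two visible fragments)
    map_prompt = PromptTemplate.from_template(
        "You are analyzing a search result in the context of the question: '{query}'.\n\n"
        "Search result:\n{text}\n\n"
        "Relevant Summary:"
    )

    # Combine step: merge the per-result summaries into supporting context
    combine_prompt = PromptTemplate.from_template(
        "You are aggregating information to provide context to answer the following question: '{query}'.\n\n"
        "Here are the summaries from filtered search results:\n{text}\n\n"
        "Use the provided summaries to construct a context that directly supports the query without answering it.\n"
        "Context:"
    )

    # Assumed wiring: the diff cuts off right after load_summarize_chain(,
    # so the arguments below are a guess at a standard map_reduce configuration.
    chain = load_summarize_chain(
        llm,
        chain_type="map_reduce",
        map_prompt=map_prompt,
        combine_prompt=combine_prompt,
    )
    # Extra prompt variables such as 'query' can be passed alongside the documents.
    return chain.invoke({"input_documents": docs, "query": query})["output_text"]

Note that the commit swaps the parameter order to (query, results); through the LangChain tool interface a call would then look roughly like aggregate_information.invoke({"query": "...", "results": [...]}) (placeholder values).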
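The only other change is the newly imported YoutubeAudioLoader. The diff does not show where it is used elsewhere in tools.py, so the snippet below is only a generic sketch of the loader's blob interface, with a placeholder URL and save directory; it is not taken from this repository.

# Generic sketch (not the repo's code): YoutubeAudioLoader downloads a video's
# audio track (via yt-dlp) and exposes it through the blob-loader interface.
from langchain_community.document_loaders.blob_loaders.youtube_audio import YoutubeAudioLoader

# Placeholder URL and directory; the real values would live elsewhere in tools.py.
urls = ["https://www.youtube.com/watch?v=EXAMPLE"]
loader = YoutubeAudioLoader(urls, save_dir="/tmp/youtube_audio")

# Each blob points at a downloaded audio file that a speech-to-text loader or
# parser (e.g. the imported SpeechToTextLoader) could transcribe afterwards.
for blob in loader.yield_blobs():
    print(blob.path)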