Spaces:
Paused
Paused
File size: 1,239 Bytes
f6091f7 916e00a f6091f7 2a8011b 916e00a 82ddd60 916e00a 82ddd60 f6091f7 82ddd60 916e00a 82ddd60 916e00a 2a8011b 916e00a 2a8011b 9daa0fe 2a8011b 916e00a 82ddd60 916e00a 82ddd60 29df9bc 916e00a 2016044 82ddd60 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 |
import { GoogleCustomSearch } from "openai-function-calling-tools";
import { LLMError, LLMStream } from './stream';
/**
 * API route handler: answers a search-style question by streaming an LLM
 * response that can call Google Custom Search as a tool.
 *
 * Expects a JSON body of shape `{ question: string }` and streams the
 * model output to the client as plain text chunks.
 *
 * @param {import('http').IncomingMessage & { body: any }} req - request with parsed body
 * @param {import('http').ServerResponse & { status?: Function }} res - Node-style response
 */
const handler = async (req, res) => {
  try {
    const { question } = await req.body;

    // Search tool the model may invoke to ground its answer.
    const googleCustomSearch = new GoogleCustomSearch({
      apiKey: process.env.API_KEY,
      // NOTE(review): env var is named CONTEXT_KEY but feeds googleCSEId —
      // presumably it holds the Custom Search Engine id; confirm in deployment config.
      googleCSEId: process.env.CONTEXT_KEY,
    });

    const messages = [
      {
        role: "user",
        content: question,
      },
    ];

    const functions = {
      googleCustomSearch,
    };

    // Never reassigned, so const (was let).
    const promptToSend =
      "You are a helpful assistant, a search term is provided and you are given search results to help provide a useful response.";

    const stream = await LLMStream(
      { id: "gpt-3.5-turbo-0613" },
      promptToSend,
      0.8,
      messages,
      functions,
    );

    // Stream each decoded chunk as it arrives.
    // BUGFIX: the original accumulated every chunk into `data` and wrote the
    // whole accumulator on each iteration, re-sending all earlier content
    // with every new chunk (duplicated, quadratic output).
    const decoder = new TextDecoder();
    for await (const chunk of stream) {
      // { stream: true } preserves multi-byte UTF-8 sequences that are
      // split across chunk boundaries.
      res.write(decoder.decode(chunk, { stream: true }));
    }
    return res.end();
  } catch (error) {
    console.error(error);
    // BUGFIX: the original returned Fetch-API `new Response(...)` objects
    // from a Node-style (req, res) handler; those are never sent to the
    // client. Use the same `res` object the success path writes to.
    if (error instanceof LLMError) {
      res.statusCode = 500;
      return res.end(error.message);
    }
    res.statusCode = 500;
    return res.end("Error");
  }
};
export default handler;