zunairanureen committed on
Commit c2780c6 · verified · 1 Parent(s): a67cf9e

Upload 3 files

Files changed (3)
  1. .env +1 -0
  2. app.py +92 -0
  3. requirements.txt +11 -0
.env ADDED
@@ -0,0 +1 @@
+ GROQ_API_KEY="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"
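For reference, a minimal sketch of how app.py picks this value up via python-dotenv; environment lookups are case-sensitive on Linux, so the key name must match the one passed to os.getenv, which is why it is spelled GROQ_API_KEY here:

import os
from dotenv import load_dotenv

load_dotenv()                              # copies key=value pairs from .env into the process environment
groq_api_key = os.getenv("GROQ_API_KEY")   # returns None if the names do not match exactly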
app.py ADDED
@@ -0,0 +1,92 @@
+ import os
+ import time
+ import streamlit as st
+ from langchain_groq import ChatGroq
+ from langchain_community.document_loaders import PyPDFDirectoryLoader
+ from langchain_text_splitters import RecursiveCharacterTextSplitter
+ from langchain_huggingface import HuggingFaceEmbeddings
+ from langchain.chains import create_retrieval_chain
+ from langchain.chains.combine_documents import create_stuff_documents_chain
+ from langchain_core.prompts import ChatPromptTemplate
+ from langchain_community.vectorstores import FAISS
+ from dotenv import load_dotenv
+ from sentence_transformers import SentenceTransformer  # only needed for the commented-out alternative below
+
+ # Load environment variables
+ load_dotenv()
+ groq_api_key = os.getenv("GROQ_API_KEY")
+
+ # Streamlit Title
+ st.title("ChatGroq RAG with PDF")
+
+ # Initialize LLM
+ llm = ChatGroq(groq_api_key=groq_api_key, model="llama3-8b-8192")
+
+ # Define Prompt Template
+ prompt = ChatPromptTemplate.from_template(
+     """
+     Answer the questions based on the provided context only.
+     Please provide the most accurate response based on the question.
+     <context>
+     {context}
+     </context>
+
+     Question: {input}
+     """
+ )
+
+ # The embedding model and vector store are created inside vector_embedding()
+
+ # Embedding Function: load PDFs, split them into chunks, embed, and index with FAISS
+ def vector_embedding():
+     if "vectors" not in st.session_state:
+
+         st.session_state.loader = PyPDFDirectoryLoader("./pdf")
+         st.session_state.docs = st.session_state.loader.load()
+         st.session_state.text_splitter = RecursiveCharacterTextSplitter(
+             chunk_size=1000, chunk_overlap=200
+         )
+         st.session_state.final_document = st.session_state.text_splitter.split_documents(
+             st.session_state.docs
+         )
+         model_name = "sentence-transformers/all-mpnet-base-v2"
+         st.session_state.embeddings = HuggingFaceEmbeddings(model_name=model_name)
+         # model = SentenceTransformer("jxm/cde-small-v1", trust_remote_code=True)
+         # st.session_state.embeddings = HuggingFaceEmbeddings(model=model)
+         st.session_state.vectors = FAISS.from_documents(
+             st.session_state.final_document, st.session_state.embeddings
+         )
+
+ # UI for User Input
+ prompt1 = st.text_input("Enter Your Question from Documents")
+
+ # Embed Documents Button
+ if st.button("Document Embedding"):
+     with st.spinner("Embedding documents..."):
+         vector_embedding()
+     st.success("Vector Store created.")
+
+ # Handle Queries
+ if prompt1.strip():
+     if "vectors" not in st.session_state or st.session_state.vectors is None:
+         st.error("Please embed the documents first by clicking the 'Document Embedding' button.")
+     else:
+         with st.spinner("Fetching response..."):
+             start = time.time()
+             document_chain = create_stuff_documents_chain(llm, prompt)
+             retriever = st.session_state.vectors.as_retriever()
+             retrieval_chain = create_retrieval_chain(retriever, document_chain)
+             response = retrieval_chain.invoke({"input": prompt1})
+             end = time.time()
+
+         st.write(response['answer'])
+         st.write(f"Response generated in {end - start:.2f} seconds.")
+
+         with st.expander("Document Similarity Search"):
+             context = response.get('context', [])
+             if not context:
+                 st.write("No similar documents found.")
+             else:
+                 for i, doc in enumerate(context):
+                     st.write(doc.page_content)
+                     st.write("-----------------------------------------------")
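For quick verification of the same retrieval pipeline outside Streamlit, here is a minimal sketch as a standalone script; the file name check_rag.py and the sample question are placeholders and not part of this commit:

# check_rag.py -- hypothetical standalone test mirroring app.py (assumes ./pdf and .env exist)
import os
from dotenv import load_dotenv
from langchain_groq import ChatGroq
from langchain_community.document_loaders import PyPDFDirectoryLoader
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_huggingface import HuggingFaceEmbeddings
from langchain_community.vectorstores import FAISS
from langchain.chains import create_retrieval_chain
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain_core.prompts import ChatPromptTemplate

load_dotenv()
llm = ChatGroq(groq_api_key=os.getenv("GROQ_API_KEY"), model="llama3-8b-8192")

# Load and chunk the PDFs exactly as app.py does
docs = PyPDFDirectoryLoader("./pdf").load()
chunks = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200).split_documents(docs)

# Build the FAISS index with the same embedding model
embeddings = HuggingFaceEmbeddings(model_name="sentence-transformers/all-mpnet-base-v2")
vectors = FAISS.from_documents(chunks, embeddings)

# Wire retriever + LLM and ask one sample question
prompt = ChatPromptTemplate.from_template(
    "Answer the questions based on the provided context only.\n"
    "<context>\n{context}\n</context>\n\nQuestion: {input}"
)
chain = create_retrieval_chain(vectors.as_retriever(), create_stuff_documents_chain(llm, prompt))
print(chain.invoke({"input": "What is this document about?"})["answer"])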
requirements.txt ADDED
@@ -0,0 +1,11 @@
+ streamlit
+ langchain
+ langchain-community
+ langchain-core
+ langchain-text-splitters
+ langchain-huggingface
+ langchain-groq
+ sentence-transformers
+ faiss-cpu
+ python-dotenv
+ pypdf