pankajsingh3012 committed
Commit 445dac6 · verified · Parent: 325521f

Update app.py

Files changed (1):
  1. app.py +8 -12
app.py CHANGED
@@ -85,17 +85,16 @@ if st.button('Crawl CUDA Documentation'):
     with st.spinner('Crawling CUDA documentation...'):
         crawled_data, crawled_urls = crawl("https://docs.nvidia.com/cuda/", max_depth=1, delay=0.1)
         st.write(f"Processed {len(crawled_data)} pages.")
-
+
         texts = []
         for url, text in crawled_data:
             chunks = chunk_text(text, max_chunk_size=1024)
             texts.extend(chunks)
         st.success("Crawling and processing completed.")
-
+
         # Create embeddings
-        embeddings = HuggingFaceEmbeddings(model_name='sentence-transformers/all-MiniLM-L6-v2',
-                                           model_kwargs={'device': 'cpu'})
-
+        embeddings = HuggingFaceEmbeddings(model_name='sentence-transformers/all-MiniLM-L6-v2', model_kwargs={'device': 'cpu'})
+
         # Store embeddings in FAISS
         st.session_state.vector_store = FAISS.from_texts(texts, embeddings)
         st.session_state.documents_loaded = True
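
This first hunk is purely cosmetic: it collapses the two-line HuggingFaceEmbeddings(...) call onto a single line and tidies blank lines, leaving behavior unchanged. For reference, here is a minimal standalone sketch of the embed-and-index step, assuming langchain-community plus faiss-cpu and sentence-transformers are installed (older LangChain releases expose the same classes under langchain.embeddings and langchain.vectorstores); the sample texts are illustrative stand-ins for the crawled chunks:

# Minimal sketch of the embed-and-index step, not the full app.
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import FAISS

# Illustrative stand-ins for the chunks produced by crawl() + chunk_text().
texts = [
    "CUDA is a parallel computing platform and programming model from NVIDIA.",
    "cudaMalloc allocates linear memory on the device.",
]

embeddings = HuggingFaceEmbeddings(
    model_name='sentence-transformers/all-MiniLM-L6-v2',
    model_kwargs={'device': 'cpu'},  # pin inference to CPU, as the app does
)

# Embeds every chunk and builds an in-memory FAISS index over the vectors.
vector_store = FAISS.from_texts(texts, embeddings)

# Nearest-neighbour retrieval over the indexed chunks.
print(vector_store.similarity_search("How do I allocate device memory?", k=1))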
@@ -109,16 +108,13 @@ if query and st.session_state.documents_loaded:
     llm = GoogleGenerativeAI(model='gemini-1.0-pro', google_api_key="AIzaSyC1AvHnvobbycU8XSCXh-gRq3DUfG0EP98")
 
     # Create a PromptTemplate for the QA chain
-    qa_prompt = PromptTemplate(
-        template="Answer the following question based on the context provided:\n\n{context}\n\nQuestion: {question}\nAnswer:",
-        input_variables=["context", "question"])
+    qa_prompt = PromptTemplate(template="Answer the following question based on the context provided:\n\n{context}\n\nQuestion: {question}\nAnswer:", input_variables=["context", "question"])
 
     # Create the retrieval QA chain
-    qa_chain = RetrievalQA.from_chain_type(
-        chain_type="map_rerank",
+    qa_chain = RetrievalQA(
         retriever=st.session_state.vector_store.as_retriever(),
-        combine_documents_chain=qa_prompt,
-        llm=llm
+        llm=llm,
+        prompt=qa_prompt
     )
 
     response = qa_chain({"question": query})
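
As committed, this second hunk likely trades one runtime error for another. In released LangChain versions, RetrievalQA is a thin wrapper around a combine_documents_chain, and its constructor accepts neither an llm nor a prompt keyword, so RetrievalQA(retriever=..., llm=llm, prompt=qa_prompt) should fail pydantic validation. The removed from_chain_type call had the mirror-image problem: combine_documents_chain is not a from_chain_type parameter, it was handed a bare PromptTemplate rather than a chain, and "map_rerank" additionally expects a specially formatted scoring prompt. Below is a sketch of the conventional wiring under the classic LangChain API, where a custom prompt for the "stuff" chain type is passed through chain_type_kwargs; it reuses the vector_store from the sketch above and reads the Gemini key from an environment variable rather than hard-coding it as the diff does:

# Sketch under the classic LangChain RetrievalQA API; not the app's actual code.
import os

from langchain.chains import RetrievalQA
from langchain.prompts import PromptTemplate
from langchain_google_genai import GoogleGenerativeAI

llm = GoogleGenerativeAI(
    model='gemini-1.0-pro',
    google_api_key=os.environ["GOOGLE_API_KEY"],  # never commit API keys
)

qa_prompt = PromptTemplate(
    template=(
        "Answer the following question based on the context provided:\n\n"
        "{context}\n\nQuestion: {question}\nAnswer:"
    ),
    input_variables=["context", "question"],  # the variables the "stuff" chain supplies
)

qa_chain = RetrievalQA.from_chain_type(
    llm=llm,
    chain_type="stuff",  # "map_rerank" would need a scoring prompt with an output parser
    retriever=vector_store.as_retriever(),  # e.g. the FAISS store built above
    chain_type_kwargs={"prompt": qa_prompt},
)

# RetrievalQA's default input key is "query", not "question".
response = qa_chain({"query": "What does cudaMalloc do?"})
print(response["result"])

If this shape is adopted, the last line of the hunk would also need to change from qa_chain({"question": query}) to qa_chain({"query": query}).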
 
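Both sides of the diff also depend on a project-local chunk_text helper that this commit does not touch and whose implementation is not shown. Purely as a hypothetical reading of chunk_text(text, max_chunk_size=1024), a whitespace-aware character-budget splitter would look something like:

def chunk_text(text: str, max_chunk_size: int = 1024) -> list[str]:
    """Hypothetical sketch: split text into chunks of at most
    max_chunk_size characters, preferring whitespace boundaries."""
    chunks: list[str] = []
    current: list[str] = []
    length = 0
    for word in text.split():
        extra = len(word) + (1 if current else 0)  # +1 for the joining space
        if current and length + extra > max_chunk_size:
            chunks.append(" ".join(current))
            current, length = [], 0
            extra = len(word)
        current.append(word)
        length += extra
    if current:
        chunks.append(" ".join(current))
    return chunks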