Shreyas094 committed
Commit 5999644 (verified) · Parent: ff9e8ca

Update app.py

Files changed (1): app.py (+7 -5)
app.py CHANGED
@@ -77,7 +77,7 @@ def update_vectors(files, parser):
 
     return f"Vector store updated successfully. Processed {total_chunks} chunks from {len(files)} files using {parser}."
 
-def generate_chunked_response(prompt, model, max_tokens=1000, num_calls=3, temperature=0.2, should_stop=False):
+def generate_chunked_response(prompt, model, max_tokens=1000, num_calls=5, temperature=0.2, should_stop=False):
     print(f"Starting generate_chunked_response with {num_calls} calls")
     client = InferenceClient(model, token=huggingface_token)
     full_response = ""
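For orientation: a minimal sketch of the multi-call accumulation this function appears to implement, so raising num_calls from 3 to 5 simply buys more continuation passes. Only the InferenceClient construction and the chat_completion call are visible in the diff; the delta handling and the explicit huggingface_token parameter below are assumptions.

```python
# Minimal sketch (not the repo's exact code): accumulate streamed output
# across several chat_completion calls via huggingface_hub's InferenceClient.
from huggingface_hub import InferenceClient

def generate_chunked_response_sketch(prompt, model, huggingface_token,
                                     max_tokens=1000, num_calls=5, temperature=0.2):
    client = InferenceClient(model, token=huggingface_token)
    full_response = ""
    for _ in range(num_calls):  # each pass is another chance to extend the answer
        for message in client.chat_completion(
            messages=[{"role": "user", "content": prompt}],
            max_tokens=max_tokens,
            temperature=temperature,
            stream=True,
        ):
            chunk = message.choices[0].delta.content  # streamed token delta, may be None
            if chunk:
                full_response += chunk
    return full_response
```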
@@ -172,7 +172,7 @@ def respond(message, history, model, temperature, num_calls, use_web_search):
         for partial_response, _ in get_response_from_pdf(message, model, num_calls=num_calls, temperature=temperature):
             yield partial_response
 
-def get_response_with_search(query, model, num_calls=3, temperature=0.2):
+def get_response_with_search(query, model, num_calls=5, temperature=0.2):
     search_results = duckduckgo_search(query)
     context = "\n".join(f"{result['title']}\n{result['body']}\nSource: {result['href']}\n"
                         for result in search_results if 'body' in result)
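The duckduckgo_search() helper is the app's own; below is a plausible stand-in, assuming it wraps the duckduckgo_search package, whose text results carry 'title', 'body', and 'href' keys, followed by the same join/filter as in the diff.

```python
# Hypothetical stand-in for the app's duckduckgo_search() helper.
from duckduckgo_search import DDGS

def duckduckgo_search_sketch(query, max_results=5):
    with DDGS() as ddgs:
        return list(ddgs.text(query, max_results=max_results))

search_results = duckduckgo_search_sketch("retrieval augmented generation")
# Same construction as in the diff: skip results missing a 'body' field.
context = "\n".join(
    f"{result['title']}\n{result['body']}\nSource: {result['href']}\n"
    for result in search_results
    if 'body' in result
)
```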
@@ -197,7 +197,7 @@ After writing the document, please provide a list of sources used in your response
             main_content += chunk
             yield main_content, ""  # Yield partial main content without sources
 
-def get_response_from_pdf(query, model, num_calls=3, temperature=0.2):
+def get_response_from_pdf(query, model, num_calls=5, temperature=0.2):
     embed = get_embeddings()
     if os.path.exists("faiss_database"):
         database = FAISS.load_local("faiss_database", embed, allow_dangerous_deserialization=True)
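The FAISS lines above are the retrieval half of get_response_from_pdf; end to end it presumably looks like the sketch below. get_embeddings() and k=5 are assumptions, not part of the commit.

```python
# Sketch of the retrieval step using LangChain's FAISS wrapper, as in the diff.
import os
from langchain_community.vectorstores import FAISS

def retrieve_context_sketch(query, embed, k=5):
    if not os.path.exists("faiss_database"):
        return ""  # the real function likely reports a missing vector store instead
    database = FAISS.load_local(
        "faiss_database", embed, allow_dangerous_deserialization=True
    )
    docs = database.similarity_search(query, k=k)  # top-k most similar chunks
    return "\n".join(doc.page_content for doc in docs)
```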
@@ -211,7 +211,9 @@ def get_response_from_pdf(query, model, num_calls=3, temperature=0.2):
 
     prompt = f"""Using the following context from the PDF documents:
 {context_str}
-Write a detailed and complete response that answers the following user question: '{query}'"""
+Write a detailed and complete response that fully answers the following user question.
+Ensure your response covers all relevant information and is not cut off: '{query}'
+If the response is long, please continue until you have provided a comprehensive answer."""
 
     client = InferenceClient(model, token=huggingface_token)
 
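Filled in, the revised prompt reads as below; the context_str and query values here are placeholders, not from the commit.

```python
context_str = "...retrieved PDF chunks, joined with newlines..."  # placeholder
query = "What does the report conclude?"                          # placeholder
prompt = f"""Using the following context from the PDF documents:
{context_str}
Write a detailed and complete response that fully answers the following user question.
Ensure your response covers all relevant information and is not cut off: '{query}'
If the response is long, please continue until you have provided a comprehensive answer."""
print(prompt)
```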
@@ -219,7 +221,7 @@ Write a detailed and complete response that answers the following user question:
     for i in range(num_calls):
         for message in client.chat_completion(
             messages=[{"role": "user", "content": prompt}],
-            max_tokens=1000,
+            max_tokens=2000,
             temperature=temperature,
             stream=True,
         ):
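Downstream, this loop presumably feeds a generator shaped like the sketch below, matching the yield main_content, "" pattern shown in the third hunk; the function name is hypothetical.

```python
def stream_pdf_answer_sketch(client, prompt, num_calls=5, temperature=0.2):
    """Yield progressively longer partial answers as deltas arrive."""
    response = ""
    for _ in range(num_calls):
        for message in client.chat_completion(
            messages=[{"role": "user", "content": prompt}],
            max_tokens=2000,  # raised from 1000 in this commit
            temperature=temperature,
            stream=True,
        ):
            chunk = message.choices[0].delta.content
            if chunk:
                response += chunk
                yield response, ""  # partial main content; sources come later
```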
 