Manel committed on
Commit
779e8a4
·
verified ·
1 Parent(s): fa357ac

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -1
app.py CHANGED
@@ -125,7 +125,7 @@ def fetch_context(db, model, model_name, query, template, use_compressor=True):
125
  #logger.info(f"User Query : {query}")
126
  compressed_docs = compression_retriever.get_relevant_documents(query)
127
  #logger.info(f"Retrieved Compressed Docs : {compressed_docs}")
128
- print(f"Compressed context Generation Time: {time.time() - start_time}")")
129
  return compressed_docs
130
 
131
  docs = db.max_marginal_relevance_search(query)
@@ -266,6 +266,7 @@ if __name__=="__main__":
266
  device = "cuda" if torch.cuda.is_available() else "cpu"
267
  model_name = "llama" if device=="cpu" else "mistral"
268
  logger.info(f"Running {model_name} model for inference on {device}")
 
269
 
270
  # Loading and caching db and model
271
  #bar = st.progress(0, "Loading Database. Please wait.")
 
125
  #logger.info(f"User Query : {query}")
126
  compressed_docs = compression_retriever.get_relevant_documents(query)
127
  #logger.info(f"Retrieved Compressed Docs : {compressed_docs}")
128
+ print(f"Compressed context Generation Time: {time.time() - start_time}")
129
  return compressed_docs
130
 
131
  docs = db.max_marginal_relevance_search(query)
 
266
  device = "cuda" if torch.cuda.is_available() else "cpu"
267
  model_name = "llama" if device=="cpu" else "mistral"
268
  logger.info(f"Running {model_name} model for inference on {device}")
269
+ print(f"Running {model_name} model for inference on {device}")
270
 
271
  # Loading and caching db and model
272
  #bar = st.progress(0, "Loading Database. Please wait.")