Update app.py
added debug statements to observe code execution
app.py CHANGED
@@ -26,20 +26,21 @@ def store_document(text):
     print("storing document")

     embedding = embedding_model.encode([text])
+    print(f"embedding: \n{embedding}")
     index.add(np.array(embedding, dtype=np.float32))
     documents.append(text)

-    print(f"your document has been stored
+    print(f"your document has been stored")

     return "Document stored!"

 def retrieve_document(query):
-    print(f"retrieving doc based on {query}")
+    print(f"retrieving doc based on: \n{query}")

     query_embedding = embedding_model.encode([query])
     _, closest_idx = index.search(np.array(query_embedding, dtype=np.float32), 1)

-    print(f"retrieved: {documents[closest_idx[0][0]]}")
+    print(f"retrieved: \n{documents[closest_idx[0][0]]}")

     return documents[closest_idx[0][0]]
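The two functions above rely on module-level state that this hunk does not show. A minimal sketch of that setup, assuming a sentence-transformers encoder and a flat L2 FAISS index (the actual model name and index type are not visible in this diff):

import faiss
import numpy as np
from sentence_transformers import SentenceTransformer

# Assumed setup; the diff only shows that encode/add/search are used.
embedding_model = SentenceTransformer("all-MiniLM-L6-v2")  # hypothetical model choice
dim = embedding_model.get_sentence_embedding_dimension()
index = faiss.IndexFlatL2(dim)  # exact L2 search, no training step needed
documents = []                  # texts kept in the same order as the index vectors

With this setup, index.search returns a (distances, indices) pair, each of shape (1, k); with k=1, that is why retrieve_document indexes closest_idx[0][0] to recover the single nearest document.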
@@ -87,15 +88,18 @@ def chatbot(pdf_file, user_question):
         return f"Error retrieving document relevant to the query: {user_question} \n{e}"

     if doc:
+        print("found doc")
         # Split into smaller chunks
         chunks = split_text(doc)

         # Use only the first chunk (to optimize token usage)
         prompt = f"Based on this document, answer the question:\n\nDocument:\n{chunks[0]}\n\nQuestion: {user_question}"
+        print(f"prompt: \n{prompt}")
     else:
         prompt=user_question

     try:
+        print("asking")
         response = together.Completion.create(
             model="mistralai/Mistral-7B-Instruct-v0.1",
             prompt=prompt,