Spaces: Sleeping
app redone
app.py CHANGED
@@ -1,107 +1,90 @@
- #
- pip install requirments.txt

- # Import neccessary libraries
  import streamlit as st
  import pandas as pd
  import numpy as np
  import requests
- import
- from tqdm.auto import tqdm
- from transformers import BertModel, BertTokenizer
  from sklearn.metrics.pairwise import cosine_similarity

- #
-
-
-
-
-
-         self.batch_size = batch_size
-         self.df = self._download_and_process_documents(docs_url)
-         self.document_embeddings = self.compute_embeddings(self.df['text'].tolist())

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-         # Create the DataFrame
-         return pd.DataFrame(documents, columns=['course', 'section', 'question', 'text'])

-     def
-
-
-
-
-
-             result.append(batch)
-         return result

-     def
-
-
-
-

-         for
-
-
-             outputs = self.model(**encoded_input)
-             hidden_states = outputs.last_hidden_state
-             batch_embeddings = hidden_states.mean(dim=1)
-             batch_embeddings_np = batch_embeddings.cpu().numpy()
-             all_embeddings.append(batch_embeddings_np)

-
-         return

-
-         """
-         Perform a query to find the most relevant documents.
-         """
-         query_embedding = self.compute_embeddings([query_text])
-         similarities = cosine_similarity(query_embedding, self.document_embeddings).flatten()
-         top_n_indices = similarities.argsort()[-top_n:][::-1]
-         top_n_documents = self.df.iloc[top_n_indices]
-         return top_n_documents
-
- # Streamlit application
  st.title("FAQ Search Engine for DataTalks")

- #
-
-

  # Input fields for query and filters
  query = st.text_input("Enter your query:")
- courses = st.multiselect("Select course(s):", options=

  # Search button
  if st.button("Search"):
-
-
-     # Filter results by selected courses if any
      if courses:
-

-     # Display results
-     for i, result in enumerate(results
          st.write(f"### Result {i+1}")
          st.write(f"**Course**: {result['course']}")
          st.write(f"**Section**: {result['section']}")
          st.write(f"**Question**: {result['question']}")
          st.write(f"**Text**: {result['text']}")
-         st.write("")
-         st.markdown("---")

+ # Import necessary libraries

  import streamlit as st
  import pandas as pd
  import numpy as np
  import requests
+ from sklearn.feature_extraction.text import TfidfVectorizer
  from sklearn.metrics.pairwise import cosine_similarity

+ # Fetch the FAQ documents and flatten them into a DataFrame
+ def fetch_data():
+     docs_url = 'https://github.com/alexeygrigorev/llm-rag-workshop/raw/main/notebooks/documents.json'
+     docs_response = requests.get(docs_url)
+     documents_raw = docs_response.json()
+     documents = []

+     # Attach the course name to each document record
+     for course in documents_raw:
+         course_name = course['course']
+         for doc in course['documents']:
+             doc['course'] = course_name
+             documents.append(doc)
+
+     return pd.DataFrame(documents, columns=['course', 'section', 'question', 'text'])
+
+ # TF-IDF search over several text fields
+ class TextSearch:
+     def __init__(self, text_fields):
+         self.text_fields = text_fields
+         self.matrices = {}
+         self.vectorizers = {}

+     def fit(self, records, vectorizer_params={}):
+         self.df = pd.DataFrame(records)
+         # One vectorizer and one TF-IDF matrix per searchable field
+         for f in self.text_fields:
+             cv = TfidfVectorizer(**vectorizer_params)
+             X = cv.fit_transform(self.df[f])
+             self.vectorizers[f] = cv
+             self.matrices[f] = X

+     def search(self, query, filters={}, boost={}):
+         # Sum per-field cosine similarities, weighted by boost
+         score = np.zeros(len(self.df))
+         for f in self.text_fields:
+             b = boost.get(f, 1.0)
+             q = self.vectorizers[f].transform([query])
+             s = cosine_similarity(self.matrices[f], q).flatten()
+             score = score + b * s

+         # Zero out documents that do not match the filters
+         for field, value in filters.items():
+             mask = (self.df[field] == value).values
+             score = score * mask

+         # Return the five highest-scoring documents
+         idx = np.argsort(-score)[:5]
+         return self.df.iloc[idx].to_dict(orient='records')

+ # Main Streamlit application
  st.title("FAQ Search Engine for DataTalks")

+ # Load data
+ df = fetch_data()
+
+ # Initialize TextSearch
+ text_search = TextSearch(text_fields=['section', 'question', 'text'])
+ text_search.fit(df.to_dict(orient='records'), vectorizer_params={'stop_words': 'english', 'min_df': 3})

  # Input fields for query and filters
  query = st.text_input("Enter your query:")
+ courses = st.multiselect("Select course(s):", options=df['course'].unique())

  # Search button
  if st.button("Search"):
+     filters = {}
      if courses:
+         filters['course'] = courses[0] if len(courses) == 1 else courses
+     results = text_search.search(query, filters=filters, boost={'question': 3.0})

+     # Display results
+     for i, result in enumerate(results):
          st.write(f"### Result {i+1}")
          st.write(f"**Course**: {result['course']}")
          st.write(f"**Section**: {result['section']}")
          st.write(f"**Question**: {result['question']}")
          st.write(f"**Text**: {result['text']}")
+         st.write("")
+         st.markdown("---")
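
One caveat in the new search method: the filter mask is built with self.df[field] == value, which works when a single course is selected, but the multiselect hands over a Python list when two or more courses are chosen, and comparing a pandas Series to a list of a different length raises an error. A membership-aware mask would cover both cases; the sketch below is a possible replacement for the filter loop, not part of the committed code:

    # Hypothetical list-aware replacement for the filter loop in TextSearch.search
    for field, value in filters.items():
        if isinstance(value, (list, tuple)):
            # Match any of the selected values, e.g. several courses at once
            mask = self.df[field].isin(value).values
        else:
            mask = (self.df[field] == value).values
        score = score * mask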
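
For quick iteration it can also help to exercise the search logic without the Streamlit UI. Below is a minimal smoke-test sketch; it assumes fetch_data and TextSearch are in scope exactly as defined in the new app.py, and the query string and course name are only illustrative:

    # Standalone smoke test for the TF-IDF search (no Streamlit needed).
    # Assumes fetch_data() and TextSearch from app.py are in scope.
    df = fetch_data()
    ts = TextSearch(text_fields=['section', 'question', 'text'])
    ts.fit(df.to_dict(orient='records'),
           vectorizer_params={'stop_words': 'english', 'min_df': 3})

    results = ts.search('how do I install kafka',
                        filters={'course': 'data-engineering-zoomcamp'},
                        boost={'question': 3.0})
    for r in results:
        print(r['course'], '|', r['question'])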
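
Finally, the old version tried to install its dependencies from inside the script (pip install requirments.txt, with the filename misspelled and the -r flag missing). On a Space, dependencies belong in a requirements.txt file next to app.py; a minimal one based on the imports in the new version would be the following, with version pins left to the reader:

    streamlit
    pandas
    numpy
    requests
    scikit-learn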