import streamlit as st
import pandas as pd
from torch import cuda
from langchain_community.embeddings import HuggingFaceEmbeddings, HuggingFaceInferenceAPIEmbeddings
from langchain_community.vectorstores import Qdrant
from qdrant_client import QdrantClient
from langchain.retrievers import ContextualCompressionRetriever
from langchain.retrievers.document_compressors import CrossEncoderReranker
from langchain_community.cross_encoders import HuggingFaceCrossEncoder
from appStore.prep_data import process_giz_worldwide
from appStore.prep_utils import create_documents
from appStore.embed import hybrid_embed_chunks, get_local_qdrant
# Select the device to be used: GPU if available, otherwise CPU
device = 'cuda' if cuda.is_available() else 'cpu'
st.set_page_config(page_title="SEARCH IATI", layout='wide')
st.title("SEARCH IATI Database")
var = st.text_input("Enter keyword")
#################### Create the embeddings collection and save ######################
# The steps below need to be performed only once, then commented out to avoid
# unnecessary recomputation.
##### First we process and create the chunks for the relevant data source
#chunks = process_giz_worldwide()
##### Convert to langchain documents
#temp_doc = create_documents(chunks, 'chunks')
##### Embed and store the docs; if the collection already exists it needs to be updated
#hybrid_embed_chunks(docs=temp_doc, collection_name="giz_worldwide")
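# For reference, a dense embedder for this pipeline could be built as below
# (a sketch only: the model name is an assumption, and hybrid_embed_chunks may
# configure its own embeddings internally):
# embeddings = HuggingFaceEmbeddings(
#     model_name="sentence-transformers/all-mpnet-base-v2",  # assumed model
#     model_kwargs={"device": device},
# )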
################### Hybrid Search ######################################################
button = st.button("Search")
#found_docs = vectorstore.similarity_search(var)
#print(found_docs)
# results = get_context(vectorstore, f"find the relevant paragraphs for: {var}")
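# A possible next step for the reranking imports above, once a vectorstore is
# in hand (a sketch only: the cross-encoder model name and top_n are
# assumptions, and `vectorstore` refers to the store built in the handler below):
# reranker = HuggingFaceCrossEncoder(model_name="BAAI/bge-reranker-base")
# compressor = CrossEncoderReranker(model=reranker, top_n=5)
# rerank_retriever = ContextualCompressionRetriever(
#     base_compressor=compressor,
#     base_retriever=vectorstore.as_retriever(search_kwargs={"k": 20}),
# )
# results = rerank_retriever.invoke(f"find the relevant paragraphs for: {var}")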
if button:
    # NOTE: the signature of get_local_qdrant is assumed from the import above;
    # it is expected to return a langchain Qdrant vectorstore for the collection.
    vectorstore = get_local_qdrant(collection_name="giz_worldwide")
    results = vectorstore.similarity_search(var)
    st.write(f"Found {len(results)} results for query: {var}")
    for doc in results:
        st.subheader(f"{doc.metadata['id']}: {doc.metadata['title_main']}")
        st.caption(f"Status: {doc.metadata['status']}, Country: {doc.metadata['country_name']}")
        st.write(doc.page_content)
        st.divider()