import streamlit as st
import pandas as pd
from appStore.prep_data import process_giz_worldwide
from appStore.prep_utils import create_documents, get_client
from appStore.embed import hybrid_embed_chunks
from appStore.search import hybrid_search
from torch import cuda
# get the device to be used, either GPU or CPU
device = 'cuda' if cuda.is_available() else 'cpu'
st.set_page_config(page_title="SEARCH IATI", layout='wide')
st.title("SEARCH IATI Database")
var = st.text_input("Enter keyword")
#################### Create the embeddings collection and save ######################
# the steps below need to be performed only once; comment them out afterwards to avoid unnecessary compute
##### First we process and create the chunks for the relevant data source
#chunks = process_giz_worldwide()
##### Convert to langchain documents
#temp_doc = create_documents(chunks,'chunks')
##### Embed and store the docs; if the collection already exists, update it instead of recreating it
collection_name = "giz_worldwide"
#hybrid_embed_chunks(docs= temp_doc, collection_name = collection_name)
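# A possible guard (a sketch, assuming `get_client()` returns a qdrant-client
# instance, as the `get_collections()` call below suggests): skip the one-off
# embedding run when the collection already exists, instead of commenting the
# steps above in and out by hand.
#client = get_client()
#existing = [c.name for c in client.get_collections().collections]
#if collection_name not in existing:
#    chunks = process_giz_worldwide()
#    temp_doc = create_documents(chunks, 'chunks')
#    hybrid_embed_chunks(docs=temp_doc, collection_name=collection_name)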
################### Hybrid Search ######################################################
client = get_client()
print(client.get_collections())  # log the available collections for debugging
button = st.button("Search")
#found_docs = vectorstore.similarity_search(var)
#print(found_docs)
# results = get_context(vectorstore, f"find the relevant paragraphs for: {var}")
if button and var:  # run the search only when a query was entered
    results = hybrid_search(client, var, collection_name)
    st.write(f"Showing Top 10 results for query: {var}")
    # `results` is assumed to be a (semantic_hits, lexical_hits) pair
    st.write(f"Semantic: {len(results[0])}")
    st.write(results[0])
    st.write(f"Lexical: {len(results[1])}")
    st.write(results[1])
# for i in results:
#     st.subheader(str(i.metadata['id']) + ": " + str(i.metadata['title_main']))
#     st.caption(f"Status: {str(i.metadata['status'])}, Country: {str(i.metadata['country_name'])}")
#     st.write(i.page_content)
#     st.divider()
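# A minimal sketch for rendering the hybrid hits together (assumptions: each
# hit behaves like a LangChain Document, as the loop above suggests, and
# `metadata['id']` uniquely identifies a record); merge the semantic and
# lexical lists and drop duplicates before display:
# seen = set()
# for doc in results[0] + results[1]:
#     doc_id = doc.metadata.get('id')
#     if doc_id in seen:
#         continue
#     seen.add(doc_id)
#     st.subheader(f"{doc_id}: {doc.metadata.get('title_main', '')}")
#     st.write(doc.page_content)
#     st.divider()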