ppsingh committed
Commit 5a1352d · 1 Parent(s): e4b8dd5

getting client details

Files changed (1):
  app.py: +6 -17
app.py CHANGED
@@ -1,15 +1,8 @@
  import streamlit as st
  import pandas as pd
- from torch import cuda
- from langchain_community.embeddings import HuggingFaceEmbeddings, HuggingFaceInferenceAPIEmbeddings
- from langchain_community.vectorstores import Qdrant
- from qdrant_client import QdrantClient
- from langchain.retrievers import ContextualCompressionRetriever
- from langchain.retrievers.document_compressors import CrossEncoderReranker
- from langchain_community.cross_encoders import HuggingFaceCrossEncoder
  from appStore.prep_data import process_giz_worldwide
- from appStore.prep_utils import create_documents
- from appStore.embed import hybrid_embed_chunks, get_local_qdrant
+ from appStore.prep_utils import create_documents, get_client
+ from appStore.embed import hybrid_embed_chunks

  # get the device to be used eithe gpu or cpu
  device = 'cuda' if cuda.is_available() else 'cpu'
@@ -19,12 +12,6 @@ st.set_page_config(page_title="SEARCH IATI",layout='wide')
  st.title("SEARCH IATI Database")
  var=st.text_input("enter keyword")

- import pkg_resources
- installed_packages = pkg_resources.working_set
- list_ = ""
- for package in installed_packages:
-     list_ = list_ + f"{package.key}=={package.version}\n"
- st.download_button('Download Requirements', list_, file_name='ins_requirements.txt')
  #################### Create the embeddings collection and save ######################
  # the steps below need to be performed only once and then commented out any unnecssary compute over-run
  ##### First we process and create the chunks for relvant data source
@@ -32,10 +19,12 @@ st.download_button('Download Requirements', list_, file_name='ins_requirements.txt')
  ##### Convert to langchain documents
  #temp_doc = create_documents(chunks,'chunks')
  ##### Embed and store docs, check if collection exist then you need to update the collection
- #hybrid_embed_chunks(docs= temp_doc, collection_name = "giz_worldwide")
+ #collection_name = "giz_worldwide"
+ #hybrid_embed_chunks(docs= temp_doc, collection_name = collection_name)

  ################### Hybrid Search ######################################################
-
+ client = get_client()
+ print(client.get_collections())


  button=st.button("search")
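
For reference, the one-off ingestion flow that the commented-out block in app.py describes would run roughly as below. This is a sketch assembled from those comments; only the calls that appear in the file are taken verbatim, and the "chunks = process_giz_worldwide()" assignment for the first step is an assumption.

# Sketch of the one-off ingestion run described by the comments in app.py.
# Helper signatures come from the commented-out calls in the diff; the first
# assignment (chunks = process_giz_worldwide()) is assumed, not shown there.
from appStore.prep_data import process_giz_worldwide
from appStore.prep_utils import create_documents, get_client
from appStore.embed import hybrid_embed_chunks

chunks = process_giz_worldwide()               # process the GIZ worldwide data source into chunks (assumed return)
temp_doc = create_documents(chunks, 'chunks')  # convert the chunks into langchain documents
collection_name = "giz_worldwide"
hybrid_embed_chunks(docs=temp_doc, collection_name=collection_name)  # embed and store the docs

# afterwards the collection should appear in the client's collection list
client = get_client()
print(client.get_collections())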
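
The commit message refers to "getting client details" through the new get_client() helper imported from appStore.prep_utils. That helper is not part of this diff; given that the direct qdrant_client import was dropped from app.py, a minimal version might look like the sketch below. The on-disk storage path and the use of Qdrant's local mode are assumptions; only the function name comes from the diff.

# Hypothetical sketch of get_client() in appStore/prep_utils.py (not in this commit).
from qdrant_client import QdrantClient

def get_client():
    # Qdrant in local (on-disk) mode, so no separate server is needed;
    # the path below is an assumed location for the persisted collections.
    return QdrantClient(path="/data/local_qdrant")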