diazcalvi committed on
Commit
dad524c
·
1 Parent(s): c6a5e92
Files changed (5) hide show
  1. .gitignore +2 -0
  2. app.py +58 -0
  3. index.json +0 -0
  4. kion.json +0 -0
  5. requirements.txt +3 -0
.gitignore ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+
2
+ *.pdf
app.py ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import os
import streamlit as st
from llama_index import GPTSimpleVectorIndex, SimpleDirectoryReader, ServiceContext,LLMPredictor
from langchain.chat_models import ChatOpenAI
from llama_index.llm_predictor.chatgpt import ChatGPTLLMPredictor

# Path where the serialized vector index is persisted between app runs.
index_name = "./kion.json"
# Folder scanned for source documents when no serialized index exists yet.
documents_folder = "./documents"
@st.cache_resource
def initialize_index(index_name, documents_folder):
    """Load the vector index from disk, building it from documents on first run.

    Args:
        index_name: Path of the serialized GPTSimpleVectorIndex JSON file.
        documents_folder: Directory of source documents used to build the
            index when no serialized copy exists yet.

    Returns:
        A GPTSimpleVectorIndex backed by a gpt-3.5-turbo LLM predictor.
    """
    llm_predictor = LLMPredictor(llm=ChatOpenAI(temperature=0, model_name="gpt-3.5-turbo"))
    service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor)
    if os.path.exists(index_name):
        # Reuse the persisted index — avoids re-embedding every document.
        index = GPTSimpleVectorIndex.load_from_disk(index_name, service_context=service_context)
    else:
        documents = SimpleDirectoryReader(documents_folder).load_data()
        index = GPTSimpleVectorIndex.from_documents(documents, service_context=service_context)
        # Persist so subsequent runs (and Streamlit reruns) take the fast path above.
        index.save_to_disk(index_name)
    return index
@st.cache_data(max_entries=200, persist=True)
def query_index(_index, query_text):
    """Run *query_text* against the index and return the answer as a string.

    The leading underscore on ``_index`` tells Streamlit's cache not to try
    hashing the index object; results are cached keyed on the query text.
    """
    return str(_index.query(query_text))
# ---- Streamlit page: KION/Linde product knowledge-base Q&A ----
st.title("KION-Linde AI")
st.header("Welcome to the Knowledge Base Vector")
st.write(
    "Enter a query about any KION/Linde products. Your query will be answered "
    "using the indexed product documentation as context, with embeddings from "
    "text-ada-002 and LLM completions from gpt-3.5-turbo. You can read more "
    "about Llama Index and how this works in "
    "[our docs!](https://gpt-index.readthedocs.io/en/latest/index.html)"
)

index = None
# SECURITY: never hard-code an API key in source — the previous revision
# shipped a live key here, which must be considered leaked and revoked.
# Read it from the environment, falling back to asking the user.
api_key = os.getenv("OPENAI_API_KEY") or st.text_input(
    "Enter your OpenAI API key here:", type="password"
)
if api_key:
    os.environ['OPENAI_API_KEY'] = api_key
    index = initialize_index(index_name, documents_folder)

if index is None:
    st.warning("Please enter your api key first.")

text = st.text_input("Query text:", value="What type of tyres uses the N20?")

# Guard on `index` as well: without it, clicking the button before a key is
# provided crashed on a None index.
if st.button("Run Query") and text is not None and index is not None:
    response = query_index(index, "Act as a KION equipment expert:" + text)
    st.markdown(response)

# Token-usage readout is only meaningful once the index (and its service
# context) exists; previously this dereferenced a possibly-None index.
if index is not None:
    llm_col, embed_col = st.columns(2)
    with llm_col:
        st.markdown(f"LLM Tokens Used: {index.service_context.llm_predictor._last_token_usage}")

    with embed_col:
        st.markdown(f"Embedding Tokens Used: {index.service_context.embed_model._last_token_usage}")
index.json ADDED
The diff for this file is too large to render. See raw diff
 
kion.json ADDED
The diff for this file is too large to render. See raw diff
 
requirements.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ langchain==0.0.123
2
+ llama-index==0.5.1
3
+ streamlit==1.19.0