andreped committed
Commit c04c29a · unverified · 2 Parent(s): c58e0ef ef51884

Merge pull request #1 from andreped/dev

Added OpenAI key to secrets; improved verbosity; added Redirect class for stdout logging

Files changed (4):
  1. .gitignore +2 -0
  2. app.py +13 -8
  3. chatbot/{utils.py → data.py} +22 -13
  4. chatbot/redirect.py +165 -0
.gitignore CHANGED
@@ -2,3 +2,5 @@ venv/
 data/
 .DS_Store
 config.json
+.streamlit/
+secrets.toml
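
For reference, this commit moves everything that was previously read from config.json into Streamlit's secrets store, which is why `.streamlit/` and `secrets.toml` are now git-ignored. A minimal sketch of the expected `.streamlit/secrets.toml`: the key names are taken from the `st.secrets[...]` lookups below, while the values shown are illustrative placeholders, not part of this commit.

# .streamlit/secrets.toml -- illustrative placeholders, not real credentials
OPENAI_API_KEY = "<azure-openai-api-key>"
OPENAI_API_BASE = "https://<your-resource>.openai.azure.com/"
OPENAI_API_VERSION = "<api-version, e.g. 2023-05-15>"
ENGINE = "<gpt-35-turbo deployment name>"
ENGINE_EMBEDDING = "<text-embedding-ada-002 deployment name>"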
app.py CHANGED
@@ -1,9 +1,12 @@
-import json
+import os
 
 import streamlit as st
 
-from chatbot.utils import download_test_data
-from chatbot.utils import load_data
+from chatbot.data import download_test_data
+from chatbot.data import load_data
+
+# add OpenAI API key to environmental variables
+os.environ["OPENAI_API_KEY"] = st.secrets["OPENAI_API_KEY"]
 
 # Initialize message history
 st.header("Chat with André's research 💬 📚")
@@ -11,15 +14,17 @@ st.header("Chat with André's research 💬 📚")
 if "messages" not in st.session_state.keys():  # Initialize the chat message history
     st.session_state.messages = [{"role": "assistant", "content": "Ask me a question about André's research!"}]
 
-# Load config values
-with open(r"config.json") as config_file:
-    config_details = json.load(config_file)
-
 
 def main():
+    # setup logger sidebar
+    # st.sidebar.text("Standard output log:")
+    # _sidebar_out = st.sidebar.empty()
+    # with rd.stdout(to=_sidebar_out, format='text'):
+    #     print("test")
+
     # setup dataset
     download_test_data()
-    index = load_data(config_details)
+    index = load_data()
     chat_engine = index.as_chat_engine(chat_mode="condense_question", verbose=True)
 
     if prompt := st.chat_input("Your question"):  # Prompt for user input and save to chat history
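
The commented-out logger block above hints at how the new `chatbot/redirect.py` module (added below) is meant to be wired in. A runnable version of that sketch, assuming the module is imported as `rd`:

import streamlit as st

import chatbot.redirect as rd

st.sidebar.text("Standard output log:")
_sidebar_out = st.sidebar.empty()

# Anything printed inside this block is mirrored into the sidebar container.
with rd.stdout(to=_sidebar_out, format="text"):
    print("test")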
chatbot/{utils.py → data.py} RENAMED
@@ -14,37 +14,46 @@ from llama_index.llms import AzureOpenAI
 def download_test_data():
     # url = f"https://drive.google.com/drive/folders/uc?export=download&confirm=pbef&id={file_id}"
     url = "https://drive.google.com/drive/folders/1uDSAWtLvp1YPzfXUsK_v6DeWta16pq6y"
-    with st.spinner(text="Downloading test data. Might take a few seconds."):
+    with st.spinner(text="Downloading test data. This might take a minute."):
+        # @TODO: replace gdown solution with a custom solution compatible with GitHub and
+        # use st.progress to get more verbose during download
         download_folder(url=url, quiet=False, use_cookies=False, output="./data/")
 
 
 @st.cache_resource(show_spinner=False)
-def load_data(config_details):
+def load_data():
     with st.spinner(text="Loading and indexing the provided dataset – hang tight! This may take a few seconds."):
         documents = SimpleDirectoryReader(input_dir="./data", recursive=True).load_data()
+
+    with st.spinner(text="Setting up Azure OpenAI..."):
         llm = AzureOpenAI(
             model="gpt-3.5-turbo",
-            engine=config_details["ENGINE"],
+            engine=st.secrets["ENGINE"],
             temperature=0.5,
-            api_key=os.getenv("OPENAI_API_KEY"),
-            api_base=config_details["OPENAI_API_BASE"],
+            api_key=os.environ["OPENAI_API_KEY"],
+            api_base=st.secrets["OPENAI_API_BASE"],
             api_type="azure",
-            api_version=config_details["OPENAI_API_VERSION"],
+            api_version=st.secrets["OPENAI_API_VERSION"],
             system_prompt="You are an expert on André's research and your job is to answer"
             "technical questions. Assume that all questions are related to"
-            "André's research. Keep your answers technical and based on facts"
-            " do not hallucinate features.",
+            "André's research. Keep your answers technical and based on facts;"
+            " do not hallucinate features.",
         )
+
+    with st.spinner(text="Setting up OpenAI Embedding..."):
         # You need to deploy your own embedding model as well as your own chat completion model
         embed_model = OpenAIEmbedding(
             model="text-embedding-ada-002",
-            deployment_name=config_details["ENGINE_EMBEDDING"],
-            api_key=os.getenv("OPENAI_API_KEY"),
-            api_base=config_details["OPENAI_API_BASE"],
+            deployment_name=st.secrets["ENGINE_EMBEDDING"],
+            api_key=os.environ["OPENAI_API_KEY"],
+            api_base=st.secrets["OPENAI_API_BASE"],
             api_type="azure",
-            api_version=config_details["OPENAI_API_VERSION"],
+            api_version=st.secrets["OPENAI_API_VERSION"],
+            embed_batch_size=10,  # set to low value to reduce rate limit -> may degrade response runtime
         )
-    service_context = ServiceContext.from_defaults(llm=llm, embed_model=embed_model)
+
+    with st.spinner(text="Setting up Vector Store Index..."):
+        service_context = ServiceContext.from_defaults(llm=llm, embed_model=embed_model)  # , chunk_size=512)
         set_global_service_context(service_context)
         index = VectorStoreIndex.from_documents(documents)  # , service_context=service_context)
         return index
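
The @TODO in download_test_data() proposes replacing the gdown-based folder download with a custom downloader that reports progress through st.progress. One possible sketch, assuming the test data were repackaged as a single zip archive at some URL; DATA_URL and the helper name are hypothetical and not part of this commit:

import os
import zipfile

import requests
import streamlit as st

DATA_URL = "https://example.com/test-data.zip"  # hypothetical archive location

def download_test_data_with_progress(url: str = DATA_URL, output: str = "./data/"):
    os.makedirs(output, exist_ok=True)
    archive_path = os.path.join(output, "data.zip")
    response = requests.get(url, stream=True, timeout=60)
    response.raise_for_status()
    total = int(response.headers.get("content-length", 0))
    progress_bar = st.progress(0.0)
    downloaded = 0
    with open(archive_path, "wb") as f:
        for chunk in response.iter_content(chunk_size=8192):
            f.write(chunk)
            downloaded += len(chunk)
            if total:
                # st.progress accepts a float in [0.0, 1.0]
                progress_bar.progress(min(downloaded / total, 1.0))
    with zipfile.ZipFile(archive_path) as zf:
        zf.extractall(output)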
chatbot/redirect.py ADDED
@@ -0,0 +1,165 @@
+import contextlib
+import io
+import re
+import sys
+
+import streamlit as st
+
+
+class _Redirect:
+    """
+    Based on: https://gist.github.com/schaumb/037f139035d93cff3ad9f4f7e5f739ce
+    Also see: https://github.com/streamlit/streamlit/issues/268#issuecomment-810478208
+    """
+
+    class IOStuff(io.StringIO):
+        def __init__(self, trigger, max_buffer, buffer_separator, regex, dup=None):
+            super().__init__()
+            self._trigger = trigger
+            self._max_buffer = max_buffer
+            self._buffer_separator = buffer_separator
+            self._regex = regex and re.compile(regex)
+            self._dup = dup
+
+        def write(self, __s: str) -> int:
+            if self._max_buffer:
+                concatenated_len = super().tell() + len(__s)
+                if concatenated_len > self._max_buffer:
+                    rest = self.get_filtered_output()[concatenated_len - self._max_buffer :]
+                    if self._buffer_separator is not None:
+                        rest = rest.split(self._buffer_separator, 1)[-1]
+                    super().seek(0)
+                    super().write(rest)
+                    super().truncate(super().tell() + len(__s))
+            res = super().write(__s)
+            if self._dup is not None:
+                self._dup.write(__s)
+            self._trigger(self.get_filtered_output())
+            return res
+
+        def get_filtered_output(self):
+            if self._regex is None or self._buffer_separator is None:
+                return self.getvalue()
+
+            return self._buffer_separator.join(
+                filter(self._regex.search, self.getvalue().split(self._buffer_separator))
+            )
+
+        def print_at_end(self):
+            self._trigger(self.get_filtered_output())
+
+    def __init__(
+        self,
+        stdout=None,
+        stderr=False,
+        format=None,
+        to=None,
+        max_buffer=None,
+        buffer_separator="\n",
+        regex=None,
+        duplicate_out=False,
+    ):
+        self.io_args = {
+            "trigger": self._write,
+            "max_buffer": max_buffer,
+            "buffer_separator": buffer_separator,
+            "regex": regex,
+        }
+        self.redirections = []
+        self.st = None
+        self.stderr = stderr is True
+        self.stdout = stdout is True or (stdout is None and not self.stderr)
+        self.format = format or "code"
+        self.to = to
+        self.fun = None
+        self.duplicate_out = duplicate_out or None
+        self.active_nested = None
+
+        if not self.stdout and not self.stderr:
+            raise ValueError("one of stdout or stderr must be True")
+
+        if self.format not in ["text", "markdown", "latex", "code", "write"]:
+            raise ValueError(
+                f"format needs to be one of the following: {', '.join(['text', 'markdown', 'latex', 'code', 'write'])}"
+            )
+
+        if self.to and (not hasattr(self.to, "text") or not hasattr(self.to, "empty")):
+            raise ValueError(f"'to' is not a streamlit container object: {self.to}")
+
+    def __enter__(self):
+        if self.st is not None:
+            if self.to is None:
+                if self.active_nested is None:
+                    self.active_nested = self(
+                        format=self.format,
+                        max_buffer=self.io_args["max_buffer"],
+                        buffer_separator=self.io_args["buffer_separator"],
+                        regex=self.io_args["regex"],
+                        duplicate_out=self.duplicate_out,
+                    )
+                return self.active_nested.__enter__()
+            else:
+                raise Exception("Already entered")
+        to = self.to or st
+
+        to.text(
+            f"Redirected output from "
+            f"{'stdout and stderr' if self.stdout and self.stderr else 'stdout' if self.stdout else 'stderr'}"
+            f"{' [' + self.io_args['regex'] + ']' if self.io_args['regex'] else ''}"
+            f":"
+        )
+        self.st = to.empty()
+        self.fun = getattr(self.st, self.format)
+
+        io_obj = None
+
+        def redirect(redirect_context, to_duplicate):
+            nonlocal io_obj
+            io_obj = _Redirect.IOStuff(dup=self.duplicate_out and to_duplicate, **self.io_args)
+            redirection = redirect_context(io_obj)
+            self.redirections.append((redirection, io_obj))
+            redirection.__enter__()
+
+        if self.stderr:
+            redirect(contextlib.redirect_stderr, sys.stderr)
+        if self.stdout:
+            redirect(contextlib.redirect_stdout, sys.stdout)
+
+        return io_obj
+
+    def __call__(self, to=None, format=None, max_buffer=None, buffer_separator="\n", regex=None, duplicate_out=False):
+        return _Redirect(
+            self.stdout,
+            self.stderr,
+            format=format,
+            to=to,
+            max_buffer=max_buffer,
+            buffer_separator=buffer_separator,
+            regex=regex,
+            duplicate_out=duplicate_out,
+        )
+
+    def __exit__(self, *exc):
+        if self.active_nested is not None:
+            nested = self.active_nested
+            if nested.active_nested is None:
+                self.active_nested = None
+            return nested.__exit__(*exc)
+
+        res = None
+        for redirection, io_obj in reversed(self.redirections):
+            res = redirection.__exit__(*exc)
+            io_obj.print_at_end()
+
+        self.redirections = []
+        self.st = None
+        self.fun = None
+        return res
+
+    def _write(self, data):
+        self.fun(data)
+
+
+stdout = _Redirect(max_buffer=1, buffer_separator="\n")
+stderr = _Redirect(stderr=True)
+stdouterr = _Redirect(stdout=True, stderr=True)
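
For context, the module exposes three ready-made redirectors at the bottom (stdout, stderr, stdouterr). A minimal usage sketch; the `to` and `format` arguments mirror the commented-out example in app.py:

import streamlit as st

import chatbot.redirect as rd

# Default: capture stdout and render it on the main page as a code block.
with rd.stdout:
    print("hello from stdout")

# Target a specific container and render as plain text instead.
box = st.empty()
with rd.stdouterr(to=box, format="text"):
    print("stdout and stderr both land in box")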