enricorampazzo committed on
Commit 887083d · 1 Parent(s): 5d399c2

now it should work. Refactored UI into smaller functions, added configurability for repository implementation and model
app.py CHANGED
@@ -1,87 +1,41 @@
-import os
-import uuid
-from pathlib import Path
-
-from llm_manager.llm_parser import LlmParser
 from prompts.prompts_manager import PromptsManager
 
 from repository.repository import get_repository
 from repository.repository_abc import ModelRoles, Model
-from form.form import build_form_data_from_answers, write_pdf_form
 
 import streamlit as st
 
-user_msg = "Please describe what you need to do. To get the best results try to answer all the following questions:"
-
+from ui_manager import build_ui_for_initial_state, build_ui_for_parsing_answers, build_ui_for_ask_again, \
+    build_ui_for_check_category, build_ui_for_form_created
+from utils.env_utils import build_repo_from_environment
+
+user_msg = "Please describe what you need to do. To get the best results try to answer all the following questions:"
 
 
-def check_for_missing_answers(parsed_questions: dict[int, str]):
-    return [k for k in parsed_questions if parsed_questions[k] is None]
-
 def use_streamlit():
     pm = PromptsManager()
     help_ = f"{user_msg}\n\n" + '\n'.join(pm.questions)
-    repository = get_repository("ondemand", Model("ondemand-gpt-3.5-turbo", ModelRoles("system", "user", "assistant")))
+    repository = (build_repo_from_environment(pm.system_prompt) or
+                  get_repository("testing",
+                                 Model("fakeModel", ModelRoles("a", "b", "c"))))
+    st.markdown("## Dubai Asset Management red tape cutter")
     if not st.session_state.get("step"):
-        with st.form("Please describe your request"):
-            user_input = st.text_area("Your input", height=700, label_visibility="hidden", placeholder=help_, help=help_)
-            signature = st.file_uploader("Your signature", key="file_upload")
-            st.session_state["signature"] = signature
-            st.session_state["session_id"] = str(uuid.uuid4())
-            button = st.form_submit_button()
-
-        if button:
-            llama3 = "meta-llama/Meta-Llama-3-8B-Instruct"
-            # repository = get_repository("intel_npu", Model(llama3,
-            #                                                ModelRoles("system", "user", "assistant")),
-            #                             pm.system_prompt, Path("llm_log.txt"))
-            st.session_state["step"] = "parsing_answers"
+        build_ui_for_initial_state(help_)
+        llama3 = "meta-llama/Meta-Llama-3-8B-Instruct"
+        # repository = get_repository("intel_npu", Model(llama3,
+        #                                                ModelRoles("system", "user", "assistant")),
+        #                             pm.system_prompt, Path("llm_log.txt"))
+
     if st.session_state.get("step") == "parsing_answers":
-        with st.status("initialising LLM"):
-            repository.init()
-        with st.status("waiting for LLM"):
-            answer = repository.send_prompt(pm.verify_user_input_prompt(user_input))
-            st.write(f"answers from LLM: {answer['content']}")
-        with st.status("Checking for missing answers"):
-            st.session_state["answers"] = LlmParser.parse_verification_prompt_answers(answer['content'])
-            st.session_state["missing_answers"] = check_for_missing_answers(st.session_state["answers"])
-        if not st.session_state.get("missing_answers"):
-            st.session_state["step"] = "check_category"
-        else:
-            st.session_state["step"] = "ask_again"
+        build_ui_for_parsing_answers(repository, pm)
 
     if st.session_state.get("step") == "ask_again":
-        with st.form("form1"):
-            for ma in st.session_state["missing_answers"]:
-                st.text_input(pm.questions[ma].lower(), key=ma)
-            submitted = st.form_submit_button("Submit answers")
-            if submitted:
-                st.session_state["step"] = "check_category"
-                for ma in st.session_state["missing_answers"]:
-                    st.session_state["answers"][ma] = st.session_state[ma]
-
+        build_ui_for_ask_again(pm)
     if st.session_state.get("step") == "check_category":
-        with st.status("finding the work categories applicable to your work"):
-            answer = repository.send_prompt(pm.get_work_category(st.session_state["answers"][1]))
-            categories = LlmParser.parse_get_categories_answer(answer['content'])
-
-        with st.status("categories found, creating PDF form"):
-            form_filename = f"{st.session_state['session_id']}_form.pdf"
-            st.session_state["form_filename"] = form_filename
-            form_data, filename = build_form_data_from_answers(st.session_state["answers"], categories,
-                                                               st.session_state.get("signature"))
-            st.session_state["pdf_form"] = write_pdf_form(form_data)
-            st.session_state["pdf_form_filename"] = filename
-            st.session_state["step"] = "form_created"
+        build_ui_for_check_category(repository, pm)
     if st.session_state.get("step") == "form_created":
-        st.download_button("download form", st.session_state["pdf_form"],
-                           file_name=st.session_state["pdf_form_filename"], mime="application/pdf")
-        start_over_button = st.button("Start over")
-        if start_over_button:
-            del st.session_state["step"]
-            del st.session_state["pdf_form"]
-            del st.session_state["pdf_form_filename"]
+        build_ui_for_form_created()
 
 
 use_streamlit()
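Editor's note: the refactor leans on Streamlit's rerun model — each build_ui_* function advances a "step" flag in st.session_state and calls st.rerun(), so the next pass through the script renders the next screen. A minimal, self-contained sketch of the same pattern (step names here are illustrative, not the app's):

import streamlit as st

# Each handler mutates st.session_state["step"] and reruns the script,
# so the top-level if-chain acts as a simple state machine.
def show_first_screen():
    if st.button("Continue"):
        st.session_state["step"] = "second"
        st.rerun()  # re-executes the script from the top

def show_second_screen():
    st.write("Second screen")
    if st.button("Start over"):
        del st.session_state["step"]
        st.rerun()

if not st.session_state.get("step"):
    show_first_screen()
if st.session_state.get("step") == "second":
    show_second_screen()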
llm_manager/llm_parser.py CHANGED
@@ -1,10 +1,7 @@
-import re
-
 from form.form import work_categories
 
 
 class LlmParser:
-    _verification_prompt_answers_regex = re.compile(r"\|\s*([^|]*)\s?", re.MULTILINE)
 
     @classmethod
    def parse_verification_prompt_answers(cls, llm_answer) -> dict[int, str | None]:
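Editor's note: the pipe-delimited regex is dropped here. Judging by the canned reply in repository/repository.py below, answers now arrive as question/answer pairs separated by blank lines; a hypothetical sketch of what such a parser could look like (parse_answers_sketch is illustrative, not the real method body):

# Hypothetical blank-line-based parsing, inferred from the
# "question\n\nanswer" fixture in repository/repository.py.
def parse_answers_sketch(llm_answer: str) -> dict[int, str | None]:
    parts = [p.strip() for p in llm_answer.split("\n\n") if p.strip()]
    answers = parts[1::2]  # even slots are the echoed questions
    return {i: (None if a.lower() == "null" else a) for i, a in enumerate(answers)}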
repository/ondemand.py CHANGED
@@ -8,7 +8,8 @@ from repository.repository_abc import Repository, Model, ModelRoles
 
 class OndemandRepository(Repository):
     session_url = "https://api.on-demand.io/chat/v1/sessions"
-    def __init__(self, model_info: Model, system_message: str = None, log_to_file:Path=None):
+
+    def __init__(self, model_info: Model, system_message: str = None, log_to_file: Path = None):
         self.model_info = model_info
         self.system_message = system_message
         self.log_to_file = log_to_file
@@ -17,7 +18,9 @@ class OndemandRepository(Repository):
     def init(self):
         if not self.session_id:
             headers = {"apiKey": os.getenv("API_KEY")}
-            session_body = {"pluginIds": [], "externalUserId": "virtualDAM", "modelConfigs":{"temperature":0.2}}
+            session_body = {"pluginIds": [], "externalUserId": "virtualDAM", "modelConfigs":
+                            {"temperature": 0.2, "fulfillmentPrompt": self.system_message}
+                            }
             response = requests.post(self.session_url, headers=headers, json=session_body)
             response_data = response.json()
             self.session_id = response_data["data"]["id"]
@@ -36,4 +39,4 @@ class OndemandRepository(Repository):
         return {"content": response.json()["data"]["answer"]}
 
     def get_message_history(self) -> list[dict[str, str]]:
-        return []
+        return []
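Editor's note: the functional change here is that the system prompt now reaches the on-demand.io session as a fulfillmentPrompt model config. Isolated from the class, the session handshake amounts to something like the sketch below; the field names (pluginIds, externalUserId, modelConfigs, fulfillmentPrompt) come from the diff itself, while the error handling is added for the sketch only:

import os
import requests

def create_session_sketch(system_message: str) -> str:
    # Same request init() builds above, reduced to a standalone function.
    headers = {"apiKey": os.getenv("API_KEY")}
    body = {
        "pluginIds": [],
        "externalUserId": "virtualDAM",
        "modelConfigs": {"temperature": 0.2, "fulfillmentPrompt": system_message},
    }
    response = requests.post("https://api.on-demand.io/chat/v1/sessions",
                             headers=headers, json=body)
    response.raise_for_status()  # not in the original; fail loudly in the sketch
    return response.json()["data"]["id"]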
repository/repository.py CHANGED
@@ -22,10 +22,6 @@ def get_repository(implementation: str, model: Model, system_msg: str = None, lo
         return OndemandRepository(model, system_msg, log_to_file)
     if "testing" == implementation:
         return TestingRepository(prompts_answers=[
-            {
-                "role": "assistant",
-                "content": "OK"
-            },
             {
                 "role": "assistant",
                 "content": "What is my full name?\n\nnull\n\nWhat is the nature of the work I need to do?\n\nPest control\n\nIn which community is the work taking place?\n\nJBR\n\nIn which building?\n\nnull\n\nIn which unit/apartment number?\n\nnull\n\nAm I the owner or the tenant?\n\nTenant\n\nIn which date is the work taking place?\n\n12/09/2024\n\nIn which date will the work finish?\n\n12/09/2024\n\nWhat is my contact number?\n\nnull\n\nWhat is the name of the contracting company?\n\nnull\n\nWhat is the contact number of the contracting company?\n\nnull\n\nWhat is the email of the contracting company?\n\nnull\n\nWhat is my email?\n\nnull"
ui_manager.py ADDED
@@ -0,0 +1,65 @@
+import streamlit as st
+
+from form.form import build_form_data_from_answers, write_pdf_form
+from llm_manager.llm_parser import LlmParser
+from utils.parsing_utils import check_for_missing_answers
+
+
+def build_ui_for_initial_state(help_):
+    with st.form("Please describe your request"):
+        user_input = st.text_area("Your input", height=700, label_visibility="hidden", placeholder=help_, help=help_)
+        signature = st.file_uploader("Your signature", key="file_upload")
+        st.session_state["signature"] = signature
+        submit_button = st.form_submit_button()
+        if submit_button:
+            st.session_state["user_input"] = user_input
+            st.session_state["step"] = "parsing_answers"
+            st.rerun()
+def build_ui_for_parsing_answers(repository, pm):
+    with st.status("initialising LLM"):
+        repository.init()
+    with st.status("waiting for LLM"):
+        answer = repository.send_prompt(pm.verify_user_input_prompt(st.session_state["user_input"]))
+        st.write(f"answers from LLM: {answer['content']}")
+    with st.status("Checking for missing answers"):
+        st.session_state["answers"] = LlmParser.parse_verification_prompt_answers(answer['content'])
+        st.session_state["missing_answers"] = check_for_missing_answers(st.session_state["answers"])
+    if not st.session_state.get("missing_answers"):
+        st.session_state["step"] = "check_category"
+    else:
+        st.session_state["step"] = "ask_again"
+    st.rerun()
+
+def build_ui_for_ask_again(pm):
+    with st.form("form1"):
+        for ma in st.session_state["missing_answers"]:
+            st.text_input(pm.questions[ma].lower(), key=ma)
+        submitted = st.form_submit_button("Submit answers")
+        if submitted:
+            for ma in st.session_state["missing_answers"]:
+                st.session_state["answers"][ma] = st.session_state[ma]
+            st.session_state["step"] = "check_category"
+            st.rerun()
+def build_ui_for_check_category(repository, pm):
+    with st.status("finding the work categories applicable to your work"):
+        answer = repository.send_prompt(pm.get_work_category(st.session_state["answers"][1]))
+        categories = LlmParser.parse_get_categories_answer(answer['content'])
+
+    with st.status("categories found, creating PDF form"):
+        form_data, filename = build_form_data_from_answers(st.session_state["answers"], categories,
+                                                           st.session_state.get("signature"))
+        pdf_form = write_pdf_form(form_data)
+        pdf_form_filename = filename
+        st.session_state["pdf_form"] = pdf_form
+        st.session_state["pdf_form_filename"] = pdf_form_filename
+        st.session_state["step"] = "form_created"
+    st.rerun()
+def build_ui_for_form_created():
+    st.download_button("download form", st.session_state["pdf_form"],
+                       file_name=st.session_state["pdf_form_filename"], mime="application/pdf")
+    start_over_button = st.button("Start over")
+    if start_over_button:
+        del st.session_state["step"]
+        del st.session_state["pdf_form"]
+        del st.session_state["pdf_form_filename"]
+        st.rerun()
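Editor's note: ui_manager.py imports check_for_missing_answers from utils.parsing_utils, a file not shown in this commit. Presumably it carries the helper removed from app.py; a sketch of that presumed content:

# Presumed content of utils/parsing_utils.py (not shown in this diff):
# the helper moved out of app.py, returning indices of unanswered questions.
def check_for_missing_answers(parsed_questions: dict[int, str | None]) -> list[int]:
    return [k for k in parsed_questions if parsed_questions[k] is None]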
utils/env_utils.py CHANGED
@@ -1,3 +1,22 @@
 import os
+
+import repository.repository
+from repository.repository_abc import Model, ModelRoles
+
+
 def in_hf() -> bool:
     return os.getenv("env") == "hf"
+
+
+def build_repo_from_environment(system_prompt: str):
+    implementation = os.getenv("implementation")
+    model_name = os.getenv("model_name")
+
+    if implementation:
+        return repository.repository.get_repository(implementation, Model(model_name, ModelRoles("system",
+                                                                                                  "user",
+                                                                                                  "assistant")),
+                                                    system_prompt)
+    else:
+        return None
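Editor's note: this is the configurability the commit message refers to — the backend is now chosen at deploy time via the "implementation" and "model_name" environment variables, with the testing repository as the fallback in app.py. An illustrative local check (variable values are examples; "ondemand-gpt-3.5-turbo" is the model id the old app.py hardcoded):

import os

os.environ["implementation"] = "ondemand"            # any key get_repository accepts
os.environ["model_name"] = "ondemand-gpt-3.5-turbo"  # passed through to Model as-is

from prompts.prompts_manager import PromptsManager
from utils.env_utils import build_repo_from_environment

repo = build_repo_from_environment(PromptsManager().system_prompt)
assert repo is not None  # with "implementation" unset this returns None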