# DAMHelper / app.py
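# Streamlit entry point for DAMHelper; launched with `streamlit run app.py`.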
from prompts.prompts_manager import PromptsManager
from repository.repository import get_repository
from repository.repository_abc import ModelRoles, Model
import streamlit as st
from ui_manager import (build_ui_for_initial_state, build_ui_for_parsing_answers, build_ui_for_ask_again,
                        build_ui_for_check_category, build_ui_for_form_created)
from utils.env_utils import build_repo_from_environment

user_msg = "Please describe what you need to do. To get the best results try to answer all the following questions:"


def use_streamlit():
    pm = PromptsManager()
    help_ = f"{user_msg}\n\n" + '\n'.join(pm.questions)
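    # Prefer a repository configured through environment variables; otherwise fall
    # back to the "testing" repository backed by a fake model with placeholder roles.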
    repository = (build_repo_from_environment(pm.system_prompt) or
                  get_repository("testing",
                                 Model("fakeModel", ModelRoles("a", "b", "c"))))
    st.markdown("## Dubai Asset Management red tape cutter")
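    # Each build_ui_* handler renders the page for the current workflow step,
    # which is tracked in st.session_state["step"].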
    if not st.session_state.get("step"):
        build_ui_for_initial_state(help_)
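    # Alternative backend, kept commented out below: run Meta-Llama-3-8B-Instruct
    # through the "intel_npu" repository instead of the environment-configured one.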
    llama3 = "meta-llama/Meta-Llama-3-8B-Instruct"
    # repository = get_repository("intel_npu", Model(llama3,
    #                                                ModelRoles("system", "user", "assistant")),
    #                             pm.system_prompt, Path("llm_log.txt"))  # uncommenting needs `from pathlib import Path`
    if st.session_state.get("step") == "parsing_answers":
        build_ui_for_parsing_answers(repository, pm)
    if st.session_state.get("step") == "ask_again":
        build_ui_for_ask_again(pm)
    if st.session_state.get("step") == "check_category":
        build_ui_for_check_category(repository, pm)
    if st.session_state.get("step") == "form_created":
        build_ui_for_form_created()


use_streamlit()