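"""Streamlit front end for the Dubai Asset Management red tape cutter.

Picks an LLM repository backend (from the environment, or a fake "testing"
backend as a fallback) and walks the user through a multi-step form flow.
"""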
from prompts.prompts_manager import PromptsManager

from repository.repository import get_repository
from repository.repository_abc import ModelRoles, Model

import streamlit as st

from ui_manager import (
    build_ui_for_initial_state,
    build_ui_for_parsing_answers,
    build_ui_for_ask_again,
    build_ui_for_check_category,
    build_ui_for_form_created,
)
from utils.env_utils import build_repo_from_environment

user_msg = "Please describe what you need to do. To get the best results, try to answer all of the following questions:"


def use_streamlit():
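    """Render the page and dispatch to the UI builder for the current step."""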
    pm = PromptsManager()
    help_ = f"{user_msg}\n\n" + '\n'.join(pm.questions)
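    # Prefer a repository configured via environment variables; otherwise fall
    # back to the in-memory "testing" backend with a fake model.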
    repository = (build_repo_from_environment(pm.system_prompt) or
                  get_repository("testing",
                                 Model("fakeModel", ModelRoles("a", "b", "c"))))
    st.markdown("## Dubai Asset Management red tape cutter")
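    # Dispatch on the current step: an unset "step" means a fresh session, so
    # show the initial form; the handlers below render the later stages.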
    if not st.session_state.get("step"):
        build_ui_for_initial_state(help_)
        # Alternative backend: Llama 3 on an Intel NPU. If re-enabled, this
        # also needs `from pathlib import Path` at the top of the file.
        # llama3 = "meta-llama/Meta-Llama-3-8B-Instruct"
        # repository = get_repository("intel_npu", Model(llama3,
        #                                                ModelRoles("system", "user", "assistant")),
        #                             pm.system_prompt, Path("llm_log.txt"))

    if st.session_state.get("step") == "parsing_answers":
        build_ui_for_parsing_answers(repository, pm)

    if st.session_state.get("step") == "ask_again":
        build_ui_for_ask_again(pm)
    if st.session_state.get("step") == "check_category":
        build_ui_for_check_category(repository, pm)

    if st.session_state.get("step") == "form_created":
        build_ui_for_form_created()


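# Streamlit re-runs this script top to bottom on every interaction, so the
# entry point is simply called at module level.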
use_streamlit()