Spaces:
Sleeping
Sleeping
Commit
·
c725745
1
Parent(s):
9714680
updating hf space
Browse files- .gitignore +1 -1
- app.py +15 -14
- outputs/professor_guide.csv +3 -3
- poetry.lock +176 -162
- pyproject.toml +2 -2
- requirements.txt +1 -1
- uw_programmatic/base_machine.py +32 -160
- uw_programmatic/single_question_machine.py +466 -0
- uw_programmatic/uw_machine.py +41 -20
.gitignore
CHANGED
@@ -67,4 +67,4 @@ uw_machines/relevant_knowledge/
|
|
67 |
|
68 |
#Outputs from the runs
|
69 |
outputs/professor_guide.xlsx
|
70 |
-
outputs/
|
|
|
67 |
|
68 |
#Outputs from the runs
|
69 |
outputs/professor_guide.xlsx
|
70 |
+
outputs/rejected_list.csv
|
app.py
CHANGED
@@ -19,7 +19,7 @@ def run_with_context(func: Callable) -> Callable:
|
|
19 |
|
20 |
def generate_questions(
|
21 |
page_lower, page_higher, question_number, taxonomy
|
22 |
-
) -> tuple[str, dict[str, Any],dict[str, Any]]:
|
23 |
if machine.value and machine.value.current_state_value == "start":
|
24 |
machine.value.start_machine() # Start the machine!
|
25 |
if not question_number or question_number <= 0:
|
@@ -65,9 +65,7 @@ def generate_questions(
|
|
65 |
gr.update(
|
66 |
visible=True, value=f"{Path.cwd().joinpath('outputs/professor_guide.csv')}"
|
67 |
),
|
68 |
-
gr.update(
|
69 |
-
visible=False
|
70 |
-
)
|
71 |
)
|
72 |
|
73 |
|
@@ -79,8 +77,9 @@ def create_statemachine() -> None:
|
|
79 |
machine.value = UWMachine.from_config_file(config_path)
|
80 |
except Exception as e:
|
81 |
raise gr.Error(str(e)) from e
|
82 |
-
|
83 |
-
|
|
|
84 |
return gr.update(interactive=True, visible=True)
|
85 |
|
86 |
|
@@ -97,13 +96,13 @@ with gr.Blocks() as demo:
|
|
97 |
question_number = gr.Number(
|
98 |
minimum=1, maximum=25, label="Number of Questions", value=3
|
99 |
)
|
100 |
-
gr.Markdown("For
|
101 |
with gr.Row():
|
102 |
page_lower = gr.Number(
|
103 |
-
label="First Page", minimum=
|
104 |
)
|
105 |
page_higher = gr.Number(
|
106 |
-
label="Last Page", minimum=
|
107 |
)
|
108 |
start_button = gr.Button(value="Generate Questions", scale=1)
|
109 |
|
@@ -119,11 +118,13 @@ with gr.Blocks() as demo:
|
|
119 |
outputs=[output, download_professor, start_button],
|
120 |
)
|
121 |
download_professor.click(
|
122 |
-
fn=run_with_context(questions_downloaded),
|
123 |
-
outputs=[start_button]
|
124 |
)
|
125 |
|
126 |
load_dotenv()
|
127 |
-
demo.launch(
|
128 |
-
|
129 |
-
|
|
|
|
|
|
|
|
19 |
|
20 |
def generate_questions(
|
21 |
page_lower, page_higher, question_number, taxonomy
|
22 |
+
) -> tuple[str, dict[str, Any], dict[str, Any]]:
|
23 |
if machine.value and machine.value.current_state_value == "start":
|
24 |
machine.value.start_machine() # Start the machine!
|
25 |
if not question_number or question_number <= 0:
|
|
|
65 |
gr.update(
|
66 |
visible=True, value=f"{Path.cwd().joinpath('outputs/professor_guide.csv')}"
|
67 |
),
|
68 |
+
gr.update(visible=False),
|
|
|
|
|
69 |
)
|
70 |
|
71 |
|
|
|
77 |
machine.value = UWMachine.from_config_file(config_path)
|
78 |
except Exception as e:
|
79 |
raise gr.Error(str(e)) from e
|
80 |
+
|
81 |
+
|
82 |
+
def questions_downloaded() -> dict[str, Any]:
|
83 |
return gr.update(interactive=True, visible=True)
|
84 |
|
85 |
|
|
|
96 |
question_number = gr.Number(
|
97 |
minimum=1, maximum=25, label="Number of Questions", value=3
|
98 |
)
|
99 |
+
gr.Markdown("For Textbook - Pages 1-348")
|
100 |
with gr.Row():
|
101 |
page_lower = gr.Number(
|
102 |
+
label="First Page", minimum=1, value=1, maximum=348
|
103 |
)
|
104 |
page_higher = gr.Number(
|
105 |
+
label="Last Page", minimum=1, value=348, maximum=348
|
106 |
)
|
107 |
start_button = gr.Button(value="Generate Questions", scale=1)
|
108 |
|
|
|
118 |
outputs=[output, download_professor, start_button],
|
119 |
)
|
120 |
download_professor.click(
|
121 |
+
fn=run_with_context(questions_downloaded), outputs=[start_button]
|
|
|
122 |
)
|
123 |
|
124 |
load_dotenv()
|
125 |
+
demo.launch(
|
126 |
+
share=True,
|
127 |
+
ssr_mode=False,
|
128 |
+
auth=(os.environ.get("HF_USERNAME", ""), os.environ.get("HF_PASSWORD", "")),
|
129 |
+
)
|
130 |
+
# demo.launch()
|
outputs/professor_guide.csv
CHANGED
@@ -1,3 +1,3 @@
|
|
1 |
-
MC,,1,Identify the
|
2 |
-
MC,,1,Identify the
|
3 |
-
MC,,1,Identify the
|
|
|
1 |
+
MC,,1,Identify the typical run time for a crowdfunding campaign according to the textbook.,5,The typical run time for a crowdfunding campaign is 120 days.,The typical run time for a crowdfunding campaign is 15 weeks.,The typical run time for a crowdfunding campaign is 90 days.,The typical run time for a crowdfunding campaign is 10 days.,The typical run time for a crowdfunding campaign is 30 to 60 days.,266-269,Knowledge
|
2 |
+
MC,,1,Identify the purpose of a 'Pretend to Own' prototype.,4,A 'Pretend to Own' prototype is used to analyze technical feasibility.,A 'Pretend to Own' prototype is used to assess production costs.,A 'Pretend to Own' prototype is used to evaluate design aesthetics.,A 'Pretend to Own' prototype is used to determine if a solution fits into the customer's day-to-day life.,A 'Pretend to Own' prototype is used to test market demand.,208-213,Knowledge
|
3 |
+
MC,,1,Identify the primary problem Topology Eyewear aims to solve with their product.,1,Poorly fitting glasses.,Fragile frames that break easily.,High cost of prescription lenses.,Limited style options for eyewear.,Uncomfortable nose pads on glasses.,300-305,Knowledge
|
poetry.lock
CHANGED
@@ -373,12 +373,12 @@ tqdm = ["tqdm"]
|
|
373 |
|
374 |
[[package]]
|
375 |
name = "gradio"
|
376 |
-
version = "5.
|
377 |
description = "Python library for easily interacting with trained machine learning models"
|
378 |
optional = false
|
379 |
python-versions = ">=3.10"
|
380 |
files = [
|
381 |
-
{file = "gradio-5.
|
382 |
]
|
383 |
|
384 |
[package.dependencies]
|
@@ -387,7 +387,7 @@ anyio = ">=3.0,<5.0"
|
|
387 |
audioop-lts = {version = "<1.0", markers = "python_version >= \"3.13\""}
|
388 |
fastapi = ">=0.115.2,<1.0"
|
389 |
ffmpy = "*"
|
390 |
-
gradio-client = "1.5.
|
391 |
httpx = ">=0.24.1"
|
392 |
huggingface-hub = ">=0.25.1"
|
393 |
jinja2 = "<4.0"
|
@@ -416,13 +416,13 @@ oauth = ["authlib", "itsdangerous"]
|
|
416 |
|
417 |
[[package]]
|
418 |
name = "gradio-client"
|
419 |
-
version = "1.5.
|
420 |
description = "Python library for easily interacting with trained machine learning models"
|
421 |
optional = false
|
422 |
python-versions = ">=3.10"
|
423 |
files = [
|
424 |
-
{file = "gradio_client-1.5.
|
425 |
-
{file = "gradio_client-1.5.
|
426 |
]
|
427 |
|
428 |
[package.dependencies]
|
@@ -459,7 +459,7 @@ files = []
|
|
459 |
develop = false
|
460 |
|
461 |
[package.dependencies]
|
462 |
-
attrs = "^24.
|
463 |
filetype = "^1.2"
|
464 |
jinja2 = "^3.1.4"
|
465 |
marshmallow = "^3.21.3"
|
@@ -529,8 +529,8 @@ loaders-sql = ["sqlalchemy (>=2.0.31,<3.0.0)"]
|
|
529 |
[package.source]
|
530 |
type = "git"
|
531 |
url = "https://github.com/griptape-ai/griptape.git"
|
532 |
-
reference = "
|
533 |
-
resolved_reference = "
|
534 |
|
535 |
[[package]]
|
536 |
name = "h11"
|
@@ -858,13 +858,13 @@ files = [
|
|
858 |
|
859 |
[[package]]
|
860 |
name = "marshmallow"
|
861 |
-
version = "3.
|
862 |
description = "A lightweight library for converting complex datatypes to and from native Python datatypes."
|
863 |
optional = false
|
864 |
python-versions = ">=3.9"
|
865 |
files = [
|
866 |
-
{file = "marshmallow-3.
|
867 |
-
{file = "marshmallow-3.
|
868 |
]
|
869 |
|
870 |
[package.dependencies]
|
@@ -872,7 +872,7 @@ packaging = ">=17.0"
|
|
872 |
|
873 |
[package.extras]
|
874 |
dev = ["marshmallow[tests]", "pre-commit (>=3.5,<5.0)", "tox"]
|
875 |
-
docs = ["
|
876 |
tests = ["pytest", "simplejson"]
|
877 |
|
878 |
[[package]]
|
@@ -977,13 +977,13 @@ files = [
|
|
977 |
|
978 |
[[package]]
|
979 |
name = "openai"
|
980 |
-
version = "1.59.
|
981 |
description = "The official Python library for the openai API"
|
982 |
optional = false
|
983 |
python-versions = ">=3.8"
|
984 |
files = [
|
985 |
-
{file = "openai-1.59.
|
986 |
-
{file = "openai-1.59.
|
987 |
]
|
988 |
|
989 |
[package.dependencies]
|
@@ -1002,86 +1002,86 @@ realtime = ["websockets (>=13,<15)"]
|
|
1002 |
|
1003 |
[[package]]
|
1004 |
name = "orjson"
|
1005 |
-
version = "3.10.
|
1006 |
description = "Fast, correct Python JSON library supporting dataclasses, datetimes, and numpy"
|
1007 |
optional = false
|
1008 |
python-versions = ">=3.8"
|
1009 |
files = [
|
1010 |
-
{file = "orjson-3.10.
|
1011 |
-
{file = "orjson-3.10.
|
1012 |
-
{file = "orjson-3.10.
|
1013 |
-
{file = "orjson-3.10.
|
1014 |
-
{file = "orjson-3.10.
|
1015 |
-
{file = "orjson-3.10.
|
1016 |
-
{file = "orjson-3.10.
|
1017 |
-
{file = "orjson-3.10.
|
1018 |
-
{file = "orjson-3.10.
|
1019 |
-
{file = "orjson-3.10.
|
1020 |
-
{file = "orjson-3.10.
|
1021 |
-
{file = "orjson-3.10.
|
1022 |
-
{file = "orjson-3.10.
|
1023 |
-
{file = "orjson-3.10.
|
1024 |
-
{file = "orjson-3.10.
|
1025 |
-
{file = "orjson-3.10.
|
1026 |
-
{file = "orjson-3.10.
|
1027 |
-
{file = "orjson-3.10.
|
1028 |
-
{file = "orjson-3.10.
|
1029 |
-
{file = "orjson-3.10.
|
1030 |
-
{file = "orjson-3.10.
|
1031 |
-
{file = "orjson-3.10.
|
1032 |
-
{file = "orjson-3.10.
|
1033 |
-
{file = "orjson-3.10.
|
1034 |
-
{file = "orjson-3.10.
|
1035 |
-
{file = "orjson-3.10.
|
1036 |
-
{file = "orjson-3.10.
|
1037 |
-
{file = "orjson-3.10.
|
1038 |
-
{file = "orjson-3.10.
|
1039 |
-
{file = "orjson-3.10.
|
1040 |
-
{file = "orjson-3.10.
|
1041 |
-
{file = "orjson-3.10.
|
1042 |
-
{file = "orjson-3.10.
|
1043 |
-
{file = "orjson-3.10.
|
1044 |
-
{file = "orjson-3.10.
|
1045 |
-
{file = "orjson-3.10.
|
1046 |
-
{file = "orjson-3.10.
|
1047 |
-
{file = "orjson-3.10.
|
1048 |
-
{file = "orjson-3.10.
|
1049 |
-
{file = "orjson-3.10.
|
1050 |
-
{file = "orjson-3.10.
|
1051 |
-
{file = "orjson-3.10.
|
1052 |
-
{file = "orjson-3.10.
|
1053 |
-
{file = "orjson-3.10.
|
1054 |
-
{file = "orjson-3.10.
|
1055 |
-
{file = "orjson-3.10.
|
1056 |
-
{file = "orjson-3.10.
|
1057 |
-
{file = "orjson-3.10.
|
1058 |
-
{file = "orjson-3.10.
|
1059 |
-
{file = "orjson-3.10.
|
1060 |
-
{file = "orjson-3.10.
|
1061 |
-
{file = "orjson-3.10.
|
1062 |
-
{file = "orjson-3.10.
|
1063 |
-
{file = "orjson-3.10.
|
1064 |
-
{file = "orjson-3.10.
|
1065 |
-
{file = "orjson-3.10.
|
1066 |
-
{file = "orjson-3.10.
|
1067 |
-
{file = "orjson-3.10.
|
1068 |
-
{file = "orjson-3.10.
|
1069 |
-
{file = "orjson-3.10.
|
1070 |
-
{file = "orjson-3.10.
|
1071 |
-
{file = "orjson-3.10.
|
1072 |
-
{file = "orjson-3.10.
|
1073 |
-
{file = "orjson-3.10.
|
1074 |
-
{file = "orjson-3.10.
|
1075 |
-
{file = "orjson-3.10.
|
1076 |
-
{file = "orjson-3.10.
|
1077 |
-
{file = "orjson-3.10.
|
1078 |
-
{file = "orjson-3.10.
|
1079 |
-
{file = "orjson-3.10.
|
1080 |
-
{file = "orjson-3.10.
|
1081 |
-
{file = "orjson-3.10.
|
1082 |
-
{file = "orjson-3.10.
|
1083 |
-
{file = "orjson-3.10.
|
1084 |
-
{file = "orjson-3.10.
|
1085 |
]
|
1086 |
|
1087 |
[[package]]
|
@@ -1319,13 +1319,13 @@ virtualenv = ">=20.10.0"
|
|
1319 |
|
1320 |
[[package]]
|
1321 |
name = "pydantic"
|
1322 |
-
version = "2.10.
|
1323 |
description = "Data validation using Python type hints"
|
1324 |
optional = false
|
1325 |
python-versions = ">=3.8"
|
1326 |
files = [
|
1327 |
-
{file = "pydantic-2.10.
|
1328 |
-
{file = "pydantic-2.10.
|
1329 |
]
|
1330 |
|
1331 |
[package.dependencies]
|
@@ -2262,76 +2262,90 @@ files = [
|
|
2262 |
|
2263 |
[[package]]
|
2264 |
name = "wrapt"
|
2265 |
-
version = "1.17.
|
2266 |
description = "Module for decorators, wrappers and monkey patching."
|
2267 |
optional = false
|
2268 |
python-versions = ">=3.8"
|
2269 |
files = [
|
2270 |
-
{file = "wrapt-1.17.
|
2271 |
-
{file = "wrapt-1.17.
|
2272 |
-
{file = "wrapt-1.17.
|
2273 |
-
{file = "wrapt-1.17.
|
2274 |
-
{file = "wrapt-1.17.
|
2275 |
-
{file = "wrapt-1.17.
|
2276 |
-
{file = "wrapt-1.17.
|
2277 |
-
{file = "wrapt-1.17.
|
2278 |
-
{file = "wrapt-1.17.
|
2279 |
-
{file = "wrapt-1.17.
|
2280 |
-
{file = "wrapt-1.17.
|
2281 |
-
{file = "wrapt-1.17.
|
2282 |
-
{file = "wrapt-1.17.
|
2283 |
-
{file = "wrapt-1.17.
|
2284 |
-
{file = "wrapt-1.17.
|
2285 |
-
{file = "wrapt-1.17.
|
2286 |
-
{file = "wrapt-1.17.
|
2287 |
-
{file = "wrapt-1.17.
|
2288 |
-
{file = "wrapt-1.17.
|
2289 |
-
{file = "wrapt-1.17.
|
2290 |
-
{file = "wrapt-1.17.
|
2291 |
-
{file = "wrapt-1.17.
|
2292 |
-
{file = "wrapt-1.17.
|
2293 |
-
{file = "wrapt-1.17.
|
2294 |
-
{file = "wrapt-1.17.
|
2295 |
-
{file = "wrapt-1.17.
|
2296 |
-
{file = "wrapt-1.17.
|
2297 |
-
{file = "wrapt-1.17.
|
2298 |
-
{file = "wrapt-1.17.
|
2299 |
-
{file = "wrapt-1.17.
|
2300 |
-
{file = "wrapt-1.17.
|
2301 |
-
{file = "wrapt-1.17.
|
2302 |
-
{file = "wrapt-1.17.
|
2303 |
-
{file = "wrapt-1.17.
|
2304 |
-
{file = "wrapt-1.17.
|
2305 |
-
{file = "wrapt-1.17.
|
2306 |
-
{file = "wrapt-1.17.
|
2307 |
-
{file = "wrapt-1.17.
|
2308 |
-
{file = "wrapt-1.17.
|
2309 |
-
{file = "wrapt-1.17.
|
2310 |
-
{file = "wrapt-1.17.
|
2311 |
-
{file = "wrapt-1.17.
|
2312 |
-
{file = "wrapt-1.17.
|
2313 |
-
{file = "wrapt-1.17.
|
2314 |
-
{file = "wrapt-1.17.
|
2315 |
-
{file = "wrapt-1.17.
|
2316 |
-
{file = "wrapt-1.17.
|
2317 |
-
{file = "wrapt-1.17.
|
2318 |
-
{file = "wrapt-1.17.
|
2319 |
-
{file = "wrapt-1.17.
|
2320 |
-
{file = "wrapt-1.17.
|
2321 |
-
{file = "wrapt-1.17.
|
2322 |
-
{file = "wrapt-1.17.
|
2323 |
-
{file = "wrapt-1.17.
|
2324 |
-
{file = "wrapt-1.17.
|
2325 |
-
{file = "wrapt-1.17.
|
2326 |
-
{file = "wrapt-1.17.
|
2327 |
-
{file = "wrapt-1.17.
|
2328 |
-
{file = "wrapt-1.17.
|
2329 |
-
{file = "wrapt-1.17.
|
2330 |
-
{file = "wrapt-1.17.
|
2331 |
-
{file = "wrapt-1.17.
|
2332 |
-
{file = "wrapt-1.17.
|
2333 |
-
{file = "wrapt-1.17.
|
2334 |
-
{file = "wrapt-1.17.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
2335 |
]
|
2336 |
|
2337 |
[[package]]
|
@@ -2348,4 +2362,4 @@ files = [
|
|
2348 |
[metadata]
|
2349 |
lock-version = "2.0"
|
2350 |
python-versions = "^3.11"
|
2351 |
-
content-hash = "
|
|
|
373 |
|
374 |
[[package]]
|
375 |
name = "gradio"
|
376 |
+
version = "5.12.0"
|
377 |
description = "Python library for easily interacting with trained machine learning models"
|
378 |
optional = false
|
379 |
python-versions = ">=3.10"
|
380 |
files = [
|
381 |
+
{file = "gradio-5.12.0-py3-none-any.whl", hash = "sha256:b4b79a2c537131a8a5e23046565e64da40156ac24f9082e563e734e89641e160"},
|
382 |
]
|
383 |
|
384 |
[package.dependencies]
|
|
|
387 |
audioop-lts = {version = "<1.0", markers = "python_version >= \"3.13\""}
|
388 |
fastapi = ">=0.115.2,<1.0"
|
389 |
ffmpy = "*"
|
390 |
+
gradio-client = "1.5.4"
|
391 |
httpx = ">=0.24.1"
|
392 |
huggingface-hub = ">=0.25.1"
|
393 |
jinja2 = "<4.0"
|
|
|
416 |
|
417 |
[[package]]
|
418 |
name = "gradio-client"
|
419 |
+
version = "1.5.4"
|
420 |
description = "Python library for easily interacting with trained machine learning models"
|
421 |
optional = false
|
422 |
python-versions = ">=3.10"
|
423 |
files = [
|
424 |
+
{file = "gradio_client-1.5.4-py3-none-any.whl", hash = "sha256:ad38c9a6f7fc590e822627f5bf5685321a7822b8f1a88b76d00a0621a43162d6"},
|
425 |
+
{file = "gradio_client-1.5.4.tar.gz", hash = "sha256:281a1b6c4e45210c70b60888bb6f329c27f30645d7aa376e1f20966de82273dc"},
|
426 |
]
|
427 |
|
428 |
[package.dependencies]
|
|
|
459 |
develop = false
|
460 |
|
461 |
[package.dependencies]
|
462 |
+
attrs = "^24.3.0"
|
463 |
filetype = "^1.2"
|
464 |
jinja2 = "^3.1.4"
|
465 |
marshmallow = "^3.21.3"
|
|
|
529 |
[package.source]
|
530 |
type = "git"
|
531 |
url = "https://github.com/griptape-ai/griptape.git"
|
532 |
+
reference = "main"
|
533 |
+
resolved_reference = "03b65295cabcea9c57343494c86c6d12ac0faa1c"
|
534 |
|
535 |
[[package]]
|
536 |
name = "h11"
|
|
|
858 |
|
859 |
[[package]]
|
860 |
name = "marshmallow"
|
861 |
+
version = "3.25.1"
|
862 |
description = "A lightweight library for converting complex datatypes to and from native Python datatypes."
|
863 |
optional = false
|
864 |
python-versions = ">=3.9"
|
865 |
files = [
|
866 |
+
{file = "marshmallow-3.25.1-py3-none-any.whl", hash = "sha256:ec5d00d873ce473b7f2ffcb7104286a376c354cab0c2fa12f5573dab03e87210"},
|
867 |
+
{file = "marshmallow-3.25.1.tar.gz", hash = "sha256:f4debda3bb11153d81ac34b0d582bf23053055ee11e791b54b4b35493468040a"},
|
868 |
]
|
869 |
|
870 |
[package.dependencies]
|
|
|
872 |
|
873 |
[package.extras]
|
874 |
dev = ["marshmallow[tests]", "pre-commit (>=3.5,<5.0)", "tox"]
|
875 |
+
docs = ["autodocsumm (==0.2.14)", "furo (==2024.8.6)", "sphinx (==8.1.3)", "sphinx-copybutton (==0.5.2)", "sphinx-issues (==5.0.0)", "sphinxext-opengraph (==0.9.1)"]
|
876 |
tests = ["pytest", "simplejson"]
|
877 |
|
878 |
[[package]]
|
|
|
977 |
|
978 |
[[package]]
|
979 |
name = "openai"
|
980 |
+
version = "1.59.7"
|
981 |
description = "The official Python library for the openai API"
|
982 |
optional = false
|
983 |
python-versions = ">=3.8"
|
984 |
files = [
|
985 |
+
{file = "openai-1.59.7-py3-none-any.whl", hash = "sha256:cfa806556226fa96df7380ab2e29814181d56fea44738c2b0e581b462c268692"},
|
986 |
+
{file = "openai-1.59.7.tar.gz", hash = "sha256:043603def78c00befb857df9f0a16ee76a3af5984ba40cb7ee5e2f40db4646bf"},
|
987 |
]
|
988 |
|
989 |
[package.dependencies]
|
|
|
1002 |
|
1003 |
[[package]]
|
1004 |
name = "orjson"
|
1005 |
+
version = "3.10.14"
|
1006 |
description = "Fast, correct Python JSON library supporting dataclasses, datetimes, and numpy"
|
1007 |
optional = false
|
1008 |
python-versions = ">=3.8"
|
1009 |
files = [
|
1010 |
+
{file = "orjson-3.10.14-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:849ea7845a55f09965826e816cdc7689d6cf74fe9223d79d758c714af955bcb6"},
|
1011 |
+
{file = "orjson-3.10.14-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b5947b139dfa33f72eecc63f17e45230a97e741942955a6c9e650069305eb73d"},
|
1012 |
+
{file = "orjson-3.10.14-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:cde6d76910d3179dae70f164466692f4ea36da124d6fb1a61399ca589e81d69a"},
|
1013 |
+
{file = "orjson-3.10.14-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c6dfbaeb7afa77ca608a50e2770a0461177b63a99520d4928e27591b142c74b1"},
|
1014 |
+
{file = "orjson-3.10.14-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fa45e489ef80f28ff0e5ba0a72812b8cfc7c1ef8b46a694723807d1b07c89ebb"},
|
1015 |
+
{file = "orjson-3.10.14-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4f5007abfdbb1d866e2aa8990bd1c465f0f6da71d19e695fc278282be12cffa5"},
|
1016 |
+
{file = "orjson-3.10.14-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1b49e2af011c84c3f2d541bb5cd1e3c7c2df672223e7e3ea608f09cf295e5f8a"},
|
1017 |
+
{file = "orjson-3.10.14-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:164ac155109226b3a2606ee6dda899ccfbe6e7e18b5bdc3fbc00f79cc074157d"},
|
1018 |
+
{file = "orjson-3.10.14-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:6b1225024cf0ef5d15934b5ffe9baf860fe8bc68a796513f5ea4f5056de30bca"},
|
1019 |
+
{file = "orjson-3.10.14-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:d6546e8073dc382e60fcae4a001a5a1bc46da5eab4a4878acc2d12072d6166d5"},
|
1020 |
+
{file = "orjson-3.10.14-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:9f1d2942605c894162252d6259b0121bf1cb493071a1ea8cb35d79cb3e6ac5bc"},
|
1021 |
+
{file = "orjson-3.10.14-cp310-cp310-win32.whl", hash = "sha256:397083806abd51cf2b3bbbf6c347575374d160331a2d33c5823e22249ad3118b"},
|
1022 |
+
{file = "orjson-3.10.14-cp310-cp310-win_amd64.whl", hash = "sha256:fa18f949d3183a8d468367056be989666ac2bef3a72eece0bade9cdb733b3c28"},
|
1023 |
+
{file = "orjson-3.10.14-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:f506fd666dd1ecd15a832bebc66c4df45c1902fd47526292836c339f7ba665a9"},
|
1024 |
+
{file = "orjson-3.10.14-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:efe5fd254cfb0eeee13b8ef7ecb20f5d5a56ddda8a587f3852ab2cedfefdb5f6"},
|
1025 |
+
{file = "orjson-3.10.14-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4ddc8c866d7467f5ee2991397d2ea94bcf60d0048bdd8ca555740b56f9042725"},
|
1026 |
+
{file = "orjson-3.10.14-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3af8e42ae4363773658b8d578d56dedffb4f05ceeb4d1d4dd3fb504950b45526"},
|
1027 |
+
{file = "orjson-3.10.14-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:84dd83110503bc10e94322bf3ffab8bc49150176b49b4984dc1cce4c0a993bf9"},
|
1028 |
+
{file = "orjson-3.10.14-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:36f5bfc0399cd4811bf10ec7a759c7ab0cd18080956af8ee138097d5b5296a95"},
|
1029 |
+
{file = "orjson-3.10.14-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:868943660fb2a1e6b6b965b74430c16a79320b665b28dd4511d15ad5038d37d5"},
|
1030 |
+
{file = "orjson-3.10.14-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:33449c67195969b1a677533dee9d76e006001213a24501333624623e13c7cc8e"},
|
1031 |
+
{file = "orjson-3.10.14-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:e4c9f60f9fb0b5be66e416dcd8c9d94c3eabff3801d875bdb1f8ffc12cf86905"},
|
1032 |
+
{file = "orjson-3.10.14-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:0de4d6315cfdbd9ec803b945c23b3a68207fd47cbe43626036d97e8e9561a436"},
|
1033 |
+
{file = "orjson-3.10.14-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:83adda3db595cb1a7e2237029b3249c85afbe5c747d26b41b802e7482cb3933e"},
|
1034 |
+
{file = "orjson-3.10.14-cp311-cp311-win32.whl", hash = "sha256:998019ef74a4997a9d741b1473533cdb8faa31373afc9849b35129b4b8ec048d"},
|
1035 |
+
{file = "orjson-3.10.14-cp311-cp311-win_amd64.whl", hash = "sha256:9d034abdd36f0f0f2240f91492684e5043d46f290525d1117712d5b8137784eb"},
|
1036 |
+
{file = "orjson-3.10.14-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:2ad4b7e367efba6dc3f119c9a0fcd41908b7ec0399a696f3cdea7ec477441b09"},
|
1037 |
+
{file = "orjson-3.10.14-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f496286fc85e93ce0f71cc84fc1c42de2decf1bf494094e188e27a53694777a7"},
|
1038 |
+
{file = "orjson-3.10.14-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c7f189bbfcded40e41a6969c1068ba305850ba016665be71a217918931416fbf"},
|
1039 |
+
{file = "orjson-3.10.14-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8cc8204f0b75606869c707da331058ddf085de29558b516fc43c73ee5ee2aadb"},
|
1040 |
+
{file = "orjson-3.10.14-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:deaa2899dff7f03ab667e2ec25842d233e2a6a9e333efa484dfe666403f3501c"},
|
1041 |
+
{file = "orjson-3.10.14-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f1c3ea52642c9714dc6e56de8a451a066f6d2707d273e07fe8a9cc1ba073813d"},
|
1042 |
+
{file = "orjson-3.10.14-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9d3f9ed72e7458ded9a1fb1b4d4ed4c4fdbaf82030ce3f9274b4dc1bff7ace2b"},
|
1043 |
+
{file = "orjson-3.10.14-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:07520685d408a2aba514c17ccc16199ff2934f9f9e28501e676c557f454a37fe"},
|
1044 |
+
{file = "orjson-3.10.14-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:76344269b550ea01488d19a2a369ab572c1ac4449a72e9f6ac0d70eb1cbfb953"},
|
1045 |
+
{file = "orjson-3.10.14-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:e2979d0f2959990620f7e62da6cd954e4620ee815539bc57a8ae46e2dacf90e3"},
|
1046 |
+
{file = "orjson-3.10.14-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:03f61ca3674555adcb1aa717b9fc87ae936aa7a63f6aba90a474a88701278780"},
|
1047 |
+
{file = "orjson-3.10.14-cp312-cp312-win32.whl", hash = "sha256:d5075c54edf1d6ad81d4c6523ce54a748ba1208b542e54b97d8a882ecd810fd1"},
|
1048 |
+
{file = "orjson-3.10.14-cp312-cp312-win_amd64.whl", hash = "sha256:175cafd322e458603e8ce73510a068d16b6e6f389c13f69bf16de0e843d7d406"},
|
1049 |
+
{file = "orjson-3.10.14-cp313-cp313-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:0905ca08a10f7e0e0c97d11359609300eb1437490a7f32bbaa349de757e2e0c7"},
|
1050 |
+
{file = "orjson-3.10.14-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:92d13292249f9f2a3e418cbc307a9fbbef043c65f4bd8ba1eb620bc2aaba3d15"},
|
1051 |
+
{file = "orjson-3.10.14-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:90937664e776ad316d64251e2fa2ad69265e4443067668e4727074fe39676414"},
|
1052 |
+
{file = "orjson-3.10.14-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:9ed3d26c4cb4f6babaf791aa46a029265850e80ec2a566581f5c2ee1a14df4f1"},
|
1053 |
+
{file = "orjson-3.10.14-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:56ee546c2bbe9599aba78169f99d1dc33301853e897dbaf642d654248280dc6e"},
|
1054 |
+
{file = "orjson-3.10.14-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:901e826cb2f1bdc1fcef3ef59adf0c451e8f7c0b5deb26c1a933fb66fb505eae"},
|
1055 |
+
{file = "orjson-3.10.14-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:26336c0d4b2d44636e1e1e6ed1002f03c6aae4a8a9329561c8883f135e9ff010"},
|
1056 |
+
{file = "orjson-3.10.14-cp313-cp313-win32.whl", hash = "sha256:e2bc525e335a8545c4e48f84dd0328bc46158c9aaeb8a1c2276546e94540ea3d"},
|
1057 |
+
{file = "orjson-3.10.14-cp313-cp313-win_amd64.whl", hash = "sha256:eca04dfd792cedad53dc9a917da1a522486255360cb4e77619343a20d9f35364"},
|
1058 |
+
{file = "orjson-3.10.14-cp38-cp38-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:9a0fba3b8a587a54c18585f077dcab6dd251c170d85cfa4d063d5746cd595a0f"},
|
1059 |
+
{file = "orjson-3.10.14-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:175abf3d20e737fec47261d278f95031736a49d7832a09ab684026528c4d96db"},
|
1060 |
+
{file = "orjson-3.10.14-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:29ca1a93e035d570e8b791b6c0feddd403c6a5388bfe870bf2aa6bba1b9d9b8e"},
|
1061 |
+
{file = "orjson-3.10.14-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f77202c80e8ab5a1d1e9faf642343bee5aaf332061e1ada4e9147dbd9eb00c46"},
|
1062 |
+
{file = "orjson-3.10.14-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6e2ec73b7099b6a29b40a62e08a23b936423bd35529f8f55c42e27acccde7954"},
|
1063 |
+
{file = "orjson-3.10.14-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a2d1679df9f9cd9504f8dff24555c1eaabba8aad7f5914f28dab99e3c2552c9d"},
|
1064 |
+
{file = "orjson-3.10.14-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:691ab9a13834310a263664313e4f747ceb93662d14a8bdf20eb97d27ed488f16"},
|
1065 |
+
{file = "orjson-3.10.14-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:b11ed82054fce82fb74cea33247d825d05ad6a4015ecfc02af5fbce442fbf361"},
|
1066 |
+
{file = "orjson-3.10.14-cp38-cp38-musllinux_1_2_armv7l.whl", hash = "sha256:e70a1d62b8288677d48f3bea66c21586a5f999c64ecd3878edb7393e8d1b548d"},
|
1067 |
+
{file = "orjson-3.10.14-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:16642f10c1ca5611251bd835de9914a4b03095e28a34c8ba6a5500b5074338bd"},
|
1068 |
+
{file = "orjson-3.10.14-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:3871bad546aa66c155e3f36f99c459780c2a392d502a64e23fb96d9abf338511"},
|
1069 |
+
{file = "orjson-3.10.14-cp38-cp38-win32.whl", hash = "sha256:0293a88815e9bb5c90af4045f81ed364d982f955d12052d989d844d6c4e50945"},
|
1070 |
+
{file = "orjson-3.10.14-cp38-cp38-win_amd64.whl", hash = "sha256:6169d3868b190d6b21adc8e61f64e3db30f50559dfbdef34a1cd6c738d409dfc"},
|
1071 |
+
{file = "orjson-3.10.14-cp39-cp39-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:06d4ec218b1ec1467d8d64da4e123b4794c781b536203c309ca0f52819a16c03"},
|
1072 |
+
{file = "orjson-3.10.14-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:962c2ec0dcaf22b76dee9831fdf0c4a33d4bf9a257a2bc5d4adc00d5c8ad9034"},
|
1073 |
+
{file = "orjson-3.10.14-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:21d3be4132f71ef1360385770474f29ea1538a242eef72ac4934fe142800e37f"},
|
1074 |
+
{file = "orjson-3.10.14-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c28ed60597c149a9e3f5ad6dd9cebaee6fb2f0e3f2d159a4a2b9b862d4748860"},
|
1075 |
+
{file = "orjson-3.10.14-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7e947f70167fe18469f2023644e91ab3d24f9aed69a5e1c78e2c81b9cea553fb"},
|
1076 |
+
{file = "orjson-3.10.14-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:64410696c97a35af2432dea7bdc4ce32416458159430ef1b4beb79fd30093ad6"},
|
1077 |
+
{file = "orjson-3.10.14-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8050a5d81c022561ee29cd2739de5b4445f3c72f39423fde80a63299c1892c52"},
|
1078 |
+
{file = "orjson-3.10.14-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:b49a28e30d3eca86db3fe6f9b7f4152fcacbb4a467953cd1b42b94b479b77956"},
|
1079 |
+
{file = "orjson-3.10.14-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:ca041ad20291a65d853a9523744eebc3f5a4b2f7634e99f8fe88320695ddf766"},
|
1080 |
+
{file = "orjson-3.10.14-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:d313a2998b74bb26e9e371851a173a9b9474764916f1fc7971095699b3c6e964"},
|
1081 |
+
{file = "orjson-3.10.14-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:7796692136a67b3e301ef9052bde6fe8e7bd5200da766811a3a608ffa62aaff0"},
|
1082 |
+
{file = "orjson-3.10.14-cp39-cp39-win32.whl", hash = "sha256:eee4bc767f348fba485ed9dc576ca58b0a9eac237f0e160f7a59bce628ed06b3"},
|
1083 |
+
{file = "orjson-3.10.14-cp39-cp39-win_amd64.whl", hash = "sha256:96a1c0ee30fb113b3ae3c748fd75ca74a157ff4c58476c47db4d61518962a011"},
|
1084 |
+
{file = "orjson-3.10.14.tar.gz", hash = "sha256:cf31f6f071a6b8e7aa1ead1fa27b935b48d00fbfa6a28ce856cfff2d5dd68eed"},
|
1085 |
]
|
1086 |
|
1087 |
[[package]]
|
|
|
1319 |
|
1320 |
[[package]]
|
1321 |
name = "pydantic"
|
1322 |
+
version = "2.10.5"
|
1323 |
description = "Data validation using Python type hints"
|
1324 |
optional = false
|
1325 |
python-versions = ">=3.8"
|
1326 |
files = [
|
1327 |
+
{file = "pydantic-2.10.5-py3-none-any.whl", hash = "sha256:4dd4e322dbe55472cb7ca7e73f4b63574eecccf2835ffa2af9021ce113c83c53"},
|
1328 |
+
{file = "pydantic-2.10.5.tar.gz", hash = "sha256:278b38dbbaec562011d659ee05f63346951b3a248a6f3642e1bc68894ea2b4ff"},
|
1329 |
]
|
1330 |
|
1331 |
[package.dependencies]
|
|
|
2262 |
|
2263 |
[[package]]
|
2264 |
name = "wrapt"
|
2265 |
+
version = "1.17.2"
|
2266 |
description = "Module for decorators, wrappers and monkey patching."
|
2267 |
optional = false
|
2268 |
python-versions = ">=3.8"
|
2269 |
files = [
|
2270 |
+
{file = "wrapt-1.17.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3d57c572081fed831ad2d26fd430d565b76aa277ed1d30ff4d40670b1c0dd984"},
|
2271 |
+
{file = "wrapt-1.17.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b5e251054542ae57ac7f3fba5d10bfff615b6c2fb09abeb37d2f1463f841ae22"},
|
2272 |
+
{file = "wrapt-1.17.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:80dd7db6a7cb57ffbc279c4394246414ec99537ae81ffd702443335a61dbf3a7"},
|
2273 |
+
{file = "wrapt-1.17.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0a6e821770cf99cc586d33833b2ff32faebdbe886bd6322395606cf55153246c"},
|
2274 |
+
{file = "wrapt-1.17.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b60fb58b90c6d63779cb0c0c54eeb38941bae3ecf7a73c764c52c88c2dcb9d72"},
|
2275 |
+
{file = "wrapt-1.17.2-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b870b5df5b71d8c3359d21be8f0d6c485fa0ebdb6477dda51a1ea54a9b558061"},
|
2276 |
+
{file = "wrapt-1.17.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:4011d137b9955791f9084749cba9a367c68d50ab8d11d64c50ba1688c9b457f2"},
|
2277 |
+
{file = "wrapt-1.17.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:1473400e5b2733e58b396a04eb7f35f541e1fb976d0c0724d0223dd607e0f74c"},
|
2278 |
+
{file = "wrapt-1.17.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:3cedbfa9c940fdad3e6e941db7138e26ce8aad38ab5fe9dcfadfed9db7a54e62"},
|
2279 |
+
{file = "wrapt-1.17.2-cp310-cp310-win32.whl", hash = "sha256:582530701bff1dec6779efa00c516496968edd851fba224fbd86e46cc6b73563"},
|
2280 |
+
{file = "wrapt-1.17.2-cp310-cp310-win_amd64.whl", hash = "sha256:58705da316756681ad3c9c73fd15499aa4d8c69f9fd38dc8a35e06c12468582f"},
|
2281 |
+
{file = "wrapt-1.17.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:ff04ef6eec3eee8a5efef2401495967a916feaa353643defcc03fc74fe213b58"},
|
2282 |
+
{file = "wrapt-1.17.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4db983e7bca53819efdbd64590ee96c9213894272c776966ca6306b73e4affda"},
|
2283 |
+
{file = "wrapt-1.17.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:9abc77a4ce4c6f2a3168ff34b1da9b0f311a8f1cfd694ec96b0603dff1c79438"},
|
2284 |
+
{file = "wrapt-1.17.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0b929ac182f5ace000d459c59c2c9c33047e20e935f8e39371fa6e3b85d56f4a"},
|
2285 |
+
{file = "wrapt-1.17.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f09b286faeff3c750a879d336fb6d8713206fc97af3adc14def0cdd349df6000"},
|
2286 |
+
{file = "wrapt-1.17.2-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1a7ed2d9d039bd41e889f6fb9364554052ca21ce823580f6a07c4ec245c1f5d6"},
|
2287 |
+
{file = "wrapt-1.17.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:129a150f5c445165ff941fc02ee27df65940fcb8a22a61828b1853c98763a64b"},
|
2288 |
+
{file = "wrapt-1.17.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:1fb5699e4464afe5c7e65fa51d4f99e0b2eadcc176e4aa33600a3df7801d6662"},
|
2289 |
+
{file = "wrapt-1.17.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:9a2bce789a5ea90e51a02dfcc39e31b7f1e662bc3317979aa7e5538e3a034f72"},
|
2290 |
+
{file = "wrapt-1.17.2-cp311-cp311-win32.whl", hash = "sha256:4afd5814270fdf6380616b321fd31435a462019d834f83c8611a0ce7484c7317"},
|
2291 |
+
{file = "wrapt-1.17.2-cp311-cp311-win_amd64.whl", hash = "sha256:acc130bc0375999da18e3d19e5a86403667ac0c4042a094fefb7eec8ebac7cf3"},
|
2292 |
+
{file = "wrapt-1.17.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:d5e2439eecc762cd85e7bd37161d4714aa03a33c5ba884e26c81559817ca0925"},
|
2293 |
+
{file = "wrapt-1.17.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:3fc7cb4c1c744f8c05cd5f9438a3caa6ab94ce8344e952d7c45a8ed59dd88392"},
|
2294 |
+
{file = "wrapt-1.17.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8fdbdb757d5390f7c675e558fd3186d590973244fab0c5fe63d373ade3e99d40"},
|
2295 |
+
{file = "wrapt-1.17.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5bb1d0dbf99411f3d871deb6faa9aabb9d4e744d67dcaaa05399af89d847a91d"},
|
2296 |
+
{file = "wrapt-1.17.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d18a4865f46b8579d44e4fe1e2bcbc6472ad83d98e22a26c963d46e4c125ef0b"},
|
2297 |
+
{file = "wrapt-1.17.2-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc570b5f14a79734437cb7b0500376b6b791153314986074486e0b0fa8d71d98"},
|
2298 |
+
{file = "wrapt-1.17.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6d9187b01bebc3875bac9b087948a2bccefe464a7d8f627cf6e48b1bbae30f82"},
|
2299 |
+
{file = "wrapt-1.17.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:9e8659775f1adf02eb1e6f109751268e493c73716ca5761f8acb695e52a756ae"},
|
2300 |
+
{file = "wrapt-1.17.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e8b2816ebef96d83657b56306152a93909a83f23994f4b30ad4573b00bd11bb9"},
|
2301 |
+
{file = "wrapt-1.17.2-cp312-cp312-win32.whl", hash = "sha256:468090021f391fe0056ad3e807e3d9034e0fd01adcd3bdfba977b6fdf4213ea9"},
|
2302 |
+
{file = "wrapt-1.17.2-cp312-cp312-win_amd64.whl", hash = "sha256:ec89ed91f2fa8e3f52ae53cd3cf640d6feff92ba90d62236a81e4e563ac0e991"},
|
2303 |
+
{file = "wrapt-1.17.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:6ed6ffac43aecfe6d86ec5b74b06a5be33d5bb9243d055141e8cabb12aa08125"},
|
2304 |
+
{file = "wrapt-1.17.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:35621ae4c00e056adb0009f8e86e28eb4a41a4bfa8f9bfa9fca7d343fe94f998"},
|
2305 |
+
{file = "wrapt-1.17.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a604bf7a053f8362d27eb9fefd2097f82600b856d5abe996d623babd067b1ab5"},
|
2306 |
+
{file = "wrapt-1.17.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5cbabee4f083b6b4cd282f5b817a867cf0b1028c54d445b7ec7cfe6505057cf8"},
|
2307 |
+
{file = "wrapt-1.17.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:49703ce2ddc220df165bd2962f8e03b84c89fee2d65e1c24a7defff6f988f4d6"},
|
2308 |
+
{file = "wrapt-1.17.2-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8112e52c5822fc4253f3901b676c55ddf288614dc7011634e2719718eaa187dc"},
|
2309 |
+
{file = "wrapt-1.17.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:9fee687dce376205d9a494e9c121e27183b2a3df18037f89d69bd7b35bcf59e2"},
|
2310 |
+
{file = "wrapt-1.17.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:18983c537e04d11cf027fbb60a1e8dfd5190e2b60cc27bc0808e653e7b218d1b"},
|
2311 |
+
{file = "wrapt-1.17.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:703919b1633412ab54bcf920ab388735832fdcb9f9a00ae49387f0fe67dad504"},
|
2312 |
+
{file = "wrapt-1.17.2-cp313-cp313-win32.whl", hash = "sha256:abbb9e76177c35d4e8568e58650aa6926040d6a9f6f03435b7a522bf1c487f9a"},
|
2313 |
+
{file = "wrapt-1.17.2-cp313-cp313-win_amd64.whl", hash = "sha256:69606d7bb691b50a4240ce6b22ebb319c1cfb164e5f6569835058196e0f3a845"},
|
2314 |
+
{file = "wrapt-1.17.2-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:4a721d3c943dae44f8e243b380cb645a709ba5bd35d3ad27bc2ed947e9c68192"},
|
2315 |
+
{file = "wrapt-1.17.2-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:766d8bbefcb9e00c3ac3b000d9acc51f1b399513f44d77dfe0eb026ad7c9a19b"},
|
2316 |
+
{file = "wrapt-1.17.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:e496a8ce2c256da1eb98bd15803a79bee00fc351f5dfb9ea82594a3f058309e0"},
|
2317 |
+
{file = "wrapt-1.17.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40d615e4fe22f4ad3528448c193b218e077656ca9ccb22ce2cb20db730f8d306"},
|
2318 |
+
{file = "wrapt-1.17.2-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a5aaeff38654462bc4b09023918b7f21790efb807f54c000a39d41d69cf552cb"},
|
2319 |
+
{file = "wrapt-1.17.2-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9a7d15bbd2bc99e92e39f49a04653062ee6085c0e18b3b7512a4f2fe91f2d681"},
|
2320 |
+
{file = "wrapt-1.17.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:e3890b508a23299083e065f435a492b5435eba6e304a7114d2f919d400888cc6"},
|
2321 |
+
{file = "wrapt-1.17.2-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:8c8b293cd65ad716d13d8dd3624e42e5a19cc2a2f1acc74b30c2c13f15cb61a6"},
|
2322 |
+
{file = "wrapt-1.17.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:4c82b8785d98cdd9fed4cac84d765d234ed3251bd6afe34cb7ac523cb93e8b4f"},
|
2323 |
+
{file = "wrapt-1.17.2-cp313-cp313t-win32.whl", hash = "sha256:13e6afb7fe71fe7485a4550a8844cc9ffbe263c0f1a1eea569bc7091d4898555"},
|
2324 |
+
{file = "wrapt-1.17.2-cp313-cp313t-win_amd64.whl", hash = "sha256:eaf675418ed6b3b31c7a989fd007fa7c3be66ce14e5c3b27336383604c9da85c"},
|
2325 |
+
{file = "wrapt-1.17.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:5c803c401ea1c1c18de70a06a6f79fcc9c5acfc79133e9869e730ad7f8ad8ef9"},
|
2326 |
+
{file = "wrapt-1.17.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:f917c1180fdb8623c2b75a99192f4025e412597c50b2ac870f156de8fb101119"},
|
2327 |
+
{file = "wrapt-1.17.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:ecc840861360ba9d176d413a5489b9a0aff6d6303d7e733e2c4623cfa26904a6"},
|
2328 |
+
{file = "wrapt-1.17.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bb87745b2e6dc56361bfde481d5a378dc314b252a98d7dd19a651a3fa58f24a9"},
|
2329 |
+
{file = "wrapt-1.17.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:58455b79ec2661c3600e65c0a716955adc2410f7383755d537584b0de41b1d8a"},
|
2330 |
+
{file = "wrapt-1.17.2-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b4e42a40a5e164cbfdb7b386c966a588b1047558a990981ace551ed7e12ca9c2"},
|
2331 |
+
{file = "wrapt-1.17.2-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:91bd7d1773e64019f9288b7a5101f3ae50d3d8e6b1de7edee9c2ccc1d32f0c0a"},
|
2332 |
+
{file = "wrapt-1.17.2-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:bb90fb8bda722a1b9d48ac1e6c38f923ea757b3baf8ebd0c82e09c5c1a0e7a04"},
|
2333 |
+
{file = "wrapt-1.17.2-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:08e7ce672e35efa54c5024936e559469436f8b8096253404faeb54d2a878416f"},
|
2334 |
+
{file = "wrapt-1.17.2-cp38-cp38-win32.whl", hash = "sha256:410a92fefd2e0e10d26210e1dfb4a876ddaf8439ef60d6434f21ef8d87efc5b7"},
|
2335 |
+
{file = "wrapt-1.17.2-cp38-cp38-win_amd64.whl", hash = "sha256:95c658736ec15602da0ed73f312d410117723914a5c91a14ee4cdd72f1d790b3"},
|
2336 |
+
{file = "wrapt-1.17.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:99039fa9e6306880572915728d7f6c24a86ec57b0a83f6b2491e1d8ab0235b9a"},
|
2337 |
+
{file = "wrapt-1.17.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2696993ee1eebd20b8e4ee4356483c4cb696066ddc24bd70bcbb80fa56ff9061"},
|
2338 |
+
{file = "wrapt-1.17.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:612dff5db80beef9e649c6d803a8d50c409082f1fedc9dbcdfde2983b2025b82"},
|
2339 |
+
{file = "wrapt-1.17.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:62c2caa1585c82b3f7a7ab56afef7b3602021d6da34fbc1cf234ff139fed3cd9"},
|
2340 |
+
{file = "wrapt-1.17.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c958bcfd59bacc2d0249dcfe575e71da54f9dcf4a8bdf89c4cb9a68a1170d73f"},
|
2341 |
+
{file = "wrapt-1.17.2-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc78a84e2dfbc27afe4b2bd7c80c8db9bca75cc5b85df52bfe634596a1da846b"},
|
2342 |
+
{file = "wrapt-1.17.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:ba0f0eb61ef00ea10e00eb53a9129501f52385c44853dbd6c4ad3f403603083f"},
|
2343 |
+
{file = "wrapt-1.17.2-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:1e1fe0e6ab7775fd842bc39e86f6dcfc4507ab0ffe206093e76d61cde37225c8"},
|
2344 |
+
{file = "wrapt-1.17.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:c86563182421896d73858e08e1db93afdd2b947a70064b813d515d66549e15f9"},
|
2345 |
+
{file = "wrapt-1.17.2-cp39-cp39-win32.whl", hash = "sha256:f393cda562f79828f38a819f4788641ac7c4085f30f1ce1a68672baa686482bb"},
|
2346 |
+
{file = "wrapt-1.17.2-cp39-cp39-win_amd64.whl", hash = "sha256:36ccae62f64235cf8ddb682073a60519426fdd4725524ae38874adf72b5f2aeb"},
|
2347 |
+
{file = "wrapt-1.17.2-py3-none-any.whl", hash = "sha256:b18f2d1533a71f069c7f82d524a52599053d4c7166e9dd374ae2136b7f40f7c8"},
|
2348 |
+
{file = "wrapt-1.17.2.tar.gz", hash = "sha256:41388e9d4d1522446fe79d3213196bd9e3b301a336965b9e27ca2788ebd122f3"},
|
2349 |
]
|
2350 |
|
2351 |
[[package]]
|
|
|
2362 |
[metadata]
|
2363 |
lock-version = "2.0"
|
2364 |
python-versions = "^3.11"
|
2365 |
+
content-hash = "ebb458fbd7b9383309f68daa93e48a205a42b40bbe921205c3e61caabc323ff1"
|
pyproject.toml
CHANGED
@@ -1,5 +1,5 @@
|
|
1 |
[tool.poetry]
|
2 |
-
name = "
|
3 |
version = "0.1.0"
|
4 |
description = ""
|
5 |
authors = ["Collin Dutter <[email protected]>", "Kate Forsberg <[email protected]>"]
|
@@ -7,7 +7,7 @@ readme = "README.md"
|
|
7 |
|
8 |
[tool.poetry.dependencies]
|
9 |
python = "^3.11"
|
10 |
-
griptape = { git = "https://github.com/griptape-ai/griptape.git", rev = "
|
11 |
python-statemachine = {extras = ["diagrams"], version = "^2.3.6"}
|
12 |
pyyaml = "^6.0.2"
|
13 |
schema = "^0.7.7"
|
|
|
1 |
[tool.poetry]
|
2 |
+
name = "uw_programmatic"
|
3 |
version = "0.1.0"
|
4 |
description = ""
|
5 |
authors = ["Collin Dutter <[email protected]>", "Kate Forsberg <[email protected]>"]
|
|
|
7 |
|
8 |
[tool.poetry.dependencies]
|
9 |
python = "^3.11"
|
10 |
+
griptape = { git = "https://github.com/griptape-ai/griptape.git", rev = "main" }
|
11 |
python-statemachine = {extras = ["diagrams"], version = "^2.3.6"}
|
12 |
pyyaml = "^6.0.2"
|
13 |
schema = "^0.7.7"
|
requirements.txt
CHANGED
@@ -1,4 +1,4 @@
|
|
1 |
-
griptape
|
2 |
python-statemachine[diagrams]==2.3.6
|
3 |
pyyaml==6.0.2
|
4 |
schema==0.7.7
|
|
|
1 |
+
griptape @ git+https://github.com/griptape-ai/griptape.git@main
|
2 |
python-statemachine[diagrams]==2.3.6
|
3 |
pyyaml==6.0.2
|
4 |
schema==0.7.7
|
uw_programmatic/base_machine.py
CHANGED
@@ -10,7 +10,7 @@ from typing import TYPE_CHECKING, cast
|
|
10 |
|
11 |
import requests
|
12 |
from dotenv import load_dotenv
|
13 |
-
from griptape.artifacts import ListArtifact, TextArtifact
|
14 |
from griptape.configs import Defaults
|
15 |
from griptape.configs.drivers import (
|
16 |
OpenAiDriversConfig,
|
@@ -37,10 +37,12 @@ from griptape.rules import Rule, Ruleset
|
|
37 |
from griptape.structures import Agent, Workflow
|
38 |
from griptape.tasks import CodeExecutionTask, StructureRunTask, ToolTask
|
39 |
from griptape.tools import RagTool
|
40 |
-
from parsers import UWConfigParser
|
41 |
from statemachine import State, StateMachine
|
42 |
from statemachine.factory import StateMachineMetaclass
|
43 |
|
|
|
|
|
|
|
44 |
logger = logging.getLogger(__name__)
|
45 |
logging.getLogger("griptape").setLevel(logging.ERROR)
|
46 |
|
@@ -92,6 +94,7 @@ class UWBaseMachine(StateMachine):
|
|
92 |
self.current_question_count = 0
|
93 |
# To keep vector stores on track
|
94 |
self.kb_ids = {}
|
|
|
95 |
|
96 |
self.state_status: dict[str, bool] = {}
|
97 |
|
@@ -303,10 +306,10 @@ class UWBaseMachine(StateMachine):
|
|
303 |
raise ValueError(response.status_code)
|
304 |
self.kb_ids = all_kbs
|
305 |
|
306 |
-
|
307 |
|
308 |
-
|
309 |
-
|
310 |
workflow = Workflow(id="create_question_workflow")
|
311 |
# How many questions still need to be created?
|
312 |
for _ in range(self.question_number - len(self.question_list)):
|
@@ -322,6 +325,30 @@ class UWBaseMachine(StateMachine):
|
|
322 |
workflow.add_task(end_task)
|
323 |
return workflow
|
324 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
325 |
# Ends the get_questions_workflow. Compiles all workflow outputs into one output.
|
326 |
def end_workflow(self, task: CodeExecutionTask) -> ListArtifact:
|
327 |
parent_outputs = task.parent_outputs
|
@@ -331,158 +358,3 @@ class UWBaseMachine(StateMachine):
|
|
331 |
continue
|
332 |
questions.append(outputs)
|
333 |
return ListArtifact(questions)
|
334 |
-
|
335 |
-
# Generates one workflow to create a single question.
|
336 |
-
def get_single_question(self) -> Workflow:
|
337 |
-
question_generator = Workflow(id="single_question")
|
338 |
-
taxonomy = random.choice(self.taxonomy)
|
339 |
-
taxonomyprompt = {
|
340 |
-
"Knowledge": "Generate a quiz question based ONLY on this information: {{parent_outputs['information_task']}}, then write the answer to the question. The interrogative verb for the question should be randomly chosen from: 'define', 'list', 'state', 'identify','label'.",
|
341 |
-
"Comprehension": "Generate a quiz question based ONLY on this information: {{parent_outputs['information_task']}}, then write the answer to the question. The interrogative verb for the question should be randomly chosen from: 'explain', 'predict', 'interpret', 'infer', 'summarize', 'convert','give an example of x'.",
|
342 |
-
"Application": "Generate a quiz question based ONLY on this information: {{parent_outputs['information_task']}}, then write the answer to the question. The structure of the question should be randomly chosen from: 'How could x be used to y?', 'How would you show/make use of/modify/demonstrate/solve/apply x to conditions y?'",
|
343 |
-
}
|
344 |
-
pages, driver = self.get_vector_store_id_from_page()
|
345 |
-
get_information = StructureRunTask(
|
346 |
-
id="information_task",
|
347 |
-
input="What is the information in KB?",
|
348 |
-
structure_run_driver=LocalStructureRunDriver(
|
349 |
-
create_structure=lambda: self.make_rag_structure(driver)
|
350 |
-
),
|
351 |
-
child_ids=["get_question"],
|
352 |
-
)
|
353 |
-
# Get KBs and select it, assign it to the structure or create the structure right here.
|
354 |
-
# Rules for subject matter expert: return only a json with question and answer as keys.
|
355 |
-
generate_q_task = StructureRunTask(
|
356 |
-
id="get_question",
|
357 |
-
input=taxonomyprompt[taxonomy],
|
358 |
-
structure_run_driver=LocalStructureRunDriver(
|
359 |
-
create_structure=lambda: self.get_structure("subject_matter_expert")
|
360 |
-
),
|
361 |
-
parent_ids=["information_task"],
|
362 |
-
)
|
363 |
-
get_question_code_task = CodeExecutionTask(
|
364 |
-
id="get_only_question",
|
365 |
-
on_run=self.get_question_for_wrong_answers,
|
366 |
-
parent_ids=["get_question"],
|
367 |
-
child_ids=["wrong_answers"],
|
368 |
-
)
|
369 |
-
get_separated_answer_code_task = CodeExecutionTask(
|
370 |
-
id="get_separated_answer",
|
371 |
-
on_run=self.get_separated_answer_for_wrong_answers,
|
372 |
-
parent_ids=["get_question"],
|
373 |
-
child_ids=["wrong_answers"],
|
374 |
-
)
|
375 |
-
generate_wrong_answers = StructureRunTask(
|
376 |
-
id="wrong_answers",
|
377 |
-
input="""Write and return three incorrect answers for this question: {{parent_outputs['get_separated_question']}}. The correct answer to the question is: {{parent_outputs['get_separated_answer']}}, and incorrect answers should have similar sentence structure to the correct answer. Write the incorrect answers from this information: {{parent_outputs['information_task']}}""",
|
378 |
-
structure_run_driver=LocalStructureRunDriver(
|
379 |
-
create_structure=lambda: self.get_structure("wrong_answers_generator")
|
380 |
-
),
|
381 |
-
parent_ids=["get_only_question", "information_task"],
|
382 |
-
)
|
383 |
-
compile_task = CodeExecutionTask(
|
384 |
-
id="compile_task",
|
385 |
-
input=f"{pages}, {taxonomy}",
|
386 |
-
on_run=self.single_question_last_task,
|
387 |
-
parent_ids=["wrong_answers", "get_question"],
|
388 |
-
)
|
389 |
-
question_generator.add_tasks(
|
390 |
-
get_information,
|
391 |
-
generate_q_task,
|
392 |
-
get_question_code_task,
|
393 |
-
get_separated_answer_code_task,
|
394 |
-
generate_wrong_answers,
|
395 |
-
compile_task,
|
396 |
-
)
|
397 |
-
return question_generator
|
398 |
-
|
399 |
-
# Task to separate the Question into a string
|
400 |
-
def get_question_for_wrong_answers(self, task: CodeExecutionTask) -> TextArtifact:
|
401 |
-
parent_outputs = task.parent_outputs
|
402 |
-
question = parent_outputs["get_question"].value
|
403 |
-
question = json.loads(question)["Question"]
|
404 |
-
return TextArtifact(question)
|
405 |
-
|
406 |
-
# Task to separate the Answer into a string
|
407 |
-
def get_separated_answer_for_wrong_answers(
|
408 |
-
self, task: CodeExecutionTask
|
409 |
-
) -> TextArtifact:
|
410 |
-
parent_outputs = task.parent_outputs
|
411 |
-
answer = parent_outputs["get_question"].value
|
412 |
-
print(answer)
|
413 |
-
answer = json.loads(answer)["Answer"]
|
414 |
-
return TextArtifact(answer)
|
415 |
-
|
416 |
-
# Combines all the outputs into one dictionary that represents the question
|
417 |
-
def single_question_last_task(self, task: CodeExecutionTask) -> TextArtifact:
|
418 |
-
parent_outputs = task.parent_outputs
|
419 |
-
wrong_answers = parent_outputs["wrong_answers"].value # Output is a list
|
420 |
-
wrong_answers = wrong_answers.split("\n")
|
421 |
-
question_and_answer = parent_outputs["get_question"].value # Output is a json
|
422 |
-
try:
|
423 |
-
question_and_answer = json.loads(question_and_answer)
|
424 |
-
except:
|
425 |
-
question_and_answer = question_and_answer.split("\n")[1:]
|
426 |
-
question_and_answer = "".join(question_and_answer)
|
427 |
-
question_and_answer = json.loads(question_and_answer)
|
428 |
-
inputs = task.input.value.split(",")
|
429 |
-
question = {
|
430 |
-
"Question": question_and_answer["Question"],
|
431 |
-
"Answer": question_and_answer["Answer"],
|
432 |
-
"Wrong Answers": wrong_answers,
|
433 |
-
"Page": inputs[0],
|
434 |
-
"Taxonomy": inputs[1],
|
435 |
-
}
|
436 |
-
return TextArtifact(question)
|
437 |
-
|
438 |
-
# These are helper methods
|
439 |
-
# Picks the KB from the dictionary
|
440 |
-
def get_vector_store_id_from_page(
|
441 |
-
self,
|
442 |
-
) -> tuple[str, GriptapeCloudVectorStoreDriver]:
|
443 |
-
possible_kbs = {}
|
444 |
-
for name, kb_id in self.kb_ids.items():
|
445 |
-
page_nums = name.split("p")[1:]
|
446 |
-
start_page = int(page_nums[0].split("-")[0])
|
447 |
-
end_page = int(page_nums[1])
|
448 |
-
if end_page <= self.page_range[1] and start_page >= self.page_range[0]:
|
449 |
-
possible_kbs[kb_id] = f"{start_page}-{end_page}"
|
450 |
-
kb_id = random.choice(list(possible_kbs.keys()))
|
451 |
-
page_value = possible_kbs[kb_id]
|
452 |
-
return page_value, GriptapeCloudVectorStoreDriver(
|
453 |
-
api_key=os.getenv("GT_CLOUD_API_KEY", ""),
|
454 |
-
knowledge_base_id=kb_id,
|
455 |
-
)
|
456 |
-
|
457 |
-
# Uses this and all below to build the Rag Tool to get information from the KB
|
458 |
-
def build_rag_engine(
|
459 |
-
self, vector_store_driver: GriptapeCloudVectorStoreDriver
|
460 |
-
) -> RagEngine:
|
461 |
-
return RagEngine(
|
462 |
-
retrieval_stage=RetrievalRagStage(
|
463 |
-
retrieval_modules=[
|
464 |
-
VectorStoreRetrievalRagModule(
|
465 |
-
vector_store_driver=vector_store_driver,
|
466 |
-
)
|
467 |
-
],
|
468 |
-
),
|
469 |
-
response_stage=ResponseRagStage(
|
470 |
-
response_modules=[TextChunksResponseRagModule()]
|
471 |
-
),
|
472 |
-
)
|
473 |
-
|
474 |
-
def build_rag_tool(self, engine: RagEngine) -> RagTool:
|
475 |
-
return RagTool(
|
476 |
-
description="Contains information about the textbook. Use it ONLY for context.",
|
477 |
-
rag_engine=engine,
|
478 |
-
)
|
479 |
-
|
480 |
-
def make_rag_structure(
|
481 |
-
self, vector_store: GriptapeCloudVectorStoreDriver
|
482 |
-
) -> Structure:
|
483 |
-
if vector_store:
|
484 |
-
tool = self.build_rag_tool(self.build_rag_engine(vector_store))
|
485 |
-
use_rag_task = ToolTask(tool=tool)
|
486 |
-
return Agent(tasks=[use_rag_task])
|
487 |
-
errormsg = "No Vector Store"
|
488 |
-
raise ValueError(errormsg)
|
|
|
10 |
|
11 |
import requests
|
12 |
from dotenv import load_dotenv
|
13 |
+
from griptape.artifacts import ListArtifact, TextArtifact, InfoArtifact, BaseArtifact
|
14 |
from griptape.configs import Defaults
|
15 |
from griptape.configs.drivers import (
|
16 |
OpenAiDriversConfig,
|
|
|
37 |
from griptape.structures import Agent, Workflow
|
38 |
from griptape.tasks import CodeExecutionTask, StructureRunTask, ToolTask
|
39 |
from griptape.tools import RagTool
|
|
|
40 |
from statemachine import State, StateMachine
|
41 |
from statemachine.factory import StateMachineMetaclass
|
42 |
|
43 |
+
from parsers import UWConfigParser
|
44 |
+
from uw_programmatic.single_question_machine import SingleQuestion
|
45 |
+
|
46 |
logger = logging.getLogger(__name__)
|
47 |
logging.getLogger("griptape").setLevel(logging.ERROR)
|
48 |
|
|
|
94 |
self.current_question_count = 0
|
95 |
# To keep vector stores on track
|
96 |
self.kb_ids = {}
|
97 |
+
self.rejected_questions: list = []
|
98 |
|
99 |
self.state_status: dict[str, bool] = {}
|
100 |
|
|
|
306 |
raise ValueError(response.status_code)
|
307 |
self.kb_ids = all_kbs
|
308 |
|
309 |
+
# ALL METHODS RELATING TO THE WORKFLOW AND PIPELINE ARE BELOW THIS LINE
|
310 |
|
311 |
+
# This is the overarching workflow. Creates a workflow with get_single_question x amount of times.
|
312 |
+
# def get_questions_workflow(self) -> Workflow:
|
313 |
workflow = Workflow(id="create_question_workflow")
|
314 |
# How many questions still need to be created?
|
315 |
for _ in range(self.question_number - len(self.question_list)):
|
|
|
325 |
workflow.add_task(end_task)
|
326 |
return workflow
|
327 |
|
328 |
+
def workflow_cet(self, task: CodeExecutionTask) -> BaseArtifact:
|
329 |
+
question_machine = SingleQuestion.create_statemachine(
|
330 |
+
self.taxonomy, self.kb_ids, self.page_range
|
331 |
+
)
|
332 |
+
question_machine.send("start_up")
|
333 |
+
if question_machine.rejected:
|
334 |
+
self.rejected_questions.append(question_machine.generated_question)
|
335 |
+
return InfoArtifact("Question is Rejected")
|
336 |
+
return TextArtifact(question_machine.generated_question)
|
337 |
+
|
338 |
+
def get_questions_workflow(self) -> Workflow:
|
339 |
+
workflow = Workflow(id="create_question_workflow")
|
340 |
+
# How many questions still need to be created?
|
341 |
+
for _ in range(self.question_number - len(self.question_list)):
|
342 |
+
task = CodeExecutionTask(
|
343 |
+
on_run=self.workflow_cet,
|
344 |
+
child_ids=["end_task"],
|
345 |
+
)
|
346 |
+
# Create X amount of workflows to run for X amount of questions needed.
|
347 |
+
workflow.add_task(task)
|
348 |
+
end_task = CodeExecutionTask(id="end_task", on_run=self.end_workflow)
|
349 |
+
workflow.add_task(end_task)
|
350 |
+
return workflow
|
351 |
+
|
352 |
# Ends the get_questions_workflow. Compiles all workflow outputs into one output.
|
353 |
def end_workflow(self, task: CodeExecutionTask) -> ListArtifact:
|
354 |
parent_outputs = task.parent_outputs
|
|
|
358 |
continue
|
359 |
questions.append(outputs)
|
360 |
return ListArtifact(questions)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
uw_programmatic/single_question_machine.py
ADDED
@@ -0,0 +1,466 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from __future__ import annotations
|
2 |
+
|
3 |
+
import os
|
4 |
+
import random
|
5 |
+
import schema
|
6 |
+
from typing import TYPE_CHECKING, cast
|
7 |
+
|
8 |
+
from griptape.configs import Defaults
|
9 |
+
from griptape.configs.drivers import (
|
10 |
+
OpenAiDriversConfig,
|
11 |
+
)
|
12 |
+
from griptape.drivers import GriptapeCloudVectorStoreDriver, OpenAiChatPromptDriver
|
13 |
+
from griptape.engines.rag import RagEngine
|
14 |
+
from griptape.engines.rag.modules import (
|
15 |
+
TextChunksResponseRagModule,
|
16 |
+
VectorStoreRetrievalRagModule,
|
17 |
+
)
|
18 |
+
from griptape.engines.rag.stages import ResponseRagStage, RetrievalRagStage
|
19 |
+
from griptape.events import (
|
20 |
+
BaseEvent,
|
21 |
+
EventBus,
|
22 |
+
EventListener,
|
23 |
+
FinishStructureRunEvent,
|
24 |
+
)
|
25 |
+
from griptape.rules import Rule, Ruleset
|
26 |
+
from griptape.structures import Agent
|
27 |
+
from griptape.tasks import ToolTask
|
28 |
+
from griptape.tools import RagTool
|
29 |
+
from statemachine import State, StateMachine
|
30 |
+
from statemachine.factory import StateMachineMetaclass
|
31 |
+
|
32 |
+
if TYPE_CHECKING:
|
33 |
+
from griptape.structures import Structure
|
34 |
+
|
35 |
+
Defaults.drivers_config = OpenAiDriversConfig(
|
36 |
+
prompt_driver=OpenAiChatPromptDriver(model="gpt-4o", max_tokens=4096)
|
37 |
+
)
|
38 |
+
|
39 |
+
# States will be:
|
40 |
+
# random_selection (does dice roll and kb selection plus information task)
|
41 |
+
# Question generation (generates the question and answer properly)
|
42 |
+
# Wrong answer generation (generates a wrong answer?)
|
43 |
+
# Compile task (finishes all and compiles it into a neat thing)
|
44 |
+
# TODO: How to get it to return everything
|
45 |
+
STATES = [
|
46 |
+
"start",
|
47 |
+
"random_selection",
|
48 |
+
"get_textbook",
|
49 |
+
"question_generation",
|
50 |
+
"wrong_answer_generation",
|
51 |
+
"audit_question",
|
52 |
+
"compile_task",
|
53 |
+
"end",
|
54 |
+
]
|
55 |
+
START = "start"
|
56 |
+
END = "end"
|
57 |
+
TRANSITIONS = [
|
58 |
+
{
|
59 |
+
"event": "next_state",
|
60 |
+
"transitions": [
|
61 |
+
{"from": "random_selection", "to": "get_textbook"},
|
62 |
+
{"from": "get_textbook", "to": "question_generation"},
|
63 |
+
{"from": "question_generation", "to": "wrong_answer_generation"},
|
64 |
+
{"from": "wrong_answer_generation", "to": "audit_question"},
|
65 |
+
{"from": "audit_question", "to": "compile_task"},
|
66 |
+
{"from": "compile_task", "to": "end"},
|
67 |
+
],
|
68 |
+
},
|
69 |
+
{
|
70 |
+
"event": "redo",
|
71 |
+
"transitions": [
|
72 |
+
{"from": "audit_question", "to": "wrong_answer_generation"},
|
73 |
+
],
|
74 |
+
},
|
75 |
+
{
|
76 |
+
"event": "start_up",
|
77 |
+
"transitions": [
|
78 |
+
{"from": "start", "to": "random_selection"},
|
79 |
+
],
|
80 |
+
},
|
81 |
+
]
|
82 |
+
RULESETS = {
|
83 |
+
"specific_question_creator": [
|
84 |
+
"""Question should be a multiple choice quiz style question that assesses a students knowledge of the information in the knowledge base (which should be referred
|
85 |
+
to as 'the textbook'). Answer should be a correct answer to the question that
|
86 |
+
uses information from the knowledge base. Do not return incorrect answers.""",
|
87 |
+
"""The length of the question should be 30 words at most.""",
|
88 |
+
"""Question should never reference or ask about an entire section, never reference
|
89 |
+
or ask about a quote in the knowledge base, never ask for the page number of
|
90 |
+
some information, and never ask for information about the file, document, or
|
91 |
+
knowledge base.""",
|
92 |
+
"""The answer to the question should be short, but should not omit important
|
93 |
+
information.""",
|
94 |
+
],
|
95 |
+
"incorrect_answers_creator": [
|
96 |
+
"""All incorrect answers should be different, but plausible answers to the question.""",
|
97 |
+
"""Incorrect answers may reference material from the knowledge base, but must
|
98 |
+
not be correct answers to the question""",
|
99 |
+
"""Length of incorrect answers should be 10 words max, 5 words minimum""",
|
100 |
+
],
|
101 |
+
"question_auditor_ruleset": [
|
102 |
+
# """If any of the rules are false, return false and why. If they are all true, return true.""",
|
103 |
+
"""If any of the rules are false, return True for the part of the question why they are false.""",
|
104 |
+
"""The reason why it is false is between 3-7 words""",
|
105 |
+
"""There is exactly one correct answer.""",
|
106 |
+
"""The correct answer has a clearly distinct meaning from all incorrect answers.""",
|
107 |
+
"""Incorrect answers are plausible to someone who does not know the correct answer""",
|
108 |
+
"""All answer choices are on the same topic as the question""",
|
109 |
+
"""All answer choices are relevant to the context of the question, with no unrelated concepts or entities.""",
|
110 |
+
"""All answers have semantically different meanings from one another, even if they are syntactically similar.""",
|
111 |
+
"""All answer choices are parallel to one another with respect to grammatical structure, length, and complexity""",
|
112 |
+
],
|
113 |
+
}
|
114 |
+
STRUCTURES = {
|
115 |
+
"subject_matter_expert": {"ruleset_ids": ["specific_question_creator"]},
|
116 |
+
"wrong_answers_generator": {"ruleset_ids": ["incorrect_answers_creator"]},
|
117 |
+
"question_auditor": {"ruleset_ids": ["question_auditor_ruleset"]},
|
118 |
+
}
|
119 |
+
|
120 |
+
|
121 |
+
class SingleQuestion(StateMachine):
|
122 |
+
"Base class for machine"
|
123 |
+
|
124 |
+
def __init__(self, **kwargs):
|
125 |
+
self._structures = {}
|
126 |
+
self.kb_ids = kwargs["kb_ids"]
|
127 |
+
self.page_range: tuple = kwargs["page_range"]
|
128 |
+
self.taxonomy_choices: list = kwargs["taxonomy_choices"]
|
129 |
+
self.question: str = ""
|
130 |
+
self.answer: str = ""
|
131 |
+
self.wrong_answers: list = []
|
132 |
+
self.generated_question: dict = {}
|
133 |
+
self.taxonomy: str = ""
|
134 |
+
self.rejected: bool = False
|
135 |
+
self.give_up: int = 0
|
136 |
+
self.reject_reason: str = ""
|
137 |
+
|
138 |
+
def on_event(event: BaseEvent) -> None:
|
139 |
+
"Takes in griptape events and fixes them"
|
140 |
+
try:
|
141 |
+
self.send("griptape_event", event_=event.to_dict())
|
142 |
+
except Exception as e:
|
143 |
+
errormsg = f"Would not allow Griptape Event to be sent"
|
144 |
+
raise ValueError(errormsg) from e
|
145 |
+
|
146 |
+
EventBus.clear_event_listeners()
|
147 |
+
EventBus.add_event_listener(
|
148 |
+
EventListener(on_event, event_types=[FinishStructureRunEvent]),
|
149 |
+
)
|
150 |
+
super().__init__()
|
151 |
+
|
152 |
+
@classmethod
|
153 |
+
def create_statemachine(
|
154 |
+
cls, taxonomy_choices: list, kb_ids: dict, page_range: tuple
|
155 |
+
) -> SingleQuestion:
|
156 |
+
states_instances = {}
|
157 |
+
events = {}
|
158 |
+
for state in STATES:
|
159 |
+
initial = state == START
|
160 |
+
final = state == END
|
161 |
+
# Creates the states
|
162 |
+
states_instances[state] = State(value=state, initial=initial, final=final)
|
163 |
+
if not (initial or final or state in ("random_selection", "compile_task")):
|
164 |
+
# Creates the internal transition
|
165 |
+
transition = states_instances[state].to(
|
166 |
+
states_instances[state],
|
167 |
+
event="griptape_event",
|
168 |
+
on=f"on_event_{state}",
|
169 |
+
internal=True,
|
170 |
+
)
|
171 |
+
if "griptape_event" in events:
|
172 |
+
events["griptape_event"] |= transition
|
173 |
+
else:
|
174 |
+
events["griptape_event"] = transition
|
175 |
+
for transition in TRANSITIONS:
|
176 |
+
for transition_data in transition["transitions"]:
|
177 |
+
transition_value = states_instances[transition_data["from"]].to(
|
178 |
+
states_instances[transition_data["to"]],
|
179 |
+
event=transition["event"],
|
180 |
+
internal=False,
|
181 |
+
)
|
182 |
+
if transition["event"] in events:
|
183 |
+
events[transition["event"]] |= transition_value
|
184 |
+
else:
|
185 |
+
events[transition["event"]] = transition_value
|
186 |
+
attrs_mapper = {
|
187 |
+
**states_instances,
|
188 |
+
**events,
|
189 |
+
}
|
190 |
+
kwargs = {
|
191 |
+
"taxonomy_choices": taxonomy_choices,
|
192 |
+
"kb_ids": kb_ids,
|
193 |
+
"page_range": page_range,
|
194 |
+
}
|
195 |
+
return cast(
|
196 |
+
SingleQuestion,
|
197 |
+
StateMachineMetaclass(cls.__name__, (cls,), attrs_mapper)(**kwargs),
|
198 |
+
)
|
199 |
+
|
200 |
+
# BENEATH ARE THE NECESSARY METHODS
|
201 |
+
|
202 |
+
def on_enter_random_selection(self) -> None:
|
203 |
+
# Get the random taxonomy
|
204 |
+
self.taxonomy = random.choice(self.taxonomy_choices)
|
205 |
+
# I changed this so I didn't have to do an "eval". Not sure how it'll work.
|
206 |
+
taxonomy_prompt = {
|
207 |
+
"Knowledge": "Generate a quiz question based ONLY on the information. Then write the answer to the question. The interrogative verb for the question should be randomly chosen from: 'define', 'list', 'state', 'identify','label'. INFORMATION: ",
|
208 |
+
"Comprehension": "Generate a quiz question based ONLY on the information. Then write the answer to the question. The interrogative verb for the question should be randomly chosen from: 'explain', 'predict', 'interpret', 'infer', 'summarize', 'convert','give an example of x'. INFORMATION: ",
|
209 |
+
"Application": "Generate a quiz question based ONLY on the information. Then write the answer to the question. The structure of the question should be randomly chosen from: 'How could x be used to y?', 'How would you show/make use of/modify/demonstrate/solve/apply x to conditions y?' INFORMATION: ",
|
210 |
+
}
|
211 |
+
self.taxonomy_prompt = taxonomy_prompt[self.taxonomy]
|
212 |
+
# get the random page range and GTCVectorStoreDriver
|
213 |
+
pages, driver = self.get_vector_store_id_from_page()
|
214 |
+
self.pages = pages
|
215 |
+
self.driver = driver
|
216 |
+
self.send("next_state")
|
217 |
+
|
218 |
+
def on_enter_get_textbook(self) -> None:
|
219 |
+
# I am going to create the agent in this method
|
220 |
+
if "get_information" not in self._structures:
|
221 |
+
tool = self.build_rag_tool(self.build_rag_engine(self.driver))
|
222 |
+
use_rag_task = ToolTask(tool=tool)
|
223 |
+
information_retriever = Agent(id="get_information", tasks=[use_rag_task])
|
224 |
+
self._structures["get_information"] = information_retriever
|
225 |
+
self._structures["get_information"].run("What is the information in KB?")
|
226 |
+
|
227 |
+
def on_event_get_textbook(self, event_: dict) -> None:
|
228 |
+
event_type = event_["type"]
|
229 |
+
match event_type:
|
230 |
+
case "FinishStructureRunEvent":
|
231 |
+
structure_id = event_["structure_id"]
|
232 |
+
match structure_id:
|
233 |
+
case "get_information":
|
234 |
+
self.information = event_["output_task_output"]["value"]
|
235 |
+
self.send("next_state")
|
236 |
+
|
237 |
+
def on_enter_question_generation(self) -> None:
|
238 |
+
if "subject_matter_expert" not in self._structures:
|
239 |
+
rulesets = self.get_rulesets("subject_matter_expert")
|
240 |
+
subject_matter_expert = Agent(id="subject_matter_expert", rulesets=rulesets)
|
241 |
+
subject_matter_expert.task.output_schema = schema.Schema(
|
242 |
+
{"Question": str, "Answer": str}
|
243 |
+
)
|
244 |
+
self._structures["subject_matter_expert"] = subject_matter_expert
|
245 |
+
self._structures["subject_matter_expert"].run(
|
246 |
+
f"{self.taxonomy_prompt}{self.information}"
|
247 |
+
) # TODO: Will this work the same as before
|
248 |
+
|
249 |
+
def on_event_question_generation(self, event_: dict) -> None:
|
250 |
+
event_type = event_["type"]
|
251 |
+
match event_type:
|
252 |
+
case "FinishStructureRunEvent":
|
253 |
+
structure_id = event_["structure_id"]
|
254 |
+
match structure_id:
|
255 |
+
case "subject_matter_expert":
|
256 |
+
question = event_["output_task_output"]["value"]
|
257 |
+
# save question and answer separately
|
258 |
+
self.question = question["Question"]
|
259 |
+
self.answer = question["Answer"]
|
260 |
+
self.send("next_state")
|
261 |
+
|
262 |
+
def on_enter_wrong_answer_generation(self) -> None:
|
263 |
+
if "wrong_answers_generator" not in self._structures:
|
264 |
+
rulesets = self.get_rulesets("wrong_answers_generator")
|
265 |
+
wrong_answers_generator = Agent(
|
266 |
+
id="wrong_answers_generator", rulesets=rulesets
|
267 |
+
)
|
268 |
+
wrong_answers_generator.task.output_schema = schema.Schema(
|
269 |
+
{"1": str, "2": str, "3": str, "4": str}
|
270 |
+
)
|
271 |
+
self._structures["wrong_answers_generator"] = wrong_answers_generator
|
272 |
+
if not self.rejected:
|
273 |
+
prompt = f"""Write and return four incorrect answers for this question: {self.question}. The correct answer to the question is: {self.answer}, and incorrect answers should have similar sentence structure to the correct answer. Write the incorrect answers from this information: {self.information}"""
|
274 |
+
else:
|
275 |
+
prompt = f"""Write and return four incorrect answers for this question: {self.question}. The correct answer to the question is: {self.answer}, and incorrect answers should have similar sentence structure to the correct answer. Write the incorrect answers from this information: {self.information}. Answers should not be: {self.reject_reason}."""
|
276 |
+
print(self.reject_reason)
|
277 |
+
self._structures["wrong_answers_generator"].run(prompt)
|
278 |
+
|
279 |
+
def on_event_wrong_answer_generation(self, event_: dict) -> None:
|
280 |
+
event_type = event_["type"]
|
281 |
+
match event_type:
|
282 |
+
case "FinishStructureRunEvent":
|
283 |
+
structure_id = event_["structure_id"]
|
284 |
+
match structure_id:
|
285 |
+
case "wrong_answers_generator":
|
286 |
+
wrong_answers = event_["output_task_output"]["value"]
|
287 |
+
wrong_answers = [wrong_answers[x] for x in ["1", "2", "3", "4"]]
|
288 |
+
# save question and answer separately
|
289 |
+
self.wrong_answers = wrong_answers
|
290 |
+
self.send("next_state")
|
291 |
+
|
292 |
+
def on_enter_audit_question(self) -> None:
|
293 |
+
if "question_auditor" not in self._structures:
|
294 |
+
rulesets = self.get_rulesets("question_auditor")
|
295 |
+
question_auditor = Agent(id="question_auditor", rulesets=rulesets)
|
296 |
+
# question_auditor.task.output_schema = schema.Schema(
|
297 |
+
# {
|
298 |
+
# "keep": bool,
|
299 |
+
# "why": schema.Optional(
|
300 |
+
# {
|
301 |
+
# "Question": bool,
|
302 |
+
# "Answer": bool,
|
303 |
+
# "Wrong Answers": bool,
|
304 |
+
# "Reason": str,
|
305 |
+
# }
|
306 |
+
# ),
|
307 |
+
# }
|
308 |
+
# )
|
309 |
+
question_auditor.task.output_schema = schema.Schema(
|
310 |
+
{
|
311 |
+
"Bad_Question": bool,
|
312 |
+
"Bad_Answer": bool,
|
313 |
+
"Bad_Wrong_Answers": bool,
|
314 |
+
"Reason": schema.Optional(str),
|
315 |
+
}
|
316 |
+
)
|
317 |
+
self._structures["question_auditor"] = question_auditor
|
318 |
+
# prompt = f"This is the question: {self.question}. This is the answer: {self.answer}. These are the incorrect answers:{self.wrong_answers}. This is the information given:{self.information}. IF the question is not kept, return True for the reason why from 'Question', 'Answers', 'Wrong Answers'."
|
319 |
+
prompt = f"This is the question: {self.question}. This is the answer: {self.answer}. These are the incorrect answers:{self.wrong_answers}. This is the information given:{self.information}. IF the question is should not be kept, return True for the reason why from 'Bad_Question', 'Bad_Answer', 'Bad_Wrong_Answers'."
|
320 |
+
self._structures["question_auditor"].run(prompt)
|
321 |
+
|
322 |
+
def on_event_audit_question(self, event_: dict) -> None:
|
323 |
+
event_type = event_["type"]
|
324 |
+
match event_type:
|
325 |
+
case "FinishStructureRunEvent":
|
326 |
+
structure_id = event_["structure_id"]
|
327 |
+
match structure_id:
|
328 |
+
case "question_auditor":
|
329 |
+
if self.give_up >= 3:
|
330 |
+
self.rejected = True
|
331 |
+
self.reject_reason += " \n Too many tries"
|
332 |
+
self.send("next_state")
|
333 |
+
return
|
334 |
+
self.give_up += 1
|
335 |
+
audit = event_["output_task_output"]["value"]
|
336 |
+
# TODO: Go back to some other state that checks the quality bar
|
337 |
+
# if audit["keep"]:
|
338 |
+
# self.send("next_state")
|
339 |
+
# else:
|
340 |
+
# self.rejected = True
|
341 |
+
# self.reject_reason = audit["why"]["Reason"]
|
342 |
+
# if audit["why"]["Question"]:
|
343 |
+
# self.send("next_state")
|
344 |
+
# return
|
345 |
+
# if audit["why"]["Answer"]:
|
346 |
+
# self.send("next_state")
|
347 |
+
# return
|
348 |
+
# if audit["why"]["Wrong Answers"]:
|
349 |
+
# self.send(
|
350 |
+
# "redo"
|
351 |
+
# ) # Goes back to generate more wrong answers
|
352 |
+
# return
|
353 |
+
# self.send("next_state")
|
354 |
+
print(audit)
|
355 |
+
if audit["Bad_Question"]:
|
356 |
+
self.rejected = True
|
357 |
+
self.reject_reason = audit["Reason"]
|
358 |
+
self.reject_classification = "Bad_Question"
|
359 |
+
self.send("next_state")
|
360 |
+
return
|
361 |
+
if audit["Bad_Answer"]:
|
362 |
+
self.rejected = True
|
363 |
+
self.reject_reason = audit["Reason"]
|
364 |
+
self.reject_classification = "Bad_Answer"
|
365 |
+
self.send("next_state")
|
366 |
+
return
|
367 |
+
if audit["Bad_Wrong_Answers"]:
|
368 |
+
self.rejected = True
|
369 |
+
self.reject_reason = audit["Reason"]
|
370 |
+
self.reject_classification = "Bad_Wrong_Answers"
|
371 |
+
self.send("redo")
|
372 |
+
return
|
373 |
+
self.rejected = False
|
374 |
+
self.send("next_state")
|
375 |
+
|
376 |
+
def on_enter_compile_task(self) -> None:
|
377 |
+
# TODO: Logic to determine if I should go back to wrong answers
|
378 |
+
question = {
|
379 |
+
"Question": self.question,
|
380 |
+
"Answer": self.answer,
|
381 |
+
"Wrong Answers": self.wrong_answers,
|
382 |
+
"Page": self.pages,
|
383 |
+
"Taxonomy": self.taxonomy,
|
384 |
+
}
|
385 |
+
if self.rejected:
|
386 |
+
question["Reject Classification"] = self.reject_classification
|
387 |
+
question["Reason"] = self.reject_reason
|
388 |
+
self.generated_question = question
|
389 |
+
self.send("next_state")
|
390 |
+
|
391 |
+
# TODO : Does this return output
|
392 |
+
def on_enter_end(self) -> dict:
|
393 |
+
return self.generated_question
|
394 |
+
|
395 |
+
# HELPER METHODS BELOW
|
396 |
+
|
397 |
+
def get_rulesets(self, structure_id: str) -> list:
|
398 |
+
final_ruleset_list = []
|
399 |
+
ruleset_ids = STRUCTURES[structure_id]["ruleset_ids"]
|
400 |
+
for ruleset_id in ruleset_ids:
|
401 |
+
ruleset_rules = RULESETS[ruleset_id]
|
402 |
+
rules = [Rule(rule) for rule in ruleset_rules]
|
403 |
+
final_ruleset_list.append(Ruleset(ruleset_id, rules=rules))
|
404 |
+
return final_ruleset_list
|
405 |
+
|
406 |
+
def get_vector_store_id_from_page(
|
407 |
+
self,
|
408 |
+
) -> tuple[str, GriptapeCloudVectorStoreDriver]:
|
409 |
+
possible_kbs = {}
|
410 |
+
for name, kb_id in self.kb_ids.items():
|
411 |
+
page_nums = name.split("p")[1:]
|
412 |
+
start_page = int(page_nums[0].split("-")[0])
|
413 |
+
end_page = int(page_nums[1])
|
414 |
+
if end_page <= self.page_range[1] and start_page >= self.page_range[0]:
|
415 |
+
possible_kbs[kb_id] = f"{start_page}-{end_page}"
|
416 |
+
kb_id = random.choice(list(possible_kbs.keys()))
|
417 |
+
page_value = possible_kbs[kb_id]
|
418 |
+
return page_value, GriptapeCloudVectorStoreDriver(
|
419 |
+
api_key=os.getenv("GT_CLOUD_API_KEY", ""),
|
420 |
+
knowledge_base_id=kb_id,
|
421 |
+
)
|
422 |
+
|
423 |
+
# Uses this and all below to build the Rag Tool to get information from the KB
|
424 |
+
def build_rag_engine(
|
425 |
+
self, vector_store_driver: GriptapeCloudVectorStoreDriver
|
426 |
+
) -> RagEngine:
|
427 |
+
return RagEngine(
|
428 |
+
retrieval_stage=RetrievalRagStage(
|
429 |
+
retrieval_modules=[
|
430 |
+
VectorStoreRetrievalRagModule(
|
431 |
+
vector_store_driver=vector_store_driver,
|
432 |
+
)
|
433 |
+
],
|
434 |
+
),
|
435 |
+
response_stage=ResponseRagStage(
|
436 |
+
response_modules=[TextChunksResponseRagModule()]
|
437 |
+
),
|
438 |
+
)
|
439 |
+
|
440 |
+
def build_rag_tool(self, engine: RagEngine) -> RagTool:
|
441 |
+
return RagTool(
|
442 |
+
description="Contains information about the textbook. Use it ONLY for context.",
|
443 |
+
rag_engine=engine,
|
444 |
+
)
|
445 |
+
|
446 |
+
def make_rag_structure(
|
447 |
+
self, vector_store: GriptapeCloudVectorStoreDriver
|
448 |
+
) -> Structure:
|
449 |
+
if vector_store:
|
450 |
+
tool = self.build_rag_tool(self.build_rag_engine(vector_store))
|
451 |
+
use_rag_task = ToolTask(tool=tool)
|
452 |
+
return Agent(tasks=[use_rag_task])
|
453 |
+
errormsg = "No Vector Store"
|
454 |
+
raise ValueError(errormsg)
|
455 |
+
|
456 |
+
|
457 |
+
if __name__ == "__main__":
    # Manual smoke test: build a machine over a single Comprehension-level KB
    # (pages 126-129 of the textbook) and run it end to end.
    flow = SingleQuestion.create_statemachine(
        ["Comprehension"],
        {"p126-p129": "9efbb8ab-6a5e-4bca-aab0-7f7500bfb7b5"},
        (120, 150),
    )
    flow.send("start_up")
    # When incorporating into the main flow - we can just get the result of flow.generated_question and use that value onwards
    # TODO: Do any events need to be sent?
    print(flow.generated_question)
|
uw_programmatic/uw_machine.py
CHANGED
@@ -1,14 +1,15 @@
|
|
1 |
from __future__ import annotations
|
2 |
|
3 |
import ast
|
|
|
4 |
import csv
|
5 |
import json
|
6 |
from pathlib import Path
|
7 |
import random
|
8 |
from typing import TYPE_CHECKING
|
9 |
-
|
10 |
from uw_programmatic.base_machine import UWBaseMachine
|
11 |
|
|
|
12 |
if TYPE_CHECKING:
|
13 |
from griptape.tools import BaseTool
|
14 |
|
@@ -27,6 +28,12 @@ class UWMachine(UWBaseMachine):
|
|
27 |
self.retrieve_vector_stores()
|
28 |
self.send("enter_first_state")
|
29 |
|
|
|
|
|
|
|
|
|
|
|
|
|
30 |
# The first state: Listens for Gradio and then gives us the parameters to search for.
|
31 |
# Reinitializes the Give Up counter.
|
32 |
def on_event_gather_parameters(self, event_: dict) -> None:
|
@@ -38,8 +45,6 @@ class UWMachine(UWBaseMachine):
|
|
38 |
self.page_range = parameters["page_range"]
|
39 |
self.question_number = parameters["question_number"]
|
40 |
self.taxonomy = parameters["taxonomy"]
|
41 |
-
self.current_question_count = 0
|
42 |
-
self.give_up_count = 0
|
43 |
self.send("next_state")
|
44 |
case _:
|
45 |
err_msg = f"Unexpected Transition Event ID: {event_value}."
|
@@ -82,6 +87,7 @@ class UWMachine(UWBaseMachine):
|
|
82 |
structure_id = event_value["structure_id"]
|
83 |
match structure_id:
|
84 |
case "create_question_workflow":
|
|
|
85 |
values = event_value["output_task_output"]["value"]
|
86 |
questions = [
|
87 |
ast.literal_eval(question["value"])
|
@@ -100,7 +106,23 @@ class UWMachine(UWBaseMachine):
|
|
100 |
def on_enter_assess_generated_q(self) -> None:
|
101 |
merged_list = [*self.question_list, *self.most_recent_questions]
|
102 |
prompt = f"{merged_list}"
|
103 |
-
self.get_structure("similarity_auditor")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
104 |
|
105 |
# Sets the returned question list (with similar questions wiped) equal to self.question_list
|
106 |
def on_event_assess_generated_q(self, event_: dict) -> None:
|
@@ -116,35 +138,29 @@ class UWMachine(UWBaseMachine):
|
|
116 |
case "similarity_auditor":
|
117 |
new_question_list = event_value["output_task_output"][
|
118 |
"value"
|
119 |
-
]
|
120 |
-
try:
|
121 |
-
new_question_list = json.loads(
|
122 |
-
new_question_list
|
123 |
-
) # This must be in that JSON format
|
124 |
-
except: # If not in JSON decode format
|
125 |
-
new_question_list = self.question_list
|
126 |
self.question_list = new_question_list
|
127 |
self.send("next_state") # go to Evaluate Q Count
|
128 |
|
129 |
# Writes and saves a csv in the correct format to outputs/professor_guide.csv
|
130 |
def on_enter_output_q(self) -> None:
|
131 |
-
|
132 |
-
|
133 |
-
) as file:
|
134 |
writer = csv.writer(file)
|
135 |
-
for question in
|
136 |
new_row = ["MC", "", 1]
|
137 |
-
new_row.append(
|
138 |
-
wrong_answers = list(
|
139 |
column = random.randint(1, len(wrong_answers) + 1)
|
140 |
new_row.append(column)
|
141 |
for i in range(1, len(wrong_answers) + 2):
|
142 |
if i == column:
|
143 |
-
new_row.append(
|
144 |
else:
|
145 |
new_row.append(wrong_answers.pop())
|
146 |
-
new_row.append(
|
147 |
-
new_row.append(
|
148 |
writer.writerow(new_row)
|
149 |
if self.give_up_count == 3:
|
150 |
writer.writerow(
|
@@ -152,6 +168,11 @@ class UWMachine(UWBaseMachine):
|
|
152 |
"Failed to generate more questions.",
|
153 |
]
|
154 |
)
|
|
|
|
|
|
|
|
|
|
|
155 |
self.send("next_state") # back to gather_parameters
|
156 |
|
157 |
# Necessary to prevent errors being thrown from state machine
|
|
|
1 |
from __future__ import annotations
|
2 |
|
3 |
import ast
|
4 |
+
import schema
|
5 |
import csv
|
6 |
import json
|
7 |
from pathlib import Path
|
8 |
import random
|
9 |
from typing import TYPE_CHECKING
|
|
|
10 |
from uw_programmatic.base_machine import UWBaseMachine
|
11 |
|
12 |
+
|
13 |
if TYPE_CHECKING:
|
14 |
from griptape.tools import BaseTool
|
15 |
|
|
|
28 |
self.retrieve_vector_stores()
|
29 |
self.send("enter_first_state")
|
30 |
|
31 |
+
def on_enter_gather_parameters(self) -> None:
|
32 |
+
# Reinitialzes the state machine
|
33 |
+
self.current_question_count = 0
|
34 |
+
self.give_up_count = 0
|
35 |
+
self.question_list = []
|
36 |
+
|
37 |
# The first state: Listens for Gradio and then gives us the parameters to search for.
|
38 |
# Reinitializes the Give Up counter.
|
39 |
def on_event_gather_parameters(self, event_: dict) -> None:
|
|
|
45 |
self.page_range = parameters["page_range"]
|
46 |
self.question_number = parameters["question_number"]
|
47 |
self.taxonomy = parameters["taxonomy"]
|
|
|
|
|
48 |
self.send("next_state")
|
49 |
case _:
|
50 |
err_msg = f"Unexpected Transition Event ID: {event_value}."
|
|
|
87 |
structure_id = event_value["structure_id"]
|
88 |
match structure_id:
|
89 |
case "create_question_workflow":
|
90 |
+
# TODO: Can you use task.output_schema on a workflow?
|
91 |
values = event_value["output_task_output"]["value"]
|
92 |
questions = [
|
93 |
ast.literal_eval(question["value"])
|
|
|
106 |
def on_enter_assess_generated_q(self) -> None:
|
107 |
merged_list = [*self.question_list, *self.most_recent_questions]
|
108 |
prompt = f"{merged_list}"
|
109 |
+
similarity_auditor = self.get_structure("similarity_auditor")
|
110 |
+
similarity_auditor.task.output_schema = schema.Schema(
|
111 |
+
{
|
112 |
+
"list": schema.Schema(
|
113 |
+
[
|
114 |
+
{
|
115 |
+
"Question": str,
|
116 |
+
"Answer": str,
|
117 |
+
"Wrong Answers": schema.Schema([str]),
|
118 |
+
"Page": str,
|
119 |
+
"Taxonomy": str,
|
120 |
+
}
|
121 |
+
]
|
122 |
+
)
|
123 |
+
}
|
124 |
+
)
|
125 |
+
similarity_auditor.run(prompt)
|
126 |
|
127 |
# Sets the returned question list (with similar questions wiped) equal to self.question_list
|
128 |
def on_event_assess_generated_q(self, event_: dict) -> None:
|
|
|
138 |
case "similarity_auditor":
|
139 |
new_question_list = event_value["output_task_output"][
|
140 |
"value"
|
141 |
+
]["list"]
|
|
|
|
|
|
|
|
|
|
|
|
|
142 |
self.question_list = new_question_list
|
143 |
self.send("next_state") # go to Evaluate Q Count
|
144 |
|
145 |
# Writes and saves a csv in the correct format to outputs/professor_guide.csv
|
146 |
def on_enter_output_q(self) -> None:
|
147 |
+
file_path = Path.cwd().joinpath("outputs/professor_guide.csv")
|
148 |
+
file_path.parent.mkdir(parents=True, exist_ok=True)
|
149 |
+
with file_path.open("w+", newline="") as file:
|
150 |
writer = csv.writer(file)
|
151 |
+
for question in self.question_list:
|
152 |
new_row = ["MC", "", 1]
|
153 |
+
new_row.append(question["Question"])
|
154 |
+
wrong_answers = list(question["Wrong Answers"])
|
155 |
column = random.randint(1, len(wrong_answers) + 1)
|
156 |
new_row.append(column)
|
157 |
for i in range(1, len(wrong_answers) + 2):
|
158 |
if i == column:
|
159 |
+
new_row.append(question["Answer"])
|
160 |
else:
|
161 |
new_row.append(wrong_answers.pop())
|
162 |
+
new_row.append(question["Page"])
|
163 |
+
new_row.append(question["Taxonomy"])
|
164 |
writer.writerow(new_row)
|
165 |
if self.give_up_count == 3:
|
166 |
writer.writerow(
|
|
|
168 |
"Failed to generate more questions.",
|
169 |
]
|
170 |
)
|
171 |
+
rejected_path = Path.cwd().joinpath("outputs/rejected_list.csv")
|
172 |
+
with rejected_path.open("w+", newline="") as rejected_file:
|
173 |
+
writer = csv.writer(rejected_file)
|
174 |
+
for question in self.rejected_questions:
|
175 |
+
writer.writerow(question.values())
|
176 |
self.send("next_state") # back to gather_parameters
|
177 |
|
178 |
# Necessary to prevent errors being thrown from state machine
|