events:
  enter_first_state:
    transitions:
    - from: start
      to: gather_parameters
  finish_state:
    transitions:
    - from: evaluate_q_count
      to: output_q
  next_state:
    transitions:
    - from: gather_parameters
      to: evaluate_q_count
    - from: evaluate_q_count
      to: need_more_q
    - from: need_more_q
      to: assess_generated_q
    - from: assess_generated_q
      to: evaluate_q_count
    - from: output_q
      to: gather_parameters
  end_state:
    transitions:
    - from: output_q
      to: end
  error_to_start:
    transitions:
    - from: need_more_q
      to: gather_parameters
rulesets:
  frame_question_best_practices:
    name: Frame Question with KB
    rules:
    - '"Return a string with the reformmated question'
    - '"No commentary, no code, no backticks'
    - '"Use the information from your knowledge base'
    - '"Do not change the content of the question'
  incorrect_answers_creator:
    name: Create Wrong Answers
    rules:
    - '"Return ONLY a list of 4 incorrect answers. No markdown, no commentary, no
      backticks.'
    - '"All incorrect answers should be significantly different, but plausible answers to the question.'
    - 'If the question references a subject, the incorrect answer should be related to the subject.'
    - '"Incorrect answers may reference material from the knowledge base, but must
      not be correct answers to the question'
    - '"Length of incorrect answers should be 10 words max, 5 words minimum'
  similarity_checker:
    name: Check Similarity
    rules:
    - '''"you are adept at comparing questions to check whether they are similar'''
    - '''"you will be given a list of questions. If two questions assess very similar
      subjects in a very similar way, remove one of them from the list.'''
    - '''"do not change anything else in the list.'''
    - '''"output only the edited list.'''
    - '''Return ONLY a json'''
    - '''Always return a list '''
    - '''No markdown, no commentary, no code, no backticks.'''
    - '"Use \" for quotes within the JSON'
  specific_question_creator:
    name: Create Question
    rules:
    - '"Return ONLY a json with ''Question'' and ''Answer'' as keys.'
    - " No markdown, no commentary, no code, no backticks."
    - '"Query to knowledge base should always be ''find information for quiz question'''
    - '"Question should be a multiple choice quiz style question that assesses a student''s
      knowledge of the information in the knowledge base (which should be referred
      to as ''the textbook''). Answer should be a correct answer to the question that
      uses information from the knowledge base. Do not return incorrect answers.'
    - '"The length of the question should be 30 words at most.'
    - '"Question should never reference or ask about an entire section, never reference
      or ask about a quote in the knowledge base, never ask for the page number of
      some information, and never ask for information about the file, document, or
      knowledge base.'
    - '"The answer to the question should be short, but should not omit important
      information.'
  taxonomy_prompter:
    name: Decide Taxonomy
    rules:
    - '"behave as if you were a user asking an AI chatbot to generate a question for
      you'
states:
  assess_generated_q:
    structures:
      similarity_auditor: {}
  end:
    final: true
  evaluate_q_count: {}
  gather_parameters: {}
  need_more_q:
    structures:
      best_practices_expert: {}
      subject_matter_expert: {}
      taxonomy_expert: {}
  output_q: {}
  start:
    initial: true
structures:
  best_practices_expert:
    model: gpt-4o
    prompt_id: best_practices_question
    ruleset_ids:
    - frame_question_best_practices
    vector_stores:
    - best_practices
  similarity_auditor:
    model: gpt-4o
    prompt_id: similarity_auditor_prompt
    ruleset_ids:
    - similarity_checker
  subject_matter_expert:
    model: gpt-4o
    prompt_id: scope_question_subject_expert
    ruleset_ids:
    - specific_question_creator
  taxonomy_expert:
    model: gpt-4o
    prompt_id: scope_question_taxonomy
    ruleset_ids:
    - taxonomy_prompter
  wrong_answers_generator:
    model: gpt-4o
    prompt_id: write_incorrect_answers
    ruleset_ids:
    - incorrect_answers_creator
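The `events`, `states`, and `structures` sections above describe a small state machine: firing an event looks up the transition whose `from` matches the current state and moves to its `to` state. Below is a minimal Python sketch of how a driver might load this file and walk one generation cycle through those transitions. The file name `quiz_machine.yaml`, the PyYAML loader, and the helper functions are illustrative assumptions, not part of this config or any specific runtime.

```python
# Minimal sketch of a driver for the state-machine config above.
# Assumptions (not defined by the config): the YAML is saved as
# "quiz_machine.yaml" and PyYAML is available as the loader.
import yaml

with open("quiz_machine.yaml") as f:
    config = yaml.safe_load(f)

def initial_state(cfg):
    """Return the state marked `initial: true` (here: `start`)."""
    return next(name for name, attrs in cfg["states"].items()
                if (attrs or {}).get("initial"))

def fire(cfg, state, event):
    """Follow the transition for `event` whose `from` matches `state`."""
    for t in cfg["events"][event]["transitions"]:
        if t["from"] == state:
            return t["to"]
    raise ValueError(f"event {event!r} has no transition from state {state!r}")

# Walk one full generation cycle through the machine.
state = initial_state(config)                      # start
state = fire(config, state, "enter_first_state")   # gather_parameters
state = fire(config, state, "next_state")          # evaluate_q_count
state = fire(config, state, "next_state")          # need_more_q
state = fire(config, state, "next_state")          # assess_generated_q
state = fire(config, state, "next_state")          # evaluate_q_count
state = fire(config, state, "finish_state")        # output_q
state = fire(config, state, "end_state")           # end
assert config["states"][state].get("final") is True
```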