baconnier committed · Commit 60c1839 · verified · 1 Parent(s): 36a55bc

Update variables.py

Files changed (1):
  1. variables.py (+19 -10)
variables.py CHANGED
@@ -11,16 +11,30 @@ metaprompt_explanations = {
     "bolism": "Utilize this method when working with autoregressive language models and when the task requires careful reasoning before conclusions. It's best for prompts that need detailed output formatting. Choose this over others when the prompt's structure and reasoning order are crucial."
 }
 
-# Load JSON data from environment variable
-PROMPT_DATA = json.loads(os.getenv('PROMPT_TEMPLATES', '[]'))
+# Load JSON data directly
+PROMPT_DATA = [
+    {
+        "name": name,
+        "description": data["description"],
+        "template": data["template"],
+        "examples": data.get("examples", [])  # Using get() to handle optional examples
+    }
+    for name, data in json.loads(os.getenv('PROMPT_TEMPLATES', '[]')).items()
+]
 
-# Create metaprompt_explanations dictionary from JSON data
+# Create metaprompt_explanations dictionary
 metaprompt_explanations = {
-    prompt["name"].lower().split()[0]: prompt["description"]
+    prompt["name"].lower(): prompt["description"]
     for prompt in PROMPT_DATA
 }
 
 
+# Generate explanation markdown
+explanation_markdown = "".join([
+    f"- **{key}**: {value}\n"
+    for key, value in metaprompt_explanations.items()
+])
+
 models = [
     # Meta-Llama models (all support system)
     "meta-llama/Meta-Llama-3-70B-Instruct",
@@ -45,11 +59,6 @@ models = [
 
 explanation_markdown = "".join([f"- **{key}**: {value}\n" for key, value in metaprompt_explanations.items()])
 # Generate explanation markdown from JSON data
-explanation_markdown = "".join([
-    f"- **{prompt['name'].lower().split()[0]}**: {prompt['description']}\n"
-    for prompt in PROMPT_DATA
-])
-
 
 
 meta_info=""
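After this hunk, explanation_markdown is built only from metaprompt_explanations; the removed block was a second, conflicting definition keyed by the first word of each name. (The one-liner kept here duplicates the block added in the first hunk, so the value is now computed twice with the same result.) A quick illustration with assumed sample data:

```python
# Assumed sample, mirroring the dict built in the first hunk
metaprompt_explanations = {"bolism": "Careful reasoning before conclusions."}

explanation_markdown = "".join(
    f"- **{key}**: {value}\n" for key, value in metaprompt_explanations.items()
)
print(explanation_markdown)  # - **bolism**: Careful reasoning before conclusions.
```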
@@ -60,7 +69,7 @@ if not api_token:
 
 # Store templates in a dictionary
 meta_prompts = {
-    prompt["name"].lower().split()[0]: prompt["template"]
+    prompt["name"].lower(): prompt["template"]
     for prompt in PROMPT_DATA
 }
 
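The same key change applies to meta_prompts as to metaprompt_explanations: keys switch from the first word of the prompt name to the full lowercased name. A small sketch of the difference, using a hypothetical multi-word name:

```python
# Hypothetical multi-word prompt name; "bolism" is unaffected since it is
# a single word under either scheme.
name = "Chain Of Density"

old_key = name.lower().split()[0]  # "chain" -- first word only, so names
                                   # sharing a first word would collide
new_key = name.lower()             # "chain of density" -- full, unique key

assert old_key == "chain"
assert new_key == "chain of density"
```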