import json
import os

# Load templates from environment variable with a safe default
templates_json = os.getenv('PROMPT_TEMPLATES', '{}')
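# Illustrative shape of PROMPT_TEMPLATES (an assumption inferred from the
# "description" and "template" keys accessed below; real values come from
# the environment):
# PROMPT_TEMPLATES='{"star": {"description": "Echo-style refiner",
#                             "template": "..."}}'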

try:
    # Parse JSON data with error handling
    prompt_data = json.loads(templates_json)
except json.JSONDecodeError:
    # Fallback to empty dict if JSON is invalid
    prompt_data = {}


# Show the parsed template data for debugging
print(prompt_data)
# Create explanations dictionary with safe access
metaprompt_explanations = {
    key: data.get("description", "No description available")
    for key, data in prompt_data.items()
} if prompt_data else {}

# Generate markdown explanation
explanation_markdown = "".join([
    f"- **{key}**: {value}\n" 
    for key, value in metaprompt_explanations.items()
])
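# With an entry like the illustrative "star" example above, this would yield:
# - **star**: Echo-style refiner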

# Define models list
models = [
    "meta-llama/Meta-Llama-3-70B-Instruct",
    "meta-llama/Meta-Llama-3-8B-Instruct",
    "meta-llama/Llama-3.1-70B-Instruct",
    "meta-llama/Llama-3.1-8B-Instruct",
    "meta-llama/Llama-3.2-3B-Instruct",
    "meta-llama/Llama-3.2-1B-Instruct",
    "meta-llama/Llama-2-13b-chat-hf",
    "meta-llama/Llama-2-7b-chat-hf",
    "HuggingFaceH4/zephyr-7b-beta",
    "HuggingFaceH4/zephyr-7b-alpha",
    "Qwen/Qwen2.5-72B-Instruct",
    "Qwen/Qwen2.5-1.5B",
    "microsoft/Phi-3.5-mini-instruct"
]

# Each example pairs a raw user prompt with the meta-prompt key used to refine it
examples = [
    ["Write a story on the end of prompt engineering replaced by an Ai specialized in refining prompts.", "done"],
    ["Tell me about that guy who invented the light bulb", "physics"],
    ["Explain the universe.", "star"],
    ["What's the population of New York City and how tall is the Empire State Building and who was the first mayor?", "morphosis"],
    ["List American presidents.", "verse"],
    ["Explain why the experiment failed.", "morphosis"],
    ["Is nuclear energy good?", "verse"],
    ["How does a computer work?", "phor"],
    ["How to make money fast?", "done"],
    ["how can you prove IT0's lemma in stochastic calculus ?", "arpe"],
]

# Get API token with error handling
api_token = os.getenv('HF_API_TOKEN')
if not api_token:
    raise ValueError("HF_API_TOKEN not found in environment variables")
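
# Minimal sketch of how the token might be used downstream (an assumption;
# this file only loads configuration), e.g. with huggingface_hub:
# from huggingface_hub import InferenceClient
# client = InferenceClient(model=models[0], token=api_token)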

# Templates parsed from PROMPT_TEMPLATES, kept as fallbacks for the
# environment-variable prompts loaded below
meta_prompts_from_json = {
    key: data.get("template", "No template available")
    for key, data in prompt_data.items()
} if prompt_data else {}

# Refiner model id; the env var, when set, overrides the default
prompt_refiner_model = os.getenv('prompt_refiner_model',
                                 'meta-llama/Llama-3.1-8B-Instruct')

echo_prompt_refiner = os.getenv('echo_prompt_refiner')


# Individual meta prompts supplied via environment variables (None if unset)
metadone = os.getenv('metadone')
metaprompt1 = os.getenv('metaprompt1')
loic_metaprompt = os.getenv('loic_metaprompt')
openai_metaprompt = os.getenv('openai_metaprompt')
original_meta_prompt = os.getenv('original_meta_prompt')
new_meta_prompt = os.getenv('new_meta_prompt')
advanced_meta_prompt = os.getenv('advanced_meta_prompt')
math_meta_prompt = os.getenv('metamath')
autoregressive_metaprompt = os.getenv('autoregressive_metaprompt')

# Final meta-prompt registry: prompts from environment variables take
# precedence, and unset entries fall back to the templates parsed from
# PROMPT_TEMPLATES above
env_meta_prompts = {
    "morphosis": original_meta_prompt,
    "verse": new_meta_prompt,
    "physics": metaprompt1,
    "bolism": loic_metaprompt,
    "done": metadone,
    "star": echo_prompt_refiner,
    "math": math_meta_prompt,
    "arpe": autoregressive_metaprompt,
}
meta_prompts = {
    **meta_prompts_from_json,
    **{key: value for key, value in env_meta_prompts.items() if value is not None},
}
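
# Hypothetical downstream lookup (not defined in this file): resolve an
# example's meta-prompt key, falling back to the "star" refiner
# template = meta_prompts.get(key) or meta_prompts.get("star")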