# NOTE(review): removed Hugging Face Spaces file-viewer scrape residue that
# preceded this line (status text, "File size: 1,394 Bytes", commit hashes,
# and the line-number gutter) — it was not part of the source and broke
# Python syntax.
import json
import os
# Pull the serialized prompt-template catalog from the environment and
# decode it; an unset variable falls back to an empty JSON object.
raw_templates = os.getenv('PROMPT_TEMPLATES', '{}')
prompt_data = json.loads(raw_templates)

# One human-readable description per template key.
metaprompt_explanations = {
    name: entry["description"]
    for name, entry in prompt_data.items()
}

# Render the descriptions as a markdown bullet list, one line per template.
explanation_markdown = "".join(
    f"- **{name}**: {description}\n"
    for name, description in metaprompt_explanations.items()
)
# Inference-API chat models offered by this app, grouped by provider.
_META_LLAMA_MODELS = [
    # Meta-Llama models (all support system)
    "meta-llama/Meta-Llama-3-70B-Instruct",
    "meta-llama/Meta-Llama-3-8B-Instruct",
    "meta-llama/Llama-3.1-70B-Instruct",
    "meta-llama/Llama-3.1-8B-Instruct",
    "meta-llama/Llama-3.2-3B-Instruct",
    "meta-llama/Llama-3.2-1B-Instruct",
    "meta-llama/Llama-2-13b-chat-hf",
    "meta-llama/Llama-2-7b-chat-hf",
]
_H4_MODELS = [
    # HuggingFaceH4 models (support system)
    "HuggingFaceH4/zephyr-7b-beta",
    "HuggingFaceH4/zephyr-7b-alpha",
]
_QWEN_AND_PHI_MODELS = [
    # Qwen models (support system)
    "Qwen/Qwen2.5-72B-Instruct",
    "Qwen/Qwen2.5-1.5B",
    "microsoft/Phi-3.5-mini-instruct",
]
models = _META_LLAMA_MODELS + _H4_MODELS + _QWEN_AND_PHI_MODELS
# The Hugging Face Inference API token is mandatory — fail fast at startup
# rather than erroring on the first request.
api_token = os.environ.get('HF_API_TOKEN')
if not api_token:
    raise ValueError("HF_API_TOKEN not found in environment variables")
# Raw template text for each prompt key, taken from the same parsed catalog
# the descriptions came from.
meta_prompts = {
    name: entry["template"] for name, entry in prompt_data.items()
}