Spaces:
Running
Running
File size: 6,485 Bytes
173384d a59fbdd 173384d a59fbdd 173384d a59fbdd 173384d a59fbdd 173384d |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 |
import os
import re

import gradio as gr
from dotenv import load_dotenv

from proctor import (
    CompositeTechnique,
    RolePrompting,
    ChainOfThought,
    ChainOfVerification,
    SelfAsk,
    EmotionPrompting,
    ZeroShotCoT,
    list_techniques,
)
# Load environment variables
# Read a local .env file (if present) into os.environ before anything below
# queries it.
load_dotenv()
# Check for OpenRouter API key
# Fail fast at import time: every model below is routed through OpenRouter,
# so the app is unusable without this key.
openrouter_key = os.environ.get("OPENROUTER_API_KEY")
if not openrouter_key:
    raise ValueError("OPENROUTER_API_KEY not set. Please set it in your .env file.")
# Available models and techniques
# Maps the human-friendly dropdown label shown in the UI to the model
# identifier used as the key into MODEL_CONFIGS below.
MODELS = {
    "Google Gemini 2.5 Flash": "openrouter/google/gemini-2.5-flash-preview-05-20",
    "Claude 4 Sonnet": "openrouter/anthropic/claude-sonnet-4",
    "DeepSeek R1": "openrouter/deepseek/deepseek-r1-0528",
    "Llama 4 Scout": "openrouter/meta-llama/llama-4-scout",
    "Mistral Small 3.1 24B": "openrouter/mistralai/mistral-small-3.1-24b-instruct",
}
# All technique names registered by the proctor package.
# NOTE(review): TECHNIQUES is not referenced anywhere else in this file — the
# UI dropdown is driven by TECHNIQUE_CONFIGS instead; confirm before removing.
TECHNIQUES = list_techniques()
# Model configurations
# Every model is served through OpenRouter, so all entries share the same
# endpoint and API key; only the sampling temperature and token budget vary.
_OPENROUTER_API_BASE = "https://openrouter.ai/api/v1"


def _openrouter_config(temperature: float, max_tokens: int) -> dict:
    """Build one per-model config dict for the shared OpenRouter endpoint.

    Args:
        temperature: Sampling temperature for the model.
        max_tokens: Maximum number of tokens the model may generate.

    Returns:
        A config dict with the shared api_base/api_key plus the given
        sampling parameters (same shape as the original literal entries).
    """
    return {
        "api_base": _OPENROUTER_API_BASE,
        "api_key": openrouter_key,
        "temperature": temperature,
        "max_tokens": max_tokens,
    }


MODEL_CONFIGS = {
    "openrouter/google/gemini-2.5-flash-preview-05-20": _openrouter_config(0.3, 1500),
    "openrouter/anthropic/claude-sonnet-4": _openrouter_config(0.7, 2000),
    "openrouter/deepseek/deepseek-r1-0528": _openrouter_config(0.6, 3000),
    "openrouter/meta-llama/llama-4-scout": _openrouter_config(0.6, 2500),
    "openrouter/mistralai/mistral-small-3.1-24b-instruct": _openrouter_config(0.8, 1000),
}
# Composite technique definitions
# UI dropdown label -> ready-to-execute technique instance. The first two
# entries are composite pipelines chaining several primitive techniques in
# order; the remaining entries expose single techniques directly.
TECHNIQUE_CONFIGS = {
    "Expert Chain-of-Thought": CompositeTechnique(
        name="Expert Chain-of-Thought",
        identifier="custom-expert-cot",
        # Role framing, then step-by-step reasoning, then a verification pass.
        techniques=[RolePrompting(), ChainOfThought(), ChainOfVerification()],
    ),
    "Deep Reasoning Analysis": CompositeTechnique(
        name="Deep Reasoning Analysis",
        identifier="deep-reasoning",
        techniques=[ChainOfThought(), SelfAsk(), ChainOfVerification()],
    ),
    "ChainOfThought": ChainOfThought(),
    "EmotionPrompting": EmotionPrompting(),
    "RolePrompting": RolePrompting(),
    "SelfAsk": SelfAsk(),
    "ZeroShotCoT": ZeroShotCoT(),
}
def format_as_markdown(response: str) -> str:
    """Reformat a plain-text model response as Markdown.

    Heuristics applied line by line:
      * Lines ending with a colon (and not starting with ``-``) become
        ``### `` headings, e.g. ``"Target Market:"``.
      * Lines starting with ``-`` or a number followed by ``". "`` / ``".("``
        are kept verbatim as list items.
      * When a paragraph directly follows a list, a blank line is inserted
        first so Markdown renderers terminate the list correctly.

    Args:
        response: Raw response text from the LLM.

    Returns:
        The response text with Markdown heading/list structure applied.
    """
    formatted_lines = []
    in_list = False
    for raw_line in response.split("\n"):
        line = raw_line.strip()
        if not line:
            # Blank line: closes any open list and is preserved as-is.
            in_list = False
            formatted_lines.append("")
            continue
        # Check for headings (e.g., "Target Market:")
        if line.endswith(":") and not line.startswith("-"):
            formatted_lines.append(f"### {line}")
            continue
        # Check for list items (e.g., "- Item", "1. Item", "12. Item").
        # Fix: the previous check read line[1:3] directly, so only
        # single-digit numbered items were recognized; the regex accepts
        # any number of digits before the "." separator.
        if line.startswith("-") or re.match(r"\d+\.[ (]", line):
            in_list = True
            formatted_lines.append(line)
            continue
        # If not a heading or list item, treat as a paragraph; terminate a
        # preceding list with a blank line.
        if in_list:
            in_list = False
            formatted_lines.append("")
        formatted_lines.append(line)
    return "\n".join(formatted_lines)
def process_problem(problem, technique_name, model_name, role="", emotion=""):
    """Run the selected prompting technique against the selected model.

    Args:
        problem: The user's problem statement; empty input short-circuits
            with a prompt asking for one.
        technique_name: Key into TECHNIQUE_CONFIGS.
        model_name: Key into MODELS (whose value indexes MODEL_CONFIGS).
        role: Optional persona for role-based techniques.
        emotion: Optional emotional framing for EmotionPrompting.

    Returns:
        The model's response formatted as Markdown, or an error/help message.
    """
    if not problem:
        return "Please enter a problem statement."

    technique = TECHNIQUE_CONFIGS.get(technique_name)
    if not technique:
        return f"Technique {technique_name} not found."

    llm_config = MODEL_CONFIGS.get(MODELS[model_name])
    if not llm_config:
        return f"Model {model_name} not found."

    # Techniques that take a persona or emotion get it passed through, with
    # sensible fallbacks when the corresponding textbox was left blank.
    execute_kwargs = {"llm_config": llm_config}
    if technique_name in ("RolePrompting", "Expert Chain-of-Thought"):
        execute_kwargs["role"] = role or "Expert"
    elif technique_name == "EmotionPrompting":
        execute_kwargs["emotion"] = emotion or "thoughtful and methodical"

    try:
        raw_response = technique.execute(problem, **execute_kwargs)
        # Render the raw text as Markdown for the gr.Markdown output pane.
        return format_as_markdown(raw_response)
    except Exception as e:
        return f"**Error**: {str(e)}"
# Create Gradio interface
# Declarative UI: a textbox for the problem, dropdowns for technique/model,
# optional role/emotion textboxes shown only for techniques that use them,
# and a Markdown pane for the formatted answer.
with gr.Blocks(title="Proctor AI Prompt Engineering App") as interface:
    gr.Markdown("# Proctor AI Prompt Engineering App")
    gr.Markdown("Enter a problem, select a technique and model, and get a response powered by OpenRouter.")
    problem_input = gr.Textbox(label="Problem Statement", placeholder="e.g., How to build a house for a family of 4?")
    technique_dropdown = gr.Dropdown(choices=list(TECHNIQUE_CONFIGS.keys()), label="Prompting Technique")
    model_dropdown = gr.Dropdown(choices=list(MODELS.keys()), label="Model")
    # Hidden by default; update_inputs() toggles visibility per technique.
    role_input = gr.Textbox(label="Role (for RolePrompting or Expert CoT)", placeholder="e.g., Expert House Builder", visible=False)
    emotion_input = gr.Textbox(label="Emotion (for EmotionPrompting)", placeholder="e.g., thoughtful and methodical", visible=False)
    output = gr.Markdown(label="Response")  # Changed to gr.Markdown for proper rendering
    submit_button = gr.Button("Generate Response")
    # Dynamic visibility for role and emotion inputs
    def update_inputs(technique):
        """Show the role/emotion boxes only for techniques that consume them."""
        return {
            role_input: gr.update(visible=technique in ["RolePrompting", "Expert Chain-of-Thought"]),
            emotion_input: gr.update(visible=technique == "EmotionPrompting")
        }
    technique_dropdown.change(fn=update_inputs, inputs=technique_dropdown, outputs=[role_input, emotion_input])
    submit_button.click(
        fn=process_problem,
        inputs=[problem_input, technique_dropdown, model_dropdown, role_input, emotion_input],
        outputs=output
    )
# Launch the app
if __name__ == "__main__":
    # share=True also publishes a temporary public gradio.live URL.
    interface.launch(
        share=True
    )