# Proctor AI Prompt Engineering App — Gradio interface (deployed as a Hugging Face Space).
# Standard library
import logging
import os
from typing import Any, Dict, Optional

# Third-party
import gradio as gr
from dotenv import load_dotenv

# Local
from proctor import (
    ChainOfThought,
    ChainOfVerification,
    CompositeTechnique,
    EmotionPrompting,
    RolePrompting,
    SelfAsk,
    ZeroShotCoT,
    list_techniques,
)
# Configure module-level logging.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Load environment variables from a local .env file, if present.
load_dotenv()

# Fail fast when the OpenRouter API key is missing: every model call below is
# routed through OpenRouter, so the app cannot function without it.
openrouter_key = os.environ.get("OPENROUTER_API_KEY")
if not openrouter_key:
    raise ValueError("OPENROUTER_API_KEY not set. Please set it in your .env file.")
# UI display name -> OpenRouter model identifier for every model the app offers.
MODELS = {
    "Google Gemini 2.5 Flash": "openrouter/google/gemini-2.5-flash-preview-05-20",
    "Claude 4 Sonnet": "openrouter/anthropic/claude-sonnet-4",
    "DeepSeek R1": "openrouter/deepseek/deepseek-r1-0528",
    "Llama 4 Scout": "openrouter/meta-llama/llama-4-scout",
    "Mistral Small 3.1 24B": "openrouter/mistralai/mistral-small-3.1-24b-instruct",
}
# All prompting techniques registered with the proctor package.
TECHNIQUES = list_techniques()
# Per-model LiteLLM/OpenRouter call parameters. Keys match the identifiers in
# MODELS; every entry shares the same API base and key and tunes only
# temperature and max_tokens per model.
MODEL_CONFIGS = {
    "openrouter/google/gemini-2.5-flash-preview-05-20": {
        "api_base": "https://openrouter.ai/api/v1",
        "api_key": openrouter_key,
        "temperature": 0.3,
        "max_tokens": 15000,
    },
    "openrouter/anthropic/claude-sonnet-4": {
        "api_base": "https://openrouter.ai/api/v1",
        "api_key": openrouter_key,
        "temperature": 0.7,
        "max_tokens": 12000,
    },
    "openrouter/deepseek/deepseek-r1-0528": {
        "api_base": "https://openrouter.ai/api/v1",
        "api_key": openrouter_key,
        "temperature": 0.6,
        "max_tokens": 8000,
    },
    "openrouter/meta-llama/llama-4-scout": {
        "api_base": "https://openrouter.ai/api/v1",
        "api_key": openrouter_key,
        "temperature": 0.6,
        "max_tokens": 12500,
    },
    "openrouter/mistralai/mistral-small-3.1-24b-instruct": {
        "api_base": "https://openrouter.ai/api/v1",
        "api_key": openrouter_key,
        "temperature": 0.8,
        # NOTE(review): 1000 is an order of magnitude below the other models'
        # limits — confirm this is intentional and not a missing zero.
        "max_tokens": 1000,
    },
}
# UI technique name -> technique instance. The first two are composite
# pipelines built from several primitive techniques; the rest are the
# primitives offered directly.
TECHNIQUE_CONFIGS = {
    "Expert Chain-of-Thought": CompositeTechnique(
        name="Expert Chain-of-Thought",
        identifier="custom-expert-cot",
        techniques=[RolePrompting(), ChainOfThought(), ChainOfVerification()],
    ),
    "Deep Reasoning Analysis": CompositeTechnique(
        name="Deep Reasoning Analysis",
        identifier="deep-reasoning",
        techniques=[ChainOfThought(), SelfAsk(), ChainOfVerification()],
    ),
    "ChainOfThought": ChainOfThought(),
    "EmotionPrompting": EmotionPrompting(),
    "RolePrompting": RolePrompting(),
    "SelfAsk": SelfAsk(),
    "ZeroShotCoT": ZeroShotCoT(),
}
def _is_list_item(line: str) -> bool:
    """Return True if *line* looks like a Markdown list item.

    Recognizes dash bullets ("- item") and numbered items: one or more
    leading digits followed by ". " or ".(" (e.g. "1. item", "12. item").
    """
    if line.startswith("-"):
        return True
    digits = 0
    while digits < len(line) and line[digits].isdigit():
        digits += 1
    return digits > 0 and line[digits:digits + 2] in (". ", ".(")


def format_as_markdown(response: str) -> str:
    """
    Format the response as Markdown for better readability.

    Heuristics: a short line ending in ":" becomes an H3 heading, bullet and
    numbered lines are kept as list items, and a blank line is inserted
    between a list and the paragraph that follows it.

    Args:
        response: The raw response text to format

    Returns:
        Formatted markdown string
    """
    if not response:
        return ""

    formatted_lines = []
    in_list = False
    for line in response.split("\n"):
        line = line.strip()
        if not line:
            in_list = False
            formatted_lines.append("")
            continue
        # Short "Title:" lines become H3 headings (list items excluded).
        if line.endswith(":") and not line.startswith("-") and len(line) < 100:
            formatted_lines.append(f"### {line}")
            continue
        if _is_list_item(line):
            in_list = True
            formatted_lines.append(line)
            continue
        # A paragraph immediately after a list gets a separating blank line
        # so Markdown renderers close the list properly.
        if in_list:
            in_list = False
            formatted_lines.append("")
        formatted_lines.append(line)
    return "\n".join(formatted_lines)
def validate_inputs(problem: str, technique_name: str, model_name: str) -> Optional[str]:
    """
    Validate user inputs and return error message if invalid.

    Args:
        problem: The problem statement
        technique_name: Selected technique name (must be a TECHNIQUE_CONFIGS key)
        model_name: Selected model name (must be a MODELS key)

    Returns:
        Error message if validation fails, None otherwise
    """
    # Empty or whitespace-only problem statements are rejected first so the
    # lookup checks below only run on real input.
    if not problem or not problem.strip():
        return "Please enter a problem statement."
    if technique_name not in TECHNIQUE_CONFIGS:
        return f"Technique '{technique_name}' not found."
    if model_name not in MODELS:
        return f"Model '{model_name}' not found."
    return None
def process_problem(
    problem: str,
    technique_name: str,
    model_name: str,
    role: str = "",
    emotion: str = ""
) -> str:
    """
    Process the problem using the selected technique and model.

    Args:
        problem: The problem statement to solve
        technique_name: Name of the prompting technique to use
        model_name: Name of the model to use
        role: Role for role prompting (optional; defaults to "Expert")
        emotion: Emotion for emotion prompting (optional)

    Returns:
        Markdown-formatted response, or a "**Error**: ..." string on failure
        (Gradio renders either directly in the output pane).
    """
    # Validate inputs before doing any work; surface problems to the UI.
    validation_error = validate_inputs(problem, technique_name, model_name)
    if validation_error:
        return f"**Error**: {validation_error}"

    technique = TECHNIQUE_CONFIGS[technique_name]
    model_id = MODELS[model_name]
    llm_config = MODEL_CONFIGS[model_id]

    try:
        # Base kwargs shared by every technique.
        kwargs = {"llm_config": llm_config}

        # Technique-specific parameters, with sensible fallbacks when the
        # user left the optional fields blank.
        if technique_name == "RolePrompting":
            kwargs["role"] = role.strip() or "Expert"
        elif technique_name == "EmotionPrompting":
            kwargs["emotion"] = emotion.strip() or "thoughtful and methodical"
        elif technique_name == "Expert Chain-of-Thought":
            kwargs["role"] = role.strip() or "Expert"

        logger.info("Processing problem with %s using %s", technique_name, model_name)
        response = technique.execute(problem.strip(), **kwargs)

        # Format the raw model output for the Markdown output pane.
        markdown_response = format_as_markdown(response)
        logger.info("Successfully processed problem")
        return markdown_response
    except Exception as e:
        # Broad catch is deliberate at this UI boundary: any failure (network,
        # provider, technique) must surface as a message, not a crash.
        error_msg = f"Error processing request: {str(e)}"
        logger.exception(error_msg)
        return f"**Error**: {error_msg}"
def update_input_visibility(technique: str) -> Dict[str, Any]:
    """
    Update visibility of role and emotion inputs based on selected technique.

    Only role-based techniques need the role textbox, and only
    EmotionPrompting needs the emotion textbox.

    Args:
        technique: Selected technique name

    Returns:
        Dictionary mapping the role/emotion components to gr.update calls
        (role_input and emotion_input are defined at module level below).
    """
    show_role = technique in ["RolePrompting", "Expert Chain-of-Thought"]
    show_emotion = technique == "EmotionPrompting"
    return {
        role_input: gr.update(visible=show_role),
        emotion_input: gr.update(visible=show_emotion)
    }
# Build the Gradio interface: inputs on the left, Markdown output on the right.
with gr.Blocks(
    title="Proctor AI Prompt Engineering App",
    theme=gr.themes.Soft(),
    css="""
    .gradio-container {
        max-width: 1200px !important;
        margin: auto !important;
    }
    """
) as interface:
    gr.Markdown(
        """
        # 🤖 Proctor AI Prompt Engineering App

        **Enhance your problem-solving with advanced AI prompting techniques**

        Enter a problem, select a technique and model, and get intelligent responses powered by OpenRouter.
        """
    )

    with gr.Row():
        with gr.Column(scale=2):
            problem_input = gr.Textbox(
                label="Problem Statement",
                placeholder="e.g., How to build a sustainable house for a family of 4?",
                lines=3,
                max_lines=5
            )
            with gr.Row():
                technique_dropdown = gr.Dropdown(
                    choices=list(TECHNIQUE_CONFIGS.keys()),
                    label="Prompting Technique",
                    value=list(TECHNIQUE_CONFIGS.keys())[0] if TECHNIQUE_CONFIGS else None
                )
                model_dropdown = gr.Dropdown(
                    choices=list(MODELS.keys()),
                    label="Model",
                    value=list(MODELS.keys())[0] if MODELS else None
                )
            # Hidden by default; update_input_visibility toggles them per technique.
            role_input = gr.Textbox(
                label="Role (for RolePrompting or Expert CoT)",
                placeholder="e.g., Expert Architect",
                visible=False
            )
            emotion_input = gr.Textbox(
                label="Emotion (for EmotionPrompting)",
                placeholder="e.g., thoughtful and methodical",
                visible=False
            )
            submit_button = gr.Button(
                "🚀 Generate Response",
                variant="primary",
                size="lg"
            )
        with gr.Column(scale=3):
            output = gr.Markdown(
                label="Response",
                value="*Your response will appear here...*"
            )

    # Event handlers
    technique_dropdown.change(
        fn=update_input_visibility,
        inputs=technique_dropdown,
        outputs=[role_input, emotion_input]
    )
    submit_button.click(
        fn=process_problem,
        inputs=[problem_input, technique_dropdown, model_dropdown, role_input, emotion_input],
        outputs=output
    )

    # Clickable example prompts pre-filling the inputs above.
    gr.Examples(
        examples=[
            ["How can I improve team productivity in a remote work environment?", "Expert Chain-of-Thought", "Claude 4 Sonnet", "Management Consultant", ""],
            ["What are the key factors to consider when starting a tech startup?", "Deep Reasoning Analysis", "Google Gemini 2.5 Flash", "", ""],
            ["How do I create a sustainable garden in a small urban space?", "RolePrompting", "DeepSeek R1", "Urban Gardening Expert", ""],
        ],
        inputs=[problem_input, technique_dropdown, model_dropdown, role_input, emotion_input],
    )
# Launch the app only when run as a script (not when imported).
if __name__ == "__main__":
    interface.launch(
        share=True,
    )