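"""Proctor AI MCP Server.

A Gradio app (Hugging Face Space) exposing Proctor AI prompt-engineering
techniques as MCP tools, with OpenRouter as the model backend.
"""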
import os

from dotenv import load_dotenv
import gradio as gr
from proctor import (
    CompositeTechnique,
    RolePrompting,
    ChainOfThought,
    ChainOfVerification,
    SelfAsk,
    EmotionPrompting,
    list_techniques,
)
# Load environment variables (.env should contain OPENROUTER_API_KEY)
load_dotenv()
openrouter_key = os.environ.get("OPENROUTER_API_KEY")

# Check API key
if not openrouter_key:
    raise RuntimeError(
        "❌ OPENROUTER_API_KEY not set. Please set it in your .env file."
    )
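# Example .env file (the key below is a placeholder, not a real credential):
#   OPENROUTER_API_KEY=sk-or-v1-...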
# ----- Model Configs -----
MODEL_CONFIGS = {
    "gemini": {
        "model": "openrouter/google/gemini-2.5-flash-preview-05-20",
        "api_base": "https://openrouter.ai/api/v1",
        "api_key": openrouter_key,
        "temperature": 0.3,
        "max_tokens": 1500,
    },
    "claude": {
        "model": "openrouter/anthropic/claude-sonnet-4",
        "api_base": "https://openrouter.ai/api/v1",
        "api_key": openrouter_key,
        "temperature": 0.7,
        "max_tokens": 2000,
    },
    "deepseek": {
        "model": "openrouter/deepseek/deepseek-r1-0528",
        "api_base": "https://openrouter.ai/api/v1",
        "api_key": openrouter_key,
        "temperature": 0.6,
        "max_tokens": 3000,
    },
    "llama": {
        "model": "openrouter/meta-llama/llama-4-scout",
        "api_base": "https://openrouter.ai/api/v1",
        "api_key": openrouter_key,
        "temperature": 0.6,
        "max_tokens": 2500,
    },
    "mistral": {
        "model": "openrouter/mistralai/mistral-small-3.1-24b-instruct",
        "api_base": "https://openrouter.ai/api/v1",
        "api_key": openrouter_key,
        "temperature": 0.8,
        "max_tokens": 1000,
    },
}
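# Note (assumption): these OpenRouter slugs were valid at the time of writing;
# preview models in particular may be renamed or retired, so verify them
# against OpenRouter's model list if requests fail.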
# ----- Tool Functions -----
def proctor_expert_cot(problem: str) -> dict:
    """
    Composite technique on Gemini: Role Prompting, then Chain-of-Thought,
    then Chain-of-Verification.
    """
    technique = CompositeTechnique(
        name="Expert Chain-of-Thought",
        identifier="custom-expert-cot",
        techniques=[
            RolePrompting(),
            ChainOfThought(),
            ChainOfVerification(),
        ],
    )
    response = technique.execute(
        problem,
        llm_config=MODEL_CONFIGS["gemini"],
        role="Expert House Builder and Construction Manager",
    )
    return {
        "model": "Google Gemini 2.5 Flash",
        "technique": "Expert Chain-of-Thought",
        "response": response,
    }
def proctor_claude_cot(problem: str) -> dict:
    """
    Chain-of-Thought with Claude 4 Sonnet.
    """
    technique = ChainOfThought()
    response = technique.execute(problem, llm_config=MODEL_CONFIGS["claude"])
    return {
        "model": "Claude 4 Sonnet",
        "technique": "Chain-of-Thought",
        "response": response,
    }
def proctor_deepseek_reasoning(problem: str) -> dict:
    """
    Deep reasoning with DeepSeek R1: CoT, SelfAsk, Verification.
    """
    technique = CompositeTechnique(
        name="Deep Reasoning Analysis",
        identifier="deep-reasoning",
        techniques=[
            ChainOfThought(),
            SelfAsk(),
            ChainOfVerification(),
        ],
    )
    response = technique.execute(problem, llm_config=MODEL_CONFIGS["deepseek"])
    return {
        "model": "DeepSeek R1",
        "technique": "Deep Reasoning Analysis",
        "response": response,
    }
def proctor_llama_emotion(problem: str) -> dict:
    """
    Emotion Prompting with Llama 4 Scout.
    """
    technique = EmotionPrompting()
    response = technique.execute(
        problem,
        llm_config=MODEL_CONFIGS["llama"],
        emotion="thoughtful and methodical",
    )
    return {
        "model": "Llama 4 Scout",
        "technique": "Emotion Prompting",
        "response": response,
    }
def proctor_mistral_tips(problem: str) -> dict:
    """
    Fast Role Prompting with Mistral Small (for quick suggestions).
    """
    technique = RolePrompting()
    response = technique.execute(
        problem,
        llm_config=MODEL_CONFIGS["mistral"],
        role="Construction Project Manager",
    )
    return {
        "model": "Mistral Small 3.1 24B",
        "technique": "Role Prompting",
        "response": response,
    }
# Optionally, expose a unified tool for arbitrary model/technique selection:
def proctor_flexible(
    problem: str,
    model: str = "gemini",
    technique: str = "ChainOfThought",
    role: str = "",
    emotion: str = "",
) -> dict:
    """
    Flexible interface for any model/technique combo.
    """
    technique_map = {
        "ChainOfThought": ChainOfThought,
        "RolePrompting": RolePrompting,
        "EmotionPrompting": EmotionPrompting,
        "SelfAsk": SelfAsk,
        "ChainOfVerification": ChainOfVerification,
    }
    if technique == "CompositeExpert":
        tech = CompositeTechnique(
            name="Expert Chain-of-Thought",
            identifier="custom-expert-cot",
            techniques=[
                RolePrompting(),
                ChainOfThought(),
                ChainOfVerification(),
            ],
        )
        response = tech.execute(problem, llm_config=MODEL_CONFIGS[model], role=role)
    elif technique == "DeepReasoning":
        tech = CompositeTechnique(
            name="Deep Reasoning Analysis",
            identifier="deep-reasoning",
            techniques=[
                ChainOfThought(),
                SelfAsk(),
                ChainOfVerification(),
            ],
        )
        response = tech.execute(problem, llm_config=MODEL_CONFIGS[model])
    else:
        tech_cls = technique_map.get(technique, ChainOfThought)
        if technique == "RolePrompting":
            response = tech_cls().execute(problem, llm_config=MODEL_CONFIGS[model], role=role)
        elif technique == "EmotionPrompting":
            response = tech_cls().execute(problem, llm_config=MODEL_CONFIGS[model], emotion=emotion)
        else:
            response = tech_cls().execute(problem, llm_config=MODEL_CONFIGS[model])
    return {
        "model": MODEL_CONFIGS[model]["model"],
        "technique": technique,
        "response": response,
    }
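# Example call (hypothetical values):
#   proctor_flexible(
#       "How should I sequence framing and electrical rough-in?",
#       model="claude",
#       technique="RolePrompting",
#       role="General Contractor",
#   )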
# ----- Gradio/MCP Interface -----
with gr.Blocks() as demo:
    gr.Markdown(
        "# 🏗️ Proctor AI MCP Server\n"
        "Advanced prompt engineering tools via OpenRouter and Proctor AI.\n\n"
        "*Try from an MCP-compatible client or the web UI below!*"
    )
    with gr.Tab("Gemini (Expert CoT)"):
        gr.Interface(
            fn=proctor_expert_cot,
            inputs=gr.Textbox(label="Problem"),
            outputs=gr.JSON(),
            allow_flagging="never",
        )
    with gr.Tab("Claude 4 (Chain-of-Thought)"):
        gr.Interface(
            fn=proctor_claude_cot,
            inputs=gr.Textbox(label="Problem"),
            outputs=gr.JSON(),
            allow_flagging="never",
        )
    with gr.Tab("DeepSeek R1 (Deep Reasoning)"):
        gr.Interface(
            fn=proctor_deepseek_reasoning,
            inputs=gr.Textbox(label="Problem"),
            outputs=gr.JSON(),
            allow_flagging="never",
        )
    with gr.Tab("Llama 4 (Emotion Prompting)"):
        gr.Interface(
            fn=proctor_llama_emotion,
            inputs=gr.Textbox(label="Problem"),
            outputs=gr.JSON(),
            allow_flagging="never",
        )
    with gr.Tab("Mistral (Quick Tips)"):
        gr.Interface(
            fn=proctor_mistral_tips,
            inputs=gr.Textbox(label="Problem (tips request)"),
            outputs=gr.JSON(),
            allow_flagging="never",
        )
    with gr.Tab("Flexible (Advanced)"):
        model_dropdown = gr.Dropdown(
            choices=list(MODEL_CONFIGS.keys()), value="gemini", label="Model"
        )
        technique_dropdown = gr.Dropdown(
            choices=[
                "ChainOfThought",
                "RolePrompting",
                "EmotionPrompting",
                "SelfAsk",
                "ChainOfVerification",
                "CompositeExpert",
                "DeepReasoning",
            ],
            value="ChainOfThought",
            label="Technique",
        )
        role_input = gr.Textbox(label="Role (optional)", value="")
        emotion_input = gr.Textbox(label="Emotion (optional)", value="")
        flexible_iface = gr.Interface(
            fn=proctor_flexible,
            inputs=[
                gr.Textbox(label="Problem"),
                model_dropdown,
                technique_dropdown,
                role_input,
                emotion_input,
            ],
            outputs=gr.JSON(),
            allow_flagging="never",
        )
if __name__ == "__main__":
    demo.launch(mcp_server=True)
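# With mcp_server=True, recent Gradio releases expose each function above as an
# MCP tool over SSE, typically at http://localhost:7860/gradio_api/mcp/sse
# (the exact path may vary by Gradio version; check the console output on launch).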