import os
import json
import re
from huggingface_hub import InferenceClient
import gradio as gr
from pydantic import BaseModel, Field
from typing import Optional, Literal
from huggingface_hub.errors import HfHubHTTPError
from custom_css import custom_css
from variables import *
class PromptInput(BaseModel):
    """Validated input for a prompt-refinement request."""

    # The user's original prompt text to be refined.
    text: str = Field(..., description="The initial prompt text")
    # Short code selecting which meta-prompt template PromptRefiner uses.
    # Note: "phor" has no entry in PromptRefiner.meta_prompts, so it falls
    # back to `advanced_meta_prompt` (see PromptRefiner.refine_prompt).
    meta_prompt_choice: Literal["star","done","physics","morphosis", "verse", "phor","bolism","math","arpe"] = Field(..., description="Choice of meta prompt strategy")
class RefinementOutput(BaseModel):
    """Structured result of a prompt-refinement run.

    All fields are optional because the LLM's response may omit sections
    or fail to parse; `raw_content` presumably preserves the unparsed
    response as a fallback (parsing code is not fully visible here).
    NOTE(review): not referenced by the visible code paths — refine_prompt
    works with plain dicts; confirm whether this model is still used.
    """

    query_analysis: Optional[str] = None
    initial_prompt_evaluation: Optional[str] = None
    refined_prompt: Optional[str] = None
    explanation_of_refinements: Optional[str] = None
    raw_content: Optional[str] = None
class PromptRefiner:
def __init__(self, api_token: str, meta_prompts=None):
    """Create a refiner backed by the Hugging Face Inference API.

    Args:
        api_token: Hugging Face API token used to authenticate requests.
        meta_prompts: Accepted for backward compatibility but currently
            unused — the mapping below, built from module-level templates
            loaded via `variables`, is used instead. Given a default so
            the parameter can eventually be dropped without breaking calls.
    """
    # 120 s timeout: meta-prompt completions can be long (max_tokens=3000).
    self.client = InferenceClient(token=api_token, timeout=120)
    # Maps each `meta_prompt_choice` code to its meta-prompt template.
    # "phor" (a valid PromptInput choice) has no entry here; refine_prompt
    # falls back to `advanced_meta_prompt` for unknown keys.
    self.meta_prompts = {
        "morphosis": original_meta_prompt,
        "verse": new_meta_prompt,
        "physics": metaprompt1,
        "bolism": loic_metaprompt,
        "done": metadone,
        "star": echo_prompt_refiner,
        "math": math_meta_prompt,
        "arpe": autoregressive_metaprompt,
    }
def refine_prompt(self, prompt_input: PromptInput) -> tuple:
    """Refine a prompt using the selected meta-prompt strategy.

    Args:
        prompt_input: Validated prompt text plus meta-prompt choice.

    Returns:
        A 4-tuple ``(initial_prompt_evaluation, refined_prompt,
        explanation_of_refinements, full_result_dict)``. On failure the
        first three elements carry user-facing messages and the dict is
        empty, so callers can always unpack four values.
    """
    try:
        # Dictionary lookup with a default replaces an if-elif chain;
        # unknown choices (e.g. "phor") fall back to advanced_meta_prompt.
        selected_meta_prompt = self.meta_prompts.get(
            prompt_input.meta_prompt_choice,
            advanced_meta_prompt
        )
        messages = [
            {
                "role": "system",
                "content": 'You are an expert at refining and extending prompts. Given a basic prompt, provide a more relevant and detailed prompt.'
            },
            {
                "role": "user",
                # The template carries a placeholder that is substituted
                # with the user's original prompt.
                "content": selected_meta_prompt.replace("[Insert initial prompt here]", prompt_input.text)
            }
        ]
        response = self.client.chat_completion(
            model=prompt_refiner_model,
            messages=messages,
            max_tokens=3000,
            temperature=0.8
        )
        response_content = response.choices[0].message.content.strip()
        # Extract the structured sections from the raw model output.
        result = self._parse_response(response_content)
        return (
            result.get('initial_prompt_evaluation', ''),
            result.get('refined_prompt', ''),
            result.get('explanation_of_refinements', ''),
            result
        )
    except HfHubHTTPError:
        # The Inference API rejected or timed out the request; surface
        # friendly messages instead of the raw HTTP error.
        return (
            "Error: Model timeout. Please try again later.",
            "The selected model is currently experiencing high traffic.",
            "The selected model is currently experiencing high traffic.",
            {}
        )
    except Exception as e:
        # Catch-all boundary: this method feeds a UI, so any failure is
        # converted into displayable strings rather than propagated.
        return (
            f"Error: {str(e)}",
            "",
            "An unexpected error occurred.",
            {}
        )
def _parse_response(self, response_content: str) -> dict:
try:
# Try to find JSON in response
json_match = re.search(r'
⚠ This Space is a work in progress — we are actively improving it, so you might encounter some bugs! Please report any issues in the Community tab to help us make it better for everyone.
" ) def refine_prompt(self, prompt: str, meta_prompt_choice: str) -> tuple: input_data = PromptInput(text=prompt, meta_prompt_choice=meta_prompt_choice) # Since result is a tuple with 4 elements based on the return value of prompt_refiner.refine_prompt initial_prompt_evaluation, refined_prompt, explanation_refinements, full_response = self.prompt_refiner.refine_prompt(input_data) analysis_evaluation = f"\n\n{initial_prompt_evaluation}" return ( analysis_evaluation, refined_prompt, explanation_refinements, full_response ) def apply_prompts(self, original_prompt: str, refined_prompt: str, model: str): try: original_output = self.prompt_refiner.apply_prompt(original_prompt, model) refined_output = self.prompt_refiner.apply_prompt(refined_prompt, model) return original_output, refined_output except Exception as e: return f"Error: {str(e)}", f"Error: {str(e)}" def launch(self, share=False): self.interface.launch(share=share) #explanation_markdown = "".join([f"- **{key}**: {value}\n" for key, value in metaprompt_explanations.items()]) ''' meta_info="" api_token = os.getenv('HF_API_TOKEN') if not api_token: raise ValueError("HF_API_TOKEN not found in environment variables") metadone = os.getenv('metadone') prompt_refiner_model = os.getenv('prompt_refiner_model') echo_prompt_refiner = os.getenv('echo_prompt_refiner') metaprompt1 = os.getenv('metaprompt1') loic_metaprompt = os.getenv('loic_metaprompt') openai_metaprompt = os.getenv('openai_metaprompt') original_meta_prompt = os.getenv('original_meta_prompt') new_meta_prompt = os.getenv('new_meta_prompt') advanced_meta_prompt = os.getenv('advanced_meta_prompt') math_meta_prompt = os.getenv('metamath') autoregressive_metaprompt = os.getenv('autoregressive_metaprompt') ''' if __name__ == '__main__': prompt_refiner = PromptRefiner(api_token,meta_prompts) gradio_interface = GradioInterface(prompt_refiner,custom_css) gradio_interface.launch(share=True)