import json
import re
from typing import Optional, Dict, Any

from pydantic import BaseModel, Field, validator
from huggingface_hub import InferenceClient
from huggingface_hub.errors import HfHubHTTPError

# Expected to provide model identifiers such as prompt_refiner_model.
from variables import *
class LLMResponse(BaseModel):
    """Validated container for the refiner model's output."""

    initial_prompt_evaluation: str = Field(..., description="Evaluation of the initial prompt")
    refined_prompt: str = Field(..., description="The refined version of the prompt")
    explanation_of_refinements: str = Field(..., description="Explanation of the refinements made")
    response_content: Optional[Dict[str, Any]] = Field(None, description="Raw response content")

    @validator('initial_prompt_evaluation', 'refined_prompt', 'explanation_of_refinements')
    def clean_text_fields(cls, v):
        # Unescape literal "\n" and "\"" sequences left over from JSON extraction.
        if isinstance(v, str):
            return v.strip().replace('\\n', '\n').replace('\\"', '"')
        return v
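
# Illustrative example (not part of the module): a raw payload such as
# {"refined_prompt": "Explain X.\\nUse examples."} validates into a field value
# of "Explain X.\nUse examples." once clean_text_fields has run.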
class PromptRefiner:
    def __init__(self, api_token: str, meta_prompts):
        self.client = InferenceClient(token=api_token, timeout=120)
        self.meta_prompts = meta_prompts

    def refine_prompt(self, prompt: str, meta_prompt_choice: str) -> tuple:
        """Refine `prompt` with the chosen meta-prompt and return a 4-tuple of
        (evaluation, refined prompt, explanation, full response dict)."""
        try:
            # Fall back to the "star" meta-prompt when the choice is unknown.
            selected_meta_prompt = self.meta_prompts.get(
                meta_prompt_choice,
                self.meta_prompts["star"]
            )
            messages = [
                {
                    "role": "system",
                    "content": 'You are an expert at refining and extending prompts. Given a basic prompt, provide a more relevant and detailed prompt.'
                },
                {
                    "role": "user",
                    "content": selected_meta_prompt.replace("[Insert initial prompt here]", prompt)
                }
            ]
            response = self.client.chat_completion(
                model=prompt_refiner_model,
                messages=messages,
                max_tokens=3000,
                temperature=0.8
            )
            response_content = response.choices[0].message.content.strip()
            result = self._parse_response(response_content)

            # Create and validate the LLMResponse before returning it.
            llm_response = LLMResponse(**result)
            return (
                llm_response.initial_prompt_evaluation,
                llm_response.refined_prompt,
                llm_response.explanation_of_refinements,
                llm_response.dict()
            )
        except HfHubHTTPError:
            # Covers timeouts and other HTTP-level failures from the Hub API.
            return self._create_error_response("Model timeout. Please try again later.")
        except Exception as e:
            return self._create_error_response(f"Unexpected error: {str(e)}")
    def _create_error_response(self, error_message: str) -> tuple:
        # Return the same 4-tuple shape as refine_prompt so callers need no special-casing.
        error_response = LLMResponse(
            initial_prompt_evaluation=f"Error: {error_message}",
            refined_prompt="The selected model is currently unavailable.",
            explanation_of_refinements="An error occurred during processing.",
            response_content={"error": error_message}
        )
        return (
            error_response.initial_prompt_evaluation,
            error_response.refined_prompt,
            error_response.explanation_of_refinements,
            error_response.dict()
        )
    def _parse_response(self, response_content: str) -> dict:
        try:
            # First attempt: extract JSON wrapped in <json> ... </json> tags.
            json_match = re.search(r'<json>\s*(.*?)\s*</json>', response_content, re.DOTALL)
            if json_match:
                json_str = json_match.group(1)
                # Collapse newlines, then round-trip the block through a JSON
                # string literal so stray escape sequences are normalised
                # before the real parse.
                json_str = re.sub(r'\n\s*', ' ', json_str)
                json_str = json_str.replace('"', '\\"')
                json_output = json.loads(f'"{json_str}"')
                if isinstance(json_output, str):
                    json_output = json.loads(json_output)
                return {
                    "initial_prompt_evaluation": json_output.get("initial_prompt_evaluation", ""),
                    "refined_prompt": json_output.get("refined_prompt", ""),
                    "explanation_of_refinements": json_output.get("explanation_of_refinements", ""),
                    "response_content": json_output
                }

            # Second attempt: pull the individual fields out with regex.
            output = {}
            for key in ["initial_prompt_evaluation", "refined_prompt", "explanation_of_refinements"]:
                pattern = rf'"{key}":\s*"(.*?)"(?:,|\}})'
                match = re.search(pattern, response_content, re.DOTALL)
                output[key] = match.group(1) if match else ""
            output["response_content"] = response_content
            return output
        except (json.JSONDecodeError, ValueError) as e:
            print(f"Error parsing response: {e}")
            print(f"Raw content: {response_content}")
            return {
                "initial_prompt_evaluation": "Error parsing response",
                "refined_prompt": "",
                "explanation_of_refinements": str(e),
                "response_content": str(e)
            }
    def apply_prompt(self, prompt: str, model: str) -> str:
        """Run `prompt` against `model`, streaming back a markdown-formatted answer."""
        try:
            messages = [
                {
                    "role": "system",
                    "content": """You are a markdown formatting expert. Format your responses with proper spacing and structure following these rules:
1. Paragraph Spacing:
- Add TWO blank lines between major sections (##)
- Add ONE blank line between subsections (###)
- Add ONE blank line between paragraphs within sections
- Add ONE blank line before and after lists
- Add ONE blank line before and after code blocks
- Add ONE blank line before and after blockquotes
2. Section Formatting:
# Title
## Major Section
[blank line]
Content paragraph 1
[blank line]
Content paragraph 2
[blank line]"""
                },
                {
                    "role": "user",
                    "content": prompt
                }
            ]
            response = self.client.chat_completion(
                model=model,
                messages=messages,
                max_tokens=3000,
                temperature=0.8,
                stream=True
            )

            # Accumulate the streamed chunks into the full answer.
            full_response = ""
            for chunk in response:
                if chunk.choices[0].delta.content is not None:
                    full_response += chunk.choices[0].delta.content

            # Tighten double blank lines before handing the text to the UI.
            return full_response.replace('\n\n', '\n').strip()
        except Exception as e:
            return f"Error: {str(e)}"