baconnier committed
Commit 819f2b3 · verified · 1 Parent(s): 3bc68ae

Update app.py

Files changed (1):
  app.py  (+77 / -61)
app.py CHANGED

@@ -1,22 +1,22 @@
 import os
 import json
+import re
 from huggingface_hub import InferenceClient
 import gradio as gr
 from pydantic import BaseModel, Field
-from typing import Optional, Literal, Dict
+from typing import Optional, Literal
 from huggingface_hub.errors import HfHubHTTPError
 
-
-# Input model
 class PromptInput(BaseModel):
     text: str = Field(..., description="The initial prompt text")
-    meta_prompt_choice: Literal["star", "done", "physics", "morphosis", "verse", "phor", "bolism", "math", "arpe"] = Field(..., description="Choice of meta prompt strategy")
+    meta_prompt_choice: Literal["star","done","physics","morphosis", "verse", "phor","bolism","math","arpe"] = Field(..., description="Choice of meta prompt strategy")
 
-# Output model for LLM responses
-class LLMResponse(BaseModel):
-    initial_prompt_evaluation: str = Field(default="")
-    refined_prompt: str = Field(default="")
-    explanation_of_refinements: str = Field(default="")
+class RefinementOutput(BaseModel):
+    query_analysis: Optional[str] = None
+    initial_prompt_evaluation: Optional[str] = None
+    refined_prompt: Optional[str] = None
+    explanation_of_refinements: Optional[str] = None
+    raw_content: Optional[str] = None
 
 class PromptRefiner:
     def __init__(self, api_token: str):
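The new RefinementOutput model makes every field Optional with a None default, so a partially parsed response still validates (note that the refactored code below passes plain dicts around; the model itself is not referenced in the hunks shown). A minimal standalone sketch, with the model copied verbatim from the hunk and an illustrative construction:

from typing import Optional
from pydantic import BaseModel

class RefinementOutput(BaseModel):
    query_analysis: Optional[str] = None
    initial_prompt_evaluation: Optional[str] = None
    refined_prompt: Optional[str] = None
    explanation_of_refinements: Optional[str] = None
    raw_content: Optional[str] = None

# A partial parse still validates; unset fields stay None.
partial = RefinementOutput(refined_prompt="Explain X step by step.")
print(partial.model_dump()["query_analysis"])  # None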
@@ -34,23 +34,19 @@ class PromptRefiner:
 
     def refine_prompt(self, prompt_input: PromptInput) -> tuple:
         try:
+            # Select meta prompt using dictionary instead of if-elif chain
             selected_meta_prompt = self.meta_prompts.get(
-                prompt_input.meta_prompt_choice,
+                prompt_input.meta_prompt_choice,
                 advanced_meta_prompt
             )
-
+
             messages = [
                 {
-                    "role": "system",
-                    "content": '''You are an expert at refining prompts. Respond in JSON format with exactly these fields:
-                    {
-                        "initial_prompt_evaluation": "your evaluation",
-                        "refined_prompt": "your refined prompt",
-                        "explanation_of_refinements": "your explanation"
-                    }'''
+                    "role": "system",
+                    "content": 'You are an expert at refining and extending prompts. Given a basic prompt, provide a more detailed.'
                 },
                 {
-                    "role": "user",
+                    "role": "user",
                     "content": selected_meta_prompt.replace("[Insert initial prompt here]", prompt_input.text)
                 }
             ]
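The inline comment in this hunk names the technique: dict.get with a default replaces an if-elif chain over the strategy names. A minimal sketch, with hypothetical stand-in templates (meta_prompts and advanced_meta_prompt are defined elsewhere in app.py and are not shown in this diff):

# Hypothetical stand-ins for the real templates defined elsewhere in app.py.
meta_prompts = {
    "star": "STAR template: [Insert initial prompt here]",
    "math": "Math template: [Insert initial prompt here]",
}
advanced_meta_prompt = "Fallback template: [Insert initial prompt here]"

# dict.get(key, default) returns the fallback for any unknown choice,
# with no per-strategy branching.
selected = meta_prompts.get("verse", advanced_meta_prompt)
print(selected)  # -> Fallback template: [Insert initial prompt here]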
@@ -59,81 +55,101 @@ class PromptRefiner:
                 model=prompt_refiner_model,
                 messages=messages,
                 max_tokens=2000,
-                temperature=0.8,
-                response_format={"type": "json_object"}
+                temperature=0.8
             )
-
-            # Parse response using Pydantic
+
             response_content = response.choices[0].message.content.strip()
-            try:
-                parsed_response = LLMResponse.model_validate_json(response_content)
-                result = parsed_response.model_dump()
-            except Exception:
-                # Fallback to basic dict if JSON parsing fails
-                result = {
-                    "initial_prompt_evaluation": response_content,
-                    "refined_prompt": prompt_input.text,
-                    "explanation_of_refinements": "Failed to parse model response"
-                }
-
+
+            # Parse the response
+            result = self._parse_response(response_content)
+
             return (
-                result["initial_prompt_evaluation"],
-                result["refined_prompt"],
-                result["explanation_of_refinements"],
+                result.get('initial_prompt_evaluation', ''),
+                result.get('refined_prompt', ''),
+                result.get('explanation_of_refinements', ''),
                 result
             )
 
         except HfHubHTTPError as e:
-            error_response = LLMResponse(
-                initial_prompt_evaluation="Error: Model timeout or connection issue",
-                refined_prompt=prompt_input.text,
-                explanation_of_refinements="Please try again in a few moments"
-            ).model_dump()
             return (
-                error_response["initial_prompt_evaluation"],
-                error_response["refined_prompt"],
-                error_response["explanation_of_refinements"],
-                error_response
+                "Error: Model timeout. Please try again later.",
+                "The selected model is currently experiencing high traffic.",
+                "The selected model is currently experiencing high traffic.",
+                {}
             )
         except Exception as e:
-            error_response = LLMResponse(
-                initial_prompt_evaluation=f"Error: {str(e)}",
-                refined_prompt=prompt_input.text,
-                explanation_of_refinements="An unexpected error occurred"
-            ).model_dump()
             return (
-                error_response["initial_prompt_evaluation"],
-                error_response["refined_prompt"],
-                error_response["explanation_of_refinements"],
-                error_response
+                f"Error: {str(e)}",
+                "",
+                "An unexpected error occurred.",
+                {}
             )
 
+    def _parse_response(self, response_content: str) -> dict:
+        try:
+            # Try to find JSON in response
+            json_match = re.search(r'<json>\s*(.*?)\s*</json>', response_content, re.DOTALL)
+            if json_match:
+                json_str = json_match.group(1)
+                json_str = re.sub(r'\n\s*', ' ', json_str)
+                json_str = json_str.replace('"', '\\"')
+                json_output = json.loads(f'"{json_str}"')
+
+                if isinstance(json_output, str):
+                    json_output = json.loads(json_output)
+                output={
+                    key: value.replace('\\"', '"') if isinstance(value, str) else value
+                    for key, value in json_output.items()
+                }
+                output['response_content']=json_output
+                # Clean up JSON values
+                return output
+
+            # Fallback to regex parsing if no JSON found
+            output = {}
+            for key in ["initial_prompt_evaluation", "refined_prompt", "explanation_of_refinements"]:
+                pattern = rf'"{key}":\s*"(.*?)"(?:,|\}})'
+                match = re.search(pattern, response_content, re.DOTALL)
+                output[key] = match.group(1).replace('\\n', '\n').replace('\\"', '"') if match else ""
+            output['response_content']=response_content
+            return output
+
+        except (json.JSONDecodeError, ValueError) as e:
+            print(f"Error parsing response: {e}")
+            print(f"Raw content: {response_content}")
+            return {
+                "initial_prompt_evaluation": "Error parsing response",
+                "refined_prompt": "",
+                "explanation_of_refinements": str(e),
+                'response_content':str(e)
+            }
+
     def apply_prompt(self, prompt: str, model: str) -> str:
         try:
             messages = [
                 {
                     "role": "system",
-                    "content": "You are a helpful assistant. Answer in stylized version with latex format or markdown if relevant. Separate your answer into logical sections using level 2 headers (##) for sections and bolding (**) for subsections."
+                    "content": "You are a helpful assistant. Answer in stylized version with latex format or markdown if relevant. Separate your answer into logical sections using level 2 headers (##) for sections and bolding (**) for subsections. Incorporate a variety of lists, headers, and text to make the answer visually appealing"
                 },
                 {
                     "role": "user",
                     "content": prompt
                 }
             ]
-
+
             response = self.client.chat_completion(
                 model=model,
                 messages=messages,
                 max_tokens=2000,
                 temperature=0.8
             )
-
-            return response.choices[0].message.content.strip().replace('\n\n', '\n')
-
+
+            output = response.choices[0].message.content.strip()
+            return output.replace('\n\n', '\n').strip()
+
         except Exception as e:
             return f"Error: {str(e)}"
 
-
 class GradioInterface:
     def __init__(self, prompt_refiner: PromptRefiner):
         self.prompt_refiner = prompt_refiner
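The new _parse_response first searches for a <json>…</json> block, collapses newlines inside it, and round-trips the payload through an escaped JSON string before decoding; failing that, it regexes each expected key out of the raw text. For a well-formed payload the core extraction reduces to the sketch below (the sample response is invented for illustration):

import json
import re

# Invented sample model output wrapping its JSON in <json> tags.
sample = ('Some preamble text.\n'
          '<json>{"initial_prompt_evaluation": "Too vague.", '
          '"refined_prompt": "Explain X step by step.", '
          '"explanation_of_refinements": "Added structure."}</json>')

match = re.search(r'<json>\s*(.*?)\s*</json>', sample, re.DOTALL)
if match:
    parsed = json.loads(match.group(1))
    print(parsed["refined_prompt"])  # -> Explain X step by step.
else:
    # The committed fallback instead regexes each expected key from raw text.
    print("no <json> block found")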
 
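An end-to-end sketch of the refactored refine_prompt flow; it assumes the surrounding definitions in app.py (meta_prompts, advanced_meta_prompt, prompt_refiner_model) are in scope, and HF_TOKEN is a hypothetical environment-variable name, not taken from this diff:

import os

# Assumes app.py's definitions are in scope; "HF_TOKEN" is hypothetical.
refiner = PromptRefiner(api_token=os.environ["HF_TOKEN"])

evaluation, refined, explanation, full_result = refiner.refine_prompt(
    PromptInput(text="Write about dogs", meta_prompt_choice="star")
)
print(refined)             # the refined prompt, or "" if parsing failed
print(full_result.keys())  # parsed fields plus 'response_content'

On the HfHubHTTPError and generic exception paths, the same 4-tuple shape comes back with human-readable messages and an empty dict, so callers can unpack unconditionally.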