baconnier committed
Commit 728acaa · verified · 1 Parent(s): 7838862

Update app.py

Files changed (1)
  1. app.py +28 -22
app.py CHANGED
@@ -6,10 +6,16 @@ from pydantic import BaseModel, Field
 from typing import Optional, Literal, Dict
 from huggingface_hub.errors import HfHubHTTPError
 
+from pydantic import BaseModel, Field
+from typing import Optional, Literal
+from huggingface_hub.errors import HfHubHTTPError
+
+# Input model
 class PromptInput(BaseModel):
     text: str = Field(..., description="The initial prompt text")
     meta_prompt_choice: Literal["star", "done", "physics", "morphosis", "verse", "phor", "bolism", "math", "arpe"] = Field(..., description="Choice of meta prompt strategy")
 
+# Output model for LLM responses
 class LLMResponse(BaseModel):
     initial_prompt_evaluation: str = Field(default="")
     refined_prompt: str = Field(default="")
@@ -18,7 +24,7 @@ class LLMResponse(BaseModel):
 class PromptRefiner:
     def __init__(self, api_token: str):
         self.client = InferenceClient(token=api_token, timeout=300)
-        self.meta_prompts: Dict[str, str] = {
+        self.meta_prompts = {
             "morphosis": original_meta_prompt,
             "verse": new_meta_prompt,
             "physics": metaprompt1,
@@ -41,9 +47,9 @@ class PromptRefiner:
                 "role": "system",
                 "content": '''You are an expert at refining prompts. Respond in JSON format with exactly these fields:
 {
-    "initial_prompt_evaluation": "your evaluation of the initial prompt",
-    "refined_prompt": "your refined version of the prompt",
-    "explanation_of_refinements": "your explanation of the changes made"
+    "initial_prompt_evaluation": "your evaluation",
+    "refined_prompt": "your refined prompt",
+    "explanation_of_refinements": "your explanation"
 }'''
             },
             {
@@ -65,12 +71,12 @@ class PromptRefiner:
             try:
                 parsed_response = LLMResponse.model_validate_json(response_content)
                 result = parsed_response.model_dump()
-            except Exception as e:
+            except Exception:
                 # Fallback to basic dict if JSON parsing fails
                 result = {
-                    "initial_prompt_evaluation": "Error parsing model response",
-                    "refined_prompt": response_content,
-                    "explanation_of_refinements": str(e)
+                    "initial_prompt_evaluation": response_content,
+                    "refined_prompt": prompt_input.text,
+                    "explanation_of_refinements": "Failed to parse model response"
                 }
 
             return (
@@ -82,27 +88,27 @@ class PromptRefiner:
 
         except HfHubHTTPError as e:
             error_response = LLMResponse(
-                initial_prompt_evaluation="Error: Model timeout",
-                refined_prompt="The model is currently experiencing high traffic",
-                explanation_of_refinements="Please try again later"
-            )
+                initial_prompt_evaluation="Error: Model timeout or connection issue",
+                refined_prompt=prompt_input.text,
+                explanation_of_refinements="Please try again in a few moments"
+            ).model_dump()
             return (
-                error_response.initial_prompt_evaluation,
-                error_response.refined_prompt,
-                error_response.explanation_of_refinements,
-                error_response.model_dump()
+                error_response["initial_prompt_evaluation"],
+                error_response["refined_prompt"],
+                error_response["explanation_of_refinements"],
+                error_response
             )
         except Exception as e:
             error_response = LLMResponse(
                 initial_prompt_evaluation=f"Error: {str(e)}",
-                refined_prompt="",
+                refined_prompt=prompt_input.text,
                 explanation_of_refinements="An unexpected error occurred"
-            )
+            ).model_dump()
             return (
-                error_response.initial_prompt_evaluation,
-                error_response.refined_prompt,
-                error_response.explanation_of_refinements,
-                error_response.model_dump()
+                error_response["initial_prompt_evaluation"],
+                error_response["refined_prompt"],
+                error_response["explanation_of_refinements"],
+                error_response
             )
 
     def apply_prompt(self, prompt: str, model: str) -> str:
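
For reference, the pattern this commit moves to can be exercised in isolation: validate the raw model output against LLMResponse with Pydantic v2's model_validate_json, fall back to a plain dict that echoes the original prompt when parsing fails, and return dictionary lookups on the model_dump() payload instead of attribute access. The sketch below is a minimal, standalone illustration of that pattern only; the parse_or_fallback helper and its example inputs are hypothetical, field defaults are mirrored from the diff, and the surrounding PromptRefiner / InferenceClient plumbing is omitted.

from pydantic import BaseModel, Field


class LLMResponse(BaseModel):
    initial_prompt_evaluation: str = Field(default="")
    refined_prompt: str = Field(default="")
    explanation_of_refinements: str = Field(default="")


def parse_or_fallback(response_content: str, original_text: str):
    # Validate the raw LLM output as JSON; on any failure, echo the raw
    # content and the user's original prompt, as the updated app.py does.
    try:
        result = LLMResponse.model_validate_json(response_content).model_dump()
    except Exception:
        result = {
            "initial_prompt_evaluation": response_content,
            "refined_prompt": original_text,
            "explanation_of_refinements": "Failed to parse model response",
        }
    # Dictionary-style access mirrors the new error paths, which call
    # .model_dump() up front and index the resulting dict.
    return (
        result["initial_prompt_evaluation"],
        result["refined_prompt"],
        result["explanation_of_refinements"],
        result,
    )


# Usage: malformed JSON falls back to the original prompt text.
print(parse_or_fallback("not json", "Summarize this article")[1])  # Summarize this article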