Update app.py
app.py CHANGED
@@ -103,6 +103,24 @@ def mistral_nemo_call(prompt, API_TOKEN, model="mistralai/Mistral-Nemo-Instruct-
     return response


+
+def chat_with_persona(message, history, system_message, max_tokens, temperature, top_p):
+    """Function to interact with the chatbot API using the generated persona"""
+    try:
+        # Call the API with the current message and system prompt (persona)
+        response = chatbot_client.predict(
+            message=message,
+            system_message=system_message,
+            max_tokens=max_tokens,
+            temperature=temperature,
+            top_p=top_p,
+            api_name="/chat"
+        )
+        return response
+    except Exception as e:
+        return f"Error communicating with the chatbot API: {str(e)}"
+
+
 def query(prompt, is_negative=False, steps=30, cfg_scale=7, sampler="DPM++ 2M Karras", seed=-1,
           strength=0.7, huggingface_api_key=None, use_dev=False,
           enhance_prompt_style="generic", enhance_prompt_option=False,
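The new chat_with_persona helper depends on a chatbot_client object that is not defined in this hunk; it is presumably a gradio_client.Client pointing at the chat Space, created elsewhere in app.py. A minimal sketch of how the helper could be wired up and called, assuming a placeholder Space ID:

    from gradio_client import Client

    # Placeholder Space ID (assumption); the real client is created elsewhere in app.py.
    chatbot_client = Client("username/chat-space")

    # Ask the chat endpoint to rewrite a raw prompt under a given persona.
    reply = chat_with_persona(
        message="a cat sitting on a windowsill",
        history=[],
        system_message="You are an image generation prompt enhancer.",
        max_tokens=256,
        temperature=0.1,
        top_p=0.97,
    )
    print(reply)

The history argument is accepted but unused by the helper, matching the /chat endpoint signature it forwards to.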
@@ -137,10 +155,25 @@ def query(prompt, is_negative=False, steps=30, cfg_scale=7, sampler="DPM++ 2M Ka

     original_prompt = prompt
     if enhance_prompt_option:
-        prompt = enhance_prompt_v2(prompt, style=enhance_prompt_style)
+        style=enhance_prompt_style
+        system_prompt=f"""
+        You are a image generation prompt enhancer specialized in the {style} style.
+        You must respond only with the enhanced version of the users input prompt
+        Remember, image generation models can be stimulated by refering to camera 'effect' in the prompt like :4k ,award winning, super details, 35mm lens, hd
+        """
+        prompt = chat_with_persona(message=prompt, history=[], system_message=system_prompt, max_tokens=256, temperature=0.1, top_p=0.97)
+        #prompt = enhance_prompt_v2(prompt, style=enhance_prompt_style)
         print(f'\033[1mGeneration {key} enhanced prompt:\033[0m {prompt}')
+
     if use_mistral_nemo:
-        prompt = mistral_nemo_call(prompt, API_TOKEN=API_TOKEN, style=nemo_enhance_prompt_style)
+        style=nemo_enhance_prompt_style
+        system_prompt=f"""
+        You are a image generation prompt enhancer specialized in the {style} style.
+        You must respond only with the enhanced version of the users input prompt
+        Remember, image generation models can be stimulated by refering to camera 'effect' in the prompt like :4k ,award winning, super details, 35mm lens, hd
+        """
+        prompt = chat_with_persona(message=prompt, history=[], system_message=system_prompt, max_tokens=256, temperature=0.1, top_p=0.97)
+        #prompt = mistral_nemo_call(prompt, API_TOKEN=API_TOKEN, style=nemo_enhance_prompt_style)
         print(f'\033[1mGeneration {key} Mistral-Nemo prompt:\033[0m {prompt}')

     final_prompt = f"{prompt} | ultra detail, ultra elaboration, ultra quality, perfect."
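Both branches of the second hunk build the same style-specific system prompt and route the user prompt through chat_with_persona, only swapping which style variable is used. A possible consolidation, not part of this commit and with a hypothetical helper name, would factor the shared logic into one function:

    # Hypothetical refactor sketch: one helper for both enhancement branches.
    def enhance_via_chatbot(prompt, style):
        system_prompt = f"""
        You are an image generation prompt enhancer specialized in the {style} style.
        You must respond only with the enhanced version of the user's input prompt.
        Remember, image generation models respond well to camera 'effect' terms
        such as: 4k, award winning, super details, 35mm lens, hd.
        """
        return chat_with_persona(
            message=prompt,
            history=[],
            system_message=system_prompt,
            max_tokens=256,
            temperature=0.1,
            top_p=0.97,
        )

    # Usage inside query(), mirroring the two branches above:
    # if enhance_prompt_option:
    #     prompt = enhance_via_chatbot(prompt, enhance_prompt_style)
    # if use_mistral_nemo:
    #     prompt = enhance_via_chatbot(prompt, nemo_enhance_prompt_style)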