baconnier committed on
Commit
afbf3d7
·
verified ·
1 Parent(s): 5e0769d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -4
app.py CHANGED
@@ -194,9 +194,6 @@ class GradioInterface:
194
  gr.Markdown("# PROMPT++")
195
  gr.Markdown("### Automating Prompt Engineering by Refining your Prompts")
196
  gr.Markdown("Learn how to generate an improved version of your prompts.")
197
- gr.HTML(
198
- "<p style='text-align: center; color:orange;'>⚠ This space is in progress, and we're actively working on it, so you might find some bugs! Please report any issues you have in the Community tab to help us make it better for all.</p>"
199
- )
200
 
201
  with gr.Column(elem_classes=["container", "input-container"]):
202
  prompt_text = gr.Textbox(
@@ -294,7 +291,9 @@ class GradioInterface:
294
  outputs=[original_output, refined_output],
295
  api_name="apply_prompts" # Optional: adds API endpoint
296
  )
297
-
 
 
298
  def refine_prompt(self, prompt: str, meta_prompt_choice: str) -> tuple:
299
  input_data = PromptInput(text=prompt, meta_prompt_choice=meta_prompt_choice)
300
  # Since result is a tuple with 4 elements based on the return value of prompt_refiner.refine_prompt
 
194
  gr.Markdown("# PROMPT++")
195
  gr.Markdown("### Automating Prompt Engineering by Refining your Prompts")
196
  gr.Markdown("Learn how to generate an improved version of your prompts.")
 
 
 
197
 
198
  with gr.Column(elem_classes=["container", "input-container"]):
199
  prompt_text = gr.Textbox(
 
291
  outputs=[original_output, refined_output],
292
  api_name="apply_prompts" # Optional: adds API endpoint
293
  )
294
+ gr.HTML(
295
+ "<p style='text-align: center; color:orange;'>⚠ This space is in progress, and we're actively working on it, so you might find some bugs! Please report any issues you have in the Community tab to help us make it better for all.</p>"
296
+ )
297
  def refine_prompt(self, prompt: str, meta_prompt_choice: str) -> tuple:
298
  input_data = PromptInput(text=prompt, meta_prompt_choice=meta_prompt_choice)
299
  # Since result is a tuple with 4 elements based on the return value of prompt_refiner.refine_prompt