# NOTE(review): removed Hugging Face Spaces page-scraping artifacts (run status,
# file size, commit hashes, line-number gutter) that were captured with the
# source and made this file invalid Python.
import gradio as gr
from prompt_refiner import PromptRefiner
from variables import models, explanation_markdown
from variables import *
from custom_css import custom_css
class GradioInterface:
    """Gradio UI for PROMPT++: analyze, refine, and apply prompts.

    Wraps a PromptRefiner back-end and wires its methods into a gr.Blocks
    layout: an input column, an analysis/output column, and a model-apply
    column with tabbed outputs.
    """

    def __init__(self, prompt_refiner: "PromptRefiner", custom_css):
        """Build the Blocks interface.

        Args:
            prompt_refiner: Back-end providing ``automatic_metaprompt``,
                ``refine_prompt`` and ``apply_prompt``.
            custom_css: CSS string passed to ``gr.Blocks``.
        """
        self.prompt_refiner = prompt_refiner
        with gr.Blocks(css=custom_css, theme=gr.themes.Default()) as self.interface:
            # --- Title banner ---
            with gr.Column(elem_classes=["container", "title-container"]):
                gr.Markdown("# PROMPT++")
                gr.Markdown("### Automating Prompt Engineering by Refining your Prompts")
                gr.Markdown("Learn how to generate an improved version of your prompts.")

            # --- Prompt input and refine triggers ---
            with gr.Column(elem_classes=["container", "input-container"]):
                prompt_text = gr.Textbox(
                    # typo fixed: "let it empty" -> "leave it empty"
                    label="Type your prompt (or leave it empty to see metaprompt)",
                    lines=5,
                )
                with gr.Row(elem_classes=["container2"]):
                    automatic_metaprompt_button = gr.Button(
                        "Refine Prompt using automatic MetaPrompt choice"
                    )
                # NOTE(review): original indentation was lost in transit; this
                # nesting (Accordion at column level) is the assumed layout.
                with gr.Accordion("Manual Choice of Meta Prompt", open=False):
                    meta_prompt_choice = gr.Radio(
                        metaprompt_list,
                        label="Choose Meta Prompt",
                        value=metaprompt_list[0],
                        elem_classes=["no-background", "radio-group"],
                    )
                    refine_button = gr.Button("Refine Prompt using manual choice")
                    gr.Markdown(explanation_markdown)
                gr.Examples(
                    examples=examples,
                    inputs=[prompt_text, meta_prompt_choice],
                )

            # --- Analysis and refined-prompt display ---
            with gr.Column(elem_classes=["container", "analysis-container"]):
                gr.Markdown(' ')
                gr.Markdown("### Original Prompt analysis")
                analysis_evaluation = gr.Markdown()
                gr.Markdown("---")
                # typo fixed: "MataPrompt used" -> "MetaPrompt used"
                metaprompt_analysis = gr.Markdown("### MetaPrompt used")
                gr.Markdown("### Refined Prompt")
                refined_prompt = gr.Textbox(
                    label="Refined Prompt",
                    interactive=True,
                    show_label=True,
                    show_copy_button=True,
                )
                gr.Markdown("### Explanation of Refinements")
                explanation_of_refinements = gr.Markdown()

            # --- Apply both prompts to a chosen model ---
            with gr.Column(elem_classes=["container", "model-container"]):
                with gr.Row():
                    apply_model = gr.Dropdown(
                        models,
                        value="meta-llama/Llama-3.1-8B-Instruct",
                        label="Choose the Model",
                        container=False,
                        scale=1,
                        min_width=300,
                    )
                    apply_button = gr.Button("Apply Prompts")
                # typo fixed: "choosen" -> "chosen"
                gr.Markdown("### Prompts on chosen model")
                with gr.Tabs():
                    with gr.TabItem("Original Prompt Output"):
                        original_output = gr.Markdown()
                    with gr.TabItem("Refined Prompt Output"):
                        refined_output = gr.Markdown()
                with gr.Accordion("Full Response JSON", open=False, visible=True):
                    full_response_json = gr.JSON()

            # --- Event wiring ---
            # Both refine paths update the same five output components.
            refinement_outputs = [
                metaprompt_analysis,
                analysis_evaluation,
                refined_prompt,
                explanation_of_refinements,
                full_response_json,
            ]
            automatic_metaprompt_button.click(
                fn=self.automatic_metaprompt,
                inputs=[prompt_text, meta_prompt_choice],
                outputs=refinement_outputs,
            )
            refine_button.click(
                fn=self.refine_prompt,
                inputs=[prompt_text, meta_prompt_choice],
                outputs=refinement_outputs,
            )
            apply_button.click(
                fn=self.apply_prompts,
                inputs=[prompt_text, refined_prompt, apply_model],
                outputs=[original_output, refined_output],
                api_name="apply_prompts",
            )
            gr.HTML(
                "<p style='text-align: center; color:orange;'>⚠ This space is in progress, and we're actively working on it, so you might find some bugs! Please report any issues you have in the Community tab to help us make it better for all.</p>"
            )

    def _present(self, results: tuple) -> tuple:
        """Reshape a 5-tuple refiner result for the UI (shared by both refine paths)."""
        (metaprompt_analysis_evaluation, initial_prompt_evaluation,
         refined_prompt, explanation_refinements, full_response) = results
        return (
            metaprompt_analysis_evaluation,
            # leading blank lines give the Markdown component breathing room
            f"\n\n{initial_prompt_evaluation}",
            refined_prompt,
            explanation_refinements,
            full_response,
        )

    def automatic_metaprompt(self, prompt: str, meta_prompt_choice: str) -> tuple:
        """Refine *prompt*, letting the back-end pick the meta prompt automatically."""
        return self._present(
            self.prompt_refiner.automatic_metaprompt(prompt, meta_prompt_choice)
        )

    def refine_prompt(self, prompt: str, meta_prompt_choice: str) -> tuple:
        """Refine *prompt* using the manually selected meta prompt."""
        return self._present(
            self.prompt_refiner.refine_prompt(prompt, meta_prompt_choice)
        )

    def apply_prompts(self, original_prompt: str, refined_prompt: str, model: str):
        """Run both prompts through *model*.

        Returns:
            (original_output, refined_output); on any back-end failure the
            same "Error: ..." message fills both slots so the UI stays usable.
        """
        try:
            original_output = self.prompt_refiner.apply_prompt(original_prompt, model)
            refined_output = self.prompt_refiner.apply_prompt(refined_prompt, model)
            return original_output, refined_output
        except Exception as e:  # surface network/model failures in the UI
            error_message = f"Error: {e}"
            return error_message, error_message

    def launch(self, share=False):
        """Start the Gradio server (share=True exposes a public link)."""
        self.interface.launch(share=share)
def main() -> None:
    """Wire up the refiner back-end and serve the PROMPT++ interface."""
    # Back-end: API token plus the meta-prompt catalogue from variables.py.
    refiner = PromptRefiner(api_token, meta_prompts, metaprompt_explanations)
    # Front-end: Blocks UI styled with the project CSS.
    app = GradioInterface(refiner, custom_css)
    # share=True publishes a temporary public Gradio link.
    app.launch(share=True)


if __name__ == '__main__':
    main()