import streamlit as st
import google.generativeai as genai

# Streamlit app layout
st.title('PromptLab')

# Mode selector for the Shinobi and Raikage frameworks
mode = st.radio("Choose a mode:", ["Shinobi", "Raikage"], horizontal=True)

# Retrieve the API key from Streamlit secrets
GOOGLE_API_KEY = st.secrets["GEMINI_API_KEY"]

# Configure the Google Generative AI API with your API key
genai.configure(api_key=GOOGLE_API_KEY)
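# The lookup above assumes the key is provided via a local .streamlit/secrets.toml
# (or the Streamlit Cloud secrets manager); a minimal sketch of that file:
#
#   # .streamlit/secrets.toml
#   GEMINI_API_KEY = "your-gemini-api-key"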

# Input field for the prompt to enhance
topic = st.text_area('Enter your prompt:')

# Display selected mode
st.write(f"You selected: {mode}")


# Shinobi and Raikage templates
SHINOBI_TEMPLATE = """
You are an advanced prompt enhancer, specializing in creating structured, high-clarity prompts that optimize LLM performance.  
Your task is to refine a given prompt using the **Shinobi framework**, ensuring the following principles:

✅ **Concise & High-Density Prompting** → Remove fluff, keeping instructions clear and actionable (~250 words max).  
✅ **Explicit Role Definition** → Assign a role to the AI for better contextual grounding.  
✅ **Step-by-Step Clarity** → Break the task into structured sections, avoiding ambiguity.  
✅ **Defined Output Format** → Specify the response format (JSON, CSV, list, structured text, etc.).  
✅ **Zero Conflicting Instructions** → Ensure clarity in constraints (e.g., avoid "simple yet comprehensive").  
✅ **Optional: One-Shot Example** → Add a single example where relevant to guide the AI.  

### **Enhance the following prompt using Shinobi principles:**  
**Original Prompt:**  
{user_prompt}  

**Enhanced Shinobi Prompt:**  
"""

RAIKAGE_TEMPLATE = """
You are an elite AI strategist, specializing in designing execution-focused prompts that maximize LLM efficiency.  
Your task is to refine a given prompt using the **Raikage framework**, ensuring the following principles:  

✅ **Precision & Depth** → Ensure expert-level guidance, reducing vagueness and ambiguity.  
✅ **Context & Execution Approach** → Include a structured methodology to solve the problem.  
✅ **Defined Output Format** → Specify exact structure (JSON, formatted text, markdown, tables, or code blocks).  
✅ **Edge Case Handling & Constraints** → Account for potential failures and model limitations.  
✅ **Optional: Few-Shot Prompting** → If beneficial, provide 1-2 high-quality examples for refinement.  
✅ **Complies with External Factors** → Adhere to best practices (e.g., ethical scraping, security policies).  

### **Enhance the following prompt using Raikage principles:**  
**Original Prompt:**  
{user_prompt}  

**Enhanced Raikage Prompt:**  
"""
if st.button("Generate Enhanced Prompt"):
    if topic.strip():
        with st.spinner("Enhancing your prompt..."):
            # Choose the template based on the selected mode
            if mode == "Shinobi":
                prompt = SHINOBI_TEMPLATE.format(user_prompt=topic)
            else:
                prompt = RAIKAGE_TEMPLATE.format(user_prompt=topic)

            # Initialize the generative model
            model = genai.GenerativeModel('gemini-2.0-flash')

            # Generate enhanced prompt
            try:
                response = model.generate_content(prompt)
                enhanced_prompt = response.text  # Extract the response text
                st.subheader("🔹 Enhanced Prompt:")
                st.code(enhanced_prompt, language="markdown")
            except Exception as e:
                st.error(f"❌ Error generating enhanced prompt: {e}")
    else:
        st.warning("⚠️ Please enter a prompt before generating.")
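
# Run note (filename assumed): with the API key configured as above, the app can be
# started locally with `streamlit run app.py`.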