Spaces:
Sleeping
Sleeping
File size: 13,778 Bytes
2be25ee 2759dc3 2be25ee 2759dc3 2be25ee 12288a7 2be25ee d7f0fca 2759dc3 2be25ee 12288a7 2be25ee 2759dc3 d7f0fca 2759dc3 73b5f68 2be25ee 2759dc3 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 |
#------------------------------------------------------------------------
# Import Modules
#------------------------------------------------------------------------
import streamlit as st
import openai
import random
import os
from langchain.chat_models import ChatOpenAI
# from langsmith import Client
# from langchain.smith import RunEvalConfig, run_on_dataset
#------------------------------------------------------------------------
# Load API Keys From the .env File, & OpenAI, Pinecone, and LangSmith Client
#------------------------------------------------------------------------
# Pull the OpenAI key from Streamlit's secrets store and expose it as an
# environment variable (LangChain's ChatOpenAI reads OPENAI_API_KEY from
# the environment), then hand the same key to the openai client.
os.environ["OPENAI_API_KEY"] = st.secrets["OPENAI_API_KEY"]
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
openai.api_key = OPENAI_API_KEY
# NOTE: LangSmith tracing is currently disabled. To re-enable, restore the
# following configuration (and the Client import above):
# os.environ["LANGCHAIN_API_KEY"] = st.secrets["LANGCHAIN_API_KEY"]
# os.environ["LANGCHAIN_TRACING_V2"] = "true"
# os.environ["LANGCHAIN_ENDPOINT"] = "https://api.smith.langchain.com"
# os.environ["LANGCHAIN_PROJECT"] = "Inkqa"
# LANGCHAIN_API_KEY = os.getenv("LANGCHAIN_API_KEY")
# client = Client(api_key=LANGCHAIN_API_KEY) #langsmith client
#------------------------------------------------------------------------
# Initialize
#------------------------------------------------------------------------
# Configure the Streamlit page; this must run before any other st.* call.
st.set_page_config(
    page_title="PromptSmith™ | Prompts",
    page_icon=":control_knobs:",
    layout="centered",
    initial_sidebar_state="auto",
    menu_items={
        'Get Help': 'mailto:[email protected]',
        'About': "This app is built on the article *Principled Instructions Are All You Need*, which is accessible in the side bar"
    }
)
# Render the logo at a fixed pixel width, then the page headings.
LOGO_WIDTH = 300
st.image('MTSS.ai_Logo.png', width=LOGO_WIDTH)
st.header('PromptSmith™ | Prompts')
st.subheader('Evidence-based Prompts')
with st.sidebar:
    # Password input field (currently disabled)
    # password = st.text_input("Enter Password:", type="password")
    st.subheader(':grey[_Principled Instructions Are All You Need_]')
    st.caption(':grey[_Bsharat et. al., 2024_]')
    st.link_button("View | Download", "https://arxiv.org/pdf/2312.16171.pdf")
    st.divider()
    # Principles text with Markdown formatting. The 26 principles are quoted
    # from Bsharat et al. (2024); extraction typos have been corrected
    # ("years" for "vears", "$xxx" for "Sxxx", bracket garbling in #26).
    Principles_Content = """
**Principles**
1. If you prefer more concise answers, **no need to be polite** with LLM so there is no need to add phrases like "please", "if you don't mind", "thank you", "I would like to", etc., and get straight to the point.
2. Integrate the **intended audience** in the prompt, e.g., the audience is an expert in the field.
3. Break down complex tasks into a **sequence of simpler prompts** in an interactive conversation.
4. Employ **affirmative directives** such as 'do,' while steering clear of negative language like 'don't'.
5. When you need **clarity or a deeper understanding** of a topic, idea, or any piece of information, utilize the following prompts:
* Explain [insert specific topic] in simple terms.
* Explain to me like I'm 11 years old.
* Explain to me as if I'm a beginner in [field].
* Write the [essay/text/paragraph] using simple English like you're explaining something to a 5-year-old.
6. Add "I'm going to **tip** $xxx for a better solution!"
7. Implement **example-driven prompting** (Use few-shot prompting).
8. When **formatting** your prompt, start with '###Instruction###', followed by either '###Example###' or '###Question###' if relevant. Subsequently, present your content. Use one or more line breaks to separate instructions, examples, questions, context, and input data.
9. Incorporate the following **phrases**: "Your task is" and "You MUST".
10. Incorporate the following **phrases**: "You will be penalized".
11. Use the **phrase** "Answer a question given in a natural, human-like manner" in your prompts.
12. Use **leading words** like writing "think step by step".
13. Add to your prompt the following **phrase** "Ensure that your answer is unbiased and avoids relying on stereotypes."
14. Allow the model to elicit precise details and requirements from you by **asking you questions** until he has enough information to provide the needed output (for example, "From now on, I would like you to ask me questions to ..").
15. To inquire about a specific topic or idea or any information and you want to test your understanding, you can use the following **phrase**: "Teach me any [theorem/topic/rule name] and include a test at the end, and let me know if my answers are correct after I respond, without providing the answers beforehand."
16. **Assign a role** to the large language models.
17. Use **Delimiters**.
18. **Repeat** a specific word or phrase multiple times within a prompt.
19. Combine **Chain-of-thought (CoT)** with few-Shot prompts.
20. Use **output primers**, which involve concluding your prompt with the beginning of the desired output. Utilize output primers by ending your prompt with the start of the anticipated response.
21. To write an **essay/text/paragraph/article** or any type of text that should be detailed: "Write a detailed [essay/text/paragraph] for me on [topic] in detail by adding all the information necessary".
22. To **correct/change specific text without changing its style**: "Try to revise every paragraph sent by users. You should only improve the user's grammar and vocabulary and make sure it sounds natural. You should maintain the original writing style, ensuring that a formal paragraph remains formal.!"
23. When you have a complex **coding prompt** that may be in different files: "From now and on whenever you generate code that spans more than one file, generate a [programming language ] script that can be run to automatically create the specified files or make changes to existing files to insert the generated code. [your question]".
24. When you want to initiate or continue a text using specific words, phrases, or sentences, utilize the following **prompt**:
* I'm providing you with the beginning [song lyrics/story/paragraph/essay...]: [Insert lyrics/words/sentence]. Finish it based on the words provided. Keep the flow consistent.
25. Clearly state the **requirements** that the model must follow in order to produce content, in the form of the keywords, regulations, hint, or instructions
26. To write any text, such as an essay or paragraph, that is intended to be similar to a provided sample, include the following **instructions**:
* Use the same language based on the provided [paragraph/title/text/essay/answer].
"""
    st.markdown(Principles_Content)
def refine_prompt_with_LLM(role, audience, topic, expertise, examples, text, instructions, primer):
    """Rewrite the user's raw inputs into a refined, principle-based prompt.

    Parameters mirror the Streamlit text inputs collected in the main page:
    the AI's role, intended audience, topic/question, expertise level,
    few-shot example, desired response form (sentence/paragraph/...),
    formatting instructions, and an output primer.

    Returns the refined prompt text (stripped) produced by the model.
    Raises whatever openai raises on API failure (propagated to the caller).
    """
    # Condensed prompt-engineering principles (Bsharat et al., 2024) that the
    # model is told to apply when composing the refined prompt.
    prompt_principles = """
    ### Prompt Principles
    - Use clear, concise instructional language.
    - Is it necessary to include polite phrases like ‘please’ or ‘thank you.’
    - Break down complex tasks into a sequence of simpler prompts.
    - Employ affirmative directives such as 'do,' while steering clear of negative language like 'don't'.
    - Add ‘I'm going to tip well for a better answer!’
    - When formatting the prompt, start with ###Instruction###, followed by either ###Example### or ###Question### if relevant.
    - Incorporate the following phrases: ‘Your task is’ and ‘You must’.
    - Incorporate the following phrases: ‘You will be penalized if you don’t follow the instructions, examples, or question.’
    - Use the phrase ‘answer a question given in a natural, human-like manner’ in your prompt.
    - Use leading words like writing ‘think step by step.’
    - Add to your prompt the following phrase "Ensure that your answer is unbiased and avoids relying on stereotypes."
    - Use delimiters in your prompt, shape, and structure prompt for optimal clarity. Segment your instructions, inputs, examples, and outputs into logical blocks.
    - If warranted, repeat a specific word or phrase multiple times within the prompt.
    - Use Chain-of-thought (CoT) in the prompt. For example, instead of asking Who was the first person to walk on the moon?, write the prompt "1. The moon landing happened in 1969. 2. We need to identify the astronaut who first stepped onto the moon during that mission.
    - Improve the user's grammar and vocabulary and make sure it sounds natural. You should maintain the original writing style, ensuring that a formal paragraph remains formal.
    """
    # Construct the full prompt with user input and prompt principles.
    prompt = f"""
    ### Instruction
    Create a prompt for the ###question### or ###topic###: {topic}.
    The prompt must instruct the AI to be the following ###role###: {role}, and ###audience###: {audience}.
    The prompt should respond using the following level of ###expertise###: {expertise}.
    Here is a few-shot prompt to help guide your output, consider it an ###example###: {examples}.
    Your text response should be ###formatted### as a {text}.
    Your prompt should instruct the AI to ###format### the response as a: {instructions}.
    You will begin your response using this ###primer###: {primer}.
    ### Prompt principles
    {prompt_principles}
    """
    # Generate the refined prompt using ChatGPT.
    response = openai.chat.completions.create(
        model="gpt-3.5-turbo",  # Adjust model as needed
        messages=[
            {"role": "system", "content": "You are a helpful assistant that creates prompts for AI. You will write a prompt using the audience, topic, and additional context, and your prompt should be in paragraph form so that the user can copy and paste the prompt into a ChatGPT and get their answer. You will not answer the prompt, just provide a revised prompt."},
            {"role": "user", "content": prompt}
        ],
        max_tokens=200  # caps the length of the refined prompt, not the final answer
    )
    return response.choices[0].message.content.strip()
def prompt_response_from_LLM(prompt):
    """Answer the refined prompt in the voice of an experienced MTSS educator.

    Sends *prompt* to the chat model with a fixed educator system persona and
    returns the model's reply (stripped). Raises whatever openai raises on
    API failure (propagated to the caller).
    """
    response = openai.chat.completions.create(
        model="gpt-3.5-turbo",  # Adjust model as needed
        messages=[
            {"role": "system", "content": "You are an educator with many years of experience working with MTSS, the science of literacy, and SWPBIS. Respond to the prompt thoughtfully and ensure you are providing the best coaching and technical assistance."},
            {"role": "user", "content": prompt}
        ],
        # max_tokens=1000  # intentionally uncapped so full answers are returned
    )
    return response.choices[0].message.content.strip()
# Collect user inputs that feed refine_prompt_with_LLM().
topic = st.text_input("What is the specific question or topic you want to inquire about?")
role = st.text_input("What role would you like to assign to the AI? E.g., experienced high school teacher.")
audience = st.text_input("Who is the intended audience?")
expertise = st.text_input("Explain to me as if I'm a beginner/expert.")
examples = st.text_input("Do you have an example response to your prompt? E.g., [Prompt] Create a literary analysis of a poem by Emily Dickinson? [Example Response]: *Analysis of Hope is the thing with feathers, examining Dickinson's use of metaphor and imagery.*")
text = st.text_input("Would you like your response to be a sentence / paragraph / page / essay?")
instructions = st.text_input("Offer clear and concise instructions on what to expect from the AI. E.g., [Prompt] Write an essay discussing the impact of… [Instructions] Include scientific evidence… or [Instructions] Provide a bulleted list, then summarize in a few sentences.")
primer = st.text_input("How would you like the AI to start answering? E.g., [Primer] Begin your response by highlighting the benefits of using the essential practices of literacy instruction… discuss… describe… explain… explore… examine…")
# st.divider()
# --- Step 1: build the refined prompt -------------------------------------
if st.button("Generate Refined Prompt", type="secondary"):
    with st.spinner('Thinking...'):
        generated = refine_prompt_with_LLM(role, audience, topic, expertise, examples, text, instructions, primer)
        if generated:
            st.session_state['refined_prompt'] = generated
# Editable text box showing the current refined prompt (if any).
editable_prompt = st.text_area("Refined PROMPT", value=st.session_state.get('refined_prompt', ''), height=300)
if editable_prompt:
    st.success('Refined prompt generated successfully.')
    # Persist any manual edits the user made in the text box.
    st.session_state['refined_prompt'] = editable_prompt
st.divider()
# --- Step 2: answer the refined prompt -------------------------------------
if st.button("Generate Response from Refined Prompt", type="secondary"):
    with st.spinner('Thinking...'):
        if st.session_state.get('refined_prompt'):
            answer = prompt_response_from_LLM(st.session_state['refined_prompt'])
            if answer:
                st.session_state['response_text'] = answer
        else:
            st.error("Please generate a refined prompt first.")
# Read-only display of the model's answer (if any).
displayed_response = st.text_area("MTSS.ai RESPONSE", value=st.session_state.get('response_text', ''), height=600)
if displayed_response:
    st.info('Powered by MTSS GPT. AI can make mistakes. Consider checking important information.')
|