import gradio as gr
import openai
import os
import re
from transformers import pipeline
title = "System Prompt Depersonalizer"
description = """
This app transforms personalized system prompts into generalized versions that can be shared with a wider audience.
You can use either OpenAI's API (requires API key) or Hugging Face's models (free).
"""
# Validate OpenAI API key format
def validate_api_key(api_key):
    if not api_key:
        return False
    # Check that it starts with "sk-" and has an appropriate length.
    # Hyphens/underscores are allowed so newer project-scoped keys also pass.
    return bool(re.match(r'^sk-[A-Za-z0-9_-]{32,}$', api_key))
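
# Illustrative behaviour of the validator (the keys below are made-up
# placeholders, not real credentials):
#   validate_api_key("sk-" + "a" * 40)  # -> True
#   validate_api_key("not-a-key")       # -> False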

# Define depersonalization function using OpenAI (v1.0+ syntax)
def depersonalize_prompt_openai(prompt, api_key):
    if not validate_api_key(api_key):
        return "Error: Invalid API key format. OpenAI API keys should start with 'sk-' followed by at least 32 characters."
    try:
        client = openai.OpenAI(api_key=api_key)
        response = client.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=[
                {"role": "system", "content": """
You are an AI assistant specializing in transforming personalized system prompts into generalized versions that can be shared with and used by a wider audience. Your task is to identify and remove personal elements while preserving the core functionality and purpose of the original prompt.

Task Breakdown:

1. Analyze the Original Prompt
   - Identify personalized elements such as:
     * Names (e.g., Daniel Rosehill)
     * Specific hardware or software configurations
     * Location-specific references
     * Unique use cases or workflows
     * Personal preferences or requirements

2. Generalize the Content
   - Replace personal references with generic alternatives
   - Broaden specific technical requirements when appropriate
   - Maintain the core functionality and purpose
   - Preserve the overall structure and flow of instructions

3. Maintain Quality
   - Preserve clear instructions and constraints
   - Keep specialized knowledge and capabilities
   - Ensure the prompt remains coherent and effective
   - Retain unique value propositions of the original

Output Format:

Provide only the depersonalized system prompt in Markdown format inside a code block. Do not include any other commentary or explanation.
"""},
                {"role": "user", "content": prompt}
            ],
            max_tokens=1200
        )
        return response.choices[0].message.content
    except Exception as e:
        error_msg = str(e)
        # Compare against the lowercased message so the check actually matches.
        if "api key" in error_msg.lower() or "authentication" in error_msg.lower():
            return "Error: Your API key was rejected by OpenAI. Please check that you've entered a valid API key."
        else:
            return f"Error: {error_msg}"

# Define depersonalization function using Hugging Face models
def depersonalize_prompt_hf(prompt):
    try:
        # Use a text generation pipeline with a suitable model
        generator = pipeline('text2text-generation', model='google/flan-t5-base')

        # Create a prompt that instructs the model to depersonalize
        instruction = """
Transform this personalized system prompt into a generalized version by removing personal elements
(names, specific hardware/software, locations, unique use cases, personal preferences)
while preserving the core functionality and purpose:
"""
        full_prompt = instruction + "\n\n" + prompt

        # Generate the depersonalized version
        result = generator(full_prompt, max_length=1024, do_sample=False)
        return result[0]['generated_text']
    except Exception as e:
        return f"Error with Hugging Face model: {str(e)}"

# Function to route to the appropriate depersonalization method
def depersonalize_prompt(prompt, api_key, use_openai):
    if use_openai:
        if not api_key.strip():
            return "Error: OpenAI API key is required when using OpenAI. Please enter your API key or switch to Hugging Face."
        return depersonalize_prompt_openai(prompt, api_key)
    else:
        return depersonalize_prompt_hf(prompt)

# Build Gradio UI
with gr.Blocks() as demo:
    gr.Markdown(f"# {title}")
    gr.Markdown(description)

    with gr.Row():
        use_openai = gr.Checkbox(label="Use OpenAI (requires API key)", value=True)
        api_key_input = gr.Textbox(
            label="OpenAI API Key",
            placeholder="sk-...",
            type="password",
            visible=True
        )

    input_prompt = gr.Textbox(
        label="Personalized System Prompt",
        placeholder="Paste your personalized system prompt here...",
        lines=10
    )
    output_prompt = gr.Textbox(
        label="Depersonalized System Prompt",
        lines=10,
        interactive=True
    )

    with gr.Row():
        run_btn = gr.Button("Depersonalize")
        copy_btn = gr.Button("Copy Result")

    # Update API key input visibility based on checkbox
    def update_api_key_visibility(use_openai):
        return gr.update(visible=use_openai)

    use_openai.change(
        fn=update_api_key_visibility,
        inputs=[use_openai],
        outputs=[api_key_input]
    )
    run_btn.click(
        fn=depersonalize_prompt,
        inputs=[input_prompt, api_key_input, use_openai],
        outputs=output_prompt
    )
    # Copy the result to the clipboard in the browser. The js callback receives
    # the values of `inputs`, so no Python function is needed here.
    copy_btn.click(
        fn=None,
        inputs=[output_prompt],
        outputs=[],
        js="(text) => { navigator.clipboard.writeText(text); alert('Copied!'); }"
    )
demo.launch()