"""
Dynamic loader for system prompt components.
Automatically loads all prompt components from their respective directories.
"""
import os
import importlib.util


def load_module_from_file(file_path):
    """Load a Python module from a file path."""
    module_name = os.path.splitext(os.path.basename(file_path))[0]
    spec = importlib.util.spec_from_file_location(module_name, file_path)
    if spec is None or spec.loader is None:
        raise ImportError(f"Cannot load module from {file_path}")
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module


def load_prompts_from_directory(directory):
    """Load all prompt components from a directory."""
    prompts = {}
    if not os.path.exists(directory):
        return prompts
    for file in os.listdir(directory):
        if file.endswith('.py') and file != '__init__.py':
            file_path = os.path.join(directory, file)
            try:
                module = load_module_from_file(file_path)
                name = os.path.splitext(file)[0]
                if hasattr(module, 'PROMPT'):
                    prompts[name] = module.PROMPT.strip()
            except Exception as e:
                print(f"Error loading {file}: {str(e)}")
    return prompts
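
# Illustration (assumed layout): each component file under a category directory is a
# small Python module that defines a module-level PROMPT string. A hypothetical
# model_personalities/friendly.py might look like:
#
#     PROMPT = """
#     You are a warm, friendly assistant who keeps an encouraging tone.
#     """
#
# load_prompts_from_directory() would then map the filename stem to the stripped
# prompt text, e.g. {"friendly": "You are a warm, friendly assistant ..."}.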


# Get the base directory for prompt components
BASE_DIR = os.path.dirname(os.path.abspath(__file__))

# Load all prompt components
MODEL_PERSONALITIES = load_prompts_from_directory(
    os.path.join(BASE_DIR, 'model_personalities')
)
USER_GEOLOCATION = load_prompts_from_directory(
    os.path.join(BASE_DIR, 'user_geolocation')
)
USER_PERSONALITY = load_prompts_from_directory(
    os.path.join(BASE_DIR, 'user_personality')
)
USER_WORLDVIEW = load_prompts_from_directory(
    os.path.join(BASE_DIR, 'user_worldview')
)
USER_POLITICAL_LEANING = load_prompts_from_directory(
    os.path.join(BASE_DIR, 'user_political_leaning')
)
MODEL_FORMALITY = load_prompts_from_directory(
    os.path.join(BASE_DIR, 'model_formality')
)
MODEL_RESPONSE_STYLE = load_prompts_from_directory(
    os.path.join(BASE_DIR, 'model_response_style')
)
MODEL_EXPERTISE = load_prompts_from_directory(
    os.path.join(BASE_DIR, 'model_expertise')
)
MODEL_LANGUAGE_STYLE = load_prompts_from_directory(
    os.path.join(BASE_DIR, 'model_language_style')
)
MODEL_IDENTITY = load_prompts_from_directory(
    os.path.join(BASE_DIR, 'model_identity')
)
USER_LEARNING_STYLE = load_prompts_from_directory(
    os.path.join(BASE_DIR, 'user_learning_style')
)
USER_COMMUNICATION_PACE = load_prompts_from_directory(
    os.path.join(BASE_DIR, 'user_communication_pace')
)
USER_OUTPUT_PREFERENCE = load_prompts_from_directory(
    os.path.join(BASE_DIR, 'user_output_preference')
)


def truncate_to_word_length(text: str, target_length: int) -> str:
    """
    Adjust text to reach the target word length by expanding or truncating as needed.

    Args:
        text: The text to adjust
        target_length: Target number of words

    Returns:
        Adjusted text that preserves the original section (paragraph) structure
    """
    if not text or target_length <= 0:
        return text
    sections = text.split("\n\n")
    words = text.split()
    current_length = len(words)
    # If we're already at the target length, return as is
    if current_length == target_length:
        return text
    # If we need to expand, pad matching sections with elaborative phrases
    if current_length < target_length:
        expanded_sections = []
        for section in sections:
            expanded = section
            if "You are" in section:
                expanded += " Your purpose is to assist users effectively and professionally."
            if "approach" in section:
                expanded += " This approach ensures optimal results and user satisfaction."
            expanded_sections.append(expanded)
        return "\n\n".join(expanded_sections)
    # If we need to reduce, split the word budget evenly across sections
    words_per_section = target_length // len(sections)
    adjusted_sections = [" ".join(section.split()[:words_per_section]) for section in sections]
    return "\n\n".join(adjusted_sections)


def combine_prompts(personality: str, geolocation: str, user_type: str,
                    worldview: str, political_leaning: str = "neutral",
                    formality: str = "neutral",
                    response_style: str = "neutral",
                    data_format: str = "neutral",
                    output_preference: str = "neutral",
                    expertise: str = "neutral",
                    learning_style: str = "neutral",
                    communication_pace: str = "neutral",
                    language_style: str = "neutral",
                    identity_type: str = "neutral",
                    ai_name: str = None,
                    backstory: str = None,
                    user_name: str = None,
                    age_group: str = None,
                    occupation: str = None,
                    target_length: int = None) -> str:
    """
    Combine selected prompts from different categories into a cohesive system prompt.

    Args:
        personality: Key from MODEL_PERSONALITIES
        geolocation: Key from USER_GEOLOCATION
        user_type: Key from USER_PERSONALITY
        worldview: Key from USER_WORLDVIEW
        political_leaning: Key from USER_POLITICAL_LEANING
        formality: Key from MODEL_FORMALITY
        response_style: Key from MODEL_RESPONSE_STYLE
        data_format: Key from USER_OUTPUT_PREFERENCE (data-format variant)
        output_preference: Key from USER_OUTPUT_PREFERENCE
        expertise: Key from MODEL_EXPERTISE
        learning_style: Key from USER_LEARNING_STYLE
        communication_pace: Key from USER_COMMUNICATION_PACE
        language_style: Key from MODEL_LANGUAGE_STYLE
        identity_type: Key from MODEL_IDENTITY
        ai_name: Optional name the assistant should adopt
        backstory: Optional backstory prepended to the prompt
        user_name: Optional name of the user
        age_group: Optional age group of the user
        occupation: Optional occupation of the user
        target_length: Optional target word length for the final prompt

    Returns:
        Combined system prompt
    """
    # Only include non-neutral components
    components = [
        f"You are {ai_name}. " if ai_name else "",
        backstory + "\n" if backstory else "",
        f"The user's name is {user_name}. " if user_name else "",
        f"The user is in the age group {age_group}. " if age_group else "",
        f"The user's occupation is {occupation}. " if occupation else "",
        MODEL_IDENTITY.get(identity_type, "")
        if identity_type != "neutral" else "",
        MODEL_PERSONALITIES.get(personality, "")
        if personality != "neutral" else "",
        USER_GEOLOCATION.get(geolocation, "")
        if geolocation != "neutral" else "",
        USER_PERSONALITY.get(user_type, "")
        if user_type != "neutral" else "",
        USER_WORLDVIEW.get(worldview, "")
        if worldview != "neutral" else "",
        USER_POLITICAL_LEANING.get(political_leaning, "")
        if political_leaning != "neutral" else "",
        MODEL_FORMALITY.get(formality, "")
        if formality != "neutral" else "",
        MODEL_RESPONSE_STYLE.get(response_style, "")
        if response_style != "neutral" else "",
        USER_OUTPUT_PREFERENCE.get(output_preference, "")
        if output_preference != "neutral" else "",
        # Note: data_format keys are also looked up in USER_OUTPUT_PREFERENCE.
        USER_OUTPUT_PREFERENCE.get(data_format, "")
        if data_format != "neutral" else "",
        MODEL_EXPERTISE.get(expertise, "")
        if expertise != "neutral" else "",
        USER_LEARNING_STYLE.get(learning_style, "")
        if learning_style != "neutral" else "",
        USER_COMMUNICATION_PACE.get(communication_pace, "")
        if communication_pace != "neutral" else "",
        MODEL_LANGUAGE_STYLE.get(language_style, "")
        if language_style != "neutral" else ""
    ]
    # Filter out empty strings
    components = [c for c in components if c]
    if not components:
        return "No prompt components selected."
    # Join components
    combined = "\n\n".join(components)
    # Truncate if a target length is specified
    if target_length and target_length > 0:
        return truncate_to_word_length(combined, target_length)
    return combined
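
# Illustration (hypothetical keys): non-"neutral" keys such as "friendly" only resolve
# if the corresponding component files exist on disk; "neutral" is always accepted.
#
#     system_prompt = combine_prompts(
#         personality="friendly",
#         geolocation="neutral",
#         user_type="neutral",
#         worldview="neutral",
#         formality="formal",
#         ai_name="Ada",
#         target_length=150,
#     )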


def get_available_prompts():
    """Get all available prompt options for each category."""
    return {
        # Model characteristics and preferences
        "model_personalities": ["neutral"] + [k for k in MODEL_PERSONALITIES.keys() if k != "neutral"],
        "model_formality": ["neutral", "very_informal", "informal", "formal", "extremely_formal"],
        "model_response_style": ["neutral", "concise", "balanced", "detailed", "socratic"],
        "model_expertise": ["neutral", "generalist", "specialist", "academic", "practical"],
        "model_language_style": ["neutral", "shakespearean", "middle_ages", "rhyming"],
        "model_identity": ["neutral", "bot", "alien", "sloth"],
        # User characteristics
        "user_geolocation": ["neutral"] + [k for k in USER_GEOLOCATION.keys() if k != "neutral"],
        "user_personality": ["neutral"] + [k for k in USER_PERSONALITY.keys() if k != "neutral"],
        "user_worldview": ["neutral"] + [k for k in USER_WORLDVIEW.keys() if k != "neutral"],
        "user_political_leaning": ["neutral", "conservative", "progressive"],
        "user_learning_style": ["neutral", "visual", "practical", "theoretical", "sequential"],
        "user_communication_pace": ["neutral", "methodical", "dynamic", "interactive", "contemplative"],
        "user_output_preference": ["neutral"] + [k for k in USER_OUTPUT_PREFERENCE.keys() if k != "neutral"]
    }
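

if __name__ == "__main__":
    # Minimal smoke test (illustrative): list the available options per category and
    # build one prompt from only neutral selections plus an assumed assistant name.
    import json

    print(json.dumps(get_available_prompts(), indent=2))
    print(combine_prompts(
        personality="neutral",
        geolocation="neutral",
        user_type="neutral",
        worldview="neutral",
        ai_name="Ada",  # assumed example name, not part of the loaded components
    ))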