File size: 10,506 Bytes
d9b8e9c |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 |
import torch
from model import load_model_lazy, unload_model
from generate import generate_code, generate_text
from database import *
import train
import uuid
# Password gate for the training endpoint.
# NOTE(review): hard-coded plaintext credential — should come from an
# environment variable or secrets store; anyone with source access can train.
train_pass = '6818'
# AI-Powered Story World Builder Functions
# Mutable module-level store for the single active story world
# (keys written by define_world: "world_name", "locations", "characters").
world_data = {}
def _generate_code(code_prompt, max_tokens, selected_model='codegen'):
    """
    Generate source code from a prompt using the selected model.

    Parameters:
        code_prompt: Prompt describing the code to generate.
        max_tokens: Generation budget forwarded to ``generate_code``.
        selected_model: Key of the model to load lazily (default 'codegen').

    Returns:
        The generated code produced by ``generate_code``.
    """
    # Load the model lazily so it only occupies memory while in use.
    model_data = load_model_lazy(selected_model)
    try:
        return generate_code(model_data, code_prompt, max_tokens)
    finally:
        # Always release the model, even if generation raises.
        unload_model(selected_model)
def generate(input_text, selected_model, max_new_token):
    """
    Generate text from ``input_text`` with the selected model.

    Also records the request in the database (used later for
    database-driven training).

    Parameters:
        input_text: Prompt text.
        selected_model: Key of the model to load lazily.
        max_new_token: Generation budget forwarded to ``generate_text``.

    Returns:
        The generated text.
    """
    # Load the model lazily so it only occupies memory while in use.
    model_data = load_model_lazy(selected_model)
    try:
        generated_text = generate_text(model_data, input_text, max_new_token)
        # Log the prompt for later database-driven fine-tuning.
        insert_into_db(input_text, selected_model)
        return generated_text
    finally:
        # Always release the model, even if generation or logging raises.
        unload_model(selected_model)
def define_world(world_name, locations, characters):
    """
    Define a new story world with locations and characters.

    Parameters:
        world_name: Name of the world; stored and later matched by
            ``generate_story``.
        locations: Comma-separated list of location names.
        characters: Comma-separated list of character names.

    Returns:
        A confirmation message echoing the raw inputs.
    """
    world_data["world_name"] = world_name
    # Accept both "a, b" and "a,b"; strip whitespace and drop empty entries
    # (more forgiving than the previous exact ", " split, same result for
    # well-formed input).
    world_data["locations"] = [p.strip() for p in locations.split(",") if p.strip()]
    world_data["characters"] = [p.strip() for p in characters.split(",") if p.strip()]
    return f"World '{world_name}' created with locations: {locations} and characters: {characters}"
def generate_story(model, world_name, event, max_length):
    """
    Generate a story for ``event`` set in a previously defined world.

    Returns an error string if no world is defined or the name does not
    match the currently defined world.
    """
    defined_world = world_data.get("world_name")
    # Guard: a world must exist before stories can be generated in it.
    if not world_name or not defined_world:
        return "Error: Please define a world first."
    if world_name != defined_world:
        return f"Error: World '{world_name}' not found. Define it first."
    location_list = ", ".join(world_data["locations"])
    character_list = ", ".join(world_data["characters"])
    prompt = (
        f"In the world of {world_name}, {event}. "
        f"Locations: {location_list}. Characters: {character_list}."
    )
    return generate(prompt, model, max_length)
# Story Mode
# Cumulative story state: alternating user inputs and generated continuations.
story = []
# Main Function For Story Generating
def interactive_story(input_text, selected_model, max_length):
    """
    Append the user's input to the running story, generate a continuation,
    and return the story so far followed by the new continuation.
    """
    global story
    # Only non-blank input is added to the running story.
    if input_text.strip():
        story.append(input_text)
    # The whole story so far becomes the prompt for the next continuation.
    cumulative = " ".join(story)
    continuation = generate(cumulative, selected_model, max_length)
    story.append(continuation)
    return cumulative + "\n\n" + continuation
def reset_story():
    """Clear the cumulative story and return an empty display string."""
    global story
    story = []
    return ""
def generate_multiverse(input_text, selected_model, max_new_tokens, num_worlds=3):
    """
    Generate multiple parallel-world stories from a single seed text.

    Each of the first three worlds gets a distinct twist appended to the
    seed; any worlds beyond the third receive only the bare "World N: "
    header (matching the original branch fall-through).
    """
    # Twist text per world index; worlds >= 3 have no entry.
    twists = {
        0: " This world leads to a parallel universe!",
        1: " In this world, time splits into different periods!",
        2: " This world faces a strange physical anomaly that changes everything!",
    }
    results = []
    for idx in range(num_worlds):
        intro = f"World {idx + 1}: "
        twist = twists.get(idx)
        if twist is not None:
            intro += f"{input_text}{twist}"
        # Generate the story for this world.
        results.append(generate(intro, selected_model, max_new_tokens))
    return "\n\n".join(results)
# Function to verify password, train the model, and clear the database
def verify_and_train_combined(selected_model, train_method, epochs, batch_size, password, custom_text, dataset_file, dataset_name, split_name):
    """
    Verify the training password, then train ``selected_model`` with the
    chosen data source.

    Parameters:
        selected_model: Key of the model to train.
        train_method: One of "Custom Text", "Database", "Dataset File",
            "Hugging Face Dataset".
        epochs, batch_size: Hyper-parameters forwarded to ``train``.
        password: Must equal the module-level ``train_pass`` gate.
        custom_text: Raw training text (for "Custom Text"); may be None.
        dataset_file: Uploaded file object exposing ``.name`` (for "Dataset File").
        dataset_name: Hugging Face dataset id; may be None.
        split_name: Dataset split name (e.g. "train"); may be None.

    Returns:
        A human-readable status or error message.
    """
    if password != train_pass:
        return "Error: Incorrect password. Training not started."
    # Coalesce optional text inputs so the .strip() calls below are
    # None-safe (UI frameworks may pass None for untouched fields; the
    # previous code raised AttributeError in that case).
    custom_text = custom_text or ""
    dataset_name = dataset_name or ""
    split_name = split_name or ""
    if train_method == "Custom Text" and custom_text.strip():
        train.train_model_with_text(selected_model, custom_text, epochs, batch_size)
        return f"Training completed for model: {selected_model} using custom text."
    elif train_method == "Database":
        train.train_model_with_database(selected_model, epochs, batch_size)
        # Training data has been consumed; clear it so it is not reused.
        clear_database()
        return f"Training completed for model: {selected_model} using database. Database cleared."
    elif train_method == "Dataset File" and dataset_file is not None:
        try:
            dataset_path = dataset_file.name
            train.train_model_with_dataset(selected_model, epochs, batch_size, dataset_path)
            return f"Training completed for model: {selected_model} using uploaded dataset."
        except Exception as e:
            return f"Error during training with dataset: {str(e)}"
    elif train_method == "Hugging Face Dataset" and dataset_name.strip():
        try:
            train.train_model_with_hf_dataset(selected_model, epochs, batch_size, dataset_name, split=split_name.strip())
            return f"Training completed for model: {selected_model} using Hugging Face dataset {dataset_name}."
        except Exception as e:
            return f"Error during training with Hugging Face dataset: {str(e)}"
    else:
        return "Error: Invalid input for training. Please check your selections."
def limit_chat_history(chat_history, max_turns=3):
    """
    Keep only the most recent ``max_turns`` question/answer exchanges.

    Each exchange occupies two lines (one "User:" line and one "AI:"
    line), so at most ``max_turns * 2`` trailing lines are preserved.
    """
    lines = chat_history.split("\n")
    keep = max_turns * 2
    if len(lines) > keep:
        # Drop everything except the last ``keep`` lines.
        lines = lines[-keep:]
    return "\n".join(lines)
def chatbot_response(username, input_text, selected_model, chat_id=None):
    """
    Generate a chat reply for ``username`` and persist the exchange.

    Returns:
        (updated_history, chat_id) on success.
        On a missing username: (error_message, "", new_uuid).
        NOTE(review): the error path returns three values while the success
        path returns two — confirm callers handle both arities.
    """
    if not username.strip():
        return "Error: Please enter a username.", "", str(uuid.uuid4())  # fresh id for the caller
    # If no chat id was supplied, start a new conversation.
    if not chat_id or chat_id.strip() == "":
        chat_id = str(uuid.uuid4())
    # Load model lazily
    model_data = load_model_lazy(selected_model)
    # Retrieve previous chats from database
    previous_chats = fetch_chats_by_id(chat_id)
    chat_history = "\n".join([f"User: {msg}\nAI: {resp}" for msg, resp in previous_chats])
    # Keep only the most recent turns so the prompt stays short.
    if chat_history:
        chat_history = limit_chat_history(chat_history, max_turns=3)
        prompt = f"{chat_history}\nUser: {input_text}\nAI:"
    else:
        prompt = f"User: {input_text}\nAI:"
    # Generate response
    max_new_token = 150  # budget of new tokens per reply
    full_response = generate_text(model_data, prompt, max_new_token)
    # Extract only the new AI response (text after the last "AI:" marker).
    ai_response = full_response.split("AI:")[-1].strip()
    # NOTE(review): if generate_text raises, the model is never unloaded.
    unload_model(selected_model)
    # Save chat to database
    insert_chat(chat_id, username, input_text, ai_response)
    # Return updated chat history and chat_id
    updated_history = chat_history + f"\nUser: {input_text}\nAI: {ai_response}"
    return updated_history, chat_id
def chat_ids(username):
    """Return the chat ids stored in the database for ``username``."""
    ids = fetch_ids_by_user(username)
    return ids
def reset_chat(username):
    """Delete every stored chat for ``username`` and report the result."""
    # Remove all of this user's chats from the database.
    clear_chats_by_username(username)
    return f"Chat history cleared for user: {username}", ""
# Sentiment analysis functions
def analyze_emotion(user_input):
    """
    Classify the emotion of ``user_input`` with the "bert-emotion" model.

    Returns:
        (emotion, confidence): predicted label (str) and its score (float).
    """
    # Load the emotion model lazily.
    model_data = load_model_lazy("bert-emotion")
    try:
        if "pipeline" in model_data:
            # Preferred path: the model bundle exposes a ready pipeline.
            emotion_pipeline = model_data["pipeline"]
            result = emotion_pipeline(user_input)
            emotion = result[0]['label']
            confidence = result[0]['score']
        else:
            # Fallback for bundles exposing only raw tokenizer/model.
            emotion_tokenizer = model_data['tokenizer']
            emotion_model = model_data['model']
            inputs = emotion_tokenizer(user_input, return_tensors="pt", truncation=True, padding=True)
            with torch.no_grad():  # inference only — skip autograd bookkeeping
                outputs = emotion_model(**inputs)
            probs = torch.nn.functional.softmax(outputs.logits, dim=-1)
            # Map the argmax index to its text label so both branches return
            # a string (previously this branch returned a bare int index).
            # Assumes the model's class order matches emotion_label's list —
            # TODO confirm against the model config.
            emotion = emotion_label(probs.argmax().item())
            confidence = probs.max().item()
    finally:
        # Release the model even if classification raises.
        unload_model("bert-emotion")
    return emotion, confidence
def emotion_label(index):
    """Map a class index from the emotion model to its text label."""
    return ("anger", "joy", "sadness", "fear", "love", "surprise")[index]
def chatbot_response_with_emotion(username, input_text, selected_model, chat_id=None):
    """
    Generate a chat reply conditioned on the detected emotion of the input.

    Returns:
        (updated_history, chat_id, user_emotion) on success;
        on a missing username: (error_message, "", new_uuid).
    """
    if not username.strip():
        return "Error: Please enter a username.", "", str(uuid.uuid4())
    # If no chat id was supplied, start a new conversation.
    if not chat_id or chat_id.strip() == "":
        chat_id = str(uuid.uuid4())
    # Load the chat model (analyze_emotion loads/unloads its own model).
    model_data = load_model_lazy(selected_model)
    # Classify the emotion of the user's message.
    # NOTE: confidence is currently unused here.
    emotion, confidence = analyze_emotion(input_text)
    user_emotion = emotion  # emotion label used to condition the prompt
    # Retrieve previous chats from the database.
    previous_chats = fetch_chats_by_id(chat_id)
    chat_history = "\n".join([f"User: {msg}\nAI: {resp}" for msg, resp in previous_chats])
    # Keep only the most recent turns so the prompt stays short.
    if chat_history:
        chat_history = limit_chat_history(chat_history, max_turns=3)
        prompt = f"[Emotion: {user_emotion}]\n{chat_history}\nUser: {input_text}\nAI:"
    else:
        prompt = f"[Emotion: {user_emotion}]\nUser: {input_text}\nAI:"
    # Generate the response.
    max_new_token = 150  # budget of new tokens per reply
    full_response = generate_text(model_data, prompt, max_new_token)
    # Extract only the new AI reply (text after the last "AI:" marker).
    ai_response = full_response.split("AI:")[-1].strip()
    # Release the models.
    unload_model(selected_model)
    # NOTE(review): analyze_emotion already unloads "bert-emotion"; this
    # second unload looks redundant — confirm unload_model is idempotent.
    unload_model("bert-emotion")
    # Persist the exchange.
    insert_chat(chat_id, username, input_text, ai_response)
    # Return the updated history and chat id.
    updated_history = chat_history + f"\nUser: {input_text}\nAI: {ai_response}"
    return updated_history, chat_id, user_emotion