import gradio as gr
import requests
import json
import time

# --- API Configuration ---
BLACKBOX_URL = "https://api.blackbox.ai/api/chat"

# --- Model Configuration ---
api_models = {
    "Lake 1 Mini": "mistralai/Mistral-Small-24B-Instruct-2501",
    "Lake 1 Base": "databricks/dbrx-instruct",
    "Lake 1 Chat": "deepseek-ai/deepseek-llm-67b-chat",
    "Lake 1 Advanced": "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO"
}

# Model-specific system prompts
MODEL_PROMPTS = {
    "Lake 1 Mini": "You are a general-purpose AI assistant focused on providing concise and practical answers.",
    "Lake 1 Base": "You are a technical expert AI specializing in detailed explanations and step-by-step solutions.",
    "Lake 1 Chat": "You are a friendly conversational AI that prioritizes natural dialogue and approachable responses.",
    "Lake 1 Advanced": "You are an advanced AI capable of expert-level analysis and critical thinking."
}

# --- Rate Limits ---
STANDARD_RPM = 4
PLUS_RPM = 8
PRO_RPM = 16
STANDARD_TPM = 1200
PLUS_TPM = 2400
PRO_TPM = 4800

# --- Magic Word Secrets ---
MAGIC_WORD_SECRET_1 = "SourSesameManager"
MAGIC_WORD_SECRET_2 = "BeanedSesameHockey"
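# The Premium Key entered in the Settings tab is compared against these values:
# MAGIC_WORD_SECRET_1 unlocks the "plus" tier, MAGIC_WORD_SECRET_2 the "pro" tier.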

def get_system_message(model: str, preset: str, access: str) -> str:
    """Generate combined system message with model-specific and access-level prompts"""
    base_prompt = MODEL_PROMPTS.get(model, "You are a helpful AI assistant.")
    preset_modes = {
        "Fast": "Prioritize speed over detail",
        "Normal": "Balance speed and detail",
        "Quality": "Prioritize detailed, comprehensive responses"
    }
    access_levels = {
        "standard": f"Standard access: Limited to {STANDARD_RPM} requests/min",
        "plus": f"Plus access: Up to {PLUS_RPM} requests/min",
        "pro": f"Pro access: Maximum {PRO_RPM} requests/min"
    }
    return (
        f"{base_prompt}\n"
        f"Mode: {preset_modes[preset]}\n"
        f"Access: {access_levels[access]}\n"
        "Respond appropriately to the user's query:"
    )
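
# --- Rate Limiting ---
# Request counts live in the per-session settings_state dict (a gr.State), so the
# RPM limits apply per browser session rather than globally.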
def check_rate_limit(settings_state: dict) -> bool:
    """Check if user has exceeded their RPM limit"""
    current_time = time.time()
    last_reset = settings_state.get("last_reset", 0)
    # Reset counter if more than 60 seconds have passed
    if current_time - last_reset > 60:
        settings_state["request_count"] = 0
        settings_state["last_reset"] = current_time
    max_rpm = PRO_RPM if settings_state["access"] == "pro" else \
        PLUS_RPM if settings_state["access"] == "plus" else STANDARD_RPM
    if settings_state.get("request_count", 0) >= max_rpm:
        return False
    settings_state["request_count"] = settings_state.get("request_count", 0) + 1
    return True
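
# --- Blackbox API ---
# Errors are returned as plain strings so they surface directly in the chat window
# instead of raising inside the Gradio event handler.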
def call_blackbox_api(messages: list, model: str, max_new_tokens: int) -> str:
    """Send the chat request to the Blackbox API and return the assistant's reply text."""
    headers = {'Content-Type': 'application/json'}
    payload = json.dumps({
        "messages": messages,
        "model": model,
        "max_tokens": str(max_new_tokens)
    })
    response = requests.post(BLACKBOX_URL, headers=headers, data=payload, timeout=60)
    if response.status_code == 200 and "application/json" in response.headers.get('Content-Type', ''):
        try:
            data = response.json()
            if 'choices' in data and data['choices']:
                return data['choices'][0]['message']['content']
            else:
                return "Error: Unexpected response format."
        except Exception as e:
            return f"Error parsing JSON: {e}"
    else:
        return f"Error {response.status_code}: {response.text}"
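
# The token budget sent to the API scales with the access tier (TPM constants above).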
def generate_response(message: str, model_name: str, preset: str, access: str) -> str:
    max_tokens = PRO_TPM if access == "pro" else PLUS_TPM if access == "plus" else STANDARD_TPM
    api_model = api_models.get(model_name, api_models["Lake 1 Mini"])
    messages = [
        {"role": "system", "content": get_system_message(model_name, preset, access)},
        {"role": "user", "content": message}
    ]
    return call_blackbox_api(messages, api_model, max_tokens)
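
# chat_handler returns the full updated history; gr.Chatbot is created with
# type="messages", so each entry is an OpenAI-style {"role", "content"} dict.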
def chat_handler(message, history, settings_state):
    if not check_rate_limit(settings_state):
        max_rpm = PRO_RPM if settings_state["access"] == "pro" else \
            PLUS_RPM if settings_state["access"] == "plus" else STANDARD_RPM
        return history + [
            {"role": "user", "content": message},
            {"role": "assistant", "content": f"Rate limit exceeded! The {settings_state['access']} plan allows {max_rpm} requests/min."}
        ]
    response = generate_response(
        message,
        settings_state["model"],
        settings_state["preset"],
        settings_state["access"]
    )
    return history + [{"role": "user", "content": message}, {"role": "assistant", "content": response}]

def update_settings(model, preset, magic_word):
    access = "pro" if magic_word == MAGIC_WORD_SECRET_2 else \
        "plus" if magic_word == MAGIC_WORD_SECRET_1 else "standard"
    models = ["Lake 1 Mini", "Lake 1 Base", "Lake 1 Chat"] + \
        (["Lake 1 Advanced"] if access in ["pro", "plus"] else [])
    new_state = {
        "model": model,
        "preset": preset,
        "access": access,
        "request_count": 0,
        "last_reset": time.time()
    }
    return (
        new_state,
        f"**Settings:** Model: {model} | Preset: {preset} | Access: {access.title()}",
        # Keep the user's current model selected if it is still available for this tier
        gr.update(choices=models, value=model if model in models else models[0])
    )
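
# --- UI ---
# Builds the Gradio Blocks app: a Chat tab and a Settings tab that share a
# per-session gr.State dict holding model, preset, access tier, and rate-limit counters.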
def create_interface():
    css = """
    .donate-btn, .subscribe-btn {
        background: linear-gradient(45deg, #4CAF50, #45a049);
        color: white;
        border: none;
        padding: 8px 16px;
        border-radius: 4px;
        cursor: pointer;
        transition: all 0.3s;
    }
    .donate-btn:hover, .subscribe-btn:hover {
        transform: scale(1.05);
        box-shadow: 0 4px 8px rgba(0,0,0,0.2);
    }
    .rate-limit {
        color: #ff4444;
        font-weight: bold;
        margin: 10px 0;
    }
    @keyframes typing {
        0% { opacity: 0.5; }
        50% { opacity: 1; }
        100% { opacity: 0.5; }
    }
    .typing-indicator {
        animation: typing 1.5s infinite;
        font-size: 0.9em;
        color: #666;
    }
    """
    with gr.Blocks(title="Lake AI", css=css, theme=gr.themes.Soft()) as app:
        state = gr.State({
            "model": "Lake 1 Mini",
            "preset": "Normal",
            "access": "standard",
            "request_count": 0,
            "last_reset": time.time()
        })
        with gr.Tab("Chat"):
            gr.Markdown("# Lake AI Assistant")
            chatbot = gr.Chatbot(height=400, label="Conversation", type="messages")
            msg = gr.Textbox(label="Your Message", placeholder="Type here...")
            with gr.Row():
                send_btn = gr.Button("Send", variant="primary")
                send_btn.click(chat_handler, [msg, chatbot, state], chatbot)
            with gr.Row():
                gr.Button("Donate", elem_classes="donate-btn").click(
                    None, None, None, js="window.open('https://buymeacoffee.com/bronio_int')"
                )
                gr.Button("Subscribe", elem_classes="subscribe-btn").click(
                    None, None, None, js="window.open('https://patreon.com/YourPageHere')"
                )
            msg.submit(chat_handler, [msg, chatbot, state], chatbot)
        with gr.Tab("Settings"):
            with gr.Row():
                with gr.Column():
                    model = gr.Dropdown(
                        ["Lake 1 Mini", "Lake 1 Base", "Lake 1 Chat"],
                        label="AI Model",
                        value="Lake 1 Mini"
                    )
                    preset = gr.Dropdown(
                        ["Fast", "Normal", "Quality"],
                        label="Performance Mode",
                        value="Normal"
                    )
                    key = gr.Textbox(label="Premium Key", type="password")
                    status = gr.Markdown()
                    gr.Button("Apply Settings").click(
                        update_settings, [model, preset, key], [state, status, model]
                    )
    return app

if __name__ == "__main__":
    create_interface().launch()