BICORP committed · Commit 9bf3f64 · verified · 1 Parent(s): 9a7392a

Update app.py

Files changed (1): app.py (+184, -76)
app.py CHANGED
@@ -1,6 +1,7 @@
 import gradio as gr
 import requests
 import json
+import time

 # --- API Configuration ---
 BLACKBOX_URL = "https://api.blackbox.ai/api/chat"
@@ -10,19 +11,76 @@ api_models = {
     "Lake 1 Mini": "mistralai/Mistral-Small-24B-Instruct-2501",
     "Lake 1 Base": "databricks/dbrx-instruct",
     "Lake 1 Chat": "deepseek-ai/deepseek-llm-67b-chat",
+    "Lake 1 Advanced": "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO"
 }

-# --- Default Settings ---
-DEFAULT_SETTINGS = {
-    'tpm': 600, # tokens per minute
-    'rpm': 7, # requests per minute
-    'preset': 'Normal'
+# Model-specific system prompts
+MODEL_PROMPTS = {
+    "Lake 1 Mini": "You are a general-purpose AI assistant focused on providing concise and practical answers.",
+    "Lake 1 Base": "You are a technical expert AI specializing in detailed explanations and step-by-step solutions.",
+    "Lake 1 Chat": "You are a friendly conversational AI that prioritizes natural dialogue and approachable responses.",
+    "Lake 1 Advanced": "You are an advanced AI capable of expert-level analysis and critical thinking."
 }

-def call_blackbox_api(prompt: str, model: str, max_new_tokens: int) -> str:
+# --- Rate Limits ---
+STANDARD_RPM = 4
+PLUS_RPM = 8
+PRO_RPM = 16
+
+STANDARD_TPM = 1200
+PLUS_TPM = 2400
+PRO_TPM = 4800
+
+# --- Magic Word Secrets ---
+MAGIC_WORD_SECRET_1 = "SourSesameManager"
+MAGIC_WORD_SECRET_2 = "BeanedSesameHockey"
+
+def get_system_message(model: str, preset: str, access: str) -> str:
+    """Generate combined system message with model-specific and access-level prompts"""
+    base_prompt = MODEL_PROMPTS.get(model, "You are a helpful AI assistant.")
+
+    preset_modes = {
+        "Fast": "Prioritize speed over detail",
+        "Normal": "Balance speed and detail",
+        "Quality": "Prioritize detailed, comprehensive responses"
+    }
+
+    access_levels = {
+        "standard": f"Standard access: Limited to {STANDARD_RPM} requests/min",
+        "plus": f"Plus access: Up to {PLUS_RPM} requests/min",
+        "pro": f"Pro access: Maximum {PRO_RPM} requests/min"
+    }
+
+    return (
+        f"{base_prompt}\n"
+        f"Mode: {preset_modes[preset]}\n"
+        f"Access: {access_levels[access]}\n"
+        "Respond appropriately to the user's query:"
+    )
+
+def check_rate_limit(settings_state: dict) -> bool:
+    """Check if user has exceeded their RPM limit"""
+    current_time = time.time()
+    last_reset = settings_state.get("last_reset", 0)
+
+    # Reset counter if more than 60 seconds have passed
+    if current_time - last_reset > 60:
+        settings_state["request_count"] = 0
+        settings_state["last_reset"] = current_time
+
+    max_rpm = PRO_RPM if settings_state["access"] == "pro" else \
+              PLUS_RPM if settings_state["access"] == "plus" else STANDARD_RPM
+
+    if settings_state.get("request_count", 0) >= max_rpm:
+        return False
+
+    settings_state["request_count"] = settings_state.get("request_count", 0) + 1
+    return True
+
+def call_blackbox_api(messages: list, model: str, max_new_tokens: int) -> str:
     headers = {'Content-Type': 'application/json'}
     payload = json.dumps({
-        "messages": [{"role": "user", "content": prompt}],
+        "messages": messages,
         "model": model,
         "max_tokens": str(max_new_tokens)
     })
@@ -39,82 +97,132 @@ def call_blackbox_api(prompt: str, model: str, max_new_tokens: int) -> str:
     else:
         return f"{response.text}"

-def generate_response(message: str, model_name: str, preset: str) -> str:
-    max_tokens = DEFAULT_SETTINGS['tpm']
-    api_model = api_models[model_name]
-    return call_blackbox_api(message, model=api_model, max_new_tokens=max_tokens)
+def generate_response(message: str, model_name: str, preset: str, access: str) -> str:
+    max_tokens = PRO_TPM if access == "pro" else PLUS_TPM if access == "plus" else STANDARD_TPM
+    api_model = api_models.get(model_name, api_models["Lake 1 Mini"])
+    messages = [
+        {"role": "system", "content": get_system_message(model_name, preset, access)},
+        {"role": "user", "content": message}
+    ]
+    return call_blackbox_api(messages, api_model, max_tokens)

 def chat_handler(message, history, settings_state):
-    model = settings_state.get("model", "Lake 1 Mini")
-    preset = settings_state.get("preset", "Normal")
-    response = generate_response(message, model, preset)
-    history.append({"role": "user", "content": message})
-    history.append({"role": "assistant", "content": response})
-    return history
-
-def update_settings(model, preset):
-    new_state = {"model": model, "preset": preset}
-    settings_text = f"Model: **{model}**, Preset: **{preset}**"
-    return new_state, settings_text
+    if not check_rate_limit(settings_state):
+        return history + [
+            {"role": "user", "content": message},
+            {"role": "assistant", "content": f"Rate limit exceeded! Current plan allows {settings_state['access']} RPM."}
+        ]
+
+    response = generate_response(
+        message,
+        settings_state["model"],
+        settings_state["preset"],
+        settings_state["access"]
+    )
+    return history + [{"role": "user", "content": message}, {"role": "assistant", "content": response}]
+
+def update_settings(model, preset, magic_word):
+    access = "pro" if magic_word == MAGIC_WORD_SECRET_2 else \
+             "plus" if magic_word == MAGIC_WORD_SECRET_1 else "standard"
+
+    models = ["Lake 1 Mini", "Lake 1 Base", "Lake 1 Chat"] + \
+             (["Lake 1 Advanced"] if access in ["pro", "plus"] else [])
+
+    new_state = {
+        "model": model,
+        "preset": preset,
+        "access": access,
+        "request_count": 0,
+        "last_reset": time.time()
+    }
+
+    return (
+        new_state,
+        f"**Settings:** Model: {model} | Preset: {preset} | Access: {access.title()}",
+        gr.update(choices=models, value=models[0])
+    )

 def create_interface():
-    with gr.Blocks(title="Lake AI Assistant", theme="soft") as demo:
-        settings_state = gr.State(value={"model": "Lake 1 Mini", "preset": "Normal"})
+    css = """
+    .donate-btn, .subscribe-btn {
+        background: linear-gradient(45deg, #4CAF50, #45a049);
+        color: white;
+        border: none;
+        padding: 8px 16px;
+        border-radius: 4px;
+        cursor: pointer;
+        transition: all 0.3s;
+    }
+    .donate-btn:hover, .subscribe-btn:hover {
+        transform: scale(1.05);
+        box-shadow: 0 4px 8px rgba(0,0,0,0.2);
+    }
+    .rate-limit {
+        color: #ff4444;
+        font-weight: bold;
+        margin: 10px 0;
+    }
+    @keyframes typing {
+        0% { opacity: 0.5; }
+        50% { opacity: 1; }
+        100% { opacity: 0.5; }
+    }
+    .typing-indicator {
+        animation: typing 1.5s infinite;
+        font-size: 0.9em;
+        color: #666;
+    }
+    """
+
+    with gr.Blocks(title="Lake AI", css=css, theme=gr.themes.Soft()) as app:
+        state = gr.State({
+            "model": "Lake 1 Mini",
+            "preset": "Normal",
+            "access": "standard",
+            "request_count": 0,
+            "last_reset": time.time()
+        })

-        with gr.Tabs():
-            with gr.Tab("Chat"):
-                gr.Markdown("## Chat with Lake AI Assistant")
-                chatbot = gr.Chatbot(label="💬 Conversation", height=400, show_copy_button=True, type="messages")
-                chat_input = gr.Textbox(label="Your Message", placeholder="Type your message here...", interactive=True)
-                send_button = gr.Button("Send")
-                donate_button = gr.Button("Donate ")
-
-                send_button.click(
-                    fn=chat_handler,
-                    inputs=[chat_input, chatbot, settings_state],
-                    outputs=chatbot
-                )
-                chat_input.submit(
-                    fn=chat_handler,
-                    inputs=[chat_input, chatbot, settings_state],
-                    outputs=chatbot
+        with gr.Tab("Chat"):
+            gr.Markdown("# 🌊 Lake AI Assistant")
+            chatbot = gr.Chatbot(height=400, label="Conversation", type="messages")
+            msg = gr.Textbox(label="Your Message", placeholder="Type here...")
+
+            with gr.Row():
+                send_btn = gr.Button("Send", variant="primary")
+                send_btn.click(chat_handler, [msg, chatbot, state], chatbot)
+
+            with gr.Row():
+                gr.Button("☕ Donate", elem_classes="donate-btn").click(
+                    None, None, None, js="window.open('https://buymeacoffee.com/bronio_int')"
                 )
-                # Corrected donation button implementation
-                donate_button.click(
-                    fn=None,
-                    inputs=None,
-                    outputs=None,
-                    js="window.open('https://buymeacoffee.com/bronio_int', '_blank')"
+                gr.Button("🌟 Subscribe", elem_classes="subscribe-btn").click(
+                    None, None, None, js="window.open('https://patreon.com/YourPageHere')"
                 )

-            with gr.Tab("Settings"):
-                gr.Markdown("## Settings")
-                with gr.Row():
-                    with gr.Column():
-                        gr.Markdown("### Model & Performance")
-                        model_dropdown = gr.Dropdown(
-                            label="Model Selection",
-                            interactive=True,
-                            choices=["Lake 1 Mini", "Lake 1 Base", "Lake 1 Chat"],
-                            value="Lake 1 Mini"
-                        )
-                        preset_dropdown = gr.Dropdown(
-                            label="Performance Preset",
-                            interactive=True,
-                            choices=["Fast", "Normal", "Quality"],
-                            value="Normal"
-                        )
-                        update_settings_button = gr.Button("Update Settings")
-                        settings_info = gr.Markdown("")
-
-                        update_settings_button.click(
-                            fn=update_settings,
-                            inputs=[model_dropdown, preset_dropdown],
-                            outputs=[settings_state, settings_info]
-                        )
-
-    return demo
+            msg.submit(chat_handler, [msg, chatbot, state], chatbot)
+
+        with gr.Tab("Settings"):
+            with gr.Row():
+                with gr.Column():
+                    model = gr.Dropdown(
+                        ["Lake 1 Mini", "Lake 1 Base", "Lake 1 Chat"],
+                        label="AI Model",
+                        value="Lake 1 Mini"
+                    )
+                    preset = gr.Dropdown(
+                        ["Fast", "Normal", "Quality"],
+                        label="Performance Mode",
+                        value="Normal"
+                    )
+                    key = gr.Textbox(label="Premium Key", type="password")
+                    status = gr.Markdown()
+
+                    gr.Button("Apply Settings").click(
+                        update_settings, [model, preset, key], [state, status, model]
+                    )
+
+    return app

 if __name__ == "__main__":
-    demo = create_interface()
-    demo.launch()
+ create_interface().launch()
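
For context, the updated call_blackbox_api() now posts a system turn built by get_system_message() alongside the user turn, with max_tokens taken from the tier's TPM constant. Below is a minimal, self-contained sketch, not part of the commit, of the request body a standard-tier "Lake 1 Mini" call would serialize; the user message and the commented-out post call are illustrative only.

# Illustrative sketch only -- not committed code. Mirrors the payload shape that
# call_blackbox_api() serializes after this change: a system message composed by
# get_system_message() plus the user message, with max_tokens drawn from the
# tier's TPM constant (STANDARD_TPM = 1200 for the standard tier).
import json

BLACKBOX_URL = "https://api.blackbox.ai/api/chat"  # same endpoint as app.py

system_message = (
    "You are a general-purpose AI assistant focused on providing concise and practical answers.\n"
    "Mode: Balance speed and detail\n"
    "Access: Standard access: Limited to 4 requests/min\n"
    "Respond appropriately to the user's query:"
)

payload = json.dumps({
    "messages": [
        {"role": "system", "content": system_message},
        {"role": "user", "content": "Summarize this commit."},  # hypothetical user input
    ],
    "model": "mistralai/Mistral-Small-24B-Instruct-2501",  # the "Lake 1 Mini" mapping
    "max_tokens": str(1200),  # the API field is sent as a string, as in app.py
})

print(payload)
# requests.post(BLACKBOX_URL, headers={'Content-Type': 'application/json'}, data=payload)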
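
The new check_rate_limit() enforces a fixed 60-second window using counters kept in the shared Gradio state. A small standalone simulation of that behaviour for the standard tier follows; it is not part of the commit, and check_rate_limit_sim() is a hypothetical adaptation that takes the current time as an argument so the window reset can be shown without sleeping.

# Standalone simulation only -- not committed code. Reproduces the fixed-window
# logic of check_rate_limit() for the standard tier, with time passed in
# explicitly instead of read from time.time().
import time

STANDARD_RPM = 4  # matches the "standard" tier limit added in this commit

def check_rate_limit_sim(settings_state: dict, now: float) -> bool:
    """Allow at most STANDARD_RPM requests per 60-second window."""
    if now - settings_state.get("last_reset", 0) > 60:
        settings_state["request_count"] = 0
        settings_state["last_reset"] = now
    if settings_state.get("request_count", 0) >= STANDARD_RPM:
        return False
    settings_state["request_count"] = settings_state.get("request_count", 0) + 1
    return True

start = time.time()
state = {"access": "standard", "request_count": 0, "last_reset": start}

# Five back-to-back requests: the fifth exceeds the 4 requests/min cap.
print([check_rate_limit_sim(state, start) for _ in range(5)])  # [True, True, True, True, False]

# 61 simulated seconds later the window resets and requests pass again.
print(check_rate_limit_sim(state, start + 61))                 # True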