# --- Scraped Hugging Face Spaces page header (non-code residue) ---
# Space status: Sleeping; file size: 4,545 bytes. Commit hashes and a
# line-number ruler from the web view followed here; they were not part
# of the original source and are preserved only as this comment.
from gradio_client import Client
import gradio as gr
import random
# Hidden server configuration
# Pool of mirror backends; one is chosen at random per request (see
# get_random_server) to spread load across them.
# NOTE(review): gradio_client.Client is normally given a space id
# ("owner/name") or an *.hf.space app URL — confirm these
# huggingface.co/spaces page URLs actually resolve.
SERVERS = [
    "https://huggingface.co/spaces/BICORP/GOGOGOGO",
    "https://huggingface.co/spaces/BICORP/server-2",
    "https://huggingface.co/spaces/BICORP/server-3",
    "https://huggingface.co/spaces/BICORP/server-4",
    "https://huggingface.co/spaces/BICORP/server-5",
    "https://huggingface.co/spaces/BICORP/server-6"
]

# Model names shown in the UI and forwarded verbatim to the backend's
# /chat and /get_model_info endpoints.
MODELS = [
    "Lake 1 Flash",
    "Lake 1 Base",
    "Lake 1 Advanced",
    "Lake 2 Chat [Closed Alpha]",
    "Lake 2 Base [Closed Beta]"
]

# Speed/quality trade-off options, forwarded verbatim to the backend.
PRESETS = ["Fast", "Normal", "Quality", "Unreal Performance"]
def get_random_server():
    """Pick one of the configured backend servers at random."""
    index = random.randrange(len(SERVERS))
    return SERVERS[index]
def get_model_info(model_name: str) -> str:
    """Fetch model specifications from a backend server, with retries.

    Tries up to two randomly chosen servers before giving up.

    Args:
        model_name: Model name forwarded to the backend's
            /get_model_info endpoint.

    Returns:
        The backend's specification text, or a user-facing error
        message if every attempt fails.
    """
    max_retries = 2
    for _ in range(max_retries):
        try:
            client = Client(get_random_server())
            return client.predict(model_name, api_name="/get_model_info")
        except Exception:
            # Backend may be asleep or unreachable; try another server.
            continue
    # Fixed mojibake: the warning emoji was garbled in the original.
    return "⚠️ Failed to load specifications. Please try again later."
def handle_chat(message: str, history, model: str, preset: str):
    """Send one chat message to a randomly chosen backend server.

    Args:
        message: The user's message text.
        history: Conversation history (accepted for signature
            compatibility; not forwarded to the backend).
        model: Model name selected in the UI.
        preset: Performance preset selected in the UI.

    Returns:
        The backend's reply, or a user-facing error string on failure.
    """
    try:
        client = Client(get_random_server())
        return client.predict(
            message,
            model,
            preset,
            api_name="/chat"
        )
    except Exception:
        # Any backend/network failure degrades to a friendly message
        # rather than surfacing a traceback in the UI.
        # Fixed mojibake: the warning emoji was garbled in the original.
        return "⚠️ Service unavailable. Please try your request again."
def respond(message, history, model, preset):
    """Process one chat turn.

    Gets the model's reply for *message*, appends the (user, bot) pair
    to the transcript, and returns ("", updated_history) so the input
    textbox clears and the chatbot display refreshes.
    """
    if not history:
        history = []
    reply = handle_chat(message, history, model, preset)
    history.append((message, reply))
    return "", history
# --- UI -------------------------------------------------------------------
# NOTE(review): several emoji in the original labels were mojibake (UTF-8
# read through a Greek codepage). 🤖 ⚙️ 💬 🚀 🧹 decode unambiguously from
# the garbled bytes; the header and specifications-pane glyphs are best
# guesses — confirm against the deployed app.
with gr.Blocks(title="BI Corp AI Assistant", theme="soft") as demo:
    gr.Markdown("# <center>🏛️ BI Corp AI Assistant</center>")
    gr.Markdown("### <center>Enterprise-Grade AI Solutions</center>")

    with gr.Row():
        with gr.Column(scale=1):
            # Backend model picker; defaults to the first configured model.
            model_dropdown = gr.Dropdown(
                label="🤖 Model Selection",
                choices=MODELS,
                value=MODELS[0],
                interactive=True
            )
            preset_dropdown = gr.Dropdown(
                label="⚙️ Performance Preset",
                choices=PRESETS,
                value=PRESETS[0],
                interactive=True
            )
            # Markdown pane showing the selected model's specifications.
            model_info = gr.Markdown(
                value=get_model_info(MODELS[0]),
                label="📋 Model Specifications"
            )

        with gr.Column(scale=3):
            # Reduced height keeps the input textbox visible without
            # scrolling.
            chatbot = gr.Chatbot(
                height=300,
                label="💬 Conversation",
                show_copy_button=True
            )
            message_input = gr.Textbox(
                placeholder="Type your message...",
                container=True,
                scale=7,
                autofocus=True
            )
            send_button = gr.Button("🚀 Send", variant="primary")

    # Refresh the specifications pane when a different model is selected.
    model_dropdown.change(
        fn=get_model_info,
        inputs=model_dropdown,
        outputs=model_info,
        queue=False
    )

    # Send button and Enter key both submit the message through respond().
    send_button.click(
        fn=respond,
        inputs=[message_input, chatbot, model_dropdown, preset_dropdown],
        outputs=[message_input, chatbot],
        queue=True
    )
    message_input.submit(
        fn=respond,
        inputs=[message_input, chatbot, model_dropdown, preset_dropdown],
        outputs=[message_input, chatbot],
        queue=True
    )

    # Reset both the input box and the conversation history.
    clear_btn = gr.Button("🧹 Clear History")
    clear_btn.click(
        fn=lambda: ("", []),
        inputs=[],
        outputs=[message_input, chatbot],
        queue=False
    )

    # Populate the specifications pane on initial page load.
    demo.load(
        fn=lambda: get_model_info(MODELS[0]),
        outputs=model_info,
        queue=False
    )

if __name__ == "__main__":
    demo.launch()