BICORP committed on
Commit
a02e161
·
verified ·
1 Parent(s): a701063

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +128 -55
app.py CHANGED
@@ -1,70 +1,143 @@
1
- import random
2
- import gradio as gr
3
  from gradio_client import Client
 
 
4
 
5
- # List of server endpoints
6
- servers = [
7
- "BICORP/GOGOGOGO", # Server 1
8
- "BICORP/server-2", # Server 2
9
- "BICORP/server-3", # Server 3
10
- "BICORP/server-4", # Server 4
11
- "BICORP/server-5", # Server 5
12
- "BICORP/server-6" # Server 6
13
  ]
14
 
15
- # Define presets for each model
16
- presets = {
17
- "Fast": "Fast",
18
- "Normal": "Normal",
19
- "Quality": "Quality",
20
- "Unreal Performance": "Unreal Performance"
21
- }
 
 
22
 
23
- # Function to respond to user input
24
- def respond(message, history: list, selected_model, selected_preset):
25
- # Randomly select a server
26
- server = random.choice(servers)
27
- client = Client(server)
28
 
29
- # Ensure history is a list of dictionaries
30
- messages = [{"role": "user", "content": message}]
 
 
 
 
 
 
 
 
31
 
32
- # Get the response from the model
 
33
  try:
34
- response = client.predict(
35
- message=message, # Required parameter
36
- param_2=selected_model, # Model selection
37
- param_3=selected_preset, # Preset selection
38
- api_name="/chat" # Ensure this is the correct API endpoint
 
39
  )
40
-
41
- return response # Return the response
42
  except Exception as e:
43
- return f"Error: {str(e)}" # Return the error message
44
 
45
- # Model names and their pseudonyms
46
- model_choices = [
47
- ("Lake 1 Base", "Lake 1 Base") # Only one model as per your example
48
- ]
 
 
 
 
 
49
 
50
- # Convert pseudonyms to model names for the dropdown
51
- pseudonyms = [model[0] for model in model_choices]
52
-
53
- # Function to handle model selection and pseudonyms
54
- def respond_with_pseudonym(message, history: list, selected_model, selected_preset):
55
- # Call the existing respond function
56
- response = respond(message, history, selected_model, selected_preset)
57
 
58
- return response
59
-
60
- # Gradio Chat Interface
61
- demo = gr.ChatInterface(
62
- fn=respond_with_pseudonym,
63
- additional_inputs=[
64
- gr.Dropdown(choices=pseudonyms, label="Select Model", value=pseudonyms[0]), # Pseudonym selection dropdown
65
- gr.Dropdown(choices=list(presets.keys()), label="Select Preset", value="Fast") # Preset selection dropdown
66
- ],
67
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
68
 
69
  if __name__ == "__main__":
70
- demo.launch()
 
 
 
1
  from gradio_client import Client
2
+ import gradio as gr
3
+ import random
4
 
5
# Hidden server configuration.
# NOTE(review): gradio_client's Client typically accepts either a "user/space"
# id or the hosted *.hf.space URL; confirm these huggingface.co/spaces page
# URLs resolve with the installed gradio_client version.
SERVERS = [
    "https://huggingface.co/spaces/BICORP/GOGOGOGO",
    "https://huggingface.co/spaces/BICORP/server-2",
    "https://huggingface.co/spaces/BICORP/server-3",
    "https://huggingface.co/spaces/BICORP/server-4",
    "https://huggingface.co/spaces/BICORP/server-5",
    "https://huggingface.co/spaces/BICORP/server-6"
]

# Models offered in the UI dropdown; the first entry is the default selection.
MODELS = [
    "Lake 1 Flash",
    "Lake 1 Base",
    "Lake 1 Advanced",
    "Lake 2 Chat [Closed Alpha]",
    "Lake 2 Base [Closed Beta]"
]

# Performance presets forwarded verbatim to the backend /chat endpoint.
PRESETS = ["Fast", "Normal", "Quality", "Unreal Performance"]

def get_random_server():
    """Pick one backend server at random (naive load balancing)."""
    return random.choice(SERVERS)
 
 
28
 
29
def get_model_info(model_name: str) -> str:
    """Fetch the specification text for *model_name* from a backend server.

    Tries up to two randomly chosen servers. On any failure (network error,
    unreachable server, missing endpoint) it retries, and if every attempt
    fails it returns a user-facing warning string instead of raising.
    """
    max_retries = 2
    for _ in range(max_retries):
        try:
            client = Client(get_random_server())
            return client.predict(model_name, api_name="/get_model_info")
        except Exception:
            # Failure may be server-specific; retry on another random server.
            continue
    return "⚠️ Failed to load specifications. Please try again later."
39
 
40
def handle_chat(message: str, history, model: str, preset: str):
    """Send one chat turn to a randomly selected backend server.

    Returns the backend's reply, or a user-facing warning string on any
    failure. *history* is accepted for interface compatibility with the UI
    callback but is not forwarded — each /chat call is stateless here.
    """
    try:
        client = Client(get_random_server())
        return client.predict(
            message,
            model,
            preset,
            api_name="/chat"
        )
    except Exception:
        # Never surface raw tracebacks to the chat window.
        return "⚠️ Service unavailable. Please try your request again."
53
 
54
def respond(message, history, model, preset):
    """
    Run one chat turn: fetch the model's reply, append the (user, assistant)
    pair to the conversation history in place, and return an empty string
    (to clear the input textbox) together with the updated history.
    """
    if not history:
        history = []
    reply = handle_chat(message, history, model, preset)
    history.append((message, reply))
    return "", history
63
 
64
with gr.Blocks(title="BI Corp AI Assistant", theme="soft") as demo:
    gr.Markdown("# <center>🏔️ BI Corp AI Assistant</center>")
    gr.Markdown("### <center>Enterprise-Grade AI Solutions</center>")

    with gr.Row():
        # Left column: model/preset pickers plus the live specification panel.
        with gr.Column(scale=1):
            model_dropdown = gr.Dropdown(
                label="🤖 Model Selection",
                choices=MODELS,
                value=MODELS[0],
                interactive=True
            )
            preset_dropdown = gr.Dropdown(
                label="⚙️ Performance Preset",
                choices=PRESETS,
                value=PRESETS[0],
                interactive=True
            )
            model_info = gr.Markdown(
                value=get_model_info(MODELS[0]),
                label="📝 Model Specifications"
            )

        # Right column: the conversation itself. Height kept modest so the
        # input textbox stays visible without scrolling.
        with gr.Column(scale=3):
            chatbot = gr.Chatbot(
                height=300,
                label="💬 Conversation",
                show_copy_button=True
            )
            message_input = gr.Textbox(
                placeholder="Type your message...",
                container=True,
                scale=7,
                autofocus=True
            )
            send_button = gr.Button("🚀 Send", variant="primary")

    # Refresh the specification panel whenever a different model is chosen.
    model_dropdown.change(
        fn=get_model_info,
        inputs=model_dropdown,
        outputs=model_info,
        queue=False
    )

    # The Send button and the Enter key both submit the same chat callback.
    send_button.click(
        fn=respond,
        inputs=[message_input, chatbot, model_dropdown, preset_dropdown],
        outputs=[message_input, chatbot],
        queue=True
    )
    message_input.submit(
        fn=respond,
        inputs=[message_input, chatbot, model_dropdown, preset_dropdown],
        outputs=[message_input, chatbot],
        queue=True
    )

    # Reset both the textbox and the conversation history.
    clear_btn = gr.Button("🧹 Clear History")
    clear_btn.click(
        fn=lambda: ("", []),
        inputs=[],
        outputs=[message_input, chatbot],
        queue=False
    )

    # Populate the specification panel once on app load.
    demo.load(
        fn=lambda: get_model_info(MODELS[0]),
        outputs=model_info,
        queue=False
    )
141
 
142
if __name__ == "__main__":
    # Start the Gradio server only when run as a script, not on import.
    demo.launch()