Nymbo committed
Commit d6c98d8 · verified · 1 parent: 20be021

adding debugging logs and excessive comments, will return in the morning :)

Files changed (1):
app.py +87 -76
app.py CHANGED
@@ -50,22 +50,24 @@ def respond(
 
     # Construct the messages array required by the API
     messages = [{"role": "system", "content": system_message}]
+    print("Initial messages array constructed.")
 
     # Add conversation history to the context
     for val in history:
-        user_part = val[0]
-        assistant_part = val[1]
+        user_part = val[0]  # Extract user message from the tuple
+        assistant_part = val[1]  # Extract assistant message from the tuple
         if user_part:
-            messages.append({"role": "user", "content": user_part})
+            messages.append({"role": "user", "content": user_part})  # Append user message
             print(f"Added user message to context: {user_part}")
         if assistant_part:
-            messages.append({"role": "assistant", "content": assistant_part})
+            messages.append({"role": "assistant", "content": assistant_part})  # Append assistant message
             print(f"Added assistant message to context: {assistant_part}")
 
     # Append the latest user message
     messages.append({"role": "user", "content": message})
+    print("Latest user message appended.")
 
-    # If user provided a model, use that; otherwise, fall back to a default
+    # If user provided a model, use that; otherwise, fall back to a default model
     model_to_use = custom_model.strip() if custom_model.strip() != "" else "meta-llama/Llama-3.3-70B-Instruct"
     print(f"Model selected for inference: {model_to_use}")
 
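Note: the history loop in this hunk assumes Gradio's tuples-style history, where each entry is a (user, assistant) pair. A minimal standalone sketch of the messages array the loop builds (the example values are illustrative, not from the commit):

    # Hypothetical history in the tuples format the loop expects
    history = [
        ("Hi there", "Hello! How can I help?"),
        ("What is Gradio?", None),  # an in-flight turn may have no assistant part yet
    ]

    messages = [{"role": "system", "content": "You are a helpful assistant."}]
    for user_part, assistant_part in history:
        if user_part:
            messages.append({"role": "user", "content": user_part})
        if assistant_part:
            messages.append({"role": "assistant", "content": assistant_part})
    messages.append({"role": "user", "content": "Tell me more."})
    # messages now holds system, then alternating user/assistant turns, oldest first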
 
@@ -76,13 +78,13 @@ def respond(
     # Make the streaming request to the HF Inference API via openai-like client
     for message_chunk in client.chat.completions.create(
         model=model_to_use,  # Use either the user-provided or default model
-        max_tokens=max_tokens,
-        stream=True,  # Stream the response
-        temperature=temperature,
-        top_p=top_p,
-        frequency_penalty=frequency_penalty,
-        seed=seed,
-        messages=messages,
+        max_tokens=max_tokens,  # Maximum tokens for the response
+        stream=True,  # Enable streaming responses
+        temperature=temperature,  # Adjust randomness in response
+        top_p=top_p,  # Control diversity in response generation
+        frequency_penalty=frequency_penalty,  # Penalize repeated phrases
+        seed=seed,  # Set random seed for reproducibility
+        messages=messages,  # Contextual conversation messages
     ):
         # Extract the token text from the response chunk
         token_text = message_chunk.choices[0].delta.content
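The hunk cuts off right after the delta is extracted; the accumulate-and-yield half of the streaming loop sits outside the visible context. A sketch of the usual pattern for a streaming ChatInterface handler (stream_response and response_so_far are assumed names, not from the commit):

    def stream_response(stream):
        # stream would be the client.chat.completions.create(..., stream=True) iterator
        response_so_far = ""
        for message_chunk in stream:
            token_text = message_chunk.choices[0].delta.content
            if token_text is not None:  # some chunks carry no text (e.g., role-only deltas)
                response_so_far += token_text
                yield response_so_far  # each yield re-renders the partial reply in the UI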
@@ -98,91 +100,94 @@ def respond(
 # -------------------------
 
 # Create a Chatbot component with a specified height
-chatbot = gr.Chatbot(height=600)
+chatbot = gr.Chatbot(height=600)  # Define the height of the chatbot interface
 print("Chatbot interface created.")
 
-# We'll create text boxes & sliders for system prompt, tokens, etc.
-system_message_box = gr.Textbox(value="", label="System message")
+# Create textboxes and sliders for system prompt, tokens, and other parameters
+system_message_box = gr.Textbox(value="", label="System message")  # Input box for system message
 
 max_tokens_slider = gr.Slider(
-    minimum=1,
-    maximum=4096,
-    value=512,
-    step=1,
-    label="Max new tokens"
+    minimum=1,  # Minimum allowable tokens
+    maximum=4096,  # Maximum allowable tokens
+    value=512,  # Default value
+    step=1,  # Increment step size
+    label="Max new tokens"  # Slider label
 )
 temperature_slider = gr.Slider(
-    minimum=0.1,
-    maximum=4.0,
-    value=0.7,
-    step=0.1,
-    label="Temperature"
+    minimum=0.1,  # Minimum temperature
+    maximum=4.0,  # Maximum temperature
+    value=0.7,  # Default value
+    step=0.1,  # Increment step size
+    label="Temperature"  # Slider label
 )
 top_p_slider = gr.Slider(
-    minimum=0.1,
-    maximum=1.0,
-    value=0.95,
-    step=0.05,
-    label="Top-P"
+    minimum=0.1,  # Minimum top-p value
+    maximum=1.0,  # Maximum top-p value
+    value=0.95,  # Default value
+    step=0.05,  # Increment step size
+    label="Top-P"  # Slider label
 )
 frequency_penalty_slider = gr.Slider(
-    minimum=-2.0,
-    maximum=2.0,
-    value=0.0,
-    step=0.1,
-    label="Frequency Penalty"
+    minimum=-2.0,  # Minimum penalty
+    maximum=2.0,  # Maximum penalty
+    value=0.0,  # Default value
+    step=0.1,  # Increment step size
+    label="Frequency Penalty"  # Slider label
 )
 seed_slider = gr.Slider(
-    minimum=-1,
-    maximum=65535,
-    value=-1,
-    step=1,
-    label="Seed (-1 for random)"
+    minimum=-1,  # -1 for random seed
+    maximum=65535,  # Maximum seed value
+    value=-1,  # Default value
+    step=1,  # Increment step size
+    label="Seed (-1 for random)"  # Slider label
 )
 
 # The custom_model_box is what the respond function sees as "custom_model"
 custom_model_box = gr.Textbox(
-    value="",
-    label="Custom Model",
-    info="(Optional) Provide a custom Hugging Face model path. Overrides any selected featured model."
+    value="",  # Default value
+    label="Custom Model",  # Label for the textbox
+    info="(Optional) Provide a custom Hugging Face model path. Overrides any selected featured model."  # Additional info
 )
 
-# Define a function that, when a user selects a model from the radio, populates `custom_model_box`
+# Define a function that updates the custom model box when a featured model is selected
 def set_custom_model_from_radio(selected):
     """
     This function will get triggered whenever someone picks a model from the 'Featured Models' radio.
     We will update the Custom Model text box with that selection automatically.
     """
+    print(f"Featured model selected: {selected}")  # Log selected model
     return selected
 
-# The main ChatInterface object
+# Create the main ChatInterface object
 demo = gr.ChatInterface(
-    fn=respond,
-    # For ChatInterface, we can pass additional inputs in order to feed them into the "respond" function
+    fn=respond,  # The function to handle responses
     additional_inputs=[
-        system_message_box,
-        max_tokens_slider,
-        temperature_slider,
-        top_p_slider,
-        frequency_penalty_slider,
-        seed_slider,
-        custom_model_box
+        system_message_box,  # System message input
+        max_tokens_slider,  # Max tokens slider
+        temperature_slider,  # Temperature slider
+        top_p_slider,  # Top-P slider
+        frequency_penalty_slider,  # Frequency penalty slider
+        seed_slider,  # Seed slider
+        custom_model_box  # Custom model input
     ],
-    fill_height=True,
-    chatbot=chatbot,
-    theme="Nymbo/Nymbo_Theme",
+    fill_height=True,  # Allow the chatbot to fill the container height
+    chatbot=chatbot,  # Chatbot UI component
+    theme="Nymbo/Nymbo_Theme",  # Theme for the interface
 )
 
+print("ChatInterface object created.")
+
 # -----------
 # ADDING THE "FEATURED MODELS" ACCORDION
 # -----------
 with demo:
-    with gr.Accordion("Featured Models", open=False):
+    with gr.Accordion("Featured Models", open=False):  # Collapsible section for featured models
         model_search_box = gr.Textbox(
-            label="Filter Models",
-            placeholder="Search for a featured model...",
-            lines=1
+            label="Filter Models",  # Label for the search box
+            placeholder="Search for a featured model...",  # Placeholder text
+            lines=1  # Single-line input
         )
+        print("Model search box created.")
 
         # Sample list of popular text models
         models_list = [
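ChatInterface passes additional_inputs positionally after the message and history arguments, so the respond signature implied by the wiring above would look like this (a sketch reconstructed from the inputs list; the actual definition sits in the hunks above):

    def respond(
        message,            # latest user message
        history,            # prior (user, assistant) tuples
        system_message,     # from system_message_box
        max_tokens,         # from max_tokens_slider
        temperature,        # from temperature_slider
        top_p,              # from top_p_slider
        frequency_penalty,  # from frequency_penalty_slider
        seed,               # from seed_slider
        custom_model,       # from custom_model_box
    ):
        ...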
@@ -204,32 +209,38 @@ with demo:
             "TinyLlama/TinyLlama-1.1B-Chat-v1.0",
             "microsoft/Phi-3.5-mini-instruct",
         ]
+        print("Models list initialized.")
 
         featured_model_radio = gr.Radio(
-            label="Select a model below",
-            choices=models_list,
-            value="meta-llama/Llama-3.3-70B-Instruct",
-            interactive=True
+            label="Select a model below",  # Label for the radio buttons
+            choices=models_list,  # List of available models
+            value="meta-llama/Llama-3.3-70B-Instruct",  # Default selection
+            interactive=True  # Allow user interaction
         )
+        print("Featured models radio button created.")
 
-        # Filter function for the radio
-        def filter_models(search_term):
-            filtered = [m for m in models_list if search_term.lower() in m.lower()]
+        # Filter function for the radio button list
+        def filter_models(search_term):
+            print(f"Filtering models with search term: {search_term}")  # Log the search term
+            filtered = [m for m in models_list if search_term.lower() in m.lower()]  # Filter models by search term
+            print(f"Filtered models: {filtered}")  # Log filtered models
             return gr.update(choices=filtered)
 
-        # Whenever we type in the search box, update the radio with the filtered list
+        # Update the radio list when the search box value changes
        model_search_box.change(
-            fn=filter_models,
-            inputs=model_search_box,
-            outputs=featured_model_radio
+            fn=filter_models,  # Function to filter models
+            inputs=model_search_box,  # Input: search box value
+            outputs=featured_model_radio  # Output: update radio button list
        )
+        print("Model search box change event linked.")
 
-        # Whenever we select a featured model, populate the 'Custom Model' textbox
+        # Update the custom model textbox when a featured model is selected
        featured_model_radio.change(
-            fn=set_custom_model_from_radio,
-            inputs=featured_model_radio,
-            outputs=custom_model_box
+            fn=set_custom_model_from_radio,  # Function to set custom model
+            inputs=featured_model_radio,  # Input: selected model
+            outputs=custom_model_box  # Output: update custom model textbox
        )
+        print("Featured model radio button change event linked.")
 
 print("Gradio interface initialized.")
 
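The model filter above is a plain case-insensitive substring match over models_list. A standalone illustration with a hypothetical search term (the model names are from the commit's list, shortened to three entries):

    models_list = [
        "meta-llama/Llama-3.3-70B-Instruct",
        "TinyLlama/TinyLlama-1.1B-Chat-v1.0",
        "microsoft/Phi-3.5-mini-instruct",
    ]

    def filter_models(search_term):
        return [m for m in models_list if search_term.lower() in m.lower()]

    print(filter_models("llama"))
    # -> ['meta-llama/Llama-3.3-70B-Instruct', 'TinyLlama/TinyLlama-1.1B-Chat-v1.0']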
 
 