import gradio as gr
from huggingface_hub import InferenceClient
import os
import base64
from PIL import Image
import io

ACCESS_TOKEN = os.getenv("HF_TOKEN")
if ACCESS_TOKEN:
    print("Access token loaded.")
else:
    print("Warning: HF_TOKEN is not set; provide a key via BYOK to run inference.")

# Function to encode image to base64
def encode_image(image_path):
    if not image_path:
        print("No image path provided")
        return None
    
    try:
        print(f"Encoding image from path: {image_path}")
        
        # If it's already a PIL Image
        if isinstance(image_path, Image.Image):
            image = image_path
        else:
            # Try to open the image file
            image = Image.open(image_path)
        
        # JPEG cannot store alpha or palette data, so normalize those modes to RGB
        if image.mode in ("RGBA", "LA", "P"):
            image = image.convert("RGB")
        
        # Encode to base64
        buffered = io.BytesIO()
        image.save(buffered, format="JPEG") # Keep JPEG for consistency with image_url
        img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")
        print("Image encoded successfully")
        return img_str
    except Exception as e:
        print(f"Error encoding image: {e}")
        return None
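
# Usage sketch (illustrative, not executed): the base64 string returned above is
# meant to be wrapped in an OpenAI-style "image_url" content part, as done in
# respond() below. "photo.jpg" is a hypothetical local file:
#
#     b64 = encode_image("photo.jpg")
#     if b64:
#         image_part = {
#             "type": "image_url",
#             "image_url": {"url": f"data:image/jpeg;base64,{b64}"},
#         }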

def respond(
    message,
    image_files,  # Paths of images attached to the current turn
    history: list[list[str | None]],
    system_message,
    max_tokens,
    temperature,
    top_p,
    frequency_penalty,
    seed,
    provider,
    custom_api_key,
    custom_model,    
    model_search_term, # Retained for signature compatibility; not used in inference
    selected_model     # Model chosen from the featured list in the UI
):
    """
    Core function to stream responses from a language model.

    Args:
        message (str | list): The user's message, can be text or multimodal content.
        image_files (list[str]): List of paths to image files for the current turn.
        history (list[list[str | None]]): Conversation history (prior turns only).
        system_message (str): System prompt for the model.
        max_tokens (int): Maximum tokens for the response.
        temperature (float): Sampling temperature.
        top_p (float): Top-p (nucleus) sampling.
        frequency_penalty (float): Frequency penalty.
        seed (int): Random seed (-1 for random).
        provider (str): Inference provider.
        custom_api_key (str): Custom API key.
        custom_model (str): Custom model ID.
        model_search_term (str): Term for searching models (UI related).
        selected_model (str): Model selected from UI list.

    Yields:
        str: The cumulative response from the model.
    """
    print(f"Received message: {message}")
    print(f"Received {len(image_files) if image_files else 0} images for current turn")
    print(f"History: {history}")
    print(f"System message: {system_message}")
    print(f"Max tokens: {max_tokens}, Temperature: {temperature}, Top-P: {top_p}")
    print(f"Frequency Penalty: {frequency_penalty}, Seed: {seed}")
    print(f"Selected provider: {provider}")         
    print(f"Custom API Key provided: {bool(custom_api_key.strip())}")
    print(f"Selected model (custom_model): {custom_model}")  
    print(f"Model search term: {model_search_term}")
    print(f"Selected model from radio: {selected_model}")

    # Determine which token to use
    token_to_use = custom_api_key if custom_api_key.strip() != "" else ACCESS_TOKEN
    
    if custom_api_key.strip() != "":
        print("USING CUSTOM API KEY: BYOK token provided by user is being used for authentication")
    else:
        print("USING DEFAULT API KEY: Environment variable HF_TOKEN is being used for authentication")
    
    # Initialize the Inference Client with the provider and appropriate token
    client = InferenceClient(token=token_to_use, provider=provider)
    print(f"Hugging Face Inference Client initialized with {provider} provider.")

    # Convert seed to None if -1 (meaning random)
    if seed == -1:
        seed = None

    # Create multimodal content if images are present for the current message
    # The 'message' parameter to 'respond' is now the text part of the current turn
    # 'image_files' parameter to 'respond' now holds image paths for the current turn
    current_turn_content = []
    if message and isinstance(message, str) and message.strip():
        current_turn_content.append({
            "type": "text",
            "text": message
        })

    if image_files and len(image_files) > 0:
        for img_path in image_files: # Iterate through paths in image_files
            if img_path is not None:
                try:
                    encoded_image = encode_image(img_path) # img_path is already a path
                    if encoded_image:
                        current_turn_content.append({
                            "type": "image_url",
                            "image_url": {
                                "url": f"data:image/jpeg;base64,{encoded_image}"
                            }
                        })
                except Exception as e:
                    print(f"Error encoding image for current turn: {e}")
    
    # If current_turn_content is empty (e.g. only an empty text message), fall back
    # to the raw message so the API still receives a user turn
    if not current_turn_content and isinstance(message, str):
        final_user_content_for_api = message
    elif not current_turn_content:
        final_user_content_for_api = ""  # Nothing usable in this turn
    else:
        final_user_content_for_api = current_turn_content


    # Prepare messages in the format expected by the API
    messages_for_api = [{"role": "system", "content": system_message}]
    print("Initial messages array constructed.")

    # Add conversation history to the context
    for val in history: # each entry is [user_message, assistant_message]
        user_hist_msg_content = val[0] # This is what user typed or image markdown
        assistant_hist_msg = val[1]

        # Process user history message (could be text or markdown image path)
        if user_hist_msg_content:
            # Check if it's an image markdown from history
            if isinstance(user_hist_msg_content, str) and user_hist_msg_content.startswith("![Image]("):
                hist_img_path = user_hist_msg_content.replace("![Image](", "").replace(")", "")
                encoded_hist_image = encode_image(hist_img_path)
                if encoded_hist_image:
                    messages_for_api.append({"role": "user", "content": [
                        {"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{encoded_hist_image}"}}
                    ]})
                else: # If encoding fails, send a placeholder rather than dropping the turn
                    messages_for_api.append({"role": "user", "content": "[Image could not be loaded]"})
            else: # Plain text message from history
                messages_for_api.append({"role": "user", "content": user_hist_msg_content})
            print(f"Added user message to API context from history (type: {type(user_hist_msg_content)})")
        
        if assistant_hist_msg:
            messages_for_api.append({"role": "assistant", "content": assistant_hist_msg})
            print(f"Added assistant message to API context from history: {assistant_hist_msg}")

    # Append the latest user message (which now includes images if any for this turn)
    messages_for_api.append({"role": "user", "content": final_user_content_for_api})
    print(f"Latest user message appended to API context (content type: {type(final_user_content_for_api)})")


    # Determine which model to use, prioritizing custom_model if provided
    model_to_use = custom_model.strip() if custom_model.strip() != "" else selected_model
    print(f"Model selected for inference: {model_to_use}")

    # Start with an empty string to build the response as tokens stream in
    response_text = ""
    print(f"Sending request to {provider} provider.")

    # Prepare parameters for the chat completion request
    parameters = {
        "max_tokens": max_tokens,
        "temperature": temperature,
        "top_p": top_p,
        "frequency_penalty": frequency_penalty,
    }
    
    if seed is not None:
        parameters["seed"] = seed

    # Use the InferenceClient for making the request
    try:
        # Create a generator for the streaming response
        stream = client.chat_completion(
            model=model_to_use,
            messages=messages_for_api, # Use the correctly formatted messages
            stream=True,
            **parameters
        )
        
        print("Received tokens: ", end="", flush=True)
        
        # Process the streaming response
        for chunk in stream:
            if hasattr(chunk, 'choices') and len(chunk.choices) > 0:
                # Extract the content from the response
                if hasattr(chunk.choices[0], 'delta') and hasattr(chunk.choices[0].delta, 'content'):
                    token_text_chunk = chunk.choices[0].delta.content
                    if token_text_chunk:
                        print(token_text_chunk, end="", flush=True)
                        response_text += token_text_chunk
                        yield response_text
        
        print()
    except Exception as e:
        print(f"Error during inference: {e}")
        response_text += f"\nError: {str(e)}"
        yield response_text

    print("Completed response generation.")

# Function to validate provider selection based on BYOK
def validate_provider(api_key, provider):
    if not api_key.strip() and provider != "hf-inference":
        return gr.update(value="hf-inference")
    return gr.update(value=provider)
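
# Illustrative behavior ("hf_xxx" is a placeholder token, not a real key):
#     validate_provider("", "cerebras")        # -> gr.update(value="hf-inference")
#     validate_provider("hf_xxx", "cerebras")  # -> gr.update(value="cerebras")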

# GRADIO UI
with gr.Blocks(theme="Nymbo/Nymbo_Theme") as demo:
    # Create the chatbot component
    chatbot = gr.Chatbot(
        height=600, 
        show_copy_button=True, 
        placeholder="Select a model and begin chatting. Now supports multiple inference providers and multimodal inputs",
        layout="panel"
    )
    print("Chatbot interface created.")
    
    # Multimodal textbox for messages (combines text and file uploads)
    msg = gr.MultimodalTextbox(
        placeholder="Type a message or upload images...",
        show_label=False,
        container=False,
        scale=12,
        file_types=["image"],
        file_count="multiple",
        sources=["upload"]
    )
    
    # Create accordion for settings
    with gr.Accordion("Settings", open=False):
        # System message
        system_message_box = gr.Textbox(
            value="You are a helpful AI assistant that can understand images and text.", 
            placeholder="You are a helpful assistant.",
            label="System Prompt"
        )
        
        # Generation parameters
        with gr.Row():
            with gr.Column():
                max_tokens_slider = gr.Slider(
                    minimum=1,
                    maximum=4096,
                    value=512,
                    step=1,
                    label="Max tokens"
                )
                
                temperature_slider = gr.Slider(
                    minimum=0.1,
                    maximum=4.0,
                    value=0.7,
                    step=0.1,
                    label="Temperature"
                )
                
                top_p_slider = gr.Slider(
                    minimum=0.1,
                    maximum=1.0,
                    value=0.95,
                    step=0.05,
                    label="Top-P"
                )
                
            with gr.Column():
                frequency_penalty_slider = gr.Slider(
                    minimum=-2.0,
                    maximum=2.0,
                    value=0.0,
                    step=0.1,
                    label="Frequency Penalty"
                )
                
                seed_slider = gr.Slider(
                    minimum=-1,
                    maximum=65535,
                    value=-1,
                    step=1,
                    label="Seed (-1 for random)"
                )
        
        # Provider selection
        providers_list = [
            "hf-inference", "cerebras", "together", "sambanova", 
            "novita", "cohere", "fireworks-ai", "hyperbolic", "nebius",
        ]
        
        provider_radio = gr.Radio(
            choices=providers_list, value="hf-inference", label="Inference Provider",
        )
        
        byok_textbox = gr.Textbox(
            value="", label="BYOK (Bring Your Own Key)",
            info="Enter a custom Hugging Face API key here. When empty, only 'hf-inference' provider can be used.",
            placeholder="Enter your Hugging Face API token", type="password"
        )
        
        custom_model_box = gr.Textbox(
            value="", label="Custom Model",
            info="(Optional) Provide a custom Hugging Face model path. Overrides any selected featured model.",
            placeholder="meta-llama/Llama-3.3-70B-Instruct"
        )
        
        model_search_box = gr.Textbox(
            label="Filter Models", placeholder="Search for a featured model...", lines=1
        )
        
        models_list = [
            "meta-llama/Llama-3.2-11B-Vision-Instruct", "meta-llama/Llama-3.3-70B-Instruct",
            "meta-llama/Llama-3.1-70B-Instruct", "meta-llama/Llama-3.0-70B-Instruct",
            "meta-llama/Llama-3.2-3B-Instruct", "meta-llama/Llama-3.2-1B-Instruct",
            "meta-llama/Llama-3.1-8B-Instruct", "NousResearch/Hermes-3-Llama-3.1-8B",
            "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO", "mistralai/Mistral-Nemo-Instruct-2407",
            "mistralai/Mixtral-8x7B-Instruct-v0.1", "mistralai/Mistral-7B-Instruct-v0.3",
            "mistralai/Mistral-7B-Instruct-v0.2", "Qwen/Qwen3-235B-A22B", "Qwen/Qwen3-32B",
            "Qwen/Qwen2.5-72B-Instruct", "Qwen/Qwen2.5-3B-Instruct", "Qwen/Qwen2.5-0.5B-Instruct",
            "Qwen/QwQ-32B", "Qwen/Qwen2.5-Coder-32B-Instruct", "microsoft/Phi-3.5-mini-instruct",
            "microsoft/Phi-3-mini-128k-instruct", "microsoft/Phi-3-mini-4k-instruct",
        ]

        featured_model_radio = gr.Radio(
            label="Select a model below", choices=models_list,
            value="meta-llama/Llama-3.2-11B-Vision-Instruct", interactive=True
        )
        
        gr.Markdown("[View all Text-to-Text models](https://huggingface.co/models?inference_provider=all&pipeline_tag=text-generation&sort=trending) | [View all multimodal models](https://huggingface.co/models?inference_provider=all&pipeline_tag=image-text-to-text&sort=trending)")

    # MCP Support Information
    with gr.Accordion("MCP Support (for AI Tool Use)", open=False):
        gr.Markdown("""
        ### MCP (Model Context Protocol) Enabled
        
        This application's text and image generation capability can be used as a tool by MCP-compatible AI models 
        (e.g., certain versions of Claude, Cursor, or custom MCP clients like Tiny Agents).
        
        The primary interaction function (`bot`) is exposed as an MCP tool. 
        Provide the conversation history and other parameters as arguments to the tool.
        For multimodal input, ensure the history correctly references image data that the server can access 
        (Gradio's MCP layer may handle base64 to file conversion if the tool schema indicates file inputs).

        **MCP Server URL:**
        `https://YOUR_SPACE_NAME-serverless-textgen-hub.hf.space/gradio_api/mcp/sse`
        *(Replace `YOUR_SPACE_NAME` with the username or organization that owns this Space; 
        the exact URL is visible in your browser's address bar once the Space is running.)*
        
        **Example MCP Client Configuration (`mcp.json` style):**
        ```json
        {
          "servers": [
            {
              "name": "ServerlessTextGenHubTool",
              "transport": {
                "type": "sse",
                "url": "https://YOUR_SPACE_NAME-serverless-textgen-hub.hf.space/gradio_api/mcp/sse" 
              }
            }
          ]
        }
        ```
        **Note on Tool Schema:** The exact schema of the MCP tool will be determined by Gradio based on the `bot` function's 
        signature (including type hints) and the Gradio components it interacts with. 
        Refer to the `/gradio_api/mcp/schema` endpoint of your running application for the precise tool definition.
        For image inputs via MCP, clients should ideally send image URLs or base64 encoded data if the tool's schema supports file types. 
        Gradio's MCP layer attempts to handle file data conversions.
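
        As a quick sanity check, here is a minimal sketch (assuming the `requests`
        package and your own Space URL) for fetching the generated tool schema:

        ```python
        import requests

        schema = requests.get(
            "https://YOUR_SPACE_NAME-serverless-textgen-hub.hf.space/gradio_api/mcp/schema"
        ).json()
        print(schema)  # Lists each exposed tool with its parameters and types
        ```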
        """)

    # Chat history state
    chat_history = gr.State([]) # Not directly used, chatbot component handles its state internally
    
    # Function to filter models
    def filter_models(search_term: str):
        print(f"Filtering models with search term: {search_term}")
        filtered = [m for m in models_list if search_term.lower() in m.lower()]
        print(f"Filtered models: {filtered}")
        # Note: featured_model_radio.value holds the component's *initial* value, not
        # the live selection, so we simply select the first match (or fall back to
        # the full list when nothing matches).
        if not filtered:
            return gr.update(choices=models_list, value=models_list[0])
        return gr.update(choices=filtered, value=filtered[0])

    # Function to set custom model from radio
    def set_custom_model_from_radio(selected: str):
        print(f"Featured model selected: {selected}")
        # Mirror the radio selection into custom_model_box, keeping the two in sync
        # until the user types a custom model path of their own.
        return selected


    # Function for the chat interface (user's turn)
    def user(user_message_input: dict, history: list[list[str | None]]):
        print(f"User input (raw from MultimodalTextbox): {user_message_input}")
        
        text_content = (user_message_input.get("text") or "").strip()  # "text" may be None
        files = user_message_input.get("files", []) # List of temp file paths
        
        print(f"Parsed text content: '{text_content}'")
        print(f"Parsed files: {files}")
        
        # Append text message to history if present
        if text_content:
            history.append([text_content, None])
            print(f"Appended text to history: {text_content}")

        # Append image messages to history
        if files:
            for file_path in files:
                if file_path and isinstance(file_path, str): # file_path is a temp path from Gradio
                    # Embed image as markdown link in history for display
                    # The actual file path is used by `respond` via `bot`
                    history.append([f"![Image]({file_path})", None])
                    print(f"Appended image to history: {file_path}")
        
        # If neither text nor files, don't add an empty turn
        if not text_content and not files:
            print("Empty input, no change to history.")
            return history # Return current history as is
            
        return history
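
    # Illustrative round-trip (values are hypothetical): a MultimodalTextbox payload
    #     {"text": "describe this", "files": ["/tmp/gradio/abc/cat.png"]}
    # becomes two new history rows:
    #     [["describe this", None], ["![Image](/tmp/gradio/abc/cat.png)", None]]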
    
    # Define bot response function
    def bot(
        history: list[list[str | None]], # Type hint for history
        system_msg: str,
        max_tokens: int,
        temperature: float,
        top_p: float,
        freq_penalty: float,
        seed: int,
        provider: str,
        api_key: str,
        custom_model: str,
        selected_model: str  # From featured_model_radio; model_search_box is UI-only and not passed to bot
    ):
        """
        Processes user input from the chat history, calls the language model via the 'respond'
        function, and streams the bot's response back to update the chat history.
        This function is intended to be exposed as an MCP tool.

        Args:
            history (list[list[str | None]]): The conversation history. 
                                              Each item is [user_message, bot_message].
                                              User messages can be text or markdown image paths like "![Image](/tmp/path.jpg)".
            system_msg (str): The system prompt.
            max_tokens (int): Maximum number of tokens to generate.
            temperature (float): Sampling temperature for generation.
            top_p (float): Top-P (nucleus) sampling probability.
            freq_penalty (float): Frequency penalty for generation.
            seed (int): Random seed for generation (-1 for random).
            provider (str): The inference provider to use.
            api_key (str): Custom API key, if provided by the user.
            custom_model (str): Custom model path/ID. If empty, selected_model is used.
            selected_model (str): The model selected from the featured list.

        Yields:
            list[list[str | None]]: The updated chat history with the bot's streaming response.
        """
        print(f"Bot function called. History: {history}")
        if not history or history[-1][0] is None:
            print("No user message in the last history turn to process.")
            return  # Nothing to respond to; leave history untouched

        # The last user message is history[-1][0]
        # The bot's response will go into history[-1][1]
        
        user_turn_content = history[-1][0]
        current_turn_text_message = ""
        current_turn_image_paths = []

        # Check if the last user message in history is an image markdown
        if isinstance(user_turn_content, str) and user_turn_content.startswith("![Image]("):
            # This is an image message
            img_path = user_turn_content.replace("![Image](", "").replace(")", "")
            current_turn_image_paths.append(img_path)
            # Note: `user` appends text and images as separate history entries, so any
            # text typed alongside this image sits in the preceding entry and reaches
            # the model through the history loop in `respond`. Treating each entry as
            # its own turn keeps this logic simple; MCP clients should structure their
            # calls to `bot` the same way.
            print(f"Processing image from history: {img_path}")
        elif isinstance(user_turn_content, str):
            # This is a text message
            current_turn_text_message = user_turn_content
            print(f"Processing text from history: {current_turn_text_message}")
        else:
            print(f"Unexpected content in history user turn: {user_turn_content}")
            return  # Unsupported content type; leave history untouched


        history[-1][1] = "" # Initialize bot response field for the current turn
        
        # Call the 'respond' function.
        # History for 'respond' should be prior turns, not including the current user message being processed.
        history_for_respond = history[:-1]

        for response_chunk in respond(
            message=current_turn_text_message, # Text part of current turn
            image_files=current_turn_image_paths, # Image paths of current turn
            history=history_for_respond,    # History up to the previous turn
            system_message=system_msg,
            max_tokens=max_tokens,
            temperature=temperature,
            top_p=top_p,
            frequency_penalty=freq_penalty,
            seed=seed,
            provider=provider,
            custom_api_key=api_key,
            custom_model=custom_model,
            model_search_term="", # Not directly used by respond's core logic here
            selected_model=selected_model
        ):
            history[-1][1] = response_chunk # Update bot response in the current turn
            yield history

    # Event handlers
    # The parameters to `bot` must match the order of inputs list
    msg.submit(
        user,
        [msg, chatbot],
        [chatbot],
        queue=False
    ).then(
        bot,
        [chatbot, system_message_box, max_tokens_slider, temperature_slider, top_p_slider,
         frequency_penalty_slider, seed_slider, provider_radio, byok_textbox, custom_model_box,
         featured_model_radio],  # model_search_box is UI-only and deliberately omitted
        [chatbot]
    ).then(
        lambda: {"text": "", "files": []},
        None,
        [msg]
    )
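
    # The submit chain above runs three steps in order (a summary, not new behavior):
    #   1. user():  append the typed text / uploaded images to the chatbot history
    #   2. bot():   stream the model's reply into the last history row
    #   3. lambda:  reset the MultimodalTextbox for the next turn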
    
    model_search_box.change(
        fn=filter_models, inputs=model_search_box, outputs=featured_model_radio
    )
    print("Model search box change event linked.")

    featured_model_radio.change(
        fn=set_custom_model_from_radio, inputs=featured_model_radio, outputs=custom_model_box
    )
    print("Featured model radio button change event linked.")
    
    byok_textbox.change(
        fn=validate_provider, inputs=[byok_textbox, provider_radio], outputs=provider_radio
    )
    print("BYOK textbox change event linked.")

    provider_radio.change(
        fn=validate_provider, inputs=[byok_textbox, provider_radio], outputs=provider_radio
    )
    print("Provider radio button change event linked.")

print("Gradio interface initialized.")

if __name__ == "__main__":
    print("Launching the demo application.")
    # mcp_server=True exposes this app's functions as MCP tools alongside the web UI
    demo.launch(show_api=True, mcp_server=True)