import gradio as gr
from openai import OpenAI
import os

# Retrieve the access token from the environment variable
ACCESS_TOKEN = os.getenv("HF_TOKEN")
print("Access token loaded.")

# Initialize the OpenAI client with the Hugging Face Inference API endpoint
client = OpenAI(
    base_url="https://api-inference.huggingface.co/v1/",
    api_key=ACCESS_TOKEN,
)
print("OpenAI client initialized.")

def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
    frequency_penalty,
    seed,
    model,
):
""" | |
This function handles the chatbot response. It takes in: | |
- message: the user's new message | |
- history: the list of previous messages, each as a tuple (user_msg, assistant_msg) | |
- system_message: the system prompt | |
- max_tokens: the maximum number of tokens to generate in the response | |
- temperature: sampling temperature | |
- top_p: top-p (nucleus) sampling | |
- frequency_penalty: penalize repeated tokens in the output | |
- seed: a fixed seed for reproducibility; -1 will mean 'random' | |
- model: the selected model for text generation | |
""" | |
print(f"Received message: {message}") | |
print(f"History: {history}") | |
print(f"System message: {system_message}") | |
print(f"Max tokens: {max_tokens}, Temperature: {temperature}, Top-P: {top_p}") | |
print(f"Frequency Penalty: {frequency_penalty}, Seed: {seed}, Model: {model}") | |
    # Convert seed to None if -1 (meaning random)
    if seed == -1:
        seed = None

    # Construct the messages array required by the API
    messages = [{"role": "system", "content": system_message}]

    # Add conversation history to the context
    for user_part, assistant_part in history:
        if user_part:
            messages.append({"role": "user", "content": user_part})
            print(f"Added user message to context: {user_part}")
        if assistant_part:
            messages.append({"role": "assistant", "content": assistant_part})
            print(f"Added assistant message to context: {assistant_part}")

    # Append the latest user message
    messages.append({"role": "user", "content": message})

    # Start with an empty string to build the response as tokens stream in
    response = ""
    print("Sending request to OpenAI API.")

    # Make the streaming request to the HF Inference API via the OpenAI-compatible client
    for message_chunk in client.chat.completions.create(
        model=model,  # Use the selected model
        max_tokens=max_tokens,
        stream=True,  # Stream the response
        temperature=temperature,
        top_p=top_p,
        frequency_penalty=frequency_penalty,
        seed=seed,
        messages=messages,
    ):
        # Extract the token text from the response chunk; some chunks (e.g. the
        # role header or the final chunk) carry no content, so guard against
        # None before concatenating
        token_text = message_chunk.choices[0].delta.content
        if token_text is None:
            continue
        print(f"Received token: {token_text}")
        response += token_text
        yield response

    print("Completed response generation.")

# Create a Chatbot component with a specified height
chatbot = gr.Chatbot(height=600)
print("Chatbot interface created.")

# List of featured models (placeholders for now; note that the gpt-* entries
# are OpenAI models and are not served by the Hugging Face Inference API)
featured_models = [
    "meta-llama/Llama-3.3-70B-Instruct",
    "gpt-3.5-turbo",
    "gpt-4",
    "mistralai/Mistral-7B-Instruct-v0.1",
    "tiiuae/falcon-40b-instruct",
]

# Function to filter the featured-model list based on the search input
def filter_models(search_term):
    filtered_models = [m for m in featured_models if search_term.lower() in m.lower()]
    return gr.update(choices=filtered_models)
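
# For example, filter_models("llama") narrows the radio's choices to
# ["meta-llama/Llama-3.3-70B-Instruct"].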

# Create the Gradio ChatInterface
demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Textbox(value="", label="System message"),
        gr.Slider(minimum=1, maximum=4096, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-P"),
        gr.Slider(minimum=-2.0, maximum=2.0, value=0.0, step=0.1, label="Frequency Penalty"),
        gr.Slider(minimum=-1, maximum=65535, value=-1, step=1, label="Seed (-1 for random)"),
        # This radio is the one actually passed to respond() as `model`
        gr.Radio(label="Select a model below", value="meta-llama/Llama-3.3-70B-Instruct", choices=featured_models, interactive=True, elem_id="model-radio"),
    ],
    fill_height=True,
    chatbot=chatbot,
    theme="Nymbo/Nymbo_Theme",
)
# Add a "Custom Model" text box and "Featured Models" accordion | |
with demo: | |
with gr.Tab("Model Settings"): | |
with gr.Row(): | |
with gr.Column(): | |
# Textbox for custom model input | |
custom_model = gr.Textbox(label="Custom Model", info="Hugging Face model path (optional)", placeholder="username/model-name") | |
# Accordion for selecting featured models | |
with gr.Accordion("Featured Models", open=True): | |
# Textbox for searching models | |
model_search = gr.Textbox(label="Filter Models", placeholder="Search for a featured model...", lines=1, elem_id="model-search-input") | |
# Radio buttons to select the desired model | |
model_radio = gr.Radio(label="Select a model below", value="meta-llama/Llama-3.3-70B-Instruct", choices=featured_models, interactive=True, elem_id="model-radio") | |
# Update model list when search box is used | |
model_search.change(filter_models, inputs=model_search, outputs=model_radio) | |
# Add an "Information" tab with accordions | |
with gr.Tab("Information"): | |
with gr.Row(): | |
# Accordion for "Featured Models" with a table | |
with gr.Accordion("Featured Models (WiP)", open=False): | |
gr.HTML( | |
""" | |
<p><a href="https://huggingface.co/models?inference=warm&pipeline_tag=text-generation&sort=trending">See all available models</a></p> | |
<table style="width:100%; text-align:center; margin:auto;"> | |
<tr> | |
<th>Model Name</th> | |
<th>Typical Use Case</th> | |
<th>Notes</th> | |
</tr> | |
<tr> | |
<td>meta-llama/Llama-3.3-70B-Instruct</td> | |
<td>General-purpose instruction following</td> | |
<td>High-quality, large-scale model</td> | |
</tr> | |
<tr> | |
<td>gpt-3.5-turbo</td> | |
<td>Chat and general text generation</td> | |
<td>Fast and efficient</td> | |
</tr> | |
<tr> | |
<td>gpt-4</td> | |
<td>Advanced text generation</td> | |
<td>State-of-the-art performance</td> | |
</tr> | |
<tr> | |
<td>mistralai/Mistral-7B-Instruct-v0.1</td> | |
<td>Instruction following</td> | |
<td>Lightweight and efficient</td> | |
</tr> | |
<tr> | |
<td>tiiuae/falcon-40b-instruct</td> | |
<td>Instruction following</td> | |
<td>High-quality, large-scale model</td> | |
</tr> | |
</table> | |
""" | |
) | |
# Accordion for "Parameters Overview" with markdown | |
with gr.Accordion("Parameters Overview", open=False): | |
gr.Markdown( | |
""" | |
## System Message | |
###### This is the initial prompt that sets the behavior of the model. It can be used to define the tone, style, or role of the assistant. | |
## Max Tokens | |
###### This controls the maximum length of the generated response. Higher values allow for longer responses but may take more time to generate. | |
## Temperature | |
###### This controls the randomness of the output. Lower values make the model more deterministic, while higher values make it more creative. | |
## Top-P | |
###### This controls the diversity of the output by limiting the model to the most likely tokens. Lower values make the output more focused, while higher values allow for more diversity. | |
## Frequency Penalty | |
###### This penalizes repeated tokens in the output. Higher values discourage repetition, while lower values allow for more repetitive outputs. | |
## Seed | |
###### This sets a fixed seed for reproducibility. A value of -1 means the seed is random. | |
## Model | |
###### This selects the model used for text generation. You can choose from featured models or specify a custom model. | |
""" | |
) | |
print("Gradio interface initialized.") | |
if __name__ == "__main__": | |
print("Launching the demo application.") | |
demo.launch() |
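
# To run this locally (a sketch; assumes the file is saved as app.py, the
# dependencies are installed, and the token is exported):
#   pip install gradio openai
#   export HF_TOKEN=hf_...
#   python app.py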