import json
import os
from threading import Thread
from typing import Iterator, List, Optional, Tuple

import gradio as gr
import requests
import spaces
import torch
from flask import Flask, jsonify, request
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

DESCRIPTION = """\
# Zero GPU Model Comparison Arena
Compare two language models using Hugging Face's Zero GPU initiative.
Select two different models from the dropdowns and see how they perform on the same input.
"""
MAX_MAX_NEW_TOKENS = 1024
DEFAULT_MAX_NEW_TOKENS = 256
MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
MODEL_OPTIONS = [
    "sarvamai/OpenHathi-7B-Hi-v0.1-Base",
    "TokenBender/Navarna_v0_1_OpenHermes_Hindi",
]
models = {}
tokenizers = {}
for model_id in MODEL_OPTIONS:
    tokenizers[model_id] = AutoTokenizer.from_pretrained(model_id)
    models[model_id] = AutoModelForCausalLM.from_pretrained(
        model_id,
        device_map="auto",
        load_in_8bit=True,
    )
    models[model_id].eval()
    # Set pad_token_id to eos_token_id if it's not set
    if tokenizers[model_id].pad_token_id is None:
        tokenizers[model_id].pad_token_id = tokenizers[model_id].eos_token_id
# Initialize Flask app
app = Flask(__name__)
@app.route("/log", methods=["POST"])
def log_results():
    data = request.json
    # Here you could add any additional processing or storage logic
    print("Logged:", json.dumps(data, indent=2))
    return jsonify({"status": "success"}), 200
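
# For reference: log_comparison() below POSTs a JSON payload shaped roughly like
# the following to a /log endpoint (placeholder values, not real data):
#
#     {
#         "question": "<user prompt>",
#         "model1": {"name": "<model 1 id>", "answer": "<model 1 output>"},
#         "model2": {"name": "<model 2 id>", "answer": "<model 2 output>"},
#         "winner": None  # or the winning model id once a vote is cast
#     }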
def prepare_input(model_id: str, message: str, chat_history: List[Tuple[str, str]]):
    if "OpenHathi" in model_id:
        # The OpenHathi base model has no chat template, so build a plain-text
        # prompt with the history in chronological order, ending with the new message.
        full_prompt = ""
        for user, assistant in chat_history:
            full_prompt += f"{user}\n{assistant}\n"
        full_prompt += message
        return tokenizers[model_id](full_prompt, return_tensors="pt")
    elif "Navarna" in model_id:
        # The Navarna model ships a chat template, so use it.
        conversation = []
        for user, assistant in chat_history:
            conversation.extend([
                {"role": "user", "content": user},
                {"role": "assistant", "content": assistant},
            ])
        conversation.append({"role": "user", "content": message})
        prompt = tokenizers[model_id].apply_chat_template(conversation, tokenize=False, add_generation_prompt=True)
        return tokenizers[model_id](prompt, return_tensors="pt")
    else:
        raise ValueError(f"Unsupported model: {model_id}")
@spaces.GPU(duration=90)
def generate(
    model_id: str,
    message: str,
    chat_history: List[Tuple[str, str]],
    max_new_tokens: int = DEFAULT_MAX_NEW_TOKENS,
    temperature: float = 0.7,
    top_p: float = 0.95,
) -> Iterator[str]:
    model = models[model_id]
    tokenizer = tokenizers[model_id]

    inputs = prepare_input(model_id, message, chat_history)
    input_ids = inputs.input_ids
    if input_ids.shape[1] > MAX_INPUT_TOKEN_LENGTH:
        input_ids = input_ids[:, -MAX_INPUT_TOKEN_LENGTH:]
        gr.Warning(f"Trimmed input from conversation as it was longer than {MAX_INPUT_TOKEN_LENGTH} tokens.")
    input_ids = input_ids.to(model.device)

    streamer = TextIteratorStreamer(tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True)
    generate_kwargs = dict(
        input_ids=input_ids,
        streamer=streamer,
        max_new_tokens=max_new_tokens,
        do_sample=True,
        top_p=top_p,
        temperature=temperature,
        num_beams=1,
        pad_token_id=tokenizer.eos_token_id,
    )
    t = Thread(target=model.generate, kwargs=generate_kwargs)
    t.start()

    # Yield the accumulated text after each new chunk arrives from the streamer.
    outputs = []
    for text in streamer:
        outputs.append(text)
        yield "".join(outputs)
def compare_models(
    model1_name: str,
    model2_name: str,
    message: str,
    chat_history1: List[Tuple[str, str]],
    chat_history2: List[Tuple[str, str]],
    max_new_tokens: int,
    temperature: float,
    top_p: float,
) -> Tuple[List[Tuple[str, str]], List[Tuple[str, str]], List[Tuple[str, str]], List[Tuple[str, str]]]:
    if model1_name == model2_name:
        error_message = [("System", "Error: Please select two different models.")]
        return error_message, error_message, chat_history1, chat_history2

    # generate() yields the full text produced so far at each step, so keep only
    # the last value from each stream as the final answer.
    output1 = ""
    for partial in generate(model1_name, message, chat_history1, max_new_tokens, temperature, top_p):
        output1 = partial
    output2 = ""
    for partial in generate(model2_name, message, chat_history2, max_new_tokens, temperature, top_p):
        output2 = partial

    chat_history1.append((message, output1))
    chat_history2.append((message, output2))
    log_comparison(model1_name, model2_name, message, output1, output2)
    return chat_history1, chat_history2, chat_history1, chat_history2
def log_comparison(model1_name: str, model2_name: str, question: str, answer1: str, answer2: str, winner: Optional[str] = None):
    log_data = {
        "question": question,
        "model1": {"name": model1_name, "answer": answer1},
        "model2": {"name": model2_name, "answer": answer2},
        "winner": winner,
    }
    # Send log data to the logging server
    try:
        response = requests.post('http://144.24.151.32:5000/log', json=log_data)
        if response.status_code == 200:
            print("Successfully logged to server")
        else:
            print(f"Failed to log to server. Status code: {response.status_code}")
    except requests.RequestException as e:
        print(f"Error sending log to server: {e}")
def vote_better(model1_name, model2_name, question, answer1, answer2, choice):
    winner = model1_name if choice == "Model 1" else model2_name
    log_comparison(model1_name, model2_name, question, answer1, answer2, winner)
    return f"You voted that {winner} performs better. This has been logged."
with gr.Blocks(css="style.css") as demo:
gr.Markdown(DESCRIPTION)
with gr.Row():
with gr.Column():
model1_dropdown = gr.Dropdown(choices=MODEL_OPTIONS, label="Model 1", value=MODEL_OPTIONS[0])
chatbot1 = gr.Chatbot(label="Model 1 Output")
with gr.Column():
model2_dropdown = gr.Dropdown(choices=MODEL_OPTIONS, label="Model 2", value=MODEL_OPTIONS[1])
chatbot2 = gr.Chatbot(label="Model 2 Output")
text_input = gr.Textbox(label="Input Text", lines=3)
with gr.Row():
max_new_tokens = gr.Slider(label="Max new tokens", minimum=1, maximum=MAX_MAX_NEW_TOKENS, value=DEFAULT_MAX_NEW_TOKENS)
temperature = gr.Slider(label="Temperature", minimum=0.1, maximum=2.0, value=0.7)
top_p = gr.Slider(label="Top-p (nucleus sampling)", minimum=0.05, maximum=1.0, value=0.95)
compare_btn = gr.Button("Compare Models")
with gr.Row():
better1_btn = gr.Button("Model 1 is Better")
better2_btn = gr.Button("Model 2 is Better")
vote_output = gr.Textbox(label="Voting Result")
compare_btn.click(
compare_models,
inputs=[model1_dropdown, model2_dropdown, text_input, chatbot1, chatbot2, max_new_tokens, temperature, top_p],
outputs=[chatbot1, chatbot2, chatbot1, chatbot2]
)
better1_btn.click(
vote_better,
inputs=[model1_dropdown, model2_dropdown, text_input, chatbot1, chatbot2, gr.Textbox(value="Model 1", visible=False)],
outputs=[vote_output]
)
better2_btn.click(
vote_better,
inputs=[model1_dropdown, model2_dropdown, text_input, chatbot1, chatbot2, gr.Textbox(value="Model 2", visible=False)],
outputs=[vote_output]
)
if __name__ == "__main__":
# Start Flask server in a separate thread
flask_thread = Thread(target=app.run, kwargs={"host": "0.0.0.0", "port": 5000})
flask_thread.start()
# Start Gradio app with public link
demo.queue(max_size=10).launch(share=True) |