# Hugging Face Space page metadata (scrape residue, kept for provenance):
# author: Matt · "Update" · commit 3396f08 · raw / history / blame · 1.1 kB
import gradio as gr
from transformers import AutoTokenizer
# Load the Zephyr-7B-beta tokenizer; apply_chat_template() below overwrites
# its chat_template attribute on every request, so the template that ships
# with the checkpoint is only a starting point.
tokenizer = AutoTokenizer.from_pretrained("HuggingFaceH4/zephyr-7b-beta")
# Fixture 1: a plain user/assistant exchange (no system message).
demo_conversation1 = [
{"role": "user", "content": "Hi there!"},
{"role": "assistant", "content": "Hello, human!"}
]
# Fixture 2: a system prompt followed by a user turn (no assistant reply yet).
demo_conversation2 = [
{"role": "system", "content": "You are a helpful chatbot."},
{"role": "user", "content": "Hi there!"}
]
# All conversations previewed by the app; the number of UI output panels
# created at the bottom of the file must match this list's length.
conversations = [demo_conversation1, demo_conversation2]
def apply_chat_template(template):
    """Preview *template* against every demo conversation.

    Installs the given Jinja template string on the module-level tokenizer,
    then renders each conversation twice: verbatim, and with the generation
    prompt appended (add_generation_prompt=True).

    Returns a tuple of formatted strings, one per conversation, matching the
    order (and count) of the module-level ``conversations`` list.
    """
    # NOTE: mutates shared tokenizer state — fine for this single-user demo.
    tokenizer.chat_template = template

    def render(index, messages):
        # Render one conversation both ways and label the two variants.
        plain = tokenizer.apply_chat_template(messages, tokenize=False)
        prompted = tokenizer.apply_chat_template(
            messages, tokenize=False, add_generation_prompt=True
        )
        return (
            f"Conversation {index} without generation prompt:\n\n{plain}\n\n"
            f"Conversation {index} with generation prompt:\n\n{prompted}\n\n"
        )

    return tuple(render(i, msgs) for i, msgs in enumerate(conversations))
# One template text box in; one text panel out per demo conversation, so the
# output count stays in sync with the conversations list.
iface = gr.Interface(
    fn=apply_chat_template,
    inputs="text",
    outputs=["text" for _ in conversations],
)
iface.launch()