# TinyLlama-1B / app.py
# Gradio text-generation demo built on TinyLlama-1.1B.
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
# Load the TinyLlama 1.1B checkpoint (3T-token intermediate release) and its tokenizer.
tokenizer = AutoTokenizer.from_pretrained("TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T")
model = AutoModelForCausalLM.from_pretrained("TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T")
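# Optional (not in the original app): load the model in half precision on a GPU
# to reduce memory use. A minimal sketch, assuming a CUDA device is available:
#
#   model = AutoModelForCausalLM.from_pretrained(
#       "TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T",
#       torch_dtype=torch.float16,
#   ).to("cuda")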
def generate_text(prompt, temperature, max_length, min_length):
    # Tokenize the prompt
    input_ids = tokenizer.encode(prompt, return_tensors="pt")
    # Generate text; do_sample=True is required for temperature to take effect,
    # and gr.Number may return floats, so the length arguments are cast to int.
    output = model.generate(
        input_ids, max_length=int(max_length), min_length=int(min_length),
        temperature=temperature, do_sample=True, num_return_sequences=1,
        pad_token_id=tokenizer.eos_token_id,
    )
    # Decode the generated output
    generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
    return generated_text
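# Note: max_length counts the prompt tokens as well as the generated ones. To
# bound only the newly generated tokens, generate() also accepts
# max_new_tokens, e.g. model.generate(input_ids, max_new_tokens=64, ...).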
def chatbot_app(prompt, temperature, max_length, min_length):
    # Thin wrapper that Gradio calls with the four interface inputs.
    generated_text = generate_text(prompt, temperature, max_length, min_length)
    return generated_text
iface = gr.Interface(
    fn=chatbot_app,
    inputs=[
        "text",
        gr.Number(minimum=0.1, maximum=2.0, value=1.0, label="Temperature"),
        gr.Number(minimum=10, maximum=2048, value=10, label="Max Length"),
        gr.Number(minimum=1, maximum=2048, value=1, label="Min Length"),
    ],
    outputs="text",
    live=False,
)
iface.launch()
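# Optional (not in the original app): iface.launch(share=True) also creates a
# temporary public URL via Gradio's tunneling service.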