import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
import gradio as gr
from datasets import load_dataset
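# Writer/palmyra-small is a small causal language model from the Hugging Face Hub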
model_name = "Writer/palmyra-small"
tokenizer = AutoTokenizer.from_pretrained(model_name)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = AutoModelForCausalLM.from_pretrained(model_name).to(device)
def get_movie_info(movie_title):
    # Load the IMDb reviews dataset (train split). Note: the Hugging Face
    # "imdb" dataset only has "text" and "label" columns, so we return a
    # matching review excerpt rather than title/year/genre metadata.
    imdb = load_dataset("imdb", split="train")
    # Search for reviews that mention the movie title
    results = imdb.filter(lambda x: movie_title.lower() in x["text"].lower())
    # Check if any matching reviews were found
    if len(results) > 0:
        review = results[0]
        sentiment = "positive" if review["label"] == 1 else "negative"
        return f"Sample IMDb review ({sentiment}): {review['text'][:200]}"
    else:
        return "Movie not found"
def generate_response(prompt):
    # Retrieve IMDb context for the prompt before building the chat template
    movie_info = get_movie_info(prompt)
    # Inject the movie info before the assistant turn so the model can condition on it
    input_text_template = (
        "A chat between a curious user and an artificial intelligence assistant. "
        "The assistant gives helpful, detailed, and polite answers to the user's questions. "
        f"USER: {prompt} "
        f"Movie Info: {movie_info} "
        "ASSISTANT:"
    )
    model_inputs = tokenizer(input_text_template, return_tensors="pt").to(device)
    gen_conf = {
        "top_k": 20,
        "max_new_tokens": 200,  # max_length would also count prompt tokens
        "temperature": 0.6,
        "do_sample": True,
        "eos_token_id": tokenizer.eos_token_id,
        "pad_token_id": tokenizer.eos_token_id,  # avoids a warning if the tokenizer has no pad token
    }
    output = model.generate(**model_inputs, **gen_conf)
    generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
    return generated_text
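# Example (hypothetical input): generate_response("Inception") looks up IMDb
# reviews mentioning "Inception" and prompts the model with that context.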
# Wire the generator into a simple text-to-text Gradio demo. Note that
# live=True re-runs generation on every keystroke, which is expensive for a
# full model.generate call; remove it to generate only on submit.
iface = gr.Interface(fn=generate_response, inputs="text", outputs="text", live=True)
iface.launch()