"""Gradio demo: Spanish NER via text generation with bertin-gpt-j-6b-ner-es."""

import gradio as gr
import torch
from transformers import AutoConfig, AutoTokenizer, GPTJForCausalLM, pipeline

# Load the causal-LM weights.
# NOTE(review): the comment in the original said "fp16 model" but no
# torch_dtype=torch.float16 was passed — confirm whether half precision
# is intended before adding it.
# NOTE(review): this path has a "models/" prefix while the config and
# tokenizer below do not — verify both resolve to the same checkpoint.
model = GPTJForCausalLM.from_pretrained(
    "models/hackathon-somos-nlp-2023/bertin-gpt-j-6b-ner-es"
)

# NOTE(review): name_or_path pointing at "adapter_model.bin" looks like an
# adapter-weights artifact, not a config field — verify this is intentional.
config = AutoConfig.from_pretrained(
    "hackathon-somos-nlp-2023/bertin-gpt-j-6b-ner-es",
    name_or_path="adapter_model.bin",
)

# Load the matching tokenizer.
tokenizer = AutoTokenizer.from_pretrained(
    "hackathon-somos-nlp-2023/bertin-gpt-j-6b-ner-es"
)

# Build the generation pipeline on GPU 0 (device=0 requires CUDA).
pipe = pipeline(
    "text-generation",
    model=model,
    config=config,
    tokenizer=tokenizer,
    device=0,
)


def predict(text):
    """Run NER-by-generation on *text*.

    Wraps the input in the prompt format the model was fine-tuned on and
    returns the generated continuation (which encodes the entities).

    Args:
        text: Raw Spanish input sentence.

    Returns:
        The model's generated text, including the prompt prefix.
    """
    # BUG FIX: a text-generation pipeline returns a *list* of dicts, so the
    # original ``pipe(...)["generated_text"]`` raised TypeError; index the
    # first candidate before reading the key.
    return pipe(f"text: {text}, entities:")[0]["generated_text"]


iface = gr.Interface(
    fn=predict,
    inputs="text",
    outputs="text",
    examples=[
        [
            "Yo hoy voy a hablar de mujeres en el mundo del arte, porque me "
            "ha leído un libro fantástico que se llama Historia del arte sin "
            "hombres, de Katie Hesel."
        ]
    ],
)

iface.launch()