import gradio as gr
import torch
from transformers import AutoConfig, AutoTokenizer, GPTJForCausalLM, pipeline

# load the model weights in fp16
model = GPTJForCausalLM.from_pretrained("hackathon-somos-nlp-2023/bertin-gpt-j-6b-ner-es", torch_dtype=torch.float16)

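# load the model config, with name_or_path overridden to point at the adapter checkpoint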
config = AutoConfig.from_pretrained("hackathon-somos-nlp-2023/bertin-gpt-j-6b-ner-es", name_or_path="adapter_model.bin")

# load tokenizer
tokenizer = AutoTokenizer.from_pretrained("hackathon-somos-nlp-2023/bertin-gpt-j-6b-ner-es")

# create pipeline
pipe = pipeline("text-generation", model=model, config=config, tokenizer=tokenizer, device=0)

def predict(text):
  # the pipeline returns a list of dicts; take the generated text of the first result
  return pipe(f"text: {text}, entities:")[0]["generated_text"]

iface = gr.Interface(
  fn=predict, 
  inputs='text',
  outputs='text',
  examples=[["Yo hoy voy a hablar de mujeres en el mundo del arte, porque me ha leído un libro fantástico que se llama Historia del arte sin hombres, de Katie Hesel."]]
)

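# launch() serves the demo on a local Gradio server (http://127.0.0.1:7860 by default); pass share=True for a temporary public link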
iface.launch()