# Scrape residue from the Hugging Face Spaces page header (Space status):
# Spaces: Sleeping
# from transformers import AutoModelForCausalLM, AutoTokenizer
# Shell setup command (run outside Python, e.g. in requirements/setup):
# pip install --no-cache-dir transformers sentencepiece
"""Minimal Streamlit demo: answer a Portuguese question with facebook/xglm-564M.

Loads the XGLM causal language model, generates a completion for a fixed
prompt, and displays the text plus the elapsed wall-clock time both on
stdout and in the Streamlit page.
"""
import time  # NOTE(review): unused here, kept in case later file chunks rely on it
import datetime

import streamlit as st
import torch
from transformers import AutoTokenizer, XGLMForCausalLM

# Prompt (Portuguese): "Question: What is the largest planet in the solar system?"
prompt = "Question: Qual é o maior planeta do sistema solar ?"

before = datetime.datetime.now()

# Slow (sentencepiece) tokenizer is requested explicitly via use_fast=False.
tokenizer = AutoTokenizer.from_pretrained("facebook/xglm-564M", use_fast=False)
# BUG FIX: the original used XGLMModel, whose forward() neither accepts
# `labels` nor returns token ids — decoding its last_hidden_state cannot
# yield text. XGLMForCausalLM + generate() produces decodable token ids.
model = XGLMForCausalLM.from_pretrained("facebook/xglm-564M")

inputs = tokenizer(prompt, return_tensors="pt")
# Inference only: disable autograd to save memory/time.
with torch.no_grad():
    generated_ids = model.generate(**inputs, max_new_tokens=50)
output = tokenizer.batch_decode(
    generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False
)[0]

with st.container():
    st.write('\n\n')
    st.write('LLM-LANAChat')
    st.write('\n\n' + output)

print('saida gerada.')
print('\n\n')

after = datetime.datetime.now()
current_time = (after - before)  # timedelta; printed directly
print("\nTime Elapsed: ", current_time)
st.write("\nTime Elapsed: ", current_time)