# Install the extra dependency (Transformers, Torch, and Streamlit must also be available):
!pip install accelerate

from transformers import AutoTokenizer
import transformers
import torch
import streamlit as st

model = "tiiuae/falcon-7b"

# Load the tokenizer and build the text-generation pipeline once, before handling input
tokenizer = AutoTokenizer.from_pretrained(model)
pipeline = transformers.pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    torch_dtype=torch.bfloat16,
    trust_remote_code=True,
    device_map="auto",
)

text_input = st.text_area("Enter some text: ")

if text_input:
    # Generate up to 200 tokens, sampling from the 10 most likely tokens at each step
    sequences = pipeline(
        text_input,
        max_length=200,
        do_sample=True,
        top_k=10,
        num_return_sequences=1,
        eos_token_id=tokenizer.eos_token_id,
    )
    # Show the output in the app itself (print would only reach the terminal)
    for seq in sequences:
        st.write(f"Result: {seq['generated_text']}")
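To try the app, save the script as a file (the name app.py below is just a placeholder) and launch it from a terminal with Streamlit's CLI; note that the !pip line is notebook syntax, so in a plain script environment install the packages from the shell instead:

streamlit run app.py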
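One caveat worth knowing: Streamlit reruns the entire script on every widget interaction, so the 7B model above would be reloaded each time the user submits text. A minimal sketch of loading the pipeline once with Streamlit's st.cache_resource decorator; the helper name load_pipeline is an assumption for illustration, not part of the original:

import transformers
import torch
import streamlit as st

# Cache the heavy pipeline object across Streamlit reruns so the model
# is loaded only once per process rather than on every interaction.
# (load_pipeline is a hypothetical helper name.)
@st.cache_resource
def load_pipeline(model_name: str):
    tokenizer = transformers.AutoTokenizer.from_pretrained(model_name)
    return transformers.pipeline(
        "text-generation",
        model=model_name,
        tokenizer=tokenizer,
        torch_dtype=torch.bfloat16,
        trust_remote_code=True,
        device_map="auto",
    )

pipeline = load_pipeline("tiiuae/falcon-7b")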