# NOTE(review): the lines "Spaces: / Sleeping / Sleeping" were Hugging Face
# Spaces page residue from a web scrape, not source code; preserved here as a
# comment so the file remains valid Python.
"""Streamlit demo: greedy text generation with Meta-Llama-3-8B."""
import os

import streamlit as st
import torch
import transformers
from huggingface_hub import HfFolder

# Ensure the HF_TOKEN environment variable is set correctly; the token is
# required to download gated models such as Llama 3. Missing token is a
# deliberate best-effort path: warn and continue rather than crash.
HF_TOKEN = os.getenv('HF_TOKEN')
if HF_TOKEN:
    HfFolder.save_token(HF_TOKEN)
else:
    st.warning("HF_TOKEN is not set. Proceeding without a token.")


@st.cache_resource
def _load_generator(model_id: str = "meta-llama/Meta-Llama-3-8B"):
    """Build the text-generation pipeline once and reuse it across reruns.

    Streamlit re-executes the whole script on every widget interaction;
    without caching, the 8B-parameter model would be reloaded each time.
    bfloat16 halves memory vs float32; device_map="auto" lets accelerate
    place the weights on the available GPU(s)/CPU.
    """
    return transformers.pipeline(
        "text-generation",
        model=model_id,
        model_kwargs={"torch_dtype": torch.bfloat16},
        device_map="auto",
    )


generator = _load_generator()

st.title("Text Generation")
st.write("Enter your text below.")
text = st.text_area("Your input")
if text:
    # do_sample=False -> greedy decoding, so output is deterministic.
    out = generator(text, do_sample=False)
    st.json(out)  # show the raw pipeline output for inspection
    st.write(f"Reply: {out[0]['generated_text']}")