File size: 843 Bytes
a2320ed
b0a2a59
a2320ed
4c1fe3c
9c084a1
79e7eb6
 
 
 
4c1fe3c
 
 
 
 
 
d1117d8
4c1fe3c
79e7eb6
 
 
 
 
 
 
 
 
d1117d8
4c1fe3c
 
 
a2320ed
 
d1117d8
a2320ed
4c1fe3c
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
import os

import streamlit as st
import torch
import transformers
from huggingface_hub import HfFolder, login
from transformers import pipeline


# Authenticate against the Hugging Face Hub when a token is available.
# NOTE: `login(token=...)` is the supported API; `HfFolder.save_token`
# is deprecated in huggingface_hub and slated for removal.
HF_TOKEN = os.getenv('HF_TOKEN')
if HF_TOKEN:
    login(token=HF_TOKEN)
else:
    # Gated models (e.g. meta-llama) will fail to download without a token,
    # so surface a visible warning rather than failing silently later.
    st.warning("HF_TOKEN is not set. Proceeding without a token.")

# Base model for the demo; gated on the Hub, so HF_TOKEN must grant access.
model_id = "meta-llama/Meta-Llama-3-8B"


@st.cache_resource(show_spinner="Loading model...")
def _load_generator(model_name: str):
    """Build the text-generation pipeline once and reuse it across reruns.

    Streamlit re-executes the whole script on every widget interaction;
    without `st.cache_resource` the 8B-parameter model would be reloaded
    on each rerun. bfloat16 + device_map="auto" lets accelerate place the
    weights on the available GPU(s) at half memory cost.
    """
    return transformers.pipeline(
        "text-generation",
        model=model_name,
        model_kwargs={"torch_dtype": torch.bfloat16},
        device_map="auto",
    )


generator = _load_generator(model_id)


# Minimal front-end: one text area in, generated continuation out.
st.title("Text Generation")
st.write("Enter your text below.")

user_input = st.text_area("Your input")

# Empty string is falsy, so nothing runs until the user has typed something.
if user_input:
    # do_sample=False selects greedy decoding, keeping output deterministic.
    result = generator(user_input, do_sample=False)
    st.json(result)
    reply = result[0]['generated_text']
    st.write(f"Reply: {reply}")