nastasiasnk committed on
Commit
4c1fe3c
·
verified ·
1 Parent(s): 5965e2a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +13 -30
app.py CHANGED
@@ -1,40 +1,23 @@
1
-
2
-
3
  import streamlit as st
4
  import os
5
- import torch # Add this line to import the torch library
6
- import transformers
7
-
8
-
9
- HF_TOKEN = os.getenv('HF_TOKEN')
10
- from huggingface_hub import HfFolder
11
- # Set the token using HfFolder (this persists the token)
12
- HfFolder.save_token(HF_TOKEN)
13
-
14
  from transformers import pipeline
 
15
 
16
- # Load the model, specifying the use of GPU if available
17
-
18
- #device = 0 if torch.cuda.is_available() else -1 # use GPU if available
19
-
20
- #generator = pipeline('text-generation', model='EleutherAI/gpt-neo-2.7B', device=device)
21
-
22
-
23
- generator = pipeline("text-generation", model="gpt-2")
24
 
 
 
25
 
26
- st.title("text class")
27
- st.write("your text.")
28
- text = st.text_area("your input")
29
 
30
  if text:
31
  out = generator(text, do_sample=False)
32
  st.json(out)
33
- st.write(f"reply: {out}")
34
-
35
-
36
-
37
-
38
-
39
-
40
-
 
 
 
import streamlit as st
import os

from transformers import pipeline
from huggingface_hub import HfFolder

# Persist the Hugging Face token (if provided) so downstream hub calls can
# authenticate; missing token is a warning, not a fatal error.
HF_TOKEN = os.getenv('HF_TOKEN')
if HF_TOKEN:
    HfFolder.save_token(HF_TOKEN)
else:
    st.warning("HF_TOKEN is not set. Proceeding without a token.")


@st.cache_resource
def _load_generator():
    """Load the text-generation pipeline once per Streamlit process.

    Streamlit re-executes this whole script on every widget interaction;
    without caching, the GPT-2 model would be re-downloaded/re-loaded on
    each rerun. `st.cache_resource` keeps a single shared instance.
    """
    # Use a valid model identifier ("gpt2", not "gpt-2").
    return pipeline("text-generation", model="gpt2")


generator = _load_generator()

st.title("Text Generation")
st.write("Enter your text below.")
text = st.text_area("Your input")

if text:
    # do_sample=False -> greedy decoding, so replies are deterministic.
    out = generator(text, do_sample=False)
    st.json(out)
    # The pipeline returns a list of dicts; surface just the generated text.
    st.write(f"Reply: {out[0]['generated_text']}")