Rzhishchev committed on
Commit
5d4b507
·
1 Parent(s): 2088f46

Update gpt2.py

Browse files
Files changed (1) hide show
  1. gpt2.py +39 -41
gpt2.py CHANGED
@@ -3,44 +3,42 @@ from transformers import GPT2LMHeadModel, GPT2Tokenizer
3
  import torch
4
 
5
 
6
# Device selection: use the GPU when available, otherwise fall back to CPU.
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Fine-tuned GPT-2 checkpoint, loaded once at import time.
model_path = "zhvanetsky_model"
tokenizer = GPT2Tokenizer.from_pretrained(model_path)
model = GPT2LMHeadModel.from_pretrained(model_path).to(DEVICE)


def generate_text(input_text, num_beams, temperature, max_length, top_p):
    """Generate a continuation of *input_text* with the fine-tuned GPT-2 model.

    Parameters
    ----------
    input_text : str
        Prompt to continue.
    num_beams : int
        Beam width passed to ``model.generate`` (beam-sample mode, since
        ``do_sample=True``).
    temperature : float
        Softmax temperature; higher values give more random output.
    max_length : int
        Maximum total length (prompt + continuation) in tokens.
    top_p : float
        Nucleus-sampling probability mass.

    Returns
    -------
    str
        The decoded generated text with special tokens stripped.
    """
    model.eval()
    input_ids = tokenizer.encode(input_text, return_tensors="pt").to(DEVICE)
    with torch.no_grad():
        out = model.generate(
            input_ids,
            do_sample=True,
            num_beams=num_beams,
            temperature=temperature,
            top_p=top_p,
            top_k=500,
            max_length=max_length,
            no_repeat_ngram_size=3,
            # Only out[0] is decoded below, so generating one sequence
            # avoids wasted compute (was num_return_sequences=3).
            num_return_sequences=1,
        )
    return tokenizer.decode(out[0], skip_special_tokens=True)


# Streamlit interface
st.title("GPT-2 Text Generator")

user_input = st.text_area("Input Text", "Введите ваш текст")

# Sliders / input boxes for the generation parameters.
num_beams = st.slider("Number of Beams", min_value=1, max_value=20, value=10)
temperature = st.slider("Temperature", min_value=0.1, max_value=3.0, value=1.0, step=0.1)
max_length = st.number_input("Max Length", min_value=10, max_value=300, value=100)
top_p = st.slider("Top P", min_value=0.1, max_value=1.0, value=0.85, step=0.05)

if st.button("Generate"):
    generated_output = generate_text(user_input, num_beams, temperature, max_length, top_p)
    st.text_area("Generated Text", generated_output)
 
3
  import torch
4
 
5
 
6
def app():
    """Render the GPT-2 text-generation Streamlit page.

    Encapsulating function: the whole page (model load, parameter widgets,
    and the generate button) lives here so the module can be imported and
    the page mounted by a multi-page Streamlit app.
    """
    # Fix: the original called st.title twice ("GPT-2 Demo" and
    # "GPT-2 Text Generator"), rendering two page headers; keep one.
    st.title("GPT-2 Text Generator")

    # Device selection: GPU when available, otherwise CPU.
    DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Fine-tuned GPT-2 checkpoint.
    # NOTE(review): this reloads the model on every Streamlit rerun; wrapping
    # the load in an @st.cache_resource helper would avoid that — confirm the
    # installed Streamlit version supports it before changing.
    model_path = "zhvanetsky_model"
    tokenizer = GPT2Tokenizer.from_pretrained(model_path)
    model = GPT2LMHeadModel.from_pretrained(model_path).to(DEVICE)

    def generate_text(input_text, num_beams, temperature, max_length, top_p):
        """Generate a continuation of *input_text*; return the decoded string.

        ``num_beams``/``temperature``/``max_length``/``top_p`` are forwarded
        to ``model.generate`` (beam-sample mode, since ``do_sample=True``).
        """
        model.eval()
        input_ids = tokenizer.encode(input_text, return_tensors="pt").to(DEVICE)
        with torch.no_grad():
            out = model.generate(
                input_ids,
                do_sample=True,
                num_beams=num_beams,
                temperature=temperature,
                top_p=top_p,
                top_k=500,
                max_length=max_length,
                no_repeat_ngram_size=3,
                # Only out[0] is decoded below, so one sequence suffices
                # (was num_return_sequences=3 — wasted compute).
                num_return_sequences=1,
            )
        return tokenizer.decode(out[0], skip_special_tokens=True)

    # --- Streamlit interface ------------------------------------------------
    user_input = st.text_area("Input Text", "Введите ваш текст")

    # Sliders / input boxes for the generation parameters.
    num_beams = st.slider("Number of Beams", min_value=1, max_value=20, value=10)
    temperature = st.slider("Temperature", min_value=0.1, max_value=3.0, value=1.0, step=0.1)
    max_length = st.number_input("Max Length", min_value=10, max_value=300, value=100)
    top_p = st.slider("Top P", min_value=0.1, max_value=1.0, value=0.85, step=0.05)

    if st.button("Generate"):
        generated_output = generate_text(user_input, num_beams, temperature, max_length, top_p)
        st.text_area("Generated Text", generated_output)