dami1996 committed
Commit d68ded6
Parent(s): 11a93c7

cover generation removed

Files changed (1)
  1. app.py +2 -16
app.py CHANGED
@@ -1,6 +1,5 @@
 import gradio as gr
 import torch
-from diffusers import StableDiffusionPipeline
 from transformers import pipeline
 
 device = "cuda" if torch.cuda.is_available() else "cpu"
@@ -17,39 +16,27 @@ title_generator = pipeline(
     device=device,
 )
 
-stable_diffusion = StableDiffusionPipeline.from_pretrained("prompthero/openjourney-v4")
-stable_diffusion.to(device)
-
 
 def generate_blog_post(query):
-    # Generate the article
     print("Generating article.")
     article = text_generator(query, max_length=500, num_return_sequences=1)[0][
         "generated_text"
     ]
     print(f"{article = }")
 
-    # Generate a title for the article
     print("Generating the title.")
     title = title_generator(article, max_length=30, num_return_sequences=1)[0][
         "generated_text"
     ]
     print(f"{title = }")
 
-    # Generate a cover image using Stable Diffusion
-    print("Generating the cover.")
-    cover = stable_diffusion(title, num_inference_steps=20, guidance_scale=7.5).images[
-        0
-    ]
-
-    # Generate a summary of the article
     print("Generating the summary.")
     summary = summarizer(article, max_length=100, min_length=30, do_sample=False)[0][
         "summary_text"
     ]
     print(f"{summary = }")
 
-    return title, cover, summary, article
+    return title, summary, article
 
 
 with gr.Blocks() as iface:
@@ -69,13 +56,12 @@ with gr.Blocks() as iface:
             article_output = gr.Textbox(label="Article", lines=10)
 
         with gr.Column(scale=1):
-            cover_output = gr.Image(label="Cover")
             summary_output = gr.Textbox(label="Summary", lines=5)
 
     generate_button.click(
         generate_blog_post,
         inputs=topic_input,
-        outputs=[title_output, cover_output, summary_output, article_output],
+        outputs=[title_output, summary_output, article_output],
     )
 
 iface.launch()
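
For reference, a minimal sketch of app.py as it stands after this commit. The checkpoints behind text_generator, title_generator, and summarizer, plus the topic/title widgets and the row layout, sit outside the diff context, so the model names and the lines marked "assumed" below are placeholder assumptions; everything else mirrors the diff (the logging prints are omitted for brevity):

import gradio as gr
import torch
from transformers import pipeline

device = "cuda" if torch.cuda.is_available() else "cpu"

# Placeholder checkpoints -- the real model names are not visible in the diff.
text_generator = pipeline("text-generation", model="gpt2", device=device)
title_generator = pipeline("text2text-generation", model="t5-small", device=device)
summarizer = pipeline("summarization", model="sshleifer/distilbart-cnn-12-6", device=device)


def generate_blog_post(query):
    # Draft the article from the topic, then derive a title and a summary from it.
    article = text_generator(query, max_length=500, num_return_sequences=1)[0]["generated_text"]
    title = title_generator(article, max_length=30, num_return_sequences=1)[0]["generated_text"]
    summary = summarizer(article, max_length=100, min_length=30, do_sample=False)[0]["summary_text"]
    return title, summary, article


with gr.Blocks() as iface:
    topic_input = gr.Textbox(label="Topic")       # assumed widget
    generate_button = gr.Button("Generate")       # assumed widget
    with gr.Row():                                # assumed layout
        with gr.Column(scale=2):
            title_output = gr.Textbox(label="Title")  # assumed widget
            article_output = gr.Textbox(label="Article", lines=10)
        with gr.Column(scale=1):
            summary_output = gr.Textbox(label="Summary", lines=5)
    generate_button.click(
        generate_blog_post,
        inputs=topic_input,
        outputs=[title_output, summary_output, article_output],
    )

iface.launch()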