dami1996 committed on
Commit
a0fd028
Β·
1 Parent(s): f19162e

cover generator removed

Browse files
Files changed (2) hide show
  1. README.md +1 -1
  2. app.py +69 -66
README.md CHANGED
@@ -1,5 +1,5 @@
1
  ---
2
- title: AI Blog Post Generator
3
  emoji: πŸ“
4
  colorFrom: blue
5
  colorTo: green
 
1
  ---
2
+ title: Blog Post Generator
3
  emoji: πŸ“
4
  colorFrom: blue
5
  colorTo: green
app.py CHANGED
@@ -1,66 +1,81 @@
1
  import gradio as gr
2
  import torch
3
  from transformers import pipeline
4
- from diffusers import StableDiffusionPipeline
 
 
 
 
 
5
 
6
  ARTICLE_GENERATOR_MODEL = "gpt2"
7
  SUMMARIZER_MODEL = "Falconsai/text_summarization"
8
  TITLE_GENERATOR_MODEL = "czearing/article-title-generator"
9
- IMAGE_GENERATOR_MODEL = "prompthero/openjourney-v4"
10
 
11
  DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
12
- print(f"{DEVICE = }")
13
 
 
14
  text_generator = pipeline(
15
  "text-generation", model=ARTICLE_GENERATOR_MODEL, device=DEVICE
16
  )
17
  summarizer = pipeline("summarization", model=SUMMARIZER_MODEL, device=DEVICE)
18
  title_generator = pipeline(
19
- "text2text-generation",
20
- model=TITLE_GENERATOR_MODEL,
21
- device=DEVICE,
22
  )
23
- image_generator = StableDiffusionPipeline.from_pretrained(
24
- IMAGE_GENERATOR_MODEL,
25
- torch_dtype=torch.float16 if DEVICE == "cuda" else torch.float32,
26
- )
27
- image_generator = image_generator.to(DEVICE)
28
 
29
 
30
- def generate_blog_post(query, article_length, title_length, summary_length):
31
- print("Generating article.")
32
- article = text_generator(query, max_length=article_length, num_return_sequences=1)[
33
- 0
34
- ]["generated_text"]
35
- print(f"{article = }")
 
 
 
 
 
 
 
 
 
 
36
 
37
- print("Generating the title.")
38
- title = title_generator(article, max_length=title_length, num_return_sequences=1)[
39
- 0
40
- ]["generated_text"]
41
- print(f"{title = }")
42
 
43
- print("Generating the summary.")
 
44
  summary = summarizer(
45
  article,
46
- max_length=summary_length,
47
- min_length=min(30, summary_length),
48
  do_sample=False,
49
- )[0]["summary_text"]
50
- print(f"{summary = }")
 
 
 
 
 
 
 
51
 
52
- print("Generating the cover image.")
53
- image = image_generator(
54
- summary, num_inference_steps=40, guidance_scale=7.5, width=512, height=512
55
- ).images[0]
56
 
57
- return title, summary, article, image
 
 
 
 
 
 
 
58
 
59
 
60
  with gr.Blocks() as iface:
61
  gr.Markdown("# Blog Post Generator")
62
  gr.Markdown(
63
- "Enter a topic, and I'll generate a blog post with a title, cover image, and optional summary!"
64
  )
65
 
66
  with gr.Row():
@@ -69,56 +84,44 @@ with gr.Blocks() as iface:
69
  with gr.Row():
70
  generate_button = gr.Button("Generate Blog Post", size="sm")
71
 
 
 
 
 
 
 
 
 
 
 
72
  with gr.Row():
73
  with gr.Column(scale=2):
74
  with gr.Blocks() as title_block:
75
  gr.Markdown("## Title")
76
-
77
- with gr.Accordion("Options", open=False):
78
- title_length = gr.Slider(
79
- minimum=10, maximum=50, value=30, step=5, label="Title Length"
80
- )
81
  title_output = gr.Textbox(label="Title")
82
 
83
  with gr.Blocks() as body_block:
84
  gr.Markdown("## Body")
85
-
86
  with gr.Accordion("Options", open=False):
87
- article_length = gr.Slider(
88
- minimum=100,
89
- maximum=1000,
90
  value=500,
91
- step=50,
92
- label="Article Length",
93
  )
94
- article_output = gr.Textbox(label="Article", lines=10)
95
 
96
  with gr.Column(scale=1):
97
- with gr.Blocks() as image_block:
98
- gr.Markdown("## Cover Image")
99
- image_output = gr.Image(label="Cover Image")
100
-
101
  with gr.Blocks() as summary_block:
102
  gr.Markdown("## Summary")
103
- with gr.Accordion("Options", open=False):
104
- summary_length = gr.Slider(
105
- minimum=30,
106
- maximum=200,
107
- value=100,
108
- step=10,
109
- label="Summary Length",
110
- )
111
  summary_output = gr.Textbox(label="Summary", lines=5)
112
 
113
- job = generate_button.click(
114
  generate_blog_post,
115
- inputs=[
116
- input_prompt,
117
- article_length,
118
- title_length,
119
- summary_length,
120
- ],
121
- outputs=[title_output, summary_output, article_output, image_output],
122
  )
123
 
124
- iface.launch()
 
 
1
  import gradio as gr
2
  import torch
3
  from transformers import pipeline
4
+ import logging
5
+
6
+ # Set up logging
7
+ logging.basicConfig(
8
+ level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
9
+ )
10
 
11
  ARTICLE_GENERATOR_MODEL = "gpt2"
12
  SUMMARIZER_MODEL = "Falconsai/text_summarization"
13
  TITLE_GENERATOR_MODEL = "czearing/article-title-generator"
 
14
 
15
  DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
16
+ logging.info(f"Using device: {DEVICE}")
17
 
18
+ logging.info("Initializing models...")
19
  text_generator = pipeline(
20
  "text-generation", model=ARTICLE_GENERATOR_MODEL, device=DEVICE
21
  )
22
  summarizer = pipeline("summarization", model=SUMMARIZER_MODEL, device=DEVICE)
23
  title_generator = pipeline(
24
+ "text2text-generation", model=TITLE_GENERATOR_MODEL, device=DEVICE
 
 
25
  )
26
+ logging.info("Models initialized successfully")
 
 
 
 
27
 
28
 
29
def generate_article(query, max_new_tokens):
    """Generate the blog-post body text for *query* with the text-generation pipeline.

    Args:
        query: Topic / prompt string used to seed the generator.
        max_new_tokens: Maximum number of tokens to generate beyond the prompt.

    Returns:
        The generated article text. NOTE(review): the "text-generation"
        pipeline returns the prompt plus the continuation, so *query* is
        included in the result.
    """
    # Lazy %-style args defer string formatting until the record is emitted,
    # so filtered-out levels (e.g. DEBUG) cost nothing.
    logging.info("Generating article for query: %s", query)
    article = text_generator(
        query,
        max_new_tokens=max_new_tokens,
        num_return_sequences=1,
    )[0]["generated_text"]
    logging.debug("Generated article: %s...", article[:100])
    return article
38
+
39
+
40
def generate_title(article):
    """Generate a title for *article* via the text2text title model.

    Args:
        article: Full article text to derive a title from.

    Returns:
        The generated title string.
    """
    logging.info("Generating title")
    # Lazy %-style args avoid formatting the message when DEBUG is filtered out.
    title = title_generator(article, num_return_sequences=1)[0]["generated_text"]
    logging.debug("Generated title: %s", title)
    return title
45
 
 
 
 
 
 
46
 
47
def generate_summary(article):
    """Summarize *article* with the summarization pipeline.

    Args:
        article: Full article text to summarize.

    Returns:
        The generated summary string.
    """
    logging.info("Generating summary")
    # do_sample=False keeps the summary deterministic for a given article.
    summary = summarizer(article, do_sample=False)[0]["summary_text"]
    # Lazy %-style args avoid formatting the message when DEBUG is filtered out.
    logging.debug("Generated summary: %s", summary)
    return summary
57
+
58
+
59
def generate_blog_post(query, max_new_tokens):
    """Generate a complete blog post (title, summary, body) for a topic.

    Args:
        query: Topic / prompt string the article is generated from.
        max_new_tokens: Token budget forwarded to the article generator.

    Returns:
        A ``(title, summary, article)`` tuple of strings, in the order the
        Gradio outputs expect.
    """
    logging.info("Starting blog post generation")

    # Each helper emits its own "Generating ..." progress log, so this
    # wrapper does not duplicate per-step logging.
    article = generate_article(query, max_new_tokens)
    title = generate_title(article)
    summary = generate_summary(article)

    logging.info("Blog post generation completed")
    return title, summary, article
73
 
74
 
75
  with gr.Blocks() as iface:
76
  gr.Markdown("# Blog Post Generator")
77
  gr.Markdown(
78
+ "Enter a topic, and I'll generate a blog post with a title and summary!"
79
  )
80
 
81
  with gr.Row():
 
84
  with gr.Row():
85
  generate_button = gr.Button("Generate Blog Post", size="sm")
86
 
87
+ gr.Examples(
88
+ examples=[
89
+ "The future of artificial intelligence in healthcare",
90
+ "Top 10 travel destinations for nature lovers",
91
+ "How to start a successful online business in 2024",
92
+ "The impact of climate change on global food security",
93
+ ],
94
+ inputs=input_prompt,
95
+ )
96
+
97
  with gr.Row():
98
  with gr.Column(scale=2):
99
  with gr.Blocks() as title_block:
100
  gr.Markdown("## Title")
 
 
 
 
 
101
  title_output = gr.Textbox(label="Title")
102
 
103
  with gr.Blocks() as body_block:
104
  gr.Markdown("## Body")
105
+ article_output = gr.Textbox(label="Article", lines=30)
106
  with gr.Accordion("Options", open=False):
107
+ max_new_tokens = gr.Slider(
108
+ minimum=20,
109
+ maximum=500,
110
  value=500,
111
+ step=10,
112
+ label="Max New Tokens",
113
  )
 
114
 
115
  with gr.Column(scale=1):
 
 
 
 
116
  with gr.Blocks() as summary_block:
117
  gr.Markdown("## Summary")
 
 
 
 
 
 
 
 
118
  summary_output = gr.Textbox(label="Summary", lines=5)
119
 
120
+ generate_button.click(
121
  generate_blog_post,
122
+ inputs=[input_prompt, max_new_tokens],
123
+ outputs=[title_output, summary_output, article_output],
 
 
 
 
 
124
  )
125
 
126
+ logging.info("Launching Gradio interface")
127
+ iface.queue().launch()