ui and models updated
Files changed:
- README.md          +1  -57
- app.py             +66 -33
- requirements.txt   +0  -1
README.md
CHANGED
@@ -9,60 +9,4 @@ python_version: 3.11
 app_file: app.py
 pinned: false
 license: mit
----
-
-# AI Blog Post Generator
-
-This is a web application that generates complete blog posts, including titles, content, cover images, and summaries based on user input. The app is built using Gradio and is designed to be deployed on Hugging Face Spaces.
-
-## Features
-
-- Generate blog post title using a specialized title generation model
-- Generate blog post content using GPT-2
-- Create a cover image for the blog post using Stable Diffusion
-- Summarize the generated article using BART
-- Simple and intuitive user interface
-
-## Models Used
-
-- Title Generation: fabiochiu/t5-small-medium-title-generation
-- Text Generation: gpt2
-- Image Generation: runwayml/stable-diffusion-v1-5
-- Summarization: facebook/bart-large-cnn
-
-## Setup and Deployment
-
-1. Fork this repository
-2. Set up a new Hugging Face Space (make sure to use a GPU-enabled instance)
-3. Configure the GitHub Actions workflow by adding your Hugging Face token as a secret named `HF_TOKEN`
-4. Update the `deploy.yml` file with your Hugging Face username and space name
-5. Push changes to the `main` branch to trigger automatic deployment
-
-## Local Development
-
-To run the app locally:
-
-1. Clone the repository
-2. Install the required packages: `pip install -r requirements.txt`
-3. Run the app: `python app.py`
-
-Note: Running multiple large language models and Stable Diffusion locally requires significant computational resources. A GPU is strongly recommended for reasonable performance.
-
-## Usage
-
-1. Enter a topic or idea for your blog post in the input text box
-2. Click the "Submit" button
-3. Wait for the models to generate the title, article, cover image, and summary
-4. Review the generated content and use as desired
-
-## Contributing
-
-Feel free to open issues or submit pull requests to improve the application. We welcome contributions of all kinds, including bug fixes, feature additions, and documentation improvements.
-
-## License
-
-This project is open source and available under the MIT License.
-
----
-
-For more information on Hugging Face Spaces configuration, please refer to the [Spaces Configuration Reference](https://huggingface.co/docs/hub/spaces-config-reference).
+---
app.py
CHANGED
@@ -1,48 +1,81 @@
(Removed lines whose original content was not preserved in this view are abbreviated as `-…`.)

 import gradio as gr
-from transformers import pipeline
-from diffusers import StableDiffusionPipeline
 import torch
+from diffusers import StableDiffusionPipeline
+from transformers import pipeline
 
-# Initialize the pipelines
 device = "cuda" if torch.cuda.is_available() else "cpu"
 
-text_generator = pipeline(
-…
+text_generator = pipeline(
+    "text-generation", model="openchat/openchat-3.5-0106", device=device
+)
+summarizer = pipeline(
+    "summarization", model="sshleifer/distilbart-cnn-12-6", device=device
+)
+title_generator = pipeline(
+    "text2text-generation",
+    model="fabiochiu/t5-small-medium-title-generation",
+    device=device,
+)
 
-
-stable_diffusion = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16)
+stable_diffusion = StableDiffusionPipeline.from_pretrained("prompthero/openjourney-v4")
 stable_diffusion.to(device)
 
-
+
 def generate_blog_post(query):
     # Generate the article
-…
+    print("Generating article.")
+    article = text_generator(query, max_length=500, num_return_sequences=1)[0][
+        "generated_text"
+    ]
+    print(f"{article = }")
+
     # Generate a title for the article
-…
+    print("Generating the title.")
+    title = title_generator(article, max_length=30, num_return_sequences=1)[0][
+        "generated_text"
+    ]
+    print(f"{title = }")
+
     # Generate a cover image using Stable Diffusion
-…
+    print("Generating the cover.")
+    cover = stable_diffusion(title, num_inference_steps=20, guidance_scale=7.5).images[
+        0
+    ]
+
     # Generate a summary of the article
-…
-)
+    print("Generating the summary.")
+    summary = summarizer(article, max_length=100, min_length=30, do_sample=False)[0][
+        "summary_text"
+    ]
+    print(f"{summary = }")
+
+    return title, cover, summary, article
+
+
+with gr.Blocks() as iface:
+    gr.Markdown("# Blog Post Generator")
+    gr.Markdown(
+        "Enter a topic, and I'll generate a blog post with a title, cover image, and summary!"
+    )
+
+    with gr.Row():
+        topic_input = gr.Textbox(lines=2, placeholder="Enter your blog post topic...")
+
+    generate_button = gr.Button("Generate Blog Post", size="sm")
+
+    with gr.Row():
+        with gr.Column(scale=2):
+            title_output = gr.Textbox(label="Title")
+            article_output = gr.Textbox(label="Article", lines=10)
+
+        with gr.Column(scale=1):
+            cover_output = gr.Image(label="Cover")
+            summary_output = gr.Textbox(label="Summary", lines=5)
+
+    generate_button.click(
+        generate_blog_post,
+        inputs=topic_input,
+        outputs=[title_output, cover_output, summary_output, article_output],
+    )
 
-# Launch the app
 iface.launch()
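The previous revision loaded Stable Diffusion in half precision (`torch_dtype=torch.float16`), while the new code loads `prompthero/openjourney-v4` in full precision, which roughly doubles its memory footprint. As a minimal sketch (an option, not part of this commit), half precision could be restored only when a GPU is available:

    import torch
    from diffusers import StableDiffusionPipeline

    device = "cuda" if torch.cuda.is_available() else "cpu"
    # float16 roughly halves VRAM use but is only appropriate on GPU; keep float32 on CPU.
    dtype = torch.float16 if device == "cuda" else torch.float32

    stable_diffusion = StableDiffusionPipeline.from_pretrained(
        "prompthero/openjourney-v4", torch_dtype=dtype
    )
    stable_diffusion.to(device)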
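For reference, a sketch of calling `generate_blog_post` directly, for example in a notebook or REPL where the pipeline and function definitions above have been evaluated without calling `iface.launch()` (which blocks in a script); the topic string and filename below are only illustrative:

    # Sketch only: assumes the pipelines and generate_blog_post above are already defined.
    title, cover, summary, article = generate_blog_post("open-source image generation tools")

    print(title)
    print(summary)
    cover.save("cover.png")  # the cover is a PIL image produced by the diffusers pipeline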
requirements.txt
CHANGED
@@ -1,6 +1,5 @@
 gradio==4.39.0
 transformers==4.43.2
-torch==2.4.0
 diffusers==0.29.2
 accelerate==0.33.0
 sentencepiece==0.2.0
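Note that `app.py` still imports `torch` even though it is no longer pinned here; presumably it is expected to arrive transitively (`accelerate`, for instance, depends on torch) or to be present in the Space image. A quick post-install sanity check, as a sketch:

    # Sanity-check sketch: torch should still be importable after
    # `pip install -r requirements.txt`, even though it is no longer pinned,
    # because accelerate lists it as a dependency.
    import torch

    print(torch.__version__)
    print(torch.cuda.is_available())  # True on a GPU-enabled Space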