V6
Browse files
app.py
CHANGED
@@ -1,45 +1,59 @@
|
|
1 |
-
import os
|
2 |
-
import openai
|
3 |
-
import gradio as gr
|
4 |
-
import transformers
|
5 |
-
|
6 |
-
# NOTE(review): legacy (removed) line — reads the OpenAI key from the "api"
# environment variable; raises KeyError at import time if it is unset.
openai.api_key = os.environ["api"]
|
7 |
-
|
8 |
-
|
9 |
-
|
10 |
-
# NOTE(review): legacy (removed) line — loads the 2.7B-parameter GPT-Neo
# text-generation pipeline at import time (large download, slow cold start).
generator = transformers.pipeline("text-generation", model="EleutherAI/gpt-neo-2.7B")
|
11 |
|
12 |
-
def generateBlogTopics(prompt1):
    """Produce a numbered list of blog-topic ideas for *prompt1* using the
    module-level GPT-Neo text-generation pipeline."""
    query = "Generate blog topics on: {}. \n \n 1. ".format(prompt1)
    result = generator(query, max_length=100, do_sample=True, temperature=0.7)
    return result[0]["generated_text"]
|
15 |
|
16 |
-
|
17 |
-
|
18 |
-
|
19 |
-
|
20 |
-
def blogSectionExpander(prompt1):
    """Expand a blog-section outline in *prompt1* into a longer write-up using
    the module-level GPT-Neo text-generation pipeline."""
    query = "Expand the blog section in to a detailed professional, witty and clever explanation.\n\n {}".format(prompt1)
    result = generator(query, max_length=400, do_sample=True, temperature=0.7)
    return result[0]["generated_text"]
|
23 |
-
|
24 |
-
# NOTE(review): legacy (removed) Gradio 2.x widget definitions.
# Multi-line prompt box shared by the legacy generator functions.
input_text = gr.inputs.Textbox(lines=5, label="Enter prompt text here:")

# Output boxes for the topic generator and the section expander.
title_section_output = gr.outputs.Textbox(label="Title & Sections")
section_expander_output = gr.outputs.Textbox(label="Section Expander")
|
28 |
|
29 |
-
|
30 |
-
|
31 |
-
inputs=input_text,
|
32 |
-
outputs=title_section_output,
|
33 |
-
title="Blog Title & Sections Generator",
|
34 |
-
description="Generate high level sections for your blog topic",
|
35 |
-
live=False
|
36 |
-
).launch(share=True)
|
37 |
|
38 |
-
|
39 |
-
|
40 |
-
|
41 |
-
|
42 |
-
|
43 |
-
|
44 |
-
|
45 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
|
|
|
|
|
|
|
2 |
|
3 |
+
import os
import re

import gradio as gr
import openai
from transformers import pipeline, set_seed
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
7 |
|
8 |
+
# Set up OpenAI API credentials.
# Reads the key from the "api" environment variable (Space secret);
# raises KeyError at import time if it is not configured.
openai.api_key = os.environ["api"]
|
|
|
|
|
|
|
|
|
|
|
|
|
10 |
|
11 |
+
# Set up Hugging Face pipeline for summarization.
# Loaded once at import time; t5-base is downloaded on first run, so the
# initial cold start of the app is slow.
summarizer = pipeline("summarization", model="t5-base", tokenizer="t5-base")
|
13 |
+
|
14 |
+
# Define the function that generates the blog article
def generate_article(topic):
    """Generate a five-section blog post about *topic*.

    Drafts the article with OpenAI's completion API, splits it into up to
    five roughly equal chunks, and uses the module-level T5 ``summarizer``
    pipeline to produce a subheading for each chunk.

    Returns the post as a Markdown-formatted string.
    Raises openai.error.OpenAIError on API failure and KeyError at import
    time if the "api" environment variable is unset.
    """
    # Use OpenAI's GPT-3 to generate the article.
    prompt = f"Write a blog post about {topic} with 5 different sections."
    response = openai.Completion.create(
        engine="davinci",
        prompt=prompt,
        max_tokens=2048,
        n=1,
        stop=None,
        temperature=0.5,
    )
    article = response.choices[0].text

    # Collapse all whitespace (newlines included) into single spaces.
    # A raw string is required: '\s+' is an invalid escape sequence and
    # emits a SyntaxWarning on modern Python.
    article = re.sub(r'\s+', ' ', article).strip()

    # Split the article into at most 5 roughly equal sections.
    # max(1, ...) prevents a zero step (range() raises ValueError) when the
    # model returns fewer than 5 characters; [:5] drops the trailing
    # remainder chunk the slicing would otherwise produce as a 6th section.
    section_length = max(1, len(article) // 5)
    sections = [article[i:i + section_length]
                for i in range(0, len(article), section_length)][:5]

    # Set seed for reproducibility in Hugging Face spaces.
    set_seed(42)

    # Summarize each section to generate subheadings.
    subheadings = [
        summarizer(section, max_length=30, min_length=10, do_sample=False)[0]['summary_text']
        for section in sections
    ]

    # Combine the sections and subheadings into a formatted blog post.
    # zip() (rather than a hard-coded range(5)) avoids an IndexError when
    # the article was too short to yield 5 sections.
    blog_post = f"# {topic}\n\n"
    for heading, section in zip(subheadings, sections):
        blog_post += f"## {heading}\n\n{section}\n\n"

    return blog_post
|
48 |
+
|
49 |
+
# Set up the Gradio interface.
# NOTE(review): the original passed the label string positionally to
# gr.inputs.Textbox, whose first parameter is `lines` (an int) in legacy
# Gradio — the string must be passed as label= instead.
iface = gr.Interface(
    generate_article,
    inputs=gr.inputs.Textbox(lines=2, label="Enter a topic for your blog post"),
    outputs=gr.outputs.HTML(),
    title="Blog Post Generator",
    description="Generate a blog post on a given topic with 5 different sections.",
)

# Launch the interface with a public share link.
iface.launch(share=True)
|