# Blog Generator with AI — Streamlit app that researches an AI news link with
# CrewAI agents, drafts a blog post, and offers it as a downloadable PDF.
# --- Dependencies ----------------------------------------------------------
import os
import textwrap
from io import BytesIO

import streamlit as st
from crewai import Agent, Task, Crew, Process
from crewai_tools import WebsiteSearchTool
from dotenv import load_dotenv
from fpdf import FPDF

# Load environment variables (e.g. a default OPENAI_API_KEY) from a .env file.
load_dotenv()

# --- Streamlit page & sidebar inputs ---------------------------------------
st.title("Blog Generator with AI")
st.sidebar.header("Input")

# API key is entered per-session and masked in the UI; it is applied to the
# environment only when the user actually clicks "Generate Blog".
user_api_key = st.sidebar.text_input("Enter your OpenAI API Key", type="password")
news_link = st.sidebar.text_input("Enter News Link", "")
generate_blog = st.sidebar.button("Generate Blog")
def _build_blog_pdf(content: str) -> bytes:
    """Render the blog text into a simple one-column PDF and return its raw bytes.

    Works with both PyFPDF 1.x (``output(dest='S')`` returns ``str``) and
    fpdf2 (returns ``bytearray``).
    """
    pdf = FPDF()
    pdf.add_page()
    pdf.set_auto_page_break(auto=True, margin=15)

    # Title, centered.
    pdf.set_font("Arial", "B", 16)
    pdf.cell(0, 10, "Generated Blog", ln=True, align='C')
    pdf.ln(10)

    # Body text. The built-in core fonts only support latin-1, so replace
    # anything outside it rather than crashing on e.g. curly quotes/emoji.
    pdf.set_font("Arial", size=12)
    safe_content = content.encode("latin-1", "replace").decode("latin-1")
    for paragraph in safe_content.split('\n'):
        if paragraph.strip():  # skip blank lines between paragraphs
            # multi_cell wraps long lines itself; no manual textwrap needed.
            pdf.multi_cell(0, 10, paragraph)
            pdf.ln(5)  # small gap between paragraphs

    raw = pdf.output(dest="S")  # str on PyFPDF 1.x, bytearray on fpdf2
    return raw.encode("latin-1") if isinstance(raw, str) else bytes(raw)


if generate_blog:
    # Guard clauses: validate inputs before doing any work.
    if not user_api_key:
        st.error("Please provide your OpenAI API Key.")
    elif not news_link:
        st.error("Please enter a valid news link.")
    else:
        # Set the OpenAI API key dynamically for the CrewAI agents.
        os.environ["OPENAI_API_KEY"] = user_api_key
        st.info("Fetching and processing the news...")

        # Tool shared by both agents for reading the news page.
        web_tool = WebsiteSearchTool()

        # Agent that digs through the news article and extracts the facts.
        blog_researcher = Agent(
            role='AI Blog Researcher from News Website',
            goal='Get the relevant latest AI related news from News Website',
            verbose=True,
            memory=True,
            backstory=("Expert in understanding videos in AI, Data Science, Machine Learning, and GEN AI."),
            tools=[web_tool],
            allow_delegation=True
        )

        # Agent that turns the research into a readable blog post.
        blog_writer = Agent(
            role='Blog Writer',
            goal='Narrate compelling tech stories about the News Article and add the reference links at the end of the blog.',
            verbose=True,
            memory=True,
            backstory=(
                "With a flair for simplifying complex topics, you craft engaging narratives that captivate and educate,"
                "bringing new discoveries to light in an accessible manner."
            ),
            tools=[web_tool],
            allow_delegation=False
        )

        research_task = Task(
            description=(
                "Research Task: "
                "1. Visit the provided news link and analyze the content thoroughly\n"
                "2. Extract key information about the AI-related news or development\n"
                "3. Identify main points, technological aspects, and potential impact\n"
                "4. Gather any relevant background information or context\n"
                "5. Note any quotes, statistics, or specific technical details"
            ),
            expected_output=(
                "A detailed research report containing:\n"
                "- Main story/development summary\n"
                "- Technical aspects and innovations\n"
                "- Impact and implications\n"
                "- Supporting facts and context\n"
                "- Notable quotes or statistics"
            ),
            agent=blog_researcher
        )

        write_task = Task(
            description=(
                "Writing Task:\n"
                "1. Use the research findings to craft an engaging blog post\n"
                "2. Structure the content with clear introduction, body, and conclusion\n"
                "3. Explain technical concepts in an accessible manner\n"
                "4. Include relevant examples and real-world applications\n"
                "5. Add reference links at the end"
            ),
            expected_output=(
                "A well-structured blog post that includes:\n"
                "- Engaging headline and introduction\n"
                "- Clear explanation of the AI development\n"
                "- Technical details explained simply\n"
                "- Impact and future implications\n"
                "- Reference links"
            ),
            agent=blog_writer
        )

        # Research first, then writing — strictly in order.
        crew = Crew(
            agents=[blog_researcher, blog_writer],
            tasks=[research_task, write_task],
            process=Process.sequential,
            verbose=True
        )

        try:
            result = crew.kickoff(inputs={'topic': news_link})

            # The writer runs last, so its output is the finished blog.
            if hasattr(result, 'tasks_output'):
                blog_content = result.tasks_output[-1].raw
            else:
                blog_content = str(result)

            # Persist across Streamlit reruns: without this, the action buttons
            # below would never fire because clicking any button reruns the
            # script with generate_blog == False, discarding the result.
            st.session_state["blog_content"] = blog_content
        except Exception as e:
            st.error(f"An error occurred during the process: {str(e)}")

# Render the result and its actions OUTSIDE the generate branch so they survive
# the rerun triggered by clicking a button.
if st.session_state.get("blog_content"):
    st.subheader("Generated Blog")
    st.text_area("Blog Content", value=st.session_state["blog_content"], height=400)

    st.subheader("Actions")
    try:
        # Build the PDF eagerly and expose it via a direct download button; a
        # nested "Save" st.button can never work here (see note above).
        pdf_bytes = _build_blog_pdf(st.session_state["blog_content"])
        st.download_button(
            label="Download Blog as PDF",
            data=pdf_bytes,
            file_name="generated_blog.pdf",
            mime="application/pdf"
        )
    except Exception as e:
        st.error(f"Failed to save blog as PDF: {str(e)}")
        st.exception(e)  # st.error() does not accept exc_info

    if st.button("Reject Blog"):
        st.session_state.pop("blog_content", None)
        st.warning("Blog rejected. No file was saved.")