import streamlit as st
from crewai import Agent, Task, Crew, Process
from crewai_tools import WebsiteSearchTool
from dotenv import load_dotenv
import os
# Load environment variables
load_dotenv()
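# A .env file next to this script can pre-set credentials (assumed layout):
#   OPENAI_API_KEY=sk-...
# The sidebar field below overrides it for the current session.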
# Streamlit App
st.title("Blog Generator with AI")
st.sidebar.header("Input")
# OpenAI API key input
user_api_key = st.sidebar.text_input("Enter your OpenAI API Key", type="password")
news_link = st.sidebar.text_input("Enter News Link", "")
generate_blog = st.sidebar.button("Generate Blog")
if generate_blog:
    if user_api_key:
        # Set the OpenAI API key dynamically so downstream clients pick it up
        os.environ["OPENAI_API_KEY"] = user_api_key
        if news_link:
            st.info("Fetching and processing the news...")

            # Define tools
            web_tool = WebsiteSearchTool()
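            # Note: WebsiteSearchTool performs RAG-style semantic search over page
            # content. To restrict it to the submitted article, the tool can be
            # scoped at construction time, e.g.:
            #   web_tool = WebsiteSearchTool(website=news_link)
            # (scoping shown as an assumption; verify against your crewai_tools version)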
            # Create agents
            blog_researcher = Agent(
                role='AI Blog Researcher from News Website',
                goal='Get the latest relevant AI-related news from the news website',
                verbose=True,
                memory=True,
                backstory=("Expert in understanding news coverage of AI, Data Science, Machine Learning, and Gen AI."),
                tools=[web_tool],
                allow_delegation=True
            )
            blog_writer = Agent(
                role='Blog Writer',
                goal='Narrate compelling tech stories about the news article and add the reference links at the end of the blog.',
                verbose=True,
                memory=True,
                backstory=(
                    "With a flair for simplifying complex topics, you craft engaging narratives that captivate and educate, "
                    "bringing new discoveries to light in an accessible manner."
                ),
                tools=[web_tool],
                allow_delegation=False
            )
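            # allow_delegation=True lets the researcher hand work off to other
            # agents in the crew; the writer keeps it False so it only writes.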
            # Define tasks
            research_task = Task(
                description=(
                    "Identify the News Article and get detailed information about the News from the website."
                ),
                expected_output='A comprehensive 3-paragraph-long report based on the {topic} of News.',
                tools=[web_tool],
                agent=blog_researcher,
            )
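            # CrewAI interpolates {topic} in task descriptions and expected
            # outputs from the inputs dict passed to crew.kickoff() below
            # (here, the news link itself).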
            write_task = Task(
                description=(
                    "Get the info from the News Website on the topic {topic}."
                ),
                expected_output='Summarize the info from the News website on the topic {topic} and create the content for the blog.',
                tools=[web_tool],
                agent=blog_writer,
                async_execution=False,
                output_file="",  # No file output; set a path such as "blog_post.md" to also write the blog to disk
            )
            # Create crew
            crew = Crew(
                agents=[blog_researcher, blog_writer],
                tasks=[research_task, write_task],
                process=Process.sequential,
                memory=True,
                cache=True,
                max_rpm=100,
                share_crew=True
            )
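            # Process.sequential runs the tasks in list order, so the writer
            # receives the researcher's report as context for the blog draft.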
            # Kick off the process and fetch the result
            try:
                result = crew.kickoff(inputs={'topic': news_link})

                # Inspect result attributes for debugging
                st.subheader("Result Attributes")
                st.write(dir(result))

                # Access task outputs
                try:
                    task_outputs = result.tasks_output
                except AttributeError:
                    st.error("The result object does not have 'tasks_output'.")
                    task_outputs = []

                # Display task outputs
                st.subheader("Task Outputs")
                for idx, task_output in enumerate(task_outputs):
                    st.write(f"Task {idx + 1}:")
                    # TaskOutput objects are not JSON-serializable, so show the raw text
                    st.write(getattr(task_output, "raw", task_output))

                # Extract the blog content from the writer task (second task)
                try:
                    blog_content = task_outputs[1].raw
                except (IndexError, AttributeError):
                    blog_content = "Unable to fetch blog content from the task outputs."

                # Display the blog content
                st.subheader("Generated Blog")
                st.text_area("Blog Content", value=blog_content, height=400)
                # Persist the blog so the Save/Reject buttons survive a rerun
                st.session_state["blog_content"] = blog_content
            except Exception as e:
                st.error(f"An error occurred during the process: {e}")
        else:
            st.error("Please enter a valid news link.")
    else:
        st.error("Please provide your OpenAI API Key.")

# Actions to save or reject the blog. This block lives at the top level:
# clicking any button reruns the script with generate_blog == False, so
# buttons nested inside the generate branch would never see their click.
if "blog_content" in st.session_state:
    st.subheader("Actions")
    save_blog = st.button("Save Blog")
    reject_blog = st.button("Reject Blog")
    if save_blog:
        with open("saved_blog.txt", "w", encoding="utf-8") as f:
            f.write(st.session_state["blog_content"])
        st.success("Blog file saved successfully!")
    if reject_blog:
        st.warning("Blog rejected. No file was saved.")