from urllib.parse import urlparse, parse_qs
import json
import os
from datetime import datetime

import requests
import gradio as gr
from bs4 import BeautifulSoup
from openai import OpenAI
from youtube_transcript_api import YouTubeTranscriptApi, TranscriptsDisabled
from youtube_transcript_api.formatters import TextFormatter

# The OpenAI API key is read from the environment; export OPENAI_API_KEY before launching.
openai_api_key = os.environ["OPENAI_API_KEY"]
|
|
def is_youtube_url(url):
    """Return True if the URL points to a YouTube video (standard or short-link form)."""
    try:
        parsed_url = urlparse(url)
        if parsed_url.netloc in ["www.youtube.com", "youtube.com", "m.youtube.com", "youtu.be"]:
            if "youtube.com" in parsed_url.netloc:
                return "v" in parse_qs(parsed_url.query)
            elif "youtu.be" in parsed_url.netloc:
                return len(parsed_url.path.strip("/")) > 0
        return False
    except Exception:
        return False
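
# Illustrative checks (not executed) of the URL forms this helper accepts:
#   is_youtube_url("https://www.youtube.com/watch?v=Um017R5Kr3A")  -> True
#   is_youtube_url("https://youtu.be/Um017R5Kr3A")                 -> True
#   is_youtube_url("https://example.com/watch?v=abc")              -> False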
|
|
|
def get_youtube_transcript(youtube_url):
    """Fetch the transcript of a YouTube video and return it as plain text."""
    try:
        parsed_url = urlparse(youtube_url)
        if "youtu.be" in parsed_url.netloc:
            # Short links carry the video id in the path (youtu.be/<id>).
            video_id = parsed_url.path.strip("/")
        else:
            video_id = parse_qs(parsed_url.query).get("v", [None])[0]

        if not video_id:
            return "Invalid YouTube URL. Please provide a valid URL."

        # Fetch the transcript; a proxies dict can be passed here if requests must go through a proxy.
        transcript = YouTubeTranscriptApi.get_transcript(video_id)

        formatter = TextFormatter()
        formatted_transcript = formatter.format_transcript(transcript)
        return formatted_transcript
    except Exception as e:
        return f"An error occurred: {str(e)}"
|
|
def check_subtitles(video_id):
    """Return True if transcripts/subtitles are available for the given video id."""
    try:
        transcripts = YouTubeTranscriptApi.list_transcripts(video_id)
        print(f"Available transcripts: {transcripts}")
        return True
    except TranscriptsDisabled:
        print("Subtitles are disabled for this video.")
        return False
    except Exception as e:
        print(f"An unexpected error occurred: {e}")
        return False
|
|
# Quick sanity check at startup that the transcript API is reachable for a known video.
video_id = "Um017R5Kr3A"
check_subtitles(video_id)
|
|
client = OpenAI(api_key=openai_api_key) |
|
|
def process_webpage(url):
    """Fetch a URL (or a YouTube transcript) and produce a summary plus reflective perspectives."""
    try:
        if is_youtube_url(url):
            rendered_content = get_youtube_transcript(url)
        else:
            response = requests.get(url)
            soup = BeautifulSoup(response.text, "html.parser")

            # Drop script/style tags, then extract the visible text.
            for tag in soup(["script", "style"]):
                tag.decompose()
            rendered_content = soup.get_text(separator="\n").strip().replace("\n\n", "")

        # Keep the prompts within a manageable size.
        text_content = rendered_content[:2000]

        summary_prompt = (
            f"Summarize the following content:\n{text_content}\n"
            "Please use the language of the original content."
        )
        perspectives_prompt = (
            f"Generate a reflective review for the following content:\n{text_content}\n"
            "Please output the perspectives in no more than 5 very concise bullet points. "
            "Please use the language of the original content."
        )

        summary_response = client.chat.completions.create(
            model="gpt-4o",
            messages=[{"role": "user", "content": summary_prompt}],
            max_tokens=500,
        )
        perspectives_response = client.chat.completions.create(
            model="gpt-4o",
            messages=[{"role": "user", "content": perspectives_prompt}],
            max_tokens=500,
        )

        summary = summary_response.choices[0].message.content.strip()
        perspectives = perspectives_response.choices[0].message.content.strip()

        return rendered_content, summary, perspectives
    except Exception as e:
        return f"Error fetching or processing content: {str(e)}", "", ""
|
|
def chat_with_ai(chat_history, user_input, content):
    """Answer a user question about the fetched content, preserving the running chat history."""
    try:
        messages = [{"role": "system", "content": "You are a helpful assistant."}]

        # Replay the prior conversation so the model has context.
        for user, bot in chat_history:
            messages.append({"role": "user", "content": user})
            messages.append({"role": "assistant", "content": bot})

        messages.append({"role": "user", "content": f"Based on this content: {content}\n\n{user_input}"})

        ai_response = client.chat.completions.create(
            model="gpt-4o",
            messages=messages,
            max_tokens=300,
        )
        reply = ai_response.choices[0].message.content.strip()
        chat_history.append((user_input, reply))
        return chat_history
    except Exception as e:
        return chat_history + [(user_input, f"Error: {str(e)}")]
|
|
def generate_reflection(chat_history):
    """
    Generate a reflection based on the chat history.

    Args:
        chat_history (list of tuples): List of (user_input, ai_reply) pairs.

    Returns:
        str: A reflective summary generated by AI.
    """
    try:
        messages = [{"role": "system", "content": "You are a professional content summarizer. Generate thoughtful reflections."}]

        for user, bot in chat_history:
            messages.append({"role": "user", "content": user})
            messages.append({"role": "assistant", "content": bot})

        messages.append({"role": "user", "content": "Please provide a concise, reflective summary of this conversation."})

        ai_response = client.chat.completions.create(
            model="gpt-4o",
            messages=messages,
            max_tokens=200,
        )
        reflection = ai_response.choices[0].message.content.strip()
        return reflection
    except Exception as e:
        return f"Error generating reflection: {str(e)}"
|
|
def post_to_linkedin(access_token, reflection, visibility="PUBLIC"):
    """
    Post a reflection to LinkedIn.

    Args:
        access_token (str): LinkedIn API access token.
        reflection (str): The content to post.
        visibility (str): Visibility setting ("PUBLIC" or "CONNECTIONS"). Defaults to "PUBLIC".

    Returns:
        str: Confirmation or error message.
    """
    try:
        url = "https://api.linkedin.com/v2/ugcPosts"
        headers = {
            "Authorization": f"Bearer {access_token}",
            "Content-Type": "application/json",
        }
        # Placeholder: replace with the authenticated member's actual LinkedIn person id.
        your_linkedin_person_id = "jay"
        payload = {
            "author": f"urn:li:person:{your_linkedin_person_id}",
            "lifecycleState": "PUBLISHED",
            "visibility": {"com.linkedin.ugc.MemberNetworkVisibility": visibility},
            "specificContent": {
                "com.linkedin.ugc.ShareContent": {
                    "shareCommentary": {"text": reflection},
                    "shareMediaCategory": "NONE",
                }
            },
        }

        response = requests.post(url, headers=headers, json=payload)
        if response.status_code == 201:
            return "Reflection successfully posted to LinkedIn!"
        else:
            return f"Failed to post to LinkedIn. Error: {response.json()}"
    except Exception as e:
        return f"Error posting to LinkedIn: {str(e)}"
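

# Illustrative helper (an assumption, not part of the original flow): the author URN in
# post_to_linkedin needs the member's real id rather than the "jay" placeholder. One common
# approach is to look it up via LinkedIn's profile endpoint, assuming the access token
# carries the required profile scope.
def get_linkedin_person_id(access_token):
    """Sketch: fetch the authenticated member's id for the urn:li:person:{id} author field."""
    response = requests.get(
        "https://api.linkedin.com/v2/me",
        headers={"Authorization": f"Bearer {access_token}"},
    )
    response.raise_for_status()
    return response.json().get("id")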
|
|
# In-memory store of extracted ideas (reset on every restart).
ideas_db = []


def extract_ideas_from_text(text):
    """Naively split the text into sentences and store each as an idea with a timestamp."""
    ideas = text.split(". ")
    for idea in ideas:
        if idea.strip():
            ideas_db.append({"content": idea.strip(), "timestamp": datetime.now()})
    return [idea["content"] for idea in ideas_db]
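
# Illustrative call (assuming ideas_db starts empty):
#   extract_ideas_from_text("Ship the MVP. Write the docs.")
#   -> ["Ship the MVP", "Write the docs."]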
|
|
def prepare_meeting(json_input):
    """Turn a JSON list of meetings into a human-readable preparation brief."""
    try:
        meetings = json.loads(json_input)
        preparations = []
        for meeting in meetings:
            title = meeting.get("title", "No Title")
            time = meeting.get("time", "No Time")
            description = meeting.get("description", "No Description")
            preparations.append(f"Meeting: {title}\nTime: {time}\nDetails: {description}")
        return "\n\n".join(preparations)
    except Exception as e:
        return f"Error processing input: {e}"
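
# Illustrative input for prepare_meeting / the "Curify Projects" tab (values are made up):
# [
#   {"title": "Weekly sync", "time": "2025-01-06 10:00", "description": "Review roadmap"},
#   {"title": "1:1 with mentor", "time": "2025-01-06 14:00"}
# ]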
|
|
with gr.Blocks() as demo:
    gr.Markdown("## Curify: Unified AI Tools for Productivity")

    with gr.Tab("Curify Digest"):
        with gr.Row():
            with gr.Column():
                gr.Markdown("## Render Webpage")
                url_input = gr.Textbox(label="Enter URL")
                fetch_btn = gr.Button("Fetch and Process Webpage")
                text_output = gr.Textbox(label="Webpage Content", lines=7)

            with gr.Column():
                gr.Markdown("## Summary & Perspectives")
                summary_output = gr.Textbox(label="Summary", lines=5)
                perspectives_output = gr.Textbox(label="Perspectives", lines=5)

            with gr.Column():
                gr.Markdown("## Interactive Chatbot")
                chatbot_history_gr = gr.Chatbot(label="Chat History")
                user_input = gr.Textbox(label="Ask a Question", placeholder="Type your question here...")
                chatbot_btn = gr.Button("Send")
                reflection_btn = gr.Button("Generate reflection")
                reflection_output = gr.Textbox(label="Reflections", lines=5)

        fetch_btn.click(
            process_webpage,
            inputs=url_input,
            outputs=[text_output, summary_output, perspectives_output],
        )

        chatbot_btn.click(
            chat_with_ai,
            inputs=[chatbot_history_gr, user_input, text_output],
            outputs=chatbot_history_gr,
        )

        reflection_btn.click(
            generate_reflection,
            inputs=chatbot_history_gr,
            outputs=reflection_output,
        )

    with gr.Tab("Curify Ideas"):
        text_input = gr.Textbox(label="Enter text or ideas")
        extracted_ideas = gr.Textbox(label="Extracted Ideas", interactive=False)
        extract_button = gr.Button("Extract Ideas")

        def process_ideas(text):
            return ", ".join(extract_ideas_from_text(text))

        extract_button.click(process_ideas, inputs=[text_input], outputs=[extracted_ideas])

    with gr.Tab("Curify Projects"):
        json_input = gr.Textbox(label="Enter meeting data (JSON format)")
        prepared_meetings = gr.Textbox(label="Meeting Preparations", interactive=False)
        prepare_button = gr.Button("Prepare Meetings")

        prepare_button.click(prepare_meeting, inputs=[json_input], outputs=[prepared_meetings])

demo.launch(share=True)