"""AV Summary Generator — Gradio app that scrapes an article URL, summarises it
with an LLM, and renders a short narrated promo video with generated slides."""
import os
import random
import subprocess
import tempfile
import textwrap
from urllib.request import urlretrieve

import ffmpeg
import gradio as gr
import requests
from bs4 import BeautifulSoup
from gtts import gTTS
from langchain.chains import LLMChain
from langchain.chat_models import ChatOpenAI
from langchain.prompts import PromptTemplate
from PIL import Image, ImageDraw, ImageFont, ImageEnhance
# Unsplash API access key.
# SECURITY(review): this key was hard-coded in the source — a leaked credential.
# It is now read from the environment, falling back to the original value so
# existing deployments keep working; rotate the key and set UNSPLASH_KEY.
UNSPLASH_KEY = os.environ.get("UNSPLASH_KEY", "-7tFgMCy_pwrouZrC8mmEIBpyskEyP25e3_Y4vWSvBs")

# Chat model used for summarisation (requires OPENAI_API_KEY in the environment).
llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0.3)

# Prompt asking for a short promotional-style summary of the scraped article text.
summary_prompt = PromptTemplate.from_template("""
Provide a crisp, promotional-style summary (under 100 words) of the following:
{text}
Summary:
""")

summary_chain = LLMChain(llm=llm, prompt=summary_prompt)
def extract_main_content(url):
    """Fetch *url* and return the main article text, or None if nothing usable.

    Strips boilerplate containers, then keeps up to 20 paragraphs whose text
    is longer than 60 characters (short <p> tags are usually navigation chrome).
    """
    resp = requests.get(url, timeout=10)
    # Surface HTTP errors (404/500 pages) instead of summarising an error page.
    resp.raise_for_status()
    soup = BeautifulSoup(resp.content, "html.parser")
    # Remove non-content tags so find_all("p") only sees article paragraphs.
    for tag in soup(["nav", "header", "footer", "aside", "script", "style", "noscript"]):
        tag.decompose()
    paras = [p.get_text() for p in soup.find_all("p") if len(p.get_text()) > 60]
    return "\n".join(paras[:20]) or None
def fetch_unsplash_image(query):
    """Return the URL of a random landscape Unsplash photo matching *query*.

    Falls back to a static stock image when the API call fails or the
    response does not have the expected shape.
    """
    url = f"https://api.unsplash.com/photos/random?query={query}&orientation=landscape&client_id={UNSPLASH_KEY}"
    try:
        # Timeout added: the original call could hang the whole request.
        resp = requests.get(url, timeout=10).json()
        return resp['urls']['regular']
    except (requests.RequestException, ValueError, KeyError, TypeError):
        # Narrowed from a bare `except:` so genuine bugs (NameError, etc.)
        # are no longer silently swallowed; network/JSON/shape errors fall
        # back to a neutral gradient background.
        return "https://img.freepik.com/free-photo/blue-abstract-gradient-wave-wallpaper_53876-102605.jpg"
# Remote artwork composited onto every slide: the C# Corner logo (bottom-left)
# plus a pool of decorative illustrations, one chosen at random per slide.
ASSETS = {
    # Brand logo overlay.
    "logo": "https://huggingface.co/spaces/csccorner/Link-to-video/resolve/main/csharplogo.png",
    # Themed illustrations; create_slides picks one at random for each frame.
    "graphics": [
        "https://img.freepik.com/free-vector/startup-launch-concept-with-rocket_23-2147866180.jpg",
        "https://img.freepik.com/free-vector/artificial-intelligence-concept-illustration_114360-7307.jpg",
        "https://img.freepik.com/free-vector/business-goal-achievement-banner_33099-1687.jpg",
    ],
}
def download_asset(url):
    """Download *url* to a unique temporary file and return the local path.

    The temp file's extension is derived from the URL (query string stripped,
    falling back to ".png") so image-format detection downstream stays sane.
    Uses mkstemp and closes the descriptor immediately — the original
    NamedTemporaryFile(delete=False).name idiom leaked an open file handle.
    """
    suffix = os.path.splitext(url.split("?", 1)[0])[1] or ".png"
    fd, local_path = tempfile.mkstemp(suffix=suffix)
    os.close(fd)  # urlretrieve opens the path itself; keep no dangling fd
    urlretrieve(url, local_path)
    return local_path
def create_slides(text, duration, output_folder, max_lines=6):
    """Render *text* as a series of 1280x720 slide PNGs in *output_folder*.

    Each slide shows up to *max_lines* wrapped lines of text, centred over a
    darkened Unsplash background, with the logo bottom-left and a random
    illustration top-right.  Total display time across slides is *duration*.

    Returns a list of (frame_path, seconds_to_display) tuples.
    """
    font_path = "/usr/share/fonts/truetype/dejavu/DejaVuSans-Bold.ttf"
    try:
        font = ImageFont.truetype(font_path, 48)
    except OSError:
        # Host may not ship DejaVu; degrade to PIL's builtin font instead of
        # crashing the whole request.
        font = ImageFont.load_default()
    logo_path = download_asset(ASSETS["logo"])

    chunks = textwrap.wrap(text, width=36)
    slides = ["\n".join(chunks[i:i + max_lines]) for i in range(0, len(chunks), max_lines)]
    if not slides:
        # Empty/whitespace-only text previously caused ZeroDivisionError below.
        slides = [text or " "]
    per_slide_time = duration / len(slides)

    slide_paths = []
    for i, slide_text in enumerate(slides):
        # Use a random word from the slide as the Unsplash search term;
        # guard against a slide with no words.
        words = slide_text.split() or ["abstract"]
        query = random.choice(words)
        bg_path = download_asset(fetch_unsplash_image(query))
        graphic_path = download_asset(random.choice(ASSETS["graphics"]))

        bg = Image.open(bg_path).resize((1280, 720)).convert("RGBA")
        # Darken the photo so the white text stays readable.
        bg = ImageEnhance.Brightness(bg).enhance(0.3)
        draw = ImageDraw.Draw(bg)

        lines = slide_text.split("\n")
        lines = [line.encode('utf-8', 'replace').decode('utf-8') for line in lines]
        # Vertically centre the text block (20px line spacing); getbbox is
        # computed once per line instead of twice.
        line_heights = [font.getbbox(line)[3] - font.getbbox(line)[1] for line in lines]
        total_height = sum(line_heights) + (len(lines) - 1) * 20
        y = max((720 - total_height) // 2, 20)
        for line, h in zip(lines, line_heights):
            w = font.getbbox(line)[2] - font.getbbox(line)[0]
            draw.text(((1280 - w) // 2, y), line, font=font, fill="white")
            y += h + 20

        # Logo bottom-left, keeping its aspect ratio at 160px width.
        logo = Image.open(logo_path).convert("RGBA")
        logo = logo.resize((160, int(160 * logo.size[1] / logo.size[0])))
        bg.paste(logo, (30, 630 - logo.size[1]), logo)

        # Decorative illustration top-right.
        graphic = Image.open(graphic_path).convert("RGBA")
        graphic = graphic.resize((200, 200))
        bg.paste(graphic, (1040, 40), graphic)

        frame_path = os.path.join(output_folder, f"slide_{i}.png")
        bg.convert("RGB").save(frame_path)
        slide_paths.append((frame_path, per_slide_time))
    return slide_paths
def url_to_av_summary(url, duration):
    """Turn an article *url* into (summary_text, video_path).

    Pipeline: scrape article -> LLM summary -> gTTS narration -> slide frames
    -> ffmpeg concat into video -> mux audio + video.  Returns the summary
    string and the final mp4 path, or (error_message/summary, None) when a
    stage fails.
    """
    content = extract_main_content(url)
    if not content:
        return "Failed to extract article content.", None
    # Cap the LLM input at 3000 chars and the spoken summary at 300.
    summary = summary_chain.invoke({"text": content[:3000]})["text"].replace('"', '')[:300]

    audio_path = tempfile.NamedTemporaryFile(delete=False, suffix=".mp3").name
    gTTS(text=summary).save(audio_path)

    frame_dir = tempfile.mkdtemp()
    slides = create_slides(summary, duration, frame_dir)

    # ffmpeg concat-demuxer list: each frame with its display duration; the
    # final frame is repeated (without a duration) as the demuxer requires.
    concat_txt_path = os.path.join(frame_dir, "slides.txt")
    with open(concat_txt_path, "w") as f:
        for path, t in slides:
            f.write(f"file '{path}'\n")
            f.write(f"duration {t}\n")
        f.write(f"file '{slides[-1][0]}'\n")

    concat_img = os.path.join(frame_dir, "video_input.mp4")
    proc = subprocess.run([
        "ffmpeg", "-y", "-f", "concat", "-safe", "0", "-i", concat_txt_path,
        "-vsync", "vfr", "-pix_fmt", "yuv420p", concat_img
    ], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
    if proc.returncode != 0:
        # Previously the exit status was ignored and a missing file crashed
        # later with an opaque error; fail here with an interface-compatible
        # (summary, None) result instead.
        return summary, None

    # Mux the slide video with the narration track.
    final_video = tempfile.NamedTemporaryFile(delete=False, suffix=".mp4").name
    video_input = ffmpeg.input(concat_img)
    audio_input = ffmpeg.input(audio_path)
    ffmpeg.output(video_input, audio_input, final_video,
                  vcodec='libx264', acodec='aac', pix_fmt='yuv420p', shortest=None
                  ).run(overwrite_output=True, quiet=True)
    return summary, final_video
# Gradio UI: article URL + duration in, summary text + rendered video out.
iface = gr.Interface(
    fn=url_to_av_summary,
    inputs=[gr.Textbox(label="Article URL"),
            gr.Radio([5, 10], label="Video Duration (sec)", value=5)],
    outputs=[gr.Textbox(label="Summary"),
             gr.Video(label="Generated AV Summary")],
    title="🎮 AV Summary Generator (Visual Promo Style)",
    description="Generates a 5/10 sec video summary from article URL with clean typography, visuals, C# Corner logo, and themed illustrations.",
)

if __name__ == '__main__':
    iface.launch()