import gradio as gr
import requests
from bs4 import BeautifulSoup
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize

# Download the NLTK data needed for tokenization and stopword removal.
nltk.download('punkt')
nltk.download('stopwords')
def get_text_from_url(url):
    """Fetch a page and return its visible text, or an empty string on failure."""
    try:
        response = requests.get(url, timeout=10)
        response.raise_for_status()
        soup = BeautifulSoup(response.text, 'html.parser')
        return soup.get_text()
    except requests.RequestException:
        return ""
def extract_keywords(text):
    """Return the set of lowercase alphanumeric tokens that are not English stopwords."""
    tokens = word_tokenize(text.lower())
    words = [word for word in tokens if word.isalnum()]
    stop_words = set(stopwords.words('english'))
    return {word for word in words if word not in stop_words}
def compare_keywords(url_a, url_b):
    """List the keywords found on site B but not on site A, one per line."""
    text_a = get_text_from_url(url_a)
    text_b = get_text_from_url(url_b)
    if not text_a or not text_b:
        return "❌ Failed to fetch one or both websites. Please check the URLs."
    keywords_a = extract_keywords(text_a)
    keywords_b = extract_keywords(text_b)
    # Set difference: keywords the competitor (B) uses that your site (A) does not.
    diff_keywords = keywords_b - keywords_a
    return "\n".join(sorted(diff_keywords))
interface = gr.Interface(
    fn=compare_keywords,
    inputs=[
        gr.Textbox(label="Your Website (A)"),
        gr.Textbox(label="Competitor Website (B)")
    ],
    outputs="text",
    title="🔍 Competitor Keyword Finder",
    description="Enter your website and a competitor's. This tool finds keywords the competitor uses but you don’t."
)

interface.launch()
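
# A minimal sanity check of the keyword logic, kept as comments so it does not
# run when the Space starts (interface.launch() blocks above). The sample
# strings are hypothetical, not taken from any real site; they only illustrate
# how the set difference surfaces competitor-only keywords, assuming the NLTK
# data downloaded above is available:
#
#   sample_a = "We sell handmade leather wallets and belts."
#   sample_b = "We sell handmade leather wallets, belts, and vegan leather bags."
#   extract_keywords(sample_b) - extract_keywords(sample_a)
#   # -> {'bags', 'vegan'}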