# Instagram Scraper — Streamlit app (runs as a Hugging Face Space)
import io
import json
import os
import zipfile

import requests
import streamlit as st
# Load the RapidAPI key from the environment (set as a Hugging Face
# Spaces secret named INSTA_API). May be None if the secret is missing.
API_KEY = os.getenv("INSTA_API")

# RapidAPI endpoint and auth headers for the Instagram scraper service.
API_URL = "https://instagram-scraper-api2.p.rapidapi.com/v1/info"
HEADERS = {
    "x-rapidapi-key": API_KEY,
    "x-rapidapi-host": "instagram-scraper-api2.p.rapidapi.com",
}
# Function to extract all links recursively | |
def extract_links(obj): | |
links = [] | |
if isinstance(obj, dict): | |
for key, value in obj.items(): | |
if isinstance(value, str) and value.startswith("http"): | |
links.append(value) | |
elif isinstance(value, (dict, list)): | |
links.extend(extract_links(value)) | |
elif isinstance(obj, list): | |
for item in obj: | |
links.extend(extract_links(item)) | |
return links | |
# Streamlit page chrome: wide layout suits the JSON dump and link lists.
st.set_page_config(page_title="Instagram Scraper", layout="wide")
st.title("📸 Instagram Scraper API")

# User input: Instagram username to look up (pre-filled with a known account).
username = st.text_input("Enter Instagram Username:", "mrbeast")
if st.button("Fetch Details"):
    with st.spinner("Fetching data..."):
        try:
            # timeout keeps the UI from hanging forever on a stalled API call
            response = requests.get(
                API_URL,
                headers=HEADERS,
                params={"username_or_id_or_url": username},
                timeout=30,
            )
        except requests.RequestException:
            # Treat network failures the same as a bad status code below.
            response = None

    if response is not None and response.status_code == 200:
        data = response.json()
        st.success("✅ Data Retrieved Successfully!")

        # Show the raw JSON response by default.
        st.subheader("📜 JSON Response")
        st.json(data)

        # Pull every http(s) URL out of the arbitrarily nested response.
        links = extract_links(data)

        # Split image URLs from everything else by file extension.
        image_links = [
            link for link in links
            if any(ext in link for ext in (".jpg", ".jpeg", ".png", ".webp"))
        ]
        other_links = [link for link in links if link not in image_links]

        if image_links:
            st.subheader("🖼️ Extracted Images:")
            img_bytes_list = []
            img_filenames = []
            for idx, img_url in enumerate(image_links):
                st.markdown(f"🖼️ [Image {idx + 1}]({img_url})")
                # Download each image; skip failures instead of crashing the app.
                try:
                    img_bytes = requests.get(img_url, timeout=30).content
                except requests.RequestException:
                    continue
                img_bytes_list.append(img_bytes)
                img_filenames.append(f"{username}_image_{idx + 1}.jpg")

            # Bundle all successfully downloaded images into one ZIP archive.
            if img_bytes_list:
                with io.BytesIO() as zip_buffer:
                    with zipfile.ZipFile(zip_buffer, "w") as zip_file:
                        for filename, img_data in zip(img_filenames, img_bytes_list):
                            zip_file.writestr(filename, img_data)
                    zip_buffer.seek(0)
                    st.download_button(
                        "📥 Download All Images",
                        data=zip_buffer,
                        file_name=f"{username}_images.zip",
                        mime="application/zip",
                    )

        if other_links:
            st.subheader("🔗 Other Links:")
            for link in other_links:
                st.markdown(f"[🔗 {link}]({link})")

        # Offer the raw data for download in JSON and plain-text form.
        st.subheader("⬇️ Download Data")
        json_data = json.dumps(data, indent=4)
        # NOTE(review): assumes the API returns a JSON object (dict) at the
        # top level — a list response would raise here; confirm with the API.
        text_data = "\n".join(f"{key}: {value}" for key, value in data.items())
        st.download_button(
            label="Download JSON",
            data=json_data,
            file_name=f"{username}_data.json",
            mime="application/json",
        )
        st.download_button(
            label="Download Text",
            data=text_data,
            file_name=f"{username}_details.txt",
            mime="text/plain",
        )
    else:
        st.error("❌ Failed to retrieve data. Please check the username.")
# Footer: horizontal rule plus attribution caption, rendered on every run.
st.markdown("---")
st.caption("Powered by RapidAPI & Streamlit")