import streamlit as st
import requests
import json
import os
import zipfile
import io
# Load API Key from Hugging Face Secrets
API_KEY = os.getenv("INSTA_API")
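# INSTA_API is expected to be configured as a secret in the Space settings;
# it is exposed to the app as an environment variable at runtime.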
# API Details
API_URL = "https://instagram-scraper-api2.p.rapidapi.com/v1/info"
HEADERS = {
    "x-rapidapi-key": API_KEY,
    "x-rapidapi-host": "instagram-scraper-api2.p.rapidapi.com",
}
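# RapidAPI authenticates requests with these two headers: the key identifies
# the subscriber and the host names the upstream API being called.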
# Function to extract all links recursively
def extract_links(obj):
    """Recursively collect every http(s) URL found in a nested JSON structure."""
    links = []
    if isinstance(obj, dict):
        for value in obj.values():
            if isinstance(value, str) and value.startswith("http"):
                links.append(value)
            elif isinstance(value, (dict, list)):
                links.extend(extract_links(value))
    elif isinstance(obj, list):
        for item in obj:
            links.extend(extract_links(item))
    return links
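# Example with a hypothetical payload:
#   extract_links({"pic": "https://x/img.jpg", "bio": [{"url": "https://example.com"}]})
#   -> ["https://x/img.jpg", "https://example.com"]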
# Streamlit UI
st.set_page_config(page_title="Instagram Scraper", layout="wide")
st.title("📸 Instagram Scraper API")
# User Input
username = st.text_input("Enter Instagram Username:", "mrbeast")
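# A minimal guard, not part of the original flow: stop early with a clear
# message if the secret is missing, instead of sending an unauthenticated request.
if not API_KEY:
    st.error("INSTA_API secret is not set; add it in the Space settings.")
    st.stop()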
if st.button("Fetch Details"):
    with st.spinner("Fetching data..."):
        response = requests.get(
            API_URL,
            headers=HEADERS,
            params={"username_or_id_or_url": username},
            timeout=30,  # avoid hanging the app if the API stalls
        )

    if response.status_code == 200:
        data = response.json()
        st.success("✅ Data Retrieved Successfully!")

        # Display the JSON response by default
        st.subheader("📄 JSON Response")
        st.json(data)
        # Extract all links from the response
        links = extract_links(data)

        # Separate image links from other links
        image_links = [link for link in links if any(ext in link for ext in (".jpg", ".jpeg", ".png", ".webp"))]
        other_links = [link for link in links if link not in image_links]
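        # Note: the extension filter is a plain substring match, so it also
        # matches when ".jpg" etc. appears in a query string rather than the path.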
        # Display images
        if image_links:
            st.subheader("🖼️ Extracted Images:")
            img_bytes_list = []
            img_filenames = []
            for idx, img_url in enumerate(image_links):
                st.markdown(f"🖼️ [Image {idx + 1}]({img_url})")
                # Download the image bytes so they can be zipped below
                img_bytes = requests.get(img_url, timeout=30).content
                img_bytes_list.append(img_bytes)
                img_filenames.append(f"{username}_image_{idx + 1}.jpg")
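            # Note: every saved file gets a .jpg suffix even when the source
            # URL ends in .png or .webp; the bytes are stored unmodified.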
            # Download all images as a ZIP archive
            if img_bytes_list:
                zip_buffer = io.BytesIO()
                with zipfile.ZipFile(zip_buffer, "w") as zip_file:
                    for filename, img_data in zip(img_filenames, img_bytes_list):
                        zip_file.writestr(filename, img_data)
                st.download_button(
                    "📥 Download All Images",
                    data=zip_buffer.getvalue(),
                    file_name=f"{username}_images.zip",
                    mime="application/zip",
                )
        # Display other links
        if other_links:
            st.subheader("🔗 Other Links:")
            for link in other_links:
                st.markdown(f"[🔗 {link}]({link})")
        # Download buttons below the links
        st.subheader("⬇️ Download Data")

        # Serialize the full JSON, and flatten the top-level fields to plain text
        json_data = json.dumps(data, indent=4)
        text_data = "\n".join(f"{key}: {value}" for key, value in data.items())

        # Download JSON file
        st.download_button(label="Download JSON", data=json_data, file_name=f"{username}_data.json", mime="application/json")
        # Download text file
        st.download_button(label="Download Text", data=text_data, file_name=f"{username}_details.txt", mime="text/plain")
    else:
        st.error("❌ Failed to retrieve data. Please check the username.")
# Footer
st.markdown("---")
st.caption("Powered by RapidAPI & Streamlit")
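# To try the app outside the Space (assuming streamlit and requests are the only
# dependencies): export INSTA_API=<your key>, then run `streamlit run app.py`.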