Update app.py
app.py CHANGED
@@ -2,8 +2,6 @@ import streamlit as st
 import requests
 import json
 import os
-import zipfile
-import io
 
 # Load API Key from Hugging Face Secrets
 API_KEY = os.getenv("INSTA_API")
@@ -15,7 +13,7 @@ HEADERS = {
     "x-rapidapi-host": "instagram-scraper-api2.p.rapidapi.com"
 }
 
-# Function to extract all links recursively
+# Function to extract all links recursively from JSON
 def extract_links(obj):
     links = []
     if isinstance(obj, dict):
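The diff shows only the first three lines of `extract_links`; the rest of its body is collapsed as unchanged context. For readers following along, a plausible completion is sketched below — the traversal structure and the URL test are assumptions, not the committed code:

```python
# Hypothetical completion of extract_links -- only the first three lines
# appear in the diff; the recursion and the URL check here are assumptions.
def extract_links(obj):
    links = []
    if isinstance(obj, dict):
        for value in obj.values():            # recurse into every dict value
            links.extend(extract_links(value))
    elif isinstance(obj, list):
        for item in obj:                      # recurse into every list element
            links.extend(extract_links(item))
    elif isinstance(obj, str) and obj.startswith(("http://", "https://")):
        links.append(obj)                     # keep strings that look like URLs
    return links
```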
@@ -31,7 +29,7 @@ def extract_links(obj):
 
 # Streamlit UI
 st.set_page_config(page_title="Instagram Scraper", layout="wide")
-st.title("📸 Instagram Scraper")
+st.title("📸 Instagram Scraper API")
 
 # User Input
 username = st.text_input("Enter Instagram Username:", "mrbeast")
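The later hunk headers reference `if st.button("Fetch Details"):`, but the request itself sits outside the diff. Given the `HEADERS` dict and host shown above, the fetch step presumably resembles the sketch below; the endpoint path and query-parameter name are guesses, not taken from the commit:

```python
# Sketch of the fetch step -- the "/v1/info" path and the
# "username_or_id_or_url" parameter are hypothetical; only HEADERS
# and the RapidAPI host actually appear in the diff.
if st.button("Fetch Details"):
    url = "https://instagram-scraper-api2.p.rapidapi.com/v1/info"  # assumed path
    response = requests.get(
        url,
        headers=HEADERS,
        params={"username_or_id_or_url": username},  # assumed parameter name
        timeout=15,
    )
    data = response.json() if response.status_code == 200 else None
```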
@@ -48,41 +46,16 @@ if st.button("Fetch Details"):
         st.subheader("📜 JSON Response")
         st.json(data)
 
-        #
+        # Extracting Links from Nested JSON
         links = extract_links(data)
 
-        #
-
-
-
-        # Display Images
-        if image_links:
-            st.subheader("🖼️ Extracted Images:")
-            img_bytes_list = []
-            img_filenames = []
-
-            for idx, img_url in enumerate(image_links):  # Use image_links instead of links
-                st.image(img_url, caption=f"Image {idx + 1}", use_container_width=True)
-
-                # Download images as bytes
-                img_bytes = requests.get(img_url).content
-                img_bytes_list.append(img_bytes)
-                img_filenames.append(f"{username}_image_{idx+1}.jpg")
-
-        # **Download All Images as ZIP**
-        if img_bytes_list:
-            with io.BytesIO() as zip_buffer:
-                with zipfile.ZipFile(zip_buffer, "w") as zip_file:
-                    for filename, img_data in zip(img_filenames, img_bytes_list):
-                        zip_file.writestr(filename, img_data)
-                zip_buffer.seek(0)
-                st.download_button("📥 Download All Images", data=zip_buffer, file_name=f"{username}_images.zip", mime="application/zip")
-
-        # Display Other Links
-        if other_links:
-            st.subheader("🔗 Other Links:")
-            for link in other_links:
+        # Display Links Below JSON
+        if links:
+            st.subheader("🔗 Extracted Links:")
+            for link in links:
                 st.markdown(f"[🔗 {link}]({link})")
+        else:
+            st.info("ℹ️ No links found in the response.")
 
         # **Download Buttons Below Links**
         st.subheader("⬇️ Download Data")
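This hunk deletes the in-memory ZIP download feature, which is why `zipfile` and `io` leave the imports in the first hunk. The removed pattern worked, though a slightly more robust version of the same idea closes the `ZipFile` before reading the buffer, so the archive is fully flushed. A sketch under that assumption, not part of the commit:

```python
import io
import zipfile

def zip_images(filenames, blobs):
    """Pack (filename, bytes) pairs into an in-memory ZIP and return its bytes."""
    buffer = io.BytesIO()
    with zipfile.ZipFile(buffer, "w", zipfile.ZIP_DEFLATED) as zf:
        for name, blob in zip(filenames, blobs):
            zf.writestr(name, blob)
    return buffer.getvalue()  # ZipFile is closed, so the central directory is written

# Usage mirroring the removed button (names from the deleted code):
# st.download_button("📥 Download All Images",
#                    data=zip_images(img_filenames, img_bytes_list),
#                    file_name=f"{username}_images.zip", mime="application/zip")
```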
@@ -97,9 +70,16 @@ if st.button("Fetch Details"):
         # Download Text File
         st.download_button(label="Download Text", data=text_data, file_name=f"{username}_details.txt", mime="text/plain")
 
+        # Download Profile Picture (If available)
+        profile_pic = data.get("profile_picture")
+        if profile_pic:
+            st.image(profile_pic, caption="Profile Picture")
+            img_bytes = requests.get(profile_pic).content
+            st.download_button(label="Download Profile Picture", data=img_bytes, file_name=f"{username}_profile.jpg", mime="image/jpeg")
+
     else:
         st.error("❌ Failed to retrieve data. Please check the username.")
 
 # Footer
 st.markdown("---")
-st.caption("Powered by Streamlit")
+st.caption("Powered by RapidAPI & Streamlit")
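The added profile-picture branch calls `requests.get(profile_pic).content` with no timeout or status check, so a dead image URL would raise inside the app. A defensive variant of the same step — an illustration only, though the `profile_picture` key is the one the commit reads:

```python
# Defensive rewrite of the added branch -- illustration, not the commit.
profile_pic = data.get("profile_picture")
if profile_pic:
    st.image(profile_pic, caption="Profile Picture")
    try:
        resp = requests.get(profile_pic, timeout=10)
        resp.raise_for_status()  # surface 4xx/5xx responses as errors
        st.download_button(label="Download Profile Picture", data=resp.content,
                           file_name=f"{username}_profile.jpg", mime="image/jpeg")
    except requests.RequestException as exc:
        st.warning(f"Could not fetch the profile picture: {exc}")
```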