|  | import streamlit as st | 
					
						
						|  | import requests | 
					
						
						|  | import json | 
					
						
						|  | import os | 
					
						
						|  | import zipfile | 
					
						
						|  | import io | 
					
						
						|  |  | 
					
						
						|  |  | 
					
						
# RapidAPI credentials: the key is read from the INSTA_API environment
# variable (None if unset, which will make the API reject requests).
API_KEY = os.getenv("INSTA_API")

# Endpoint of the "instagram-scraper-api2" profile-info route on RapidAPI.
API_URL = "https://instagram-scraper-api2.p.rapidapi.com/v1/info"

# Headers required on every RapidAPI call: the account key plus the host
# header that identifies the target API behind the RapidAPI gateway.
HEADERS = {
    "x-rapidapi-key": API_KEY,
    "x-rapidapi-host": "instagram-scraper-api2.p.rapidapi.com"
}
					
						
						|  |  | 
					
						
						|  |  | 
					
						
						|  | def extract_links(obj): | 
					
						
						|  | links = [] | 
					
						
						|  | if isinstance(obj, dict): | 
					
						
						|  | for key, value in obj.items(): | 
					
						
						|  | if isinstance(value, str) and value.startswith("http"): | 
					
						
						|  | links.append(value) | 
					
						
						|  | elif isinstance(value, (dict, list)): | 
					
						
						|  | links.extend(extract_links(value)) | 
					
						
						|  | elif isinstance(obj, list): | 
					
						
						|  | for item in obj: | 
					
						
						|  | links.extend(extract_links(item)) | 
					
						
						|  | return links | 
					
						
						|  |  | 
					
						
						|  |  | 
					
						
						|  | st.set_page_config(page_title="Instagram Scraper", layout="wide") | 
					
						
						|  | st.title("πΈ Instagram Scraper API") | 
					
						
						|  |  | 
					
						
						|  |  | 
					
						
						|  | username = st.text_input("Enter Instagram Username:", "mrbeast") | 
					
						
						|  |  | 
					
						
						|  | if st.button("Fetch Details"): | 
					
						
						|  | with st.spinner("Fetching data..."): | 
					
						
						|  | response = requests.get(API_URL, headers=HEADERS, params={"username_or_id_or_url": username}) | 
					
						
						|  |  | 
					
						
						|  | if response.status_code == 200: | 
					
						
						|  | data = response.json() | 
					
						
						|  | st.success("β
 Data Retrieved Successfully!") | 
					
						
						|  |  | 
					
						
						|  |  | 
					
						
						|  | st.subheader("π JSON Response") | 
					
						
						|  | st.json(data) | 
					
						
						|  |  | 
					
						
						|  |  | 
					
						
						|  | links = extract_links(data) | 
					
						
						|  |  | 
					
						
						|  |  | 
					
						
						|  | image_links = [link for link in links if any(ext in link for ext in [".jpg", ".jpeg", ".png", ".webp"])] | 
					
						
						|  | other_links = [link for link in links if link not in image_links] | 
					
						
						|  |  | 
					
						
						|  |  | 
					
						
						|  | if image_links: | 
					
						
						|  | st.subheader("πΌοΈ Extracted Images:") | 
					
						
						|  | img_bytes_list = [] | 
					
						
						|  | img_filenames = [] | 
					
						
						|  |  | 
					
						
						|  | for idx, img_url in enumerate(image_links): | 
					
						
						|  | st.markdown(f"πΌοΈ [Image {idx + 1}]({img_url})") | 
					
						
						|  |  | 
					
						
						|  |  | 
					
						
						|  | img_bytes = requests.get(img_url).content | 
					
						
						|  | img_bytes_list.append(img_bytes) | 
					
						
						|  | img_filenames.append(f"{username}_image_{idx+1}.jpg") | 
					
						
						|  |  | 
					
						
						|  |  | 
					
						
						|  | if img_bytes_list: | 
					
						
						|  | with io.BytesIO() as zip_buffer: | 
					
						
						|  | with zipfile.ZipFile(zip_buffer, "w") as zip_file: | 
					
						
						|  | for filename, img_data in zip(img_filenames, img_bytes_list): | 
					
						
						|  | zip_file.writestr(filename, img_data) | 
					
						
						|  | zip_buffer.seek(0) | 
					
						
						|  | st.download_button("π₯ Download All Images", data=zip_buffer, file_name=f"{username}_images.zip", mime="application/zip") | 
					
						
						|  |  | 
					
						
						|  |  | 
					
						
						|  | if other_links: | 
					
						
						|  | st.subheader("π Other Links:") | 
					
						
						|  | for link in other_links: | 
					
						
						|  | st.markdown(f"[π {link}]({link})") | 
					
						
						|  |  | 
					
						
						|  |  | 
					
						
						|  | st.subheader("β¬οΈ Download Data") | 
					
						
						|  |  | 
					
						
						|  |  | 
					
						
						|  | json_data = json.dumps(data, indent=4) | 
					
						
						|  | text_data = "\n".join(f"{key}: {value}" for key, value in data.items()) | 
					
						
						|  |  | 
					
						
						|  |  | 
					
						
						|  | st.download_button(label="Download JSON", data=json_data, file_name=f"{username}_data.json", mime="application/json") | 
					
						
						|  |  | 
					
						
						|  |  | 
					
						
						|  | st.download_button(label="Download Text", data=text_data, file_name=f"{username}_details.txt", mime="text/plain") | 
					
						
						|  |  | 
					
						
						|  | else: | 
					
						
						|  | st.error("β Failed to retrieve data. Please check the username.") | 
					
						
						|  |  | 
					
						
						|  |  | 
					
						
						|  | st.markdown("---") | 
					
						
						|  | st.caption("Powered by RapidAPI & Streamlit") | 
					
						
						|  |  |