import streamlit as st
import requests
from bs4 import BeautifulSoup

# Seconds to wait for any single HTTP request before giving up.
# Without a timeout, requests.get() can hang the Streamlit app indefinitely.
REQUEST_TIMEOUT = 10


def fetch_gdpr_recitals(limit=3):
    """Fetch live recitals from the GDPR website (https://gdpr-info.eu/recitals/).

    Scrapes the recitals index page, then follows the link of each of the
    first `limit` entries and extracts its text content.

    Args:
        limit: Maximum number of recitals to fetch (default 3, matching the
            original hard-coded behavior).

    Returns:
        dict mapping recital number (str) to {'title': str, 'content': str}.
        Returns {} if the index page cannot be fetched. Individual recitals
        that fail to download or parse are skipped (with an st.error notice)
        rather than aborting the whole fetch.
    """
    url = "https://gdpr-info.eu/recitals/"
    try:
        response = requests.get(url, timeout=REQUEST_TIMEOUT)
    except requests.RequestException:
        # Network failure (DNS, connection reset, timeout, ...) — report in
        # the UI instead of raising a traceback into the Streamlit page.
        st.error("Failed to fetch data from the GDPR website.")
        return {}

    # Check if the request was successful
    if response.status_code != 200:
        st.error("Failed to fetch data from the GDPR website.")
        return {}

    soup = BeautifulSoup(response.content, 'html.parser')
    recitals = {}

    # Locate all recital entries on the index page.
    articles = soup.find_all('div', class_='artikel')

    # Extract each recital's link and title, limited to the first `limit`.
    for article in articles[:limit]:
        # The site markup can change; guard every lookup so one malformed
        # entry is skipped instead of crashing the whole fetch.
        anchor = article.find('a')
        number_tag = article.find('span', class_='nummer')
        title_tag = article.find('span', class_='titel')
        if anchor is None or not anchor.get('href') or number_tag is None or title_tag is None:
            continue

        link = anchor['href']
        number = number_tag.text.strip('()')
        title = title_tag.text.strip()

        # Fetch the content of each recital.
        try:
            rec_response = requests.get(link, timeout=REQUEST_TIMEOUT)
        except requests.RequestException:
            st.error(f"Failed to fetch recital {number} from {link}")
            continue

        if rec_response.status_code == 200:
            rec_soup = BeautifulSoup(rec_response.content, 'html.parser')
            body = rec_soup.find('div', class_='entry-content')
            if body is None:
                # Page downloaded but has no recognizable content block.
                st.error(f"Failed to fetch recital {number} from {link}")
                continue
            content = body.get_text(strip=True)
            recitals[number] = {'title': title, 'content': content}
        else:
            st.error(f"Failed to fetch recital {number} from {link}")

    return recitals


def display_up_page():
    """Render the 'UP Page' Streamlit view: a button that fetches and
    displays live GDPR recitals via fetch_gdpr_recitals()."""
    st.title("UP Page - GDPR Recitals")

    # Fetch and display live recitals on demand.
    if st.button("Fetch Live Recitals"):
        with st.spinner("Fetching updates..."):
            recitals = fetch_gdpr_recitals()
            if recitals:
                for number, details in recitals.items():
                    st.markdown(f"*Recital {number}: {details['title']}*")
                    st.write(details['content'])
            else:
                st.write("No recitals found.")


# Call the function to display the UP page content
if __name__ == "__main__":
    display_up_page()