|
import streamlit as st |
|
import requests |
|
from bs4 import BeautifulSoup |
|
|
|
|
|
def fetch_gdpr_recitals(max_items=3, timeout=10):
    """Scrape recital summaries from gdpr-info.eu.

    Fetches the recitals index page, then follows the link for each of the
    first ``max_items`` recitals and extracts its full text.

    Args:
        max_items: Maximum number of recitals to fetch (default 3, matching
            the original hard-coded limit).
        timeout: Per-request timeout in seconds; without it a stalled server
            would hang the Streamlit app indefinitely.

    Returns:
        dict mapping recital number (str) to
        ``{'title': str, 'content': str}``. Empty dict on failure.
    """
    url = "https://gdpr-info.eu/recitals/"
    try:
        response = requests.get(url, timeout=timeout)
    except requests.RequestException:
        # DNS failure, refused connection, timeout, etc. — report and bail.
        st.error("Failed to fetch data from the GDPR website.")
        return {}

    if response.status_code != 200:
        st.error("Failed to fetch data from the GDPR website.")
        return {}

    soup = BeautifulSoup(response.content, 'html.parser')
    recitals = {}

    articles = soup.find_all('div', class_='artikel')

    for article in articles[:max_items]:
        link_tag = article.find('a')
        number_tag = article.find('span', class_='nummer')
        title_tag = article.find('span', class_='titel')
        # Skip malformed entries instead of raising AttributeError on None.
        if link_tag is None or number_tag is None or title_tag is None:
            continue

        link = link_tag['href']
        number = number_tag.text.strip('()')
        title = title_tag.text.strip()

        try:
            rec_response = requests.get(link, timeout=timeout)
        except requests.RequestException:
            st.error(f"Failed to fetch recital {number} from {link}")
            continue

        if rec_response.status_code != 200:
            st.error(f"Failed to fetch recital {number} from {link}")
            continue

        rec_soup = BeautifulSoup(rec_response.content, 'html.parser')
        body = rec_soup.find('div', class_='entry-content')
        # Guard: the detail page may lack the expected content div.
        if body is not None:
            recitals[number] = {
                'title': title,
                'content': body.get_text(strip=True),
            }

    return recitals
|
|
|
def display_up_page():
    """Render the UP page: a button that fetches live GDPR recitals and
    displays each one's title and full text."""
    st.title("UP Page - GDPR Recitals")

    # Nothing to do until the user asks for a refresh.
    if not st.button("Fetch Live Recitals"):
        return

    with st.spinner("Fetching updates..."):
        recitals = fetch_gdpr_recitals()
        if not recitals:
            st.write("No recitals found.")
        else:
            for number, details in recitals.items():
                st.markdown(f"*Recital {number}: {details['title']}*")
                st.write(details['content'])
|
|
|
|
|
# Script entry point: render the page when run directly (e.g. `streamlit run`).
if __name__ == "__main__":
    display_up_page()