"""Streamlit app: live GDPR recital updates with sidebar page navigation."""

import streamlit as st

# Streamlit page configuration (must be the first Streamlit command).
st.set_page_config(page_title="GDPR Recitals Live Updates", layout="wide")

import requests
from bs4 import BeautifulSoup

import legal_document_analysis
import Update_tracker  # Module providing the update-tracker page.

# Fetch at most this many recitals per run (keeps page load fast).
MAX_RECITALS = 3

# Seconds to wait for each HTTP request before giving up, so a hung
# connection cannot freeze the Streamlit UI indefinitely.
REQUEST_TIMEOUT = 10


def fetch_gdpr_recitals():
    """Fetch the first few GDPR recitals from https://gdpr-info.eu/recitals/.

    Scrapes the recital index page, follows the link of each of the first
    ``MAX_RECITALS`` entries, and collects the recital text.

    Returns:
        dict: Maps recital number (str, parentheses stripped) to
            ``{'title': str, 'content': str}``. Empty dict when the index
            page cannot be fetched. Individual recitals that fail to load
            or parse are reported via ``st.error`` and skipped.
    """
    url = "https://gdpr-info.eu/recitals/"
    try:
        response = requests.get(url, timeout=REQUEST_TIMEOUT)
    except requests.RequestException as exc:
        # Network failure (DNS, connection refused, timeout, ...).
        st.error(f"Failed to fetch the GDPR website: {exc}")
        return {}

    if response.status_code != 200:
        # BUG FIX: original message read "Update The GDPR website.",
        # which did not describe the failure.
        st.error("Failed to fetch the GDPR website.")
        return {}

    soup = BeautifulSoup(response.content, "html.parser")
    recitals = {}

    # Each recital is rendered as a <div class="artikel"> on the index page.
    articles = soup.find_all("div", class_="artikel")

    for article in articles[:MAX_RECITALS]:
        anchor = article.find("a")
        number_span = article.find("span", class_="nummer")
        title_span = article.find("span", class_="titel")
        # Guard against markup changes: skip malformed entries instead of
        # crashing on a None subscript/attribute lookup.
        if anchor is None or number_span is None or title_span is None:
            continue

        link = anchor["href"]
        number = number_span.text.strip("()")
        title = title_span.text.strip()

        # Fetch the full text of the individual recital page.
        try:
            rec_response = requests.get(link, timeout=REQUEST_TIMEOUT)
        except requests.RequestException:
            st.error(f"Failed to fetch recital {number} from {link}")
            continue

        if rec_response.status_code != 200:
            st.error(f"Failed to fetch recital {number} from {link}")
            continue

        rec_soup = BeautifulSoup(rec_response.content, "html.parser")
        content_div = rec_soup.find("div", class_="entry-content")
        if content_div is None:
            # Recital page loaded but the expected content container is
            # missing — treat the same as a fetch failure.
            st.error(f"Failed to fetch recital {number} from {link}")
            continue

        recitals[number] = {
            "title": title,
            "content": content_div.get_text(strip=True),
        }

    return recitals


def main():
    """Render the sidebar navigation and dispatch to the selected page."""
    st.sidebar.title("Navigation")
    page = st.sidebar.radio(
        "Choose a page", ["Legal Document Analysis", "Update_tracker"]
    )

    if page == "Legal Document Analysis":
        # Display the legal analysis page.
        legal_document_analysis.display_legal_analysis_page()
    elif page == "Update_tracker":
        # Display the update-tracker page.
        Update_tracker.display_Update_tracker_page()


if __name__ == "__main__":
    main()