SR05 committed on
Commit
2016b0e
·
verified ·
1 Parent(s): 770ff6a

Update abudhabi_visa.py

Browse files
Files changed (1) hide show
  1. abudhabi_visa.py +32 -24
abudhabi_visa.py CHANGED
@@ -5,39 +5,47 @@ import re
5
 
6
  URL = "https://www.ireland.ie/en/uae/abudhabi/services/visas/weekly-decision-reports/"
7
 
 
 
 
 
8
def get_latest_report():
    """Scrape the Abu Dhabi visa decision page for the most recent weekly report.

    Returns:
        tuple[str | None, str]: ``(report_name, href)`` for the newest report
        link found, or ``(None, error_message)`` when the page cannot be
        fetched or no matching link exists.
    """
    from datetime import datetime  # local import: keeps this edit self-contained

    # timeout prevents the UI from hanging forever on an unresponsive server
    response = requests.get(URL, timeout=10)
    if response.status_code != 200:
        return None, "Failed to fetch the webpage"

    soup = BeautifulSoup(response.text, "html.parser")

    # Find all links
    links = soup.find_all("a", href=True)

    # Regex pattern to match report titles, e.g.
    # "Abu Dhabi Visa Decision 1 March 2025 to 7 March 2025"
    pattern = re.compile(
        r"Abu Dhabi Visa Decision (\d{1,2} \w+ \d{4}) to (\d{1,2} \w+ \d{4})",
        re.IGNORECASE,
    )

    latest_date = None
    latest_link = None
    latest_report_name = None

    for link in links:
        match = pattern.search(link.text)
        if not match:
            continue
        # BUG FIX: parse the end date instead of comparing raw strings —
        # lexicographic comparison ranks "9 March 2025" above "10 March 2025".
        try:
            report_date = datetime.strptime(match.group(2), "%d %B %Y")
        except ValueError:
            continue  # malformed/unexpected date text; skip this link
        if latest_date is None or report_date > latest_date:
            latest_date = report_date
            latest_report_name = link.text.strip()
            latest_link = link['href']

    if latest_link:
        return latest_report_name, latest_link
    else:
        return None, "No reports found"
38
 
39
  # Streamlit UI
40
- st.title("🇦🇪 Abu Dhabi Visa Decision Reports")
41
  st.write("Fetching the latest visa decision report dynamically.")
42
 
43
  latest_report, report_url = get_latest_report()
 
5
 
6
  URL = "https://www.ireland.ie/en/uae/abudhabi/services/visas/weekly-decision-reports/"
7
 
8
# Browser-like User-Agent: some government sites reject the default
# python-requests agent string with a 403.
HEADERS = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36"
}


def get_latest_report():
    """Scrape the Abu Dhabi visa decision page for the most recent weekly report.

    Returns:
        tuple[str | None, str]: ``(report_name, href)`` for the newest report
        link found, or ``(None, error_message)`` when the fetch fails, the
        server answers non-200, or no matching link exists.
    """
    from datetime import datetime  # local import: keeps this edit self-contained

    try:
        response = requests.get(URL, headers=HEADERS, timeout=10)
        if response.status_code != 200:
            return None, f"Failed to fetch the webpage. Status Code: {response.status_code}"

        soup = BeautifulSoup(response.text, "html.parser")

        # Find all links
        links = soup.find_all("a", href=True)

        # Regex pattern to match report titles, e.g.
        # "Abu Dhabi Visa Decision 1 March 2025 to 7 March 2025"
        pattern = re.compile(
            r"Abu Dhabi Visa Decision (\d{1,2} \w+ \d{4}) to (\d{1,2} \w+ \d{4})",
            re.IGNORECASE,
        )

        latest_date = None
        latest_link = None
        latest_report_name = None

        for link in links:
            match = pattern.search(link.text)
            if not match:
                continue
            # BUG FIX: parse the end date instead of comparing raw strings —
            # lexicographic comparison ranks "9 March 2025" above "10 March 2025".
            try:
                report_date = datetime.strptime(match.group(2), "%d %B %Y")
            except ValueError:
                continue  # malformed/unexpected date text; skip this link
            if latest_date is None or report_date > latest_date:
                latest_date = report_date
                latest_report_name = link.text.strip()
                latest_link = link['href']

        if latest_link:
            return latest_report_name, latest_link
        else:
            return None, "No reports found"

    except Exception as e:  # boundary catch: surface any failure to the UI as text
        return None, f"Error: {str(e)}"
 
 
46
 
47
  # Streamlit UI
48
+ st.title("Abu Dhabi Visa Decision Reports")
49
  st.write("Fetching the latest visa decision report dynamically.")
50
 
51
  latest_report, report_url = get_latest_report()