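"""Scrape today's menu from the Mensa Adenauerring page on sw-ka.de and serve a
simple streaming chat bot through a Gradio ChatInterface backed by the
Hugging Face InferenceClient."""
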
import bs4
import json
import pandas as pd
from huggingface_hub import InferenceClient
import urllib.request
import gradio as gr
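
# NOTE: `client` is used by reply_bot() but never defined in this file. A
# minimal sketch, assuming a hosted Hugging Face text-generation model; the
# model id below is a placeholder, not taken from the original code.
client = InferenceClient(model="mistralai/Mistral-7B-Instruct-v0.2")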

def get_menu():
    # Download and decode today's menu page
    fp = urllib.request.urlopen("https://www.sw-ka.de/en/hochschulgastronomie/speiseplan/mensa_adenauerring/")
    html_content = fp.read().decode("utf8")

    # Parse the HTML using BeautifulSoup
    soup = bs4.BeautifulSoup(html_content, 'html.parser')

    # Today's menu lives in the div with id 'canteen_day_1'
    canteen_div = soup.find('div', id='canteen_day_1')
    # Find all tables within the canteen_div
    tables = canteen_div.find_all('table')

    foods = []
    prices = []
    nutri = []
    line_names = []

    # First pass: collect food names, prices and energy values
    for table in tables:
        # Menu items are table rows whose class starts with "mt-"
        menu_items = table.find_all('tr', class_=lambda class_name: class_name and class_name.startswith('mt-'))

        # Iterate through each menu item
        for item in menu_items:
            # Extract food name
            food_name = item.find('span', class_='bg').text.strip()
            
            # Extract price
            price = item.find('span', class_='bgp price_1').text.strip()
            
            # Extract nutritional information
            nutritional_info = {}
            nutritional_data = item.find('div', class_='nutrition_facts')
            if nutritional_data:
                for element in nutritional_data.find_all('div', class_=['energie', 'proteine', 'kohlenhydrate', 'zucker', 'fett', 'gesaettigt', 'salz']):
                    key = element.find('div').text.strip()
                    value = element.find_all('div')[1].text.strip()
                    nutritional_info[key] = value
            
            # Store the extracted values
            foods.append(food_name)
            prices.append(price)
            try:
                # Keep only the energy value ('Energie') for the menu text
                nutri.append(json.dumps(nutritional_info['Energie'], indent=4))
            except KeyError:
                nutri.append("")
        # Only the first table is parsed
        break
    # Second pass: collect the serving-line name for every menu item
    for table in tables:
        # Each serving line is a row (tr) with class 'mensatype_rows'
        rows = table.find_all('tr', class_='mensatype_rows')
        
        # Iterate over each row
        for row in rows:
            # Extract the row name
            row_name = row.find('div').get_text(strip=True)
            menu_titles = row.find_all('td', class_='menu-title')
            
            # Repeat the line name once per food item on that line
            for menu_title in menu_titles:
                line_names.append(row_name)

    # Build a plain-text menu, grouped by serving line
    menu = ""
    df = pd.DataFrame(zip(line_names, foods, prices, nutri), columns=['line', 'food', 'price', 'nutri'])
    # Optional filters for serving lines that are not of interest:
    #df = df[~df['line'].str.contains("Abendessen")]
    #df = df[~df['line'].str.contains("pizza")]
    #df = df[~df['line'].str.contains("werk")]
    df_line = df.groupby('line', sort=False)
    for line, df_group in df_line:
        menu += "Line Name: " + line + "\n"
        for idx, row in df_group.iterrows():
            menu += row['food'] + "\n"
            menu += "Price: " + row['price'] + "\n"
            menu += "Calories: " + row['nutri'] + "\n"
    return menu
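
# NOTE: get_menu() is not called anywhere in this file; a hypothetical usage
# sketch would be to fetch and inspect today's menu once at startup:
#
#     todays_menu = get_menu()
#     print(todays_menu)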

def reply_bot(message, history):
    curr_prompt = message
    try:
        print(curr_prompt)
        # Stream the model's answer token by token, yielding the partial text
        answer = ""
        for token in client.text_generation(prompt=curr_prompt, max_new_tokens=512, stream=True):
            answer += token
            yield answer
    except Exception:
        # Yield the error message so it is shown in the chat
        # (a `return <value>` inside a generator would never reach the UI)
        yield "Clear the history or ask FR to increase the context window; the current capacity is only 4k tokens."

gr.ChatInterface(reply_bot).launch()