jaimik69 committed on
Commit 17c69c8 · 1 Parent(s): 54f245c

Upload NLP_project.ipynb

Files changed (1)
  1. NLP_project.ipynb +700 -0
NLP_project.ipynb ADDED
@@ -0,0 +1,700 @@
+ {
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "jZ0wVSoo_5yd"
+ },
+ "outputs": [],
+ "source": [
+ "%pip install gradio gensim sentence_transformers torch torchvision torchaudio -f https://download.pytorch.org/whl/cu111/torch_stable.html"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/"
+ },
+ "id": "c7lAKXG_DTM_",
+ "outputId": "60bad5b5-83a2-4f21-fce3-8bda36b50c5a"
+ },
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "[nltk_data] Downloading package punkt to /root/nltk_data...\n",
+ "[nltk_data] Package punkt is already up-to-date!\n",
+ "[nltk_data] Downloading package averaged_perceptron_tagger to\n",
+ "[nltk_data] /root/nltk_data...\n",
+ "[nltk_data] Package averaged_perceptron_tagger is already up-to-\n",
+ "[nltk_data] date!\n",
+ "[nltk_data] Downloading package vader_lexicon to /root/nltk_data...\n",
+ "[nltk_data] Package vader_lexicon is already up-to-date!\n"
+ ]
+ }
+ ],
+ "source": [
+ "import pandas as pd\n",
+ "import networkx as nx\n",
+ "from sklearn.feature_extraction.text import TfidfVectorizer\n",
+ "from sklearn.metrics.pairwise import linear_kernel\n",
+ "from textblob import TextBlob\n",
+ "from nltk.sentiment.vader import SentimentIntensityAnalyzer\n",
+ "import nltk\n",
+ "from nltk import pos_tag\n",
+ "from nltk.tokenize import word_tokenize\n",
+ "from gensim.models import Word2Vec\n",
+ "import spacy\n",
+ "from sentence_transformers import SentenceTransformer, util\n",
+ "import numpy as np\n",
+ "import torch\n",
+ "import gradio as gr\n",
+ "import os\n",
+ "import math\n",
+ "from datetime import datetime\n",
+ "\n",
+ "# Use the GPU if one is available, otherwise fall back to the CPU\n",
+ "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
+ "\n",
+ "# Load a sentence-level BERT model for semantic similarity\n",
+ "bert_model = SentenceTransformer('paraphrase-MiniLM-L6-v2', device=device)\n",
+ "\n",
+ "# Download NLTK resources (no-op if already downloaded)\n",
+ "nltk.download('punkt')\n",
+ "nltk.download('averaged_perceptron_tagger')\n",
+ "nltk.download('vader_lexicon')\n",
+ "\n",
+ "# Load the spaCy model for Named Entity Recognition (NER)\n",
+ "nlp = spacy.load(\"en_core_web_sm\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "dG1jLPn5DXeb"
+ },
+ "outputs": [],
+ "source": [
+ "# Load the Yelp dataset\n",
+ "yelp_data = pd.read_csv('restaurants.csv')\n",
+ "\n",
+ "# Keep the relevant columns (restaurant name, rating, location, categories, hours);\n",
+ "# drop rows missing categories or city so the string operations below never hit NaN\n",
+ "restaurants = yelp_data[['name', 'stars', 'city', 'categories', 'hours']].dropna(subset=['categories', 'city']).reset_index(drop=True)\n",
+ "\n",
+ "comfort_food_terms = [\n",
+ "    \"Home cooking\",\n",
+ "    \"Soul food\",\n",
+ "    \"Indulgent food\",\n",
+ "    \"Feel-good food\",\n",
+ "    \"Nostalgia food\",\n",
+ "    \"Emotional food\",\n",
+ "    \"Guilty pleasure\",\n",
+ "    \"Indulgence\",\n",
+ "    \"Treat\",\n",
+ "    \"Culinary hug\",\n",
+ "    \"Soul-soothing food\",\n",
+ "    \"Heart-warming food\",\n",
+ "]\n",
+ "\n",
+ "exciting_food_terms = [\n",
+ "    \"Adventurous food\",\n",
+ "    \"Exotic food\",\n",
+ "    \"Culinary adventure\",\n",
+ "    \"Sensual food\",\n",
+ "    \"Tantalizing food\",\n",
+ "    \"Mouthwatering food\",\n",
+ "    \"Delectable food\",\n",
+ "    \"Irresistible food\",\n",
+ "    \"Tempting food\",\n",
+ "    \"Gastronomical delight\",\n",
+ "]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "Z05Z7XmOW5xe"
+ },
+ "outputs": [],
+ "source": [
+ "# Create a TF-IDF vectorizer to turn restaurant categories and cities into numerical features\n",
+ "tfidf_vectorizer = TfidfVectorizer(stop_words='english', lowercase=True)\n",
+ "tfidf_matrix = tfidf_vectorizer.fit_transform(restaurants['categories'] + \", \" + restaurants['city'])\n",
+ "\n",
+ "# Train a Word2Vec model on tokenized category + city text\n",
+ "# (a space separator keeps the last category and the city from fusing into one token)\n",
+ "restaurant_texts = [word_tokenize(restaurant['categories'].lower() + \" \" + restaurant['city'].lower()) for _, restaurant in restaurants.iterrows()]\n",
+ "word2vec_model = Word2Vec(sentences=restaurant_texts, vector_size=100, window=5, min_count=1, workers=4)\n",
+ "\n",
+ "if os.path.isfile(\"./bert_embeddings.npy\"):\n",
+ "    restaurant_embeddings = np.load('bert_embeddings.npy')\n",
+ "else:\n",
+ "    # Precompute restaurant embeddings on the chosen device for both categories and cities\n",
+ "    restaurant_embeddings = [bert_model.encode((restaurant['categories'].lower() + \" \" + restaurant['city'].lower()), convert_to_tensor=True).detach().cpu().numpy()\n",
+ "                             for _, restaurant in restaurants.iterrows()]\n",
+ "    np.save('bert_embeddings.npy', np.array(restaurant_embeddings))\n",
+ "    restaurant_embeddings = np.load('bert_embeddings.npy')\n",
+ "\n",
+ "\n",
+ "# Build a knowledge graph\n",
+ "knowledge_graph = nx.Graph()\n",
+ "\n",
+ "# Add restaurant nodes with attributes\n",
+ "for _, restaurant in restaurants.iterrows():\n",
+ "    knowledge_graph.add_node(\n",
+ "        restaurant['name'],\n",
+ "        stars=restaurant['stars'],\n",
+ "        city=restaurant['city'],\n",
+ "        categories=restaurant['categories'],\n",
+ "        hours=restaurant['hours']\n",
+ "    )\n",
+ "\n",
+ "# Function to extract named entities from user input using spaCy NER\n",
+ "def extract_named_entities(user_input):\n",
+ "    doc = nlp(user_input)\n",
+ "    named_entities = [(ent.text, ent.label_) for ent in doc.ents]\n",
+ "    return named_entities\n",
+ "\n",
+ "# Function to perform sentiment analysis using VADER\n",
+ "def analyze_sentiment_vader(text):\n",
+ "    sid = SentimentIntensityAnalyzer()\n",
+ "    compound_score = sid.polarity_scores(text)['compound']\n",
+ "    return compound_score"
+ ]
+ },
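+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Illustrative sanity check (an addition, not part of the original pipeline): shows how\n",
+ "# the TF-IDF route scores a query. We vectorize a made-up sample query with the fitted\n",
+ "# tfidf_vectorizer, take similarities against tfidf_matrix via linear_kernel (a dot\n",
+ "# product of the L2-normalized TF-IDF rows, i.e. cosine similarity), and print the top matches.\n",
+ "sample_query = \"italian food in Saint Louis\"  # hypothetical query for illustration\n",
+ "sample_tfidf = tfidf_vectorizer.transform([sample_query])\n",
+ "sims = linear_kernel(sample_tfidf, tfidf_matrix).flatten()\n",
+ "top = sims.argsort()[::-1][:5]\n",
+ "print(restaurants.iloc[top][['name', 'city', 'categories', 'stars']])"
+ ]
+ },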
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "wtd56Af2EI7h"
+ },
+ "outputs": [],
+ "source": [
+ "# # Function to get restaurant recommendations based on graph similarity\n",
+ "# def get_graph_recommendations(user_nouns):\n",
+ "#     # Use graph-based similarity to get similar restaurants from the knowledge graph\n",
+ "#     # You can choose a graph similarity algorithm based on your requirements\n",
+ "#     # For example, you can use Jaccard similarity or node2vec embeddings\n",
+ "#     # For simplicity, let's use Jaccard similarity here\n",
+ "#     user_nouns_set = set(user_nouns)\n",
+ "#     graph_recommendations = set()\n",
+ "\n",
+ "#     # print(user_nouns_set)\n",
+ "#     for restaurant in knowledge_graph.nodes():\n",
+ "#         restaurant_nouns = set(pos_tag(word_tokenize(restaurant)))\n",
+ "#         jaccard_similarity = len(user_nouns_set.intersection(restaurant_nouns)) / len(user_nouns_set.union(restaurant_nouns))\n",
+ "\n",
+ "#         # print(restaurant, restaurant_nouns, jaccard_similarity)\n",
+ "#         # If Jaccard similarity is above a threshold, consider it a recommendation\n",
+ "#         if jaccard_similarity > 0.1:  # You can adjust the threshold\n",
+ "#             graph_recommendations.add(restaurant)\n",
+ "\n",
+ "#     # print(graph_recommendations)\n",
+ "#     return graph_recommendations\n",
+ "\n",
+ "# Function to get restaurant recommendations based on word overlap\n",
+ "def get_overlap_recommendations(user_input, num_recommendations):\n",
+ "    overlap_scores = pd.DataFrame()\n",
+ "    overlap_scores['combined_text'] = (restaurants['name'] + \", \" + restaurants['categories'] + \", \" + restaurants['city']).str.lower()\n",
+ "    overlap_scores['overlap_score'] = overlap_scores['combined_text'].apply(lambda x: sum(word in user_input.lower() for word in x.replace(',', '').split()))\n",
+ "    sorted_overlap_scores = overlap_scores.sort_values(by='overlap_score', ascending=False)\n",
+ "    top_recommendations_indices = [idx for idx, _ in sorted_overlap_scores.head(num_recommendations).iterrows()]\n",
+ "    return top_recommendations_indices\n",
+ "\n",
+ "\n",
+ "# Function to get restaurant recommendations based on Word2Vec embedding similarity\n",
+ "def get_embedding_recommendations(user_input, num_recommendations):\n",
+ "    # Keep only in-vocabulary tokens; wv.n_similarity raises a KeyError on unseen words\n",
+ "    tokens = [t for t in word_tokenize(user_input.lower()) if t in word2vec_model.wv]\n",
+ "    if not tokens:\n",
+ "        return []\n",
+ "    embedding_similarities = {}\n",
+ "\n",
+ "    for i, restaurant in restaurants.iterrows():\n",
+ "        restaurant_tokens = word_tokenize(restaurant['categories'].lower() + \" \" + restaurant['city'].lower())\n",
+ "        similarity = word2vec_model.wv.n_similarity(tokens, restaurant_tokens)\n",
+ "        embedding_similarities[i] = similarity\n",
+ "\n",
+ "    # Sort the dictionary by similarity scores and get top recommendations\n",
+ "    sorted_similarities = sorted(embedding_similarities.items(), key=lambda item: item[1], reverse=True)\n",
+ "    top_recommendations_indices = [idx for idx, _ in sorted_similarities[:num_recommendations]]\n",
+ "\n",
+ "    return top_recommendations_indices\n",
+ "\n",
+ "# Function to get restaurant recommendations based on BERT embedding similarity\n",
+ "def get_bert_recommendations(user_input, num_recommendations):\n",
+ "    user_embedding = bert_model.encode(user_input.lower(), convert_to_tensor=True).detach().cpu().numpy()\n",
+ "    # user_embedding = np.mean(bert_model.encode(user_input.lower(), convert_to_tensor=True).detach().cpu().numpy(), axis=0)\n",
+ "\n",
+ "    bert_similarities = {}\n",
+ "\n",
+ "    for i, restaurant in restaurants.iterrows():\n",
+ "        restaurant_embedding = restaurant_embeddings[i]\n",
+ "        # print(restaurant_embedding, user_embedding)\n",
+ "        # similarity = np.dot(user_embedding, restaurant_embedding) / (np.linalg.norm(user_embedding) * np.linalg.norm(restaurant_embedding))\n",
+ "        similarity = util.cos_sim(user_embedding, restaurant_embedding).item()  # .item() unwraps the 1x1 tensor into a plain float\n",
+ "        bert_similarities[i] = similarity\n",
+ "        # print(i, similarity)\n",
+ "\n",
+ "    # Sort the dictionary by similarity scores and get top recommendations\n",
+ "    sorted_similarities = sorted(bert_similarities.items(), key=lambda item: item[1], reverse=True)\n",
+ "    top_recommendations_indices = [idx for idx, _ in sorted_similarities[:num_recommendations]]\n",
+ "\n",
+ "    return top_recommendations_indices"
+ ]
+ },
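+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Illustrative comparison (an addition, not from the original notebook): run the three\n",
+ "# standalone recommenders on the same made-up query and print the row indices each one\n",
+ "# returns, to see where the lexical (overlap), Word2Vec, and BERT views agree or diverge.\n",
+ "sample_query = \"seafood in Tampa\"  # hypothetical query for illustration\n",
+ "print(\"overlap :\", get_overlap_recommendations(sample_query, 5))\n",
+ "print(\"word2vec:\", get_embedding_recommendations(sample_query, 5))\n",
+ "print(\"bert    :\", get_bert_recommendations(sample_query, 5))"
+ ]
+ },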
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "gU0JhAg0EB3b"
+ },
+ "outputs": [],
+ "source": [
+ "# Function to recommend restaurants based on user input, sentiment, availability, and the knowledge graph\n",
+ "def recommend_restaurants(user_input, num_recommendations=5, sentiment_recursive_break=False):\n",
+ "\n",
+ "    sr = pd.DataFrame()\n",
+ "\n",
+ "    # Tokenize and perform POS tagging on the user input\n",
+ "    tokens = word_tokenize(user_input)\n",
+ "    pos_tags = pos_tag(tokens)\n",
+ "\n",
+ "    # Extract nouns/adjectives and locations from POS tags\n",
+ "    user_nouns = [word for word, pos in pos_tags if pos.startswith('N') or pos.startswith('J')]\n",
+ "    user_locations = [word.lower().strip() for word, pos in pos_tags if pos.startswith('NNP')]  # Assume proper nouns are locations\n",
+ "    # print(user_input)\n",
+ "\n",
+ "    # Extract named entities from user input\n",
+ "    named_entities = extract_named_entities(user_input)\n",
+ "    # print(\"Named Entities:\", named_entities)\n",
+ "\n",
+ "    # Transform user input into a TF-IDF vector\n",
+ "    user_tfidf = tfidf_vectorizer.transform([user_input])\n",
+ "\n",
+ "    # Compute the cosine similarity between the user input and restaurant categories\n",
+ "    cosine_similarities = linear_kernel(user_tfidf, tfidf_matrix).flatten()\n",
+ "\n",
+ "    # Get recommended restaurants from simple word overlap\n",
+ "    overlap_recommendations = get_overlap_recommendations(user_input, num_recommendations)\n",
+ "\n",
+ "    # Get indices of restaurants with the highest similarity scores using TF-IDF\n",
+ "    tfidf_recommendations = cosine_similarities.argsort()[:-num_recommendations-1:-1]\n",
+ "\n",
+ "    # Get recommendations using Word2Vec embeddings\n",
+ "    embedding_similarities = get_embedding_recommendations(user_input, num_recommendations)\n",
+ "\n",
+ "    # Get recommendations using BERT\n",
+ "    bert_recommendations_indices = get_bert_recommendations(user_input, num_recommendations)\n",
+ "\n",
+ "    # Combine recommendations from all four approaches\n",
+ "    combined_recommendations = set(overlap_recommendations) | set(tfidf_recommendations) | set(embedding_similarities) | set(bert_recommendations_indices)\n",
+ "    # combined_recommendations = set(embedding_similarities)  # debugging leftover: uncomment to keep only the Word2Vec results\n",
+ "    # print(\"TFIDF:\", tfidf_recommendations)\n",
+ "    # print(\"overlap:\", overlap_recommendations)\n",
+ "    # print(\"w2v:\", embedding_similarities)\n",
+ "    # print(\"BERT:\", bert_recommendations_indices)\n",
+ "\n",
+ "    # Get details of recommended restaurants\n",
+ "    recommended_restaurants = restaurants.iloc[list(combined_recommendations)]\n",
+ "\n",
+ "    # print(\"rec res:\", combined_recommendations)\n",
+ "\n",
+ "    # Print recommendations (location/category refinement kept below as commented-out variants)\n",
+ "    for _, restaurant in recommended_restaurants.iterrows():\n",
+ "        # if any(category in restaurant['categories'] for category in user_nouns) and any(location in restaurant['city'] for location in user_locations):\n",
+ "        # if any(location in restaurant['city'].lower().strip() for location in user_locations):\n",
+ "        print(f\"Recommendation: {restaurant['name'], restaurant['city'], restaurant['categories'], restaurant['stars']} \")\n",
+ "\n",
+ "    if not sentiment_recursive_break:\n",
+ "        # Perform sentiment analysis using VADER\n",
+ "        sentiment_score = analyze_sentiment_vader(user_input)\n",
+ "\n",
+ "        if sentiment_score < -0.2:\n",
+ "            print(\"Considering your negative sentiment, you might prefer comforting places.\")\n",
+ "            sr = recommend_restaurants(user_input=user_input + \", \" + \", \".join(comfort_food_terms), num_recommendations=num_recommendations, sentiment_recursive_break=True)\n",
+ "            # comforting_places = restaurants[restaurants['categories'].str.contains('comfort food', case=False, na=False)]\n",
+ "            # print(\"Comforting food places suggestions:\")\n",
+ "            # print(comforting_places[['name', 'stars', 'city', 'categories']])\n",
+ "        elif sentiment_score > 0.2:\n",
+ "            print(\"Considering your positive sentiment, you might prefer something exciting.\")\n",
+ "            sr = recommend_restaurants(user_input=user_input + \", \" + \", \".join(exciting_food_terms), num_recommendations=num_recommendations, sentiment_recursive_break=True)\n",
+ "            # exciting_places = restaurants[restaurants['categories'].str.contains('nightlife|arts & entertainment|restaurants', case=False, na=False)]\n",
+ "            # print(\"Exciting food places suggestions:\")\n",
+ "            # print(exciting_places[['name', 'stars', 'city', 'categories']])\n",
+ "        else:\n",
+ "            print(\"neutral sentiment\")\n",
+ "\n",
+ "    # Rank the restaurants by rating in descending order\n",
+ "    ranked_restaurants = recommended_restaurants.sort_values(by='stars', ascending=False).head(num_recommendations)\n",
+ "\n",
+ "    # Return recommended restaurants for evaluation\n",
+ "    return ranked_restaurants"
+ ]
+ },
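+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Illustrative check of the sentiment thresholds used above (the sample sentences are\n",
+ "# made up): VADER's compound score lies in [-1, 1]; recommend_restaurants appends\n",
+ "# comfort-food terms below -0.2 and exciting-food terms above 0.2.\n",
+ "for s in [\"I had a terrible, exhausting day\",\n",
+ "          \"I'm so excited to celebrate tonight!\",\n",
+ "          \"I want Chinese food in Brooklyn\"]:\n",
+ "    print(f\"{analyze_sentiment_vader(s):+.3f}  {s}\")"
+ ]
+ },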
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "aDZYCDQxfMFH"
+ },
+ "outputs": [],
+ "source": [
+ "# Example prompt\n",
+ "user_prompt = input(\"Enter your restaurant preference (e.g., I want Chinese in Brooklyn): \")\n",
+ "\n",
+ "# Get recommendations based on the user's input, sentiment, availability, and knowledge graph\n",
+ "recommend_restaurants(user_prompt)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "asj5AxXvDjrY"
+ },
+ "outputs": [],
+ "source": [
+ "def evaluate_recommendations(predicted_df, filtered_df, k):\n",
+ "    \"\"\"\n",
+ "    Evaluate predicted restaurant recommendations against the ground truth using precision@k, recall@k, and F1@k.\n",
+ "\n",
+ "    Parameters:\n",
+ "    - predicted_df (pd.DataFrame): DataFrame with predicted restaurant recommendations.\n",
+ "    - filtered_df (pd.DataFrame): DataFrame with ground truth or actual relevant restaurants.\n",
+ "    - k (int): Value of k for top-k recommendations.\n",
+ "\n",
+ "    Returns:\n",
+ "    - precision_at_k (float): Precision@k.\n",
+ "    - recall_at_k (float): Recall@k.\n",
+ "    - f1_at_k (float): F1@k.\n",
+ "    \"\"\"\n",
+ "\n",
+ "    # Extract the top-k predicted restaurants\n",
+ "    top_k_predicted = predicted_df.head(k)\n",
+ "\n",
+ "    # Count predictions that also appear in the ground truth (match on all columns)\n",
+ "    intersection = pd.merge(top_k_predicted, filtered_df, on=['name', 'stars', 'city', 'categories', 'hours'], how='inner')\n",
+ "\n",
+ "    precision_at_k = len(intersection) / k\n",
+ "    recall_at_k = len(intersection) / len(filtered_df) if len(filtered_df) > 0 else 0\n",
+ "    f1_at_k = 2 * (precision_at_k * recall_at_k) / (precision_at_k + recall_at_k) if (precision_at_k + recall_at_k) > 0 else 0\n",
+ "\n",
+ "    return precision_at_k, recall_at_k, f1_at_k"
+ ]
+ },
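+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Tiny worked example of the metrics (toy data, added for illustration): with 1 of the\n",
+ "# top-2 predictions relevant and 2 relevant restaurants overall, precision@2 = 1/2,\n",
+ "# recall@2 = 1/2, and F1@2 = 0.5. All rows here are hypothetical.\n",
+ "cols = ['name', 'stars', 'city', 'categories', 'hours']\n",
+ "pred = pd.DataFrame([['A', 4.0, 'Tampa', 'Seafood', '9-17'], ['B', 3.5, 'Tampa', 'Pizza', '9-17']], columns=cols)\n",
+ "truth = pd.DataFrame([['A', 4.0, 'Tampa', 'Seafood', '9-17'], ['C', 4.5, 'Tampa', 'Seafood', '9-17']], columns=cols)\n",
+ "print(evaluate_recommendations(pred, truth, 2))  # expected: (0.5, 0.5, 0.5)"
+ ]
+ },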
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "n1Xrcarvzw9P"
+ },
+ "outputs": [],
+ "source": [
+ "# Ground-truth sets for evaluation (na=False guards against missing category strings)\n",
+ "cajun_in_neworleans = restaurants[\n",
+ "    (restaurants['city'] == 'New Orleans') &\n",
+ "    (restaurants['categories'].str.contains('cajun', case=False, na=False))\n",
+ "]\n",
+ "steakhouses_in_indiana = restaurants[\n",
+ "    (restaurants['city'] == 'Indianapolis') &\n",
+ "    (restaurants['categories'].str.contains('steakhouse', case=False, na=False))\n",
+ "]\n",
+ "chinese_in_philadelphia = restaurants[\n",
+ "    (restaurants['city'] == 'Philadelphia') &\n",
+ "    (restaurants['categories'].str.contains('chinese', case=False, na=False))\n",
+ "]\n",
+ "seafood_in_tampa = restaurants[\n",
+ "    (restaurants['city'] == 'Tampa') &\n",
+ "    (restaurants['categories'].str.contains('seafood', case=False, na=False))\n",
+ "]\n",
+ "italian_in_stlouis = restaurants[\n",
+ "    (restaurants['city'] == 'Saint Louis') &\n",
+ "    (restaurants['categories'].str.contains('italian', case=False, na=False))\n",
+ "]"
+ ]
+ },
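+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Added context for the evaluations below (not in the original notebook): recall@k\n",
+ "# divides by the size of the ground-truth set, so a large set makes high recall@k\n",
+ "# unattainable for small k. Printing the set sizes helps interpret the scores.\n",
+ "for label, gt in [('cajun/New Orleans', cajun_in_neworleans),\n",
+ "                  ('steakhouse/Indianapolis', steakhouses_in_indiana),\n",
+ "                  ('chinese/Philadelphia', chinese_in_philadelphia),\n",
+ "                  ('seafood/Tampa', seafood_in_tampa),\n",
+ "                  ('italian/Saint Louis', italian_in_stlouis)]:\n",
+ "    print(f\"{label}: {len(gt)} restaurants\")"
+ ]
+ },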
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "XEG8Eyh55lV7"
+ },
+ "outputs": [],
+ "source": [
+ "# Prompt entered here should target the 'cajun in New Orleans' ground truth evaluated below\n",
+ "cino = recommend_restaurants(input())"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "Iarl1OLg6CW8"
+ },
+ "outputs": [],
+ "source": [
+ "print(evaluate_recommendations(cino, cajun_in_neworleans, 1))\n",
+ "print(evaluate_recommendations(cino, cajun_in_neworleans, 5))\n",
+ "print(evaluate_recommendations(cino, cajun_in_neworleans, 10))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "jQI7iAQeEDGr"
+ },
+ "outputs": [],
+ "source": [
+ "# Prompt entered here should target the 'seafood in Tampa' ground truth evaluated below\n",
+ "sit = recommend_restaurants(input())"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "G2QAD-o8E6HZ"
+ },
+ "outputs": [],
+ "source": [
+ "print(evaluate_recommendations(sit, seafood_in_tampa, 1))\n",
+ "print(evaluate_recommendations(sit, seafood_in_tampa, 5))\n",
+ "print(evaluate_recommendations(sit, seafood_in_tampa, 10))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "v7co493SGeWY"
+ },
+ "outputs": [],
+ "source": [
+ "# Prompt entered here should target the 'steakhouses in Indianapolis' ground truth evaluated below\n",
+ "sii = recommend_restaurants(input())"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "Te-YQGBzGl70"
+ },
+ "outputs": [],
+ "source": [
+ "print(evaluate_recommendations(sii, steakhouses_in_indiana, 1))\n",
+ "print(evaluate_recommendations(sii, steakhouses_in_indiana, 5))\n",
+ "print(evaluate_recommendations(sii, steakhouses_in_indiana, 10))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "ILgW8nsLHuwd"
+ },
+ "outputs": [],
+ "source": [
+ "# Prompt entered here should target the 'Italian in Saint Louis' ground truth evaluated below\n",
+ "iisl = recommend_restaurants(input())"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "nGi-VJfcH3tH"
+ },
+ "outputs": [],
+ "source": [
+ "print(evaluate_recommendations(iisl, italian_in_stlouis, 1))\n",
+ "print(evaluate_recommendations(iisl, italian_in_stlouis, 5))\n",
+ "print(evaluate_recommendations(iisl, italian_in_stlouis, 10))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "colab": {
+ "background_save": true,
+ "base_uri": "https://localhost:8080/",
+ "height": 830
+ },
+ "id": "MNM-MiKwhN2I",
+ "outputId": "31abcb6c-b9bc-4096-f012-bf12ee7a1e24"
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Setting queue=True in a Colab notebook requires sharing enabled. Setting `share=True` (you can turn this off by setting `share=False` in `launch()` explicitly).\n",
+ "\n",
+ "Colab notebook detected. This cell will run indefinitely so that you can see errors and logs. To turn off, set debug=False in launch().\n",
+ "Running on public URL: https://4a62505eb450484ce0.gradio.live\n",
+ "\n",
+ "This share link expires in 72 hours. For free permanent hosting and GPU upgrades, run `gradio deploy` from Terminal to deploy to Spaces (https://huggingface.co/spaces)\n"
+ ]
+ },
+ {
+ "data": {
+ "text/html": [
+ "<div><iframe src=\"https://4a62505eb450484ce0.gradio.live\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
+ ],
+ "text/plain": [
+ "<IPython.core.display.HTML object>"
+ ]
+ },
+ "metadata": {},
+ "output_type": "display_data"
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Recommendation: (\"Charlie Gitto's On the Hill\", 'St. Louis', 'Restaurants, Italian', 4.5) \n",
+ "Recommendation: (\"Pietro's\", 'St. Louis', 'Italian, Restaurants', 4.0) \n",
+ "Recommendation: ('Cluster Busters', 'St. Louis', 'Italian, Restaurants, Seafood', 4.0) \n",
+ "Recommendation: ('Cibare Italian Kitchen', 'St. Louis', 'Restaurants, Italian', 4.0) \n",
+ "Recommendation: ('Toscana Pizza, Pasta & More', 'St. Petersburg', 'Italian, Restaurants, Pizza', 4.0) \n",
+ "Recommendation: (\"Del Pietro's\", 'St. Louis', 'Italian, Restaurants', 4.0) \n",
+ "Recommendation: (\"Sophia's Cucina + Enoteca\", 'St. Petersburg', 'Italian, Restaurants', 4.0) \n",
+ "Recommendation: (\"Moscato's Bella Cucina\", 'St. Petersburg', 'Italian, Restaurants', 3.5) \n",
+ "Recommendation: ('Bici Trattoria', 'St. Petersburg', 'Italian, Restaurants', 4.0) \n",
+ "Recommendation: ('Goodcents Deli Fresh Subs', 'St. Louis', 'Restaurants, Sandwiches', 4.0) \n",
+ "neutral sentiment\n"
+ ]
+ }
+ ],
+ "source": [
+ "# rr = recommend_restaurants(prompt, num_recommendations=5)\n",
+ "\n",
+ "demo = gr.Interface(fn=recommend_restaurants, inputs=[\"text\", gr.Number(value=10, precision=0, minimum=1)], outputs=\"dataframe\")\n",
+ "\n",
+ "if __name__ == \"__main__\":\n",
+ "    demo.launch(show_api=False, debug=True)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {
+ "id": "Dx0SmXB8qGln"
+ },
+ "outputs": [],
+ "source": [
+ "# import pandas as pd\n",
+ "# import networkx as nx\n",
+ "# from sklearn.feature_extraction.text import TfidfVectorizer\n",
+ "# from sklearn.metrics.pairwise import linear_kernel\n",
+ "# import nltk\n",
+ "# from nltk import pos_tag\n",
+ "# from nltk.tokenize import word_tokenize\n",
+ "# from datetime import datetime\n",
+ "\n",
+ "# # Download NLTK resources (if not already downloaded)\n",
+ "# nltk.download('punkt')\n",
+ "# nltk.download('averaged_perceptron_tagger')\n",
+ "\n",
+ "# # Load Yelp dataset (replace 'yelp_dataset.csv' with your actual dataset file)\n",
+ "# yelp_data = pd.read_csv('yelp_dataset.csv')\n",
+ "\n",
+ "# # Load existing knowledge graph (replace 'knowledge_graph.gexf' with your actual graph file)\n",
+ "# knowledge_graph = nx.read_gexf('knowledge_graph.gexf')\n",
+ "\n",
+ "# # Filter out relevant information (e.g., restaurant name, rating, location, categories, hours)\n",
+ "# restaurants = yelp_data[['name', 'stars', 'city', 'categories', 'hours']]\n",
+ "\n",
+ "# # Create a TF-IDF vectorizer to convert restaurant categories into numerical features\n",
+ "# tfidf_vectorizer = TfidfVectorizer(stop_words='english', lowercase=True)\n",
+ "# tfidf_matrix = tfidf_vectorizer.fit_transform(restaurants['categories'].fillna(''))\n",
+ "\n",
+ "# # Function to check if a restaurant is open at the current time\n",
+ "# def is_restaurant_open(hours, current_time):\n",
+ "#     for day, hours_range in hours.items():\n",
+ "#         start_time, end_time = hours_range.split('-')\n",
+ "#         if start_time <= current_time <= end_time:\n",
+ "#             return True\n",
+ "#     return False\n",
+ "\n",
+ "# # Function to get similar foods from the knowledge graph\n",
+ "# def get_similar_foods(category, knowledge_graph):\n",
+ "#     similar_foods = set()\n",
+ "\n",
+ "#     if category in knowledge_graph.nodes:\n",
+ "#         neighbors = list(knowledge_graph.neighbors(category))\n",
+ "#         similar_foods.update(neighbors)\n",
+ "\n",
+ "#     return similar_foods\n",
+ "\n",
+ "# # Function to recommend restaurants based on content-based filtering and availability\n",
+ "# def recommend_restaurants(user_input, num_recommendations=5):\n",
+ "#     # Tokenize and perform POS tagging on the user input\n",
+ "#     tokens = word_tokenize(user_input)\n",
+ "#     pos_tags = pos_tag(tokens)\n",
+ "\n",
+ "#     # Extract nouns and locations from POS tags\n",
+ "#     user_nouns = [word for word, pos in pos_tags if pos.startswith('N') or pos.startswith('J')]\n",
+ "#     user_locations = [word for word, pos in pos_tags if pos.startswith('NNP')]  # Assume proper nouns are locations\n",
+ "\n",
+ "#     # Filter restaurants based on the user's location\n",
+ "#     location_filtered_restaurants = restaurants[restaurants['city'].isin(user_locations)]\n",
+ "\n",
+ "#     # Transform user input into a TF-IDF vector\n",
+ "#     user_tfidf = tfidf_vectorizer.transform([user_input])\n",
+ "\n",
+ "#     # Compute the cosine similarity between the user input and restaurant categories\n",
+ "#     cosine_similarities = linear_kernel(user_tfidf, tfidf_matrix).flatten()\n",
+ "\n",
+ "#     # Get indices of restaurants with highest similarity scores\n",
+ "#     restaurant_indices = cosine_similarities.argsort()[:-num_recommendations-1:-1]\n",
+ "\n",
+ "#     # Get recommended restaurants\n",
+ "#     recommended_restaurants = restaurants.iloc[restaurant_indices]\n",
+ "\n",
+ "#     # Refine recommendations based on extracted information and location\n",
+ "#     for _, restaurant in recommended_restaurants.iterrows():\n",
+ "#         if any(category in restaurant['categories'] for category in user_nouns) and any(location in restaurant['city'] for location in user_locations):\n",
+ "#             print(f\"Refined recommendation: {restaurant['name']} based on type of food and location.\")\n",
+ "\n",
+ "#         # Get similar foods from the knowledge graph\n",
+ "#         additional_categories = set()\n",
+ "#         for user_noun in user_nouns:\n",
+ "#             similar_foods = get_similar_foods(user_noun, knowledge_graph)\n",
+ "#             additional_categories.update(similar_foods)\n",
+ "\n",
+ "#         # Update the categories column with similar foods\n",
+ "#         updated_categories = ', '.join(set(restaurant['categories'].split(', ') + list(additional_categories)))\n",
+ "#         print(f\"Updated Categories: {updated_categories}\")\n",
+ "\n",
+ "#         # Check restaurant availability based on current time\n",
+ "#         current_time = datetime.now().strftime(\"%H:%M\")\n",
+ "#         if is_restaurant_open(restaurant['hours'], current_time):\n",
+ "#             print(f\"{restaurant['name']} is open right now!\")\n",
+ "#         else:\n",
+ "#             print(f\"{restaurant['name']} is currently closed.\")\n",
+ "\n",
+ "# # Example prompt\n",
+ "# user_prompt = input(\"Enter your restaurant preference (e.g., I want Chinese in Brooklyn): \")\n",
+ "\n",
+ "# # Get recommendations based on the user's input and availability, prioritizing location\n",
+ "# recommend_restaurants(user_prompt)\n"
+ ]
+ }
+ ],
+ "metadata": {
+ "accelerator": "GPU",
+ "colab": {
+ "provenance": []
+ },
+ "kernelspec": {
+ "display_name": "Python 3",
+ "name": "python3"
+ },
+ "language_info": {
+ "name": "python"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 0
+ }