Update app.py
app.py CHANGED
@@ -16,25 +16,25 @@ import os
 import requests
 import json
 import pandas as pd
-import requests
 import geopandas as gpd
 import tzlocal
-import pytz
+import pytz
 from PIL import Image
 from datetime import datetime
-import matplotlib.pyplot as plt
 from geopy.exc import GeocoderTimedOut
 from geopy.geocoders import Nominatim
-import warnings
-warnings.filterwarnings('ignore')
 import folium
 from folium import plugins
 import streamlit as st
 import streamlit_folium as st_folium
 from data import flight_data
-from huggingface_hub import
+from huggingface_hub import InferenceClient
 import branca.colormap as cm
-from
+from sentence_transformers import SentenceTransformer
+from sklearn.metrics.pairwise import cosine_similarity
+from difflib import get_close_matches
+import warnings
+warnings.filterwarnings('ignore')
 import time
 
 # Cache the airport data to avoid reloading it every time
@@ -125,7 +125,6 @@ def query_llm(prompt):
 def create_flight_embeddings(geo_df):
     """Create embeddings for flight data to enable semantic search"""
     try:
-        from sentence_transformers import SentenceTransformer
         model = SentenceTransformer('all-MiniLM-L6-v2')
 
         # Create text representations of flight data
@@ -146,14 +145,12 @@ def create_flight_embeddings(geo_df):
 def find_similar_flights(identifier, geo_df, embeddings, flight_texts, threshold=0.7):
     """Find similar flights using semantic search"""
    try:
-        from sentence_transformers import SentenceTransformer
         model = SentenceTransformer('all-MiniLM-L6-v2')
 
         # Create query embedding
         query_embedding = model.encode([identifier])
 
         # Calculate similarities
-        from sklearn.metrics.pairwise import cosine_similarity
         similarities = cosine_similarity(query_embedding, embeddings)[0]
 
         # Find similar flights
@@ -221,7 +218,6 @@ def query_flight_data(geo_df, question):
     # If still no match, try fuzzy matching
     if flight_data is None or flight_data.empty:
         try:
-            from difflib import get_close_matches
             all_callsigns = geo_df['callsign'].fillna('').str.upper().unique()
             close_matches = get_close_matches(identifier, all_callsigns, n=1, cutoff=0.8)
             if close_matches:
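Note: the body of query_llm(prompt) is not part of this diff; the sketch below only illustrates how the newly completed `from huggingface_hub import InferenceClient` line is typically used once it sits at module level. The model id, env-var name, helper name, and max_new_tokens value are assumptions, not code from app.py.

# Illustrative sketch only -- the real query_llm() body is not shown in this diff.
# The model id, env-var name, and max_new_tokens value are assumptions.
import os
from huggingface_hub import InferenceClient

def query_llm_example(prompt: str) -> str:
    client = InferenceClient(
        model="mistralai/Mistral-7B-Instruct-v0.2",  # assumed model, not taken from app.py
        token=os.getenv("HF_TOKEN"),                 # assumed token source
    )
    # text_generation() sends the prompt to the Hugging Face Inference API
    # and returns the generated completion as a string.
    return client.text_generation(prompt, max_new_tokens=256)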
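For context, here is a minimal, self-contained sketch of the semantic-search flow that the create_flight_embeddings / find_similar_flights hunks refactor by moving their imports to module level. The sample flight texts and variable names are made up; only the model name, the cosine_similarity call, and the 0.7 threshold mirror the diff.

# Minimal sketch of the semantic-search flow refactored above; sample flight
# texts are illustrative, not real data from app.py.
from sentence_transformers import SentenceTransformer
from sklearn.metrics.pairwise import cosine_similarity

model = SentenceTransformer('all-MiniLM-L6-v2')

# Text representations of flights (app.py builds these from geo_df instead).
flight_texts = [
    "callsign UAL123 from San Francisco to New York",
    "callsign DLH456 from Frankfurt to Chicago",
]
embeddings = model.encode(flight_texts)

# Encode the user's identifier and rank flights by cosine similarity.
query_embedding = model.encode(["UAL123"])
similarities = cosine_similarity(query_embedding, embeddings)[0]

threshold = 0.7
matches = [(t, s) for t, s in zip(flight_texts, similarities) if s >= threshold]
print(sorted(matches, key=lambda m: m[1], reverse=True))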