repo | file | code | file_length | avg_line_length | max_line_length | extension_type
---|---|---|---|---|---|---
Unimer | Unimer-master/executions/atis/lambda_calculus/lc_evaluator.py |
# coding=utf8
import logging
from pprint import pprint
from multiprocessing import Process, Manager
from .query import *
from .transform import transform_lambda_calculus
class TimeoutException(Exception):
pass
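# get_result: converts a lambda-calculus expression to a Python expression via
# transform_lambda_calculus, evaluates it with eval(), and normalises the result.
# Tuple rows (produced by multi-variable queries) are flattened into dicts, with
# duplicate column names disambiguated by a numeric suffix. The outcome is written
# into return_dict ('is_valid', 'results') so it survives the worker-process boundary.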
def get_result(lambda_calculus, return_dict):
python_lf, _, _, _ = transform_lambda_calculus(
lambda_calculus)
print(lambda_calculus)
print(python_lf)
try:
results = eval(python_lf)
except:
logging.error("Exception", exc_info=True)
return_dict['is_valid'] = False
else:
return_dict['is_valid'] = True
if isinstance(results, list):
updated_results = list()
for r in results:
if isinstance(r, dict):
updated_results.append(r)
else:
assert isinstance(r, tuple)
new_r, names = dict(), dict()
for idx, v in enumerate(r):
assert isinstance(v, dict)
for k, value in v.items():
if k in names:
names[k] += 1
key = "%s_%d" % (k, names[k])
else:
key = k
names[k] = 0
assert key not in new_r
new_r[key] = value
updated_results.append(new_r)
results = updated_results
return_dict['results'] = results
close_connection()
return return_dict
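# get_result_with_time_limit: runs get_result in a separate Process and joins it
# with a timeout (in seconds). A Manager dict is used so the child's results are
# visible to the parent; on timeout the child is terminated and TimeoutException is
# raised. Illustrative usage (lc_1 as defined in the __main__ block below):
#   results = get_result_with_time_limit(lc_1, 600)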
def get_result_with_time_limit(lambda_calculus, time):
manager = Manager()
return_dict = manager.dict()
p = Process(target=get_result, args=(lambda_calculus, return_dict))
p.start()
p.join(time)
if p.is_alive():
p.terminate()
raise TimeoutException("Timeout")
is_valid = return_dict['is_valid']
if is_valid:
return return_dict['results']
else:
raise Exception("Lambda Calculus Execution Error")
def compare_lambda_calculus(lc_1, lc_2, time_limit=600):
try:
lc_1_results = get_result_with_time_limit(lc_1, time_limit)
lc_2_results = get_result_with_time_limit(lc_2, time_limit)
except Exception:
return False
if type(lc_1_results) != type(lc_2_results):
return False
if isinstance(lc_1_results, list):
if len(lc_1_results) != len(lc_2_results):
return False
for lc_1_row in lc_1_results:
for lc_2_row in lc_2_results:
is_same = True
used_keys = set()
for key, value in lc_1_row.items():
if key not in lc_2_row:
is_same = False
else:
# Key in lc_2_row
# Find key
if key.startswith("<lambda>"):
for k2, v2 in lc_2_row.items():
if k2 not in used_keys and k2.startswith("<lambda>") and value == v2:
used_keys.add(k2)
break
else:
is_same = False
else:
if lc_2_row[key] != value:
is_same = False
if is_same:
lc_2_results.remove(lc_2_row)
break
else:
return False
return True
else:
return lc_1_results == lc_2_results
if __name__ == '__main__':
lc_1 = '(_lambda x (_lambda y (_and (_aircraft_code x y) (_airline x dl:_al) (_flight x) (_from x seattle:_ci) (_to x salt_lake_city:_ci))))'
lc_2 = '(_lambda x (_lambda y (_and (_aircraft_code x y) (_airline x dl:_al) (_flight x) (_from x seattle:_ci) (_to x salt_lake_city:_ci))))'
formatted_results = get_result_with_time_limit(lc_1, 600)
pprint(formatted_results)
print(compare_lambda_calculus(lc_1, lc_2))
| 4,093 | 32.834711 | 145 | py |
Unimer | Unimer-master/executions/atis/lambda_calculus/transform.py |
# coding=utf8
import re
import copy
from pprint import pprint
from .query import process_entity_string
ENTITY_PATTERN = re.compile(r'^[A-Z|a-z|\\]+:_([a-z]+)$')
ENTITY_TYPE_SET = set()
FUNCTION_NAME_SET = set()
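# FUNCTION_REPLACE_MAP maps each lambda-calculus predicate (e.g. "_from", "_airline")
# to one or more candidate Python functions implemented in query.py, together with
# their arity, expected argument entity types and return type. When a predicate is
# overloaded, replace_function_name below picks a candidate by arity and by how well
# the observed argument types match "argument_type".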
FUNCTION_REPLACE_MAP = {
"_abbrev": [{"name": "abbrev", "number_of_argument": 1, "argument_type": ["airline_code"], "return_type": "bool"}],
"_capacity": [
{"name": "capacity", "number_of_argument": 1, "argument_type": ["aircraft_code"], "return_type": "capacity"},
{"name": "capacity", "number_of_argument": 1, "argument_type": ["flight_id"], "return_type": "capactiy"}
],
"_flight_number": [
{"name": "flight_number", "number_of_argument": 1, "argument_type": ["flight_id"], "return_type": "flight_number"},
{"name": "is_flight_number", "number_of_argument": 2, "argument_type": ["flight_id", "flight_number"], "return_type": "bool"}
],
"_airline_name": [{"name": "airline_name", "number_of_argument": 1, "argument_type": ["flight_id"], "return_type": "airline_name"}],
"_departure_time": [
{"name": "departure_time", "number_of_argument": 1, "argument_type": ["flight_id"], "return_type": "departure_time"},
{"name": "is_flight_departure_time", "number_of_argument": 2, "argument_type": ["flight_id", "time"], "return_type": "bool"}
],
"_miles_distant": [
{"name": "miles_distant", "number_of_argument": 2, "argument_type": ["airport_code", "city_name"], "return_type": "miles_distant"},
{"name": "miles_distant_between_city", "number_of_argument": 2, "argument_type": [
"city_name", "city_name"], "return_type": "miles_distant"}
],
"_minimum_connection_time": [
{"name": "minimum_connection_time", "number_of_argument": 1, "argument_type": ["airport_code"], "return_type": "minimum_connection_time"}
],
"_stops": [
{"name": "get_number_of_stops", "number_of_argument": 1, "argument_type": ["flight_id"], "return_type": "number_of_stops"},
{"name": "is_flight_stops_specify_number_of_times", "number_of_argument": 2, "argument_type": ["flight_id", "integer"], "return_type": "bool"}
],
"_time_elapsed": [
{"name": "time_elapsed", "number_of_argument": 1, "argument_type": ["flight_id"], "return_type": "time_elapsed"},
{"name": "is_time_elapsed", "number_of_argument": 2, "argument_type": ["flight_id", "hour"], "return_type": "bool"}
],
# Binary Predicate
"is_mf": [
{"name": "mf", "number_of_argument": 2, "argument_type": ["aircraft_code", "manufacturer"], "return_type": "bool"},
],
"_aircraft_basis_type": [
{"name": "is_aircraft_basis_type", "number_of_argument": 2,
"argument_type": ["aircraft_code", "basis_type"], "return_type": "bool"},
],
"_manufacturer": [
{"name": "is_mf", "number_of_argument": 2,
"argument_type": ["aircraft_code", "manufacturer"], "return_type": "bool"},
{"name": "is_flight_manufacturer", "number_of_argument": 2, "argument_type": ["flight_id", "manufacturer"], "return_type": "bool"}
],
"_services": [
{"name": "is_services", "number_of_argument": 2, "argument_type": ["airline_code", "city_name"], "return_type": "bool"},
{"name": "is_airline_services", "number_of_argument": 2, "argument_type": [
"airline_code", "airport_code"], "return_type": "bool"}
],
"_to": [
{"name": "is_to", "number_of_argument": 2, "argument_type": ["flight_id", "airport_code"], "return_type": "bool"},
{"name": "is_to", "number_of_argument": 2, "argument_type": ["flight_id", "city_name"], "return_type": "bool"},
{"name": "is_to", "number_of_argument": 2, "argument_type": ["flight_id", "state_name"], "return_type": "bool"}
],
"_from": [
{"name": "is_from", "number_of_argument": 2, "argument_type": ["flight_id", "airport_code"], "return_type": "bool"},
{"name": "is_from", "number_of_argument": 2, "argument_type": ["flight_id", "city_name"], "return_type": "bool"}
],
"_loc:_t": [
{"name": "is_loc_t", "number_of_argument": 2, "argument_type": ["airport_code", "city_name"], "return_type": "bool"},
{"name": "is_loc_t_state", "number_of_argument": 2, "argument_type": [
"airport_code", "state_name"], "return_type": "bool"},
{"name": "is_loc_t_city_time_zone", "number_of_argument": 2, "argument_type": ["city_name", "time_zone_code"], "return_type": "bool"},
],
"_from_airport": [
{"name": "is_from_airport", "number_of_argument": 2, "argument_type": ["transport_type", "airport_code"], "return_type": "bool"},
{"name": "is_from_airports_of_city", "number_of_argument": 2, "argument_type": ["transport_type", "city_name"], "return_type": "bool"},
],
"_to_airport": [
{"name": "is_to_airport", "number_of_argument": 2, "argument_type": ["transport_type", "city_name"], "return_type": "bool"},
],
"_to_city": [
{"name": "is_to_city", "number_of_argument": 2, "argument_type": ["transport_type", "city_name"], "return_type": "bool"},
],
"_airline": [
{"name": "is_flight_airline", "number_of_argument": 2, "argument_type": ["flight_id", "airline_code"], "return_type": "bool"},
{"name": "is_aircraft_airline", "number_of_argument": 2, "argument_type": ["aircraft_code", "airline_code"], "return_type": "bool"},
{"name": "is_airline_has_booking_class", "number_of_argument": 2, "argument_type": ["class_description", "airline_code"], "return_type": "bool"},
{"name": "is_airline_provide_meal", "number_of_argument": 2, "argument_type": [
"meal_code", "airline_code"], "return_type": "bool"},
{"name": "is_airline", "number_of_argument": 1, "argument_type": ["airline_code"], "return_type": "bool"}
],
"_airline:_e": [
{"name": "get_flight_airline_code", "number_of_argument": 1, "argument_type": ["flight_id"], "return_type": "airline_code"},
],
"_stop": [
{"name": "is_flight_stop_at_city", "number_of_argument": 2, "argument_type": ["flight_id", "city_name"], "return_type": "bool"},
{"name": "is_flight_stop_at_airport", "number_of_argument": 2,
"argument_type": ["flight_id", "airport_code"], "return_type": "bool"},
],
"_class_type": [
{"name": "is_flight_has_class_type", "number_of_argument": 2, "argument_type": ["flight_id", "class_description"], "return_type": "bool"},
{"name": "is_fare_basis_code_class_type", "number_of_argument": 2, "argument_type": ["fare_basis_code", "class_description"], "return_type": "bool"},
],
"_after_day": [
{"name": "is_flight_after_day", "number_of_argument": 2, "argument_type": ["flight_id", "day"], "return_type": "bool"}
],
"_approx_arrival_time": [
{"name": "is_flight_approx_arrival_time", "number_of_argument": 2, "argument_type": ["flight_id", "arrival_time"], "return_type": "bool"}
],
"_arrival_time": [
{"name": "arrival_time", "number_of_argument": 1, "argument_type": ["flight_id"], "return_type": "arrival_time"},
{"name": "is_flight_arrival_time", "number_of_argument": 2, "argument_type": ["flight_id", "arrival_time"], "return_type": "bool"}
],
"_approx_departure_time": [
{"name": "is_flight_approx_departure_time", "number_of_argument": 2, "argument_type": ["flight_id", "departure_time"], "return_type": "bool"}
],
"_approx_return_time": [
{"name": "is_flight_approx_return_time", "number_of_argument": 2, "argument_type": ["flight_id", "return_time"], "return_type": "bool"}
],
"_during_day": [
{"name": "is_flight_during_day", "number_of_argument": 2, "argument_type": ["flight_id", "day_period"], "return_type": "bool"}
],
"_during_day_arrival": [
{"name": "is_flight_during_day_arrival", "number_of_argument": 2, "argument_type": ["flight_id", "day_period"], "return_type": "bool"}
],
"_day_number": [
{"name": "is_flight_on_day_number", "number_of_argument": 2, "argument_type": ["flight_id", "day_number"], "return_type": "bool"}
],
"_day_arrival": [
{"name": "is_flight_day_arrival", "number_of_argument": 2, "argument_type": ["flight_id", "day"], "return_type": "bool"}
],
"_day": [
{"name": "is_flight_on_day", "number_of_argument": 2,
"argument_type": ["flight_id", "day"], "return_type": "bool"}
],
"_month": [
{"name": "is_flight_month_arrival", "number_of_argument": 2,
"argument_type": ["flight_id", "month"], "return_type": "bool"}
],
"_day_return": [
{"name": "is_flight_day_return", "number_of_argument": 2, "argument_type": ["flight_id", "day"], "return_type": "bool"}
],
"_day_number_arrival": [
{"name": "is_flight_day_number_arrival", "number_of_argument": 2, "argument_type": ["flight_id", "day_number"], "return_type": "bool"}
],
"_day_number_return": [
{"name": "is_flight_day_number_return", "number_of_argument": 2, "argument_type": ["flight_id", "day_number"], "return_type": "bool"}
],
"_month_arrival": [
{"name": "is_flight_month_arrival", "number_of_argument": 2, "argument_type": ["flight_id", "month"], "return_type": "bool"}
],
"_month_return": [
{"name": "is_flight_month_return", "number_of_argument": 2, "argument_type": ["flight_id", "month"], "return_type": "bool"}
],
"_days_from_today": [
{"name": "is_flight_days_from_today", "number_of_argument": 2,
"argument_type": ["flight_id", "integer"], "return_type": "bool"}
],
# Unit Predicate
"_aircraft": [
{"name": "is_aircraft", "number_of_argument": 1, "argument_type": ["aircraft_code"], "return_type": "bool"},
{"name": "is_flight_aircraft", "number_of_argument": 2, "argument_type": ["flight_id", "aircraft_code"], "return_type": "bool"},
],
"_city": [
{"name": "is_city", "number_of_argument": 1, "argument_type": ["city_name"], "return_type": "bool"}
],
"_airport": [
{"name": "is_airport", "number_of_argument": 1, "argument_type": ["airport_code"], "return_type": "bool"},
{"name": "is_airport_of_city", "number_of_argument": 2, "argument_type": ["city_name", "airport_code"], "return_type": "bool"}
],
"_flight": [
{"name": "is_flight", "number_of_argument": 1, "argument_type": ["flight_id"], "return_type": "bool"}
],
"_tomorrow": [
{"name": "is_tomorrow_flight", "number_of_argument": 1,
"argument_type": ["flight_id"], "return_type": "bool"}
],
"_before_day": [
{"name": "is_flight_before_day", "number_of_argument": 2,
"argument_type": ["flight_id", "month"], "return_type": "bool"}
],
"_tomorrow_arrival": [
{"name": "is_tomorrow_arrival_flight", "number_of_argument": 1,
"argument_type": ["flight_id"], "return_type": "bool"}
],
"_today": [
{"name": "is_today_flight", "number_of_argument": 1,
"argument_type": ["flight_id"], "return_type": "bool"}
],
"_next_days": [
{"name": "is_next_days_flight", "number_of_argument": 2,
"argument_type": ["flight_id", "integer"], "return_type": "bool"}
],
"_day_after_tomorrow": [
{"name": "is_day_after_tomorrow_flight", "number_of_argument": 1,
"argument_type": ["flight_id"], "return_type": "bool"}
],
"_daily": [
{"name": "is_daily_flight", "number_of_argument": 1, "argument_type": ["flight_id"], "return_type": "bool"}
],
"_discounted": [
{"name": "is_discounted_flight", "number_of_argument": 1, "argument_type": ["flight_id"], "return_type": "bool"}
],
"_connecting": [
{"name": "is_connecting_flight", "number_of_argument": 1, "argument_type": ["flight_id"], "return_type": "bool"}
],
"_oneway": [
{"name": "is_oneway", "number_of_argument": 1, "argument_type": ["flight_id"], "return_type": "bool"}
],
"_has_stops": [
{"name": "is_flight_has_stop", "number_of_argument": 1, "argument_type": ["flight_id"], "return_type": "bool"}
],
"_nonstop": [
{"name": "is_non_stop_flight", "number_of_argument": 1, "argument_type": ["flight_id"], "return_type": "bool"}
],
"_meal:_t": [
{"name": "is_meal", "number_of_argument": 1, "argument_type": ["meal_code"], "return_type": "bool"}
],
"_meal": [
{"name": "get_flight_meal", "number_of_argument": 1,
"argument_type": ["flight_id"], "return_type": "meal_description"},
{"name": "is_flight_has_specific_meal", "number_of_argument": 2,
"argument_type": ["flight_id", "meal_description"], "return_type": "bool"}
],
"_meal_code": [
{"name": "is_meal_code", "number_of_argument": 1, "argument_type": ["meal_code"], "return_type": "bool"},
{"name": "is_flight_meal_code", "number_of_argument": 2, "argument_type": ["flight_id", "meal_code"], "return_type": "bool"},
],
"_has_meal": [
{"name": "is_flight_has_meal", "number_of_argument": 1, "argument_type": ["flight_id"], "return_type": "bool"}
],
"_tonight": [
{"name": "is_flight_tonight", "number_of_argument": 1, "argument_type": ["flight_id"], "return_type": "bool"}
],
"_booking_class:_t": [
{"name": "is_booking_class_t", "number_of_argument": 1,
"argument_type": ["class_description"], "return_type": "bool"},
],
"_booking_class": [
{"name": "get_flight_booking_class", "number_of_argument": 1,
"argument_type": ["flight_id"], "return_type": "class_description"},
{"name": "is_flight_has_booking_class", "number_of_argument": 2,
"argument_type": ["flight_id", "class_description"], "return_type": "bool"},
],
"_class_of_service": [
{"name": "is_class_of_service", "number_of_argument": 1, "argument_type": ["class_description"], "return_type": "bool"}
],
"_fare_basis_code": [
{"name": "is_fare_basis_code", "number_of_argument": 1, "argument_type": ["fare_basis_code"], "return_type": "bool"},
{"name": "is_flight_has_specific_fare_basis_code", "number_of_argument": 2,
"argument_type": ["flight_id", "fare_basis_code"], "return_type": "bool"},
{"name": "is_specific_fare_basis_code", "number_of_argument": 2, "argument_type": ["fare_basis_code", "fare_basis_code"], "return_type": "bool"}
],
"_economy": [
{"name": "is_flight_economy", "number_of_argument": 1,
"argument_type": ["flight_id"], "return_type": "bool"},
{"name": "is_economy", "number_of_argument": 1, "argument_type": ["fare_basis_code"], "return_type": "bool"},
],
"_fare": [
{"name": "get_flight_fare", "number_of_argument": 1, "argument_type": ["flight_id"], "return_type": "one_direction_cost"},
{"name": "get_booking_class_fare", "number_of_argument": 1, "argument_type": ["class_description"], "return_type": "one_direction_cost"},
{"name": "is_fare", "number_of_argument": 1, "argument_type": ["fare_id"], "return_type": "bool"},
{"name": "is_flight_cost_fare", "number_of_argument": 2, "argument_type": ["flight_id", "dollar"], "return_type": "bool"},
],
"_cost": [
{"name": "get_flight_cost", "number_of_argument": 1, "argument_type": [
"flight_id"], "return_type": "round_trip_cost"},
],
"_aircraft_code:t": [
{"name": "is_aircraft_code_t", "number_of_argument": 1, "argument_type": ["aircraft_code"], "return_type": "bool"}
],
"_aircraft_code": [
{"name": "get_flight_aircraft_code", "number_of_argument": 1, "argument_type": ["flight_id"], "return_type": "aircraft_code"},
{"name": "is_flight_with_specific_aircraft", "number_of_argument": 2, "argument_type": ["flight_id", "aircraft_code"], "return_type": "bool"}
],
"_ground_transport": [
{"name": "is_ground_transport", "number_of_argument": 1, "argument_type": ["transport_type"], "return_type": "bool"}
],
"_rental_car": [
{"name": "is_rental_car", "number_of_argument": 1, "argument_type": ["transport_type"], "return_type": "bool"}
],
"_limousine": [
{"name": "is_limousine", "number_of_argument": 1, "argument_type": ["transport_type"], "return_type": "bool"}
],
"_rapid_transit": [
{"name": "is_rapid_transit", "number_of_argument": 1, "argument_type": ["transport_type"], "return_type": "bool"}
],
"_taxi": [
{"name": "is_taxi", "number_of_argument": 1, "argument_type": ["transport_type"], "return_type": "bool"}
],
"_air_taxi_operation": [
{"name": "is_air_taxi_operation", "number_of_argument": 1, "argument_type": [
"transport_type"], "return_type": "bool"}
],
"_round_trip": [
{"name": "is_round_trip", "number_of_argument": 1, "argument_type": ["flight_id"], "return_type": "bool"}
],
"_weekday": [
{"name": "is_ground_transport_on_weekday", "number_of_argument": 1, "argument_type": ["transport_type"], "return_type": "bool"},
{"name": "is_flight_on_weekday", "number_of_argument": 1, "argument_type": ["flight_id"], "return_type": "bool"},
],
"_year": [
{"name": "is_flight_on_year", "number_of_argument": 2, "argument_type": ["flight_id", "year"], "return_type": "bool"},
],
"_time_zone_code": [
{"name": "is_time_zone_code", "number_of_argument": 1, "argument_type": ["time_zone_code"], "return_type": "bool"},
],
"_turboprop": [
{"name": "is_flight_turboprop", "number_of_argument": 1,
"argument_type": ["flight_id"], "return_type": "bool"},
{"name": "is_turboprop", "number_of_argument": 1, "argument_type": ["aircraft_code"], "return_type": "bool"},
],
"_jet": [
{"name": "is_flight_jet", "number_of_argument": 1,
"argument_type": ["flight_id"], "return_type": "bool"},
],
"_aircraft_code:_t": [
{"name": "aircraft_code", "number_of_argument": 1,
"argument_type": ["aircraft_code"], "return_type": "bool"},
],
# Meta Predicate
"_equals": [
{"name": "equals", "number_of_argument": 2,
"argument_type": ["*", "*"], "is_meta": True, "return_type": "bool"},
],
"_equals:_t": [
{"name": "equals", "number_of_argument": 2,
"argument_type": ["*", "*"], "is_meta": True, "return_type": "bool"},
],
"_<": [
{"name": "less_than", "number_of_argument": 2,
"argument_type": ["*", "*"], "is_meta": True, "return_type": "bool"},
],
"_>": [
{"name": "larger_than", "number_of_argument": 2,
"argument_type": ["*", "*"], "is_meta": True, "return_type": "bool"},
],
"_=": [
{"name": "numerical_equals", "number_of_argument": 2,
"argument_type": ["*", "*"], "is_meta": True, "return_type": "bool"},
],
"the": [
{"name": "the", "number_of_argument": 1,
"argument_type": ["*"], "is_meta": True, "return_type": "*"},
],
"_not": [
{"name": "not", "number_of_argument": 1,
"argument_type": ["*"], "is_meta": True, "return_type": "bool"},
],
"_ground_fare": [
{"name": "get_ground_fare", "number_of_argument": 1,
"argument_type": ["transport_type"], "return_type": "ground_fare"},
],
"_stop_arrival_time": [
{"name": "get_flight_stop_arrival_time", "number_of_argument": 1,
"argument_type": ["flight_id"], "return_type": "stop_arrival_time"},
],
"_restriction_code": [
{"name": "get_flight_restriction_code", "number_of_argument": 1,
"argument_type": ["flight_id"], "return_type": "restriction_code"},
]
}
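# ENTITY_SET_MAP maps an entity type to the query.py helper that enumerates all
# entities of that type (e.g. "flight_id" -> get_all_flight_ids). These functions
# supply the domains over which exists/count/argmax and the generated list
# comprehensions iterate.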
ENTITY_SET_MAP = {
"flight_id": "get_all_flight_ids",
"city_name": "get_all_city_names",
"airline_code": "get_all_airline_codes",
"aircraft_code": "get_all_aircraft_codes",
"airport_code": "get_all_airport_codes",
"class_description": "get_all_booking_class_descriptions",
"transport_type": "get_all_transport_types",
"meal_code": "get_all_meal_codes",
"meal_description": "get_all_meal_descriptions",
"fare_basis_code": "get_all_fare_basis_codes",
"time_zone_code": "get_all_time_zone_codes",
"one_direction_cost": "get_all_one_direction_cost",
"capacity": "get_all_capacity",
"flight_number": "get_all_flight_number",
"departure_time": "get_all_departure_time",
"stop_arrival_time": "get_all_stop_arrival_time"
}
def read_data(path):
questions, logical_forms = list(), list()
with open(path, 'r') as f:
for line in f:
line = line.strip()
splits = line.split('\t')
questions.append(splits[0])
logical_forms.append(splits[1])
return questions, logical_forms
def standardize_lambda_calculus_varnames(ans):
toks = ans.split(' ')
varnames = {}
new_toks = []
for t in toks:
if t == 'x' or t.startswith('$'):
if ':' in t:
# var definition
splits = t.split(':')
name, var_type = splits[0], splits[1]
assert name not in varnames
new_name = '$v%d' % len(varnames)
varnames[name] = new_name
new_toks.append(new_name + ":" + var_type)
else:
# t is a variable name
if t in varnames:
new_toks.append(varnames[t])
else:
new_varname = '$v%d' % len(varnames)
varnames[t] = new_varname
new_toks.append(new_varname)
else:
new_toks.append(t)
lf = ' '.join(new_toks)
return lf
def split_tokens(lf):
replacements = [
('(', ' ( '),
(')', ' ) '),
("\\+", " \\+ "),
]
for a, b in replacements:
lf = lf.replace(a, b)
return lf
def normalize_lambda_calculus(logical_form):
lf = split_tokens(logical_form)
lf = re.sub(' +', ' ', lf)
s = standardize_lambda_calculus_varnames(lf)
variables = ["$v0", "$v1", "$v2", "$v3"]
for var in variables:
s = s.replace(var + " e ", "%s:e " % var)
s = s.replace(var + " i ", "%s:i " % var)
s = s.replace(' :', ":").replace(
'\s+', ' ').replace("( ", "(").replace(" )", ")").replace(')\s)', '))').strip().lower()
s = re.sub(' +', ' ', s)
return s
def tokenize_logical_form(logical_form):
replacements = [
('(', ' ( '),
(')', ' ) '),
# ("\\+", " \\+ "),
]
normalized_lc = re.sub(' +', ' ', logical_form)
for a, b in replacements:
normalized_lc = normalized_lc.replace(a, b)
tokens = [t for t in normalized_lc.split()]
return tokens
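# A Node represents an already-translated sub-expression: its Python form (lf), the
# token span it covers (lidx, ridx), the entity-type constraints inferred for the
# bound variables, and the pairs of variables that co-occur in some predicate
# (variable_interactions).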
class Node:
def __init__(self, lf, lidx, ridx, variable_type_constraints, variable_interactions):
self.lf = lf
self.lidx = lidx
self.ridx = ridx
self.variable_type_constraints = variable_type_constraints
self.variable_interactions = variable_interactions
def extract_entity(lf):
tokens = lf.split(":_")
return tokens
def get_function_return_type(function_name):
candidates = list()
for _, funcs in FUNCTION_REPLACE_MAP.items():
for f in funcs:
if f['name'] == function_name:
candidates.append(f['return_type'])
if len(candidates) > 0:
break
if len(candidates) > 0:
for t in candidates:
if t != 'bool':
return t
return None
def add_new_interactions(interaction_set, new_interaction):
variable_map = {
"x": 0,
"y": 1,
"z": 2,
"m": 3
}
mapped_interaction = tuple(sorted(list(new_interaction), key=lambda x: variable_map[x]))
interaction_set.add(mapped_interaction)
def get_all_variables_from_interactions(interaction_set):
variables = set()
for inter in interaction_set:
variables.add(inter[0])
variables.add(inter[1])
return variables
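# replace_function_name resolves a lambda-calculus predicate to a concrete Python
# function name. Candidates are filtered by arity; ties are broken by counting how
# many arguments match the candidate's expected entity types (using ENTITY_PATTERN
# and any constraints already known for the variables x/y/z/m). It also returns the
# new type constraints and variable-interaction pairs implied by this call.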
def replace_function_name(
function_name,
number_of_arguments,
arguments,
argument_variable_constraints,
argument_variable_interactions
):
if function_name not in FUNCTION_REPLACE_MAP:
assert function_name in ['_minutes_distant',
'_named', '_overnight']
# print(function_name)
return function_name, dict(), set()
names = FUNCTION_REPLACE_MAP[function_name]
replaced_function_name = function_name
argument_types = None
is_meta_function = False
if len(names) == 1:
replaced_function_name = names[0]['name']
argument_types = names[0]["argument_type"]
is_meta_function = "is_meta" in names[0] and names[0]['is_meta'] is True
else:
# select by argument number
feasible_index = []
for idx, name in enumerate(names):
if name['number_of_argument'] == number_of_arguments:
replaced_function_name = name['name']
argument_types = name["argument_type"]
feasible_index.append(idx)
if len(feasible_index) == 0:
raise Exception("No feasible functions in Python")
elif len(feasible_index) == 1:
idx = feasible_index[0]
replaced_function_name = names[idx]['name']
argument_types = names[idx]['argument_type']
is_meta_function = "is_meta" in names[idx] and names[idx]['is_meta'] is True
else:
best_index = 0
best_count = 0
for idx in feasible_index:
name = names[idx]
types = names[idx]['argument_type']
count = 0
for t, arg in zip(types, arguments):
_arg = arg.replace('"', "")
match = ENTITY_PATTERN.match(_arg)
if match:
e, et = process_entity_string(_arg)
if et == t:
count += 1
else:
if _arg in ['x', 'y', 'z', 'm'] and _arg in argument_variable_constraints:
et = argument_variable_constraints[_arg]
if et == t:
count += 1
if count > best_count:
best_index = idx
best_count = count
replaced_function_name = names[best_index]['name']
argument_types = names[best_index]['argument_type']
is_meta_function = "is_meta" in names[best_index] and names[best_index]['is_meta'] is True
# Derive type constraints
# print(function_name, replaced_function_name, number_of_arguments, arguments, argument_types)
variable_constraints = dict()
assert number_of_arguments == len(argument_types)
if is_meta_function:
if replaced_function_name in ['equals', 'numerical_equals', 'less_than', 'larger_than']:
if arguments[0] in ["x", "y", "z", "m"]:
arg_variable = arguments[0]
arg_func = arguments[1]
elif arguments[1] in ["x", "y", "z", "m"]:
arg_variable = arguments[1]
arg_func = arguments[0]
else:
arg_variable, arg_func = None, None
if arg_variable is not None and arg_func is not None:
match = ENTITY_PATTERN.match(arg_func.replace('"', ""))
if match:
e, et = process_entity_string(arg_func.replace('"', ""))
variable_constraints[arg_variable] = et
elif arg_func.startswith("argmin(") or arg_func.startswith("argmax("):
for _var in [" y:", " z:", " m:"]:
processed_var = _var.replace(":", "").strip()
if _var in arg_func and processed_var in argument_variable_constraints:
variable_constraints[arg_variable] = argument_variable_constraints[processed_var]
break
else:
arg_func_return_type = get_function_return_type(arg_func[:arg_func.index("(")])
# print(arg_func)
# print(arg_func[:arg_func.index("(")])
# print(arg_func_return_type)
if arg_func_return_type is not None and arg_func_return_type not in ['*', 'bool']:
variable_constraints[arg_variable] = arg_func_return_type
else:
for argument, atype in zip(arguments, argument_types):
if argument in ["x", "y", "z", "m"]:
variable_constraints[argument] = atype
# Find out interactions
interactions = set()
assert len(arguments) == len(argument_variable_interactions)
if len(arguments) == 1:
if len(argument_variable_interactions[0]) > 0:
interactions = argument_variable_interactions[0]
else:
if arguments[0] in ["x", "y", "z", "m"]:
add_new_interactions(interactions, (arguments[0], arguments[0],))
else:
assert len(arguments) == 2
if len(argument_variable_interactions[0]) == 0 and len(argument_variable_interactions[1]) == 0:
if arguments[0] in ["x", "y", "z", "m"] and arguments[1] in ["x", "y", "z", "m"]:
add_new_interactions(
interactions, (arguments[0], arguments[1],))
elif arguments[0] in ["x", "y", "z", "m"]:
add_new_interactions(
interactions, (arguments[0], arguments[0],))
elif arguments[1] in ["x", "y", "z", "m"]:
add_new_interactions(
interactions, (arguments[1], arguments[1],))
elif len(argument_variable_interactions[0]) > 0 and len(argument_variable_interactions[1]) > 0:
variables_0 = get_all_variables_from_interactions(
argument_variable_interactions[0])
variables_1 = get_all_variables_from_interactions(
argument_variable_interactions[1])
for v0 in variables_0:
for v1 in variables_1:
add_new_interactions(interactions, (v0, v1,))
elif len(argument_variable_interactions[0]) > 0:
# len(argument_variable_interactions[1]) == 0
if arguments[1] in ["x", "y", "z", "m"]:
variables_0 = get_all_variables_from_interactions(
argument_variable_interactions[0])
for v0 in variables_0:
add_new_interactions(interactions, (v0, arguments[1],))
else:
interactions = argument_variable_interactions[0]
elif len(argument_variable_interactions[1]) > 0:
# len(argument_variable_interactions[0]) == 0
if arguments[0] in ["x", "y", "z", "m"]:
variables_1 = get_all_variables_from_interactions(
argument_variable_interactions[1])
for v1 in variables_1:
add_new_interactions(interactions, (v1, arguments[0],))
else:
interactions = argument_variable_interactions[1]
return replaced_function_name, variable_constraints, interactions
def update_variable_type_constraints(constraints_1, constraints_2):
for key, value in constraints_2.items():
if key not in constraints_1:
constraints_1[key] = value
else:
# print(key, value, constraints_1[key])
assert constraints_1[key] == value
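# transform_lambda_calculus is the main entry point. It normalises the logical form,
# renames the bound variables to x/y/z/m, then walks the bracketed structure
# bottom-up, translating every sub-expression into Python (lambda, argmin/argmax/sum,
# count/exists/the, max/min, and/or, and ordinary predicates). Remaining free
# variables are finally eliminated by rewriting the expression into list
# comprehensions over the entity sets or into get_target_value(s) calls.
# Illustrative call (hypothetical input, in the normalised syntax used by the
# __main__ block below):
#   transform_lambda_calculus(
#       '( _lambda $0 e ( _and ( _flight $0 ) ( _from $0 boston:_ci ) ) )')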
def transform_lambda_calculus(logical_form):
normalized_lf = normalize_lambda_calculus(logical_form)
# Replace Variable
python_lf = normalized_lf.replace('$v0:e ', 'x ')
python_lf = python_lf.replace('$v1:e ', 'y ')
python_lf = python_lf.replace('$v2:e ', 'z ')
python_lf = python_lf.replace('$v3:e ', 'm ')
python_lf = python_lf.replace('$v0:i ', 'x ')
python_lf = python_lf.replace('$v1:i ', 'y ')
python_lf = python_lf.replace('$v2:i ', 'z ')
python_lf = python_lf.replace('$v3:i ', 'm ')
python_lf = python_lf.replace('$v0', 'x')
python_lf = python_lf.replace('$v1', 'y')
python_lf = python_lf.replace('$v2', 'z')
python_lf = python_lf.replace('$v3', 'm')
python_lf = re.sub(' +', ' ', python_lf)
python_lf_variable_type_constraints = dict()
print(python_lf)
global_variable_constraints = dict()
free_variables = set()
python_lf_variable_interactions = set()
if python_lf.count('(') == 0:
# Simple case: a single entity
entity_name, entity_type = extract_entity(python_lf)
ENTITY_TYPE_SET.add(entity_type)
python_lf = '%s("%s")' % (entity_type, entity_name)
else:
left_brackets = list()
# original_lf = copy.deepcopy(python_lf)
tokens = tokenize_logical_form(python_lf)
nodes = list()
for tidx, token in enumerate(tokens):
if token == '(':
left_brackets.append(tidx)
elif token == ')':
node_variable_type_constraints = dict()
pidx = left_brackets.pop()
children_nodes = list()
for nidx, node in enumerate(nodes):
if pidx < node.lidx and tidx > node.ridx:
children_nodes.append(node)
for n in children_nodes:
nodes.remove(n)
if len(children_nodes) == 0:
sub_tokens = tokens[pidx + 1 :tidx]
function_name = sub_tokens[0]
number_of_arguments = len(sub_tokens[1:])
replaced_function_name, node_variable_type_constraints, node_variable_interactions = replace_function_name(
function_name, number_of_arguments, sub_tokens[1:], global_variable_constraints, [set() for i in range(number_of_arguments)])
_sub_lf = "%s(%s)" % (
replaced_function_name, ','.join(sub_tokens[1:]))
else:
# Has children
sub_tokens = tokens[pidx + 1:tidx]
function_name = sub_tokens[0]
# if ":" in function_name:
# function_name = function_name.split(":")[0]
_inside_bracket_stack = 0
other_children = list()
children_num = 0
children_position = list()
for sub_token in sub_tokens[1:]:
if sub_token == '(':
_inside_bracket_stack += 1
if _inside_bracket_stack == 1:
children_num += 1
children_position.append('bracket')
elif sub_token == ')':
_inside_bracket_stack -= 1
else:
if _inside_bracket_stack == 0:
children_num += 1
other_children.append(sub_token)
children_position.append('token')
assert children_num == len(children_position)
string = list()
if function_name == '_lambda':
assert len(other_children) == 1 and len(children_nodes) == 1
child_node = children_nodes.pop(0)
variable = other_children.pop(0)
node_variable_type_constraints = copy.deepcopy(child_node.variable_type_constraints)
node_variable_interactions = copy.deepcopy(child_node.variable_interactions)
free_variables.add(variable)
_sub_lf = "lambda %s: %s" % (variable, child_node.lf)
elif function_name in ['_argmin', '_argmax', '_sum']:
assert len(other_children) == 1 and len(children_nodes) == 2
variable = other_children.pop(0)
node_1, node_2 = children_nodes.pop(0), children_nodes.pop(0)
node_variable_interactions = copy.deepcopy(
node_1.variable_interactions)
update_variable_type_constraints(node_variable_type_constraints, node_1.variable_type_constraints)
update_variable_type_constraints(node_variable_type_constraints, node_2.variable_type_constraints)
if variable in node_variable_type_constraints:
set_function = ENTITY_SET_MAP[node_variable_type_constraints[variable]]
else:
set_function = "None"
if function_name in ['_argmin', '_argmax']:
replaced_function_name = function_name[1:]
else:
replaced_function_name = 'sum_predicate'
_sub_lf = "%s(%s,%s,%s)" % (
replaced_function_name,
("(lambda %s: %s)" % (variable, node_1.lf)),
("(lambda %s: %s)" % (variable, node_2.lf)),
set_function
)
elif function_name in ['_count', '_exists', '_the']:
assert len(other_children) == 1 and len(children_nodes) == 1
variable = other_children.pop(0)
child_node = children_nodes.pop(0)
update_variable_type_constraints(node_variable_type_constraints, child_node.variable_type_constraints)
node_variable_interactions = copy.deepcopy(child_node.variable_interactions)
# print(node_variable_type_constraints, variable)
if variable in node_variable_type_constraints:
_sub_lf = "%s((lambda %s: %s), %s)" % (
function_name[1:], variable, child_node.lf,
ENTITY_SET_MAP[node_variable_type_constraints[variable]])
else:
_sub_lf = "%s((lambda %s: %s), %s)" % (
function_name[1:], variable, child_node.lf, None)
elif function_name in ['_max', '_min']:
# ad-hoc
assert len(other_children) == 1 and len(
children_nodes) == 1
variable = other_children.pop(0)
child_node = children_nodes.pop(0)
update_variable_type_constraints(
node_variable_type_constraints, child_node.variable_type_constraints)
node_variable_interactions = copy.deepcopy(
child_node.variable_interactions)
assert child_node.lf.startswith('exists(')
child_lf = child_node.lf[len('exists('):-1]
ridx = child_lf.rindex(",")
function_entity_set = child_lf[ridx + 1:].strip()
child_lf = child_lf[:ridx]
# replace
pattern = '(numerical_equals\((.*),%s\))' % variable
results = re.findall(pattern, child_lf)
assert len(results) == 1
results = results[0]
child_lf = child_lf.replace(results[0], "True")
numerical_function = "lambda %s: %s(%s)" % (
variable, results[1][:results[1].index('(')], variable)
_sub_lf = "%s_predicate(%s, (%s), %s)" % (
function_name[1:], child_lf, numerical_function, function_entity_set)
elif function_name in ['_and', '_or']:
node_variable_interactions = set()
for position in children_position:
if position == 'bracket':
n = children_nodes.pop(0)
string.append(n.lf)
update_variable_type_constraints(node_variable_type_constraints, n.variable_type_constraints)
node_variable_interactions |= n.variable_interactions
else:
sub_token = other_children.pop(0)
string.append(sub_token)
if function_name == '_and':
_sub_lf = "(%s)" % (' and '.join(string))
else:
_sub_lf = "(%s)" % (' or '.join(string))
else:
argument_variable_interactions = list()
for position in children_position:
if position == 'bracket':
n = children_nodes.pop(0)
string.append(n.lf)
update_variable_type_constraints(node_variable_type_constraints, n.variable_type_constraints)
argument_variable_interactions.append(
n.variable_interactions)
else:
sub_token = other_children.pop(0)
string.append(sub_token)
argument_variable_interactions.append(set())
replaced_function_name, variable_type_constraints, node_variable_interactions = replace_function_name(
function_name, children_num, string, global_variable_constraints, argument_variable_interactions)
# Update variable constraints
update_variable_type_constraints(node_variable_type_constraints, variable_type_constraints)
_sub_lf = "%s(%s)" % (
replaced_function_name, ','.join(string))
FUNCTION_NAME_SET.add(function_name)
new_node = Node(
_sub_lf, pidx, tidx, node_variable_type_constraints, node_variable_interactions)
global_variable_constraints.update(node_variable_type_constraints)
# print(node_variable_type_constraints)
nodes.append(new_node)
else:
if tidx > 0 and (not tokens[tidx - 1] == '(') and ":_" in token:
# token is not function name
tokens[tidx] = '"%s"' % tokens[tidx]
assert len(nodes) == 1
python_lf_variable_type_constraints = nodes[0].variable_type_constraints
python_lf_variable_interactions = nodes[0].variable_interactions
python_lf = nodes[0].lf
# Remove unit variable interactions
for v in ["x", "y", "z", "m"]:
python_lf_variable_interactions -= {(v, v,)}
# Optimization
# 1. Optimize for lambda x: exists(lambda y: )
if python_lf.startswith("lambda x: exists("):
child_lf = python_lf[len("lambda x:exists(("):-1].strip()
ridx = child_lf.rindex(",")
function_entity_set = child_lf[ridx + 1:].strip()
child_lf = child_lf[:ridx]
# print(child_lf)
# replace
pattern = '(numerical_equals\((.*),x\))'
results = re.findall(pattern, child_lf)
if len(results) > 0:
assert len(results) == 1
results = results[0]
child_lf = child_lf.replace(results[0], "True")
_numerical_function = results[1][:results[1].index('(')]
if _numerical_function == 'get_ground_fare':
to_city_result = re.findall('(is_to_city\(y,(.*?)\))', child_lf)
from_airport_result = re.findall(
'(is_from_airport\(y,(.*?)\))', child_lf)
to_city, from_airport = None, None
if len(to_city_result) > 0:
assert len(to_city_result) == 1
to_city_result = to_city_result[0]
to_city = to_city_result[1]
if len(from_airport_result) > 0:
assert len(from_airport_result) == 1
from_airport_result = from_airport_result[0]
from_airport = from_airport_result[1]
if to_city is not None and from_airport is not None:
numerical_function = "lambda x: get_ground_fare_3(%s,%s,x)" % (to_city, from_airport)
elif to_city is not None:
numerical_function = "lambda x: get_ground_fare_1(%s, x)" % (to_city)
elif from_airport is not None:
# from_airport
numerical_function = "lambda x: get_ground_fare_2(%s, x)" % (
from_airport)
else:
# All None
numerical_function = "lambda x: get_ground_fare(x)"
elif _numerical_function == '_minutes_distant':
to_city_result = re.findall('(is_to_city\(y,(.*?)\))', child_lf)
from_airport_result = re.findall(
'(is_from_airport\(y,(.*?)\))', child_lf)
to_city, from_airport = None, None
if len(to_city_result) > 0:
assert len(to_city_result) == 1
to_city_result = to_city_result[0]
to_city = to_city_result[1]
if len(from_airport_result) > 0:
assert len(from_airport_result) == 1
from_airport_result = from_airport_result[0]
from_airport = from_airport_result[1]
if to_city is not None and from_airport is not None:
numerical_function = "lambda x: get_minutes_distant_3(%s,%s)" % (
to_city, from_airport)
elif to_city is not None:
numerical_function = "lambda x: get_minutes_distant_1(%s)" % (
to_city)
elif from_airport is not None:
# from_airport
numerical_function = "lambda x: get_minutes_distant_2(%s)" % (
from_airport)
else:
raise Exception("Invalid _minutes_distant")
else:
numerical_function = "lambda x: %s(x)" % _numerical_function
elif len(re.findall('is_to\(y,x\)', child_lf)) == 1:
# is_to
child_lf = child_lf.replace("is_to(y,x)", "True")
numerical_function = "lambda x: get_flight_destination(x)"
elif len(re.findall('_named\(y,x\)', child_lf)) == 1:
child_lf = child_lf.replace("_named(y,x)", "True")
numerical_function = "lambda x: x"
elif len(re.findall('is_flight_stop_at_city\(y,x\)', child_lf)) == 1:
child_lf = child_lf.replace("is_flight_stop_at_city(y,x)", "True")
numerical_function = "lambda x: get_flight_stop_city(x)"
elif len(re.findall('_minutes_distant\(y,x\)', child_lf)) == 1:
child_lf = child_lf.replace("_minutes_distant(y,x)", "True")
numerical_function = "lambda x: get_miniutes_distant(x)"
else:
numerical_function = None
if numerical_function is not None:
free_variables = set()
python_lf = "get_target_value(%s, (%s), %s)" % (
child_lf, numerical_function, function_entity_set)
else:
raise Exception("Failed to Optimize")
# 2. Wrap value
if python_lf.startswith("lambda"):
# List Comprehension
# Only one variable
# if " x:" in python_lf and " y:" not in python_lf and " z:" not in python_lf and " m:" not in python_lf:
if len(free_variables) == 1:
# One free variable
# print(python_lf)
free_variables = set()
assert "x" in python_lf_variable_type_constraints
variable_type = python_lf_variable_type_constraints['x']
entity_set_func = ENTITY_SET_MAP[variable_type]
python_lf = "[e for e in %s() if (%s)(e)]" % (
entity_set_func, python_lf)
elif len(free_variables) == 2:
# Two free variables
# Try to optimize
assert python_lf.startswith("lambda x: lambda y:")
child_lf = python_lf[len("lambda x: lambda y:"):].strip()
# replace
pattern = '(numerical_equals\((.*?)\(([x|y])\),([y|x])\))'
results = re.findall(pattern, child_lf)
if len(results) > 0:
# Optimize for simple numerical_equals (e.g., get_flight_fare)
assert len(results) == 1
results = results[0]
child_lf = child_lf.replace(results[0], "True")
_numerical_function, _numerical_function_variable, _numerical_equals_variable = results[1], results[2], results[3]
predicate = "(lambda %s: %s)" % (_numerical_function_variable, child_lf)
numerical_function = "(lambda %s: %s(%s))" % (_numerical_equals_variable, _numerical_function, _numerical_equals_variable)
assert _numerical_function_variable in python_lf_variable_type_constraints
variable_type = python_lf_variable_type_constraints[_numerical_function_variable]
function_entity_set = ENTITY_SET_MAP[variable_type]
python_lf = "get_target_value(%s, %s, %s)" % (
predicate, numerical_function, function_entity_set)
free_variables = set()
else:
# Optimize for numerical_equals of count
pattern_1 = '(numerical_equals\(([x|y]),(count\(.*,\s*(get_all_.*?)\))\))'
pattern_2 = '(numerical_equals\((count\(.*,\s*(get_all_.*?)\)),([x|y]))\)'
results_1 = re.findall(pattern_1, child_lf)
results_2 = re.findall(pattern_2, child_lf)
is_valid = False
if len(results_1) > 0 or len(results_2) > 0:
if len(results_1) > 0:
results_1 = results_1[0]
count_function, count_variable = results_1[2], results_1[1]
to_be_replaced = results_1[0]
else:
results_2 = results_2[0]
count_function, count_variable = results_2[1], results_2[2]
to_be_replaced = results_2[0]
if count_variable == 'x':
primary_variable = 'y'
else:
primary_variable = 'x'
if primary_variable in python_lf_variable_type_constraints and count_variable not in python_lf_variable_type_constraints:
function_entity_set = ENTITY_SET_MAP[python_lf_variable_type_constraints[primary_variable]]
child_lf = child_lf.replace(to_be_replaced, "True")
predicate = "(lambda %s: %s)" % (primary_variable, child_lf)
numerical_function = "(lambda %s: %s)" % (
primary_variable, count_function)
python_lf = "get_target_value(%s, %s, %s)" % (
predicate, numerical_function, function_entity_set)
is_valid = True
free_variables = set()
if not is_valid:
if "x" in python_lf_variable_type_constraints and "y" in python_lf_variable_type_constraints:
if python_lf_variable_type_constraints["x"] == python_lf_variable_type_constraints["y"]:
# No relations between variables
# TODO make it systematic
# Assume that the expression is only made up of and & predicates
child_lf_subfunctions = child_lf[1:-1].split(" and ")
python_lf = list()
for v in ['x', 'y']:
result = list()
for lf in child_lf_subfunctions:
_lf = lf.strip()
_lf = _lf.replace("(", " ( ").replace(")", " ) ").replace(",", " , ")
if (" %s " % v) in _lf:
result.append(lf)
expression = "[e for e in %s() if (lambda %s: (%s))(e)]" % (
function_entity_set, v, " and ".join(result))
python_lf.append(expression)
python_lf = " + ".join(python_lf)
free_variables = set()
else:
# fail to optimize
x_function_entity_set = ENTITY_SET_MAP[python_lf_variable_type_constraints["x"]]
y_function_entity_set = ENTITY_SET_MAP[python_lf_variable_type_constraints["y"]]
python_lf = "[(xe, ye) for xe in %s() for ye in %s() if (lambda x,y: %s)(xe, ye)]" % (x_function_entity_set, y_function_entity_set, child_lf)
free_variables = set()
elif len(free_variables) >= 3:
# Three or four free variables
assert python_lf.startswith("lambda x: lambda y: lambda z: lambda m:") \
or python_lf.startswith("lambda x: lambda y: lambda z:")
if python_lf.startswith("lambda x: lambda y: lambda z: lambda m:"):
child_lf = python_lf[len(
"lambda x: lambda y: lambda z: lambda m:"): ].strip()
variable_list = ['x', 'y', 'z', 'm']
else:
child_lf = python_lf[len(
"lambda x: lambda y: lambda z:"):].strip()
variable_list = ['x', 'y', 'z']
if all([v in python_lf_variable_type_constraints for v in variable_list]):
sample_variable_type = python_lf_variable_type_constraints["x"]
if all([python_lf_variable_type_constraints[v] == sample_variable_type for v in variable_list]) and len(python_lf_variable_interactions) == 0:
function_entity_set = ENTITY_SET_MAP[python_lf_variable_type_constraints["x"]]
# No relations between variables
# TODO make it systematic
# Assume that the expression is only made up of and & predicates
child_lf_subfunctions = child_lf[1:-1].split(" and ")
python_lf = list()
for v in variable_list:
result = list()
for lf in child_lf_subfunctions:
_lf = lf.strip()
_lf = _lf.replace("(", " ( ").replace(")", " ) ").replace(",", " , ")
if (" %s " % v) in _lf:
result.append(lf)
expression = "[e for e in %s() if (lambda %s: (%s))(e)]" % (
function_entity_set, v, " and ".join(result))
python_lf.append(expression)
python_lf = " + ".join(python_lf)
free_variables = set()
else:
# analyze interactions
variable_dependencies = dict()
for s, t in python_lf_variable_interactions:
if s not in variable_dependencies:
variable_dependencies[s] = list()
if t not in variable_dependencies:
variable_dependencies[t] = list()
variable_dependencies[s].append(t)
variable_dependencies[t].append(s)
if len(variable_dependencies) == len(variable_list):
is_single_target_dependency = False
number_of_single_target_variable = 0
target_variables = set()
for v, dependents in variable_dependencies.items():
if len(dependents) == 1:
target_variables.add(dependents[0])
number_of_single_target_variable += 1
if number_of_single_target_variable == len(variable_list) - 1 \
and len(target_variables) == 1:
# Optimize
target_variable = list(target_variables)[0]
pattern_1 = '(numerical_equals\((.*?)\(%s\),([y|x|z|m])\))' % target_variable
pattern_2 = '(numerical_equals\(([y|x|z|m])\),(.*?)\(%s\),)' % target_variable
results_1 = re.findall(pattern_1, child_lf)
results_2 = re.findall(pattern_2, child_lf)
results = results_1 + results_2
if len(results) == len(variable_list) - 1:
is_single_target_dependency = True
predicate = child_lf
numerical_functions = list()
for r in results:
predicate = predicate.replace(r[0], "True")
numerical_functions.append("(lambda %s: %s(%s))" % (
r[2], r[1], r[2]))
predicate = "(lambda %s: %s)" % (
target_variable, predicate)
function_entity_set = ENTITY_SET_MAP[python_lf_variable_type_constraints[target_variable]]
if len(numerical_functions) == 2:
python_lf = "get_target_values(%s, [%s, %s], %s)" % (
predicate, numerical_functions[0], numerical_functions[1], function_entity_set)
else:
assert len(numerical_functions) == 3
python_lf = "get_target_values(%s, [%s,%s,%s,] %s)" % (
predicate, numerical_functions[0], numerical_functions[1], numerical_functions[2], function_entity_set)
free_variables = set()
if not is_single_target_dependency:
# Fail to optimize
x_function_entity_set = ENTITY_SET_MAP[python_lf_variable_type_constraints["x"]]
y_function_entity_set = ENTITY_SET_MAP[python_lf_variable_type_constraints["y"]]
z_function_entity_set = ENTITY_SET_MAP[python_lf_variable_type_constraints["z"]]
if len(variable_list) == 4:
m_function_entity_set = ENTITY_SET_MAP[python_lf_variable_type_constraints["m"]]
python_lf = "[(xe, ye, ze) for xe in %s() for ye in %s() for ze in %s() for me in %s() if (lambda x,y,z,m: %s)(xe, ye,ze,me)]" % \
(x_function_entity_set, y_function_entity_set, z_function_entity_set, m_function_entity_set, child_lf)
else:
python_lf = "[(xe, ye, ze) for xe in %s() for ye in %s() for ze in %s() if (lambda x,y,z: %s)(xe, ye,ze)]" % \
(x_function_entity_set, y_function_entity_set, z_function_entity_set, child_lf)
free_variables = set()
return python_lf, python_lf_variable_type_constraints, free_variables, python_lf_variable_interactions
if __name__ == '__main__':
questions, logical_forms = read_data(
'../../../data/atis/atis_lambda_test.tsv')
# sorted_logical_forms = sorted(logical_forms, key=lambda x: len(x))
sorted_logical_forms = [
"( _lambda $0 e ( _exists $1 ( _and ( _= ( _minutes_distant $1 ) $0 ) ( _to_city $1 boston:_ci ) ) ) )"]
for lidx, lf in enumerate(sorted_logical_forms):
# print(lidx)
# print(lf)
python_lf, python_lf_variable_type_constraints, free_variables, python_lf_variable_interactions = transform_lambda_calculus(
lf)
# if len(free_variables) > 0:
print(lidx)
print(lf)
print(python_lf)
print(python_lf_variable_type_constraints)
print(python_lf_variable_interactions)
print(free_variables)
print('==\n\n')
pprint(FUNCTION_NAME_SET)
| 61,471 | 51.006768 | 169 | py |
Unimer | Unimer-master/executions/atis/lambda_calculus/query.py |
# coding=utf8
import sys
sys.path += ['..']
import re
import mysql.connector
from pprint import pprint
db = None
def normalize(sql):
s = re.sub(' +', ' ', sql)
s = s.replace('MAX (', 'MAX(')
s = s.replace('MIN (', 'MIN(')
s = s.replace('AVG (', 'AVG(')
s = s.replace('COUNT (', 'COUNT(')
s = s.replace('count (', 'count(')
s = s.replace('SUM (', 'SUM(')
s = s.replace('< =', '<=')
s = s.replace('> =', '>=')
return s
def format_headers(header):
s = header.replace("( ", "(").replace(" )", ")").strip().lower()
return s
def get_connection():
global db
if db and db.is_connected():
return db
else:
db = mysql.connector.connect(
host="localhost",
user="root",
passwd="123456",
database="atis",
auth_plugin='mysql_native_password'
)
return db
def close_connection():
if db is not None and db.is_connected():
db.close()
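# get_result executes a SQL string against the shared connection and returns the rows
# as a list of dicts keyed by the (lower-cased) column headers. All helpers below are
# thin wrappers around it.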
def get_result(sql):
db = get_connection()
_sql = normalize(sql)
cursor = db.cursor()
cursor.execute(_sql)
# print(cursor.description)
headers = cursor.description
results = cursor.fetchall()
formatted_results = list()
for x in results:
r = dict()
for value, header in zip(x, headers):
r[format_headers(header[0])] = value
formatted_results.append(r)
# pprint(formatted_results)
return formatted_results
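# ENTITY_TYPE_MAP translates the abbreviated type suffix of an entity string
# (e.g. the "_ci" in "boston:_ci") into the entity-type name used by the SQL helpers.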
ENTITY_TYPE_MAP = {
"ac": "aircraft_code",
"al": "airline_code",
"ci": "city_name",
"ap": "airport_code",
"fn": "flight_number",
"cl": "class_description",
"ti": "time",
"pd": "day_period",
"mf": "manufacturer",
"mn": "month",
"da": "day",
"i": "integer",
"yr": "year",
"dn": "day_number",
"do": "dollar",
"hr": "hour",
"rc": "meal_code",
"st": "state_name",
"fb": "fare_basis_code",
"me": "meal_description",
"bat": "basis_type"
}
# Entity Set
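# Each get_all_* helper enumerates the full set of entities of one type with a
# SELECT DISTINCT query; they are the targets of ENTITY_SET_MAP in transform.py.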
def get_all_flight_ids():
sql = "SELECT distinct flight_id FROM flight"
return get_result(sql)
def get_all_city_names():
sql = "SELECT distinct city_name FROM city"
return get_result(sql)
def get_all_airline_codes():
sql = "SELECT distinct airline_code FROM airline"
return get_result(sql)
def get_all_aircraft_codes():
sql = "SELECT distinct aircraft_code FROM aircraft"
return get_result(sql)
def get_all_airport_codes():
sql = "SELECT distinct airport_code FROM airport"
return get_result(sql)
def get_all_booking_class_descriptions():
sql = "SELECT distinct class_description FROM class_of_service"
return get_result(sql)
def get_all_transport_types():
sql = "SELECT distinct transport_type FROM ground_service"
return get_result(sql)
def get_all_meal_codes():
sql = "SELECT distinct meal_code FROM food_service"
return get_result(sql)
def get_all_meal_descriptions():
sql = "SELECT distinct meal_description FROM food_service"
return get_result(sql)
def get_all_fare_basis_codes():
sql = "SELECT distinct fare_basis_code FROM fare_basis"
return get_result(sql)
def get_all_time_zone_codes():
sql = "SELECT distinct time_zone_code FROM time_zone"
return get_result(sql)
def get_all_one_direction_cost():
sql = "SELECT distinct one_direction_cost FROM fare"
return get_result(sql)
def get_all_capacity():
sql = "SELECT distinct capacity FROM aircraft"
return get_result(sql)
def get_all_flight_number():
sql = "SELECT distinct flight_number FROM flight"
return get_result(sql)
def get_all_departure_time():
sql = "SELECT distinct departure_time FROM flight"
return get_result(sql)
def get_all_stop_arrival_time():
sql = "SELECT distinct arrival_time FROM flight_stop"
return get_result(sql)
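# process_entity_string normalises an argument into (entity_name, entity_type).
# It accepts a raw entity string such as "boston:_ci", a result dict from a previous
# query, or a non-empty list of such dicts (in which case only the first row is used).
# Underscores are turned into spaces and a few city spellings ("st louis" etc.) are
# mapped to the form stored in the database.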
def process_entity_string(entity, default=""):
if isinstance(entity, str):
if ":_" in entity:
splits = entity.split(":_")
entity_name = splits[0]
entity_type = ENTITY_TYPE_MAP[splits[1]]
else:
entity_type = default
entity_name = entity
if '_' in entity_name:
entity_name = entity_name.replace("_", " ")
elif isinstance(entity, dict):
key = list(entity.keys())[0]
entity_type = key
entity_name = entity[key]
elif isinstance(entity, list) and len(entity) > 0:
# TODO: simply take the first one
key = list(entity[0].keys())[0]
entity_type = key
entity_name = entity[0][key]
else:
raise Exception("Invalid Entity Type %s" % str(entity))
if entity_type == 'city_name':
if entity_name == 'st louis':
entity_name = 'st. louis'
elif entity_name == 'st petersburg':
entity_name = 'st. petersburg'
elif entity_name == 'st paul':
entity_name = 'st. paul'
return entity_name, entity_type
# Entity
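# The short lookup functions below (fb, rc, dc, al, ap, ac, ci) fetch a single entity
# by its code; their names mirror the lambda-calculus type suffixes (fare basis, meal
# code, day, airline, airport, aircraft, city).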
def fb(entity):
"""
fare basis
"""
sql = "SELECT DISTINCT fare_basis_1.fare_basis_code FROM fare_basis fare_basis_1 WHERE fare_basis_1.fare_basis_code = '%s'" % (entity)
return get_result(sql)
def rc(entity):
"""
Meal code
"""
sql = "SELECT DISTINCT food_service_1.meal_description FROM food_service food_service_1 WHERE food_service_1.meal_code = '%s'" % (entity)
return get_result(sql)
def dc(entity):
"""
day name
"""
sql = "SELECT DISTINCT days_1.day_name FROM days days_1 WHERE days_1.days_code = '%s'" % (entity)
return get_result(sql)
def al(entity):
"""
airline code
"""
sql = "SELECT DISTINCT airline_1.airline_code FROM airline airline_1 WHERE airline_1.airline_code = '%s'" % (
entity)
return get_result(sql)
def ap(entity):
"""
airport code
"""
sql = "SELECT DISTINCT airport_1.airport_code FROM airport airport_1 WHERE airport_1.airport_code = '%s'" % (entity)
return get_result(sql)
def ac(entity):
"""
aircraft code
"""
sql = "SELECT DISTINCT aircraft_1.aircraft_code FROM aircraft aircraft_1 WHERE aircraft_1.aircraft_code = '%s'" % (entity)
return get_result(sql)
def ci(city_name):
"""
city_name
return city_code
"""
entity_name, _ = process_entity_string(city_name)
sql = "SELECT DISTINCT city_code FROM city WHERE city_name = '%s'" % (
entity_name)
return get_result(sql)
def abbrev(entity):
"""
abbrev of airline_code
"""
entity_name, entity_type = process_entity_string(entity)
sql = "SELECT DISTINCT airline_1.airline_code FROM airline airline_1 WHERE airline_1.airline_name like '%" + entity_name + "%'"
results = get_result(sql)
print(results)
if len(results) == 1:
return results[0]
return results
def capacity(argument):
"""
return airline
"""
if isinstance(argument, str):
entities = [argument]
elif isinstance(argument, list):
entities = argument
else:
assert isinstance(argument, dict)
entities = [argument]
results = list()
flight_number_template = "SELECT aircraft_1.capacity FROM aircraft as aircraft_1 JOIN flight as flight_1 on aircraft_1.aircraft_code = flight_1.aircraft_code_sequence WHERE flight_1.flight_number = %s;"
flight_id_template = "SELECT aircraft_1.capacity FROM aircraft as aircraft_1 JOIN flight as flight_1 on aircraft_1.aircraft_code = flight_1.aircraft_code_sequence WHERE flight_1.flight_id = %s;"
aircraft_code_template = "SELECT DISTINCT aircraft_1.capacity FROM aircraft aircraft_1 WHERE aircraft_1.aircraft_code = '%s'"
for e in entities:
entity_name, entity_type = process_entity_string(e, "aircraft_code")
if entity_type == 'aircraft_code':
sql = aircraft_code_template % entity_name
elif entity_type == 'flight_id':
# flight id
sql = flight_id_template % entity_name
else:
# entity_type == 'flight_number':
sql = flight_number_template % entity_name
results += get_result(sql)
return results
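# Sketch of how capacity dispatches on the argument's entity type (hypothetical ids;
# requires the ATIS database behind get_result):
# capacity({"aircraft_code": "733"}) # capacity of one aircraft type
# capacity({"flight_id": 102}) # capacity of the aircraft flown on a flight
# capacity({"flight_number": 21}) # same lookup via the flight number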
def flight_number(argument):
"""
Return flight number
_flight_number(_argmin((lambda x: _and(_flight(x),_from(x,"boston:_ci"),_to(x,"washington:_ci"))),(lambda x: _departure_time(x))))
"""
if isinstance(argument, str):
entities = [argument]
elif isinstance(argument, list):
entities = argument
else:
assert isinstance(argument, dict)
entities = [argument]
results = list()
sql_template = "SELECT flight_number FROM flight WHERE flight_id = %s"
for e in entities:
entity_name, _ = process_entity_string(e, "flight_id")
sql = sql_template % entity_name
results += get_result(sql)
return results
def get_flight_destination(flight_id):
"""
:entity_type: flight_id
"""
processed_flight_id, entity_type = process_entity_string(
flight_id, "flight_id")
sql = "SELECT to_airport FROM flight WHERE flight_id = %s" % processed_flight_id
results = get_result(sql)
return results
def get_flight_fare(flight_id):
"""
_fare $1
:entity_type: flight_id
"""
if flight_id is None or (isinstance(flight_id, list) and len(flight_id) == 0):
return None
processed_flight_id, entity_type = process_entity_string(flight_id, "flight_id")
sql = "SELECT fare.one_direction_cost FROM flight JOIN flight_fare ON flight.flight_id = flight_fare.flight_id JOIN fare ON fare.fare_id = flight_fare.fare_id WHERE flight.flight_id = %s" % (processed_flight_id)
results = get_result(sql)
return results
def get_flight_cost(flight_id):
"""
_cost $1
:entity_type: flight_id
"""
if flight_id is None or (isinstance(flight_id, list) and len(flight_id) == 0):
return None
processed_flight_id, entity_type = process_entity_string(flight_id, "flight_id")
sql = "SELECT fare.round_trip_cost FROM flight JOIN flight_fare ON flight.flight_id = flight_fare.flight_id JOIN fare ON fare.fare_id = flight_fare.fare_id WHERE flight.flight_id = %s" % (processed_flight_id)
results = get_result(sql)
return results
def get_booking_class_fare(class_description):
"""
_fare $1
    :entity_type: class_description
"""
processed_class_description, entity_type = process_entity_string(
class_description, "class_description")
sql = "SELECT fare.one_direction_cost FROM fare JOIN fare_basis ON fare.fare_basis_code = fare_basis.fare_basis_code JOIN class_of_service ON fare_basis.booking_class = class_of_service.booking_class WHERE class_of_service.class_description = '%s'" % (
processed_class_description)
results = get_result(sql)
return results
def airline_name(argument):
"""
_airline_name
"""
if isinstance(argument, str):
entities = [argument]
elif isinstance(argument, list):
entities = argument
else:
assert isinstance(argument, dict)
entities = [argument]
sql_tempalte = "SELECT airline_name FROM flight JOIN airline ON flight.airline_code = airline.airline_code WHERE flight.flight_id = %s"
results = list()
for e in entities:
entity_name, entity_type = process_entity_string(e, "aircraft_code")
sql = sql_tempalte % entity_name
results += get_result(sql)
return results
def departure_time(argument):
"""
_departure_time
"""
if argument is None:
return None
if isinstance(argument, str):
entities = [argument]
elif isinstance(argument, list):
entities = argument
else:
assert isinstance(argument, dict)
entities = [argument]
sql_tempalte = "SELECT departure_time FROM flight WHERE flight_id = %s"
results = list()
for e in entities:
entity_name, entity_type = process_entity_string(e, "flight_id")
sql = sql_tempalte % entity_name
results += get_result(sql)
return results
def arrival_time(argument):
"""
_arrival_time
"""
if isinstance(argument, str):
entities = [argument]
elif isinstance(argument, list):
entities = argument
else:
assert isinstance(argument, dict)
entities = [argument]
sql_tempalte = "SELECT arrival_time FROM flight WHERE flight_id = %s"
results = list()
for e in entities:
entity_name, entity_type = process_entity_string(e, "flight_id")
sql = sql_tempalte % entity_name
results += get_result(sql)
return results
def miles_distant(airport_code, city_name):
"""
_miles_distant
:entity_type: (airport_code, city_name)
"""
processed_airport_code, _ = process_entity_string(
airport_code, "airport_code")
processed_city_name, _ = process_entity_string(city_name, "city_name")
sql = "SELECT airport_service.miles_distant FROM airport_service JOIN city ON city.city_code = airport_service.city_code WHERE city.city_name = '%s' AND airport_service.airport_code = '%s'" % (
processed_city_name, processed_airport_code)
return get_result(sql)
def miles_distant_between_city(city_name_1, city_name_2):
"""
_miles_distant
:entity_type: (city_name, city_name)
"""
processed_city_name_1, _ = process_entity_string(
city_name_1, "city_name")
    processed_city_name_2, _ = process_entity_string(
        city_name_2, "city_name")
sql = "SELECT distinct airport_service.miles_distant FROM airport_service JOIN city ON airport_service.city_code = city.city_code WHERE city.city_name = '%s' AND airport_service.airport_code IN (SELECT T1.airport_code FROM airport_service AS T1 JOIN city AS T2 ON T1.city_code = T2.city_code WHERE T2.city_name = '%s');" % (
processed_city_name_1, processed_city_name_2)
return get_result(sql)
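# Hypothetical calls for the two distance helpers above (both return rows of
# {'miles_distant': ...} from get_result):
# miles_distant("bos:_ap", "boston:_ci")
# miles_distant_between_city("dallas:_ci", "fort_worth:_ci")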
def minimum_connection_time(airport_code):
processed_airport_code, _ = process_entity_string(
airport_code, "airport_code")
sql = "SELECT DISTINCT airport_1.minimum_connect_time FROM airport airport_1 WHERE airport_1.airport_code = '%s'" % (processed_airport_code)
return get_result(sql)
def get_number_of_stops(flight_id):
"""
_stops(x)
:entity_type flight_id
"""
if isinstance(flight_id, list) and len(flight_id) == 0:
return list()
processed_flight_id, entity_type = process_entity_string(flight_id, "flight_id")
sql = "SELECT stops FROM flight WHERE flight.flight_id = %s" % (
processed_flight_id)
return get_result(sql)
def time_elapsed(flight_id):
"""
_time_elapsed(x)
:entity_type flight_id
"""
processed_flight_id, entity_type = process_entity_string(
flight_id, "flight_id")
sql = "SELECT time_elapsed FROM flight WHERE flight_id = %s" % processed_flight_id
return get_result(sql)
def get_flight_aircraft_code(flight_id):
"""
_aircraft_code $1
:entity_type: flight_id
"""
processed_flight_id, entity_type = process_entity_string(flight_id, "flight_id")
sql = "SELECT aircraft_code FROM flight JOIN equipment_sequence AS T ON flight.aircraft_code_sequence = T.aircraft_code_sequence WHERE flight.flight_id = %s" % processed_flight_id
return get_result(sql)
def get_flight_airline_code(flight_id):
"""
_airline:_e $1
:entity_type: flight_id
"""
processed_flight_id, entity_type = process_entity_string(flight_id, "flight_id")
sql = "SELECT airline_code FROM flight WHERE flight.flight_id = %s" % processed_flight_id
return get_result(sql)
def get_flight_booking_class(flight_id):
"""
_booking_class $1
:entity_type: flight_id
"""
processed_flight_id, entity_type = process_entity_string(
flight_id, "flight_id")
sql = "SELECT class_of_service.class_description FROM flight_fare JOIN fare ON flight_fare.fare_id = fare.fare_id JOIN fare_basis ON fare.fare_basis_code = fare_basis.fare_basis_code JOIN class_of_service ON fare_basis.booking_class = class_of_service.booking_class WHERE flight_fare.flight_id = %s" % processed_flight_id
return get_result(sql)
def get_flight_meal(flight_id):
"""
_meal $1
:entity_type: flight_id
"""
processed_flight_id, entity_type = process_entity_string(
flight_id, "flight_id")
sql = "SELECT food_service.meal_description FROM flight JOIN food_service ON flight.meal_code = food_service.meal_code WHERE flight_id = %s" % (
processed_flight_id)
return get_result(sql)
def get_flight_stop_airport(flight_id):
"""
:entity_type: flight_id
"""
processed_flight_id, entity_type = process_entity_string(
flight_id, "flight_id")
sql = "SELECT flight_stop.stop_airport FROM flight_stop WHERE flight_stop.flight_id = %s" % (
processed_flight_id)
return get_result(sql)
def get_ground_fare(transport_type):
"""
_ground_fare $1
:entity_type (transport_type)
"""
processed_transport_type, _ = process_entity_string(
transport_type, "transport_type")
sql = "SELECT ground_fare FROM ground_service WHERE transport_type = '%s'" % (
processed_transport_type)
return get_result(sql)
def get_ground_fare_1(city_name, transport_type):
"""
_ground_fare $1
:entity_type (city_name, transport_type)
"""
processed_transport_type, _ = process_entity_string(
transport_type, "transport_type")
processed_city_name, _ = process_entity_string(city_name, "city_name")
sql = "SELECT ground_fare FROM ground_service JOIN city ON ground_service.city_code = city.city_code WHERE city.city_name = '%s' AND transport_type = '%s'" % (
processed_city_name, processed_transport_type)
return get_result(sql)
def get_ground_fare_2(airport_code, transport_type):
"""
_ground_fare $1
:entity_type (airport_code, transport_type)
"""
processed_transport_type, _ = process_entity_string(
transport_type, "transport_type")
processed_airport_code, _ = process_entity_string(
airport_code, "airport_code")
sql = "SELECT ground_fare FROM ground_service WHERE airport_code = '%s' AND transport_type = '%s'" % (
processed_airport_code, processed_transport_type)
return get_result(sql)
def get_ground_fare_3(city_name, airport_code, transport_type):
"""
_ground_fare $1
:entity_type (city_name, airport_code, transport_type)
"""
processed_transport_type, _ = process_entity_string(
transport_type, "transport_type")
processed_city_name, _ = process_entity_string(city_name, "city_name")
processed_airport_code, _ = process_entity_string(
airport_code, "airport_code")
sql = "SELECT ground_fare FROM ground_service JOIN city ON ground_service.city_code = city.city_code WHERE city.city_name = '%s' AND airport_code = '%s' AND transport_type = '%s'" % (
processed_city_name, processed_airport_code, processed_transport_type)
return get_result(sql)
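# The three get_ground_fare_* helpers cover the argument combinations that appear in the
# lambda calculus forms; hypothetical calls (plain strings fall back to the default types):
# get_ground_fare_1("boston:_ci", "taxi")
# get_ground_fare_2("bos:_ap", "taxi")
# get_ground_fare_3("boston:_ci", "bos:_ap", "taxi")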
def get_minutes_distant_1(city_name):
"""
:entity_type (city_name)
"""
processed_city_name, _ = process_entity_string(city_name, "city_name")
sql = "SELECT minutes_distant FROM airport_service JOIN city ON airport_service.city_code = city.city_code WHERE city.city_name = '%s'" % (
processed_city_name)
return get_result(sql)
def get_minutes_distant_2(airport_code):
"""
:entity_type (airport_code)
"""
processed_airport_code, _ = process_entity_string(
airport_code, "airport_code")
sql = "SELECT minutes_distant FROM airport_service WHERE airport_code = '%s'" % (
processed_airport_code)
return get_result(sql)
def get_minutes_distant_3(city_name, airport_code):
"""
:entity_type (city_name, airport_code)
"""
processed_city_name, _ = process_entity_string(city_name, "city_name")
processed_airport_code, _ = process_entity_string(
airport_code, "airport_code")
sql = "SELECT minutes_distant FROM airport_service JOIN city ON airport_service.city_code = city.city_code WHERE city.city_name = '%s' AND airport_code = '%s'" % (
processed_city_name, processed_airport_code)
return get_result(sql)
def get_flight_stop_arrival_time(flight_id):
"""
_stop_arrival_time $0
:entity_type flight_id
"""
processed_flight_id, _ = process_entity_string(flight_id, "flight_id")
sql = "SELECT flight_stop.arrival_time, city.city_name FROM flight_stop JOIN airport_service ON flight_stop.stop_airport = airport_service.airport_code JOIN city ON city.city_code = airport_service.city_code WHERE flight_stop.flight_id = %s" % (processed_flight_id)
return get_result(sql)
def get_flight_restriction_code(flight_id):
"""
_restriction_code $0
:entity_type flight_id
"""
processed_flight_id, _ = process_entity_string(flight_id, "flight_id")
sql = "SELECT restriction.restriction_code FROM flight_fare JOIN fare ON flight_fare.fare_id = fare.fare_id JOIN restriction ON fare.restriction_code = restriction.restriction_code WHERE flight_fare.flight_id = %s" % (processed_flight_id)
return get_result(sql)
# Binary Predicate
def is_mf(entity, manufacturer):
"""
:_mf
mf(x,"boeing:_mf")
"""
return True
def is_flight_manufacturer(flight_id, manufacturer):
"""
_manufacturer(x,"boeing:_mf")
:entity_type (flight_id, manufacturer)
"""
processed_flight_id, _ = process_entity_string(
flight_id, "flight_id")
processed_manufacturer, entity_type = process_entity_string(
manufacturer, "manufacturer")
sql = "SELECT flight.flight_id FROM flight JOIN aircraft ON flight.aircraft_code_sequence = aircraft.aircraft_code WHERE aircraft.manufacturer = '%s' AND flight.flight_id = %s" % (processed_manufacturer, processed_flight_id)
results = get_result(sql)
return len(results) > 0
def is_services(airline_code, city_name):
"""
_services(x,y)
"""
processed_airline_code, _ = process_entity_string(airline_code, "airline_code")
processed_city_name, _ = process_entity_string(city_name, "city_name")
sql = "SELECT flight_id FROM flight JOIN airport_service ON flight.to_airport = airport_service.airport_code JOIN city ON city.city_code = airport_service.city_code WHERE city.city_name = '%s' AND flight.airline_code = '%s'" % (
processed_city_name, processed_airline_code)
results = get_result(sql)
return len(results) > 0
def is_airline_services(airline_code, airport_code):
"""
_services ff:_al $x
:entity_type: (airline_code, airport_code)
"""
processed_airline_code, _ = process_entity_string(airline_code, "airline_code")
processed_airport_code, _ = process_entity_string(
airport_code, "airport_code")
sql = "SELECT DISTINCT flight.to_airport FROM flight WHERE flight.to_airport = '%s' AND flight.airline_code = '%s'" % (
processed_airport_code, processed_airline_code)
results = get_result(sql)
return len(results) > 0
def is_to(flight_id, entity):
"""
_to(x,"mke:_ap"/"indianapolis:_ci")
"""
processed_flight_id, _ = process_entity_string(
flight_id, "flight_id")
entity, entity_type = process_entity_string(entity, "airport_code")
if entity_type == 'airport_code':
sql = "SELECT flight_id FROM flight WHERE flight.flight_id = %s AND flight.to_airport = '%s'" % (
processed_flight_id, entity)
elif entity_type == 'city_name':
sql = "SELECT DISTINCT flight_1.flight_id FROM flight AS flight_1 JOIN airport_service AS airport_service_1 ON flight_1.to_airport = airport_service_1.airport_code JOIN city AS city_1 ON airport_service_1.city_code = city_1.city_code WHERE city_1.city_name = '%s' AND flight_1.flight_id = %s" % (
entity, processed_flight_id)
else:
# entity_type == 'state_name':
sql = "SELECT DISTINCT flight_1.flight_id FROM flight AS flight_1 JOIN airport_service AS airport_service_1 ON flight_1.to_airport = airport_service_1.airport_code JOIN city AS city_1 ON airport_service_1.city_code = city_1.city_code JOIN state ON city_1.state_code = state.state_code WHERE state.state_name = '%s' AND flight_1.flight_id = %s" % (
entity, processed_flight_id)
results = get_result(sql)
return len(results) > 0
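# Hypothetical sketch of the _to(x, y) dispatch above; the second argument can be an
# airport constant, a city constant, or (via the dict form) a state name:
# is_to({"flight_id": 102}, "mke:_ap")
# is_to({"flight_id": 102}, "indianapolis:_ci")
# is_to({"flight_id": 102}, {"state_name": "arizona"})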
def is_from(flight_id, entity):
"""
_from(x,"mke:_ap"/"indianapolis:_ci")
"""
processed_flight_id, _ = process_entity_string(
flight_id, "flight_id")
entity, entity_type = process_entity_string(entity, "airport_code")
if entity_type == 'airport_code':
sql = "SELECT flight_id FROM flight WHERE flight.flight_id = %s AND flight.from_airport = '%s'" % (
processed_flight_id, entity)
else:
# entity_type == 'city_name'
sql = "SELECT DISTINCT flight_1.flight_id FROM flight AS flight_1 JOIN airport_service AS airport_service_1 ON flight_1.from_airport = airport_service_1.airport_code JOIN city AS city_1 ON airport_service_1.city_code = city_1.city_code WHERE city_1.city_name = '%s' AND flight_1.flight_id = %s" % (
entity, processed_flight_id)
results = get_result(sql)
return len(results) > 0
def is_loc_t(airport_code, city_name):
"""
_loc:_t(airport_code,city_name)
:entity_type (airport_code, city_name)
"""
processed_airport_code, _ = process_entity_string(
airport_code, "airport_code")
processed_city_name, _ = process_entity_string(
city_name, "city_name")
sql = "SELECT * FROM airport_service AS T JOIN city ON T.city_code = city.city_code WHERE city.city_name = '%s' AND T.airport_code = '%s';" % (
processed_city_name, processed_airport_code)
results = get_result(sql)
return len(results) > 0
def is_loc_t_state(airport_code, state_name):
"""
_loc:_t(airport_code,state_name)
:entity_type (airport_code, state_name)
"""
processed_airport_code, _ = process_entity_string(
airport_code, "airport_code")
processed_state_name, _ = process_entity_string(
state_name, "state_name")
sql = "SELECT * FROM airport_service AS T JOIN city ON T.city_code = city.city_code JOIN state ON city.state_code = state.state_code WHERE state.state_name = '%s' AND T.airport_code = '%s';" % (
processed_state_name, processed_airport_code)
results = get_result(sql)
return len(results) > 0
def is_loc_t_city_time_zone(city_name, time_zone_code):
"""
_loc:_t(city_name,time_zone_code)
:entity_type (city_name, time_zone_code)
"""
processed_city_name, _ = process_entity_string(city_name, "city_name")
processed_time_zone_code, _ = process_entity_string(
time_zone_code, "time_zone_code")
sql = "SELECT city_name FROM city WHERE city_name = '%s' AND time_zone_code = '%s'" % (
processed_city_name, processed_time_zone_code)
results = get_result(sql)
return len(results) > 0
def is_from_airport(transport_way, entity):
"""
Transport Type
_from_airport(x,"toronto:_ci"/"pit:_ap")
"""
processed_transport_way, _ = process_entity_string(transport_way, "transport_type")
entity_name, entity_type = process_entity_string(entity)
airport_code_template = "SELECT DISTINCT ground_service_1.transport_type FROM ground_service ground_service_1 WHERE ground_service_1.airport_code = '%s' AND ground_service_1.transport_type = '%s'"
if entity_type == 'city_name':
sql = city_name_template % (entity_name, processed_transport_way)
else:
# entity_type == 'airport_code'
sql = airport_code_template % (entity_name, processed_transport_way)
results = get_result(sql)
return len(results) > 0
def is_from_airports_of_city(transport_way, city_name):
"""
Transport Type
_from_airport(x,"toronto:_ci"/"pit:_ap")
"""
processed_transport_way, _ = process_entity_string(transport_way, "transport_type")
processed_city_name, _ = process_entity_string(city_name, "city_name")
sql = "SELECT DISTINCT T3.transport_type FROM airport_service AS T1 JOIN city AS T2 ON T1.city_code = T2.city_code JOIN ground_service AS T3 ON T1.airport_code = T3.airport_code WHERE T2.city_name = '%s' AND T3.transport_type = '%s'" % (
processed_city_name, processed_transport_way
)
results = get_result(sql)
return len(results) > 0
def is_to_city(transport_way, city_name):
"""
Transport Type
_to_city(x,"boston:_ci")
"""
processed_transport_way, _ = process_entity_string(
transport_way, "transport_type")
entity_name, entity_type = process_entity_string(city_name)
assert entity_type == 'city_name'
sql = "SELECT DISTINCT ground_service_1.transport_type FROM ground_service AS ground_service_1 JOIN city AS city_1 ON ground_service_1.city_code = city_1.city_code WHERE city_1.city_name = '%s' AND ground_service_1.transport_type = '%s'" % (
entity_name, processed_transport_way)
results = get_result(sql)
return len(results) > 0
def is_flight_airline(flight_id, airline_code):
"""
_airline(x,"dl:_al")
"""
processed_flight_id, _ = process_entity_string(
flight_id, "flight_id")
processed_airline_code, _ = process_entity_string(airline_code, "airline_code")
sql = "SELECT flight_id FROM flight WHERE flight_id = %s AND airline_code = '%s'" % (
processed_flight_id, processed_airline_code)
results = get_result(sql)
return len(results) > 0
def is_aircraft_airline(aircraft_code, airline_code):
"""
_airline(x,"dl:_al")
"""
processed_aircraft_code, _ = process_entity_string(
aircraft_code, "aircraft_code")
processed_airline_code, _ = process_entity_string(
airline_code, "airline_code")
sql = "SELECT aircraft_code_sequence FROM flight WHERE aircraft_code_sequence = '%s' AND airline_code = '%s'" % (
processed_aircraft_code, processed_airline_code)
results = get_result(sql)
return len(results) > 0
def is_aircraft_basis_type(aircraft_code, basis_type):
"""
_basis_type(x,"737:_bat")
:entity_type: (aircraft_code, basis_type)
"""
processed_aircraft_code, _ = process_entity_string(
aircraft_code, "aircraft_code")
processed_basis_type, _ = process_entity_string(
basis_type, "basis_type")
sql = "SELECT aircraft_code FROM aircraft WHERE aircraft_code = '%s' AND basic_type = '%s'" % (
processed_aircraft_code, processed_basis_type)
results = get_result(sql)
return len(results) > 0
def is_flight_number(flight_id, flight_number):
"""
_flight_number(x,"201:_fn")
"""
processed_flight_id, _ = process_entity_string(
flight_id, "flight_id")
processed_flight_number, _ = process_entity_string(
flight_number, "flight_number")
sql = "SELECT flight_id FROM flight WHERE flight_id = %s AND flight_number = '%s'" % (
processed_flight_id, processed_flight_number)
results = get_result(sql)
return len(results) > 0
def is_flight_stop_at_city(flight_id, city_name):
"""
_stop(x,"denver:_ci")
"""
processed_flight_id, _ = process_entity_string(
flight_id, "flight_id")
processed_city_name, _ = process_entity_string(
city_name, "city_name")
sql = "SELECT flight.flight_id FROM flight JOIN flight_stop ON flight.flight_id = flight_stop.flight_id JOIN airport_service ON flight_stop.stop_airport = airport_service.airport_code JOIN city ON city.city_code = airport_service.city_code WHERE city.city_name = '%s' AND flight.flight_id = %s" % (
processed_city_name, processed_flight_id)
results = get_result(sql)
return len(results) > 0
def is_flight_stop_at_airport(flight_id, airport_code):
"""
_stop(x,"denver:_ci")
:entity_type (flight_id, airport_code)
"""
processed_flight_id, _ = process_entity_string(
flight_id, "flight_id")
processed_airport_code, _ = process_entity_string(
airport_code, "airport_code")
sql = "SELECT flight_stop.flight_id FROM flight_stop WHERE flight_stop.stop_airport = '%s' AND flight_stop.flight_id = %s" % (
processed_airport_code, processed_flight_id)
results = get_result(sql)
return len(results) > 0
def is_flight_stops_specify_number_of_times(flight_id, integer):
"""
_stops(x,"a:_i")
:entity_type: (flight_id, integer)
"""
processed_flight_id, _ = process_entity_string(
flight_id, "flight_id")
processed_integer, _ = process_entity_string(
integer, "integer")
sql = "SELECT flight_id FROM flight WHERE flight_id = %s AND stops = %s" % (processed_flight_id, processed_integer)
results = get_result(sql)
return len(results) > 0
def is_flight_has_class_type(flight_id, class_description):
"""
_class_type(x,"first:_cl")
:entity_type: (flight_id, class_type)
"""
processed_flight_id, _ = process_entity_string(
flight_id, "flight_id")
processed_class_description, _ = process_entity_string(
class_description, "class_description")
sql = "SELECT flight_fare.flight_id FROM flight_fare JOIN fare ON flight_fare.fare_id = fare.fare_id JOIN fare_basis ON fare.fare_basis_code = fare_basis.fare_basis_code WHERE flight_fare.flight_id = %s AND fare_basis.class_type = '%s'" % (
processed_flight_id, processed_class_description)
results = get_result(sql)
return len(results) > 0
def is_fare_basis_code_class_type(fare_basis_code, class_description):
"""
_class_type(x,"first:_cl")
:entity_type: (fare_basis_code, class_type)
"""
processed_fare_basis_code, _ = process_entity_string(
fare_basis_code, "fare_basis_code")
processed_class_description, _ = process_entity_string(
class_description, "class_description")
sql = "SELECT fare_basis_code FROM fare_basis JOIN class_of_service ON fare_basis.booking_class = class_of_service.booking_class WHERE fare_basis_code = '%s' AND class_description = '%s'" % (
processed_fare_basis_code, processed_class_description)
results = get_result(sql)
return len(results) > 0
def is_flight_after_day(flight_id, day):
"""
_after_day(x,"wednesday:_da")
"""
return True
def is_flight_before_day(flight_id, day):
"""
_before_day(x,"wednesday:_da")
"""
return True
def is_flight_approx_arrival_time(flight_id, arrival_time):
"""
_approx_arrival_time()
"""
processed_flight_id, _ = process_entity_string(
flight_id, "flight_id")
processed_arrival_time, _ = process_entity_string(
arrival_time, "time")
    # Build a one-hour window (+/- 30 minutes) around the requested HHMM time,
    # zero-padding the minutes so the comparison values stay valid HHMM integers.
    if len(processed_arrival_time) == 4:
        if processed_arrival_time[2:] == '00':
            start_time = "%d%02d" % (int(processed_arrival_time[:2]) - 1, 30)
            end_time = "%d%02d" % (int(processed_arrival_time[:2]), 30)
        elif processed_arrival_time[2:] == '15':
            start_time = "%d%02d" % (int(processed_arrival_time[:2]) - 1, 45)
            end_time = "%d%02d" % (int(processed_arrival_time[:2]), 45)
        elif processed_arrival_time[2:] == '30':
            start_time = "%d%02d" % (int(processed_arrival_time[:2]), 0)
            end_time = "%d%02d" % (int(processed_arrival_time[:2]) + 1, 0)
        else:
            assert processed_arrival_time[2:] == '45'
            start_time = "%d%02d" % (int(processed_arrival_time[:2]), 15)
            end_time = "%d%02d" % (int(processed_arrival_time[:2]) + 1, 15)
    else:
        if processed_arrival_time[1:] == '00':
            start_time = "%d%02d" % (int(processed_arrival_time[:1]) - 1, 30)
            end_time = "%d%02d" % (int(processed_arrival_time[:1]), 30)
        elif processed_arrival_time[1:] == '15':
            start_time = "%d%02d" % (int(processed_arrival_time[:1]) - 1, 45)
            end_time = "%d%02d" % (int(processed_arrival_time[:1]), 45)
        elif processed_arrival_time[1:] == '30':
            start_time = "%d%02d" % (int(processed_arrival_time[:1]), 0)
            end_time = "%d%02d" % (int(processed_arrival_time[:1]) + 1, 0)
        else:
            assert processed_arrival_time[1:] == '45'
            start_time = "%d%02d" % (int(processed_arrival_time[:1]), 15)
            end_time = "%d%02d" % (int(processed_arrival_time[:1]) + 1, 15)
sql = "SELECT flight_1.flight_id FROM flight flight_1 WHERE flight_1.arrival_time >= %s AND flight_1.arrival_time <= %s AND flight_1.flight_id = %s" % (
start_time, end_time, processed_flight_id)
results = get_result(sql)
return len(results) > 0
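# Example of the +/- 30 minute window built above (hypothetical flight id):
# is_flight_approx_arrival_time({"flight_id": 102}, "1700:_ti")
# is true when flight 102 arrives between 1630 and 1730.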
def is_flight_arrival_time(flight_id, arrival_time):
"""
_arrival_time(x,"1700:_ti")
"""
processed_flight_id, _ = process_entity_string(
flight_id, "flight_id")
processed_arrival_time, _ = process_entity_string(
arrival_time, "time")
sql = "SELECT flight_1.flight_id FROM flight flight_1 WHERE flight_1.arrival_time = %s AND flight_1.flight_id = %s" % (
processed_arrival_time, processed_flight_id)
results = get_result(sql)
return len(results) > 0
def is_flight_departure_time(flight_id, departure_time):
"""
_departure_time()
:entity_type: (flight_id, time)
"""
processed_flight_id, _ = process_entity_string(
flight_id, "flight_id")
processed_departure_time, _ = process_entity_string(
departure_time, "time")
sql = "SELECT flight_1.flight_id FROM flight flight_1 WHERE flight_1.departure_time = %s AND flight_1.flight_id = %s" % (
processed_departure_time, processed_flight_id)
results = get_result(sql)
return len(results) > 0
def is_flight_approx_departure_time(flight_id, departure_time):
"""
_approx_departure_time()
"""
processed_flight_id, _ = process_entity_string(
flight_id, "flight_id")
processed_departure_time, _ = process_entity_string(
departure_time, "time")
    # Build a one-hour window (+/- 30 minutes) around the requested HHMM time,
    # zero-padding the minutes so the comparison values stay valid HHMM integers.
    if len(processed_departure_time) == 4:
        if processed_departure_time[2:] == '00':
            start_time = "%d%02d" % (int(processed_departure_time[:2]) - 1, 30)
            end_time = "%d%02d" % (int(processed_departure_time[:2]), 30)
        elif processed_departure_time[2:] == '15':
            start_time = "%d%02d" % (int(processed_departure_time[:2]) - 1, 45)
            end_time = "%d%02d" % (int(processed_departure_time[:2]), 45)
        elif processed_departure_time[2:] == '30':
            start_time = "%d%02d" % (int(processed_departure_time[:2]), 0)
            end_time = "%d%02d" % (int(processed_departure_time[:2]) + 1, 0)
        else:
            assert processed_departure_time[2:] == '45'
            start_time = "%d%02d" % (int(processed_departure_time[:2]), 15)
            end_time = "%d%02d" % (int(processed_departure_time[:2]) + 1, 15)
        sql = "SELECT flight_1.flight_id FROM flight flight_1 WHERE flight_1.departure_time >= %s AND flight_1.departure_time <= %s AND flight_1.flight_id = %s" % (
            start_time, end_time, processed_flight_id)
    elif len(processed_departure_time) == 3:
        if processed_departure_time[1:] == '00':
            start_time = "%d%02d" % (int(processed_departure_time[:1]) - 1, 30)
            end_time = "%d%02d" % (int(processed_departure_time[:1]), 30)
        elif processed_departure_time[1:] == '15':
            start_time = "%d%02d" % (int(processed_departure_time[:1]) - 1, 45)
            end_time = "%d%02d" % (int(processed_departure_time[:1]), 45)
        elif processed_departure_time[1:] == '30':
            start_time = "%d%02d" % (int(processed_departure_time[:1]), 0)
            end_time = "%d%02d" % (int(processed_departure_time[:1]) + 1, 0)
        else:
            assert processed_departure_time[1:] == '45'
            start_time = "%d%02d" % (int(processed_departure_time[:1]), 15)
            end_time = "%d%02d" % (int(processed_departure_time[:1]) + 1, 15)
        sql = "SELECT flight_1.flight_id FROM flight flight_1 WHERE flight_1.departure_time >= %s AND flight_1.departure_time <= %s AND flight_1.flight_id = %s" % (
            start_time, end_time, processed_flight_id)
elif processed_departure_time == "0":
start_time = "2330"
end_time = "30"
sql = "SELECT flight_1.flight_id FROM flight flight_1 WHERE (flight_1.departure_time >= %s OR flight_1.departure_time <= %s) AND flight_1.flight_id = %s" % (
start_time, end_time, processed_flight_id)
results = get_result(sql)
return len(results) > 0
def is_flight_approx_return_time(flight_id, return_time):
"""
_approx_return_time(x,"1900:_ti")
"""
return is_flight_approx_arrival_time(flight_id, return_time)
def is_flight_during_day(flight_id, day_period):
"""
_during_day(x,"evening:_pd")
"""
processed_flight_id, _ = process_entity_string(
flight_id, "flight_id")
processed_day_period, _ = process_entity_string(
day_period, "day_period")
period_map = {
"morning": [0, 1200],
"afternoon": [1200, 1800],
"early": [0, 800],
"evening": [1800, 2200],
"pm": [1200, 2400],
"late": [601, 1759],
"breakfast": [600, 900],
"late evening": [2000, 2400],
"late night": [2159, 301],
"daytime": [600,1800]
}
if processed_day_period == 'late night':
sql = "SELECT DISTINCT flight.flight_id FROM flight JOIN days ON flight.flight_days = days.days_code JOIN date_day ON days.day_name = date_day.day_name WHERE flight.flight_id = %s AND date_day.year = 1991 AND date_day.month_number = 3 AND ( (date_day.day_number = 21 AND flight.departure_time > 2159) OR (date_day.day_number = 22 AND flight.departure_time < 301))" % (processed_flight_id)
else:
start, end = period_map[processed_day_period]
sql = "SELECT DISTINCT flight_1.flight_id FROM flight flight_1 WHERE flight_1.flight_id = %s AND flight_1.departure_time BETWEEN %d AND %d" % (
processed_flight_id, start, end)
results = get_result(sql)
return len(results) > 0
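# Hypothetical example of the period mapping above: "evening" covers departures
# between 1800 and 2200, so
# is_flight_during_day({"flight_id": 102}, "evening:_pd")
# holds only for flights departing in that window.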
def is_flight_during_day_arrival(flight_id, day_period):
"""
_during_day(x,"evening:_pd")
"""
processed_flight_id, _ = process_entity_string(
flight_id, "flight_id")
processed_day_period, _ = process_entity_string(
day_period, "day_period")
period_map = {
"morning": [0, 1200],
"afternoon": [1200, 1800],
"early": [0, 800],
"evening": [1800, 2200],
"pm": [1200, 2400],
"late": [601, 1759],
"breakfast": [600, 900],
"late evening": [2000, 2400],
"daytime": [600, 1800],
"late night": [2159, 301],
'mealtime': [1700,2000]
}
if processed_day_period == 'late night':
sql = "SELECT DISTINCT flight.flight_id FROM flight JOIN days ON flight.flight_days = days.days_code JOIN date_day ON days.day_name = date_day.day_name WHERE flight.flight_id = %s AND date_day.year = 1991 AND date_day.month_number = 3 AND ( (date_day.day_number = 21 AND flight.arrival_time > 2159) OR (date_day.day_number = 22 AND flight.arrival_time < 301))" % (
processed_flight_id)
else:
start, end = period_map[processed_day_period]
sql = "SELECT DISTINCT flight_1.flight_id FROM flight flight_1 WHERE flight_1.flight_id = %s AND flight_1.arrival_time BETWEEN %d AND %d" % (
processed_flight_id, start, end)
results = get_result(sql)
return len(results) > 0
def is_flight_on_day_number(flight_id, day_number):
"""
_day_number(x,"26:_dn")
:entity_type (flight_id, day_number)
"""
processed_flight_id, entity_type = process_entity_string(
flight_id, "flight_id")
processed_day_number, _ = process_entity_string(day_number, "day_number")
sql = "SELECT flight.flight_id FROM flight JOIN days on flight.flight_days = days.days_code JOIN date_day ON days.day_name = date_day.day_name WHERE date_day.year = 1991 AND date_day.day_number = %s AND flight.flight_id = %s" % (
processed_day_number, processed_flight_id)
results = get_result(sql)
return len(results) > 0
def is_flight_on_day(flight_id, day):
"""
_day $0 monday:_da
:entity_type: (flight_id, day)
"""
processed_flight_id, entity_type = process_entity_string(
flight_id, "flight_id")
processed_day, _ = process_entity_string(day, "day")
sql = "SELECT flight.flight_id FROM flight JOIN days on flight.flight_days = days.days_code WHERE days.day_name = '%s' AND flight.flight_id = %s" % (
processed_day, processed_flight_id)
results = get_result(sql)
return len(results) > 0
def is_flight_day_arrival(flight_id, day):
"""
_day_arrival(x, "sunday:_da")
:entity_type (flight_id, day)
"""
processed_flight_id, entity_type = process_entity_string(flight_id, "flight_id")
processed_day, _ = process_entity_string(day, "day")
sql = "SELECT flight.flight_id FROM flight JOIN days on flight.flight_days = days.days_code WHERE days.day_name = '%s' AND flight.flight_id = %s" % (
processed_day, processed_flight_id)
results = get_result(sql)
return len(results) > 0
def is_flight_day_return(flight_id, day):
"""
_day_return(x, "tuesday:_da")
:entity_type (flight_id, day)
"""
processed_flight_id, entity_type = process_entity_string(flight_id, "flight_id")
processed_day, _ = process_entity_string(day, "day")
sql = "SELECT flight.flight_id FROM flight JOIN flight_fare ON flight.flight_id = flight_fare.flight_id JOIN fare ON flight_fare.fare_id = fare.fare_id JOIN fare_basis ON fare.fare_basis_code = fare_basis.fare_basis_code JOIN days ON fare_basis.basis_days = days.days_code WHERE flight.flight_id = %s AND days.day_name = '%s'" % (
processed_flight_id, processed_day)
results = get_result(sql)
return len(results) > 0
def is_flight_day_number_arrival(flight_id, day_number):
"""
_day_number_arrival(x, "14:_dn")
:entity_type (flight_id, day_number)
"""
processed_flight_id, entity_type = process_entity_string(
flight_id, "flight_id")
processed_day_number, _ = process_entity_string(day_number, "day_number")
sql = "SELECT flight.flight_id FROM flight JOIN days on flight.flight_days = days.days_code JOIN date_day ON days.day_name = date_day.day_name WHERE flight.flight_id = %s AND date_day.year = 1991 AND ((date_day.day_number = %s AND flight.arrival_time < flight.departure_time) OR (date_day.day_number = %s))" % (
processed_flight_id, str(int(processed_day_number) - 1), processed_day_number)
results = get_result(sql)
return len(results) > 0
def is_flight_day_number_return(flight_id, day_number):
"""
_day_number_return(x, "14:_dn")
:entity_type (flight_id, day_number)
"""
processed_flight_id, entity_type = process_entity_string(
flight_id, "flight_id")
processed_day_number, _ = process_entity_string(day_number, "day_number")
sql = "SELECT flight.flight_id FROM flight JOIN flight_fare ON flight.flight_id = flight_fare.flight_id JOIN fare ON flight_fare.fare_id = fare.fare_id JOIN fare_basis ON fare.fare_basis_code = fare_basis.fare_basis_code JOIN days ON fare_basis.basis_days = days.days_code JOIN date_day ON days.day_name = date_day.day_name WHERE flight.flight_id = %s AND date_day.day_number = %s" % (
processed_flight_id, processed_day_number)
results = get_result(sql)
return len(results) > 0
def is_flight_month_arrival(flight_id, month):
"""
_month_arrival(x, "june:_mn")
:entity_type (flight_id, month)
"""
processed_flight_id, entity_type = process_entity_string(flight_id, "flight_id")
processed_month, _ = process_entity_string(month, "month")
month_map = {
"january": 1,
"february": 2,
"march": 3,
"april": 4,
"may": 5,
"june": 6,
"july": 7,
"august": 8,
"september": 9,
"october": 10,
"november": 11,
"december": 12
}
sql = "SELECT flight.flight_id FROM flight JOIN days on flight.flight_days = days.days_code JOIN date_day ON days.day_name = date_day.day_name WHERE date_day.year = 1991 AND date_day.month_number = %s AND flight.flight_id = %s" % (
month_map[processed_month], processed_flight_id)
results = get_result(sql)
return len(results) > 0
def is_flight_on_month(flight_id, month):
"""
_month(x, "june:_mn")
:entity_type (flight_id, month)
"""
return is_flight_month_arrival(flight_id, month)
def is_flight_month_return(flight_id, month):
"""
_month_return(x, "june:_mn")
:entity_type (flight_id, month)
"""
processed_flight_id, entity_type = process_entity_string(
flight_id, "flight_id")
processed_month, _ = process_entity_string(month, "month")
month_map = {
"january": 1,
"february": 2,
"march": 3,
"april": 4,
"may": 5,
"june": 6,
"july": 7,
"august": 8,
"september": 9,
"october": 10,
"november": 11,
"december": 12
}
sql = "SELECT flight.flight_id FROM flight JOIN flight_fare ON flight.flight_id = flight_fare.flight_id JOIN fare ON flight_fare.fare_id = fare.fare_id JOIN fare_basis ON fare.fare_basis_code = fare_basis.fare_basis_code JOIN days ON fare_basis.basis_days = days.days_code JOIN date_day ON days.day_name = date_day.day_name WHERE date_day.year = 1991 AND date_day.month_number = %s AND flight.flight_id = %s" % (
month_map[processed_month], processed_flight_id)
results = get_result(sql)
return len(results) > 0
def is_next_days_flight(flight_id, integer):
"""
_next_days $0 2:_i
"""
processed_flight_id, entity_type = process_entity_string(
flight_id, "flight_id")
processed_integer, _ = process_entity_string(integer, "integer")
sql = "SELECT flight.flight_id FROM flight JOIN days on flight.flight_days = days.days_code JOIN date_day ON days.day_name = date_day.day_name WHERE date_day.year = 1991 AND date_day.month_number = 1 AND date_day.day_number BETWEEN 20 and %s AND flight.flight_id = %s" % (
int(processed_integer) + 20, processed_flight_id, )
results = get_result(sql)
return len(results) > 0
def is_overnight_flight(flight_id):
"""
TODO implementation
_overnight $0
:entity_type flight_id
"""
return True
def is_flight_days_from_today(flight_id, integer):
"""
    _days_from_today $0 2:_i
    :entity_type (flight_id, integer)
"""
processed_flight_id, entity_type = process_entity_string(
flight_id, "flight_id")
processed_integer, _ = process_entity_string(integer, "integer")
sql = "SELECT flight.flight_id FROM flight JOIN days on flight.flight_days = days.days_code JOIN date_day ON days.day_name = date_day.day_name WHERE date_day.year = 1991 AND date_day.month_number = 5 AND date_day.day_number = %s AND flight.flight_id = %s" % (
int(processed_integer) + 27, processed_flight_id)
results = get_result(sql)
return len(results) > 0
def is_tomorrow_flight(flight_id):
processed_flight_id, entity_type = process_entity_string(
flight_id, "flight_id")
sql = "SELECT flight.flight_id FROM flight JOIN days on flight.flight_days = days.days_code JOIN date_day ON days.day_name = date_day.day_name WHERE date_day.year = 1991 AND date_day.month_number = 1 AND date_day.day_number = 20 AND flight.flight_id = %s" % (processed_flight_id)
results = get_result(sql)
return len(results) > 0
def is_tomorrow_arrival_flight(flight_id):
processed_flight_id, entity_type = process_entity_string(
flight_id, "flight_id")
sql = "SELECT flight.flight_id FROM flight JOIN days on flight.flight_days = days.days_code JOIN date_day ON days.day_name = date_day.day_name WHERE date_day.year = 1991 AND date_day.month_number = 1 AND date_day.day_number = 20 AND flight.departure_time > flight.arrival_time AND flight.flight_id = %s" % (
processed_flight_id)
results = get_result(sql)
return len(results) > 0
def is_today_flight(flight_id):
processed_flight_id, entity_type = process_entity_string(
flight_id, "flight_id")
sql = "SELECT flight.flight_id FROM flight JOIN days on flight.flight_days = days.days_code JOIN date_day ON days.day_name = date_day.day_name WHERE date_day.year = 1991 AND date_day.month_number = 6 AND date_day.day_number = 22 AND flight.flight_id = %s" % (
processed_flight_id)
results = get_result(sql)
return len(results) > 0
def is_day_after_tomorrow_flight(flight_id):
processed_flight_id, entity_type = process_entity_string(
flight_id, "flight_id")
sql = "SELECT flight.flight_id FROM flight JOIN days on flight.flight_days = days.days_code JOIN date_day ON days.day_name = date_day.day_name WHERE date_day.year = 1991 AND date_day.month_number = 1 AND date_day.day_number = 21 AND flight.flight_id = %s" % (
processed_flight_id)
results = get_result(sql)
return len(results) > 0
def is_airport_of_city(city_name, airport_code):
"""
_airport(washington:_ci,x)
:entity_type city_name, airport_code
"""
processed_city_name, _ = process_entity_string(city_name, "city_name")
processed_airport_code, entity_type = process_entity_string(airport_code, "airport_code")
sql = 'SELECT airport_code FROM airport_service JOIN city ON city.city_code = airport_service.city_code WHERE city.city_name = "%s" AND airport_service.airport_code = "%s"' % (processed_city_name, processed_airport_code)
results = get_result(sql)
return len(results) > 0
def is_specific_fare_basis_code(entity, fare_basis_code):
"""
_fare_basis_code $0 415:_do
:entity_type: (fare_basis_code, fare_basis_code)
"""
processed_entity, _ = process_entity_string(entity, "fare_basis_code")
processed_fare_basis_code, _ = process_entity_string(fare_basis_code, "fare_basis_code")
return processed_entity.lower() == processed_fare_basis_code.lower()
def is_flight_has_specific_fare_basis_code(flight_id, fare_basis_code):
processed_flight_id, _ = process_entity_string(flight_id, "flight_id")
processed_fare_basis_code, _ = process_entity_string(
fare_basis_code, "fare_basis_code")
sql = "SELECT flight_id FROM flight_fare JOIN fare ON flight_fare.fare_id = fare.fare_id WHERE flight_id = %s AND fare.fare_basis_code = '%s'" % (processed_flight_id, processed_fare_basis_code)
results = get_result(sql)
return len(results) > 0
def is_flight_cost_fare(flight_id, dollar):
"""
_fare $0 415:_do
:entity_type: (flight_id, dollar)
"""
processed_flight_id, _ = process_entity_string(flight_id, "flight_id")
processed_dollar, _ = process_entity_string(dollar, "dollar")
sql = "SELECT fare.one_direction_cost FROM flight JOIN flight_fare ON flight.flight_id = flight_fare.flight_id JOIN fare ON fare.fare_id = flight_fare.fare_id WHERE flight.flight_id = %s AND fare.one_direction_cost = %s" % (processed_flight_id, processed_dollar)
results = get_result(sql)
return len(results) > 0
def is_time_elapsed(flight_id, hour):
"""
_time_elapsed $0 9:_hr
:entity_type: (flight_id, hour)
"""
processed_flight_id, _ = process_entity_string(flight_id, "flight_id")
processed_hour, _ = process_entity_string(hour, "hour")
minutes = (int(processed_hour) * 60)
sql = "SELECT flight_id FROM flight WHERE flight_id = %s AND time_elapsed = %s" % (processed_flight_id, minutes)
results = get_result(sql)
return len(results) > 0
def is_flight_meal_code(flight_id, meal_code):
"""
_meal_code $0 b:_rc
:entity_type: (flight_id, meal_code)
"""
processed_flight_id, _ = process_entity_string(flight_id, "flight_id")
processed_meal_code, _ = process_entity_string(meal_code, "meal_code")
sql = "SELECT flight_id FROM flight WHERE flight_id = %s AND meal_code = '%s'" % (processed_flight_id, processed_meal_code)
results = get_result(sql)
return len(results) > 0
def is_flight_has_specific_meal(flight_id, meal_description):
"""
_meal $0 dinner:_me
:entity_type: (flight_id, meal_description)
"""
processed_flight_id, _ = process_entity_string(flight_id, "flight_id")
processed_meal_description, _ = process_entity_string(
meal_description, "meal_description")
sql = "SELECT flight_id FROM flight JOIN food_service ON flight.meal_code = food_service.meal_code WHERE flight_id = %s AND food_service.meal_description = '%s'" % (
processed_flight_id, processed_meal_description)
results = get_result(sql)
return len(results) > 0
def is_flight_aircraft(flight_id, aircraft_code):
"""
    _aircraft $0
    :entity_type: (flight_id, aircraft_code)
"""
processed_flight_id, _ = process_entity_string(flight_id, "flight_id")
processed_aircraft_code, _ = process_entity_string(aircraft_code, "aircraft_code")
sql = "SELECT flight_id FROM flight WHERE flight_id = %s AND aircraft_code_sequence = '%s'" % (processed_flight_id, processed_aircraft_code)
results = get_result(sql)
return len(results) > 0
def is_airline_has_booking_class(class_description, airline_code):
"""
_airline(x, us:_al)
:entity_type: (class_description, airline_code)
"""
processed_class_description, _ = process_entity_string(
class_description, "class_description")
processed_airline_code, _ = process_entity_string(
airline_code, "airline_code")
sql = "SELECT class_description FROM flight JOIN flight_fare ON flight.flight_id = flight_fare.flight_id JOIN fare ON flight_fare.fare_id = fare.fare_id JOIN fare_basis ON fare.fare_basis_code = fare_basis.fare_basis_code JOIN class_of_service ON fare_basis.booking_class = class_of_service.booking_class WHERE class_of_service.class_description = '%s' AND flight.airline_code = '%s'" % (
processed_class_description, processed_airline_code)
results = get_result(sql)
return len(results) > 0
def is_airline_provide_meal(meal_code, airline_code):
processed_meal_code, _ = process_entity_string(
meal_code, "meal_code")
processed_airline_code, _ = process_entity_string(
airline_code, "airline_code")
sql = "SELECT meal_code FROM flight WHERE airline_code = '%s' AND meal_code = '%s'" % (processed_airline_code, processed_meal_code)
results = get_result(sql)
return len(results) > 0
def is_flight_has_booking_class(flight_id, class_description):
"""
_booking_class(x, us:_al)
:entity_type: (flight_id, class_description)
"""
processed_flight_id, _ = process_entity_string(flight_id, "flight_id")
processed_class_description, _ = process_entity_string(
class_description, "class_description")
sql = "SELECT flight_fare.flight_id FROM flight_fare JOIN fare ON flight_fare.fare_id = fare.fare_id JOIN fare_basis ON fare.fare_basis_code = fare_basis.fare_basis_code JOIN class_of_service ON fare_basis.booking_class = class_of_service.booking_class WHERE class_of_service.class_description = '%s' AND flight_fare.flight_id = %s" % (
processed_class_description, processed_flight_id)
results = get_result(sql)
return len(results) > 0
def is_flight_with_specific_aircraft(flight_id, aircraft_code):
processed_flight_id, _ = process_entity_string(
flight_id, "flight_id")
processed_aircraft_code, _ = process_entity_string(
aircraft_code, "aircraft_code")
sql = "SELECT flight_id FROM flight JOIN equipment_sequence ON flight.aircraft_code_sequence = equipment_sequence.aircraft_code_sequence WHERE flight.flight_id = %s AND equipment_sequence.aircraft_code = '%s'" % (processed_flight_id, processed_aircraft_code)
results = get_result(sql)
return len(results) > 0
# Unit Predicate
def is_aircraft(aircraft_code):
"""
_aircraft(x)
:entity_type: aircraft_code
"""
processed_aircraft_code, _ = process_entity_string(aircraft_code, "aircraft_code")
sql = "SELECT aircraft_code FROM aircraft WHERE aircraft_code = '%s'" % (
processed_aircraft_code)
results = get_result(sql)
return len(results) > 0
def aircraft_code(aircraft_code):
"""
_aircraft_code:_t $0
:entity_type: aircraft_code
"""
return is_aircraft(aircraft_code)
def is_city(city_name):
"""
_city(x)
:entity_type: city_name
"""
processed_city_name, _ = process_entity_string(city_name, "city_name")
sql = "SELECT city_name FROM city WHERE city_name = '%s'" % (
processed_city_name)
results = get_result(sql)
return len(results) > 0
def is_airline(entity):
"""
_airline(x)
:entity_type airline_code
"""
# assert isinstance(entity, str)
entity_name, entity_type = process_entity_string(entity, "airline_code")
sql = 'SELECT airline_code FROM airline WHERE airline_code = "%s"' % entity_name
results = get_result(sql)
return len(results) > 0
def is_airport(entity):
"""
airport(x)
:entity_type airport_code
"""
# assert isinstance(entity, str)
entity_name, entity_type = process_entity_string(entity, "airport_code")
sql = 'SELECT airport_code FROM airport WHERE airport_code = "%s"' % entity_name
results = get_result(sql)
return len(results) > 0
def is_flight(entity):
"""
flight(x)
:entity_type flight_id
"""
# assert isinstance(entity, str)
entity_name, entity_type = process_entity_string(entity, "flight_id")
sql = 'SELECT flight_id FROM flight WHERE flight_id = %s' % entity_name
results = get_result(sql)
return len(results) > 0
def is_daily_flight(entity):
"""
_daily(x)
:entity_type flight_id
"""
entity_name, entity_type = process_entity_string(entity, "flight_id")
sql = "SELECT flight_id FROM flight WHERE flight_days = 'daily' AND flight_id = %s" % entity_name
results = get_result(sql)
return len(results) > 0
def is_discounted_flight(entity):
"""
_discounted(x)
:entity_type flight_id
"""
entity_name, entity_type = process_entity_string(entity, "flight_id")
sql = "SELECT flight.flight_id FROM flight JOIN flight_fare ON flight.flight_id = flight_fare.flight_id JOIN fare ON fare.fare_id = flight_fare.fare_id JOIN fare_basis ON fare.fare_basis_code = fare_basis.fare_basis_code WHERE fare_basis.discounted = 'YES' AND flight.flight_id = %s" % entity_name
results = get_result(sql)
return len(results) > 0
def is_connecting_flight(entity):
"""
_connecting(x)
:entity_type flight_id
"""
entity_name, entity_type = process_entity_string(entity, "flight_id")
sql = 'SELECT flight_id FROM flight WHERE flight_id = %s AND connections > 0' % entity_name
results = get_result(sql)
return len(results) > 0
def is_oneway(entity):
"""
oneway(x)
:entity_type flight_id
"""
entity_name, entity_type = process_entity_string(entity, "flight_id")
sql = 'SELECT flight.flight_id FROM flight JOIN flight_fare ON flight.flight_id = flight_fare.flight_id JOIN fare ON flight_fare.fare_id = fare.fare_id WHERE fare.round_trip_required = "NO" AND flight.flight_id = %s' % (
entity_name)
results = get_result(sql)
return len(results) > 0
def is_flight_has_stop(entity):
"""
_has_stops(x)
:entity_type flight_id
"""
entity_name, entity_type = process_entity_string(entity, "flight_id")
sql = 'SELECT T1.flight_id FROM flight AS T1 JOIN flight_stop AS T2 ON T1.flight_id = T2.flight_id WHERE T1.flight_id = %s' % (
entity_name)
results = get_result(sql)
return len(results) > 0
def is_non_stop_flight(flight_id):
"""
_nonstop(x)
:entity_type flight_id
"""
processed_flight_id, entity_type = process_entity_string(flight_id, "flight_id")
sql = 'SELECT flight.flight_id FROM flight WHERE flight.stops = 0 AND flight.flight_id = %s' % (
processed_flight_id)
results = get_result(sql)
return len(results) > 0
def is_meal(entity):
"""
TODO: not sure
_meal:_t(x)
:entity_type meal_code
"""
entity_name, entity_type = process_entity_string(entity, "meal_code")
sql = "SELECT meal_code FROM food_service WHERE food_service.meal_code = '%s'" % (entity_name)
results = get_result(sql)
return len(results) > 0
def is_meal_code(entity):
"""
_meal_code(x)
:entity_type meal_code
"""
entity_name, entity_type = process_entity_string(entity, "meal_code")
sql = "SELECT meal_code FROM food_service WHERE food_service.meal_code = '%s'" % (
entity_name)
results = get_result(sql)
return len(results) > 0
def is_flight_has_meal(entity):
"""
_has_meal(x):
:entity_type flight_id
"""
entity_name, entity_type = process_entity_string(entity, "flight_id")
sql = "SELECT flight_id FROM flight WHERE meal_code is not NULL AND flight_id = %s" % (
entity_name)
results = get_result(sql)
return len(results) > 0
def is_flight_tonight(entity):
"""
_tonight(x)
:entity_type flight_id
"""
entity_name, entity_type = process_entity_string(entity, "flight_id")
sql = "SELECT flight_id FROM flight WHERE departure_time BETWEEN %d AND %d AND flight_id = %s" % (
1800, 2359, entity_name)
results = get_result(sql)
return len(results) > 0
def is_booking_class_t(entity):
"""
_booking_class:_t(x)
:entity_type: class_description
"""
entity_name, entity_type = process_entity_string(
entity, "class_description")
sql = "SELECT DISTINCT class_description FROM class_of_service WHERE class_description = '%s';" % (
entity_name)
results = get_result(sql)
return len(results) > 0
def is_class_of_service(entity):
"""
_class_of_service(x)
:entity_type: booking_class
"""
return is_booking_class_t(entity)
def is_fare_basis_code(entity):
"""
_fare_basis_code(x)
:entity_type: fare_basis_code
"""
entity_name, entity_type = process_entity_string(entity, "fare_basis_code")
sql = "SELECT DISTINCT fare_basis_1.fare_basis_code FROM fare_basis fare_basis_1 WHERE fare_basis_code = '%s'" % (
entity_name)
results = get_result(sql)
return len(results) > 0
def is_flight_economy(flight_id):
"""
_economy(x)
:entity_type: flight_id
"""
processed_flight_id, entity_type = process_entity_string(flight_id, "flight_id")
sql = "SELECT flight_fare.flight_id FROM flight_fare JOIN fare ON flight_fare.fare_id = fare.fare_id JOIN fare_basis ON fare.fare_basis_code = fare_basis.fare_basis_code WHERE fare_basis.economy = 'YES' AND flight_fare.flight_id = %s" % (
processed_flight_id)
results = get_result(sql)
return len(results) > 0
def is_economy(entity):
"""
_economy(x)
:entity_type: fare_basis_code
"""
entity_name, entity_type = process_entity_string(entity, "fare_basis_code")
sql = "SELECT DISTINCT fare_basis_code FROM fare_basis fare_basis_1 WHERE fare_basis_1.economy = 'YES' AND fare_basis_1.fare_basis_code = '%s'" % (
entity_name)
results = get_result(sql)
return len(results) > 0
def is_fare(entity):
"""
_fare(x)
:entity_type: fare_id
"""
entity_name, entity_type = process_entity_string(entity, "fare_id")
sql = "SELECT DISTINCT fare_id FROM flight JOIN flight_fare ON flight.flight_id = flight_fare.flight_id WHERE fare_id = %s" % (
entity_name)
results = get_result(sql)
return len(results) > 0
def is_aircraft_code_t(entity):
"""
_aircraft_code:t(x)
:entity_type: aircraft_code
"""
entity_name, entity_type = process_entity_string(entity, "aircraft_code")
sql = "SELECT aircraft_code FROM aircraft WHERE aircraft_code = '%s'" % (
entity_name)
results = get_result(sql)
return len(results) > 0
def is_ground_transport(transport_type):
"""
_ground_transport(x)
:entity_type: transport_type
"""
entity_name, entity_type = process_entity_string(
transport_type, "transport_type")
sql = "SELECT DISTINCT ground_service_1.transport_type FROM ground_service ground_service_1 WHERE ground_service_1.transport_type = '%s'" % (
entity_name)
results = get_result(sql)
return len(results) > 0
def is_round_trip(entity):
"""
_round_trip(x)
:entity_type: flight_id
"""
entity_name, entity_type = process_entity_string(entity, "flight_id")
sql = 'SELECT flight.flight_id FROM flight JOIN flight_fare ON flight.flight_id = flight_fare.flight_id JOIN fare ON flight_fare.fare_id = fare.fare_id WHERE fare.round_trip_required IS NOT NULL AND flight.flight_id = %s' % (
entity_name)
results = get_result(sql)
return len(results) > 0
def is_rental_car(entity):
"""
_rental_car(x)
:entity_type: transport_type
"""
entity_name, entity_type = process_entity_string(entity, "transport_type")
return entity_name.lower() == "rental car"
def is_limousine(entity):
"""
_limousine(x)
:entity_type: transport_type
"""
entity_name, entity_type = process_entity_string(entity, "transport_type")
return entity_name.upper() == "LIMOUSINE"
def is_rapid_transit(entity):
"""
_rapid_transit(x)
:entity_type: transport_type
"""
entity_name, entity_type = process_entity_string(entity, "transport_type")
return entity_name.upper() == "RAPID TRANSIT"
def is_taxi(entity):
"""
_taxi(x)
:entity_type: transport_type
"""
entity_name, entity_type = process_entity_string(entity, "transport_type")
return entity_name.upper() == "TAXI"
def is_air_taxi_operation(entity):
"""
_air_taxi_operation(x)
:entity_type: transport_type
"""
entity_name, entity_type = process_entity_string(entity, "transport_type")
return entity_name.upper() == "AIR TAXI OPERATION"
def is_ground_transport_on_weekday(entity):
"""
_weekday(x)
    :entity_type: transport_type
    TODO: not implemented; always returns True.
"""
return True
def is_flight_on_year(entity, year):
"""
_year(x,"1991:_yr")
:entity_type: flight_id
"""
entity_name, entity_type = process_entity_string(entity, "flight_id")
processed_year, _ = process_entity_string(year, "year")
sql = "SELECT flight_id FROM flight JOIN days ON flight.flight_days = days.days_code JOIN date_day ON days.day_name = date_day.day_name WHERE flight_id = %s AND date_day.year = %s" % (entity_name, processed_year)
results = get_result(sql)
return len(results) > 0
def is_flight_on_weekday(entity):
"""
_weekday(x)
:entity_type: flight_id
"""
entity_name, entity_type = process_entity_string(entity, "flight_id")
sql = "SELECT distinct day_name FROM flight JOIN days ON flight.flight_days = days.days_code WHERE flight_id = %s AND day_name IN ('MONDAY', 'TUESDAY', 'WEDNESDAY', 'THURSDAY', 'FRIDAY')" % entity_name
results = get_result(sql)
return len(results) == 5
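# Note: is_flight_on_weekday only holds when the flight operates on all five weekday
# names (the query must return MONDAY through FRIDAY), e.g.
# is_flight_on_weekday({"flight_id": 102})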
def is_time_zone_code(entity):
"""
_time_zone_code(x)
:entity_type: time_zone_code
"""
entity_name, entity_type = process_entity_string(entity, "time_zone_code")
return entity_name.upper() in {"CST", "EST", "MST", "PST"}
def is_turboprop(aircraft_code):
"""
_turboprop(x)
:entity_type: aircraft_code
"""
processed_aircraft_code, entity_type = process_entity_string(
aircraft_code, "aircraft_code")
sql = "SELECT aircraft_code FROM aircraft WHERE aircraft_code = '%s' AND propulsion = 'TURBOPROP'" % (
processed_aircraft_code)
results = get_result(sql)
return len(results) > 0
def is_flight_turboprop(flight_id):
"""
_turboprop(x)
:entity_type: flight_id
"""
processed_flight_id, entity_type = process_entity_string(
flight_id, "flight_id")
sql = "SELECT flight_id FROM flight JOIN aircraft ON flight.aircraft_code_sequence = aircraft.aircraft_code WHERE propulsion = 'TURBOPROP' AND flight_id = %s" % (
processed_flight_id)
results = get_result(sql)
return len(results) > 0
def is_flight_jet(flight_id):
"""
_jet(x)
:entity_type: flight_id
"""
processed_flight_id, entity_type = process_entity_string(
flight_id, "flight_id")
sql = "SELECT flight_id FROM flight JOIN aircraft ON flight.aircraft_code_sequence = aircraft.aircraft_code WHERE propulsion = 'JET' AND flight_id = %s" % (
processed_flight_id)
results = get_result(sql)
return len(results) > 0
# Meta Predicate
# TODO implement meta-predicates
def equals(entity_1, entity_2):
if entity_1 is None or entity_2 is None:
return False
processed_entity_1, _ = process_entity_string(entity_1)
processed_entity_2, _ = process_entity_string(entity_2)
return str(processed_entity_1).lower() == str(processed_entity_2).lower()
def count(function, entity_set_function):
if entity_set_function is None:
entity_set_function = get_all_flight_ids
count = 0
for entity in entity_set_function():
if function(entity):
count += 1
return count
def exists(function, entity_set_function):
if entity_set_function is None:
entity_set_function = get_all_flight_ids
for e in entity_set_function():
if function(e):
return True
return False
def the(function, entity_set_function):
if entity_set_function is None:
entity_set_function = get_all_flight_ids
return [entity for entity in entity_set_function() if function(entity)]
def argmax(predicate, target_function, entity_set_function):
if entity_set_function is None:
entity_set_function = get_all_flight_ids
values = list()
for e in entity_set_function():
if predicate(e):
v = target_function(e)
if isinstance(v, list):
for _v in v:
assert isinstance(_v, dict)
values.append((e, _v[list(_v.keys())[0]],))
elif isinstance(v, dict):
values.append((e, v[list(v.keys())[0]],))
else:
assert isinstance(v, int) or isinstance(v, float)
values.append((e, v,))
max_value, max_indices = 0, list()
for idx, (e, v) in enumerate(values):
if v is None:
continue
if v > max_value:
max_value = v
max_indices = [idx]
elif v == max_value:
max_indices.append(idx)
if len(max_indices) > 0:
return [values[idx][0] for idx in max_indices]
return None
def argmin(predicate, target_function, entity_set_function):
if entity_set_function is None:
entity_set_function = get_all_flight_ids
values = list()
for e in entity_set_function():
if predicate(e):
v = target_function(e)
if isinstance(v, list):
for _v in v:
assert isinstance(_v, dict)
values.append((e, _v[list(_v.keys())[0]],))
elif isinstance(v, dict):
values.append((e, v[list(v.keys())[0]],))
else:
assert isinstance(v, int) or isinstance(v, float)
values.append((e, v,))
min_value, min_indices = 10000000, list()
for idx, (e, v) in enumerate(values):
if v is None:
continue
if v < min_value:
min_value = v
min_indices = [idx]
elif v == min_value:
min_indices.append(idx)
if len(min_indices) > 0:
return [values[idx][0] for idx in min_indices]
return None
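# argmax/argmin above first restrict the candidate entity set with `predicate`,
# score every surviving entity with `target_function` (taking the first column
# when the target function returns rows), and return the list of entities that
# attain the extreme score; ties are kept and None scores are skipped.
# Illustrative (hypothetical) use, mirroring a transformed logical form:
#   argmin(lambda f: is_flight(f) and is_from(f, "boston:_ci"),
#          get_flight_fare,        # projection helper assumed to be defined earlier
#          get_all_flight_ids)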
def sum_predicate(predicate, target_function, entity_set_function):
if entity_set_function is None:
entity_set_function = get_all_flight_ids
values = list()
for e in entity_set_function():
if predicate(e):
v = target_function(e)
if isinstance(v, list):
for _v in v:
assert isinstance(_v, dict)
values.append((e, _v[list(_v.keys())[0]],))
elif isinstance(v, dict):
values.append((e, v[list(v.keys())[0]],))
else:
assert isinstance(v, int) or isinstance(v, float)
values.append((e, v,))
print(values)
total = 0
for e, v in values:
total += v
return total
def max_predicate(predicate, target_function, entity_set_function):
if entity_set_function is None:
entity_set_function = get_all_flight_ids
values = list()
for e in entity_set_function():
if predicate(e):
v = target_function(e)
if isinstance(v, list):
for _v in v:
assert isinstance(_v, dict)
values.append((e, _v[list(_v.keys())[0]],))
elif isinstance(v, dict):
values.append((e, v[list(v.keys())[0]],))
else:
assert isinstance(v, int) or isinstance(v, float)
values.append((e, v,))
if len(values) == 0:
return None
max_value = 0
for e, v in values:
if v > max_value:
max_value = v
return max_value
def min_predicate(predicate, target_function, entity_set_function):
if entity_set_function is None:
entity_set_function = get_all_flight_ids
values = list()
for e in entity_set_function():
if predicate(e):
v = target_function(e)
if isinstance(v, list):
for _v in v:
assert isinstance(_v, dict)
values.append((e, _v[list(_v.keys())[0]],))
elif isinstance(v, dict):
values.append((e, v[list(v.keys())[0]],))
else:
assert isinstance(v, int) or isinstance(v, float)
values.append((e, v,))
if len(values) == 0:
return None
min_value = 100000000
for e, v in values:
if v < min_value:
min_value = v
return min_value
def get_target_value(predicate, target_function, entity_set_function):
if entity_set_function is None:
entity_set_function = get_all_flight_ids
values = list()
for e in entity_set_function():
if predicate(e):
v = target_function(e)
if isinstance(v, list):
for _v in v:
assert isinstance(_v, dict)
v_dict = dict()
v_dict.update(e)
v_dict[target_function.__name__ + '_0'] = _v[list(_v.keys())[0]]
values.append(v_dict)
elif isinstance(v, dict):
v_dict = dict()
v_dict.update(e)
v_dict[target_function.__name__ + '_0'] = v[list(v.keys())[0]]
values.append(v_dict)
else:
v_dict = dict()
v_dict.update(e)
v_dict[target_function.__name__ + '_0'] = v
values.append(v_dict)
return values
def get_target_values(predicate, target_functions, entity_set_function):
if entity_set_function is None:
entity_set_function = get_all_flight_ids
values = list()
for e in entity_set_function():
if predicate(e):
_values = list()
v_dict = dict()
v_dict.update(e)
for tf_idx, tf in enumerate(target_functions):
v = tf(e)
suffix = "_%d" % tf_idx
if isinstance(v, list):
for _v in v:
assert isinstance(_v, dict)
v_dict[tf.__name__ + suffix] = _v[list(_v.keys())[0]]
# _values.append(_v[list(_v.keys())[0]])
elif isinstance(v, dict):
v_dict[tf.__name__ + suffix] = v[list(v.keys())[0]]
# values.append(v[list(v.keys())[0]])
else:
v_dict[tf.__name__ + suffix] = v
# values.append(v)
values.append(v_dict)
return values
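# get_target_value / get_target_values return one result dict per entity that
# satisfies `predicate`: the entity's own columns are copied and each projected
# value is added under a key named "<target_function.__name__>_<index>".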
def process_numerical_value(value):
if isinstance(value, list):
assert isinstance(value[0], dict)
_value = float(value[0][list(value[0].keys())[0]])
elif isinstance(value, dict):
_value = float(value[list(value.keys())[0]])
elif isinstance(value, str):
_value, _ = process_entity_string(value)
_value = float(_value)
else:
_value = float(value)
return _value
def process_value(value):
if isinstance(value, list):
if len(value) == 0:
return ""
assert isinstance(value[0], dict)
_value = value[0][list(value[0].keys())[0]]
elif isinstance(value, dict):
_value = value[list(value.keys())[0]]
elif isinstance(value, str):
_value, _ = process_entity_string(value)
_value = value
else:
_value = value
return _value
def less_than(value_1, value_2):
    """
    _< (note: implemented here as a non-strict, less-than-or-equal comparison)
    """
_value_1 = process_numerical_value(value_1)
_value_2 = process_numerical_value(value_2)
return _value_1 <= _value_2
def larger_than(value_1, value_2):
    """
    _> (note: implemented here as a non-strict, greater-than-or-equal comparison)
    """
_value_1 = process_numerical_value(value_1)
_value_2 = process_numerical_value(value_2)
return _value_1 >= _value_2
def numerical_equals(value_1, value_2):
"""
_=
"""
_value_1 = process_value(value_1)
_value_2 = process_value(value_2)
return _value_1 == _value_2
if __name__ == '__main__':
result = [(xe, ye) for xe in get_all_flight_ids() for ye in get_all_aircraft_codes() if (lambda x,y: (is_flight_with_specific_aircraft(x,y) and is_flight_airline(x,"dl:_al") and is_flight(x) and is_from(x,"seattle:_ci") and is_to(x,"salt_lake_city:_ci")))(xe, ye)]
pprint(result)
| 82,088 | 35.811211 | 416 |
py
|
Unimer
|
Unimer-master/executions/jobs/evaluate_sql.py
| 0 | 0 | 0 |
py
|
|
Unimer
|
Unimer-master/executions/jobs/evaluate_lambda_calculus.py
|
# coding=utf8
| 16 | 3.25 | 13 |
py
|
Unimer
|
Unimer-master/executions/jobs/evaluate_funql.py
| 0 | 0 | 0 |
py
|
|
Unimer
|
Unimer-master/executions/jobs/evaluate_prolog.py
| 0 | 0 | 0 |
py
|
|
Unimer
|
Unimer-master/executions/jobs/funql/transform.py
| 0 | 0 | 0 |
py
|
|
Unimer
|
Unimer-master/executions/jobs/funql/evaluator.py
| 0 | 0 | 0 |
py
|
|
Unimer
|
Unimer-master/executions/jobs/funql/__init__.py
| 0 | 0 | 0 |
py
|
|
Unimer
|
Unimer-master/executions/jobs/funql/query.py
| 0 | 0 | 0 |
py
|
|
Unimer
|
Unimer-master/executions/jobs/sql/parse_job_data.py
|
# coding=utf8
import re
import mysql.connector
from pprint import pprint
db = mysql.connector.connect(
host="localhost",
user="root",
passwd="123456",
database="job",
auth_plugin='mysql_native_password'
)
def parse_entry(value):
first_index = value.index('(')
last_index = value.rindex(')')
table_name = value[:first_index]
values = value[first_index + 1:last_index]
values = values.split(',')
formatted_values = list()
for v in values:
v = v.strip().replace("'", "").replace('"', "")
v = re.sub(' +', ' ', v)
if v == 'n/a':
v = None
elif re.match('^\d+$', v):
v = int(v)
formatted_values.append(v)
return table_name, formatted_values
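# Illustrative example (assuming Prolog-style facts in the data file):
#   parse_entry("raw_job('j100','software engineer').")
#   -> ('raw_job', ['j100', 'software engineer'])
# Quotes are stripped, repeated spaces collapsed, 'n/a' becomes None and purely
# numeric fields are converted to int.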
def parse(file):
with open(file, 'r') as f:
lines = f.readlines()
data = list()
lidx = 0
while lidx < len(lines):
line = lines[lidx].strip()
nlidx = lidx
while nlidx < len(lines) and not lines[nlidx].strip().endswith('.'):
nlidx += 1
nlidx += 1
info = ' '.join([l.strip() for l in lines[lidx:nlidx]])
info = re.sub('\t', '', info)
# print(info)
data.append(info)
lidx = nlidx
value_dict = dict()
for d in data:
table_name, values = parse_entry(d)
if values[0] == '':
continue
if table_name not in value_dict:
value_dict[table_name] = list()
value_dict[table_name].append(values)
pprint(value_dict)
# Validate
for tn, values in value_dict.items():
length = len(values[0])
for vidx, v in enumerate(values):
if length != len(v):
# Hot fix
assert tn == 'raw_area' and v[0] == '[email protected]'
values[vidx] = ['[email protected]', 'atm,ip']
print("Failed: ", tn, v)
# Check Duplicate
count = 0
job_ids = set()
for vidx, values in enumerate(value_dict['raw_job']):
job_id = values[0]
job_ids.add(job_id)
for nidx, nvalues in enumerate(value_dict['raw_job']):
if nidx != vidx and nvalues[0] == job_id:
print("Duplicated")
print(values)
print(nvalues)
print("===\n\n")
count += 1
# Ensure foreign key Constraints
print(count)
age_foreign_key_violate_count = 0
for values in value_dict['raw_age']:
if values[0] not in job_ids:
print(values)
age_foreign_key_violate_count += 1
print(age_foreign_key_violate_count)
return value_dict
if __name__ == '__main__':
parse('./jobdata')
| 2,748 | 25.432692 | 91 |
py
|
Unimer
|
Unimer-master/executions/jobs/sql/evaluator.py
| 0 | 0 | 0 |
py
|
|
Unimer
|
Unimer-master/executions/jobs/sql/__init__.py
| 0 | 0 | 0 |
py
|
|
Unimer
|
Unimer-master/executions/jobs/lambda/transform.py
| 0 | 0 | 0 |
py
|
|
Unimer
|
Unimer-master/executions/jobs/lambda/__init__.py
| 0 | 0 | 0 |
py
|
|
Unimer
|
Unimer-master/metrics/sequency_accuracy.py
|
# coding=utf8
import torch
from overrides import overrides
from allennlp.training.metrics import Metric
from typing import Union, Tuple, Dict, List, Optional
class SequenceAccuracy(Metric):
def __init__(self) -> None:
self._correct_counts = 0.
self._total_counts = 0.
self._pad_index = -1
def __call__(self, predictions: torch.Tensor, gold_labels: torch.Tensor, mask: torch.Tensor) -> None:
batch_size, p_len = predictions.size()
batch_size, g_len = gold_labels.size()
if p_len >= g_len:
_predictions = predictions[:, :g_len]
else:
_predictions = torch.cat((predictions, predictions.new_ones(batch_size, g_len - p_len) * self._pad_index),
dim=-1)
assert _predictions.size(1) == g_len
masked_predictions = _predictions * mask
masked_gold_labels = gold_labels * mask
eqs = masked_gold_labels.eq(masked_predictions).int()
result = (eqs.sum(-1) == g_len).int()
self._correct_counts += result.sum()
self._total_counts += batch_size
@overrides
def get_metric(self, reset: bool) -> Union[float, Tuple[float, ...], Dict[str, float], Dict[str, List[float]]]:
"""
Returns
-------
The accumulated accuracy.
"""
if self._total_counts > 0:
accuracy = float(self._correct_counts) / float(self._total_counts)
else:
accuracy = 0
if reset:
self.reset()
return {'accuracy': accuracy}
@overrides
def reset(self) -> None:
self._correct_counts = 0.
self._total_counts = 0.
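# Minimal usage sketch (shapes assumed): `predictions` and `gold_labels` are
# integer tensors of shape (batch, seq_len); `mask` is 1 at real target positions
# and 0 at padding. A sequence counts as correct only when every unmasked
# position matches exactly.
#   metric = SequenceAccuracy()
#   metric(predictions, gold_labels, mask)
#   metric.get_metric(reset=True)  # -> {'accuracy': ...}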
| 1,684 | 29.636364 | 118 |
py
|
Unimer
|
Unimer-master/metrics/__init__.py
| 0 | 0 | 0 |
py
|
|
Unimer
|
Unimer-master/data_readers/gnn_data_reader.py
|
# coding=utf8
import re
import copy
import numpy as np
from typing import Iterable, Callable, List, Dict
from overrides import overrides
from allennlp.data import Instance, Tokenizer
from allennlp.data.fields import TextField, ArrayField, MetadataField
from allennlp.common.util import START_SYMBOL, END_SYMBOL
from allennlp.data.dataset_readers import DatasetReader
from allennlp.data.tokenizers import Token
from allennlp.data.token_indexers import SingleIdTokenIndexer
class GNNCopyTransformerDataReader(DatasetReader):
SEP_SYMBOL = "@sep@"
TOKEN_SYMBOL = "@token@"
ENTITY_SYMBOL = "@entity@"
NON_FUNC_SYMBOL = "@nonfunc@"
TARGET_SYMBOL = "@target@"
def __init__(self,
entity_matcher,
entity_replacer,
target_grammar,
source_tokenizer: Tokenizer,
target_tokenizer: Tokenizer,
logical_form_preprocessor: Callable = None,
lazy: bool = False,
nlabels: int = 4,
relative_position_clipped_range: int = 8,
allow_drop: bool = False) -> None:
super().__init__(lazy=lazy)
self._entity_matcher = entity_matcher
self._entity_replacer = entity_replacer
self._target_grammar = target_grammar
self._source_namespace = 'source_tokens'
self._target_namespace = 'target_tokens'
self._segment_namespace = 'segment_tokens'
self._source_tokenizer = source_tokenizer
self._target_tokenizer = target_tokenizer
self._source_token_indexers = {'tokens': SingleIdTokenIndexer(namespace='source_tokens')}
self._target_token_indexers = {
"tokens": SingleIdTokenIndexer(namespace=self._target_namespace)
}
self._segment_token_indexers = {
"tokens": SingleIdTokenIndexer(namespace=self._segment_namespace)
}
self._nlabels = nlabels
self._relative_position_clipped_range = relative_position_clipped_range
self._logical_form_preprocessor = logical_form_preprocessor
self._allow_drop = allow_drop
@overrides
def text_to_instance(self, source_string: str, target_string: str = None) -> Instance:
"""
Turn raw source string and target string into an ``Instance``.
Parameters
----------
source_string : ``str``, required
target_string : ``str``, optional (default = None)
Returns
-------
Instance
See the above for a description of the fields that the instance will contain.
"""
tokenized_source = self._source_tokenizer.tokenize(source_string)
tokenized_source.insert(0, Token(START_SYMBOL))
tokenized_source.append(Token(END_SYMBOL))
segments = [Token(self.TOKEN_SYMBOL) for i in range(len(tokenized_source))]
source_entity_length = [1 for i in range(len(tokenized_source))]
token_length = len(segments)
pseudo_tokens = [t.text for t in tokenized_source]
candidates = self._entity_matcher.match(tokenized_source)
diff = 0
if len(candidates) > 0:
for entity in candidates:
value = entity['value'].replace('_', ' ')
words = value.split()
for v in words:
tokenized_source.append(Token(v))
segments.append(Token(entity['type']))
diff += 0 if len(words) == 1 else len(words) - 1
entity['index'] = len(pseudo_tokens)
source_entity_length.append(len(words))
pseudo_tokens.append(value)
tokenized_source.append(Token(self.SEP_SYMBOL))
source_entity_length.append(1)
segments.append(Token(self.NON_FUNC_SYMBOL))
pseudo_tokens.append(self.SEP_SYMBOL)
else:
tokenized_source.append(Token(self.SEP_SYMBOL))
source_entity_length.append(1)
segments.append(Token(self.NON_FUNC_SYMBOL))
pseudo_tokens.append(self.SEP_SYMBOL)
assert len(tokenized_source) == len(segments) + diff and sum(source_entity_length) == len(tokenized_source) \
and len(pseudo_tokens) == len(segments) and len(pseudo_tokens) == len(source_entity_length)
source_field = TextField(tokenized_source, self._source_token_indexers)
fields_dict = {'source_tokens': source_field,
'source_entity_length': ArrayField(np.array(source_entity_length, dtype=np.int),padding_value=0),
'segments': TextField(segments, self._segment_token_indexers)}
# TODO: fix edge mask
edge_mask = self.get_edge_mask(tokenized_source, token_length, len(segments), pseudo_tokens, candidates)
fields_dict['edge_mask'] = ArrayField(edge_mask)
preprocessed_target_string = self._logical_form_preprocessor(target_string)
meta_field = {
'target': target_string,
'pseudo_tokens': pseudo_tokens,
'entity_candidates': candidates,
'token_length': token_length
}
tokenized_target = self._target_tokenizer.tokenize(preprocessed_target_string)
tokenized_target.insert(0, Token(START_SYMBOL))
tokenized_target.append(Token(END_SYMBOL))
is_valid, replaced_target_tokens = self._entity_replacer(self._target_grammar,
preprocessed_target_string,
tokenized_target, candidates)
if not is_valid and self._allow_drop:
return None
target_field = TextField(replaced_target_tokens, self._target_token_indexers)
fields_dict['target_tokens'] = target_field
target_action_sequence = np.zeros(len(replaced_target_tokens), dtype=np.int)
copy_targets = list()
train_entity_appear_indicator = np.zeros(len(pseudo_tokens))
for tidx, token in enumerate(replaced_target_tokens):
ct = np.zeros(len(segments))
match = re.match('^@entity_(\d+)', token.text)
if match:
index = int(match.group(1))
target_action_sequence[tidx] = index
ct[index] = 1
copy_targets.append(ct)
fields_dict['copy_targets'] = ArrayField(np.array(copy_targets))
fields_dict['generate_targets'] = ArrayField(target_action_sequence)
fields_dict['meta_field'] = MetadataField(meta_field)
return Instance(fields_dict)
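    # get_edge_mask builds one adjacency matrix per edge label over the joint
    # token+entity sequence: 2*k+1 channels for clipped relative positions
    # between question tokens (k = relative_position_clipped_range), followed by
    # entity-entity, token-in-entity and token-not-in-entity channels.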
def get_edge_mask(self, tokenized_source: List[Token], token_length: int, segment_length: int,
pseudo_tokens: List[str], candidates: List[Dict]):
entity_length = segment_length - token_length
# Edge Labels
token_edges = np.zeros((2 * self._relative_position_clipped_range + 1, segment_length, segment_length))
for st_i_idx, st_i in enumerate(tokenized_source[:token_length]):
for st_j_idx, st_j in enumerate(tokenized_source[:token_length]):
distance = st_j_idx - st_i_idx
if distance > self._relative_position_clipped_range:
distance = self._relative_position_clipped_range
elif distance < - self._relative_position_clipped_range:
distance = - self._relative_position_clipped_range
token_edges[distance, st_i_idx, st_j_idx] = 1
entity_edges = np.pad(np.ones((entity_length, entity_length), dtype=np.int),
((token_length, 0), (token_length, 0)), mode='constant', constant_values=0)
token_in_entity_edges = np.zeros((segment_length, segment_length), dtype=np.int)
token_not_in_entity_edges = np.zeros((segment_length, segment_length), dtype=np.int)
if len(candidates) > 0 and 'indices' in candidates[0]:
# Use indices
for candidate in candidates:
for tidx in range(token_length):
if tidx in candidate['indices']:
token_in_entity_edges[tidx, candidate['index']] = 1
token_in_entity_edges[candidate['index'], tidx] = 1
else:
token_not_in_entity_edges[tidx, candidate['index']] = 1
token_not_in_entity_edges[candidate['index'], tidx] = 1
else:
for st_idx, st in enumerate(pseudo_tokens[:token_length]):
for e_idx, e in enumerate(pseudo_tokens[token_length:]):
# if st == e:
if st in e.split():
# print(st, " in ", e)
token_in_entity_edges[st_idx, token_length + e_idx] = 1
token_in_entity_edges[token_length + e_idx, st_idx] = 1
else:
token_not_in_entity_edges[st_idx, token_length + e_idx] = 1
token_not_in_entity_edges[token_length + e_idx, st_idx] = 1
edge_masks = np.stack((entity_edges, token_in_entity_edges, token_not_in_entity_edges),
axis=0)
# shape: (2k + 1 + 3, source_length, source_length)
edge_masks = np.concatenate((token_edges, edge_masks), axis=0)
return edge_masks
@overrides
def _read(self, file_path: str) -> Iterable[Instance]:
with open(file_path, 'r') as data_file:
for line in data_file:
line = line.strip()
if not line:
continue
line_parts = line.split('\t')
assert len(line_parts) == 2
instance = self.text_to_instance(line_parts[0], line_parts[1])
if instance is None:
continue
else:
yield instance
| 9,933 | 44.360731 | 120 |
py
|
Unimer
|
Unimer-master/data_readers/seq2seq_data_reader.py
|
# coding=utf-8
import numpy as np
from typing import Iterable, Callable
from overrides import overrides
from allennlp.data import Instance
from allennlp.data.fields import TextField, ArrayField, MetadataField
from allennlp.common.util import START_SYMBOL, END_SYMBOL
from allennlp.data.dataset_readers import DatasetReader
from allennlp.data.tokenizers import Token
from allennlp.data.token_indexers import SingleIdTokenIndexer
class Seq2SeqDataReader(DatasetReader):
COPY_TOKEN = '@copy@'
def __init__(self, question_tokenizer, logical_form_tokenizer,
logical_form_preprocessor: Callable = None,
lazy: bool = False, is_parsing=False,
enable_copy: bool = False, maximum_target_length: int = 0,
entity_matcher = None,
exclude_target_words = None) -> None:
super().__init__(lazy=lazy)
self._question_tokenizer = question_tokenizer
self._logical_form_tokenizer = logical_form_tokenizer
self._is_parsing = is_parsing
self._source_token_indexers = {
"tokens": SingleIdTokenIndexer(namespace='source_tokens')}
self._target_token_indexers = {
"tokens": SingleIdTokenIndexer(namespace='target_tokens')}
self._logical_form_preprocessor = logical_form_preprocessor
self._maximum_target_length = maximum_target_length
self._enable_copy = enable_copy
self._entity_matcher = entity_matcher
self._exclude_target_words = exclude_target_words
@overrides
def text_to_instance(self, logical_form: str, question: str = None) -> Instance:
if self._logical_form_preprocessor:
logical_form = self._logical_form_preprocessor(logical_form)
logical_form = logical_form.lower()
if self._is_parsing:
tokenized_source = self._question_tokenizer.tokenize(question)
else:
tokenized_source = self._logical_form_tokenizer.tokenize(logical_form)
tokenized_source.insert(0, Token(START_SYMBOL))
tokenized_source.append(Token(END_SYMBOL))
source_field = TextField(tokenized_source, self._source_token_indexers)
fields_dict = {'source_tokens': source_field}
if self._is_parsing:
tokenized_target = self._logical_form_tokenizer.tokenize(
logical_form)
else:
tokenized_target = self._question_tokenizer.tokenize(question)
tokenized_target.insert(0, Token(START_SYMBOL))
tokenized_target.append(Token(END_SYMBOL))
if self._maximum_target_length > 0 and len(tokenized_target) > self._maximum_target_length:
return None
target_field = TextField(
tokenized_target, self._target_token_indexers)
fields_dict.update({
"target_tokens": target_field
})
if self._enable_copy:
if self._entity_matcher is None:
source_tokens_to_copy = [t.text for t in tokenized_source]
else:
source_tokens_to_copy = [self.COPY_TOKEN if t is None else t for t in self._entity_matcher.match(tokenized_source)]
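            # map_ids[t][s] marks source positions whose (possibly entity-normalized)
            # string exactly matches target token t; special tokens, copy placeholders
            # and excluded target words are zeroed out (presumably consumed as copy
            # supervision downstream).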
# Prepare target_source_token_map
map_ids = list()
for target_token in tokenized_target:
if target_token.text in [START_SYMBOL, END_SYMBOL]:
map_ids.append(np.zeros(len(tokenized_source)))
elif self._exclude_target_words is not None and target_token.text.lower() in self._exclude_target_words:
map_ids.append(np.zeros(len(tokenized_source)))
else:
map_result = list()
for st in source_tokens_to_copy:
if st in [START_SYMBOL, END_SYMBOL, self.COPY_TOKEN]:
map_result.append(0)
else:
if st == target_token.text:
map_result.append(1)
else:
map_result.append(0)
map_ids.append(np.array(map_result))
meta_data = {
'source_tokens_to_copy': source_tokens_to_copy
}
fields_dict.update({
"target_source_token_map": ArrayField(np.array(map_ids, dtype=int), padding_value=0),
"meta_field": MetadataField(meta_data)
})
return Instance(fields_dict)
@overrides
def _read(self, file_path: str) -> Iterable[Instance]:
with open(file_path, 'r') as data_file:
for line in data_file:
line = line.strip()
if not line:
continue
line_parts = line.split('\t')
assert len(line_parts) == 2
inst = self.text_to_instance(line_parts[1], line_parts[0])
if inst is not None:
yield inst
| 4,976 | 42.278261 | 131 |
py
|
Unimer
|
Unimer-master/data_readers/grammar_based_reader.py
|
# coding=utf-8
import re
import os
import json
import copy
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from typing import Iterator, List, Dict, Iterable, Tuple, Callable
from overrides import overrides
from allennlp.data import Instance
from allennlp.data.fields import TextField, ArrayField, NamespaceSwappingField, MetadataField
from allennlp.common.util import START_SYMBOL, END_SYMBOL
from allennlp.data.dataset_readers import DatasetReader
from allennlp.data.tokenizers import Token, Tokenizer, WordTokenizer
from allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer
from allennlp.data.vocabulary import Vocabulary
class GrammarBasedDataReader(DatasetReader):
def __init__(self, source_tokenizer, grammar, logical_form_preprocessor: Callable = None, maximum_target_length: int = 0, lazy: bool = False) -> None:
super().__init__(lazy)
self._source_tokenizer = source_tokenizer
self._source_token_indexers = {
'tokens': SingleIdTokenIndexer(namespace='source_tokens')}
self._grammar = grammar
self._target_token_indexers = {
'tokens': SingleIdTokenIndexer(namespace='target_tokens')}
self.rule_pad_index = 0
self.nonterminal_pad_index = self._grammar.num_non_terminals
self.nonterminal_end_index = self._grammar.num_non_terminals + 1
self._logical_form_preprocessor = logical_form_preprocessor
self._maximum_target_length = maximum_target_length
@property
def grammar(self):
return self._grammar
@overrides
def text_to_instance(self, question: str = None, logical_form: str = None) -> Instance:
tokenized_source = self._source_tokenizer.tokenize(question)
tokenized_source.insert(0, Token(START_SYMBOL))
tokenized_source.append(Token(END_SYMBOL))
source_field = TextField(tokenized_source, self._source_token_indexers)
fields_dict = {'source_tokens': source_field}
meta_data = {
'question': question
}
if logical_form is not None:
if self._logical_form_preprocessor:
logical_form = self._logical_form_preprocessor(logical_form)
# else:
# logical_form = logical_form.replace(' ', '').replace("'", "").lower()
# TODO: this can be a potential threat to remove lower
# logical_form = logical_form.lower()
applied_production_rules = self._grammar.parse(logical_form)
rule_ids, nonterminal_ids = list(), list()
for rule in applied_production_rules:
assert rule.rule_id > 0
rule_ids.append(rule.rule_id)
lhs = rule.lhs
nonterminal_ids.append(self._grammar.get_non_terminal_id(lhs))
nonterminal_ids = nonterminal_ids[1:] + \
[self.nonterminal_end_index]
assert len(rule_ids) == len(nonterminal_ids)
if self._maximum_target_length > 0 and len(rule_ids) > self._maximum_target_length:
return None
fields_dict.update({
"target_rules": ArrayField(np.array(rule_ids, dtype=int), padding_value=self.rule_pad_index),
"target_nonterminals": ArrayField(np.array(nonterminal_ids, dtype=int), padding_value=self.nonterminal_pad_index),
"target_mask": ArrayField(np.ones(len(rule_ids)), padding_value=0),
})
meta_data.update({'logical_form': logical_form})
fields_dict.update({
"meta_field": MetadataField(meta_data)
})
return Instance(fields_dict)
@overrides
def _read(self, file_path: str) -> Iterable[Instance]:
with open(file_path, 'r') as data_file:
for line in data_file:
line = line.strip()
if not line:
continue
line_parts = line.split('\t')
assert len(line_parts) == 2
inst = self.text_to_instance(line_parts[0], line_parts[1])
if inst is not None:
                    yield inst
| 4,192 | 43.606383 | 154 |
py
|
Unimer
|
Unimer-master/data_readers/__init__.py
| 0 | 0 | 0 |
py
|
|
Unimer
|
Unimer-master/data_readers/grammar_copy_based_reader.py
|
# coding=utf-8
import numpy as np
from typing import Iterable, Callable
from overrides import overrides
from allennlp.data import Instance
from allennlp.data.fields import TextField, ArrayField, MetadataField
from allennlp.common.util import START_SYMBOL, END_SYMBOL
from allennlp.data.dataset_readers import DatasetReader
from allennlp.data.tokenizers import Token
from allennlp.data.token_indexers import SingleIdTokenIndexer
class GrammarCopyBasedDataReader(DatasetReader):
def __init__(self, source_tokenizer, grammar, copy_link_finder, logical_form_preprocessor: Callable = None,
utterance_preprocessor: Callable = None,
maximum_target_length: int = 0, lazy: bool = False) -> None:
super().__init__(lazy)
self._source_tokenizer = source_tokenizer
self._source_token_indexers = {
'tokens': SingleIdTokenIndexer(namespace='source_tokens')}
self._grammar = grammar
assert self._grammar.copy_terminal_set is not None
self._target_token_indexers = {
'tokens': SingleIdTokenIndexer(namespace='target_tokens')}
self.rule_pad_index = 0
self.nonterminal_pad_index = self._grammar.num_non_terminals
self.nonterminal_end_index = self._grammar.num_non_terminals + 1
self._logical_form_preprocessor = logical_form_preprocessor
self._utterance_preprocessor = utterance_preprocessor
self._maximum_target_length = maximum_target_length
self._copy_link_finder = copy_link_finder
@property
def grammar(self):
return self._grammar
@overrides
def text_to_instance(self, question: str = None, logical_form: str = None) -> Instance:
if self._utterance_preprocessor:
question = self._utterance_preprocessor(question)
tokenized_source = self._source_tokenizer.tokenize(question)
tokenized_source.insert(0, Token(START_SYMBOL))
tokenized_source.append(Token(END_SYMBOL))
source_field = TextField(tokenized_source, self._source_token_indexers)
fields_dict = {'source_tokens': source_field}
meta_data = {
'question': question
}
if self._logical_form_preprocessor:
logical_form = self._logical_form_preprocessor(logical_form)
# logical_form = logical_form.lower()
applied_production_rules = self._grammar.parse(logical_form)
rule_ids, nonterminal_ids = list(), list()
# Allow copy mask
allow_copy_mask = list()
for rule in applied_production_rules:
assert rule.rule_id > 0
rule_ids.append(rule.rule_id)
lhs = rule.lhs
nonterminal_ids.append(self._grammar.get_non_terminal_id(lhs))
if rule.lhs in self._grammar.copy_terminal_set:
allow_copy_mask.append(1)
else:
allow_copy_mask.append(0)
nonterminal_ids = nonterminal_ids[1:] + \
[self.nonterminal_end_index]
allow_copy_mask = allow_copy_mask[1:] + [0]
assert len(rule_ids) == len(nonterminal_ids) and len(rule_ids) == len(allow_copy_mask)
if self._maximum_target_length > 0 and len(rule_ids) > self._maximum_target_length:
return None
# Copy Mask
copy_pad_index = self._grammar.root_rule_id
token_copy_index = self._copy_link_finder.match(
tokenized_source, self._grammar.production_rules, self._grammar.copy_terminal_set, copy_pad_index)
assert len(token_copy_index) == len(tokenized_source)
# Pad
maximum_match = max([len(m) for m in token_copy_index])
for idx, copy_index in enumerate(token_copy_index):
token_copy_index[idx] = np.concatenate([copy_index, np.ones(
maximum_match - len(copy_index), dtype=np.int) * copy_pad_index])
token_copy_index = np.array(token_copy_index)
fields_dict.update({
"target_rules": ArrayField(np.array(rule_ids, dtype=int), padding_value=self.rule_pad_index),
"target_nonterminals": ArrayField(np.array(nonterminal_ids, dtype=int), padding_value=self.nonterminal_pad_index),
"target_mask": ArrayField(np.ones(len(rule_ids)), padding_value=0),
"source_token_copy_indices": ArrayField(token_copy_index, padding_value=copy_pad_index, dtype=np.int),
"target_allow_copy_mask": ArrayField(np.array(allow_copy_mask, dtype=int), padding_value=0)
})
meta_data.update({'logical_form': logical_form})
fields_dict.update({
"meta_field": MetadataField(meta_data)
})
return Instance(fields_dict)
@overrides
def _read(self, file_path: str) -> Iterable[Instance]:
with open(file_path, 'r') as data_file:
for line in data_file:
line = line.strip()
if not line:
continue
line_parts = line.split('\t')
assert len(line_parts) == 2
inst = self.text_to_instance(line_parts[0], line_parts[1])
if inst is not None:
                    yield inst
| 5,214 | 45.981982 | 126 |
py
|
Unimer
|
Unimer-master/data/atis/log_to_csv.py
| 0 | 0 | 0 |
py
|
|
Unimer
|
Unimer-master/data/atis/lambda_calculus_to_prolog.py
|
# coding=utf8
import re
import copy
ENTITY_PATTERN = re.compile(r'^[A-Z|a-z|\\|_|\d]+:_([a-z]+)$')
ENTITY_TYPE_MAP = {
"ac": "aircraft_code",
"al": "airline_code",
"ci": "city_name",
"ap": "airport_code",
"fn": "flight_number",
"cl": "class_description",
"ti": "time",
"pd": "day_period",
"mf": "manufacturer",
"mn": "month",
"da": "day",
"i": "integer",
"yr": "year",
"dn": "day_number",
"do": "dollar",
"hr": "hour",
"rc": "meal_code",
"st": "state_name",
"fb": "fare_basis_code",
"me": "meal_description",
"bat": "basis_type",
"dc": "days_code"
}
FUNCTION_REPLACE_MAP = {
"_abbrev": [
{"name": "abbrev", "number_of_argument": 1, "argument_type": ["airline_code"], "return_type": "airline_name"}
],
"_capacity": [
{"name": "capacity", "number_of_argument": 1, "argument_type": [
"aircraft_code"], "return_type": "capacity"},
        {"name": "capacity", "number_of_argument": 1,
         "argument_type": ["flight_id"], "return_type": "capacity"}
],
"_flight_number": [
{"name": "flight_number", "number_of_argument": 1,
"argument_type": ["flight_id"], "return_type": "flight_number"},
{"name": "is_flight_number", "number_of_argument": 2, "argument_type": [
"flight_id", "flight_number"], "return_type": "bool"}
],
"_airline_name": [{"name": "airline_name", "number_of_argument": 1, "argument_type": ["flight_id"], "return_type": "airline_name"}],
"_departure_time": [
{"name": "departure_time", "number_of_argument": 1,
"argument_type": ["flight_id"], "return_type": "departure_time"},
{"name": "is_flight_departure_time", "number_of_argument": 2,
"argument_type": ["flight_id", "time"], "return_type": "bool"}
],
"_miles_distant": [
{"name": "miles_distant", "number_of_argument": 2, "argument_type": [
"airport_code", "city_name"], "return_type": "miles_distant"},
{"name": "miles_distant_between_city", "number_of_argument": 2, "argument_type": [
"city_name", "city_name"], "return_type": "miles_distant"}
],
"_minimum_connection_time": [
{"name": "minimum_connection_time", "number_of_argument": 1, "argument_type": [
"airport_code"], "return_type": "minimum_connection_time"}
],
"_stops": [
{"name": "get_number_of_stops", "number_of_argument": 1,
"argument_type": ["flight_id"], "return_type": "number_of_stops"},
{"name": "is_flight_stops_specify_number_of_times", "number_of_argument": 2,
"argument_type": ["flight_id", "integer"], "return_type": "bool"}
],
"_time_elapsed": [
{"name": "time_elapsed", "number_of_argument": 1,
"argument_type": ["flight_id"], "return_type": "time_elapsed"},
{"name": "is_time_elapsed", "number_of_argument": 2,
"argument_type": ["flight_id", "hour"], "return_type": "bool"}
],
# Binary Predicate
"is_mf": [
{"name": "mf", "number_of_argument": 2, "argument_type": [
"aircraft_code", "manufacturer"], "return_type": "bool"},
],
"_aircraft_basis_type": [
{"name": "is_aircraft_basis_type", "number_of_argument": 2,
"argument_type": ["aircraft_code", "basis_type"], "return_type": "bool"},
],
"_manufacturer": [
{"name": "is_mf", "number_of_argument": 2,
"argument_type": ["aircraft_code", "manufacturer"], "return_type": "bool"},
{"name": "is_flight_manufacturer", "number_of_argument": 2,
"argument_type": ["flight_id", "manufacturer"], "return_type": "bool"}
],
"_services": [
{"name": "is_services", "number_of_argument": 2, "argument_type": [
"airline_code", "city_name"], "return_type": "bool"},
{"name": "is_airline_services", "number_of_argument": 2, "argument_type": [
"airline_code", "airport_code"], "return_type": "bool"}
],
"_to": [
{"name": "is_to", "number_of_argument": 2, "argument_type": [
"flight_id", "airport_code"], "return_type": "bool"},
{"name": "is_to", "number_of_argument": 2, "argument_type": [
"flight_id", "city_name"], "return_type": "bool"},
{"name": "is_to", "number_of_argument": 2, "argument_type": [
"flight_id", "state_name"], "return_type": "bool"}
],
"_from": [
{"name": "is_from", "number_of_argument": 2, "argument_type": [
"flight_id", "airport_code"], "return_type": "bool"},
{"name": "is_from", "number_of_argument": 2, "argument_type": [
"flight_id", "city_name"], "return_type": "bool"}
],
"_loc:_t": [
{"name": "is_loc_t", "number_of_argument": 2, "argument_type": [
"airport_code", "city_name"], "return_type": "bool"},
{"name": "is_loc_t_state", "number_of_argument": 2, "argument_type": [
"airport_code", "state_name"], "return_type": "bool"},
{"name": "is_loc_t_city_time_zone", "number_of_argument": 2, "argument_type": [
"city_name", "time_zone_code"], "return_type": "bool"},
],
"_from_airport": [
{"name": "is_from_airport", "number_of_argument": 2, "argument_type": [
"transport_type", "airport_code"], "return_type": "bool"},
{"name": "is_from_airports_of_city", "number_of_argument": 2, "argument_type": [
"transport_type", "city_name"], "return_type": "bool"},
],
"_to_airport": [
{"name": "is_to_airport", "number_of_argument": 2, "argument_type": [
"transport_type", "city_name"], "return_type": "bool"},
],
"_to_city": [
{"name": "is_to_city", "number_of_argument": 2, "argument_type": [
"transport_type", "city_name"], "return_type": "bool"},
],
"_airline": [
{"name": "is_flight_airline", "number_of_argument": 2, "argument_type": [
"flight_id", "airline_code"], "return_type": "bool"},
{"name": "is_aircraft_airline", "number_of_argument": 2, "argument_type": [
"aircraft_code", "airline_code"], "return_type": "bool"},
{"name": "is_airline_has_booking_class", "number_of_argument": 2, "argument_type": [
"class_description", "airline_code"], "return_type": "bool"},
{"name": "is_airline_provide_meal", "number_of_argument": 2, "argument_type": [
"meal_code", "airline_code"], "return_type": "bool"},
{"name": "is_airline", "number_of_argument": 1,
"argument_type": ["airline_code"], "return_type": "bool"}
],
"_airline:_e": [
{"name": "get_flight_airline_code", "number_of_argument": 1,
"argument_type": ["flight_id"], "return_type": "airline_code"},
],
"_stop": [
{"name": "is_flight_stop_at_city", "number_of_argument": 2,
"argument_type": ["flight_id", "city_name"], "return_type": "bool"},
{"name": "is_flight_stop_at_airport", "number_of_argument": 2,
"argument_type": ["flight_id", "airport_code"], "return_type": "bool"},
],
"_class_type": [
{"name": "is_flight_has_class_type", "number_of_argument": 2, "argument_type": [
"flight_id", "class_description"], "return_type": "bool"},
{"name": "is_fare_basis_code_class_type", "number_of_argument": 2, "argument_type": [
"fare_basis_code", "class_description"], "return_type": "bool"},
],
"_after_day": [
{"name": "is_flight_after_day", "number_of_argument": 2,
"argument_type": ["flight_id", "day"], "return_type": "bool"}
],
"_approx_arrival_time": [
{"name": "is_flight_approx_arrival_time", "number_of_argument": 2,
"argument_type": ["flight_id", "arrival_time"], "return_type": "bool"}
],
"_arrival_time": [
{"name": "arrival_time", "number_of_argument": 1,
"argument_type": ["flight_id"], "return_type": "arrival_time"},
{"name": "is_flight_arrival_time", "number_of_argument": 2,
"argument_type": ["flight_id", "arrival_time"], "return_type": "bool"}
],
"_approx_departure_time": [
{"name": "is_flight_approx_departure_time", "number_of_argument": 2,
"argument_type": ["flight_id", "departure_time"], "return_type": "bool"}
],
"_approx_return_time": [
{"name": "is_flight_approx_return_time", "number_of_argument": 2,
"argument_type": ["flight_id", "return_time"], "return_type": "bool"}
],
"_during_day": [
{"name": "is_flight_during_day", "number_of_argument": 2,
"argument_type": ["flight_id", "day_period"], "return_type": "bool"}
],
"_during_day_arrival": [
{"name": "is_flight_during_day_arrival", "number_of_argument": 2,
"argument_type": ["flight_id", "day_period"], "return_type": "bool"}
],
"_day_number": [
{"name": "is_flight_on_day_number", "number_of_argument": 2,
"argument_type": ["flight_id", "day_number"], "return_type": "bool"}
],
"_day_arrival": [
{"name": "is_flight_day_arrival", "number_of_argument": 2,
"argument_type": ["flight_id", "day"], "return_type": "bool"}
],
"_day": [
{"name": "is_flight_on_day", "number_of_argument": 2,
"argument_type": ["flight_id", "day"], "return_type": "bool"}
],
"_month": [
{"name": "is_flight_month_arrival", "number_of_argument": 2,
"argument_type": ["flight_id", "month"], "return_type": "bool"}
],
"_day_return": [
{"name": "is_flight_day_return", "number_of_argument": 2,
"argument_type": ["flight_id", "day"], "return_type": "bool"}
],
"_day_number_arrival": [
{"name": "is_flight_day_number_arrival", "number_of_argument": 2,
"argument_type": ["flight_id", "day_number"], "return_type": "bool"}
],
"_day_number_return": [
{"name": "is_flight_day_number_return", "number_of_argument": 2,
"argument_type": ["flight_id", "day_number"], "return_type": "bool"}
],
"_month_arrival": [
{"name": "is_flight_month_arrival", "number_of_argument": 2,
"argument_type": ["flight_id", "month"], "return_type": "bool"}
],
"_month_return": [
{"name": "is_flight_month_return", "number_of_argument": 2,
"argument_type": ["flight_id", "month"], "return_type": "bool"}
],
"_days_from_today": [
{"name": "is_flight_days_from_today", "number_of_argument": 2,
"argument_type": ["flight_id", "integer"], "return_type": "bool"}
],
    # Unary Predicate
"_aircraft": [
{"name": "is_aircraft", "number_of_argument": 1,
"argument_type": ["aircraft_code"], "return_type": "bool"},
{"name": "get_flight_aircraft_code", "number_of_argument": 1,
"argument_type": ["flight_id"], "return_type": "aircraft_code"},
{"name": "is_flight_aircraft", "number_of_argument": 2, "argument_type": [
"flight_id", "aircraft_code"], "return_type": "bool"},
],
"_city": [
{"name": "is_city", "number_of_argument": 1,
"argument_type": ["city_name"], "return_type": "bool"}
],
"_airport": [
{"name": "is_airport", "number_of_argument": 1,
"argument_type": ["airport_code"], "return_type": "bool"},
{"name": "is_airport_of_city", "number_of_argument": 2, "argument_type": [
"city_name", "airport_code"], "return_type": "bool"}
],
"_flight": [
{"name": "is_flight", "number_of_argument": 1,
"argument_type": ["flight_id"], "return_type": "bool"}
],
"_tomorrow": [
{"name": "is_tomorrow_flight", "number_of_argument": 1,
"argument_type": ["flight_id"], "return_type": "bool"}
],
"_before_day": [
{"name": "is_flight_before_day", "number_of_argument": 2,
"argument_type": ["flight_id", "month"], "return_type": "bool"}
],
"_tomorrow_arrival": [
{"name": "is_tomorrow_arrival_flight", "number_of_argument": 1,
"argument_type": ["flight_id"], "return_type": "bool"}
],
"_today": [
{"name": "is_today_flight", "number_of_argument": 1,
"argument_type": ["flight_id"], "return_type": "bool"}
],
"_next_days": [
{"name": "is_next_days_flight", "number_of_argument": 2,
"argument_type": ["flight_id", "integer"], "return_type": "bool"}
],
"_day_after_tomorrow": [
{"name": "is_day_after_tomorrow_flight", "number_of_argument": 1,
"argument_type": ["flight_id"], "return_type": "bool"}
],
"_daily": [
{"name": "is_daily_flight", "number_of_argument": 1,
"argument_type": ["flight_id"], "return_type": "bool"}
],
"_discounted": [
{"name": "is_discounted_flight", "number_of_argument": 1,
"argument_type": ["flight_id"], "return_type": "bool"}
],
"_connecting": [
{"name": "is_connecting_flight", "number_of_argument": 1,
"argument_type": ["flight_id"], "return_type": "bool"}
],
"_oneway": [
{"name": "is_oneway", "number_of_argument": 1,
"argument_type": ["flight_id"], "return_type": "bool"}
],
"_has_stops": [
{"name": "is_flight_has_stop", "number_of_argument": 1,
"argument_type": ["flight_id"], "return_type": "bool"}
],
"_nonstop": [
{"name": "is_non_stop_flight", "number_of_argument": 1,
"argument_type": ["flight_id"], "return_type": "bool"}
],
"_meal:_t": [
{"name": "is_meal", "number_of_argument": 1,
"argument_type": ["meal_code"], "return_type": "bool"}
],
"_meal": [
{"name": "get_flight_meal", "number_of_argument": 1,
"argument_type": ["flight_id"], "return_type": "meal_description"},
{"name": "is_flight_has_specific_meal", "number_of_argument": 2,
"argument_type": ["flight_id", "meal_description"], "return_type": "bool"}
],
"_meal_code": [
{"name": "is_meal_code", "number_of_argument": 1,
"argument_type": ["meal_code"], "return_type": "bool"},
{"name": "is_flight_meal_code", "number_of_argument": 2,
"argument_type": ["flight_id", "meal_code"], "return_type": "bool"},
],
"_has_meal": [
{"name": "is_flight_has_meal", "number_of_argument": 1,
"argument_type": ["flight_id"], "return_type": "bool"}
],
"_tonight": [
{"name": "is_flight_tonight", "number_of_argument": 1,
"argument_type": ["flight_id"], "return_type": "bool"}
],
"_booking_class:_t": [
{"name": "is_booking_class_t", "number_of_argument": 1,
"argument_type": ["class_description"], "return_type": "bool"},
],
"_booking_class": [
{"name": "get_flight_booking_class", "number_of_argument": 1,
"argument_type": ["flight_id"], "return_type": "class_description"},
{"name": "is_flight_has_booking_class", "number_of_argument": 2,
"argument_type": ["flight_id", "class_description"], "return_type": "bool"},
],
"_class_of_service": [
{"name": "is_class_of_service", "number_of_argument": 1,
"argument_type": ["class_description"], "return_type": "bool"}
],
"_fare_basis_code": [
{"name": "is_fare_basis_code", "number_of_argument": 1,
"argument_type": ["fare_basis_code"], "return_type": "bool"},
{"name": "is_flight_has_specific_fare_basis_code", "number_of_argument": 2,
"argument_type": ["flight_id", "fare_basis_code"], "return_type": "bool"},
{"name": "is_specific_fare_basis_code", "number_of_argument": 2, "argument_type": [
"fare_basis_code", "fare_basis_code"], "return_type": "bool"}
],
"_economy": [
{"name": "is_flight_economy", "number_of_argument": 1,
"argument_type": ["flight_id"], "return_type": "bool"},
{"name": "is_economy", "number_of_argument": 1,
"argument_type": ["fare_basis_code"], "return_type": "bool"},
],
"_fare": [
{"name": "get_flight_fare", "number_of_argument": 1, "argument_type": [
"flight_id"], "return_type": "one_direction_cost"},
{"name": "get_booking_class_fare", "number_of_argument": 1, "argument_type": [
"class_description"], "return_type": "one_direction_cost"},
{"name": "is_fare", "number_of_argument": 1,
"argument_type": ["fare_id"], "return_type": "bool"},
{"name": "is_flight_cost_fare", "number_of_argument": 2,
"argument_type": ["flight_id", "dollar"], "return_type": "bool"},
],
"_cost": [
{"name": "get_flight_cost", "number_of_argument": 1, "argument_type": [
"flight_id"], "return_type": "round_trip_cost"},
],
"_aircraft_code:t": [
{"name": "is_aircraft_code_t", "number_of_argument": 1,
"argument_type": ["aircraft_code"], "return_type": "bool"}
],
"_aircraft_code": [
{"name": "get_flight_aircraft_code", "number_of_argument": 1,
"argument_type": ["flight_id"], "return_type": "aircraft_code"},
{"name": "is_flight_with_specific_aircraft", "number_of_argument": 2,
"argument_type": ["flight_id", "aircraft_code"], "return_type": "bool"}
],
"_ground_transport": [
{"name": "is_ground_transport", "number_of_argument": 1,
"argument_type": ["transport_type"], "return_type": "bool"}
],
"_rental_car": [
{"name": "is_rental_car", "number_of_argument": 1,
"argument_type": ["transport_type"], "return_type": "bool"}
],
"_limousine": [
{"name": "is_limousine", "number_of_argument": 1,
"argument_type": ["transport_type"], "return_type": "bool"}
],
"_rapid_transit": [
{"name": "is_rapid_transit", "number_of_argument": 1,
"argument_type": ["transport_type"], "return_type": "bool"}
],
"_taxi": [
{"name": "is_taxi", "number_of_argument": 1, "argument_type": [
"transport_type"], "return_type": "bool"}
],
"_air_taxi_operation": [
{"name": "is_air_taxi_operation", "number_of_argument": 1, "argument_type": [
"transport_type"], "return_type": "bool"}
],
"_round_trip": [
{"name": "is_round_trip", "number_of_argument": 1,
"argument_type": ["flight_id"], "return_type": "bool"}
],
"_weekday": [
{"name": "is_ground_transport_on_weekday", "number_of_argument": 1,
"argument_type": ["transport_type"], "return_type": "bool"},
{"name": "is_flight_on_weekday", "number_of_argument": 1,
"argument_type": ["flight_id"], "return_type": "bool"},
],
"_year": [
{"name": "is_flight_on_year", "number_of_argument": 2,
"argument_type": ["flight_id", "year"], "return_type": "bool"},
],
"_time_zone_code": [
{"name": "is_time_zone_code", "number_of_argument": 1,
"argument_type": ["time_zone_code"], "return_type": "bool"},
],
"_turboprop": [
{"name": "is_flight_turboprop", "number_of_argument": 1,
"argument_type": ["flight_id"], "return_type": "bool"},
{"name": "is_turboprop", "number_of_argument": 1,
"argument_type": ["aircraft_code"], "return_type": "bool"},
],
"_jet": [
{"name": "is_flight_jet", "number_of_argument": 1,
"argument_type": ["flight_id"], "return_type": "bool"},
],
"_aircraft_code:_t": [
{"name": "aircraft_code", "number_of_argument": 1,
"argument_type": ["aircraft_code"], "return_type": "bool"},
],
# Meta Predicate
"_equals": [
{"name": "equals", "number_of_argument": 2,
"argument_type": ["*", "*"], "is_meta": True, "return_type": "bool"},
],
"_equals:_t": [
{"name": "equals", "number_of_argument": 2,
"argument_type": ["*", "*"], "is_meta": True, "return_type": "bool"},
],
"_<": [
{"name": "less_than", "number_of_argument": 2,
"argument_type": ["*", "*"], "is_meta": True, "return_type": "bool"},
],
"_>": [
{"name": "larger_than", "number_of_argument": 2,
"argument_type": ["*", "*"], "is_meta": True, "return_type": "bool"},
],
"_=": [
{"name": "numerical_equals", "number_of_argument": 2,
"argument_type": ["*", "*"], "is_meta": True, "return_type": "bool"},
],
"the": [
{"name": "the", "number_of_argument": 1,
"argument_type": ["*"], "is_meta": True, "return_type": "*"},
],
"_not": [
{"name": "not", "number_of_argument": 1,
"argument_type": ["*"], "is_meta": True, "return_type": "bool"},
],
"_ground_fare": [
{"name": "get_ground_fare", "number_of_argument": 1,
"argument_type": ["transport_type"], "return_type": "ground_fare"},
],
"_stop_arrival_time": [
{"name": "get_flight_stop_arrival_time", "number_of_argument": 1,
"argument_type": ["flight_id"], "return_type": "stop_arrival_time"},
],
"_restriction_code": [
{"name": "get_flight_restriction_code", "number_of_argument": 1,
"argument_type": ["flight_id"], "return_type": "restriction_code"},
]
}
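# FUNCTION_REPLACE_MAP lists, for every lambda-calculus predicate, the candidate
# Prolog-side predicates together with their arity and argument types; rewrite()
# below uses the arity and the inferred argument types to pick one candidate.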
def read_data(path):
questions, logical_forms = list(), list()
with open(path, 'r') as f:
for line in f:
line = line.strip()
splits = line.split('\t')
questions.append(splits[0])
logical_forms.append(splits[1])
return questions, logical_forms
def split_tokens(lf):
replacements = [
('(', ' ( '),
(')', ' ) '),
("\\+", " \\+ "),
]
for a, b in replacements:
lf = lf.replace(a, b)
return lf
def standardize_lambda_calculus_varnames(ans):
toks = ans.split(' ')
varnames = {}
new_toks = []
for t in toks:
if t == 'x' or t.startswith('$'):
if ':' in t:
# var definition
splits = t.split(':')
name, var_type = splits[0], splits[1]
assert name not in varnames
new_name = '$v%d' % len(varnames)
varnames[name] = new_name
new_toks.append(new_name + ":" + var_type)
else:
# t is a variable name
if t in varnames:
new_toks.append(varnames[t])
else:
new_varname = '$v%d' % len(varnames)
varnames[t] = new_varname
new_toks.append(new_varname)
else:
new_toks.append(t)
lf = ' '.join(new_toks)
return lf
def normalize_lambda_calculus(logical_form):
lf = split_tokens(logical_form)
lf = re.sub(' +', ' ', lf)
s = standardize_lambda_calculus_varnames(lf)
variables = ["$v0", "$v1", "$v2", "$v3"]
for var in variables:
s = s.replace(var + " e ", "%s:e " % var)
s = s.replace(var + " i ", "%s:i " % var)
s = s.replace(' :', ":").replace(
'\s+', ' ').replace("( ", "(").replace(" )", ")").replace(')\s)', '))').strip().lower()
s = re.sub(' +', ' ', s)
return s
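# normalize_lambda_calculus standardizes variable names to $v0, $v1, ... (via
# standardize_lambda_calculus_varnames), re-attaches type markers such as ":e",
# collapses whitespace, tightens parentheses and lowercases the result.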
def extract_entity(lf):
tokens = lf.split(":_")
return tokens
def tokenize_logical_form(logical_form):
replacements = [
('(', ' ( '),
(')', ' ) '),
# ("\\+", " \\+ "),
]
normalized_lc = re.sub(' +', ' ', logical_form)
for a, b in replacements:
normalized_lc = normalized_lc.replace(a, b)
tokens = [t for t in normalized_lc.split()]
return tokens
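# Example: tokenize_logical_form("(_lambda $0 (_flight $0))")
# -> ['(', '_lambda', '$0', '(', '_flight', '$0', ')', ')']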
def process_entity_string(entity, default=""):
assert isinstance(entity, str)
if ":_" in entity:
splits = entity.split(":_")
entity_name = splits[0]
entity_type = ENTITY_TYPE_MAP[splits[1]]
else:
entity_type = default
entity_name = entity
if '_' in entity_name:
entity_name = entity_name.replace("_", " ")
return entity_name, entity_type
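# Example: process_entity_string("salt_lake_city:_ci") -> ("salt lake city", "city_name").
# Without a ":_<type>" suffix the `default` type is returned and underscores are
# still replaced by spaces.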
def is_variable(var):
return re.match('[A-Z]', var) is not None
def get_new_variable(global_variables):
max_value = ord('A')
for v in global_variables:
if ord(v) > max_value:
max_value = ord(v)
assert max_value < ord('Z')
return chr(max_value + 1)
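# Example: get_new_variable({'A', 'B'}) -> 'C'. Variables are single capital
# letters, so the assert guards against running past 'Z'.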
def get_function_return_type(function_name):
candidates = list()
for _, funcs in FUNCTION_REPLACE_MAP.items():
for f in funcs:
if f['name'] == function_name:
candidates.append(f['return_type'])
if len(candidates) > 0:
break
if len(candidates) > 0:
for t in candidates:
if t != 'bool':
return t
return None
def is_entity_function(function_name):
candidates = list()
for _, funcs in FUNCTION_REPLACE_MAP.items():
for f in funcs:
if f['name'] == function_name:
candidates.append(f['return_type'])
if len(candidates) > 0:
break
if len(candidates) > 0:
return candidates[0] != 'bool'
else:
return False
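# rewrite() maps one lambda-calculus predicate application onto its Prolog
# counterpart: it selects the target predicate by arity and inferred argument
# types, records type constraints for the variables involved, replaces constant
# arguments with fresh variables bound through const/const_expr, and finally
# rewrites a few equals/comparison patterns into dedicated predicates.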
def rewrite(
function_name,
number_of_arguments,
arguments,
argument_variable_constraints,
global_variables,
):
if function_name not in FUNCTION_REPLACE_MAP:
assert function_name in ['_minutes_distant',
'_named', '_overnight']
resultant_lf = "%s(%s)" % (function_name, ','.join(arguments))
return resultant_lf, dict()
names = FUNCTION_REPLACE_MAP[function_name]
rewrite_function_name = function_name
argument_types = None
is_meta_function = False
if len(names) == 1:
rewrite_function_name = names[0]['name']
argument_types = names[0]["argument_type"]
is_meta_function = "is_meta" in names[0] and names[0]['is_meta'] is True
else:
        # select by argument number
feasible_index = []
for idx, name in enumerate(names):
if name['number_of_argument'] == number_of_arguments:
rewrite_function_name = name['name']
argument_types = name["argument_type"]
feasible_index.append(idx)
if len(feasible_index) == 0:
raise Exception("No feasible functions in Python")
elif len(feasible_index) == 1:
idx = feasible_index[0]
rewrite_function_name = names[idx]['name']
argument_types = names[idx]['argument_type']
is_meta_function = "is_meta" in names[idx] and names[idx]['is_meta'] is True
else:
# Select by Argument Type
best_index = 0
best_count = 0
for idx in feasible_index:
name = names[idx]
types = names[idx]['argument_type']
count = 0
for t, arg in zip(types, arguments):
_arg = arg.replace('"', "")
match = ENTITY_PATTERN.match(_arg)
if match:
e, et = process_entity_string(_arg)
if et == t:
count += 1
elif _arg.startswith("argmin_") or _arg.startswith("argmax_"):
# argmin, argmax
index = _arg.index("(") + 1
var = _arg[index:index+1]
if var in argument_variable_constraints:
et = argument_variable_constraints[var]
if et == t:
count += 1
else:
if is_variable(_arg) and _arg in argument_variable_constraints:
et = argument_variable_constraints[_arg]
if et == t:
count += 1
if count > best_count:
best_index = idx
best_count = count
rewrite_function_name = names[best_index]['name']
argument_types = names[best_index]['argument_type']
is_meta_function = "is_meta" in names[best_index] and names[best_index]['is_meta'] is True
# Derive type constraints, Type Inference
# print(function_name, rewrite_function_name, number_of_arguments, arguments, argument_types)
variable_constraints = dict()
assert number_of_arguments == len(argument_types)
if is_meta_function:
if rewrite_function_name in ['equals', 'numerical_equals', 'less_than', 'larger_than']:
if is_variable(arguments[0]):
arg_variable = arguments[0]
arg_func = arguments[1]
elif is_variable(arguments[1]):
arg_variable = arguments[1]
arg_func = arguments[0]
else:
arg_variable, arg_func = None, None
if arg_variable is not None and arg_func is not None:
match = ENTITY_PATTERN.match(arg_func.replace('"', ""))
if match:
e, et = process_entity_string(arg_func.replace('"', ""))
variable_constraints[arg_variable] = et
elif arg_func.startswith("argmin(") or arg_func.startswith("argmax("):
for _var in [" A:", " B:", " C:"]:
processed_var = _var.replace(":", "").strip()
if _var in arg_func and processed_var in argument_variable_constraints:
variable_constraints[arg_variable] = argument_variable_constraints[processed_var]
break
else:
arg_func_return_type = get_function_return_type(
arg_func[:arg_func.index("(")])
if arg_func_return_type is not None and arg_func_return_type not in ['*', 'bool']:
variable_constraints[arg_variable] = arg_func_return_type
else:
for argument, atype in zip(arguments, argument_types):
if is_variable(argument):
variable_constraints[argument] = atype
    # Rewrite: all functions/predicates except const are only allowed to take variables as arguments
rewrite_arguments = list()
additional_lf = []
for argument, atype in zip(arguments, argument_types):
if is_variable(argument):
rewrite_arguments.append(argument)
else:
match = ENTITY_PATTERN.match(argument.replace('"', ""))
if match:
e, et = process_entity_string(argument.replace('"', ""))
new_variable = get_new_variable(global_variables)
const_lf = 'const(%s,%s(%s))' % (new_variable, et, e.replace(" ", "_"))
global_variables.add(new_variable)
rewrite_arguments.append(new_variable)
else:
# TODO
new_variable = get_new_variable(global_variables)
const_lf = 'const_expr(%s,%s)' % (new_variable, argument.replace('"', ""))
global_variables.add(new_variable)
rewrite_arguments.append(new_variable)
additional_lf.append(const_lf)
resultant_lf = "%s(%s)" % (rewrite_function_name,
",".join(rewrite_arguments))
if len(additional_lf) > 0:
additional_lf = ','.join(additional_lf)
resultant_lf = resultant_lf + ',' + additional_lf
# match pattern like: numerical_equals(D,A),const_expr(D,get_ground_fare(B))
match = re.match(
"numerical_equals\(([A-Z]),([A-Z])\),const_expr\(([A-Z]),(.*)\(([A-Z]),([A-Z])\)", resultant_lf)
# less_than/larger_than
comparative_pattern_1 = re.compile(
"(larger_than|less_than)\(([A-Z]),([A-Z])\),const_expr\(([A-Z]),(.*)\(([A-Z]),[A-Z]\)\),const\(([A-Z]),(.*?)\((.*?)\)\)")
comparative_pattern_match_1 = comparative_pattern_1.match(resultant_lf)
# less_than/larger_than/equals
comparative_pattern_2 = re.compile(
"(larger_than|less_than)\(([A-Z]),([A-Z])\),const_expr\(([A-Z]),(.*)\),const_expr\(([A-Z]),(.*)\)")
comparative_pattern_match_2 = comparative_pattern_2.match(resultant_lf)
# equals pattern
equal_pattern_1 = re.compile(
"equals\(([A-Z]),([A-Z])\),const_expr\(([A-Z]),(.*)\),const_expr\(([A-Z]),(.*)\)")
equal_pattern_match_1 = equal_pattern_1.match(resultant_lf)
equal_pattern_2 = re.compile(
"equals\(([A-Z]),([A-Z])\),const\(([A-Z]),(.+\(.+\))\),const_expr\(([A-Z]),(.*)\)")
equal_pattern_match_2 = equal_pattern_2.match(resultant_lf)
equal_pattern_3 = re.compile(
"equals\(([A-Z]),([A-Z])\),const_expr\(([A-Z]),(.*)\)")
equal_pattern_match_3 = equal_pattern_3.match(resultant_lf)
if match is not None and (match[2] == match[3] or match[1] == match[3]):
if match[2] == match[3]:
new_var = match[1]
else:
new_var = match[2]
predicate = match[4].replace("get_", "p_")
predicate_var = match[5]
resultant_lf = "%s(%s,%s)" % (predicate, predicate_var, new_var)
elif comparative_pattern_match_1 and ((comparative_pattern_match_1[2] == comparative_pattern_match_1[4]
and comparative_pattern_match_1[3] == comparative_pattern_match_1[7]) or \
(comparative_pattern_match_1[2] == comparative_pattern_match_1[7] and \
comparative_pattern_match_1[3] == comparative_pattern_match_1[4])):
match = comparative_pattern_match_1
entity_function = match[5]
predicate_name = (match[1] + "_" + entity_function).replace("_get_", "_")
subject_var = match[6]
object_var = match[7]
resultant_lf = "%s(%s,%s),const(%s,%s(%s))" % (predicate_name, subject_var, \
object_var, object_var, match[8], match[9])
elif comparative_pattern_match_2:
print("Comparative Pattern 2")
assert comparative_pattern_match_2[2] == comparative_pattern_match_2[4] and comparative_pattern_match_2[3] == comparative_pattern_match_2[6]
first_entity_function = comparative_pattern_match_2[5]
first_entity_predicate = first_entity_function[:first_entity_function.index('(')]
assert is_entity_function(first_entity_predicate)
index_1 = first_entity_function.index(',')
first_entity_function_var = first_entity_function[index_1-1:index_1]
first_remain_predicate = first_entity_function[first_entity_function.index(')')+1:]
if first_remain_predicate.startswith(','):
first_remain_predicate = first_remain_predicate[1:]
second_entity_function = comparative_pattern_match_2[7]
second_entity_predicate = second_entity_function[:second_entity_function.index('(')]
assert is_entity_function(second_entity_predicate) and first_entity_predicate == second_entity_predicate
index_2 = second_entity_function.index(',')
second_entity_function_var = second_entity_function[index_2 - 1:index_2]
second_remain_predicate = second_entity_function[second_entity_function.index(')') + 1:]
if second_remain_predicate.startswith(','):
second_remain_predicate = second_remain_predicate[1:]
predicate_name = (comparative_pattern_match_2[1] + "_" + second_entity_predicate).replace("_get_", "_")
resultant_lf = '%s(%s,%s)' % (predicate_name, first_entity_function_var, second_entity_function_var)
if len(first_remain_predicate):
resultant_lf += ',%s' % first_remain_predicate
if len(second_remain_predicate):
resultant_lf += ',%s' % second_remain_predicate
elif equal_pattern_match_1:
print("Equal Pattern 1")
assert equal_pattern_match_1[1] == equal_pattern_match_1[3] \
and equal_pattern_match_1[2] == equal_pattern_match_1[5]
first_entity_function = equal_pattern_match_1[4]
first_entity_predicate = first_entity_function[:first_entity_function.index('(')]
assert is_entity_function(first_entity_predicate)
index_1 = first_entity_function.index(',')
first_entity_function_var = first_entity_function[index_1-1:index_1]
first_remain_predicate = first_entity_function[first_entity_function.index(')')+1:]
if first_remain_predicate.startswith(','):
first_remain_predicate = first_remain_predicate[1:]
second_entity_function = equal_pattern_match_1[6]
second_entity_predicate = second_entity_function[:second_entity_function.index('(')]
assert is_entity_function(second_entity_predicate) and first_entity_predicate == second_entity_predicate
index_2 = second_entity_function.index(',')
second_entity_function_var = second_entity_function[index_2 - 1:index_2]
second_remain_predicate = second_entity_function[second_entity_function.index(')') + 1:]
if second_remain_predicate.startswith(','):
second_remain_predicate = second_remain_predicate[1:]
predicate_name = ("equals_" + second_entity_predicate).replace("_get_", "_")
resultant_lf = '%s(%s,%s)' % (predicate_name, first_entity_function_var, second_entity_function_var)
if len(first_remain_predicate):
resultant_lf += ',%s' % first_remain_predicate
if len(second_remain_predicate):
resultant_lf += ',%s' % second_remain_predicate
elif equal_pattern_match_2:
print("Equal Pattern 2")
# Or the predicate
assert equal_pattern_match_2[1] == equal_pattern_match_2[3] \
and equal_pattern_match_2[2] == equal_pattern_match_2[5]
second_entity_function = equal_pattern_match_2[6]
second_entity_predicate = second_entity_function[:second_entity_function.index('(')]
# assert is_entity_function(second_entity_predicate) or second_entity_predicate.startswith("argmin") \
# or second_entity_predicate.startswith("argmax")
if is_entity_function(second_entity_predicate):
index_2 = second_entity_function.index(',')
second_entity_function_var = second_entity_function[index_2 - 1:index_2]
second_remain_predicate = second_entity_function[second_entity_function.index(')') + 1:]
if second_remain_predicate.startswith(','):
second_remain_predicate = second_remain_predicate[1:]
elif second_entity_predicate.startswith("argmin") \
or second_entity_predicate.startswith("argmax"):
index_2 = second_entity_function.index('(') + 1
second_entity_function_var = second_entity_function[index_2:index_2 + 1]
second_remain_predicate = second_entity_function
else:
# predicate
                # The function here is a plain boolean predicate, not an entity function
# TODO: to be more systematic
# Assume that the first variable should be the primary variable
lindex = second_entity_function.index('(')
rindex = second_entity_function.index(')')
predicate_arguments = second_entity_function[lindex:rindex].split(",")
second_entity_function_var = predicate_arguments[0]
for a in predicate_arguments:
if is_variable(a):
second_entity_function_var = a
break
second_remain_predicate = second_entity_function
resultant_lf = 'equals(%s,%s),const(%s,%s)' % (equal_pattern_match_2[1],second_entity_function_var,
equal_pattern_match_2[3], equal_pattern_match_2[4])
if len(second_remain_predicate) > 0:
resultant_lf += ",%s" % second_remain_predicate
elif equal_pattern_match_3:
print("Equal Pattern 3")
assert equal_pattern_match_3[2] == equal_pattern_match_3[3]
entity_function = equal_pattern_match_3[4]
entity_predicate = entity_function[:entity_function.index('(')]
assert is_entity_function(entity_predicate) or entity_predicate.startswith("argmin") \
or entity_predicate.startswith("argmax")
if is_entity_function(entity_predicate):
            index_2 = entity_function.index(',')
            entity_function_var = entity_function[index_2 - 1:index_2]
            remain_predicate = entity_function[entity_function.index(')') + 1:]
if remain_predicate.startswith(','):
remain_predicate = remain_predicate[1:]
else:
index_2 = entity_function.index('(') + 1
entity_function_var = entity_function[index_2:index_2 + 1]
remain_predicate = entity_function
resultant_lf = 'equals(%s,%s)' % (equal_pattern_match_3[1],entity_function_var)
if len(remain_predicate) > 0:
resultant_lf += ",%s" % remain_predicate
elif is_entity_function(rewrite_function_name) and number_of_arguments == 1:
print("Entity Function")
# Entity Function
# Check if it is entity_function
pattern = re.compile("%s\(([A-Z])\),const_expr\(([A-Z]),(.*)\)" % rewrite_function_name)
pattern_1_match = pattern.match(resultant_lf)
pattern_2 = re.compile(
"%s\(([A-Z])\),const\(([A-Z]),(.*)\)" % rewrite_function_name)
pattern_2_match = pattern_2.match(resultant_lf)
pattern_3 = re.compile(
"%s\(([A-Z])\)" % rewrite_function_name)
pattern_3_match = pattern_3.match(resultant_lf)
if pattern_1_match is not None and pattern_1_match[1] == pattern_1_match[2]:
print("Match Pattern 1")
match = pattern_1_match
entity_variable, predicate_function = match[1], match[3]
if predicate_function.startswith('(') and predicate_function.endswith(')'):
predicate_function = predicate_function[1:-1]
first_predicate = predicate_function[:predicate_function.index("(")]
print(first_predicate, is_entity_function(first_predicate))
if is_entity_function(first_predicate):
rindex = predicate_function.index(')')
ref_variable = predicate_function[rindex - 1:rindex]
new_var = get_new_variable(global_variables)
resultant_lf = '%s(%s,%s),%s' % (
rewrite_function_name, ref_variable, new_var, predicate_function)
global_variables.add(new_var)
elif first_predicate.startswith("argmin") or first_predicate.startswith("argmax"):
index = len(first_predicate)
ref_variable = predicate_function[index + 1:index + 2]
new_var = get_new_variable(global_variables)
resultant_lf = '%s(%s,%s),%s' % (
rewrite_function_name, ref_variable, new_var, predicate_function)
global_variables.add(new_var)
else:
                    # The function here is a plain boolean predicate, not an entity function
# TODO: to be more systematic
# Assume that the first variable should be the primary variable
rindex = predicate_function.index('(')
ref_variable = predicate_function[rindex + 1:rindex+2]
new_var = get_new_variable(global_variables)
resultant_lf = '%s(%s,%s),%s' % (
rewrite_function_name, ref_variable, new_var, predicate_function)
print("Match Predicate")
print(resultant_lf, ref_variable)
global_variables.add(new_var)
elif pattern_2_match is not None and pattern_2_match[1] == pattern_2_match[2]:
print("Match Pattern 2")
match = pattern_2_match
# Simple Entity function
new_var = get_new_variable(global_variables)
index = resultant_lf.index(')')
resultant_lf = resultant_lf[:index] + ",%s" % new_var + resultant_lf[index:]
global_variables.add(new_var)
else:
assert pattern_3_match is not None
print("Match Pattern 3")
# Simple Entity function
new_var = get_new_variable(global_variables)
index = resultant_lf.index(')')
resultant_lf = resultant_lf[:index] + ",%s" % new_var + resultant_lf[index:]
global_variables.add(new_var)
return resultant_lf, variable_constraints
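# Minimal sketch of the const-rewriting above (hypothetical map entry, not the real
# one): assuming "_from" maps to a binary predicate "from" whose second argument type
# is city_name, and that the ci entity code resolves to city_name,
# rewrite("_from", 2, ["A", "boston:_ci"], {}, {"A"}) would yield something like
# 'from(A,B),const(B,city_name(boston))', with B a fresh variable obtained from
# get_new_variable and a type constraint recorded for A.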
class Node:
def __init__(self, lf, lidx, ridx, variable_type_constraints):
self.lf = lf
self.lidx = lidx
self.ridx = ridx
self.variable_type_constraints = variable_type_constraints
def update_variable_type_constraints(constraints_1, constraints_2):
    for key, value in constraints_2.items():
        if key not in constraints_1:
            constraints_1[key] = value
        else:
            # print(key, value, constraints_1[key])
            assert constraints_1[key] == value
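# e.g. merging {"B": "city_name"} into {"A": "flight_id"} updates the first dict in
# place to {"A": "flight_id", "B": "city_name"}; a conflicting re-binding of the same
# variable trips the assert above.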
def transform_lambda_calculus(logical_form):
normalized_lf = normalize_lambda_calculus(logical_form)
# Replace Variable
normalized_lf = normalized_lf.replace('$v0:e ', 'A ')
normalized_lf = normalized_lf.replace('$v1:e ', 'B ')
normalized_lf = normalized_lf.replace('$v2:e ', 'C ')
normalized_lf = normalized_lf.replace('$v3:e ', 'D ')
normalized_lf = normalized_lf.replace('$v0:i ', 'A ')
normalized_lf = normalized_lf.replace('$v1:i ', 'B ')
normalized_lf = normalized_lf.replace('$v2:i ', 'C ')
normalized_lf = normalized_lf.replace('$v3:i ', 'D ')
normalized_lf = normalized_lf.replace('$v0', 'A')
normalized_lf = normalized_lf.replace('$v1', 'B')
normalized_lf = normalized_lf.replace('$v2', 'C')
normalized_lf = normalized_lf.replace('$v3', 'D')
normalized_lf = re.sub(' +', ' ', normalized_lf)
# Translate
if normalized_lf.count('(') == 0:
# Simple Cases, A single entity
entity_name, entity_type = extract_entity(normalized_lf)
prolog = 'answer(A,(const(A,%s(%s))))' % (
ENTITY_TYPE_MAP[entity_type], entity_name)
else:
left_brackets = list()
# original_lf = copy.deepcopy(python_lf)
tokens = tokenize_logical_form(normalized_lf)
global_variable_set = {token for token in tokens if token in {'A', 'B', 'C', 'D'}}
global_variable_constraints = dict()
answer_variable_set = set()
nodes = list()
for tidx, token in enumerate(tokens):
if token == '(':
left_brackets.append(tidx)
elif token == ')':
node_variable_type_constraints = dict()
pidx = left_brackets.pop()
children_nodes = list()
for nidx, node in enumerate(nodes):
if pidx < node.lidx and tidx > node.ridx:
children_nodes.append(node)
for n in children_nodes:
nodes.remove(n)
# Rewrite
                # Prolog has a very flat structure
if len(children_nodes) == 0:
sub_tokens = tokens[pidx + 1:tidx]
function_name = sub_tokens[0]
number_of_arguments = len(sub_tokens[1:])
rewritten_lf, node_variable_type_constraints = rewrite(function_name, number_of_arguments,
sub_tokens[1:], global_variable_constraints,
global_variable_set)
else:
# Has children
sub_tokens = tokens[pidx + 1:tidx]
function_name = sub_tokens[0]
_inside_bracket_stack = 0
other_children = list()
children_num = 0
children_position = list()
for sub_token in sub_tokens[1:]:
if sub_token == '(':
_inside_bracket_stack += 1
if _inside_bracket_stack == 1:
children_num += 1
children_position.append('bracket')
elif sub_token == ')':
_inside_bracket_stack -= 1
else:
if _inside_bracket_stack == 0:
children_num += 1
other_children.append(sub_token)
children_position.append('token')
assert children_num == len(children_position)
string = list()
if function_name == '_lambda':
assert len(other_children) == 1 and len(children_nodes) == 1
child_node = children_nodes.pop(0)
variable = other_children.pop(0)
node_variable_type_constraints = copy.deepcopy(
child_node.variable_type_constraints)
answer_variable_set.add(variable)
rewritten_lf = child_node.lf
elif function_name in ['_argmin', '_argmax', '_sum']:
assert len(other_children) == 1 and len(
children_nodes) == 2
variable = other_children.pop(0)
node_1, node_2 = children_nodes.pop(
0), children_nodes.pop(0)
update_variable_type_constraints(node_variable_type_constraints, node_1.variable_type_constraints)
update_variable_type_constraints(node_variable_type_constraints, node_2.variable_type_constraints)
# Arg max/min entity function
entity_function = node_2.lf[:node_2.lf.index('(')]
predicate_name = "%s_%s" % (function_name[1:], entity_function)
rewritten_lf = "%s(%s,%s)" % (
predicate_name, variable, node_1.lf)
elif function_name == '_count':
assert len(other_children) == 1 and len(children_nodes) == 1
variable = other_children.pop(0)
child_node = children_nodes.pop(0)
update_variable_type_constraints(
node_variable_type_constraints, child_node.variable_type_constraints)
# print(node_variable_type_constraints, variable)
new_variable = get_new_variable(global_variable_set)
rewritten_lf = "count(%s,(%s),%s)" % (
variable, child_node.lf, new_variable)
global_variable_set.add(new_variable)
elif function_name in ['_exists', '_the']:
assert len(other_children) == 1 and len(
children_nodes) == 1
variable = other_children.pop(0)
child_node = children_nodes.pop(0)
update_variable_type_constraints(
node_variable_type_constraints, child_node.variable_type_constraints)
rewritten_lf = "%s" % child_node.lf
elif function_name in ['_max', '_min']:
assert len(other_children) == 1 and len(
children_nodes) == 1
variable = other_children.pop(0)
child_node = children_nodes.pop(0)
update_variable_type_constraints(
node_variable_type_constraints, child_node.variable_type_constraints)
child_lf = child_node.lf
# replace
# pattern = '(numerical_equals\((.*?),%s\),const_expr\((.*?),(.*?\))\))' % variable
# results = re.findall(pattern, child_lf)
# assert len(results) == 1
# results = results[0]
# assert results[1] == results[2]
# child_lf = child_lf.replace(results[0], "true")
# entity_function = results[3][:results[3].index('(')]
# predicate_name = "%s_%s" % (
# function_name[1:], entity_function)
# # numerical_function = "%s(%s,(%s))" % (
# # predicate_name, variable, child_lf)
rewritten_lf = "%s(%s,%s)" % (
function_name, variable, child_lf)
elif function_name in ['_and', '_or']:
for position in children_position:
if position == 'bracket':
n = children_nodes.pop(0)
string.append(n.lf)
update_variable_type_constraints(
node_variable_type_constraints, n.variable_type_constraints)
else:
sub_token = other_children.pop(0)
string.append(sub_token)
if function_name == '_and':
rewritten_lf = "(%s)" % (','.join(string))
else:
rewritten_lf = "or(%s)" % (','.join(string))
elif function_name == '_not':
for position in children_position:
if position == 'bracket':
n = children_nodes.pop(0)
string.append(n.lf)
update_variable_type_constraints(
node_variable_type_constraints, n.variable_type_constraints)
else:
sub_token = other_children.pop(0)
string.append(sub_token)
rewritten_lf = "not(%s)" % (','.join(string))
else:
for position in children_position:
if position == 'bracket':
n = children_nodes.pop(0)
string.append(n.lf)
update_variable_type_constraints(node_variable_type_constraints, n.variable_type_constraints)
else:
sub_token = other_children.pop(0)
string.append(sub_token)
rewritten_lf, variable_type_constraints = rewrite(
function_name, children_num, string,
global_variable_constraints, global_variable_set)
# Update variable constraints
update_variable_type_constraints(node_variable_type_constraints, variable_type_constraints)
new_node = Node(
rewritten_lf, pidx, tidx, node_variable_type_constraints)
global_variable_constraints.update(node_variable_type_constraints)
# print(node_variable_type_constraints)
nodes.append(new_node)
else:
if tidx > 0 and (not tokens[tidx - 1] == '(') and ":_" in token:
# token is not function name
tokens[tidx] = '"%s"' % tokens[tidx]
assert len(nodes) == 1
prolog_variable_type_constraints = nodes[0].variable_type_constraints
prolog = nodes[0].lf
if len(answer_variable_set) > 0:
prefix = "%s" % len(answer_variable_set)
prolog = "answer_%s(%s,%s)" % (prefix,
','.join(sorted(answer_variable_set)), prolog)
elif is_entity_function(prolog[:prolog.index('(')]):
index = prolog.index(')')
answer_var = prolog[index - 1:index]
prolog = "answer_1(%s,(%s))" % (answer_var,prolog)
elif prolog.startswith('argmin') or prolog.startswith('argmax') \
or prolog.startswith('_min') or prolog.startswith('_max'):
index = prolog.index('(') + 1
answer_var = prolog[index:index+1]
prolog = "answer_1(%s,(%s))" % (answer_var, prolog)
elif prolog.startswith('count'):
index = prolog.rindex(')')
answer_var = prolog[index-1:index]
prolog = "answer_1(%s,(%s))" % (answer_var, prolog)
else:
            print("Failed to translate to Prolog")
return prolog
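# Illustrative sketch of the bracket-free case handled at the top of this function:
# an input that is a single entity, e.g. "boston:_ci", comes out as
# answer(A,(const(A,city_name(boston)))), assuming ENTITY_TYPE_MAP maps ci to
# city_name.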
if __name__ == '__main__':
questions, logical_forms = read_data(
'./atis_lambda_test.tsv')
sorted_logical_forms = sorted([(q,lf,) for q, lf in zip(questions, logical_forms)], key=lambda x: len(x[1]))
# with open("atis_prolog_test.log", "w") as f:
for lidx, (question, lf) in enumerate(sorted_logical_forms):
print(lidx)
print(question)
print(lf)
normalized_lf = transform_lambda_calculus(lf)
print(normalized_lf)
print("===\n\n")
# f.write("%s\n%s\n%s\n===\n\n" % (question, lf, normalized_lf))
| 57,781 | 45.560838 | 148 |
py
|
Unimer
|
Unimer-master/data/atis/lambda_calculus_to_funql.py
|
# coding=utf8
import re
import copy
from pprint import pprint
from lambda_calculus_to_prolog import FUNCTION_REPLACE_MAP
ENTITY_PATTERN = re.compile(r'^[A-Z|a-z|\\|_|\d]+:_([a-z]+)$')
ENTITY_TYPE_MAP = {
"ac": "aircraft_code",
"al": "airline_code",
"ci": "city_name",
"ap": "airport_code",
"fn": "flight_number",
"cl": "class_description",
"ti": "time",
"pd": "day_period",
"mf": "manufacturer",
"mn": "month",
"da": "day",
"i": "integer",
"yr": "year",
"dn": "day_number",
"do": "dollar",
"hr": "hour",
"rc": "meal_code",
"st": "state_name",
"fb": "fare_basis_code",
"me": "meal_description",
"bat": "basis_type",
"dc": "days_code"
}
def read_data(path):
questions, logical_forms = list(), list()
with open(path, 'r') as f:
for line in f:
line = line.strip()
splits = line.split('\t')
questions.append(splits[0])
logical_forms.append(splits[1])
return questions, logical_forms
def split_tokens(lf):
replacements = [
('(', ' ( '),
(')', ' ) '),
("\\+", " \\+ "),
]
for a, b in replacements:
lf = lf.replace(a, b)
return lf
def standardize_lambda_calculus_varnames(ans):
toks = ans.split(' ')
varnames = {}
new_toks = []
for t in toks:
if t == 'x' or t.startswith('$'):
if ':' in t:
# var definition
splits = t.split(':')
name, var_type = splits[0], splits[1]
assert name not in varnames
new_name = '$v%d' % len(varnames)
varnames[name] = new_name
new_toks.append(new_name + ":" + var_type)
else:
# t is a variable name
if t in varnames:
new_toks.append(varnames[t])
else:
new_varname = '$v%d' % len(varnames)
varnames[t] = new_varname
new_toks.append(new_varname)
else:
new_toks.append(t)
lf = ' '.join(new_toks)
return lf
def normalize_lambda_calculus(logical_form):
lf = split_tokens(logical_form)
lf = re.sub(' +', ' ', lf)
s = standardize_lambda_calculus_varnames(lf)
variables = ["$v0", "$v1", "$v2", "$v3"]
for var in variables:
s = s.replace(var + " e ", "%s:e " % var)
s = s.replace(var + " i ", "%s:i " % var)
s = s.replace(' :', ":").replace(
'\s+', ' ').replace("( ", "(").replace(" )", ")").replace(')\s)', '))').strip().lower()
s = re.sub(' +', ' ', s)
return s
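# Illustrative sketch (hypothetical input): "( _lambda x ( _oneway x ) )" is
# normalized to "(_lambda $v0 (_oneway $v0))" -- variables are renamed to $v0, $v1,
# ..., spacing around brackets is collapsed, and the string is lower-cased.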
def extract_entity(lf):
tokens = lf.split(":_")
return tokens
def tokenize_logical_form(logical_form):
replacements = [
('(', ' ( '),
(')', ' ) '),
# ("\\+", " \\+ "),
]
normalized_lc = re.sub(' +', ' ', logical_form)
for a, b in replacements:
normalized_lc = normalized_lc.replace(a, b)
tokens = [t for t in normalized_lc.split()]
return tokens
def is_var(string):
return re.match('^[A-Z|a-z]$', string) is not None
def is_entity(string):
match = ENTITY_PATTERN.match(string.replace('"', ""))
return match is not None
class Node:
def __init__(self, lf, lidx, ridx, input_vars=None, output_var=None):
self.lf = lf
self.lidx = lidx
self.ridx = ridx
self.input_vars = input_vars
self.output_var = output_var
def get_function_return_type(function_name):
candidates = list()
for _, funcs in FUNCTION_REPLACE_MAP.items():
for f in funcs:
if f['name'] == function_name:
candidates.append(f['return_type'])
if len(candidates) > 0:
break
if len(candidates) > 0:
for t in candidates:
if t != 'bool':
return t
return None
def process_entity_string(entity, default=""):
assert isinstance(entity, str)
if ":_" in entity:
splits = entity.split(":_")
entity_name = splits[0]
entity_type = ENTITY_TYPE_MAP[splits[1]]
else:
entity_type = default
entity_name = entity
if '_' in entity_name:
entity_name = entity_name.replace("_", " ")
return entity_name, entity_type
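# e.g. process_entity_string("new_york:_ci") -> ("new york", "city_name") via the ci
# entry of ENTITY_TYPE_MAP; a bare string without ":_" keeps the supplied default type.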
def is_entity_function(function_name, number_of_arguments, arguments, variable_constraints):
if function_name not in FUNCTION_REPLACE_MAP:
assert function_name in ['_minutes_distant',
'_named', '_overnight']
if function_name == '_minutes_distant':
return True
return False
names = FUNCTION_REPLACE_MAP[function_name]
rewrite_function_name = function_name
argument_types = None
is_meta_function = False
if len(names) == 1:
rewrite_function_name = names[0]['name']
argument_types = names[0]["argument_type"]
is_meta_function = "is_meta" in names[0] and names[0]['is_meta'] is True
else:
        # select by argument number
feasible_index = []
for idx, name in enumerate(names):
if name['number_of_argument'] == number_of_arguments:
rewrite_function_name = name['name']
argument_types = name["argument_type"]
feasible_index.append(idx)
if len(feasible_index) == 0:
raise Exception("No feasible functions in Python")
elif len(feasible_index) == 1:
idx = feasible_index[0]
rewrite_function_name = names[idx]['name']
argument_types = names[idx]['argument_type']
is_meta_function = "is_meta" in names[idx] and names[idx]['is_meta'] is True
else:
# Select by Argument Type
best_index = 0
best_count = 0
for idx in feasible_index:
name = names[idx]
types = names[idx]['argument_type']
count = 0
for t, arg in zip(types, arguments):
_arg = arg.replace('"', "")
match = ENTITY_PATTERN.match(_arg)
if match:
e, et = process_entity_string(_arg)
if et == t:
count += 1
elif _arg.startswith("argmin_") or _arg.startswith("argmax_"):
# argmin, argmax
index = _arg.index("(") + 1
var = _arg[index:index+1]
if var in variable_constraints:
et = variable_constraints[var]
if et == t:
                            count += 1
else:
if is_var(_arg) and _arg in variable_constraints:
et = variable_constraints[_arg]
if et == t:
count += 1
if count > best_count:
best_index = idx
best_count = count
rewrite_function_name = names[best_index]['name']
argument_types = names[best_index]['argument_type']
is_meta_function = "is_meta" in names[best_index] and names[best_index]['is_meta'] is True
# Variable Inference
# Derive type constraints, Type Inference
# print(function_name, rewrite_function_name, number_of_arguments, arguments, argument_types)
assert number_of_arguments == len(argument_types)
if is_meta_function:
if rewrite_function_name in ['equals', 'numerical_equals', 'less_than', 'larger_than']:
if is_var(arguments[0]):
arg_variable = arguments[0]
arg_func = arguments[1]
elif is_var(arguments[1]):
arg_variable = arguments[1]
arg_func = arguments[0]
else:
arg_variable, arg_func = None, None
if arg_variable is not None and arg_func is not None:
match = ENTITY_PATTERN.match(arg_func.replace('"', ""))
if match:
e, et = process_entity_string(arg_func.replace('"', ""))
variable_constraints[arg_variable] = et
elif arg_func.startswith("argmin(") or arg_func.startswith("argmax("):
for _var in [" A:", " B:", " C:"]:
processed_var = _var.replace(":", "").strip()
if _var in arg_func and processed_var in variable_constraints:
variable_constraints[arg_variable] = variable_constraints[processed_var]
break
else:
arg_func_return_type = get_function_return_type(
arg_func[:arg_func.index("(")])
if arg_func_return_type is not None and arg_func_return_type not in ['*', 'bool']:
variable_constraints[arg_variable] = arg_func_return_type
else:
for argument, atype in zip(arguments, argument_types):
if is_var(argument):
variable_constraints[argument] = atype
candidates = list()
for _, funcs in FUNCTION_REPLACE_MAP.items():
for f in funcs:
if f['name'] == rewrite_function_name:
candidates.append(f['return_type'])
if len(candidates) > 0:
break
if len(candidates) > 0:
return candidates[0] != 'bool'
else:
return False
def rewrite(function_name, number_of_arguments, arguments, variable_constraints):
is_entity_func = is_entity_function(function_name, number_of_arguments, arguments, variable_constraints)
rewritten_function_name = function_name
rewritten_arguments = list()
argument_vars = list()
for arg in arguments:
if is_var(arg):
argument_vars.append(1)
else:
argument_vars.append(0)
# Rewrite argument
if is_entity(arg):
entity_name, entity_type = extract_entity(arg)
rewritten_arguments.append('%s(%s)' % (ENTITY_TYPE_MAP[entity_type], entity_name))
else:
rewritten_arguments.append(arg)
# print(number_of_arguments, sum(argument_vars), arguments, rewritten_arguments, is_var('airport_code(mke)'))
output_variable = None
input_variables = list()
if number_of_arguments == 1:
if sum(argument_vars) > 0:
if is_entity_func:
input_variables = rewritten_arguments
output_variable = None
expr = "%s(%s)" % (rewritten_function_name, rewritten_arguments[0])
else:
# predicate cast
# TODO: fix
input_variables = rewritten_arguments
output_variable = None
expr = "%s(all)" % rewritten_function_name
else:
# no variable
input_variables = list()
expr = "%s(%s)" % (rewritten_function_name, ",".join(rewritten_arguments))
else:
assert number_of_arguments == 2
if sum(argument_vars) == number_of_arguments:
# TODO: fix
input_variables = rewritten_arguments
expr = "%s(%s)" % (rewritten_function_name, ",".join(rewritten_arguments))
elif sum(argument_vars) == 0:
input_variables = list()
expr = "%s(%s)" % (rewritten_function_name, ",".join(rewritten_arguments))
if rewritten_function_name in ['_=', '_<', '_>']:
print("Rewrite Meta Predicate (Equal, Less Than, larger than)")
# ( _< ( _fare $0 ) 150:_do )
for arg_idx, arg in enumerate(arguments):
if is_entity(arg):
entity_function_name = arguments[number_of_arguments - arg_idx - 1]
entity_function_name = entity_function_name[:entity_function_name.index('(')]
predicate = "%s%s_%d" % (rewritten_function_name,
entity_function_name, number_of_arguments - arg_idx)
expr = "%s(%s)" % (predicate, rewritten_arguments[arg_idx])
break
else:
# No variable & No entity
# _>(_capacity(A),_capacity(_aircraft(_turboprop(all))))
child_func_1, child_func_2 = arguments[0], arguments[1]
child_func_name_1, child_func_name_2 = child_func_1[:child_func_1.index("(")], \
child_func_2[:child_func_2.index("(")]
assert child_func_name_1 == child_func_name_2
pattern = re.compile("%s\([A-Z]\)" % child_func_name_1)
child_1_match, child_2_match = pattern.match(child_func_1), pattern.match(child_func_2)
assert(child_1_match is not None) ^ (child_2_match is not None)
if child_1_match is not None:
index = 2
child = child_func_2
else:
index = 1
child = child_func_1
            child = child[child.index("(")+1:-1]
predicate = "%s%s_%d" % (rewritten_function_name, child_func_name_1, index)
expr = "%s(%s)" % (predicate, child)
else:
index = argument_vars.index(1)
input_variables = [rewritten_arguments[number_of_arguments - index - 1]]
output_variable = rewritten_arguments[index]
expr = "%s_%d(%s)" % (rewritten_function_name, number_of_arguments - index,
rewritten_arguments[number_of_arguments - index - 1])
if "%s_%d" % (rewritten_function_name, number_of_arguments - index) in ['_equals_2', '_equals_1']\
and is_entity(arguments[number_of_arguments - index - 1]):
expr = rewritten_arguments[number_of_arguments - index - 1]
return expr, input_variables, output_variable
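# Minimal sketch (assuming "_from" appears in the imported FUNCTION_REPLACE_MAP as an
# ordinary boolean predicate with a feasible two-argument entry):
# rewrite("_from", 2, ["A", "boston:_ci"], {}) would return roughly
# ("_from_2(city_name(boston))", ["city_name(boston)"], "A"), i.e. the entity argument
# is pushed into the FunQL term and the position of the variable slot is encoded in
# the _2 suffix.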
def rewrite_intersection(child_nodes, function_name):
meta_predicate = "intersection" if function_name == '_and' else 'or'
assert len(child_nodes) > 1
_child_nodes = copy.copy(child_nodes)
object_pattern = re.compile('([A-Z|a-z|:|_]+)\(all\)')
object_cast = list()
united_output_vars = set()
object_united_input_vars = set()
# Remove unary predicates
for node in _child_nodes:
if object_pattern.match(node.lf):
object_cast.append(node)
if node.input_vars is not None:
object_united_input_vars |= set(node.input_vars)
else:
if node.output_var is not None and \
(not (node.lf.startswith("_=") or node.lf.startswith("_<") or node.lf.startswith("_>"))):
united_output_vars.add(node.output_var)
for n in object_cast:
_child_nodes.remove(n)
print(object_united_input_vars, united_output_vars)
if len(object_united_input_vars) == 1 and len(united_output_vars) == 1 \
and len(object_united_input_vars & united_output_vars) == 1:
if len(_child_nodes) > 1:
rewritten_lf = "%s(%s)" % (meta_predicate, ",".join([n.lf for n in _child_nodes]))
else:
rewritten_lf = "%s" % _child_nodes[0].lf
for n in reversed(object_cast):
predicate = n.lf[:n.lf.index('(')]
rewritten_lf = "%s(%s)" % (predicate, rewritten_lf)
elif len(object_united_input_vars) == 0:
rewritten_lf = "%s(%s)" % (meta_predicate, ",".join([n.lf for n in _child_nodes]))
elif len(united_output_vars) == 0 and len(object_united_input_vars) == 1:
if meta_predicate == 'intersection':
if len(_child_nodes) == 0:
rewritten_lf = ""
for idx, n in enumerate(reversed(object_cast)):
if idx == 0:
rewritten_lf = n.lf
else:
predicate = n.lf[:n.lf.index('(')]
rewritten_lf = "%s(%s)" % (predicate, rewritten_lf)
else:
if len(_child_nodes) > 1:
rewritten_lf = "intersection(%s)" % (",".join([n.lf for n in _child_nodes]))
else:
rewritten_lf = "%s" % _child_nodes[0].lf
for n in reversed(object_cast):
predicate = n.lf[:n.lf.index('(')]
rewritten_lf = "%s(%s)" % (predicate, rewritten_lf)
else:
# or
rewritten_lf = "or(%s)" % (",".join([n.lf for n in child_nodes]))
else:
rewritten_lf = "%s(%s)" % (meta_predicate, ",".join([n.lf for n in child_nodes]))
return rewritten_lf
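# Illustrative case for '_and': intersecting a unary cast node "_flight(all)"
# (input var A) with "_from_2(city_name(boston))" (output var A) collapses to
# "_flight(_from_2(city_name(boston)))" instead of an explicit intersection(...) term.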
def replace_sub_lf(main_lf, replace_lf):
result = main_lf.replace(replace_lf + ",", "")
result = result.replace("," + replace_lf, "")
result = result.replace(replace_lf, "")
return result
def transform_lambda_calculus(logical_form):
normalized_lf = normalize_lambda_calculus(logical_form)
# Replace Variable
normalized_lf = normalized_lf.replace('$v0:e ', 'A ')
normalized_lf = normalized_lf.replace('$v1:e ', 'B ')
normalized_lf = normalized_lf.replace('$v2:e ', 'C ')
normalized_lf = normalized_lf.replace('$v3:e ', 'D ')
normalized_lf = normalized_lf.replace('$v0:i ', 'A ')
normalized_lf = normalized_lf.replace('$v1:i ', 'B ')
normalized_lf = normalized_lf.replace('$v2:i ', 'C ')
normalized_lf = normalized_lf.replace('$v3:i ', 'D ')
normalized_lf = normalized_lf.replace('$v0', 'A')
normalized_lf = normalized_lf.replace('$v1', 'B')
normalized_lf = normalized_lf.replace('$v2', 'C')
normalized_lf = normalized_lf.replace('$v3', 'D')
normalized_lf = re.sub(' +', ' ', normalized_lf)
# Translate
if normalized_lf.count('(') == 0:
# Simple Cases, A single entity
entity_name, entity_type = extract_entity(normalized_lf)
funql = 'answer(%s(%s))' % (
ENTITY_TYPE_MAP[entity_type], entity_name)
else:
left_brackets = list()
tokens = tokenize_logical_form(normalized_lf)
global_variable_constraints = dict()
nodes = list()
for tidx, token in enumerate(tokens):
if token == '(':
left_brackets.append(tidx)
elif token == ')':
pidx = left_brackets.pop()
children_nodes = list()
for nidx, node in enumerate(nodes):
if pidx < node.lidx and tidx > node.ridx:
children_nodes.append(node)
for n in children_nodes:
nodes.remove(n)
# Rewrite
                # FunQL has a deeply nested structure
if len(children_nodes) == 0:
sub_tokens = tokens[pidx + 1:tidx]
function_name = sub_tokens[0]
number_of_arguments = len(sub_tokens[1:])
rewritten_lf, input_vars, output_var = rewrite(
function_name, number_of_arguments, sub_tokens[1:],
global_variable_constraints
)
else:
# Has children
sub_tokens = tokens[pidx + 1:tidx]
function_name = sub_tokens[0]
_inside_bracket_stack = 0
other_children = list()
children_num = 0
children_position = list()
for sub_token in sub_tokens[1:]:
if sub_token == '(':
_inside_bracket_stack += 1
if _inside_bracket_stack == 1:
children_num += 1
children_position.append('bracket')
elif sub_token == ')':
_inside_bracket_stack -= 1
else:
if _inside_bracket_stack == 0:
children_num += 1
other_children.append(sub_token)
children_position.append('token')
assert children_num == len(children_position)
string = list()
if function_name == '_lambda':
assert len(other_children) == 1 and len(children_nodes) == 1
child_node = children_nodes.pop(0)
rewritten_lf = child_node.lf
input_vars = child_node.input_vars
output_var = child_node.output_var
elif function_name in ['_argmin', '_argmax', '_sum']:
assert len(other_children) == 1 and len(
children_nodes) == 2
variable = other_children.pop(0)
node_1, node_2 = children_nodes.pop(
0), children_nodes.pop(0)
entity_function = node_2.lf[:node_2.lf.index('(')]
predicate_name = "%s%s" % (function_name[1:], entity_function)
rewritten_lf = "%s(%s)" % (
predicate_name, node_1.lf)
output_var = variable
input_vars = list()
elif function_name == '_count':
assert len(other_children) == 1 and len(children_nodes) == 1
variable = other_children.pop(0)
child_node = children_nodes.pop(0)
input_vars = list()
output_var = None
rewritten_lf = "count(%s)" % (child_node.lf)
elif function_name == '_exists':
assert len(other_children) == 1 and len(
children_nodes) == 1
variable = other_children.pop(0)
child_node = children_nodes.pop(0)
rewritten_lf = "%s" % child_node.lf
input_vars = child_node.input_vars
output_var = child_node.output_var
match_count = 0
# Match predicates with two variable
pattern_1 = re.compile("(([A-Z|a-z|:|_]+)\(([A-Z|a-z]),([A-Z|a-z])\))")
results = pattern_1.findall(child_node.lf)
if len(results) > 0 and len(results) == 1:
match_count += 1
print("Exists Match predicates with two variable")
assert len(results) == 1 and variable in global_variable_constraints
result = results[0]
replace_predicate = result[0]
child_lf = replace_sub_lf(child_node.lf, replace_predicate)
target_index = result.index(variable) - 1
rewritten_lf = "%s_%d(%s)" % (result[1], target_index, child_lf)
print(rewritten_lf)
pattern_2 = re.compile("(((_=|_<|_>)_[1|2])\(([_|A-Z|a-z|:|\d]+)\(([A-Z])\)\))")
results = pattern_2.findall(child_node.lf)
if len(results) > 0:
match_count += 1
print("Exists Match Meta Predicate", len(results))
print(results)
assert len(results) in [1, 2] and variable in global_variable_constraints
if len(results) == 1:
result = results[0]
replace_predicate = result[0]
child_lf = replace_sub_lf(child_node.lf, replace_predicate)
if child_lf == '':
if global_variable_constraints[variable] == 'flight_id':
child_lf = '_flight(all)'
print(child_lf)
assert len(child_lf) > 0 and result[-1] == variable and result[2] == '_='
rewritten_lf = "%s(%s)" % (result[3], child_lf)
else:
                                # TODO: not handled here; these cases are fixed manually
print("TODO: manually fixed")
print("Exists Match Count: %d" % match_count)
elif function_name == '_the':
assert len(other_children) == 1 and len(
children_nodes) == 1
variable = other_children.pop(0)
child_node = children_nodes.pop(0)
rewritten_lf = "%s" % child_node.lf
input_vars = child_node.input_vars
output_var = child_node.output_var
elif function_name in ['_max', '_min']:
assert len(other_children) == 1 and len(
children_nodes) == 1
variable = other_children.pop(0)
child_node = children_nodes.pop(0)
child_lf = child_node.lf
rewritten_lf = "%s(%s)" % (function_name, child_lf)
input_vars = list()
output_var = None
elif function_name in ['_and', '_or']:
child_node_count = 0
output_var = children_nodes[0].output_var
input_vars = list()
rewritten_lf = rewrite_intersection(children_nodes, function_name)
elif function_name == '_not':
assert len(children_position) == 1 and len(children_nodes) == 1
child_node = children_nodes.pop(0)
rewritten_lf = "not(%s)" % (child_node.lf)
input_vars, output_var = child_node.input_vars, child_node.output_var
else:
for position in children_position:
if position == 'bracket':
n = children_nodes.pop(0)
string.append(n.lf)
else:
sub_token = other_children.pop(0)
string.append(sub_token)
rewritten_lf, input_vars, output_var = rewrite(function_name, children_num, string,
global_variable_constraints)
new_node = Node(rewritten_lf, pidx, tidx, input_vars=input_vars, output_var=output_var)
nodes.append(new_node)
else:
if tidx > 0 and (not tokens[tidx - 1] == '(') and ":_" in token:
# token is not function name
tokens[tidx] = '%s' % tokens[tidx]
assert len(nodes) == 1
funql = nodes[0].lf
funql = "answer(%s)" % funql
return funql
def tokenize_funql(funql):
normalized_lf = funql.replace(" ", "::")
replacements = [
('(', ' ( '),
(')', ' ) '),
(',', ' , '),
("\\+", " \\+ "),
]
for a, b in replacements:
normalized_lf = normalized_lf.replace(a, b)
tokens = [t if "::" not in t else t.replace("::", " ") for t in normalized_lf.split()]
return tokens
def is_correct_funql(funql):
tokens = tokenize_funql(funql)
if funql.count(")") != funql.count("("):
return False
if "()" in funql:
print("Empty Object")
return False
for token in tokens:
if re.match('[A-Z]', token):
return False
return True
def tokenize_funql_2(funql):
normalized_lf = funql.replace(" ", "::")
replacements = [
('(', '( '),
(')', ' ) '),
(',', ' , '),
("\\+", " \\+ "),
]
for a, b in replacements:
normalized_lf = normalized_lf.replace(a, b)
tokens = [t if "::" not in t else t.replace("::", " ") for t in normalized_lf.split()]
return tokens
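# e.g. tokenize_funql("answer(_flight(all))") gives
# ['answer', '(', '_flight', '(', 'all', ')', ')'], while tokenize_funql_2 keeps the
# opening bracket attached to the head: ['answer(', '_flight(', 'all', ')', ')'].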
class FunQLNode:
def __init__(self, lf, lidx, ridx, function_name):
self.lf = lf
self.lidx = lidx
self.ridx = ridx
self.function_name = function_name
self.children = list()
def fix_funql_intersection(funql):
# Build FunQL Tree
left_brackets = list()
funql_tokens = tokenize_funql_2(funql)
nodes = list()
for tidx, token in enumerate(funql_tokens):
if token.endswith('('):
left_brackets.append(tidx)
elif token == ')':
pidx = left_brackets.pop()
children_nodes = list()
for nidx, node in enumerate(nodes):
if pidx < node.lidx and tidx > node.ridx:
children_nodes.append(node)
for n in children_nodes:
nodes.remove(n)
sub_lf = "".join(funql_tokens[pidx:tidx+1])
function_name = funql_tokens[pidx][:-1]
if len(children_nodes) == 0:
function_name = sub_lf
node = FunQLNode(sub_lf, pidx, tidx, function_name)
for child in children_nodes:
node.children.append(child)
nodes.append(node)
tidx += 1
assert len(nodes) == 1 and nodes[0].lf == funql
node = nodes[0]
def _fix(node):
# Fix intersection
for cidx, child in enumerate(node.children):
if child.lf.startswith("intersection") and len(child.children) == 1:
print("Problematic Intersection")
node.children[cidx] = child.children[0]
_fix(child)
_fix(node)
# Get fixed funql
def _aggregate(node):
if len(node.children) == 0:
node_lf = node.function_name
else:
child_lf = list()
for child in node.children:
child_lf.append(_aggregate(child))
node_lf = "%s(%s)" % (node.function_name, ",".join(child_lf))
return node_lf
fixed_funql = _aggregate(node)
return fixed_funql
def simplify_funql_object_predicates(funql):
funql = funql.replace("_economy(all)", "_economy(_flight(all))")
funql = funql.replace("_nonstop(all)", "_nonstop(_flight(all))")
funql = funql.replace("_connecting(all)", "_connecting(_flight(all))")
funql = funql.replace("_limousine(all)", "_limousine(_ground_transport(all))")
funql = funql.replace("_taxi(all)", "_taxi(_ground_transport(all))")
funql = funql.replace("_oneway(all)", "_oneway(_flight(all))")
funql = funql.replace("_round_trip(all)", "_round_trip(_flight(all))")
funql = funql.replace("_aircraft(_turboprop(all))", "_turboprop(_aircraft(all))")
funql = funql.replace("_turboprop(all)", "_turboprop(_aircraft(all))")
funql = funql.replace("_aircraft_code:_t(all)", "_aircraft(all)")
funql = funql.replace("_meal:_t(all)", "_meal_code(all)")
return funql
if __name__ == '__main__':
questions, logical_forms = read_data(
'./atis_lambda_train.tsv')
all_object_predicates = set()
sorted_logical_forms = sorted([(q,lf,) for q, lf in zip(questions, logical_forms)], key=lambda x: len(x[1]))
# with open("atis_funql_test.log", "w") as f:
for lidx, (question, lf) in enumerate(sorted_logical_forms):
print(lidx)
print(question)
print(lf)
funql = transform_lambda_calculus(lf)
funql = fix_funql_intersection(funql)
funql = simplify_funql_object_predicates(funql)
print(funql)
# Find all object predicates
all_object_pattern = re.compile('([_|a-z|\d|:]+)\(all\)')
results = all_object_pattern.findall(funql)
for result in results:
all_object_predicates.add(result)
print("===\n\n")
# if not is_correct_funql(funql):
# f.write("Incorrect FunQL\n")
# f.write("%s\n%s\n%s\n===\n\n" % (question, lf, funql))
pprint(all_object_predicates)
| 32,555 | 41.062016 | 113 |
py
|
Unimer
|
Unimer-master/data/job/log_to_csv.py
|
# coding=utf8
if __name__ == '__main__':
questions, logical_forms = list(), list()
with open("job_funql_train.log", 'r') as f:
lines = f.readlines()
lidx = 0
while lidx < len(lines):
line = lines[lidx]
line = line.strip()
print(lidx)
if len(line) > 0:
if line == "Incorrect FunQL":
lidx += 1
line = lines[lidx].strip()
question = line
lidx += 1
lc = lines[lidx].strip()
lidx += 1
lf = lines[lidx].strip()
print(question)
print(lf)
questions.append(question)
logical_forms.append(lf)
lidx += 1
assert lines[lidx].startswith('==')
lidx += 1
with open("job_funql_train.tsv", 'w') as f:
for question, logical_form in zip(questions, logical_forms):
f.write("%s\t%s\n" % (question, logical_form))
| 1,040 | 32.580645 | 68 |
py
|
Unimer
|
Unimer-master/data/job/funql_to_lambda.py
|
# coding=utf8
import re
def read_data(path):
questions, logical_forms = list(), list()
with open(path, 'r') as f:
for line in f:
line = line.strip()
splits = line.split('\t')
questions.append(splits[0])
logical_forms.append(splits[1])
return questions, logical_forms
def tokenize_funql(funql):
normalized_lf = funql.replace(" ", "::")
replacements = [
('(', '( '),
(')', ' ) '),
(',', ' , '),
]
for a, b in replacements:
normalized_lf = normalized_lf.replace(a, b)
tokens = [t if "::" not in t else t.replace("::", " ") for t in normalized_lf.split()]
return tokens
class Node:
def __init__(self, lf, lidx, ridx):
self.lf = lf
self.lidx = lidx
self.ridx = ridx
def derive_fo_logits(function_name, arguments, fo_logits, funcs, vars):
function_name = function_name[:-1]
if function_name == 'const':
return
if function_name in {"loc_1", "req_exp_1", "req_deg_1", "platform_1",
"language_1", "application_1", "company_1", "recruiter_1",
"des_deg_1", "des_exp_1", "country_1", "title_1", "area_1"}:
argument_str = "".join(arguments[:-1]).replace("'", '"')
match = re.match('^const\((["|a-z|A-Z|_|\d|_|\s|\+|/]+)\)$', argument_str)
assert match is not None
value = match.group(1)
job_var = vars['job']
logit = "(%s %s %s)" % (function_name[:function_name.rindex("_")], job_var, value)
fo_logits.append(logit)
elif function_name in {"req_exp", "des_exp", "req_deg", "des_deg"}:
argument_str = "".join(arguments[:-1]).replace("'", '"')
match = re.match('^const\((["|a-z|A-Z|_|\d|_|\s|\+|/]+)\)$', argument_str)
assert match is None
job_var = vars['job']
logit = "(%s %s)" % (function_name, job_var)
fo_logits.append(logit)
elif function_name in {"salary_greater_than", "salary_less_than"}:
argument_str = "".join(arguments[:-1]).replace("'", '"')
match = re.match('^(\d+),(year|hour|month|)$', argument_str)
job_var = vars['job']
logit = "(%s %s %s %s)" % (function_name, job_var,
int(match.group(1)), match.group(2))
fo_logits.append(logit)
elif function_name in {"req_exp_0", "req_deg_0", "platform_0", "language_0", "company_0",
"des_exp_0", "title_0", "loc_0"}:
# Exists
vars['return_entity'] = "$1"
job_var = vars['job']
logit = "(%s %s %s)" % (function_name[:function_name.rindex("_")], job_var, "$1")
fo_logits.append(logit)
        funcs.append("req_deg_0")  # marker: any non-"job" entry makes to_lc emit the exists_ form
elif function_name == 'job':
job_var = vars['job']
logit = "(%s %s)" % ("job", job_var)
for l in fo_logits:
if l == logit:
break
else:
fo_logits.insert(0, logit)
elif function_name == "not":
argument_str = "".join(arguments[:-1]).replace("'", '"')
index = argument_str.index('(')
child_function_name = argument_str[:index]
match = re.match('^\(const\((["|a-z|A-Z|_|\d|_|\s|\+|/]+)\)\)$', argument_str[index:])
if child_function_name in {'req_exp_1', 'area_1', 'loc_1', 'req_deg_1', 'language_1',
'platform_1', 'company_1', 'area_1'}:
job_var = vars['job']
value = match.group(1)
key = "(%s %s %s)" % (child_function_name[:child_function_name.rindex("_")], job_var, value)
elif child_function_name in {"req_exp", "des_exp", "req_deg", "des_deg"}:
job_var = vars['job']
key = "(%s %s)" % (child_function_name, job_var)
else:
assert child_function_name == 'or'
# raise Exception("Not other")
if child_function_name != 'or':
target_logit = None
target_logit_idx = 0
for fidx, logit in enumerate(fo_logits):
if logit == key:
if target_logit is not None:
raise Exception("Only expect 1 logit")
target_logit = logit
target_logit_idx = fidx
print(key)
assert target_logit is not None
fo_logits[target_logit_idx] = "(not_ %s)" % target_logit
def to_lc(lambda_fo_logits, lambda_func):
if len(lambda_fo_logits) == 1:
body = lambda_fo_logits[0]
else:
body = "(and_ %s)" % " ".join(lambda_fo_logits)
if lambda_func == "job":
lc = "(lambda $0:e %s)" % body
else:
lc = "(lambda $1:e (exists_ $0:e %s))" % body
return lc
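# e.g. to_lc(['(job $0)', '(language $0 "java")'], 'job')
# -> '(lambda $0:e (and_ (job $0) (language $0 "java")))'; any other lambda_func value
# switches to the (exists_ $0:e ...) form, with $1 as the answer variable.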
def translate(funql):
funql_tokens = tokenize_funql(funql)
# A list of four tuples (table, column, op, value)
lambda_func = list()
lambda_fo_logits = list()
left_brackets = list()
nodes = list()
vars = {"job": "$0"}
for tidx, token in enumerate(funql_tokens):
if token.endswith('('):
left_brackets.append(tidx)
elif token == ')':
pidx = left_brackets.pop()
children_nodes = list()
for nidx, node in enumerate(nodes):
if pidx < node.lidx and tidx > node.ridx:
children_nodes.append(node)
for n in children_nodes:
nodes.remove(n)
if len(children_nodes) == 0:
sub_tokens = funql_tokens[pidx:tidx+1]
function_name = sub_tokens[0]
derive_fo_logits(
function_name, sub_tokens[1:], lambda_fo_logits,
lambda_func, vars
)
lf = "".join(sub_tokens)
else:
# Has children
sub_tokens = funql_tokens[pidx:tidx+1]
function_name = sub_tokens[0]
_inside_bracket_stack = 0
other_children = list()
children_num = 0
children_position = list()
for sub_token in sub_tokens[1:]:
if sub_token.endswith('('):
_inside_bracket_stack += 1
if _inside_bracket_stack == 1:
children_num += 1
children_position.append('bracket')
elif sub_token == ')':
_inside_bracket_stack -= 1
else:
if _inside_bracket_stack == 0 and sub_token != ',':
children_num += 1
other_children.append(sub_token)
children_position.append('token')
assert children_num == len(children_position)
lf = "".join(sub_tokens)
derive_fo_logits(
function_name, sub_tokens[1:], lambda_fo_logits,
lambda_func, vars
)
new_node = Node(lf, pidx, tidx)
nodes.append(new_node)
print(lambda_fo_logits)
if len(lambda_func) == 0:
lambda_func.append("job")
assert len(lambda_func) == 1 and len(lambda_fo_logits) >= 1
lc = to_lc(lambda_fo_logits, lambda_func[0])
return lc
if __name__ == '__main__':
questions, funqls = read_data('./job_funql_test_fixed.tsv')
sorted_funqls = sorted([(q, lf) for q, lf in zip(questions, funqls)], key=lambda x: len(x[1]))
with open("job_lambda_test.log", "w") as f:
for idx, (question, funql) in enumerate(sorted_funqls):
print(idx)
print(question)
print(funql)
lc = translate(funql)
print(lc)
print("==\n\n")
f.write("%s\n%s\n%s\n===\n\n" % (question, funql, lc))
| 7,857 | 37.145631 | 104 |
py
|
Unimer
|
Unimer-master/data/job/funql_to_sql.py
|
# coding=utf8
import re
import mysql.connector
db = mysql.connector.connect(
host="localhost",
user="root",
passwd="123456",
database="job",
auth_plugin='mysql_native_password'
)
def get_result(sql):
_sql = sql
cursor = db.cursor()
cursor.execute(_sql)
# print(cursor.description)
headers = cursor.description
results = cursor.fetchall()
return results
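# e.g. get_result('SELECT DISTINCT platform FROM platform') returns the matching rows
# as a list of tuples fetched through the shared mysql.connector cursor; the column
# metadata in cursor.description is read but not returned.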
def is_country_value(value):
if '"' in value:
query_value = value
else:
query_value = '"%s"' % value
if query_value in ['"canada"']:
return True
sql = "SELECT DISTINCT country FROM country WHERE country = %s" % query_value
return len(get_result(sql)) > 0
def is_city_value(value):
if '"' in value:
query_value = value
else:
query_value = '"%s"' % value
if query_value in ['"texas"', '"boston"', '"washington"', '"tulsa"',
'"new york"', '"los alamos"', '"seattle"', '"nashville"',
'"california"', '"colorado"', '"san jose"']:
return True
sql = "SELECT DISTINCT city_name FROM city WHERE city_name = %s" % query_value
return len(get_result(sql)) > 0
def is_degree_value(value):
if '"' in value:
query_value = value
else:
query_value = '"%s"' % value
if query_value in ['"BACS"', '"MSEE"', '"master"', '"MBA"', '"MA"']:
return True
sql = "SELECT DISTINCT job_id FROM job WHERE req_deg = %s or des_deg = %s" % (query_value, query_value)
return len(get_result(sql)) > 0
def is_language_value(value):
if '"' in value:
query_value = value
else:
query_value = '"%s"' % value
if query_value in ['"fortran"', '"cobol ii"', '"visual j++"', '"haskell"',
'"lisp"', '"pascal"', '"ada"', '"latex"', '"prolog"',
'"oracle"']:
return True
sql = "SELECT DISTINCT language FROM language WHERE language = %s" % query_value
return len(get_result(sql)) > 0
def is_platform_value(value):
if '"' in value:
query_value = value
else:
query_value = '"%s"' % value
if query_value in ['"pdp11"', '"silicon graphics"', '"x86"', '"linux"', '"commodores"']:
return True
sql = "SELECT DISTINCT platform FROM platform WHERE platform = %s" % query_value
return len(get_result(sql)) > 0
def is_application_value(value):
if '"' in value:
query_value = value
else:
query_value = '"%s"' % value
if query_value in ['"autocad"', '"microsoft word"', '"apache"', '"speedy3dgraphics"']:
return True
sql = "SELECT DISTINCT application FROM application WHERE application = %s" % query_value
return len(get_result(sql)) > 0
def is_company_value(value):
if '"' in value:
query_value = value
else:
query_value = '"%s"' % value
if query_value in ['"Microsoft"', '"JPL"', '"Dell"', '"Trilogy"', '"ibm"',
'"IBM"', '"HP"', '"Apple"', '"National Instruments"', '"Boeing"',
'"Lockheed Martin Aeronautics"', '"Compaq"', '"Applied Materials"']:
return True
sql = "SELECT DISTINCT company FROM job WHERE company = %s" % query_value
return len(get_result(sql)) > 0
def is_recruiter_value(value):
if '"' in value:
query_value = value
else:
query_value = '"%s"' % value
if query_value in ['"Longhorn"', '"Phil Smith"']:
return True
sql = "SELECT DISTINCT recruiter FROM job WHERE recruiter = %s" % query_value
return len(get_result(sql)) > 0
def is_title_value(value):
if '"' in value:
query_value = value
else:
query_value = '"%s"' % value
if query_value in ['"Ic Design Engineer"', '"Test Engineer"', '"research assistant"',
'"Sql Engineer"', '"Senior Consulting Engineer"',
'"NetWare Administrator"', '"Senior Development Engineer"',
'"Manufacturing Manager"', '"intern"', '"Consultant"']:
return True
sql = "SELECT DISTINCT title FROM job WHERE title = %s" % query_value
return len(get_result(sql)) > 0
def is_area_value(value):
if '"' in value:
query_value = value
else:
query_value = '"%s"' % value
if query_value in ['"ai"', '"statistics"', '"oil pipeline modeling"', '"management"']:
return True
sql = "SELECT DISTINCT area FROM area WHERE area = %s" % query_value
return len(get_result(sql)) > 0
def read_data(path):
questions, logical_forms = list(), list()
with open(path, 'r') as f:
for line in f:
line = line.strip()
splits = line.split('\t')
questions.append(splits[0])
logical_forms.append(splits[1])
return questions, logical_forms
def tokenize_funql(funql):
normalized_lf = funql.replace(" ", "::")
replacements = [
('(', '( '),
(')', ' ) '),
(',', ' , '),
]
for a, b in replacements:
normalized_lf = normalized_lf.replace(a, b)
tokens = [t if "::" not in t else t.replace("::", " ") for t in normalized_lf.split()]
return tokens
def derive_filters(function_name, arguments, filters, select):
function_name = function_name[:-1]
if function_name == 'const':
return
# Filters
if function_name == 'loc_1':
argument_str = "".join(arguments[:-1]).replace("'", '"')
match = re.match('^const\((["|a-z|A-Z|_|\d|_|\s]+)\)$', argument_str)
assert match is not None
value = match.group(1)
assert is_city_value(value) or is_country_value(value)
if is_country_value(value):
filters.append(('country', 'country', '=', value))
else:
filters.append(('city', 'city_name', '=', value))
elif function_name == 'req_exp_1':
argument_str = "".join(arguments[:-1]).replace("'", '"')
match = re.match('^const\((["|a-z|A-Z|_|\d|_|\s]+)\)$', argument_str)
assert match is not None
value = match.group(1)
assert re.match('^\d+$', value)
filters.append(('job', 'req_exp', '=', int(value)))
elif function_name == 'req_deg_1':
argument_str = "".join(arguments[:-1]).replace("'", '"')
match = re.match('^const\((["|a-z|A-Z|_|\d|_|\s]+)\)$', argument_str)
assert match is not None
value = match.group(1)
assert is_degree_value(value)
filters.append(('job', 'req_deg', '=', value))
elif function_name == 'platform_1':
argument_str = "".join(arguments[:-1]).replace("'", '"')
match = re.match('^const\((["|a-z|A-Z|_|\d|_|\s]+)\)$', argument_str)
assert match is not None
value = match.group(1)
assert is_platform_value(value)
filters.append(('platform', 'platform', '=', value))
elif function_name == 'language_1':
argument_str = "".join(arguments[:-1]).replace("'", '"')
match = re.match('^const\((["|a-z|A-Z|_|\d|_|\s|\+|/]+)\)$', argument_str)
assert match is not None
value = match.group(1)
assert is_language_value(value)
filters.append(('language', 'language', '=', value))
elif function_name == 'application_1':
argument_str = "".join(arguments[:-1]).replace("'", '"')
match = re.match('^const\((["|a-z|A-Z|_|\d|_|\s|\+|/]+)\)$', argument_str)
assert match is not None
value = match.group(1)
assert is_application_value(value)
filters.append(('application', 'application', '=', value))
elif function_name == 'company_1':
argument_str = "".join(arguments[:-1]).replace("'", '"')
match = re.match('^const\((["|a-z|A-Z|_|\d|_|\s|\+|/]+)\)$', argument_str)
assert match is not None
value = match.group(1)
assert is_company_value(value)
filters.append(('job', 'company', '=', value))
elif function_name == 'recruiter_1':
argument_str = "".join(arguments[:-1]).replace("'", '"')
match = re.match('^const\((["|a-z|A-Z|_|\d|_|\s|\+|/]+)\)$', argument_str)
assert match is not None
value = match.group(1)
assert is_recruiter_value(value)
filters.append(('job', 'recruiter', '=', value))
elif function_name == 'des_deg_1':
argument_str = "".join(arguments[:-1]).replace("'", '"')
match = re.match('^const\((["|a-z|A-Z|_|\d|_|\s]+)\)$', argument_str)
assert match is not None
value = match.group(1)
assert is_degree_value(value)
filters.append(('job', 'des_deg', '=', value))
elif function_name == 'des_exp_1':
argument_str = "".join(arguments[:-1]).replace("'", '"')
match = re.match('^const\((["|a-z|A-Z|_|\d|_|\s]+)\)$', argument_str)
assert match is not None
value = match.group(1)
assert re.match('^\d+$', value)
filters.append(('job', 'des_exp', '=', int(value)))
elif function_name == 'country_1':
argument_str = "".join(arguments[:-1]).replace("'", '"')
match = re.match('^const\((["|a-z|A-Z|_|\d|_|\s]+)\)$', argument_str)
assert match is not None
value = match.group(1)
assert is_country_value(value)
filters.append(('country', 'country', '=', value))
elif function_name == 'title_1':
argument_str = "".join(arguments[:-1]).replace("'", '"')
match = re.match('^const\((["|a-z|A-Z|_|\d|_|\s|\+|/]+)\)$', argument_str)
assert match is not None
value = match.group(1)
assert is_title_value(value)
filters.append(('job', 'title', '=', value))
elif function_name == 'area_1':
argument_str = "".join(arguments[:-1]).replace("'", '"')
match = re.match('^const\((["|a-z|A-Z|_|\d|_|\s|\+|/]+)\)$', argument_str)
assert match is not None
value = match.group(1)
assert is_area_value(value)
filters.append(('area', 'area', '=', value))
# Unary
elif function_name == 'req_exp':
argument_str = "".join(arguments[:-1]).replace("'", '"')
match = re.match('^\d+$', argument_str)
assert match is None
filters.append(('job', 'req_exp', 'is not', 'NULL'))
elif function_name == 'des_exp':
argument_str = "".join(arguments[:-1]).replace("'", '"')
match = re.match('^\d+$', argument_str)
assert match is None
filters.append(('job', 'des_exp', 'is not', 'NULL'))
elif function_name == 'req_deg':
argument_str = "".join(arguments[:-1]).replace("'", '"')
match = re.match('^const\((["|a-z|A-Z|_|\d|_|\s|\+|/]+)\)$', argument_str)
assert match is None
filters.append(('job', 'req_deg', 'is not', 'NULL'))
elif function_name == 'des_deg':
argument_str = "".join(arguments[:-1]).replace("'", '"')
match = re.match('^const\((["|a-z|A-Z|_|\d|_|\s|\+|/]+)\)$', argument_str)
assert match is None
filters.append(('job', 'des_deg', 'is not', 'NULL'))
elif function_name == 'salary_greater_than':
argument_str = "".join(arguments[:-1]).replace("'", '"')
match = re.match('^(\d+),(year|hour|month|)$', argument_str)
filters.append(('salary', 'money', '>=', int(match.group(1))))
filters.append(('salary', 'time', '=', match.group(2)))
elif function_name == 'salary_less_than':
argument_str = "".join(arguments[:-1]).replace("'", '"')
match = re.match('^(\d+),(year|hour|month|)$', argument_str)
filters.append(('salary', 'money', '<=', int(match.group(1))))
filters.append(('salary', 'time', '=', match.group(2)))
# Alter select
elif function_name == 'req_exp_0':
# Return experience
select.append(('job', 'req_exp'))
elif function_name == 'req_deg_0':
# Return experience
select.append(('job', 'req_deg'))
elif function_name == 'platform_0':
# Return experience
select.append(('platform', 'platform'))
elif function_name == 'language_0':
select.append(('language', 'language'))
elif function_name == 'company_0':
select.append(('job', 'company'))
elif function_name == 'des_exp_0':
select.append(('job', 'des_exp'))
elif function_name == 'title_0':
select.append(('job', 'title'))
elif function_name == 'loc_0':
select.append(('city', 'city_name'))
elif function_name == "not":
argument_str = "".join(arguments[:-1]).replace("'", '"')
index = argument_str.index('(')
child_function_name = argument_str[:index]
match = re.match('^\(const\((["|a-z|A-Z|_|\d|_|\s|\+|/]+)\)\)$', argument_str[index:])
if child_function_name == 'req_exp_1':
value = match.group(1)
key = ('job', 'req_exp', int(value))
elif child_function_name == 'req_deg':
key = ('job', 'req_deg',)
elif child_function_name == 'area_1':
value = match.group(1)
key = ('area', 'area', value)
elif child_function_name == 'loc_1':
value = match.group(1)
key = ('city', 'city_name', value)
elif child_function_name == 'req_deg_1':
value = match.group(1)
key = ('job', 'req_deg', value)
elif child_function_name == 'language_1':
value = match.group(1)
print("Language Not Value: ", value)
key = ('language', 'language', value)
elif child_function_name == 'platform_1':
value = match.group(1)
key = ('platform', 'platform', value)
elif child_function_name == 'company_1':
value = match.group(1)
key = ('job', 'company', value)
elif child_function_name == 'req_exp':
key = ('job', 'req_exp')
else:
assert child_function_name == 'or'
# raise Exception("Not other")
if child_function_name != 'or':
target_filter = None
target_filter_idx = 0
for fidx, filter in enumerate(filters):
if filter[0] == key[0] and filter[1] == key[1]:
if len(key) == 3:
if filter[-1] == key[-1]:
if target_filter is not None:
raise Exception("Only expect 1 filter")
target_filter = filter
target_filter_idx = fidx
else:
if target_filter is not None:
raise Exception("Only expect 1 filter")
target_filter = filter
target_filter_idx = fidx
print(key)
assert target_filter is not None
op = target_filter[-2]
not_filter = None
if op == '=':
not_filter = (target_filter[0], target_filter[1], "!=", target_filter[-1])
elif op == 'is not':
not_filter = (target_filter[0], target_filter[1], "is", target_filter[-1])
elif op == '>':
not_filter = (target_filter[0], target_filter[1], "is", "NULL")
assert not_filter is not None
filters[target_filter_idx] = not_filter
class Node:
def __init__(self, lf, lidx, ridx):
self.lf = lf
self.lidx = lidx
self.ridx = ridx
def to_sql(select, filters):
select_clause = "SELECT DISTINCT %s.%s" % (select[0][0], select[0][1])
where_clause = list()
tables = {select[0][0]}
for filter in filters:
tables.add(filter[0])
value = filter[-1]
if isinstance(value, str) and value != 'NULL':
if '"' in value:
query_value = value
else:
query_value = '"%s"' % value
else:
query_value = value
clause = "%s.%s %s %s" % (filter[0], filter[1], filter[2], query_value)
where_clause.append(clause)
where_clause = " AND ".join(where_clause)
if "job" not in tables:
tables.add("job")
assert "job" in tables
# from clause
tables = sorted(list(tables))
tables.remove("job")
tables = ['job'] + tables
if len(tables) == 1:
from_clause = "job"
else:
from_clause = ""
for tidx, t in enumerate(tables):
if tidx == 0:
continue
elif tidx == 1:
from_clause = "job JOIN %s ON job.job_id = %s.job_id" % (t, t)
else:
from_clause += " JOIN %s ON job.job_id = %s.job_id" % (t, t)
if len(where_clause) > 0:
sql = select_clause + " FROM " + from_clause + " WHERE " + where_clause
else:
sql = select_clause + " FROM " + from_clause
return sql
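# Illustrative sketch of what to_sql produces (the filter value "linux" below is a made-up example):
#   to_sql([('job', 'job_id')], [('platform', 'platform', '=', 'linux')])
# should yield:
#   SELECT DISTINCT job.job_id FROM job JOIN platform ON job.job_id = platform.job_id
#   WHERE platform.platform = "linux"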
def translate(funql):
funql_tokens = tokenize_funql(funql)
# A list of four tuples (table, column, op, value)
sql_select = list()
sql_where = list()
left_brackets = list()
nodes = list()
for tidx, token in enumerate(funql_tokens):
if token.endswith('('):
left_brackets.append(tidx)
elif token == ')':
pidx = left_brackets.pop()
children_nodes = list()
for nidx, node in enumerate(nodes):
if pidx < node.lidx and tidx > node.ridx:
children_nodes.append(node)
for n in children_nodes:
nodes.remove(n)
if len(children_nodes) == 0:
sub_tokens = funql_tokens[pidx:tidx+1]
function_name = sub_tokens[0]
derive_filters(
function_name, sub_tokens[1:], sql_where, sql_select
)
lf = "".join(sub_tokens)
else:
# Has children
sub_tokens = funql_tokens[pidx:tidx+1]
function_name = sub_tokens[0]
_inside_bracket_stack = 0
other_children = list()
children_num = 0
children_position = list()
for sub_token in sub_tokens[1:]:
if sub_token.endswith('('):
_inside_bracket_stack += 1
if _inside_bracket_stack == 1:
children_num += 1
children_position.append('bracket')
elif sub_token == ')':
_inside_bracket_stack -= 1
else:
if _inside_bracket_stack == 0 and sub_token != ',':
children_num += 1
other_children.append(sub_token)
children_position.append('token')
assert children_num == len(children_position)
lf = "".join(sub_tokens)
derive_filters(
function_name, sub_tokens[1:], sql_where, sql_select
)
new_node = Node(lf, pidx, tidx)
nodes.append(new_node)
print(sql_where)
if len(sql_select) == 0:
sql_select.append(("job", "job_id"))
print(sql_select)
assert len(sql_select) == 1
sql = to_sql(sql_select, sql_where)
print(sql)
return sql
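# Minimal end-to-end sketch of translate, traced from the code above rather than taken from the
# dataset: translate("job(all)") finds no filters, falls back to the default select column, and
# returns: SELECT DISTINCT job.job_id FROM job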
if __name__ == '__main__':
questions, prologs = read_data('./job_funql_test_fixed.tsv')
sorted_prologs = sorted([(q, lf) for q, lf in zip(questions, prologs)], key=lambda x: len(x[1]))
with open("job_sql_test.log", "w") as f:
for idx, (question, prolog) in enumerate(sorted_prologs):
print(idx)
print(question)
print(prolog)
sql = translate(prolog)
print("==\n\n")
f.write("%s\n%s\n%s\n===\n\n" % (question, prolog, sql))
| 19,937 | 37.268714 | 107 |
py
|
Unimer
|
Unimer-master/data/job/auto_funql_transform.py
| 0 | 0 | 0 |
py
|
|
Unimer
|
Unimer-master/data/job/prolog_to_funql.py
|
# coding=utf8
import re
def tokenize_prolog(logical_form):
# Tokenize Prolog
normalized_lf = logical_form.replace(" ", "::")
replacements = [
('(', ' ( '),
(')', ' ) '),
(',', ' , '),
(';', ' ; '),
("\\+", " \\+ "),
]
for a, b in replacements:
normalized_lf = normalized_lf.replace(a, b)
toks = [t if "::" not in t else t.replace(
"::", " ") for t in normalized_lf.split()]
return toks
def standardize_prolog_varnames(prolog):
toks = tokenize_prolog(prolog)
varnames = {}
new_toks = []
for t in toks:
if re.match('^[A-Z]$', t) or re.match('^_\d+$', t):
if t in varnames:
new_toks.append(varnames[t])
else:
new_varname = chr(ord('A')+len(varnames))
varnames[t] = new_varname
new_toks.append(new_varname)
else:
new_toks.append(t)
lf = ''.join(new_toks)
lf = lf.replace('\\+ (', '\+(')
return lf
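# Illustrative example (hypothetical input): standardize_prolog_varnames("job(_123),language(_123,L)")
# renames variables in order of first appearance, giving "job(A),language(A,B)".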
def read_data(path):
questions, logical_forms = list(), list()
with open(path, 'r') as f:
for line in f:
line = line.strip()
splits = line.split('\t')
questions.append(splits[0])
logical_forms.append(standardize_prolog_varnames(splits[1]))
return questions, logical_forms
def tokenize_prolog_2(funql):
normalized_lf = funql.replace(" ", "::")
replacements = [
('(', '( '),
(')', ' ) '),
(',', ' , '),
(';', ' ; '),
]
for a, b in replacements:
normalized_lf = normalized_lf.replace(a, b)
tokens = [t.strip() if "::" not in t else t.replace("::", " ").strip() for t in normalized_lf.split()]
return tokens
class Node:
def __init__(self, lf, lidx, ridx, input_vars=None, output_var=None):
self.lf = lf
self.lidx = lidx
self.ridx = ridx
self.input_vars = input_vars
self.output_var = output_var
def is_var(t):
return re.match('^[A-Z]$', t) or re.match('^_\d+$', t)
def rewrite(function_name, arguments, variable_constraints):
rewritten_function_name = function_name[:-1]
valid_arguments = [arg for arg in arguments if arg != ',']
number_of_arguments = len(valid_arguments)
argument_vars = list()
for arg in valid_arguments:
if is_var(arg):
argument_vars.append(1)
else:
argument_vars.append(0)
output_variable = None
input_variables = list()
if number_of_arguments == 1:
if sum(argument_vars) == 0:
raise Exception("No unary entity predicate")
else:
# the single argument is a variable
input_variables = valid_arguments
output_variable = valid_arguments[0]
expr = "%s(all)" % rewritten_function_name
else:
# 2 or 3
if sum(argument_vars) == number_of_arguments:
# TODO: fix
input_variables = valid_arguments
expr = "%s(%s)" % (rewritten_function_name, ",".join(valid_arguments))
elif sum(argument_vars) == 0:
raise Exception("No binary entity predicate")
else:
assert sum(argument_vars) == 1
# At least one argument vars
index = argument_vars.index(1)
input_variables = valid_arguments
output_variable = valid_arguments[index]
valid_arguments.remove(valid_arguments[index])
expr = "%s(%s)" % (rewritten_function_name, ",".join(valid_arguments))
return expr, input_variables, output_variable
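# Illustrative behaviour of rewrite (function names end with "(" as produced by the tokenizer; the
# inputs below are hypothetical):
#   rewrite('language(', ['A', ',', 'B'], {})  ->  ("language(A,B)", ['A', 'B'], None)
#   rewrite('salary_greater_than(', ['A', ',', '50000', ',', 'year'], {})
#       -> expr "salary_greater_than(50000,year)" with output variable 'A'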
def rewrite_intersection(nodes):
input_vars = list()
output_var = None
for n in nodes:
if n.output_var is not None:
output_var = n.output_var
break
# Merge Const
const_nodes = list()
for node in nodes:
if node.lf.startswith('const('):
const_nodes.append(node)
for cn in const_nodes:
nodes.remove(cn)
# Merge
for cn in const_nodes:
assert cn.output_var is not None
for node in nodes:
if node.input_vars is not None \
and cn.output_var in node.input_vars:
tokens = [t for t in tokenize_prolog_2(node.lf) if t != ',']
for tidx, t in enumerate(tokens):
if is_var(t) and tokens[tidx] == cn.output_var:
tokens[tidx] = cn.lf
break
if len(node.input_vars) == 2:
node.lf = '%s_%d(%s)' % (tokens[0][:-1], tidx - 1, cn.lf)
index = 1 - node.input_vars.index(cn.output_var)
node.output_var = node.input_vars[index]
output_var = node.output_var
# print("Rewrite Output Var: ", node.output_var)
else:
node.lf = ''.join(tokens)
break
is_all_same = True
prev_output_var = None
for nidx, node in enumerate(nodes):
if nidx == 0:
prev_output_var = node.output_var
else:
if node.output_var is None or node.output_var != prev_output_var:
is_all_same = False
break
unary_predicate_nodes = list()
if is_all_same:
# Take job(all) first
for node in nodes:
if node.lf.endswith('(all)'):
unary_predicate_nodes.append(node)
for un in unary_predicate_nodes:
nodes.remove(un)
unary_predicate_nodes = unary_predicate_nodes[::-1]
if len(unary_predicate_nodes) > 0:
if len(nodes) == 0:
rewritten_lf = unary_predicate_nodes[0].lf
for un in unary_predicate_nodes[1:]:
tokens = tokenize_prolog_2(un.lf)
rewritten_lf = "%s(%s)" % (tokens[0][:-1], rewritten_lf)
else:
if len(nodes) == 1:
rewritten_lf = nodes[0].lf
else:
rewritten_lf = "%s(%s)" % ("intersect", ",".join([n.lf for n in nodes]))
for un in unary_predicate_nodes:
tokens = tokenize_prolog_2(un.lf)
rewritten_lf = "%s(%s)" % (tokens[0][:-1], rewritten_lf)
else:
assert len(nodes) > 0
if len(nodes) == 1:
rewritten_lf = nodes[0].lf
else:
rewritten_lf = "%s(%s)" % ("intersect", ",".join([n.lf for n in nodes]))
return rewritten_lf, input_vars, output_var
def translate(prolog):
left_brackets = list()
tokens = tokenize_prolog_2(prolog)
# print(tokens)
global_variable_constraints = dict()
nodes = list()
for tidx, token in enumerate(tokens):
if token.endswith('('):
left_brackets.append(tidx)
elif token == ')':
pidx = left_brackets.pop()
children_nodes = list()
for nidx, node in enumerate(nodes):
if pidx < node.lidx and tidx > node.ridx:
children_nodes.append(node)
for n in children_nodes:
nodes.remove(n)
# Rewrite
# FunQL has a very nested structure
if len(children_nodes) == 0:
sub_tokens = tokens[pidx:tidx]
function_name = sub_tokens[0]
rewritten_lf, input_vars, output_var = rewrite(
function_name, sub_tokens[1:],
global_variable_constraints
)
else:
# Has children
sub_tokens = tokens[pidx:tidx]
function_name = sub_tokens[0]
_inside_bracket_stack = 0
other_children = list()
children_num = 0
children_position = list()
for sub_token in sub_tokens[1:]:
if sub_token.endswith('('):
_inside_bracket_stack += 1
if _inside_bracket_stack == 1:
children_num += 1
children_position.append('bracket')
elif sub_token == ')':
_inside_bracket_stack -= 1
else:
if _inside_bracket_stack == 0 and sub_token != ',':
children_num += 1
other_children.append(sub_token)
children_position.append('token')
assert children_num == len(children_position)
if function_name == '(':
if ';' in other_children:
meta = 'or'
else:
meta = 'intersect'
if meta == 'intersect':
rewritten_lf, input_vars, output_var = rewrite_intersection(children_nodes)
else:
output_var = children_nodes[0].output_var
input_vars = list()
rewritten_lf = "%s(%s)" % (meta, ",".join([n.lf for n in children_nodes]))
elif function_name == '\\+(':
output_var = children_nodes[0].output_var
input_vars = list()
rewritten_lf = "%s(%s)" % ("not", ",".join([n.lf for n in children_nodes]))
# print("Not: ", input_vars, output_var)
else:
arguments = list()
for position in children_position:
if position == 'bracket':
n = children_nodes.pop(0)
arguments.append(n.lf)
else:
sub_token = other_children.pop(0)
arguments.append(sub_token)
rewritten_lf, input_vars, output_var = rewrite(
function_name, arguments,
global_variable_constraints
)
new_node = Node(rewritten_lf, pidx, tidx, input_vars=input_vars, output_var=output_var)
nodes.append(new_node)
assert len(nodes) == 1
funql = nodes[0].lf
return funql
def tokenize_funql(funql):
normalized_lf = funql.replace(" ", "::")
replacements = [
('(', ' ( '),
(')', ' ) '),
(',', ' , '),
]
for a, b in replacements:
normalized_lf = normalized_lf.replace(a, b)
tokens = [t if "::" not in t else t.replace("::", " ") for t in normalized_lf.split()]
return tokens
def validate(funql):
tokens = tokenize_funql(funql)
for token in tokens:
if is_var(token):
return False
return True
if __name__ == '__main__':
questions, prologs = read_data('./job_prolog_train.tsv')
sorted_prologs = sorted([(q, lf) for q, lf in zip(questions, prologs)], key=lambda x: len(x[1]))
with open("job_funql_train.log", "w") as f:
for idx, (question, prolog) in enumerate(sorted_prologs):
print(idx)
print(question)
print(prolog)
funql = translate(prolog)
is_valid = validate(funql)
print("Is Valid: ", is_valid)
print(funql)
print('===\n\n')
if not is_valid:
f.write("Incorrect FunQL\n")
f.write("%s\n%s\n%s\n===\n\n" % (question, prolog, funql))
else:
f.write("%s\n%s\n%s\n===\n\n" % (question, prolog, funql))
| 11,528 | 34.042553 | 106 |
py
|
Unimer
|
Unimer-master/grammars/entity_matcher.py
|
# coding=utf8
import os
import re
import numpy as np
from typing import List
from overrides import overrides
from nltk.corpus import stopwords
from allennlp.data.tokenizers import Token
from .atis.atis_entity_matcher import ATISEntityMatcher
from .atis.atis_sql_entity_matcher import ATISSQLEntityMatcher
from .atis.atis_lambda_calculus_entity_matcher import ATISLambdaCalculusEntityMatcher
from .atis.atis_seq2seq_entity_matcher import ATISSeq2SeqEntityMatcher
from .atis.atis_seq2seq_sql_entity_matcher import ATISSeq2SeqSQLEntityMatcher
from .atis.atis_seq2seq_lambda_calculus_entity_matcher import ATISSeq2SeqLambdaCalculusEntityMatcher
class BasicEntityMatcher():
def process_terminal_rule(self, rule):
# Process terminal
terminal = rule.rhs.strip('[] ')
terminal = terminal.replace("'", "").replace('"', "").replace("_", " ").replace("%", "").replace(":", " : ")
terminal = re.sub(' +', ' ', terminal)
terminal_tokens = terminal.lower().split(" ")
try:
index = terminal_tokens.index(":")
except ValueError:
pass
else:
terminal_tokens = terminal_tokens[:index]
return terminal_tokens
def match(self, question_tokens: List[Token], rules: List,
copy_terminal_set: List, pad_index: int, max_ngram=6):
token_rule_map = list()
stop_words = set(stopwords.words('english'))
for token in question_tokens:
matches = list()
if token.text in stop_words:
matches = [pad_index]
else:
for rule in rules: # Instance of Production Rule
if rule.lhs in copy_terminal_set:
# Process terminal
terminal = rule.rhs.strip('[] ')
terminal = terminal.replace("'", "").replace('"', "").replace("_", " ").replace("%",
"").replace(
":", " ")
terminal = re.sub(' +', ' ', terminal)
terminal_tokens = terminal.lower().split(" ")
if token.text in terminal_tokens:
matches.append(rule.rule_id)
if len(matches) == 0:
matches = [pad_index]
token_rule_map.append(np.array(matches, dtype=int))
return token_rule_map
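# Shape of the returned value (illustrative; the rule ids below are invented): one numpy array of
# matching terminal-rule ids per question token, e.g. for ["flights", "to", "boston"] something like
# [array([pad_index]), array([pad_index]), array([12, 57])] -- stop words and unmatched tokens fall
# back to pad_index.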
class EntityMatcher(BasicEntityMatcher):
@overrides
def match(self, question_tokens: List[Token], rules: List,
copy_terminal_set: List, pad_index: int, max_ngram=6):
length = len(question_tokens)
token_rule_map = [list() for i in range(length)]
stop_words = set(stopwords.words('english'))
tidx = 0
while tidx < length:
token = question_tokens[tidx]
if token.text in stop_words:
tidx += 1
continue
for i in range(min(max_ngram, length - tidx)):
string = ' '.join([t.text for t in question_tokens[tidx:tidx + 1 + i]]).strip().lower()
for rule in rules:
if rule.lhs in copy_terminal_set:
terminal_tokens = self.process_terminal_rule(rule)
terminal_string = ' '.join(terminal_tokens)
if string == terminal_string:
# Add rule
for index in range(tidx, tidx + 1 + i):
token_rule_map[index].append(rule.rule_id)
tidx += 1
for midx, m in enumerate(token_rule_map):
if len(m) == 0:
m.append(pad_index)
token_rule_map[midx] = np.array(m, dtype=int)
return token_rule_map
class GEOLambdaCalculusEntityMatcher(EntityMatcher):
@overrides
def process_terminal_rule(self, rule):
# Process terminal
terminal = rule.rhs.strip('[] ')
terminal = terminal.replace("'", "").replace('"', "").lower().strip()
terminal = re.sub(' +', ' ', terminal)
terminal_tokens = terminal.split(':')
assert len(terminal_tokens) == 2
terminal_type = terminal_tokens[1]
terminal_tokens = terminal_tokens[0].split("_")
if terminal_type == 'r':
# River
terminal_tokens.remove("river")
elif terminal_type == 'c':
terminal_tokens = terminal_tokens[:-1]
return terminal_tokens
def get_entity_matcher(task, language):
matcher = None
if task == 'atis':
db_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'data', 'atis', 'db')
if language in ['lambda', 'lambda2', 'lambda3', 'lambda4',]:
matcher = ATISLambdaCalculusEntityMatcher(db_path)
elif language in ['prolog', 'funql', 'typed_funql', 'prolog2']:
matcher = ATISEntityMatcher(db_path)
else:
matcher = ATISSQLEntityMatcher(db_path)
elif task == 'geo':
if language in ['lambda', 'lambda2']:
matcher = GEOLambdaCalculusEntityMatcher()
else:
matcher = EntityMatcher()
elif task == 'job':
matcher = EntityMatcher()
return matcher
def get_seq2seq_entity_matcher(task, language):
matcher = None
if task == 'atis':
db_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'data', 'atis', 'db')
if language in ['lambda', 'lambda2', 'lambda3', 'lambda4',]:
matcher = ATISSeq2SeqLambdaCalculusEntityMatcher(db_path)
elif language in ['prolog', 'funql', 'typed_funql', 'prolog2']:
matcher = ATISSeq2SeqEntityMatcher(db_path)
else:
matcher = ATISSeq2SeqSQLEntityMatcher(db_path)
return matcher
| 5,894 | 40.223776 | 116 |
py
|
Unimer
|
Unimer-master/grammars/utils.py
|
# coding=utf8
import re
from collections import defaultdict
from sys import exc_info
from typing import List, Dict, Set
from overrides import overrides
from parsimonious.exceptions import VisitationError, UndefinedLabel
from parsimonious.expressions import Literal, OneOf, Sequence
from parsimonious.grammar import Grammar
from parsimonious.nodes import Node, NodeVisitor
from six import reraise
from allennlp.data.tokenizers import WordTokenizer
from .geo import geo_normalization, geo_tokenizer
from .job import job_normalization, job_tokenizer
from .atis import atis_normalization, atis_tokenizer
WHITESPACE_REGEX = re.compile(" wsp |wsp | wsp| ws |ws | ws")
AGG_OPS = ('none', 'max', 'min', 'count', 'sum', 'avg')
WHERE_OPS = ('not', 'between', '=', '>', '<', '>=',
'<=', '!=', 'in', 'like', 'is', 'exists')
def format_grammar_string(grammar_dictionary: Dict[str, List[str]]) -> str:
"""
Formats a dictionary of production rules into the string format expected
by the Parsimonious Grammar class.
"""
return '\n'.join([f"{nonterminal} = {' / '.join(right_hand_side)}"
for nonterminal, right_hand_side in grammar_dictionary.items()])
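# Example (a toy grammar, not one of the bundled ones):
# format_grammar_string({"statement": ["(expression ws)"], "expression": ['"foo"', '"bar"']})
# returns the Parsimonious-style string:
# statement = (expression ws)
# expression = "foo" / "bar"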
def initialize_valid_actions(grammar: Grammar,
keywords_to_uppercase: List[str] = None) -> Dict[str, List[str]]:
"""
We initialize the valid actions with the global actions that result from
the grammar. The keys represent the nonterminals in the grammar
and the values are lists of the valid actions of that nonterminal.
"""
valid_actions: Dict[str, Set[str]] = defaultdict(set)
for key in grammar:
rhs = grammar[key]
# Sequence represents a series of expressions that match pieces of the text in order.
# Eg. A -> B C
if isinstance(rhs, Sequence):
valid_actions[key].add(
format_action(key, " ".join(rhs._unicode_members()), # pylint: disable=protected-access
keywords_to_uppercase=keywords_to_uppercase))
# OneOf represents a series of expressions, one of which matches the text.
# Eg. A -> B / C
elif isinstance(rhs, OneOf):
for option in rhs._unicode_members(): # pylint: disable=protected-access
valid_actions[key].add(format_action(key, option,
keywords_to_uppercase=keywords_to_uppercase))
# A string literal, eg. "A"
elif isinstance(rhs, Literal):
if rhs.literal != "":
valid_actions[key].add(format_action(key, repr(rhs.literal),
keywords_to_uppercase=keywords_to_uppercase))
else:
valid_actions[key] = set()
valid_action_strings = {key: sorted(value)
for key, value in valid_actions.items()}
return valid_action_strings
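# Rough sketch of the output for a toy grammar (exact string forms depend on Parsimonious internals):
# for Grammar('expression = "foo" / "bar"') this yields something like
# {'expression': ['expression -> ["bar"]', 'expression -> ["foo"]']} (values are sorted).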
def format_action(nonterminal: str,
right_hand_side: str,
is_string: bool = False,
is_number: bool = False,
keywords_to_uppercase: List[str] = None) -> str:
"""
This function formats an action as it appears in models. It
splits productions based on the special `ws` and `wsp` rules,
which are used in grammars to denote whitespace, and then
rejoins these tokens into a formatted, space-separated list.
Importantly, note that it `does not` split on spaces in
the grammar string, because these might not correspond
to spaces in the language the grammar recognises.
Parameters
----------
nonterminal : ``str``, required.
The nonterminal in the action.
right_hand_side : ``str``, required.
The right hand side of the action
(i.e the thing which is produced).
is_string : ``bool``, optional (default = False).
Whether the production produces a string.
If it does, it is formatted as ``nonterminal -> ['string']``
is_number : ``bool``, optional, (default = False).
Whether the production produces a number.
If it does, it is formatted as ``nonterminal -> ['number']``
keywords_to_uppercase: ``List[str]``, optional, (default = None)
Keywords in the grammar to uppercase. In the case of sql,
this might be SELECT, MAX etc.
"""
keywords_to_uppercase = keywords_to_uppercase or []
if right_hand_side.upper() in keywords_to_uppercase:
right_hand_side = right_hand_side.upper()
if is_string:
return f'{nonterminal} -> ["\'{right_hand_side}\'"]'
elif is_number:
return f'{nonterminal} -> ["{right_hand_side}"]'
else:
right_hand_side = right_hand_side.lstrip("(").rstrip(")")
child_strings = [token for token in WHITESPACE_REGEX.split(
right_hand_side) if token]
child_strings = [tok.upper() if tok.upper(
) in keywords_to_uppercase else tok for tok in child_strings]
return f"{nonterminal} -> [{' '.join(child_strings)}]"
def action_sequence_to_logical_form(action_sequences: List[str], add_table_names: bool = False) -> str:
# Convert an action sequence like ['statement -> [query, ";"]', ...] to the
# SQL string.
query = []
for action in action_sequences:
nonterminal, right_hand_side = action.split(' -> ')
right_hand_side_tokens = right_hand_side[1:-1].split(' ')
if nonterminal == 'statement':
query.extend(right_hand_side_tokens)
else:
for query_index, token in list(enumerate(query)):
if token == nonterminal:
# if nonterminal == 'column_name' and '@' in right_hand_side_tokens[0] and len(right_hand_side_tokens) == 1:
# if add_table_names:
# table_name, column_name = right_hand_side_tokens[0].split('@')
# if '.' in table_name:
# table_name = table_name.split('.')[0]
# right_hand_side_tokens = [table_name + '.' + column_name]
# else:
# right_hand_side_tokens = [right_hand_side_tokens[0].split('@')[-1]]
query = query[:query_index] + \
right_hand_side_tokens + \
query[query_index + 1:]
break
return ' '.join([token.strip('"') for token in query])
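# Worked example with a toy action sequence (not from a real grammar):
# action_sequence_to_logical_form(['statement -> [query ";"]',
#                                  'query -> ["SELECT" col]',
#                                  'col -> ["name"]'])
# returns 'SELECT name ;'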
class SqlVisitor(NodeVisitor):
"""
``SqlVisitor`` performs a depth-first traversal of the AST. It takes the parse tree
and gives us an action sequence that resulted in that parse. Since the visitor has mutable
state, we define a new ``SqlVisitor`` for each query. To get the action sequence, we create
a ``SqlVisitor`` and call parse on it, which returns a list of actions. Ex.
sql_visitor = SqlVisitor(grammar)
action_sequence = sql_visitor.parse(query)
Importantly, this ``SqlVisitor`` skips over ``ws`` and ``wsp`` nodes,
because they do not hold any meaning, and make an action sequence
much longer than it needs to be.
Parameters
----------
grammar : ``Grammar``
A Grammar object that we use to parse the text.
keywords_to_uppercase: ``List[str]``, optional, (default = None)
Keywords in the grammar to uppercase. In the case of sql,
this might be SELECT, MAX etc.
"""
def __init__(self, grammar: Grammar, keywords_to_uppercase: List[str] = None) -> None:
self.action_sequence: List[str] = []
self.grammar: Grammar = grammar
self.keywords_to_uppercase = keywords_to_uppercase or []
@overrides
def generic_visit(self, node: Node, visited_children: List[None]) -> List[str]:
self.add_action(node)
if node.expr.name == 'statement':
return self.action_sequence
return []
def add_action(self, node: Node) -> None:
"""
For each node, we accumulate the rules that generated its children in a list.
"""
if node.expr.name and node.expr.name not in ['ws', 'wsp']:
nonterminal = f'{node.expr.name} -> '
if isinstance(node.expr, Literal):
right_hand_side = f'["{node.text}"]'
else:
child_strings = []
for child in node.__iter__():
if child.expr.name in ['ws', 'wsp']:
continue
if child.expr.name != '':
child_strings.append(child.expr.name)
else:
child_right_side_string = child.expr._as_rhs().lstrip("(").rstrip(
")") # pylint: disable=protected-access
child_right_side_list = [tok for tok in
WHITESPACE_REGEX.split(child_right_side_string) if tok]
child_right_side_list = [tok.upper() if tok.upper() in
self.keywords_to_uppercase else tok
for tok in child_right_side_list]
child_strings.extend(child_right_side_list)
right_hand_side = "[" + " ".join(child_strings) + "]"
rule = nonterminal + right_hand_side
self.action_sequence = [rule] + self.action_sequence
@overrides
def visit(self, node):
"""
See the ``NodeVisitor`` visit method. This just changes the order in which
we visit nonterminals from right-to-left to left-to-right.
"""
method = getattr(self, 'visit_' + node.expr_name, self.generic_visit)
# Call that method, and show where in the tree it failed if it blows
# up.
try:
# Changing this to reverse here!
return method(node, [self.visit(child) for child in reversed(list(node))])
except (VisitationError, UndefinedLabel):
# Don't catch and re-wrap already-wrapped exceptions.
raise
except self.unwrapped_exceptions:
raise
except Exception: # pylint: disable=broad-except
# Catch any exception, and tack on a parse tree so it's easier to
# see where it went wrong.
exc_class, exc, traceback = exc_info()
reraise(VisitationError, VisitationError(
exc, exc_class, node), traceback)
def format_col_unit(col_unit, column_names, table_names):
agg_id, col_id, _ = col_unit
agg = '' if int(agg_id) == 0 else AGG_OPS[agg_id]
if col_id == 0:
result = "*"
else:
result = "%s@%s" % (
table_names[column_names[col_id][0]], column_names[col_id][1])
return agg, result
def transform_query_tree(query_tree, schema):
# from
from_clause = query_tree['from']
table_names = schema['table_names_original']
column_names = schema['column_names_original']
# table_units = from_clause['table_units']
# column_length = len(schema['column_names'])
# from_table_names = list()
# from_table_entities = list()
# for tu in table_units:
# if tu[0] == 'table_unit':
# from_table_names.append(table_names[tu[1]])
# from_table_entities.append("@entity_%d" % (tu[1] + column_length))
# select
select_clause = query_tree['select']
select_columns = list()
for agg_id, val_unit in select_clause[1]:
unit_op, col_unit1, col_unit2 = val_unit
col_unit1[0] = agg_id
select_columns.append(format_col_unit(
col_unit1, column_names, table_names))
# groupby clause
groupby_clause = query_tree.get('groupBy', None)
groupby_columns = list()
if groupby_clause:
for col_unit in groupby_clause:
groupby_columns.append(format_col_unit(
col_unit, column_names, table_names))
# orderby clause
orderby_clause = query_tree.get('orderBy', None)
orderby_direction = ''
orderby_columns = list()
if orderby_clause:
orderby_direction = orderby_clause[0]
for val_unit in orderby_clause[1]:
unit_op, col_unit1, col_unit2 = val_unit
orderby_columns.append(format_col_unit(
col_unit1, column_names, table_names))
# limit clause
limit_clause = query_tree.get('limit', None)
limit_value = -1
if limit_clause:
limit_value = limit_clause
# where clause
where_clause = query_tree.get('where', None)
where_columns = list()
if where_clause:
for cond_unit in where_clause:
if isinstance(cond_unit, str):
where_columns.append(cond_unit)
continue
not_op, op_id, val_unit, val1, val2 = cond_unit
unit_op, col_unit1, col_unit2 = val_unit
if not_op:
operator = "not " + WHERE_OPS[op_id]
else:
operator = WHERE_OPS[op_id]
agg, col = format_col_unit(col_unit1, column_names, table_names)
if operator != 'between':
if isinstance(val1, dict):
value1 = '(' + transform_query_tree(val1, schema) + ')'
else:
value1 = 'value'
where_columns.append((col, operator, str(value1)))
else:
if isinstance(val1, dict):
value1 = '(' + transform_query_tree(val1, schema) + ')'
else:
value1 = 'value'
if isinstance(val2, dict):
value2 = '(' + transform_query_tree(val2, schema) + ')'
else:
value2 = 'value'
where_columns.append(
(col, operator, str(value1), "and", str(value2)))
# having clause
having_clause = query_tree.get('having', None)
having_columns = list()
if having_clause:
for cond_unit in having_clause:
if isinstance(cond_unit, str):
having_columns.append(cond_unit)
continue
not_op, op_id, val_unit, val1, val2 = cond_unit
unit_op, col_unit1, col_unit2 = val_unit
if not_op:
operator = "not " + WHERE_OPS[op_id]
else:
operator = WHERE_OPS[op_id]
agg, col_idx = format_col_unit(
col_unit1, column_names, table_names)
if operator != 'between':
if isinstance(val1, dict):
value1 = '(' + transform_query_tree(val1, schema) + ')'
else:
value1 = 'value'
having_columns.append(
(agg + '(%s)' % col_idx, operator, str(value1)))
else:
if isinstance(val1, dict):
value1 = '(' + transform_query_tree(val1, schema) + ')'
else:
value1 = 'value'
if isinstance(val2, dict):
value2 = '(' + transform_query_tree(val2, schema) + ')'
else:
value2 = 'value'
having_columns.append(
(agg + '(%s)' % col_idx, operator, str(value1), "and", str(value2)))
sql = "SELECT " + \
', '.join([col if agg == '' else agg + '(%s)' %
col for agg, col in select_columns])
# sql += " FROM " + " JOIN ".join(from_table_entities)
if len(where_columns) > 0:
where_str = " WHERE "
for wc in where_columns:
if isinstance(wc, str):
where_str += wc + " "
else:
assert isinstance(wc, tuple)
where_str += ' '.join(wc) + " "
sql += where_str
if len(groupby_columns) > 0:
groupby_str = ' GROUPBY '
groupby_str += ', '.join([col if agg == '' else agg + '(%s)' %
col for agg, col in groupby_columns])
sql += groupby_str
if len(having_columns) > 0:
having_str = " HAVING "
for hc in having_columns:
if isinstance(hc, str):
having_str += hc + " "
else:
assert isinstance(hc, tuple)
having_str += ' '.join(hc) + " "
sql += having_str
if len(orderby_columns) > 0:
orderby_str = ' ORDERBY '
orderby_str += ', '.join([col if agg == '' else agg + '(%s)' %
col for agg, col in orderby_columns])
orderby_str += " %s" % orderby_direction
sql += orderby_str
if limit_value > 0:
sql += " LIMIT %d " % 1
union_clause = query_tree.get('union', None)
if union_clause:
sql += ' UNION ' + transform_query_tree(union_clause, schema)
except_clause = query_tree.get('except', None)
if except_clause:
sql += ' EXCEPT ' + transform_query_tree(except_clause, schema)
intersect_clause = query_tree.get('intersect', None)
if intersect_clause:
sql += ' INTERSECT ' + transform_query_tree(intersect_clause, schema)
sql = re.sub("\s+", " ", sql)
return sql
def get_logical_form_preprocessor(task, language, normalize_var_with_de_brujin_index=False):
logical_form_preprocessor = None
if task == 'geo':
if language in ['prolog', 'prolog2']:
if normalize_var_with_de_brujin_index:
# Normalize Prolog Variable
logical_form_preprocessor = geo_normalization.normalize_prolog_variable_names
else:
# Original Form
logical_form_preprocessor = geo_normalization.normalize_prolog
# Anonymize Prolog Variable
# logical_form_preprocessor = geo_normalization.anonymize_prolog_variable_names
elif language in ['sql', 'sql2', 'sql3']:
logical_form_preprocessor = geo_normalization.normalize_sql
elif language in ['lambda', 'lambda2']:
logical_form_preprocessor = geo_normalization.normalize_lambda_calculus
else:
# FunQL or typed_funql
logical_form_preprocessor = geo_normalization.normalize_funql
elif task == 'atis':
if language in ['lambda', 'lambda2', 'lambda3', 'lambda4']:
logical_form_preprocessor = atis_normalization.normalize_lambda_calculus
elif language in ['prolog', 'prolog2']:
if normalize_var_with_de_brujin_index:
logical_form_preprocessor = atis_normalization.normalize_prolog_variable_names
else:
logical_form_preprocessor = atis_normalization.preprocess_prolog
elif language in ['funql', 'typed_funql']:
logical_form_preprocessor = atis_normalization.preprocess_funql
else:
# elif language == 'sql':
logical_form_preprocessor = atis_normalization.preprocess_sql
elif task == 'job':
if language in ['prolog', 'prolog2']:
logical_form_preprocessor = job_normalization.preprocess_prolog
elif language in ['funql', 'funql2']:
logical_form_preprocessor = job_normalization.preprocess_funql
elif language in ['sql', 'sql2']:
logical_form_preprocessor = job_normalization.normalize_sql
elif language in ['lambda', 'lambda2']:
logical_form_preprocessor = job_normalization.normalize_lambda_calculus
return logical_form_preprocessor
def get_logical_form_postprocessor(task, language):
logical_form_postprocessor = None
if task == 'atis':
if language in ['sql', 'sql2', 'sql3']:
logical_form_postprocessor = atis_normalization.postprocess_sql
elif language in ['lambda', 'lambda2', 'lambda3', 'lambda4']:
logical_form_postprocessor = atis_normalization.postprocess_lambda_calculus
elif task == 'job':
if language in ['prolog', 'prolog2']:
logical_form_postprocessor = job_normalization.postprocess_prolog
elif language in ['funql', 'funql2']:
logical_form_postprocessor = job_normalization.postprocess_prolog
elif language in ['sql', 'sql2']:
logical_form_postprocessor = job_normalization.postprocess_sql
elif language in ['lambda', 'lambda2']:
logical_form_postprocessor = job_normalization.postprocess_sql
return logical_form_postprocessor
def get_logical_form_tokenizer(task, language):
if task == 'geo':
splitter = geo_tokenizer.get_logical_tokenizer(language)
elif task == 'job':
splitter = job_tokenizer.get_logical_tokenizer(language)
else:
assert task == 'atis'
splitter = atis_tokenizer.get_logical_tokenizer(language)
tokenizer = WordTokenizer(splitter)
return tokenizer
def get_utterance_preprocessor(task, language):
preprocessor = None
if task == 'job' and language in ['prolog', 'funql', 'sql', 'lambda']:
preprocessor = lambda x: x.replace("'", "").replace(
"windows nt", "windo nt").replace("windows 95", "windo 95")
return preprocessor
| 21,295 | 40.8389 | 128 |
py
|
Unimer
|
Unimer-master/grammars/parse_ast.py
|
# coding=utf8
from grammars.basic import ProductionRule
from grammars.utils import action_sequence_to_logical_form
class ASTNode:
def __init__(self, production_rule: ProductionRule, parent, node_id: int, nonterminals_to_skip: list = None):
self._production_rule = production_rule
if nonterminals_to_skip is None:
self._nonterminals_to_skip = list()
else:
self._nonterminals_to_skip = nonterminals_to_skip
self._rhs_nodes, self._rhs_nonterminals = list(), list()
for r in production_rule.rhs_nonterminal:
if r not in self._nonterminals_to_skip:
self._rhs_nodes.append(None)
self._rhs_nonterminals.append(r)
self._parent = parent
self._id = node_id
@property
def is_complete(self):
complete = True
for r in self._rhs_nodes:
if r is None:
complete = False
break
return complete
@property
def production_rule(self):
return self._production_rule
@property
def parent(self):
return self._parent
@parent.setter
def parent(self, parent):
self._parent = parent
@property
def lhs(self):
return self._production_rule.lhs
@property
def rhs(self):
terms = [term for term in self._production_rule.rhs.strip(
'[] ').split(' ')]
nodes = list()
idx = 0
for t in terms:
if idx < len(self._rhs_nonterminals) and self._rhs_nonterminals[idx] == t:
nodes.append(self.rhs_nodes[idx])
idx += 1
else:
nodes.append(t)
return nodes
@property
def rhs_nodes(self):
return self._rhs_nodes
@property
def rhs_nonterminals(self):
return self._rhs_nonterminals
@property
def node_id(self):
return self._id
def add_column(self, column_id):
pass
def add_node(self, node):
for ridx, r in enumerate(self._rhs_nodes):
if r is None:
assert self._rhs_nonterminals[ridx] == node.production_rule.lhs
self._rhs_nodes[ridx] = node
break
else:
raise Exception("AST Node %s Already Completed" %
str(self._production_rule))
def get_curr_non_terminal(self):
for ridx, r in enumerate(self._rhs_nodes):
if r is None:
return self._rhs_nonterminals[ridx]
return None
def clean_up(self):
self._parent = None
def __str__(self):
return str(self._production_rule)
def __repr__(self):
return str(self)
class AST:
def __init__(self, root_rule: ProductionRule, is_sketch_only: bool = False, nonterminals_to_skip: list = None):
self._is_sketch_only = is_sketch_only
if self._is_sketch_only:
self._nonterminals_to_skip = nonterminals_to_skip
else:
self._nonterminals_to_skip = None
self._root = ASTNode(root_rule, parent=None, node_id=0,
nonterminals_to_skip=nonterminals_to_skip)
self._curr_node = self._root
self._last_node = self._curr_node
self._curr_node_id = 1
self._nodes = [self._root]
def clean_up(self):
def traverse(node):
node.clean_up()
for rhs in node.rhs_nodes:
traverse(rhs)
traverse(self._root)
@property
def root(self):
return self._root
@property
def nodes(self):
return self._nodes
@property
def curr_node_id(self):
return self._curr_node_id
def increase_node_id(self):
self._curr_node_id += 1
@property
def is_complete(self):
def traverse(node):
if node is None:
return False
_is_complete = node.is_complete
for rhs in node.rhs_nodes:
_is_complete &= traverse(rhs)
return _is_complete
return traverse(self._root)
def get_curr_node(self):
return self._curr_node
def get_last_production_rule(self):
return self._last_node.production_rule
def add_rule(self, rule):
node = ASTNode(rule, parent=self._curr_node, node_id=self._curr_node_id,
nonterminals_to_skip=self._nonterminals_to_skip)
self._nodes.append(node)
self._curr_node_id += 1
self._curr_node.add_node(node)
self._last_node = node
if node.is_complete:
_node = node
while _node != self._root and _node.is_complete:
_node = _node.parent
self._curr_node = _node
else:
# self._last_node = self._curr_node
self._curr_node = node
def get_production_rules(self):
def traverse(node, rules):
if node is not None:
rules.append(node.production_rule)
for rhs in node.rhs_nodes:
traverse(rhs, rules)
production_rules = list()
traverse(self._root, production_rules)
return production_rules
def get_parent_production_rules(self):
def traverse(node, rules):
if node is not None:
if node.parent is None:
rules.append(None)
else:
rules.append(node.parent.production_rule)
for rhs in node.rhs_nodes:
traverse(rhs, rules)
production_rules = list()
traverse(self._root, production_rules)
return production_rules
def get_parent_ids(self):
def traverse(node, ids):
if node is not None:
if node.parent is None:
ids.append(-1)
else:
ids.append(node.parent.node_id)
for rhs in node.rhs_nodes:
traverse(rhs, ids)
ids = list()
traverse(self._root, ids)
return ids
def get_curr_parent_node(self):
return self._curr_node
def get_curr_non_terminal(self):
return self._curr_node.get_curr_non_terminal()
def _print(node, indent, string_array):
if not isinstance(node, ASTNode):
# str
string_array.append(' ' * indent + node)
return
string_array.append(' ' * indent + node.lhs)
if len(node.rhs) > 0:
for child in node.rhs:
_print(child, indent + 1, string_array)
def print_ast(ast):
print(get_tree_str(ast.root))
def get_tree_str(t):
string_array = list()
_print(t, 0, string_array)
return '\n'.join(string_array)
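# Sketch of how an AST is typically built from a parsed rule sequence (assumes `grammar` is a
# grammars.grammar.Grammar instance and that grammar.parse returns a top-down derivation with the
# root rule first):
#   rules = grammar.parse(logical_form)
#   ast = AST(rules[0])
#   for rule in rules[1:]:
#       ast.add_rule(rule)
#   print_ast(ast)   # prints the indented tree produced by get_tree_str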
| 6,741 | 27.935622 | 115 |
py
|
Unimer
|
Unimer-master/grammars/grammar.py
|
# coding=utf8
import copy
from typing import List, Dict, Set
from pprint import pprint
from parsimonious.exceptions import ParseError
from parsimonious.grammar import Grammar as _Grammar
from grammars.basic import ProductionRule
from grammars.utils import format_grammar_string, initialize_valid_actions, SqlVisitor
from grammars.geo import prolog_grammar, funql_grammar, sql_grammar, \
lambda_calculus_grammar, typed_funql_grammar, typed_prolog_grammar, \
sql_grammar_2, sql_grammar_3, lambda_calculus_grammar_2
from grammars.atis import lambda_calculus_grammar as atis_lambda_calculus_grammar
from grammars.atis import lambda_calculus_grammar_2 as atis_lambda_calculus_grammar_2
from grammars.atis import lambda_calculus_grammar_3 as atis_lambda_calculus_grammar_3
from grammars.atis import lambda_calculus_grammar_4 as atis_lambda_calculus_grammar_4
from grammars.atis import sql_grammar as atis_sql_grammar
from grammars.atis import sql_grammar_2 as atis_sql_grammar_2
from grammars.atis import sql_grammar_3 as atis_sql_grammar_3
from grammars.atis import prolog_grammar as atis_prolog_grammar
from grammars.atis import prolog_grammar_2 as atis_prolog_grammar_2
from grammars.atis import funql_grammar as atis_funql_grammar
from grammars.atis import typed_funql_grammar as atis_typed_funql_grammar
from grammars.job import prolog_grammar as job_prolog_grammar
from grammars.job import funql_grammar as job_funql_grammar
from grammars.job import sql_grammar as job_sql_grammar
from grammars.job import lambda_grammar as job_lambda_grammar
class Grammar:
def __init__(self, grammar_dictionary: Dict, root_rule: str, copy_terminal_set: Set = None):
self._grammar_dictionary = copy.deepcopy(grammar_dictionary)
# Non terminals
self._non_terminals = sorted(list(self._grammar_dictionary.keys()))
pprint(format_grammar_string(self._grammar_dictionary))
_grammar = _Grammar(format_grammar_string(self._grammar_dictionary))
self._grammar = _grammar
valid_actions = initialize_valid_actions(_grammar)
all_actions = set()
for action_list in valid_actions.values():
all_actions.update(action_list)
production_rule_strs = sorted(all_actions)
self._root_rule = root_rule
self._production_rules = list()
self._nonterminals_dict = dict()
self._rule2id = dict()
self._id2rule = dict()
rule_id = 1
for production_rule_str in production_rule_strs:
print(production_rule_str)
nonterminal, rhs = production_rule_str.split(' -> ')
production_rule_str = ' '.join(production_rule_str.split(' '))
assert nonterminal in self._non_terminals
rhs_nonterminal = [term for term in rhs.strip(
'[] ').split(' ') if term in self._non_terminals]
self._production_rules.append(ProductionRule(
rule_id, production_rule_str, nonterminal, rhs, rhs_nonterminal))
self._rule2id[production_rule_str] = rule_id
self._id2rule[rule_id] = self._production_rules[-1]
if nonterminal not in self._nonterminals_dict:
self._nonterminals_dict[nonterminal] = list()
self._nonterminals_dict[nonterminal].append(
self._production_rules[-1])
rule_id += 1
self._copy_terminal_set = copy_terminal_set
@property
def production_rules(self):
return self._production_rules
@property
def copy_terminal_set(self):
return self._copy_terminal_set
@property
def root_rule_id(self):
return self._rule2id[self._root_rule]
@property
def num_rules(self):
return len(self._rule2id)
@property
def num_non_terminals(self):
return len(self._non_terminals)
def parse(self, query: str):
sql_visitor = SqlVisitor(self._grammar)
q = query.replace("``", "'").replace("''", "'")
try:
applied_production_rules = sql_visitor.parse(q) if query else []
except ParseError as e:
raise e
# applied_production_rules = list()
rules = list()
for rule in applied_production_rules:
lhs, rhs = rule.split(' -> ')
rule_str = rule
rules.append(copy.deepcopy(self.get_production_rule_by_id(
self.get_production_rule_id(rule_str))))
return rules
def get_production_rule_by_id(self, rule_id) -> ProductionRule:
if rule_id not in self._id2rule:
return None
return self._id2rule[rule_id]
def get_production_rule_ids_by_nonterminal_id(self, nonterminal_id: int) -> List[int]:
nonterminal = self._non_terminals[nonterminal_id]
production_rules = self._nonterminals_dict[nonterminal]
return [p.rule_id for p in production_rules]
def get_production_rule_ids_by_nonterminal(self, nonterminal: str) -> List[int]:
production_rules = self._nonterminals_dict[nonterminal]
return [p.rule_id for p in production_rules]
def get_production_rules_by_nonterminal(self, nonterminal: str) -> List[ProductionRule]:
return self._nonterminals_dict[nonterminal]
def get_production_rule_id(self, production_rule: str) -> int:
return self._rule2id[production_rule]
def get_non_terminal_id(self, nonterminal):
return self._non_terminals.index(nonterminal)
def get_non_terminal(self, nonterminal_id):
if nonterminal_id >= len(self._non_terminals):
return None
return self._non_terminals[nonterminal_id]
def get_grammar(dataset, language):
if dataset == 'geo':
if language == 'funql':
copy_terminal_set = funql_grammar.COPY_TERMINAL_SET
return Grammar(funql_grammar.GRAMMAR_DICTIONARY, funql_grammar.ROOT_RULE, copy_terminal_set)
if language == 'typed_funql':
copy_terminal_set = typed_funql_grammar.COPY_TERMINAL_SET
return Grammar(typed_funql_grammar.GRAMMAR_DICTIONARY, typed_funql_grammar.ROOT_RULE, copy_terminal_set)
elif language == 'prolog':
copy_terminal_set = prolog_grammar.COPY_TERMINAL_SET
return Grammar(prolog_grammar.GRAMMAR_DICTIONARY, prolog_grammar.ROOT_RULE, copy_terminal_set)
elif language == 'prolog2':
copy_terminal_set = typed_prolog_grammar.COPY_TERMINAL_SET
return Grammar(typed_prolog_grammar.GRAMMAR_DICTIONARY, typed_prolog_grammar.ROOT_RULE, copy_terminal_set)
elif language == 'sql':
copy_terminal_set = sql_grammar.COPY_TERMINAL_SET
return Grammar(sql_grammar.GRAMMAR_DICTIONARY, sql_grammar.ROOT_RULE, copy_terminal_set)
elif language == 'sql2':
copy_terminal_set = sql_grammar_2.COPY_TERMINAL_SET
return Grammar(sql_grammar_2.GRAMMAR_DICTIONARY, sql_grammar_2.ROOT_RULE, copy_terminal_set)
elif language == 'sql3':
return Grammar(sql_grammar_3.GRAMMAR_DICTIONARY, sql_grammar_3.ROOT_RULE)
elif language == 'lambda':
copy_terminal_set = lambda_calculus_grammar.COPY_TERMINAL_SET
return Grammar(lambda_calculus_grammar.GRAMMAR_DICTIONARY, lambda_calculus_grammar.ROOT_RULE, copy_terminal_set)
elif language == 'lambda2':
copy_terminal_set = lambda_calculus_grammar_2.COPY_TERMINAL_SET
return Grammar(lambda_calculus_grammar_2.GRAMMAR_DICTIONARY, lambda_calculus_grammar_2.ROOT_RULE, copy_terminal_set)
elif dataset == 'job':
if language == 'prolog':
copy_terminal_set = job_prolog_grammar.COPY_TERMINAL_SET
return Grammar(job_prolog_grammar.GRAMMAR_DICTIONARY, job_prolog_grammar.ROOT_RULE, copy_terminal_set)
elif language == 'funql':
copy_terminal_set = job_funql_grammar.COPY_TERMINAL_SET
return Grammar(job_funql_grammar.GRAMMAR_DICTIONARY, job_funql_grammar.ROOT_RULE, copy_terminal_set)
elif language == 'sql':
copy_terminal_set = job_sql_grammar.COPY_TERMINAL_SET
return Grammar(job_sql_grammar.GRAMMAR_DICTIONARY, job_sql_grammar.ROOT_RULE, copy_terminal_set)
elif language == 'lambda':
copy_terminal_set = job_lambda_grammar.COPY_TERMINAL_SET
return Grammar(job_lambda_grammar.GRAMMAR_DICTIONARY, job_lambda_grammar.ROOT_RULE, copy_terminal_set)
elif dataset == 'atis':
if language == 'lambda':
copy_terminal_set = atis_lambda_calculus_grammar.COPY_TERMINAL_SET
return Grammar(atis_lambda_calculus_grammar.GRAMMAR_DICTIONARY, atis_lambda_calculus_grammar.ROOT_RULE, copy_terminal_set)
elif language == 'lambda2':
copy_terminal_set = atis_lambda_calculus_grammar_2.COPY_TERMINAL_SET
return Grammar(atis_lambda_calculus_grammar_2.GRAMMAR_DICTIONARY, atis_lambda_calculus_grammar_2.ROOT_RULE, copy_terminal_set)
elif language == 'lambda3':
copy_terminal_set = atis_lambda_calculus_grammar_3.COPY_TERMINAL_SET
return Grammar(atis_lambda_calculus_grammar_3.GRAMMAR_DICTIONARY, atis_lambda_calculus_grammar_3.ROOT_RULE, copy_terminal_set)
elif language == 'lambda4':
copy_terminal_set = atis_lambda_calculus_grammar_4.COPY_TERMINAL_SET
return Grammar(atis_lambda_calculus_grammar_4.GRAMMAR_DICTIONARY, atis_lambda_calculus_grammar_4.ROOT_RULE,
copy_terminal_set)
elif language == 'sql':
copy_terminal_set = atis_sql_grammar.COPY_TERMINAL_SET
return Grammar(atis_sql_grammar.GRAMMAR_DICTIONARY, atis_sql_grammar.ROOT_RULE, copy_terminal_set)
elif language == 'sql2':
copy_terminal_set = atis_sql_grammar_2.COPY_TERMINAL_SET
return Grammar(atis_sql_grammar_2.GRAMMAR_DICTIONARY, atis_sql_grammar_2.ROOT_RULE, copy_terminal_set)
elif language == 'sql3':
copy_terminal_set = atis_sql_grammar_3.COPY_TERMINAL_SET
return Grammar(atis_sql_grammar_3.GRAMMAR_DICTIONARY, atis_sql_grammar_3.ROOT_RULE, copy_terminal_set)
elif language == 'prolog':
copy_terminal_set = atis_prolog_grammar.COPY_TERMINAL_SET
return Grammar(atis_prolog_grammar.GRAMMAR_DICTIONARY, atis_prolog_grammar.ROOT_RULE, copy_terminal_set)
elif language == 'prolog2':
copy_terminal_set = atis_prolog_grammar_2.COPY_TERMINAL_SET
return Grammar(atis_prolog_grammar_2.GRAMMAR_DICTIONARY, atis_prolog_grammar_2.ROOT_RULE, copy_terminal_set)
elif language == 'funql':
copy_terminal_set = atis_funql_grammar.COPY_TERMINAL_SET
return Grammar(atis_funql_grammar.GRAMMAR_DICTIONARY, atis_funql_grammar.ROOT_RULE, copy_terminal_set)
elif language == 'typed_funql':
copy_terminal_set = atis_typed_funql_grammar.COPY_TERMINAL_SET
return Grammar(atis_typed_funql_grammar.GRAMMAR_DICTIONARY, atis_typed_funql_grammar.ROOT_RULE, copy_terminal_set)
return None
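# Illustrative usage (the query string is only a placeholder; any query accepted by the grammar works):
#   grammar = get_grammar('geo', 'funql')
#   rules = grammar.parse(funql_string)   # list of applied ProductionRule objects
#   print(grammar.num_rules, grammar.root_rule_id)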
| 11,090 | 50.586047 | 138 |
py
|
Unimer
|
Unimer-master/grammars/__init__.py
|
# coding=utf-8
| 14 | 14 | 14 |
py
|
Unimer
|
Unimer-master/grammars/basic.py
|
# coding=utf8
from typing import List, Dict
class ProductionRule:
def __init__(self, rule_id: int, rule: str, lhs: str, rhs: List[str], rhs_nonterminal: List[str], attrs: Dict = None):
self._rule = rule
self._rule_id = rule_id
self._lhs = lhs
self._rhs = rhs
self._rhs_nonterminal = rhs_nonterminal
self._attrs = attrs
@property
def rule_id(self):
return self._rule_id
@property
def rhs_nonterminal(self):
return self._rhs_nonterminal
@property
def lhs(self):
return self._lhs
@property
def rhs(self):
return self._rhs
@property
def attrs(self):
return self._attrs
@property
def rule(self):
return self._rule
def set_attr(self, key, value):
if self._attrs is None:
self._attrs = dict()
self._attrs[key] = value
def __str__(self):
attr_str = "" if self.attrs is None else str(self.attrs)
if attr_str:
return self._rule + " Attrs: " + attr_str
return self._rule
def __repr__(self):
return str(self)
| 1,139 | 21.8 | 122 |
py
|
Unimer
|
Unimer-master/grammars/gnn_entity_matcher.py
|
# coding=utf8
import os
from .geo import geo_entity_extractor, geo_gnn_entity_matcher
from .atis import atis_entity_extractor, atis_gnn_entity_matcher
def get_gnn_entity_matcher(task, language):
matcher = None
if task == 'geo':
base_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'data', 'geo')
entity_path = os.path.join(base_path, 'geo_entities.json')
if language in ['funql', 'prolog']:
matcher = geo_gnn_entity_matcher.GeoGNNEntityMatcher(entity_path)
elif language == 'lambda':
matcher = geo_gnn_entity_matcher.GeoLambdaCalculusGNNEntityMatcher(entity_path)
elif language == 'sql':
matcher = geo_gnn_entity_matcher.GeoSQLGNNEntityMatcher(entity_path)
elif task == 'atis':
db_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'data', 'atis', 'db')
if language in ['lambda', 'lambda2', 'lambda3', 'lambda4',]:
matcher = atis_gnn_entity_matcher.ATISGNNLambdaCalculusEntityMatcher(db_path)
elif language in ['funql', 'prolog']:
matcher = atis_gnn_entity_matcher.ATISGNNEntityMatcher(db_path)
return matcher
def get_gnn_entity_extractor(task, language):
"""
Extract entities from a logical form.
:param task: dataset name, e.g. 'geo' or 'atis'
:param language: meaning representation, e.g. 'funql', 'prolog', 'lambda' or 'sql'
:return: an entity-extraction function, or None if the combination is unsupported
"""
extractor = None
if task == 'geo':
if language == 'funql':
extractor = geo_entity_extractor.funql_entity_extractor
elif language == 'prolog':
extractor = geo_entity_extractor.prolog_entity_extractor
elif language == 'lambda':
extractor = geo_entity_extractor.lambda_calculus_entity_extractor
elif language == 'sql':
extractor = geo_entity_extractor.sql_entity_extractor
elif task == 'atis':
if language == 'lambda':
extractor = atis_entity_extractor.lambda_calculus_entity_extractor
elif language == 'funql':
extractor = atis_entity_extractor.funql_entity_extractor
elif language == 'prolog':
extractor = atis_entity_extractor.prolog_entity_extractor
return extractor
def get_gnn_entity_replacer(task, language):
"""
Replace entities in logical form with recognized entities from utterance
:param task:
:param language:
:return:
"""
replacer = None
if task == 'geo':
if language == 'funql':
replacer = geo_entity_extractor.replace_funql_entity
elif language == 'prolog':
replacer = geo_entity_extractor.replace_prolog_entity
elif language == 'lambda':
replacer = geo_entity_extractor.replace_lambda_calculus_entity
elif language == 'sql':
replacer = geo_entity_extractor.replace_sql_entity
elif task == 'atis':
if language == 'lambda':
replacer = atis_entity_extractor.replace_lambda_calculus_entity
elif language == 'funql':
replacer = atis_entity_extractor.replace_funql_entity
elif language == 'prolog':
replacer = atis_entity_extractor.replace_prolog_entity
return replacer
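# Illustrative sketch (not part of the original module): the three lookups above are
# meant to be used together -- match candidate entities in a question, extract gold
# entities from its logical form, and replace entities in the logical form with the
# recognised ones. Running this assumes the geo entity file under data/geo exists.
if __name__ == '__main__':
    matcher = get_gnn_entity_matcher('geo', 'funql')
    extractor = get_gnn_entity_extractor('geo', 'funql')
    replacer = get_gnn_entity_replacer('geo', 'funql')
    print(type(matcher).__name__, extractor.__name__, replacer.__name__)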
| 3,164 | 38.5625 | 102 |
py
|
Unimer
|
Unimer-master/grammars/geo/get_funql_terminals.py
|
# coding=utf8
import re
stateid_pattern = re.compile('stateid\((.*?)\)')
riverid_pattern = re.compile('riverid\((.*?)\)')
countryid_pattern = re.compile('countryid\((.*?)\)')
cityid_pattern = re.compile('cityid\((.*?),(.*?)\)')
placeid_pattern = re.compile('placeid\((.*?)\)')
if __name__ == '__main__':
test_data = '../../data/geo/geo_funql_test.tsv'
train_data = '../../data/geo/geo_funql_train.tsv'
funqls = list()
with open(test_data, 'r') as f:
for line in f:
line = line.strip()
funqls.append(line.split('\t')[1].lower())
with open(train_data, 'r') as f:
for line in f:
line = line.strip()
funqls.append(line.split('\t')[1].lower())
state_names = set()
for p in funqls:
matches = stateid_pattern.findall(p)
for m in matches:
state_names.add(m)
print("State Names: ")
print(['"%s"' % c for c in state_names])
print("====\n\n")
country_names = set()
for p in funqls:
matches = countryid_pattern.findall(p)
for m in matches:
country_names.add(m)
print("Country Names: ")
print(['"%s"' % c for c in country_names])
print("====\n\n")
river_names = set()
for p in funqls:
matches = riverid_pattern.findall(p)
for m in matches:
river_names.add(m)
print("River Names: ")
print(['"%s"' % c for c in river_names])
print("====\n\n")
place_names = set()
for p in funqls:
matches = placeid_pattern.findall(p)
for m in matches:
place_names.add(m)
print("Place Names: ")
print(['"%s"' % c for c in place_names])
print("====\n\n")
city_names = set()
state_abbres = set()
for p in funqls:
matches = cityid_pattern.findall(p)
for c, s in matches:
city_names.add(c)
state_abbres.add(s)
print("City Names: ")
print(['"%s"' % c for c in city_names])
print("====\n\n")
print("State Abbres: ")
print(['"%s"' % c for c in state_abbres])
print("====\n\n")
| 2,100 | 26.644737 | 54 |
py
|
Unimer
|
Unimer-master/grammars/geo/lambda_calculus_grammar.py
|
# coding=utf8
ROOT_RULE = 'statement -> [expression]'
GRAMMAR_DICTIONARY = {}
GRAMMAR_DICTIONARY['statement'] = ['(expression ws)']
GRAMMAR_DICTIONARY['expression'] = ['(application)', '(abstraction)', '(constant)', '(variable)']
GRAMMAR_DICTIONARY['abstraction'] = ['("(" ws "lambda" wsp variable_definition wsp expression ws ")")']
GRAMMAR_DICTIONARY['application'] = ['("(" ws function ws ")")']
GRAMMAR_DICTIONARY['function'] = [
'("capital:<c,t>" wsp expression)',
'("capital:<s,c>" wsp expression)',
'("high_point:<e,<e,t>>" wsp expression wsp expression)',
'("capital:<s,<c,t>>" wsp expression wsp expression)',
'("argmax:<<e,t>,<<e,i>,e>>" wsp expression wsp expression)',
'("city:<c,t>" wsp expression)',
'("the:<<e,t>,e>" wsp expression)',
'("size:<lo,i>" wsp expression)',
'("area:<lo,i>" wsp expression)',
'("high_point:<e,l>" wsp expression)',
'("argmin:<<e,t>,<<e,i>,e>>" wsp expression wsp expression)',
'("population:<lo,i>" wsp expression)',
'("state:<s,t>" wsp expression)',
'("sum:<<e,t>,<<e,i>,i>>" wsp expression wsp expression)',
'("town:<lo,t>" wsp expression)',
'("lake:<l,t>" wsp expression)',
'("river:<r,t>" wsp expression)',
'("population:<lo,<i,t>>" wsp expression wsp expression)',
'("forall:<<e,t>,t>" wsp expression)',
'("mountain:<m,t>" wsp expression)',
'("elevation:<lo,i>" wsp expression)',
'("next_to:<lo,<lo,t>>" wsp expression wsp expression)',
'("place:<p,t>" wsp expression)',
'("=:<i,<i,t>>" wsp expression wsp expression)',
'("<:<i,<i,t>>" wsp expression wsp expression)',
'(">:<i,<i,t>>" wsp expression wsp expression)',
'("count:<<e,t>,i>" wsp expression)',
'("exists:<<e,t>,t>" wsp expression)',
'("loc:<lo,<lo,t>>" wsp expression wsp expression)',
'("elevation:<lo,<i,t>>" wsp expression wsp expression)',
'("capital2:<s,<c,t>>" wsp expression wsp expression)',
'("equals:<e,<e,t>>" wsp expression wsp expression)',
'("density:<lo,i>" wsp expression)',
'("density:<lo,<i,t>>" wsp expression wsp expression)',
'("named:<e,<n,t>>" wsp expression wsp expression)',
'("len:<r,i>" wsp expression)',
'("major:<lo,t>" wsp expression)',
'("in:<lo,<lo,t>>" wsp expression wsp expression)',
'("and:<t*,t>" wsp application wsp polyvariadic_expression)',
'("or:<t*,t>" wsp application wsp polyvariadic_expression)',
'("not:<t,t>" wsp expression)',
]
GRAMMAR_DICTIONARY['polyvariadic_expression'] = ['(application ws polyvariadic_expression)', '""']
GRAMMAR_DICTIONARY['variable'] = ['"$0"', '"$1"', '"$2"', '"$3"', '"$4"']
GRAMMAR_DICTIONARY['variable_definition'] = ['"$0:e"', '"$1:e"', '"$2:e"', '"$3:e"', '"$4:e"', '"$0:i"']
GRAMMAR_DICTIONARY['constant'] = ['(state)', '(city)', '(river)', '("death_valley:lo")',
'(names)', '(place)', '(mountain)', '("usa:co")', '("0:i")']
GRAMMAR_DICTIONARY['state'] = ['"oklahoma:s"', '"mississippi:s"', '"west_virginia:s"', '"arkansas:s"', '"virginia:s"', '"vermont:s"', '"maine:s"', '"nevada:s"', '"maryland:s"', '"wisconsin:s"', '"new_york:s"', '"arizona:s"', '"ohio:s"', '"missouri:s"', '"tennessee:s"', '"pennsylvania:s"', '"massachusetts:s"', '"texas:s"', '"hawaii:s"', '"south_dakota:s"', '"illinois:s"', '"utah:s"', '"kentucky:s"', '"alabama:s"', '"new_hampshire:s"', '"new_mexico:s"', '"colorado:s"', '"rhode_island:s"', '"south_carolina:s"', '"delaware:s"', '"michigan:s"', '"new_jersey:s"', '"louisiana:s"', '"florida:s"', '"minnesota:s"', '"alaska:s"', '"north_dakota:s"', '"california:s"', '"georgia:s"', '"iowa:s"', '"idaho:s"', '"indiana:s"', '"north_carolina:s"', '"oregon:s"', '"montana:s"', '"kansas:s"', '"nebraska:s"', '"washington:s"', '"wyoming:s"']
GRAMMAR_DICTIONARY['city'] = ['"kalamazoo_mi:c"', '"san_diego_ca:c"', '"denver_co:c"', '"portland_me:c"', '"san_francisco_ca:c"', '"flint_mi:c"', '"tempe_az:c"', '"austin_tx:c"', '"des_moines_ia:c"', '"springfield_il:c"', '"springfield_mo:c"', '"baton_rouge_la:c"', '"atlanta_ga:c"', '"columbus_oh:c"', '"rochester_ny:c"', '"springfield_sd:c"', '"tucson_az:c"', '"boulder_co:c"', '"salem_or:c"', '"sacramento_ca:c"', '"detroit_mi:c"', '"san_jose_ca:c"', '"indianapolis_in:c"', '"erie_pa:c"', '"san_antonio_tx:c"', '"pittsburgh_pa:c"', '"albany_ny:c"', '"portland_or:c"', '"dallas_tx:c"', '"dover_de:c"', '"boston_ma:c"', '"scotts_valley_ca:c"', '"riverside_ca:c"', '"chicago_il:c"', '"montgomery_al:c"', '"seattle_wa:c"', '"new_orleans_la:c"', '"new_york_ny:c"', '"minneapolis_mn:c"', '"fort_wayne_in:c"', '"miami_fl:c"', '"spokane_wa:c"', '"san_franciso_ca:c"', '"houston_tx:c"', '"washington_dc:c"']
GRAMMAR_DICTIONARY['river'] = ['"potomac_river:r"', '"mississippi_river:r"', '"colorado_river:r"', '"north_platte_river:r"', '"rio_grande_river:r"', '"red_river:r"', '"missouri_river:r"', '"ohio_river:r"', '"delaware_river:r"', '"chattahoochee_river:r"']
GRAMMAR_DICTIONARY['mountain'] = ['"mount_mckinley:m"', '"mount_whitney:m"', '"guadalupe_peak:m"']
GRAMMAR_DICTIONARY['place'] = ['"mount_mckinley:p"', '"mount_whitney:p"']
GRAMMAR_DICTIONARY['names'] = ['"austin:n"', '"springfield:n"', '"dallas:n"', '"salt_lake_city:n"', '"portland:n"', '"rochester:n"', '"plano:n"', '"durham:n"', '"colorado:n"']
GRAMMAR_DICTIONARY["ws"] = ['~"\s*"i']
GRAMMAR_DICTIONARY["wsp"] = ['~"\s+"i']
COPY_TERMINAL_SET = {'state', 'city', 'river', 'mountain',
'place', 'names'}
| 5,431 | 77.724638 | 901 |
py
|
Unimer
|
Unimer-master/grammars/geo/geo_tokenizer.py
|
# coding=utf8
from typing import List
from overrides import overrides
from allennlp.data.tokenizers import Token, WordTokenizer
from allennlp.data.tokenizers.word_splitter import WordSplitter
class FunQLWordSplitter(WordSplitter):
@overrides
def split_words(self, logical_form: str) -> List[Token]:
normalized_lf = logical_form.replace(" ", "::")
replacements = [
('(', ' ( '),
(')', ' ) '),
(',', ' , '),
("\\+", " \\+ "),
]
for a, b in replacements:
normalized_lf = normalized_lf.replace(a, b)
tokens = [Token(t) if "::" not in t else Token(
t.replace("::", " ")) for t in normalized_lf.split()]
return tokens
class FunQLWordSplitter2(WordSplitter):
@overrides
def split_words(self, logical_form: str) -> List[Token]:
normalized_lf = logical_form.replace(" ", "::")
replacements = [
('(', ' ( '),
(')', ' ) '),
(',', ' , '),
("\\+", " \\+ "),
("'", " ' "),
]
for a, b in replacements:
normalized_lf = normalized_lf.replace(a, b)
tokens = list()
for t in normalized_lf.split():
if "::" not in t:
tokens.append(Token(t))
else:
tokens += [Token(_t) for _t in t.replace("::", " ").split()]
return tokens
class PrologWordSplitter(WordSplitter):
@overrides
def split_words(self, logical_form: str) -> List[Token]:
normalized_lf = logical_form.replace(" ", "::")
replacements = [
('(', ' ( '),
(')', ' ) '),
(',', ' , '),
("\\+", " \\+ "),
]
for a, b in replacements:
normalized_lf = normalized_lf.replace(a, b)
tokens = [Token(t) if "::" not in t else Token(
t.replace("::", " ")) for t in normalized_lf.split()]
return tokens
class PrologWordSplitter2(WordSplitter):
@overrides
def split_words(self, logical_form: str) -> List[Token]:
normalized_lf = logical_form.replace(" ", "::")
replacements = [
('(', ' ( '),
(')', ' ) '),
(',', ' , '),
("\\+", " \\+ "),
("'", " ' "),
]
for a, b in replacements:
normalized_lf = normalized_lf.replace(a, b)
tokens = list()
for t in normalized_lf.split():
if "::" not in t:
tokens.append(Token(t))
else:
tokens += [Token(_t) for _t in t.replace("::", " ").split()]
return tokens
class PrologWordSplitter3(WordSplitter):
PREDS = [
'cityid', 'countryid', 'placeid', 'riverid', 'stateid',
'capital', 'city', 'lake', 'major', 'mountain', 'place', 'river',
'state', 'area', 'const', 'density', 'elevation', 'high_point',
'higher', 'loc', 'longer', 'low_point', 'lower', 'len', 'next_to',
'population', 'size', 'traverse',
'answer', 'largest', 'smallest', 'highest', 'lowest', 'longest',
'shortest', 'count', 'most', 'fewest', 'sum']
@overrides
def split_words(self, logical_form: str) -> List[Token]:
normalized_lf = logical_form.replace(" ", "::")
replacements = [
('(', ' ( '),
(')', ' ) '),
(',', ' , '),
("\\+", " \\+ "),
("'", " ' "),
]
for a, b in replacements:
normalized_lf = normalized_lf.replace(a, b)
tokens = list()
for t in normalized_lf.split():
if "::" not in t:
if t in self.PREDS:
tokens.append(Token("_%s" % t))
else:
tokens.append(Token(t))
else:
tokens += [Token(_t) for _t in t.replace("::", " ").split()]
return tokens
class SQLWordSplitter(WordSplitter):
@overrides
def split_words(self, logical_form: str) -> List[Token]:
normalized_sql = logical_form
replacements = [
('(', ' ( '),
(')', ' ) '),
(',', ' , '),
("'", ""),
('.', ' . '),
("\\+", " \\+ "),
]
for a, b in replacements:
normalized_sql = normalized_sql.replace(a, b)
tokens = [Token(t) for t in normalized_sql.split()]
return tokens
class LambdaCalculusWordSplitter(WordSplitter):
@overrides
def split_words(self, logical_form: str) -> List[Token]:
normalized_lc = logical_form
replacements = [
('(', ' ( '),
(')', ' ) '),
("\\+", " \\+ "),
]
for a, b in replacements:
normalized_lc = normalized_lc.replace(a, b)
tokens = [Token(t) for t in normalized_lc.split()]
return tokens
class LambdaCalculusWordSplitter2(WordSplitter):
@overrides
def split_words(self, logical_form: str) -> List[Token]:
normalized_lc = logical_form
replacements = [
('(', ' ( '),
(')', ' ) '),
("\\+", " \\+ "),
]
for a, b in replacements:
normalized_lc = normalized_lc.replace(a, b)
tokens = list()
for t in normalized_lc.split():
if any([t.endswith(suffix) for suffix in [':s', ':co', ':n', ':c', ':r', ':m', ':p', ':lo']]):
_t = t.replace(":", " : ").replace("_", " ")
tokens.append(Token("'"))
for _token in _t.split():
tokens.append(Token(_token))
tokens.append(Token("'"))
else:
tokens.append(Token(t))
return tokens
def get_logical_tokenizer(language: str) -> WordTokenizer:
splitter = None
if language == 'funql':
# splitter = FunQLWordSplitter()
splitter = FunQLWordSplitter2()
elif language == 'prolog':
# splitter = PrologWordSplitter()
splitter = PrologWordSplitter2()
elif language == 'prolog2':
splitter = PrologWordSplitter3()
elif language == 'sql':
splitter = SQLWordSplitter()
elif language == 'lambda':
splitter = LambdaCalculusWordSplitter()
elif language == 'lambda2':
splitter = LambdaCalculusWordSplitter2()
assert splitter is not None
return splitter
if __name__ == '__main__':
    splitter = get_logical_tokenizer('lambda2')
    tokenizer = WordTokenizer(splitter)
tokens = tokenizer.tokenize("(count:<<e,t>,i> (lambda $0:e (and:<t*,t> (river:<r,t> $0) (loc:<lo,<lo,t>> $0 new_york:s))))")
print(tokens)
| 6,678 | 31.740196 | 128 |
py
|
Unimer
|
Unimer-master/grammars/geo/geo_normalization.py
|
# coding=utf8
import re
def anonymize_prolog_variable_names(logical_form):
p = re.sub('[A-G]', 'A', logical_form).lower()
return p
def tokenize_prolog(logical_form):
# Tokenize Prolog
normalized_lf = logical_form.replace(" ", "::")
replacements = [
('(', ' ( '),
(')', ' ) '),
(',', ' , '),
("\\+", " \\+ "),
]
for a, b in replacements:
normalized_lf = normalized_lf.replace(a, b)
toks = [t if "::" not in t else t.replace(
"::", " ") for t in normalized_lf.split()]
return toks
def normalize_prolog_variable_names(logical_form):
"""Standardize variable names in Prolog with De Brujin indices."""
toks = tokenize_prolog(logical_form)
# Replace Variable
cur_vars = []
new_toks = []
for w in toks:
if len(w) == 1 and w.isalpha() and re.match('[A-G]', w):
if w in cur_vars:
ind_from_end = len(cur_vars) - cur_vars.index(w) - 1
new_toks.append('V%d' % ind_from_end)
else:
cur_vars.append(w)
new_toks.append('NV')
else:
new_toks.append(w)
return ''.join(new_toks).lower()
def recover_normalized_prolog_variable_name(logical_form):
"""Undo the variable name standardization."""
toks = tokenize_prolog(logical_form)
cur_var = chr(ord('A') - 1)
new_toks = []
for w in toks:
if w == 'NV' or w == 'nv':
cur_var = chr(ord(cur_var) + 1)
new_toks.append(cur_var)
elif re.match('[V|v]\d+', w):
ind = int(w[1:])
new_toks.append(chr(ord(cur_var) - ind))
else:
new_toks.append(w)
return ''.join(new_toks)
def normalize_sql(logical_form):
s = logical_form.replace("( ", "(").replace(" )", ")").replace(
";", "").replace('"', "'").replace(' . ', '.').strip().lower()
s = s.replace('max (', 'max(')
s = s.replace('min (', 'min(')
s = s.replace('avg (', 'avg(')
s = s.replace('count (', 'count(')
s = s.replace('sum (', 'sum(')
s = s.replace('count(1)', 'count(*)')
return s
def normalize_lambda_calculus(logical_form):
s = logical_form.replace(
'\s+', ' ').replace("( ", "(").replace(" )", ")").replace(') )', '))').replace(' :', ':').strip().lower()
return s
def normalize_prolog(lf):
l = re.sub(r"\s*\(\s*", "(", lf)
l = re.sub(r"\s*\)\s*", ")", l)
l = re.sub(r"\s*,\s*", ",", l)
l = l.lower()
return l
def normalize_funql(lf):
l = re.sub(r"\s*\(\s*", "(", lf)
l = re.sub(r"\s*\)\s*", ")", l)
l = re.sub(r"\s*,\s*", ",", l)
l = l.lower()
return l
| 2,678 | 27.2 | 113 |
py
|
Unimer
|
Unimer-master/grammars/geo/get_prolog_terminals.py
|
# coding=utf8
import re
stateid_pattern = re.compile('_stateid\((.*?)\)')
riverid_pattern = re.compile('_riverid\((.*?)\)')
countryid_pattern = re.compile('_countryid\((.*?)\)')
cityid_pattern = re.compile('_cityid\((.*?),(.*?)\)')
placeid_pattern = re.compile('_placeid\((.*?)\)')
if __name__ == '__main__':
test_data = '../../data/geo/geo_prolog_test.tsv'
train_data = '../../data/geo/geo_prolog_train.tsv'
prologs = list()
with open(test_data, 'r') as f:
for line in f:
line = line.strip()
prologs.append(line.split('\t')[1].replace(' ', '').replace("'", "").lower())
with open(train_data, 'r') as f:
for line in f:
line = line.strip()
prologs.append(line.split('\t')[1].replace(' ', '').replace("'", "").lower())
state_names = set()
for p in prologs:
matches = stateid_pattern.findall(p)
for m in matches:
state_names.add(m)
print("State Names: ")
print(['"%s"' % c for c in state_names])
print("====\n\n")
country_names = set()
for p in prologs:
matches = countryid_pattern.findall(p)
for m in matches:
country_names.add(m)
print("Country Names: ")
print(['"%s"' % c for c in country_names])
print("====\n\n")
river_names = set()
for p in prologs:
matches = riverid_pattern.findall(p)
for m in matches:
river_names.add(m)
print("River Names: ")
print(['"%s"' % c for c in river_names])
print("====\n\n")
place_names = set()
for p in prologs:
matches = placeid_pattern.findall(p)
for m in matches:
place_names.add(m)
print("Place Names: ")
print(['"%s"' % c for c in place_names])
print("====\n\n")
city_names = set()
state_abbres = set()
for p in prologs:
matches = cityid_pattern.findall(p)
for c, s in matches:
city_names.add(c)
state_abbres.add(s)
print("City Names: ")
print(['"%s"' % c for c in city_names])
print("====\n\n")
print("State Abbres: ")
print(['"%s"' % c for c in state_abbres])
print("====\n\n")
| 2,182 | 28.106667 | 89 |
py
|
Unimer
|
Unimer-master/grammars/geo/funql_grammar.py
|
# coding=utf-8
"""
FunQL Grammar
"""
import copy
from typing import List, Dict
from pprint import pprint
from parsimonious.exceptions import ParseError
from parsimonious.grammar import Grammar as _Grammar
# First-order logical form
ROOT_RULE = 'statement -> [answer]'
GRAMMAR_DICTIONARY = {}
GRAMMAR_DICTIONARY['statement'] = ['(answer ws)']
GRAMMAR_DICTIONARY['answer'] = ['("answer" ws "(" ws predicate ws ")" )']
GRAMMAR_DICTIONARY['predicate'] = [
'meta', 'object', 'collection', 'relation', '("intersection" ws "(" ws predicate ws "," ws predicate ws ")")',
'("exclude" ws "(" ws predicate ws "," ws predicate ws ")")',
]
# Meta Predicates
GRAMMAR_DICTIONARY['meta'] = [
'largest', 'smallest', 'highest', 'lowest', 'longest', 'shortest', 'count', 'most', 'fewest',
'largest_one_area', 'largest_one_density', 'largest_one_population',
'smallest_one_area', 'smallest_one_density', 'smallest_one_population', 'sum'
]
GRAMMAR_DICTIONARY['largest'] = ['("largest" ws "(" ws predicate ws ")")']
GRAMMAR_DICTIONARY['smallest'] = ['("smallest" ws "(" ws predicate ws ")")']
GRAMMAR_DICTIONARY['highest'] = ['("highest" ws "(" ws predicate ws ")")']
GRAMMAR_DICTIONARY['lowest'] = ['("lowest" ws "(" ws predicate ws ")")']
GRAMMAR_DICTIONARY['longest'] = ['("longest" ws "(" ws predicate ws ")")']
GRAMMAR_DICTIONARY['shortest'] = ['("shortest" ws "(" ws predicate ws ")")']
GRAMMAR_DICTIONARY['count'] = ['("count" ws "(" ws predicate ws ")")']
GRAMMAR_DICTIONARY['most'] = ['("most" ws "(" ws predicate ws")")']
GRAMMAR_DICTIONARY['fewest'] = ['("fewest" ws "(" ws predicate ws")")']
GRAMMAR_DICTIONARY['largest_one_area'] = [
'("largest_one" ws "(" ws "area_1" ws "(" ws predicate ws ")" ws")")']
GRAMMAR_DICTIONARY['largest_one_density'] = [
'("largest_one" ws "(" ws "density_1" ws "(" ws predicate ws ")" ws")")']
GRAMMAR_DICTIONARY['largest_one_population'] = [
'("largest_one" ws "(" ws "population_1" ws "(" ws predicate ws ")" ws")")']
GRAMMAR_DICTIONARY['smallest_one_area'] = [
'("smallest_one" ws "(" ws "area_1" ws "(" ws predicate ws ")" ws")")']
GRAMMAR_DICTIONARY['smallest_one_density'] = [
'("smallest_one" ws "(" ws "density_1" ws "(" ws predicate ws ")" ws")")']
GRAMMAR_DICTIONARY['smallest_one_population'] = [
'("smallest_one" ws "(" ws "population_1" ws "(" ws predicate ws ")" ws")")']
GRAMMAR_DICTIONARY['sum'] = ['("sum" ws "(" ws predicate ws")")']
# Object
GRAMMAR_DICTIONARY['object'] = ['city', 'state', 'river', 'place', '"countryid(\'usa\')"']
GRAMMAR_DICTIONARY['city'] = [
'("cityid" ws "(" ws city_name ws "," ws state_abbre ws ")")']
GRAMMAR_DICTIONARY['state'] = ['("stateid" ws "(" ws state_name ws ")")']
GRAMMAR_DICTIONARY['river'] = ['("riverid" ws "(" ws river_name ws ")")']
GRAMMAR_DICTIONARY['place'] = ['("placeid" ws "(" ws place_name ws ")")']
# Collection
GRAMMAR_DICTIONARY['collection'] = ['all_capital_cities', 'all_cities',
'all_lakes', 'all_mountains', 'all_places', 'all_rivers', 'all_states']
GRAMMAR_DICTIONARY['all_capital_cities'] = [
'("capital" ws "(" ws "all" ws ")")', ]
GRAMMAR_DICTIONARY['all_cities'] = ['("city" ws "(" ws "all" ws ")")', ]
GRAMMAR_DICTIONARY['all_lakes'] = ['("lake" ws "(" ws "all" ws ")")', ]
GRAMMAR_DICTIONARY['all_mountains'] = ['("mountain" ws "(" ws "all" ws ")")', ]
GRAMMAR_DICTIONARY['all_places'] = ['("place" ws "(" ws "all" ws ")")', ]
GRAMMAR_DICTIONARY['all_rivers'] = ['("river" ws "(" ws "all" ws ")")', ]
GRAMMAR_DICTIONARY['all_states'] = ['("state" ws "(" ws "all" ws ")")', ]
# Relations
GRAMMAR_DICTIONARY['relation'] = [
'is_captial', 'is_city', 'is_lake', 'is_major', 'is_mountain', 'is_place',
'is_river', 'is_state', 'is_area_state', 'is_captial_country', 'is_captial_city',
'is_density_place', 'is_elevation_place', 'is_elevation_value', 'is_high_point_state', 'is_high_point_place',
'is_higher_place_2', 'is_loc_x', 'is_loc_y', 'is_longer', 'is_lower_place_2', 'is_len', 'is_next_to_state_1', 'is_next_to_state_2',
'is_population', 'is_size', 'is_traverse_river', 'is_traverse_state', 'is_low_point_state', 'is_low_point_place',
]
GRAMMAR_DICTIONARY['is_captial'] = ['("capital" ws "(" ws predicate ws ")")']
GRAMMAR_DICTIONARY['is_city'] = ['("city" ws "(" ws predicate ws ")")']
GRAMMAR_DICTIONARY['is_lake'] = ['("lake" ws "(" ws predicate ws ")")']
GRAMMAR_DICTIONARY['is_major'] = ['("major" ws "(" ws predicate ws ")")']
GRAMMAR_DICTIONARY['is_mountain'] = ['("mountain" ws "(" ws predicate ws ")")']
GRAMMAR_DICTIONARY['is_place'] = ['("place" ws "(" ws predicate ws ")")']
GRAMMAR_DICTIONARY['is_river'] = ['("river" ws "(" ws predicate ws ")")']
GRAMMAR_DICTIONARY['is_state'] = ['("state" ws "(" ws predicate ws ")")']
GRAMMAR_DICTIONARY['is_area_state'] = ['("area_1" ws "(" ws predicate ws ")")']
GRAMMAR_DICTIONARY['is_captial_country'] = [
'("capital_1" ws "(" ws predicate ws ")")']
GRAMMAR_DICTIONARY['is_captial_city'] = [
'("capital_2" ws "(" ws predicate ws ")")']
GRAMMAR_DICTIONARY['is_density_place'] = [
'("density_1" ws "(" ws predicate ws ")")']
GRAMMAR_DICTIONARY['is_elevation_place'] = [
'("elevation_1" ws "(" ws predicate ws ")")']
GRAMMAR_DICTIONARY['is_elevation_value'] = [
'("elevation_2" ws "(" ws number ws ")")']
GRAMMAR_DICTIONARY['is_high_point_state'] = [
'("high_point_1" ws "(" ws predicate ws ")")']
GRAMMAR_DICTIONARY['is_high_point_place'] = [
'("high_point_2" ws "(" ws predicate ws ")")']
GRAMMAR_DICTIONARY['is_low_point_state'] = [
'("low_point_1" ws "(" ws predicate ws ")")']
GRAMMAR_DICTIONARY['is_low_point_place'] = [
'("low_point_2" ws "(" ws predicate ws ")")']
GRAMMAR_DICTIONARY['is_higher_place_2'] = [
'("higher_2" ws "(" ws predicate ws ")")']
GRAMMAR_DICTIONARY['is_loc_x'] = ['("loc_1" ws "(" ws predicate ws ")")']
GRAMMAR_DICTIONARY['is_loc_y'] = ['("loc_2" ws "(" ws predicate ws ")")']
GRAMMAR_DICTIONARY['is_longer'] = ['("longer" ws "(" ws predicate ws ")")']
GRAMMAR_DICTIONARY['is_lower_place_2'] = [
'("lower_2" ws "(" ws predicate ws ")")']
GRAMMAR_DICTIONARY['is_len'] = ['("len" ws "(" ws predicate ws ")")']
GRAMMAR_DICTIONARY['is_next_to_state_1'] = [
'("next_to_1" ws "(" ws predicate ws ")")']
GRAMMAR_DICTIONARY['is_next_to_state_2'] = [
'("next_to_2" ws "(" ws predicate ws ")")']
GRAMMAR_DICTIONARY['is_population'] = [
'("population_1" ws "(" ws predicate ws ")")']
GRAMMAR_DICTIONARY['is_size'] = ['("size" ws "(" ws predicate ws ")")']
GRAMMAR_DICTIONARY['is_traverse_river'] = [
'("traverse_1" ws "(" ws predicate ws ")")']
GRAMMAR_DICTIONARY['is_traverse_state'] = [
'("traverse_2" ws "(" ws predicate ws ")")']
# Terminal
GRAMMAR_DICTIONARY['number'] = ['"0.0"', '"1.0"', '"0"']
GRAMMAR_DICTIONARY['city_name'] = ['"\'washington\'"', '"\'minneapolis\'"', '"\'sacramento\'"', '"\'rochester\'"', '"\'indianapolis\'"', '"\'portland\'"', '"\'new york\'"', '"\'erie\'"', '"\'san diego\'"', '"\'baton rouge\'"', '"\'miami\'"', '"\'kalamazoo\'"', '"\'durham\'"', '"\'salt lake city\'"', '"\'des moines\'"', '"\'pittsburgh\'"', '"\'riverside\'"', '"\'dover\'"', '"\'chicago\'"', '"\'albany\'"', '"\'tucson\'"', '"\'austin\'"',
'"\'san antonio\'"', '"\'houston\'"', '"\'scotts valley\'"', '"\'montgomery\'"', '"\'springfield\'"', '"\'boston\'"', '"\'boulder\'"', '"\'san francisco\'"', '"\'flint\'"', '"\'fort wayne\'"', '"\'spokane\'"', '"\'san jose\'"', '"\'tempe\'"', '"\'dallas\'"', '"\'new orleans\'"', '"\'seattle\'"', '"\'denver\'"', '"\'salem\'"', '"\'detroit\'"', '"\'plano\'"', '"\'atlanta\'"', '"\'columbus\'"']
GRAMMAR_DICTIONARY['state_abbre'] = ['"\'dc\'"', '"\'pa\'"', '"\'ga\'"', '"\'me\'"', '"\'wa\'"', '"\'tx\'"',
'"\'ma\'"', '"\'sd\'"', '"\'az\'"', '"\'mn\'"', '"\'mo\'"', '"_"']
GRAMMAR_DICTIONARY['state_name'] = ['"\'washington\'"', '"\'kansas\'"', '"\'pennsylvania\'"', '"\'new york\'"', '"\'south carolina\'"', '"\'california\'"', '"\'west virginia\'"', '"\'kentucky\'"', '"\'vermont\'"', '"\'hawaii\'"', '"\'new mexico\'"', '"\'montana\'"', '"\'illinois\'"', '"\'georgia\'"', '"\'louisiana\'"', '"\'indiana\'"', '"\'oklahoma\'"', '"\'utah\'"', '"\'arkansas\'"', '"\'michigan\'"', '"\'alaska\'"', '"\'alabama\'"', '"\'missouri\'"', '"\'wisconsin\'"', '"\'wyoming\'"',
'"\'maine\'"', '"\'florida\'"', '"\'south dakota\'"', '"\'tennessee\'"', '"\'north carolina\'"', '"\'new jersey\'"', '"\'minnesota\'"', '"\'arizona\'"', '"\'new hampshire\'"', '"\'texas\'"', '"\'colorado\'"', '"\'mississippi\'"', '"\'idaho\'"', '"\'oregon\'"', '"\'maryland\'"', '"\'north dakota\'"', '"\'nebraska\'"', '"\'rhode island\'"', '"\'ohio\'"', '"\'massachusetts\'"', '"\'virginia\'"', '"\'nevada\'"', '"\'delaware\'"', '"\'iowa\'"']
GRAMMAR_DICTIONARY['river_name'] = ['"\'chattahoochee\'"', '"\'north platte\'"', '"\'rio grande\'"', '"\'ohio\'"',
'"\'potomac\'"', '"\'missouri\'"', '"\'red\'"', '"\'colorado\'"', '"\'mississippi\'"', '"\'delaware\'"']
GRAMMAR_DICTIONARY['place_name'] = ['"\'guadalupe peak\'"', '"\'mount whitney\'"',
'"\'mount mckinley\'"', '"\'death valley\'"']
GRAMMAR_DICTIONARY["ws"] = ['~"\s*"i']
COPY_TERMINAL_SET = {'number', 'city_name', 'state_abbre',
'state_name', 'river_name', 'place_name'}
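# Illustrative sketch (not part of the original module): the rule lists above are
# parsimonious-style EBNF fragments, so one way to materialise them is to join each
# right-hand side with "/" and hand the result to parsimonious (imported above as
# _Grammar). The helper name and the sample query are illustrative only.
def _build_parsimonious_grammar(grammar_dictionary=None):
    grammar_dictionary = grammar_dictionary or GRAMMAR_DICTIONARY
    rules = ['%s = %s' % (lhs, ' / '.join(rhs)) for lhs, rhs in grammar_dictionary.items()]
    return _Grammar('\n'.join(rules))
if __name__ == '__main__':
    grammar = _build_parsimonious_grammar()
    print(grammar['statement'].parse('answer(count(state(all)))') is not None)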
| 9,398 | 63.82069 | 492 |
py
|
Unimer
|
Unimer-master/grammars/geo/lambda_calculus_grammar_2.py
|
# coding=utf8
ROOT_RULE = 'statement -> [expression]'
GRAMMAR_DICTIONARY = {}
GRAMMAR_DICTIONARY['statement'] = ['(expression ws)']
GRAMMAR_DICTIONARY['expression'] = ['(application)', '(abstraction)', '(constant)', '(variable)']
GRAMMAR_DICTIONARY['abstraction'] = ['("(" ws "lambda" wsp variable_definition wsp expression ws ")")']
GRAMMAR_DICTIONARY['application'] = ['("(" ws function ws ")")']
GRAMMAR_DICTIONARY['function'] = ['meta_predicate', 'unit_relation', 'binary_relation', 'entity_function']
GRAMMAR_DICTIONARY['meta_predicate'] = [
'("argmax:<<e,t>,<<e,i>,e>>" wsp abstraction wsp abstraction)',
'("argmin:<<e,t>,<<e,i>,e>>" wsp abstraction wsp abstraction)',
'("sum:<<e,t>,<<e,i>,i>>" wsp abstraction wsp abstraction)',
'("forall:<<e,t>,t>" wsp abstraction)',
'("=:<i,<i,t>>" wsp expression wsp expression)',
'("<:<i,<i,t>>" wsp expression wsp expression)',
'(">:<i,<i,t>>" wsp expression wsp expression)',
'("count:<<e,t>,i>" wsp abstraction)',
'("exists:<<e,t>,t>" wsp abstraction)',
'("and:<t*,t>" wsp application wsp polyvariadic_expression)',
'("or:<t*,t>" wsp application wsp polyvariadic_expression)',
'("not:<t,t>" wsp application)',
'("equals:<e,<e,t>>" wsp variable wsp expression)',
]
GRAMMAR_DICTIONARY['unit_relation'] = [
'("capital:<c,t>" wsp variable)',
'("city:<c,t>" wsp variable)',
'("state:<s,t>" wsp variable)',
'("town:<lo,t>" wsp variable)',
'("lake:<l,t>" wsp variable)',
'("river:<r,t>" wsp variable)',
'("mountain:<m,t>" wsp variable)',
'("place:<p,t>" wsp variable)',
'("major:<lo,t>" wsp variable)',
]
GRAMMAR_DICTIONARY['binary_relation'] = [
'("high_point:<e,<e,t>>" wsp variable wsp variable)',
'("capital:<s,<c,t>>" wsp variable wsp variable)',
'("population:<lo,<i,t>>" wsp variable wsp variable)',
'("next_to:<lo,<lo,t>>" wsp expression wsp expression)',
'("loc:<lo,<lo,t>>" wsp expression wsp expression)',
'("elevation:<lo,<i,t>>" wsp expression wsp expression)',
'("capital2:<s,<c,t>>" wsp expression wsp expression)',
'("density:<lo,<i,t>>" wsp expression wsp expression)',
'("named:<e,<n,t>>" wsp expression wsp expression)',
'("in:<lo,<lo,t>>" wsp expression wsp expression)',
]
GRAMMAR_DICTIONARY['entity_function'] = [
# Return entity
'("capital:<s,c>" wsp expression)',
'("the:<<e,t>,e>" wsp expression)',
'("size:<lo,i>" wsp expression)',
'("area:<lo,i>" wsp expression)',
'("high_point:<e,l>" wsp expression)',
'("population:<lo,i>" wsp expression)',
'("elevation:<lo,i>" wsp expression)',
'("density:<lo,i>" wsp expression)',
'("len:<r,i>" wsp expression)',
]
GRAMMAR_DICTIONARY['polyvariadic_expression'] = ['(application ws polyvariadic_expression)', '""']
GRAMMAR_DICTIONARY['variable'] = ['"$0"', '"$1"', '"$2"', '"$3"', '"$4"']
GRAMMAR_DICTIONARY['variable_definition'] = [
'(variable ":e")', '(variable ":i")']
GRAMMAR_DICTIONARY['constant'] = ['(state)', '(city)', '(river)', '("death_valley:lo")',
'(names)', '(place)', '(mountain)', '("usa:co")', '("0:i")']
GRAMMAR_DICTIONARY['state'] = ['"oklahoma:s"', '"mississippi:s"', '"west_virginia:s"', '"arkansas:s"', '"virginia:s"', '"vermont:s"', '"maine:s"', '"nevada:s"', '"maryland:s"', '"wisconsin:s"', '"new_york:s"', '"arizona:s"', '"ohio:s"', '"missouri:s"', '"tennessee:s"', '"pennsylvania:s"', '"massachusetts:s"', '"texas:s"', '"hawaii:s"', '"south_dakota:s"', '"illinois:s"', '"utah:s"', '"kentucky:s"', '"alabama:s"', '"new_hampshire:s"', '"new_mexico:s"', '"colorado:s"', '"rhode_island:s"', '"south_carolina:s"', '"delaware:s"', '"michigan:s"', '"new_jersey:s"', '"louisiana:s"', '"florida:s"', '"minnesota:s"', '"alaska:s"', '"north_dakota:s"', '"california:s"', '"georgia:s"', '"iowa:s"', '"idaho:s"', '"indiana:s"', '"north_carolina:s"', '"oregon:s"', '"montana:s"', '"kansas:s"', '"nebraska:s"', '"washington:s"', '"wyoming:s"']
GRAMMAR_DICTIONARY['city'] = ['"kalamazoo_mi:c"', '"san_diego_ca:c"', '"denver_co:c"', '"portland_me:c"', '"san_francisco_ca:c"', '"flint_mi:c"', '"tempe_az:c"', '"austin_tx:c"', '"des_moines_ia:c"', '"springfield_il:c"', '"springfield_mo:c"', '"baton_rouge_la:c"', '"atlanta_ga:c"', '"columbus_oh:c"', '"rochester_ny:c"', '"springfield_sd:c"', '"tucson_az:c"', '"boulder_co:c"', '"salem_or:c"', '"sacramento_ca:c"', '"detroit_mi:c"', '"san_jose_ca:c"', '"new_york_city:c"', '"indianapolis_in:c"', '"erie_pa:c"', '"san_antonio_tx:c"', '"pittsburgh_pa:c"', '"albany_ny:c"', '"portland_or:c"', '"dallas_tx:c"', '"dover_de:c"', '"boston_ma:c"', '"scotts_valley_ca:c"', '"riverside_ca:c"', '"chicago_il:c"', '"montgomery_al:c"', '"seattle_wa:c"', '"new_orleans_la:c"', '"new_york_ny:c"', '"minneapolis_mn:c"', '"fort_wayne_in:c"', '"miami_fl:c"', '"spokane_wa:c"', '"san_franciso_ca:c"', '"houston_tx:c"', '"washington_dc:c"']
GRAMMAR_DICTIONARY['river'] = ['"potomac_river:r"', '"mississippi_river:r"', '"colorado_river:r"', '"north_platte_river:r"', '"rio_grande_river:r"', '"red_river:r"', '"missouri_river:r"', '"ohio_river:r"', '"delaware_river:r"', '"chattahoochee_river:r"']
GRAMMAR_DICTIONARY['mountain'] = ['"mount_mckinley:m"', '"mount_whitney:m"', '"guadalupe_peak:m"']
GRAMMAR_DICTIONARY['place'] = ['"mount_mckinley:p"', '"mount_whitney:p"']
GRAMMAR_DICTIONARY['names'] = ['"austin:n"', '"springfield:n"', '"dallas:n"', '"salt_lake_city:n"', '"portland:n"', '"rochester:n"', '"plano:n"', '"durham:n"', '"colorado:n"']
GRAMMAR_DICTIONARY["ws"] = ['~"\s*"i']
GRAMMAR_DICTIONARY["wsp"] = ['~"\s+"i']
COPY_TERMINAL_SET = {'state', 'city', 'river', 'mountain',
'place', 'names'}
| 5,675 | 72.714286 | 922 |
py
|
Unimer
|
Unimer-master/grammars/geo/get_sql_terminals.py
|
# coding=utf8
import os
import re
import pandas as pd
from pprint import pprint
number_pattern = re.compile('[+-]?([0-9]*[.])?[0-9]+')
if __name__ == '__main__':
path = '../../data/geo/sql_data'
terminals = set()
terminal_dict = dict()
for filename in os.listdir(path):
table_name = filename.split('.')[0]
filepath = os.path.join(path, filename)
df = pd.read_csv(filepath)
for column in df.columns:
if column not in terminal_dict:
terminal_dict[column] = list()
values = df[column].tolist()
v = values[0]
if number_pattern.match(str(v).strip()):
# number
for i in values:
if '.' in str(i):
# float
terminal_dict[column].append('"%s"' % str(float(i)))
terminals.add('"%s"' % str(float(i)))
else:
terminal_dict[column].append('"%s"' % str(int(i)))
terminals.add('"%s"' % str(int(i)))
else:
# str
for i in values:
i = i.strip()
terminal_dict[column].append('"%s"' % i)
terminals.add('"%s"' % i)
# print(terminals)
for terminal, values in terminal_dict.items():
terminal_dict[terminal] = list(set(values))
pprint(terminal_dict)
| 1,456 | 32.113636 | 76 |
py
|
Unimer
|
Unimer-master/grammars/geo/typed_prolog_grammar.py
|
# coding=utf8
ROOT_RULE = 'statement -> ["answer(" Var "," Form ")"]'
GRAMMAR_DICTIONARY = {}
GRAMMAR_DICTIONARY['statement'] = ['("answer(" Var "," Form ")")']
# Variable
GRAMMAR_DICTIONARY['Var'] = ['"a"', '"b"', '"c"',
'"d"', '"e"', '"f"', '"g"', '"nv"', '"v0"', '"v1"', '"v2"',
'"v3"', '"v4"', '"v5"', '"v6"', '"v7"'] # Normalized Variable
GRAMMAR_DICTIONARY['Form'] = [
'("(" Form conjunction ")")',
'("area(" Var "," Var ")")',
'("capital(" Var ")")',
'("capital(" Var "," Var ")")',
'("city(" Var ")")',
'("country(" Var ")")',
'("state(" Var ")")',
'("lake(" Var ")")',
'("river(" Var ")")',
'("mountain(" Var ")")',
'("place(" Var ")")',
'("major(" Var ")")',
'("const(" Var "," City ")")',
'("const(" Var "," "countryid(usa)" ")")',
'("const(" Var "," Place ")")',
'("const(" Var "," River ")")',
'("const(" Var "," State ")")',
'("count(" Var "," Form "," Var ")")',
'("density(" Var "," Var ")")',
'("elevation(" Var "," Num ")")',
'("elevation(" Var "," Var ")")',
'("fewest(" Var "," Var "," Form ")")',
'("high_point(" Var "," Var ")")',
'("higher(" Var "," Var ")")',
'("highest(" Var "," Form ")")',
'("largest(" Var "," Form ")")',
'("len(" Var "," Var ")")',
'("loc(" Var "," Var ")")',
'("longer(" Var "," Var ")")',
'("longest(" Var "," Form ")")',
'("low_point(" Var "," Var ")")',
'("lower(" Var "," Var ")")',
'("lowest(" Var "," Form ")")',
'("most(" Var "," Var "," Form ")")',
'("next_to(" Var "," Var ")")',
'("not(" Form ")")',
'("population(" Var "," Var ")")',
'("shortest(" Var "," Form ")")',
'("size(" Var "," Var ")")',
'("smallest(" Var "," Form ")")',
'("sum(" Var "," Form "," Var ")")',
'("traverse(" Var "," Var ")")'
]
GRAMMAR_DICTIONARY['conjunction'] = [
'("," Form conjunction)',
'""'
]
GRAMMAR_DICTIONARY['City'] = [
'("cityid(" CityName "," StateAbbrev ")")',
'("cityid(" CityName ",_)")',
]
GRAMMAR_DICTIONARY['State'] = ['("stateid(" StateName ")")']
GRAMMAR_DICTIONARY['River'] = ['("riverid(" RiverName ")")']
GRAMMAR_DICTIONARY["ws"] = ['~"\s*"i']
GRAMMAR_DICTIONARY['Place'] = ['("placeid(" PlaceName ")")']
GRAMMAR_DICTIONARY['Num'] = ['"0.0"', '"1.0"', '"0"']
GRAMMAR_DICTIONARY['CityName'] = ['"albany"', '"tempe"', '"chicago"', '"montgomery"', '"columbus"', '"kalamazoo"', '"\'new orleans\'"', '"riverside"', '"\'fort wayne\'"', '"\'scotts valley\'"', '"boston"', '"flint"', '"dallas"', '"atlanta"', '"\'san jose\'"', '"denver"', '"plano"', '"boulder"', '"minneapolis"', '"seattle"', '"\'baton rouge\'"',
'"sacramento"', '"washington"', '"\'des moines\'"', '"rochester"', '"springfield"', '"indianapolis"', '"dover"', '"detroit"', '"tucson"', '"houston"', '"portland"', '"salem"', '"durham"', '"miami"', '"\'san diego\'"', '"\'salt lake city\'"', '"spokane"', '"austin"', '"pittsburgh"', '"erie"', '"\'new york\'"', '"\'san francisco\'"', '"\'san antonio\'"']
GRAMMAR_DICTIONARY['StateAbbrev'] = ['"_"', '"dc"', '"sd"',
'"az"', '"mo"', '"wa"', '"tx"', '"mn"', '"me"', '"ma"', '"pa"']
GRAMMAR_DICTIONARY['StateName'] = ['"\'new hampshire\'"', '"utah"', '"delaware"', '"tennessee"', '"\'new mexico\'"', '"oregon"', '"arizona"', '"iowa"',
'"georgia"', '"arkansas"', '"pennsylvania"', '"oklahoma"', '"illinois"', '"kentucky"', '"wisconsin"', '"\'new jersey\'"', '"hawaii"', '"minnesota"', '"nebraska"', '"maryland"', '"massachusetts"', '"mississippi"',
'"nevada"', '"\'south carolina\'"', '"kansas"', '"idaho"', '"michigan"', '"alabama"', '"louisiana"', '"virginia"', '"washington"', '"california"', '"alaska"', '"texas"', '"colorado"', '"missouri"', '"vermont"', '"montana"', '"florida"', '"wyoming"', '"ohio"', '"\'west virginia\'"', '"indiana"', '"\'north carolina\'"', '"\'rhode island\'"', '"maine"', '"\'new york\'"', '"\'north dakota\'"', '"\'south dakota\'"']
GRAMMAR_DICTIONARY['RiverName'] = ['"ohio"', '"\'rio grande\'"', '"delaware"', '"\'north platte\'"',
'"chattahoochee"', '"mississippi"', '"colorado"', '"missouri"', '"red"', '"potomac"']
GRAMMAR_DICTIONARY['PlaceName'] = ['"\'death valley\'"',
'"\'mount whitney\'"', '"\'mount mckinley\'"', '"\'guadalupe peak\'"']
COPY_TERMINAL_SET = {'Num', 'CityName', 'StateAbbrev', 'StateName',
'RiverName', 'PlaceName'}
| 4,619 | 51.5 | 450 |
py
|
Unimer
|
Unimer-master/grammars/geo/__init__.py
|
# coding=utf-8
| 14 | 14 | 14 |
py
|
Unimer
|
Unimer-master/grammars/geo/prolog_grammar.py
|
# coding=utf-8
"""
Prolog Grammar
"""
import copy
from typing import List, Dict
from pprint import pprint
from parsimonious.exceptions import ParseError
from parsimonious.grammar import Grammar as _Grammar
# First-order logical form
ROOT_RULE = 'statement -> ["answer(" var "," goal ")"]'
GRAMMAR_DICTIONARY = {}
GRAMMAR_DICTIONARY['statement'] = ['("answer(" var "," goal ")")']
# Goal
GRAMMAR_DICTIONARY['goal'] = [
'(unit_relation)',
'(meta)',
'("(" predicate conjunction ")")'
]
GRAMMAR_DICTIONARY['conjunction'] = [
'("," predicate conjunction)',
'""'
]
GRAMMAR_DICTIONARY['predicate'] = ['(meta)', '(unit_relation)', '(binary_relation)',
'(declaration)', '("not" declaration)', '("not((" predicate conjunction "))")']
# Meta Predicates
GRAMMAR_DICTIONARY['meta'] = [
'(largest)', '(smallest)', '(highest)', '(lowest)', '(longest)', '(shortest)', '(count)', '(most)', '(fewest)', '(sum)'
]
GRAMMAR_DICTIONARY['largest'] = [
'("largest(" var "," goal ")")']
GRAMMAR_DICTIONARY['smallest'] = [
'("smallest(" var "," goal ")")']
GRAMMAR_DICTIONARY['highest'] = [
'("highest(" var "," goal ")")']
GRAMMAR_DICTIONARY['lowest'] = [
'("lowest(" var "," goal ")")']
GRAMMAR_DICTIONARY['longest'] = [
'("longest(" var "," goal ")")']
GRAMMAR_DICTIONARY['shortest'] = [
'("shortest(" var "," goal ")")']
GRAMMAR_DICTIONARY['count'] = [
'("count(" var "," goal "," var ")")']
GRAMMAR_DICTIONARY['most'] = [
'("most(" var "," var "," goal")")']
GRAMMAR_DICTIONARY['fewest'] = [
'("fewest(" var "," var "," goal")")']
GRAMMAR_DICTIONARY['sum'] = [
'("sum(" var "," goal "," var ")")']
# Declaration
GRAMMAR_DICTIONARY['declaration'] = [
'("const(" var "," object ")")']
# Object
GRAMMAR_DICTIONARY['object'] = [
'("countryid(usa)")', '(city)', '(state)', '(river)', '(place)']
GRAMMAR_DICTIONARY['city'] = [
'("cityid(" city_name "," state_abbre ")")']
GRAMMAR_DICTIONARY['state'] = ['("stateid(" state_name ")")']
GRAMMAR_DICTIONARY['river'] = ['("riverid(" river_name ")")']
GRAMMAR_DICTIONARY['place'] = ['("placeid(" place_name ")")']
# Retrieve
GRAMMAR_DICTIONARY['retrieve'] = [
'(area)', '(len)', '(population)'
]
GRAMMAR_DICTIONARY['area'] = ['("area(" var ")")']
GRAMMAR_DICTIONARY['len'] = ['("len(" var ")")']
GRAMMAR_DICTIONARY['population'] = ['("population(" var ")")']
# Relation
GRAMMAR_DICTIONARY['unit_relation'] = [
'(is_capital)',
'(is_city)',
'(is_major)',
'(is_place)',
'(is_river)',
'(is_state)',
'(is_lake)',
'(is_mountain)',
]
GRAMMAR_DICTIONARY['is_capital'] = ['("capital(" var ")")']
GRAMMAR_DICTIONARY['is_city'] = ['("city(" var ")")']
GRAMMAR_DICTIONARY['is_major'] = ['("major(" var ")")']
GRAMMAR_DICTIONARY['is_place'] = ['("place(" var ")")']
GRAMMAR_DICTIONARY['is_river'] = ['("river(" var ")")']
GRAMMAR_DICTIONARY['is_lake'] = ['("lake(" var ")")']
GRAMMAR_DICTIONARY['is_state'] = ['("state(" var ")")']
GRAMMAR_DICTIONARY['is_mountain'] = ['("mountain(" var ")")']
GRAMMAR_DICTIONARY['binary_relation'] = [
'(is_area)',
'(is_captial_of)',
'(is_equal)',
'(is_density)',
'(is_elevation)',
'(is_high_point)',
'(is_low_point)',
'(is_higher)',
'(is_lower)',
'(is_longer)',
'(is_located_in)',
'(is_len)',
'(is_next_to)',
'(is_size)',
'(is_traverse)',
'(is_population)'
]
GRAMMAR_DICTIONARY['is_area'] = [
'("area(" var "," var ")")']
GRAMMAR_DICTIONARY['is_captial_of'] = [
'("capital(" var "," var ")")']
GRAMMAR_DICTIONARY['is_equal'] = [
'"equal(" var "," var ")"']
GRAMMAR_DICTIONARY['is_density'] = [
'"density(" var "," var ")"']
GRAMMAR_DICTIONARY['is_elevation'] = [
'("elevation(" var "," var ")")', '("elevation(" var "," literal ")")']
GRAMMAR_DICTIONARY['is_high_point'] = [
'("high_point(" var "," var ")")']
GRAMMAR_DICTIONARY['is_low_point'] = [
'("low_point(" var "," var ")")']
GRAMMAR_DICTIONARY['is_higher'] = [
'("higher(" var "," var ")")']
GRAMMAR_DICTIONARY['is_lower'] = [
'("lower(" var "," var ")")']
GRAMMAR_DICTIONARY['is_longer'] = [
'("longer(" var "," var ")")']
GRAMMAR_DICTIONARY['is_located_in'] = [
'("loc(" var "," var ")")']
GRAMMAR_DICTIONARY['is_len'] = ['("len(" var "," var ")")']
GRAMMAR_DICTIONARY['is_next_to'] = [
'("next_to(" var "," var ")")']
GRAMMAR_DICTIONARY['is_size'] = [
'("size(" var "," var ")")']
GRAMMAR_DICTIONARY['is_traverse'] = [
'("traverse(" var "," var ")")']
GRAMMAR_DICTIONARY['is_population'] = [
'("population(" var "," var ")")']
# Terminal
# Original Variable
GRAMMAR_DICTIONARY['var'] = ['"a"', '"b"', '"c"',
'"d"', '"e"', '"f"', '"g"', '"nv"', '"v0"', '"v1"', '"v2"',
'"v3"', '"v4"', '"v5"', '"v6"', '"v7"'] # Normalized Variable
GRAMMAR_DICTIONARY['literal'] = ['"0"', '"0.0"', '"1.0"']
GRAMMAR_DICTIONARY['city_name'] = ['"albany"', '"tempe"', '"chicago"', '"montgomery"', '"columbus"', '"kalamazoo"', '"\'new orleans\'"', '"riverside"', '"\'fort wayne\'"', '"\'scotts valley\'"', '"boston"', '"flint"', '"dallas"', '"atlanta"', '"\'san jose\'"', '"denver"', '"plano"', '"boulder"', '"minneapolis"', '"seattle"', '"\'baton rouge\'"',
'"sacramento"', '"washington"', '"\'des moines\'"', '"rochester"', '"springfield"', '"indianapolis"', '"dover"', '"detroit"', '"tucson"', '"houston"', '"portland"', '"salem"', '"durham"', '"miami"', '"\'san diego\'"', '"\'salt lake city\'"', '"spokane"', '"austin"', '"pittsburgh"', '"erie"', '"\'new york\'"', '"\'san francisco\'"', '"\'san antonio\'"']
GRAMMAR_DICTIONARY['state_abbre'] = ['"_"', '"dc"', '"sd"',
'"az"', '"mo"', '"wa"', '"tx"', '"mn"', '"me"', '"ma"', '"pa"']
GRAMMAR_DICTIONARY['state_name'] = ['"\'new hampshire\'"', '"utah"', '"delaware"', '"tennessee"', '"\'new mexico\'"', '"oregon"', '"arizona"', '"iowa"',
'"georgia"', '"arkansas"', '"pennsylvania"', '"oklahoma"', '"illinois"', '"kentucky"', '"wisconsin"', '"\'new jersey\'"', '"hawaii"', '"minnesota"', '"nebraska"', '"maryland"', '"massachusetts"', '"mississippi"',
'"nevada"', '"\'south carolina\'"', '"kansas"', '"idaho"', '"michigan"', '"alabama"', '"louisiana"', '"virginia"', '"washington"', '"california"', '"alaska"', '"texas"', '"colorado"', '"missouri"', '"vermont"', '"montana"', '"florida"', '"wyoming"', '"ohio"', '"\'west virginia\'"', '"indiana"', '"\'north carolina\'"', '"\'rhode island\'"', '"maine"', '"\'new york\'"', '"\'north dakota\'"', '"\'south dakota\'"']
GRAMMAR_DICTIONARY['river_name'] = ['"ohio"', '"\'rio grande\'"', '"delaware"', '"\'north platte\'"',
'"chattahoochee"', '"mississippi"', '"colorado"', '"missouri"', '"red"', '"potomac"']
GRAMMAR_DICTIONARY['place_name'] = ['"\'death valley\'"',
'"\'mount whitney\'"', '"\'mount mckinley\'"', '"\'guadalupe peak\'"']
COPY_TERMINAL_SET = {'literal', 'city_name', 'state_abbre',
'state_name', 'river_name', 'place_name'}
| 7,194 | 42.083832 | 450 |
py
|
Unimer
|
Unimer-master/grammars/geo/geo_gnn_entity_matcher.py
|
# coding=utf8
import re
import os
import copy
import json
import numpy as np
from nltk.corpus import stopwords
from typing import List, Dict
from overrides import overrides
from allennlp.data.tokenizers import Token, WordTokenizer
from allennlp.data.tokenizers.word_splitter import SpacyWordSplitter
from .geo_entity_extractor import funql_entity_extractor, lambda_calculus_entity_extractor, sql_entity_extractor
from .lambda_calculus_grammar import GRAMMAR_DICTIONARY
class GeoGNNEntityMatcher:
def __init__(self, entity_path, max_ngram: int = 6):
with open(entity_path, 'r') as f:
self._entities = json.load(f)
self._max_ngram = max_ngram
self._stop_words = set(stopwords.words('english'))
def match(self, tokens: List[Token]) -> List[Dict]:
length = len(tokens)
candidates = list()
for eidx, entity in enumerate(self._entities):
value = entity['value'].strip()
abbreviation = entity.get('abbreviation', None)
for tidx, t in enumerate(tokens):
if t.text in self._stop_words:
continue
for i in range(min(self._max_ngram, length - tidx)):
string = ' '.join([t.text for t in tokens[tidx:tidx+1+i]]).strip()
if string == value or (abbreviation is not None and string == abbreviation):
e = copy.copy(entity)
e['span_beg_idx'] = tidx
e['span_end_idx'] = tidx + 1 + i
candidates.append(e)
is_remove_other_country = any([e['type'] == 'country' and e['value'] == 'usa' for e in candidates])
valid_candidates = list()
if is_remove_other_country:
for candidate in candidates:
if candidate['type'] != 'country' or candidate['value'] == 'usa':
valid_candidates.append(candidate)
else:
valid_candidates = candidates
return valid_candidates
class GeoSQLGNNEntityMatcher(GeoGNNEntityMatcher):
@overrides
def match(self, tokens: List[Token]) -> List[Dict]:
candidates = super().match(tokens)
for number in ['1', '150000', '750']:
candidates.append({"value": number, "type": "number"})
return candidates
class GeoLambdaCalculusGNNEntityMatcher(GeoGNNEntityMatcher):
SUFFIX_MAP = {
"p": "place",
"lo": "location",
"m": "mountain",
"l": "lake",
"c": "city",
"s": "state",
"ro": "road",
"r": "river"
}
def __init__(self, entity_path, max_ngram: int = 6):
super().__init__(entity_path, max_ngram)
self.get_formatted_value()
def get_formatted_value(self):
for state in GRAMMAR_DICTIONARY['state']:
state_value = state.replace('"', "").replace(':s', "").replace("_", " ")
for entity in self._entities:
if entity['value'] == state_value and entity['type'] == 'state':
entity['formatted_value'] = state.replace('"', "")
for city in GRAMMAR_DICTIONARY['city']:
city_value = city.replace('"', "")
match = re.match('^([a-z|_]+)_([a-z]+):c$', city_value)
city_value = match.group(1).replace("_", " ")
new_entities = list()
for entity in self._entities:
if entity['value'] == city_value and entity['type'] == 'city':
if "formatted_value" in entity:
if len(new_entities) > 0:
continue
print("Add new entities: ", city, entity)
ne = copy.deepcopy(entity)
ne['formatted_value'] = city.replace('"', "")
new_entities.append(ne)
else:
entity['formatted_value'] = city.replace('"', "")
self._entities += new_entities
for river in GRAMMAR_DICTIONARY['river']:
# First preserve river and find all entities
# Then, remove river
river_value = river.replace('"', "")[:-2].replace("_", " ")
is_found = False
for entity in self._entities:
if entity['value'] == river_value and entity['type'] == 'river':
entity['formatted_value'] = river.replace('"', "")
is_found = True
if is_found:
continue
assert river_value.split()[-1] == 'river'
river_value = ' '.join(river_value.split()[:-1])
for entity in self._entities:
if entity['value'] == river_value and entity['type'] == 'river':
assert 'formatted_value' not in entity
entity['formatted_value'] = river.replace('"', "")
for mountain in GRAMMAR_DICTIONARY['mountain']:
mountain_value = mountain.replace('"', "")[:-2].replace('_', ' ')
print(mountain_value)
for entity in self._entities:
if entity['value'] == mountain_value and (entity['type'] in ['mountain', 'highest point', 'lowest point']):
                    assert 'formatted_value' not in entity
# entity['type'] = 'mountain'
entity['formatted_value'] = mountain.replace('"', "")
for place in GRAMMAR_DICTIONARY['place']:
place_value = place.replace('"', "")[:-2].replace('_', ' ')
print(place_value)
new_entities = list()
for entity in self._entities:
if entity['value'] == place_value and (entity['type'] in ['mountain', 'highest point', 'lowest point']):
if 'formatted_value' not in entity:
entity['formatted_value'] = place.replace('"', "")
print(entity)
else:
if len(new_entities) > 0:
continue
ne = copy.deepcopy(entity)
ne['formatted_value'] = place.replace('"', "")
new_entities.append(ne)
print("Add new entities: ", place, ne)
if len(new_entities):
self._entities += new_entities
for name in GRAMMAR_DICTIONARY['names']:
name_value = name.replace('"', "")[:-2].replace('_', ' ')
new_entities = list()
for entity in self._entities:
if entity['value'] == name_value:
if 'formatted_value' not in entity:
entity['formatted_value'] = name.replace('"', "")
print(entity)
else:
if len(new_entities) > 0:
continue
ne = copy.deepcopy(entity)
ne['type'] = 'name'
ne['formatted_value'] = name.replace('"', "")
new_entities.append(ne)
print("Add new entities: ", name, ne)
if len(new_entities):
self._entities += new_entities
# Country
for entity in self._entities:
if 'formatted_value' in entity:
continue
if entity['type'] == 'country':
assert entity['abbreviation'] == 'usa'
entity['formatted_value'] = 'usa:co'
elif entity['value'] == 'death valley':
entity['formatted_value'] = 'death_valley:lo'
elif entity['type'] == 'city':
entity['formatted_value'] = entity['value'].replace(' ', "_") + ":c"
elif entity['type'] == 'state':
entity['formatted_value'] = entity['value'].replace(' ', "_") + ":s"
elif entity['type'] == 'river':
if entity['value'].endswith('river'):
entity['formatted_value'] = entity['value'].replace(' ', '_') + ':r'
else:
entity['formatted_value'] = entity['value'].replace(" ", "_") + "_river:r"
elif entity['type'] in ['lowest point', 'highest point']:
entity['formatted_value'] = entity['value'].replace(' ', '_') + ':p'
elif entity['type'] == 'mountain':
entity['formatted_value'] = entity['value'].replace(' ', '_') + ':m'
elif entity['type'] == 'lake':
entity['formatted_value'] = entity['value'].replace(' ', '_') + ':l'
elif entity['type'] == 'road':
entity['formatted_value'] = entity['value'].replace(' ', '_') + ':ro'
def get_state_name_by_abbreviation(self, abbrev):
name = None
for entity in self._entities:
if entity.get('abbreviation', '') == abbrev:
name = entity['value']
assert name is not None
return name
@overrides
def match(self, tokens: List[Token]) -> List[Dict]:
candidates = super().match(tokens)
# Avoid duplicate value
for cidx1, can1 in enumerate(candidates):
for can2 in candidates[cidx1+1:]:
if can1['value'] == can2['value'] and can1['type'] == can2['type']:
suffix_1, suffix_2 = can1['formatted_value'].split(':')[-1], can2['formatted_value'].split(':')[-1]
if suffix_1 != suffix_2:
suffix_2_string = self.SUFFIX_MAP[suffix_2]
can2['value'] = can2['value'] + ' ' + suffix_2_string
else:
if suffix_1 == 'c':
match1 = re.match('^([a-z|_]+)_([a-z]+):c$', can1['formatted_value'])
state_1 = self.get_state_name_by_abbreviation(match1.group(2))
can1['value'] = can1['value'] + ' ' + state_1
match2 = re.match('^([a-z|_]+)_([a-z]+):c$', can2['formatted_value'])
state_2 = self.get_state_name_by_abbreviation(match2.group(2))
can2['value'] = can2['value'] + ' ' + state_2
return candidates
def test_entity_linking():
base_path = os.path.join('../../', 'data', 'geo')
entity_path = os.path.join(base_path, 'geo_entities.json')
matcher = GeoSQLGNNEntityMatcher(entity_path, max_ngram=6)
    tokenizer = WordTokenizer(SpacyWordSplitter())
import sys
sys.path += ['../../']
from grammars.grammar import get_grammar
from grammars.utils import get_logical_form_preprocessor
preprocessor = get_logical_form_preprocessor('geo', 'sql', normalize_var_with_de_brujin_index=False)
grammar = get_grammar('geo', 'sql')
train_data = os.path.join(base_path, 'geo_sql_question_based_train.tsv')
empty_count = 0
max_number_of_candidates = 0
numbers = list()
invalid_count = 0
with open(train_data, 'r') as f:
for lidx, line in enumerate(f):
line = line.strip()
sentence, funql = line.split('\t')
            tokens = tokenizer.tokenize(sentence)
candidates = matcher.match(tokens)
if len(candidates) > max_number_of_candidates:
max_number_of_candidates = len(candidates)
has_duplicate_entity = False
for cidx1, can1 in enumerate(candidates):
for cidx2, can2 in enumerate(candidates):
if cidx1 == cidx2:
continue
if can1['value'] == can2['value'] and can1['type'] == can2['type']:
has_duplicate_entity = True
break
if has_duplicate_entity:
break
if len(candidates) == 0:
empty_count += 1
numbers.append(len(candidates))
# Validate
processed_funql = preprocessor(funql).lower()
golden_entities = sql_entity_extractor(grammar, processed_funql)
valid = True
for ge in golden_entities:
for candidate in candidates:
compare_value = candidate['value'] if 'formatted_value' not in candidate \
else candidate['formatted_value']
if compare_value == ge or candidate.get('abbreviation', "") == ge:
break
else:
valid = False
if not valid:
invalid_count += 1
print(lidx)
print(sentence)
print(funql)
print("Number of Candidates: ", len(candidates))
print("Has Duplicate Candidates: ", has_duplicate_entity)
print(candidates)
print(golden_entities)
print("Is Valid: ", valid)
print('===\n\n')
print("Largest number of candidates: ", max_number_of_candidates)
print("Number of empty candidates: ", empty_count)
print("Averaged candidates: ", np.mean(numbers))
print("Invalid Count: ", invalid_count)
if __name__ == '__main__':
# base_path = os.path.join('../../', 'data', 'geo')
# entity_path = os.path.join(base_path, 'geo_entities.json')
#
# matcher = GeoLambdaCalculusGNNEntityMatcher(entity_path, max_ngram=6)
test_entity_linking()
| 13,433 | 41.920128 | 123 |
py
|
Unimer
|
Unimer-master/grammars/geo/sql_grammar_3.py
|
# coding=utf8
ROOT_RULE = 'statement -> [mquery]'
GRAMMAR_DICTIONARY = {}
GRAMMAR_DICTIONARY["statement"] = ['(mquery ws)']
GRAMMAR_DICTIONARY["mquery"] = ['(select_core ws)']
# SELECT
GRAMMAR_DICTIONARY["select_core"] = [
'(select_with_distinct ws select_results ws from_clause ws where_clause)',
'(select_with_distinct ws select_results ws from_clause ws groupby_clause)',
'(select_with_distinct ws select_results ws from_clause ws orderby_clause)',
'(select_with_distinct ws select_results ws from_clause)'
]
GRAMMAR_DICTIONARY["select_with_distinct"] = ['(ws "select" ws "distinct")', '(ws "select")']
GRAMMAR_DICTIONARY["select_results"] = ['(ws select_result ws "," ws select_results)', '(ws select_result)']
GRAMMAR_DICTIONARY["select_result"] = [
'(function ws selectop ws function)',
'(function wsp "as" wsp column_alias)',
'function',
'(col_ref ws selectop ws col_ref)',
'col_ref'
]
# FROM
GRAMMAR_DICTIONARY["from_clause"] = [
'(ws "from" ws table_source ws join_clauses)',
'(ws "from" ws source)']
GRAMMAR_DICTIONARY["join_clauses"] = ['(join_clause ws join_clauses)', 'join_clause']
GRAMMAR_DICTIONARY["join_clause"] = ['joinop ws table_source ws "on" ws join_condition_clause']
GRAMMAR_DICTIONARY["joinop"] = ['"join"', '"left outer join"']
GRAMMAR_DICTIONARY["join_condition_clause"] = ['(join_condition ws "and" ws join_condition_clause)', 'join_condition']
GRAMMAR_DICTIONARY["join_condition"] = ['ws col_ref ws "=" ws col_ref']
GRAMMAR_DICTIONARY["source"] = ['(ws single_source ws "," ws source)', '(ws single_source)']
GRAMMAR_DICTIONARY["single_source"] = ['table_source',
'("(" ws mquery ws ")" wsp "as" wsp table_alias)']
GRAMMAR_DICTIONARY["table_source"] = ['(table_name ws "as" ws table_alias)']
# WHERE
GRAMMAR_DICTIONARY["where_clause"] = [
'(ws _where_clause ws groupby_clause)',
'(ws _where_clause ws orderby_clause)',
'(ws _where_clause)'
]
GRAMMAR_DICTIONARY["_where_clause"] = [
'(ws "where" wsp expr ws where_conj)', '(ws "where" wsp expr)']
GRAMMAR_DICTIONARY["where_conj"] = ['(ws "and" wsp expr ws where_conj)', '(ws "and" wsp expr)',
'(ws "or" wsp expr ws where_conj)', '(ws "or" wsp expr)']
# ORDER
GRAMMAR_DICTIONARY["orderby_clause"] = [
'(ws _orderby_clause ws "limit 1")', '(ws _orderby_clause)']
GRAMMAR_DICTIONARY["_orderby_clause"] = [
'ws "order by" ws order_clause'
]
GRAMMAR_DICTIONARY["order_clause"] = ['(ordering_term ws "," ws order_clause)', 'ordering_term']
GRAMMAR_DICTIONARY["ordering_term"] = [
'(ws ordering_expr ws ordering)', '(ws ordering_expr)']
GRAMMAR_DICTIONARY["ordering"] = ['(ws "asc")', '(ws "desc")']
GRAMMAR_DICTIONARY['ordering_expr'] = ['function', 'col_ref']
# GROUP BY
GRAMMAR_DICTIONARY["groupby_clause"] = [
'(ws "group by" ws group_clause ws having_clause)',
'(ws "group by" ws group_clause ws orderby_clause)',
'(ws "group by" ws group_clause)']
GRAMMAR_DICTIONARY["group_clause"] = [
'(ws col_ref ws "," ws group_clause)', '(ws col_ref)']
# HAVING
GRAMMAR_DICTIONARY["having_clause"] = [
'(ws _having_clause ws orderby_clause)',
'(ws _having_clause)']
GRAMMAR_DICTIONARY["_having_clause"] = [
'(ws "having" wsp having_expr ws having_conj)', '(ws "having" wsp having_expr)']
GRAMMAR_DICTIONARY["having_conj"] = ['(ws "and" wsp having_expr ws having_conj)', '(ws "and" wsp having_expr)',
'(ws "or" wsp having_expr ws having_conj)', '(ws "or" wsp having_expr)']
GRAMMAR_DICTIONARY["col_ref"] = ['(table_alias ws "." ws column_name)', 'column_name', '(column_name wsp "as" wsp column_alias)']
GRAMMAR_DICTIONARY["having_expr"] = [
'(function wsp "not" wsp "in" wsp source_subq)',
'(function wsp "in" ws source_subq)',
# Binary expressions.
'(function ws binaryop ws "all" ws source_subq)',
'(function ws binaryop ws "any" ws source_subq)',
'(function ws binaryop ws source_subq)']
GRAMMAR_DICTIONARY["expr"] = [
'(col_ref wsp "not" wsp "in" wsp source_subq)',
'(col_ref wsp "in" ws source_subq)',
# Binary expressions.
'(col_ref ws binaryop ws "all" ws source_subq)',
'(col_ref ws binaryop ws "any" ws source_subq)',
'(col_ref ws binaryop ws source_subq)',
'(col_ref ws binaryop ws value)'
]
GRAMMAR_DICTIONARY["source_subq"] = ['("(" ws mquery ws ")")']
GRAMMAR_DICTIONARY["value"] = [
'col_ref',
'string'
]
GRAMMAR_DICTIONARY["function"] = [
'(fname ws "(" ws "distinct" ws arg_list_or_star ws ")")',
'(fname ws "(" ws arg_list_or_star ws ")")',
]
GRAMMAR_DICTIONARY["arg_list_or_star"] = ['col_ref', '"*"']
GRAMMAR_DICTIONARY["ws"] = ['~"\s*"i']
GRAMMAR_DICTIONARY['wsp'] = ['~"\s+"i']
GRAMMAR_DICTIONARY["table_name"] = ['"state"', '"city"', '"lake"', '"river"', '"border_info"', '"highlow"', '"mountain"']
GRAMMAR_DICTIONARY["table_alias"] = [
'"statealias0"', '"statealias1"', '"statealias2"', '"statealias3"', '"statealias4"', '"statealias5"',
'"cityalias0"', '"cityalias1"', '"cityalias2"',
'"lakealias0"', '"mountainalias0"', '"mountainalias1"',
'"riveralias0"', '"riveralias1"', '"riveralias2"', '"riveralias3"',
'"border_infoalias0"', '"border_infoalias1"', '"border_infoalias2"', '"border_infoalias3"',
'"highlowalias0"', '"highlowalias1"', '"derived_tablealias0"', '"derived_tablealias1"',
'"tmp"',
]
GRAMMAR_DICTIONARY["column_name"] = [
'"city_name"', '"population"', '"country_name"', '"state_name"', # city
'"border"', # border_info
'"highest_elevation"', '"lowest_point"', '"highest_point"', '"lowest_elevation"', # highlow
'"lake_name"', '"area"', '"country_name"', # lake
'"mountain_name"', '"mountain_altitude"', # mountain
'"river_name"', '"length"', '"traverse"', # river
'"capital"', '"density"', # state,
'"derived_fieldalias0"', '"derived_fieldalias1"'
]
GRAMMAR_DICTIONARY['column_alias'] = [
'"derived_fieldalias0"', '"derived_fieldalias1"' # custom
]
GRAMMAR_DICTIONARY["fname"] = ['"count"', '"sum"', '"max"', '"min"', '"avg"', '"all"']
# TODO(MARK): This is not tight enough. AND/OR are strictly boolean value operators.
GRAMMAR_DICTIONARY["binaryop"] = ['"+"', '"-"', '"*"', '"/"', '"="', '"!="', '"<>"',
'">="', '"<="', '">"', '"<"', '"like"', '"not like"']
GRAMMAR_DICTIONARY['selectop'] = ['"/"', '"+"', '"-"']
GRAMMAR_DICTIONARY["unaryop"] = ['"+"', '"-"', '"not"']
# Terminal
GRAMMAR_DICTIONARY["string"] = ['"\'usa\'"', 'state_name', 'city_name', 'place',
'mountain_name', '"\'red\'"', 'river_name', 'digit_value']
GRAMMAR_DICTIONARY["digit_value"] = ['"750"', '"0"', '"150000"']
GRAMMAR_DICTIONARY['state_name'] = ['"\'oregon\'"', '"\'georgia\'"', '"\'wisconsin\'"', '"\'montana\'"', '"\'colorado\'"', '"\'west virginia\'"', '"\'hawaii\'"', '"\'new hampshire\'"', '"\'washington\'"', '"\'florida\'"', '"\'north dakota\'"', '"\'idaho\'"', '"\'minnesota\'"', '"\'tennessee\'"', '"\'vermont\'"', '"\'kentucky\'"', '"\'alabama\'"', '"\'oklahoma\'"', '"\'maryland\'"', '"\'nebraska\'"', '"\'iowa\'"', '"\'kansas\'"', '"\'california\'"', '"\'wyoming\'"',
'"\'massachusetts\'"', '"\'missouri\'"', '"\'nevada\'"', '"\'south dakota\'"', '"\'utah\'"', '"\'rhode island\'"', '"\'new york\'"', '"\'new jersey\'"', '"\'indiana\'"', '"\'new mexico\'"', '"\'maine\'"', '"\'illinois\'"', '"\'louisiana\'"', '"\'michigan\'"', '"\'mississippi\'"', '"\'ohio\'"', '"\'south carolina\'"', '"\'arkansas\'"', '"\'texas\'"', '"\'virginia\'"', '"\'pennsylvania\'"', '"\'north carolina\'"', '"\'alaska\'"', '"\'arizona\'"', '"\'delaware\'"']
GRAMMAR_DICTIONARY['river_name'] = ['"\'north platte\'"',
'"\'chattahoochee\'"', '"\'rio grande\'"', '"\'potomac\'"']
GRAMMAR_DICTIONARY['mountain_name'] = ['"\'mckinley\'"', '"\'whitney\'"']
GRAMMAR_DICTIONARY['place'] = ['"\'death valley\'"',
'"\'mount mckinley\'"', '"\'guadalupe peak\'"']
GRAMMAR_DICTIONARY['city_name'] = ['"\'detroit\'"', '"\'plano\'"', '"\'des moines\'"', '"\'boston\'"', '"\'salem\'"', '"\'fort wayne\'"', '"\'houston\'"', '"\'portland\'"', '"\'montgomery\'"', '"\'minneapolis\'"', '"\'tempe\'"', '"\'boulder\'"', '"\'seattle\'"', '"\'columbus\'"', '"\'dover\'"', '"\'indianapolis\'"', '"\'san antonio\'"', '"\'albany\'"', '"\'flint\'"', '"\'chicago\'"', '"\'miami\'"',
'"\'scotts valley\'"', '"\'san francisco\'"', '"\'springfield\'"', '"\'sacramento\'"', '"\'salt lake city\'"', '"\'new orleans\'"', '"\'atlanta\'"', '"\'tucson\'"', '"\'denver\'"', '"\'riverside\'"', '"\'erie\'"', '"\'san jose\'"', '"\'durham\'"', '"\'kalamazoo\'"', '"\'baton rouge\'"', '"\'san diego\'"', '"\'pittsburgh\'"', '"\'spokane\'"', '"\'austin\'"', '"\'rochester\'"', '"\'dallas\'"']
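# A minimal sanity-check sketch: it only touches names defined above
# (ROOT_RULE and GRAMMAR_DICTIONARY), so it can be run standalone to eyeball
# the size of this grammar before handing it to the parser used elsewhere in
# this repo.
if __name__ == '__main__':
    n_productions = sum(len(alternatives) for alternatives in GRAMMAR_DICTIONARY.values())
    print('root rule     :', ROOT_RULE)
    print('non-terminals :', len(GRAMMAR_DICTIONARY))
    print('productions   :', n_productions)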
| 8,959 | 53.969325 | 502 |
py
|
Unimer
|
Unimer-master/grammars/geo/typed_funql_grammar.py
|
# coding=utf8
"""
Typed Prolog Grammar
"""
ROOT_RULE = 'statement -> [Query]'
GRAMMAR_DICTIONARY = {}
GRAMMAR_DICTIONARY['statement'] = ['(Query ws)']
GRAMMAR_DICTIONARY['Query'] = [
'("answer(" City ")")',
'("answer(" Country ")")',
'("answer(" Num ")")',
'("answer(" Place ")")',
'("answer(" State ")")',
'("answer(" River ")")',
]
# Country
GRAMMAR_DICTIONARY['Country'] = [
'("countryid(\'usa\')")',
'("country(all)")',
'("each(" Country ")")',
'("exclude(" Country "," Country ")")',
'("intersection(" Country "," Country ")")',
'("largest(" Country ")")',
'("smallest(" Country ")")',
'("loc_1(" City ")")',
'("loc_1(" Place ")")',
'("loc_1(" River ")")',
'("loc_1(" State ")")',
'("most(" Country ")")',
'("traverse_1(" River ")")',
]
# State
GRAMMAR_DICTIONARY['State'] = [
'("stateid(" StateName ")")',
'("state(" State ")")',
'("state(all)")',
'("smallest(" State ")")',
'("smallest_one(area_1(" State "))")',
'("smallest_one(density_1(" State "))")',
'("smallest_one(population_1(" State "))")',
'("largest(" State ")")',
'("largest_one(area_1(" State "))")',
'("largest_one(density_1(" State "))")',
'("largest_one(population_1(" State "))")',
'("each(" State ")")',
'("exclude(" State "," State ")")',
'("intersection(" State "," State ")")',
'("fewest(" State ")")',
'("most(" State ")")',
'("most(" Place ")")',
'("most(" River ")")',
'("most(" City ")")',
'("next_to_1(" State ")")',
'("next_to_2(" State ")")',
'("next_to_2(" River ")")',
'("traverse_1(" River ")")',
'("loc_1(" River ")")',
'("capital_2(" City ")")',
'("loc_1(" City ")")',
'("high_point_2(" Place ")")',
'("low_point_2(" Place ")")',
'("loc_1(" Place ")")',
'("loc_2(" Country ")")',
]
GRAMMAR_DICTIONARY['StateAbbrev'] = ['"\'dc\'"', '"\'pa\'"', '"\'ga\'"', '"\'me\'"', '"\'wa\'"', '"\'tx\'"',
'"\'ma\'"', '"\'sd\'"', '"\'az\'"', '"\'mn\'"', '"\'mo\'"']
GRAMMAR_DICTIONARY['StateName'] = ['"\'washington\'"', '"\'kansas\'"', '"\'pennsylvania\'"', '"\'new york\'"', '"\'south carolina\'"', '"\'california\'"', '"\'west virginia\'"', '"\'kentucky\'"', '"\'vermont\'"', '"\'hawaii\'"', '"\'new mexico\'"', '"\'montana\'"', '"\'illinois\'"', '"\'georgia\'"', '"\'louisiana\'"', '"\'indiana\'"', '"\'oklahoma\'"', '"\'utah\'"', '"\'arkansas\'"', '"\'michigan\'"', '"\'alaska\'"', '"\'alabama\'"', '"\'missouri\'"', '"\'wisconsin\'"', '"\'wyoming\'"',
'"\'maine\'"', '"\'florida\'"', '"\'south dakota\'"', '"\'tennessee\'"', '"\'north carolina\'"', '"\'new jersey\'"', '"\'minnesota\'"', '"\'arizona\'"', '"\'new hampshire\'"', '"\'texas\'"', '"\'colorado\'"', '"\'mississippi\'"', '"\'idaho\'"', '"\'oregon\'"', '"\'maryland\'"', '"\'north dakota\'"', '"\'nebraska\'"', '"\'rhode island\'"', '"\'ohio\'"', '"\'massachusetts\'"', '"\'virginia\'"', '"\'nevada\'"', '"\'delaware\'"', '"\'iowa\'"']
# City
GRAMMAR_DICTIONARY['City'] = [
'("city(all)")',
'("city(" City ")")',
'("loc_2(" State ws ")")',
'("loc_2(" Country ")")',
'("capital(" City ")")',
'("capital(" Place ")")',
'("capital(all)")',
'("capital_1(" Country ")")',
'("capital_1(" State ")")',
'("cityid(" CityName "," StateAbbrev ")")',
'("cityid(" CityName ",_)")',
'("each(" City ")")',
'("exclude(" City "," City ")")',
'("intersection(" City "," City ")")',
'("fewest(" City ")")',
'("largest(" City ")")',
'("largest_one(density_1(" City "))")',
'("largest_one(population_1(" City "))")',
'("largest_one(density_1(" City "))")',
'("smallest(" City ")")',
'("smallest_one(population_1(" City "))")',
'("loc_1(" Place ")")',
'("major(" City ")")',
'("most(" City ")")',
'("traverse_1(" River ")")',
]
GRAMMAR_DICTIONARY['CityName'] = ['"\'washington\'"', '"\'minneapolis\'"', '"\'sacramento\'"', '"\'rochester\'"', '"\'indianapolis\'"', '"\'portland\'"', '"\'new york\'"', '"\'erie\'"', '"\'san diego\'"', '"\'baton rouge\'"', '"\'miami\'"', '"\'kalamazoo\'"', '"\'durham\'"', '"\'salt lake city\'"', '"\'des moines\'"', '"\'pittsburgh\'"', '"\'riverside\'"', '"\'dover\'"', '"\'chicago\'"', '"\'albany\'"', '"\'tucson\'"', '"\'austin\'"',
'"\'san antonio\'"', '"\'houston\'"', '"\'scotts valley\'"', '"\'montgomery\'"', '"\'springfield\'"', '"\'boston\'"', '"\'boulder\'"', '"\'san francisco\'"', '"\'flint\'"', '"\'fort wayne\'"', '"\'spokane\'"', '"\'san jose\'"', '"\'tempe\'"', '"\'dallas\'"', '"\'new orleans\'"', '"\'seattle\'"', '"\'denver\'"', '"\'salem\'"', '"\'detroit\'"', '"\'plano\'"', '"\'atlanta\'"', '"\'columbus\'"']
# Num
GRAMMAR_DICTIONARY['Num'] = [
'(Digit)',
'("area_1(" City ")")',
'("area_1(" Country ")")',
'("area_1(" Place ")")',
'("area_1(" State ")")',
'("count(" City ")")',
'("count(" Country ")")',
'("count(" Place ")")',
'("count(" River ")")',
'("count(" State ")")',
'("density_1(" City ")")',
'("density_1(" Country ")")',
'("density_1(" State ")")',
'("elevation_1(" Place ")")',
'("population_1(" City ")")',
'("population_1(" Country ")")',
'("population_1(" State ")")',
'("size(" City ")")',
'("size(" Country ")")',
'("size(" State ")")',
'("smallest(" Num ")")',
'("sum(" Num ")")',
'("len(" River ")")'
]
GRAMMAR_DICTIONARY['Digit'] = ['"0.0"', '"1.0"', '"0"']
# Place
GRAMMAR_DICTIONARY['Place'] = [
'("loc_2(" City ")")',
'("loc_2(" State ")")',
'("loc_2(" Country ws")")',
'("each(" Place ")")',
'("elevation_2(" Num ")")',
'("exclude(" Place "," Place ")")',
'("intersection(" Place "," Place ")")',
'("fewest(" Place ")")',
# '("most(" Place ")")',
'("largest(" Place ")")',
'("smallest(" Place ")")',
'("highest(" Place ")")',
'("lowest(" Place ")")',
'("high_point_1(" State ")")',
'("low_point_1(" State ")")',
'("higher_1(" Place ")")',
'("higher_2(" Place ")")',
'("lower_1(" Place ")")',
'("lower_2(" Place ")")',
'("lake(" Place ")")',
'("lake(all)")',
'("mountain(" Place ")")',
'("mountain(all)")',
'("place(" Place ")")',
'("place(all)")',
'("placeid(" PlaceName ")")',
'("major(" Place ")")'
]
GRAMMAR_DICTIONARY['PlaceName'] = ['"\'guadalupe peak\'"', '"\'mount whitney\'"',
'"\'mount mckinley\'"', '"\'death valley\'"']
# River
GRAMMAR_DICTIONARY['River'] = [
'("river(" River ")")',
'("loc_2(" State ")")',
'("loc_2(" Country ")")',
'("each(" River ")")',
'("exclude(" River "," River ")")',
'("intersection(" River "," River ")")',
'("fewest(" River ")")',
'("longer(" River ")")',
'("longest(" River ")")',
'("major(" River ")")',
# '("most(" River ")")',
'("most(" State ws")")',
'("river(all)")',
'("riverid(" RiverName ")")',
'("shortest(" River ")")',
'("traverse_2(" City ")")',
'("traverse_2(" Country ")")',
'("traverse_2(" State ")")',
]
GRAMMAR_DICTIONARY['RiverName'] = ['"\'chattahoochee\'"', '"\'north platte\'"', '"\'rio grande\'"', '"\'ohio\'"',
'"\'potomac\'"', '"\'missouri\'"', '"\'red\'"', '"\'colorado\'"', '"\'mississippi\'"', '"\'delaware\'"']
GRAMMAR_DICTIONARY["ws"] = ['~"\s*"i']
COPY_TERMINAL_SET = {'RiverName', 'PlaceName', 'Digit', 'CityName',
'StateName', 'StateAbbrev'}
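# A small self-check sketch, using only names defined in this file: every
# copy terminal should itself be a non-terminal of the grammar. For
# illustration, this is the kind of FunQL query the grammar is meant to
# cover (borrowed from the extractor tests elsewhere in this repo):
#   answer(count(intersection(state(loc_2(countryid('usa'))), traverse_1(shortest(river(all))))))
if __name__ == '__main__':
    undefined = [nt for nt in COPY_TERMINAL_SET if nt not in GRAMMAR_DICTIONARY]
    assert not undefined, 'copy terminals without a rule: %s' % undefined
    print('non-terminals :', len(GRAMMAR_DICTIONARY))
    print('copy terminals:', sorted(COPY_TERMINAL_SET))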
| 7,591 | 38.336788 | 491 |
py
|
Unimer
|
Unimer-master/grammars/geo/geo_entity_extractor.py
|
# coding=utf8
import os
from allennlp.data.tokenizers import Token, WordTokenizer
from allennlp.data.tokenizers.word_splitter import SpacyWordSplitter
def funql_entity_extractor(grammar, funql):
"""
    :param grammar: FunQL grammar 1, e.g. get_grammar('geo', 'funql')
    :param funql: preprocessed FunQL logical form
    :return: set of entity strings extracted from the applied production rules
"""
applied_production_rules = grammar.parse(funql)
entities = set()
for rule in applied_production_rules:
if rule.lhs == 'object' and len(rule.rhs_nonterminal) == 0:
# country
entities.add('usa')
elif rule.lhs in grammar.copy_terminal_set:
rhs = rule.rhs.replace('"', "").replace("\'", "").replace('[', "").replace("]", "")
if rhs == '_':
continue
entities.add(rhs)
return entities
def prolog_entity_extractor(grammar, prolog):
"""
    :param grammar: Prolog Grammar 1, e.g. get_grammar('geo', 'prolog')
    :param prolog: preprocessed Prolog logical form
    :return: set of entity strings extracted from the applied production rules
"""
applied_production_rules = grammar.parse(prolog)
entities = set()
for rule in applied_production_rules:
if rule.lhs == 'object' and len(rule.rhs_nonterminal) == 0:
# country
entities.add('usa')
elif rule.lhs in grammar.copy_terminal_set:
rhs = rule.rhs.replace('"', "").replace("\'", "").replace('[', "").replace("]", "")
if rhs == '_':
continue
entities.add(rhs)
return entities
def lambda_calculus_entity_extractor(grammar, lc):
"""
    :param grammar: Lambda Calculus Grammar 1, e.g. get_grammar('geo', 'lambda')
    :param lc: preprocessed lambda-calculus logical form
    :return: set of entity strings extracted from the applied production rules
"""
applied_production_rules = grammar.parse(lc)
entities = set()
for rule in applied_production_rules:
        if rule.lhs == 'constant' and len(rule.rhs_nonterminal) == 0:
            # constant entity, e.g. a city or state name
            entities.add(rule.rhs.replace('"', "").replace('(', '').replace(')', '').replace("\'", "").replace('[', "").replace("]", ""))
elif rule.lhs in grammar.copy_terminal_set:
rhs = rule.rhs.replace('"', "").replace("\'", "").replace('[', "").replace("]", "")
if rhs == '_':
continue
entities.add(rhs)
return entities
def sql_entity_extractor(grammar, lc):
"""
    :param grammar: SQL Grammar 1, e.g. get_grammar('geo', 'sql')
    :param lc: preprocessed SQL query
    :return: set of entity strings extracted from the applied production rules
"""
applied_production_rules = grammar.parse(lc)
entities = set()
for rule in applied_production_rules:
if rule.lhs in grammar.copy_terminal_set:
rhs = rule.rhs.replace('"', "").replace("\'", "").replace('[', "").replace("]", "")
entities.add(rhs)
return entities
def replace_funql_entity(grammar, funql, funql_tokens, candidates):
entities = funql_entity_extractor(grammar, funql)
replaced_tokens = list()
is_valid = True
for token in funql_tokens:
text = token.text.replace("'", "")
if text in entities:
# entity
for candidate in candidates:
if candidate['value'] == text or \
('abbreviation' in candidate and candidate['abbreviation'] == text):
replaced_tokens.append(Token('@entity_%d' % candidate['index']))
break
else:
is_valid = False
replaced_tokens.append(token)
else:
replaced_tokens.append(token)
return is_valid, replaced_tokens
def replace_prolog_entity(grammar, prolog, prolog_tokens, candidates):
entities = prolog_entity_extractor(grammar, prolog)
replaced_tokens = list()
is_valid = True
for token in prolog_tokens:
text = token.text.replace("'", "")
if text in entities:
# entity
for candidate in candidates:
if candidate['value'] == text or \
('abbreviation' in candidate and candidate['abbreviation'] == text):
replaced_tokens.append(Token('@entity_%d' % candidate['index']))
break
else:
is_valid = False
replaced_tokens.append(token)
else:
replaced_tokens.append(token)
return is_valid, replaced_tokens
def replace_lambda_calculus_entity(grammar, lc, lc_tokens, candidates):
entities = lambda_calculus_entity_extractor(grammar, lc)
replaced_tokens = list()
is_valid = True
for token in lc_tokens:
text = token.text.replace("'", "")
if text in entities:
# entity
for candidate in candidates:
if candidate['formatted_value'] == text:
replaced_tokens.append(Token('@entity_%d' % candidate['index']))
break
else:
is_valid = False
replaced_tokens.append(token)
else:
replaced_tokens.append(token)
return is_valid, replaced_tokens
def replace_sql_entity(grammar, sql, sql_tokens, candidates):
entities = sql_entity_extractor(grammar, sql)
replaced_tokens = list()
is_valid = True
for token in sql_tokens:
text = token.text.replace("'", "").replace('"', "")
if text in entities:
# entity
for candidate in candidates:
if candidate['value'] == text or \
('abbreviation' in candidate and candidate['abbreviation'] == text):
replaced_tokens.append(Token('@entity_%d' % candidate['index']))
break
else:
is_valid = False
replaced_tokens.append(token)
else:
replaced_tokens.append(token)
return is_valid, replaced_tokens
def test_funql_entity_extractor():
import sys
sys.path += ['../../']
from grammars.grammar import get_grammar
from grammars.utils import get_logical_form_preprocessor
preprocessor = get_logical_form_preprocessor('geo', 'funql')
grammar = get_grammar('geo', 'funql')
funql = preprocessor(
"answer(count(intersection(state(loc_2(countryid('usa'))), traverse_1(shortest(river(all))))))")
entities = funql_entity_extractor(grammar, funql)
print(entities)
def test_funql_entity_replacer():
import sys
sys.path += ['../../']
from grammars.grammar import get_grammar
from grammars.utils import get_logical_form_preprocessor, get_logical_form_tokenizer
preprocessor = get_logical_form_preprocessor('geo', 'funql')
grammar = get_grammar('geo', 'funql')
funql = preprocessor(
"answer(count(intersection(state(loc_2(countryid('usa'))), traverse_1(shortest(river(all))))))")
# Test Replace
funql = preprocessor("answer(len(longest(river(loc_2(stateid('california'))))))")
funql_tokenizer = get_logical_form_tokenizer('geo', 'funql')
funql_tokens = funql_tokenizer.tokenize(funql)
question = 'how long is the longest river in california'
question_tokenizer = WordTokenizer(SpacyWordSplitter())
question_tokens = question_tokenizer.tokenize(question)
from geo_gnn_entity_matcher import GeoGNNEntityMatcher
base_path = os.path.join('../../', 'data', 'geo')
entity_path = os.path.join(base_path, 'geo_entities.json')
matcher = GeoGNNEntityMatcher(entity_path, max_ngram=6)
candidates = matcher.match(question_tokens)
for can_idx, can in enumerate(candidates):
can['index'] = can_idx
is_valid, replaced_tokens = replace_funql_entity(grammar, funql, funql_tokens, candidates)
print(funql_tokens)
print(is_valid)
print(replaced_tokens)
def test_prolog_entity_extractor():
import sys
sys.path += ['../../']
from grammars.grammar import get_grammar
from grammars.utils import get_logical_form_preprocessor
preprocessor = get_logical_form_preprocessor('geo', 'prolog')
grammar = get_grammar('geo', 'prolog')
prolog = preprocessor("answer(A,(capital(A),loc(A,B),state(B),loc(C,B),city(C),const(C,cityid(durham,_))))")
    entities = prolog_entity_extractor(grammar, prolog)
print(entities)
def test_prolog_entity_replacer():
import sys
sys.path += ['../../']
from grammars.grammar import get_grammar
from grammars.utils import get_logical_form_preprocessor, get_logical_form_tokenizer
preprocessor = get_logical_form_preprocessor('geo', 'prolog', normalize_var_with_de_brujin_index=True)
grammar = get_grammar('geo', 'prolog')
prolog = preprocessor(
"answer(A,(capital(A),loc(A,B),state(B),loc(C,B),city(C),const(C,cityid(durham,_))))",
).lower()
# Test Replace
prolog_tokenizer = get_logical_form_tokenizer('geo', 'prolog')
prolog_tokens = prolog_tokenizer.tokenize(prolog)
question = 'what is the capital of states that have cities named durham ?'
question_tokenizer = WordTokenizer(SpacyWordSplitter())
question_tokens = question_tokenizer.tokenize(question)
from geo_gnn_entity_matcher import GeoGNNEntityMatcher
base_path = os.path.join('../../', 'data', 'geo')
entity_path = os.path.join(base_path, 'geo_entities.json')
matcher = GeoGNNEntityMatcher(entity_path, max_ngram=6)
candidates = matcher.match(question_tokens)
for can_idx, can in enumerate(candidates):
can['index'] = can_idx
    is_valid, replaced_tokens = replace_prolog_entity(grammar, prolog, prolog_tokens, candidates)
print(prolog_tokens)
print(is_valid)
print(replaced_tokens)
def test_lambda_calculus_entity_replacer():
import sys
sys.path += ['../../']
from grammars.grammar import get_grammar
from grammars.utils import get_logical_form_preprocessor, get_logical_form_tokenizer
preprocessor = get_logical_form_preprocessor('geo', 'lambda')
grammar = get_grammar('geo', 'lambda')
lc = preprocessor(
"(lambda $0:e (and:<t*,t> (major:<lo,t> $0) (city:<c,t> $0) (loc:<lo,<lo,t>> $0 alaska:s)))",
)
# Test Replace
lc_tokenizer = get_logical_form_tokenizer('geo', 'lambda')
lc_tokens = lc_tokenizer.tokenize(lc)
question = 'what are the major cities in alaska'
question_tokenizer = WordTokenizer(SpacyWordSplitter())
question_tokens = question_tokenizer.tokenize(question)
from geo_gnn_entity_matcher import GeoLambdaCalculusGNNEntityMatcher
base_path = os.path.join('../../', 'data', 'geo')
entity_path = os.path.join(base_path, 'geo_entities.json')
matcher = GeoLambdaCalculusGNNEntityMatcher(entity_path, max_ngram=6)
candidates = matcher.match(question_tokens)
for can_idx, can in enumerate(candidates):
can['index'] = can_idx
is_valid, replaced_tokens = replace_lambda_calculus_entity(grammar, lc, lc_tokens, candidates)
print(lc_tokens)
print(is_valid)
print(replaced_tokens)
def test_sql_entity_replacer():
import sys
sys.path += ['../../']
from grammars.grammar import get_grammar
from grammars.utils import get_logical_form_preprocessor, get_logical_form_tokenizer
preprocessor = get_logical_form_preprocessor('geo', 'sql')
grammar = get_grammar('geo', 'sql')
sql = preprocessor(
'SELECT CITYalias0.POPULATION FROM CITY AS CITYalias0 WHERE CITYalias0.CITY_NAME = "erie" AND CITYalias0.STATE_NAME = "pennsylvania" ;')
# Test Replace
sql_tokenizer = get_logical_form_tokenizer('geo', 'sql')
sql_tokens = sql_tokenizer.tokenize(sql)
question = 'what is the population of erie pennsylvania'
question_tokenizer = WordTokenizer(SpacyWordSplitter())
question_tokens = question_tokenizer.tokenize(question)
from geo_gnn_entity_matcher import GeoGNNEntityMatcher
base_path = os.path.join('../../', 'data', 'geo')
entity_path = os.path.join(base_path, 'geo_entities.json')
matcher = GeoGNNEntityMatcher(entity_path, max_ngram=6)
candidates = matcher.match(question_tokens)
for can_idx, can in enumerate(candidates):
can['index'] = can_idx
is_valid, replaced_tokens = replace_sql_entity(grammar, sql, sql_tokens, candidates)
print(sql_tokens)
print("Is Valid: ", is_valid)
print(replaced_tokens)
def test_sql_entity_extractor():
import sys
sys.path += ['../../']
from grammars.grammar import get_grammar
from grammars.utils import get_logical_form_preprocessor
preprocessor = get_logical_form_preprocessor('geo', 'sql')
grammar = get_grammar('geo', 'sql')
print(grammar.copy_terminal_set)
sql = preprocessor('SELECT CITYalias0.POPULATION FROM CITY AS CITYalias0 WHERE CITYalias0.CITY_NAME = "erie" AND CITYalias0.STATE_NAME = "pennsylvania" ;')
entities = sql_entity_extractor(grammar, sql)
print(entities)
if __name__ == '__main__':
test_sql_entity_replacer()
| 12,755 | 36.517647 | 159 |
py
|
Unimer
|
Unimer-master/grammars/geo/sql_grammar_2.py
|
# coding=utf8
ROOT_RULE = 'statement -> [mquery]'
GRAMMAR_DICTIONARY = {}
GRAMMAR_DICTIONARY["statement"] = ['(mquery ws)']
GRAMMAR_DICTIONARY["mquery"] = ['(query ws)']
GRAMMAR_DICTIONARY["query"] = ['(ws select_core ws groupby_clause ws orderby_clause ws "limit 1")',
'(ws select_core ws groupby_clause ws orderby_clause)',
'(ws select_core ws orderby_clause ws "limit 1")',
'(ws select_core ws groupby_clause)',
'(ws select_core ws orderby_clause)',
'(ws select_core)']
# SELECT
GRAMMAR_DICTIONARY["select_core"] = ['(select_with_distinct ws select_results ws from_clause ws where_clause)',
'(select_with_distinct ws select_results ws from_clause)']
GRAMMAR_DICTIONARY["select_with_distinct"] = ['(ws "select" ws "distinct")', '(ws "select")']
GRAMMAR_DICTIONARY["select_results"] = ['(ws select_result ws "," ws select_results)', '(ws select_result)']
GRAMMAR_DICTIONARY["select_result"] = [
'(function ws selectop ws function)',
'(function wsp "as" wsp column_alias)',
'function',
'(col_ref ws selectop ws col_ref)',
'col_ref'
]
# FROM
GRAMMAR_DICTIONARY["from_clause"] = [
'(ws "from" ws table_source ws join_clauses)',
'(ws "from" ws source)']
GRAMMAR_DICTIONARY["join_clauses"] = ['(join_clause ws join_clauses)', 'join_clause']
GRAMMAR_DICTIONARY["join_clause"] = ['joinop ws table_source ws "on" ws join_condition_clause']
GRAMMAR_DICTIONARY["joinop"] = ['"join"', '"left outer join"']
GRAMMAR_DICTIONARY["join_condition_clause"] = ['(join_condition ws "and" ws join_condition_clause)', 'join_condition']
GRAMMAR_DICTIONARY["join_condition"] = ['ws col_ref ws "=" ws col_ref']
GRAMMAR_DICTIONARY["source"] = ['(ws single_source ws "," ws source)', '(ws single_source)']
GRAMMAR_DICTIONARY["single_source"] = ['table_source',
'("(" ws mquery ws ")" wsp "as" wsp table_alias)']
GRAMMAR_DICTIONARY["table_source"] = ['(table_name ws "as" ws table_alias)']
# ORDER
GRAMMAR_DICTIONARY["orderby_clause"] = ['ws "order" ws "by" ws order_clause']
GRAMMAR_DICTIONARY["order_clause"] = ['(ordering_term ws "," ws order_clause)', 'ordering_term']
GRAMMAR_DICTIONARY["ordering_term"] = [
'(ws ordering_expr ws ordering)', '(ws ordering_expr)']
GRAMMAR_DICTIONARY["ordering"] = ['(ws "asc")', '(ws "desc")']
GRAMMAR_DICTIONARY['ordering_expr'] = ['function', 'col_ref']
# GROUP BY
GRAMMAR_DICTIONARY["groupby_clause"] = ['(ws "group" ws "by" ws group_clause ws having_clause)',
'(ws "group" ws "by" ws group_clause)']
GRAMMAR_DICTIONARY["group_clause"] = [
'(ws col_ref ws "," ws group_clause)', '(ws col_ref)']
# WHERE
GRAMMAR_DICTIONARY["where_clause"] = ['(ws "where" wsp expr ws where_conj)', '(ws "where" wsp expr)']
GRAMMAR_DICTIONARY["where_conj"] = ['(ws "and" wsp expr ws where_conj)', '(ws "and" wsp expr)',
'(ws "or" wsp expr ws where_conj)', '(ws "or" wsp expr)']
# HAVING
GRAMMAR_DICTIONARY["having_clause"] = [
'(ws "having" wsp having_expr ws having_conj)', '(ws "having" wsp having_expr)']
GRAMMAR_DICTIONARY["having_conj"] = ['(ws "and" wsp having_expr ws having_conj)', '(ws "and" wsp having_expr)',
'(ws "or" wsp having_expr ws having_conj)', '(ws "or" wsp having_expr)']
GRAMMAR_DICTIONARY["col_ref"] = ['(table_alias ws "." ws column_name)', 'column_name', '(column_name wsp "as" wsp column_alias)']
GRAMMAR_DICTIONARY["having_expr"] = [
'(function wsp "not" wsp "in" wsp source_subq)',
'(function wsp "in" ws source_subq)',
# Binary expressions.
'(function ws binaryop ws "all" ws source_subq)',
'(function ws binaryop ws "any" ws source_subq)',
'(function ws binaryop ws source_subq)']
GRAMMAR_DICTIONARY["expr"] = [
'(col_ref wsp "not" wsp "in" wsp source_subq)',
'(col_ref wsp "in" ws source_subq)',
# Binary expressions.
'(col_ref ws binaryop ws "all" ws source_subq)',
'(col_ref ws binaryop ws "any" ws source_subq)',
'(col_ref ws binaryop ws source_subq)',
'(col_ref ws binaryop ws value)'
]
GRAMMAR_DICTIONARY["source_subq"] = ['("(" ws query ws ")")']
GRAMMAR_DICTIONARY["value"] = [
'col_ref',
'string'
]
GRAMMAR_DICTIONARY["function"] = [
'(fname ws "(" ws "distinct" ws arg_list_or_star ws ")")',
'(fname ws "(" ws arg_list_or_star ws ")")',
]
GRAMMAR_DICTIONARY["arg_list_or_star"] = ['col_ref', '"*"']
GRAMMAR_DICTIONARY["ws"] = ['~"\s*"i']
GRAMMAR_DICTIONARY['wsp'] = ['~"\s+"i']
GRAMMAR_DICTIONARY["table_name"] = ['"state"', '"city"', '"lake"', '"river"', '"border_info"', '"highlow"', '"mountain"']
GRAMMAR_DICTIONARY["table_alias"] = [
'"statealias0"', '"statealias1"', '"statealias2"', '"statealias3"', '"statealias4"', '"statealias5"',
'"cityalias0"', '"cityalias1"', '"cityalias2"',
'"lakealias0"', '"mountainalias0"', '"mountainalias1"',
'"riveralias0"', '"riveralias1"', '"riveralias2"', '"riveralias3"',
'"border_infoalias0"', '"border_infoalias1"', '"border_infoalias2"', '"border_infoalias3"',
'"highlowalias0"', '"highlowalias1"', '"derived_tablealias0"', '"derived_tablealias1"',
'"tmp"',
]
GRAMMAR_DICTIONARY["column_name"] = [
'"city_name"', '"population"', '"country_name"', '"state_name"', # city
'"border"', # border_info
'"highest_elevation"', '"lowest_point"', '"highest_point"', '"lowest_elevation"', # highlow
'"lake_name"', '"area"', '"country_name"', # lake
'"mountain_name"', '"mountain_altitude"', # mountain
'"river_name"', '"length"', '"traverse"', # river
'"capital"', '"density"', # state,
'"derived_fieldalias0"', '"derived_fieldalias1"'
]
GRAMMAR_DICTIONARY['column_alias'] = [
'"derived_fieldalias0"', '"derived_fieldalias1"' # custom
]
GRAMMAR_DICTIONARY["fname"] = ['"count"', '"sum"', '"max"', '"min"', '"avg"', '"all"']
# TODO(MARK): This is not tight enough. AND/OR are strictly boolean value operators.
GRAMMAR_DICTIONARY["binaryop"] = ['"+"', '"-"', '"*"', '"/"', '"="', '"!="', '"<>"',
'">="', '"<="', '">"', '"<"', '"like"', '"not like"']
GRAMMAR_DICTIONARY['selectop'] = ['"/"', '"+"', '"-"']
GRAMMAR_DICTIONARY["unaryop"] = ['"+"', '"-"', '"not"']
# Terminal
GRAMMAR_DICTIONARY["string"] = ['"\'usa\'"', 'state_name', 'city_name', 'place',
'mountain_name', '"\'red\'"', 'river_name', 'digit_value']
GRAMMAR_DICTIONARY["digit_value"] = ['"750"', '"0"', '"150000"']
GRAMMAR_DICTIONARY['state_name'] = ['"\'oregon\'"', '"\'georgia\'"', '"\'wisconsin\'"', '"\'montana\'"', '"\'colorado\'"', '"\'west virginia\'"', '"\'hawaii\'"', '"\'new hampshire\'"', '"\'washington\'"', '"\'florida\'"', '"\'north dakota\'"', '"\'idaho\'"', '"\'minnesota\'"', '"\'tennessee\'"', '"\'vermont\'"', '"\'kentucky\'"', '"\'alabama\'"', '"\'oklahoma\'"', '"\'maryland\'"', '"\'nebraska\'"', '"\'iowa\'"', '"\'kansas\'"', '"\'california\'"', '"\'wyoming\'"',
'"\'massachusetts\'"', '"\'missouri\'"', '"\'nevada\'"', '"\'south dakota\'"', '"\'utah\'"', '"\'rhode island\'"', '"\'new york\'"', '"\'new jersey\'"', '"\'indiana\'"', '"\'new mexico\'"', '"\'maine\'"', '"\'illinois\'"', '"\'louisiana\'"', '"\'michigan\'"', '"\'mississippi\'"', '"\'ohio\'"', '"\'south carolina\'"', '"\'arkansas\'"', '"\'texas\'"', '"\'virginia\'"', '"\'pennsylvania\'"', '"\'north carolina\'"', '"\'alaska\'"', '"\'arizona\'"', '"\'delaware\'"']
GRAMMAR_DICTIONARY['river_name'] = ['"\'north platte\'"',
'"\'chattahoochee\'"', '"\'rio grande\'"', '"\'potomac\'"']
GRAMMAR_DICTIONARY['mountain_name'] = ['"\'mckinley\'"', '"\'whitney\'"']
GRAMMAR_DICTIONARY['place'] = ['"\'death valley\'"',
'"\'mount mckinley\'"', '"\'guadalupe peak\'"']
GRAMMAR_DICTIONARY['city_name'] = ['"\'detroit\'"', '"\'plano\'"', '"\'des moines\'"', '"\'boston\'"', '"\'salem\'"', '"\'fort wayne\'"', '"\'houston\'"', '"\'portland\'"', '"\'montgomery\'"', '"\'minneapolis\'"', '"\'tempe\'"', '"\'boulder\'"', '"\'seattle\'"', '"\'columbus\'"', '"\'dover\'"', '"\'indianapolis\'"', '"\'san antonio\'"', '"\'albany\'"', '"\'flint\'"', '"\'chicago\'"', '"\'miami\'"',
'"\'scotts valley\'"', '"\'san francisco\'"', '"\'springfield\'"', '"\'sacramento\'"', '"\'salt lake city\'"', '"\'new orleans\'"', '"\'atlanta\'"', '"\'tucson\'"', '"\'denver\'"', '"\'riverside\'"', '"\'erie\'"', '"\'san jose\'"', '"\'durham\'"', '"\'kalamazoo\'"', '"\'baton rouge\'"', '"\'san diego\'"', '"\'pittsburgh\'"', '"\'spokane\'"', '"\'austin\'"', '"\'rochester\'"', '"\'dallas\'"']
COPY_TERMINAL_SET = {'digit_value', 'state_name', 'river_name', 'mountain_name',
'place', 'city_name'}
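# A hedged sanity-check sketch, using only names defined in this file: each
# copyable terminal category should expose at least one literal alternative,
# since those literals are what an entity copy mechanism over this grammar
# selects from.
if __name__ == '__main__':
    for name in sorted(COPY_TERMINAL_SET):
        assert name in GRAMMAR_DICTIONARY and GRAMMAR_DICTIONARY[name], name
        print('%-14s %3d literal alternatives' % (name, len(GRAMMAR_DICTIONARY[name])))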
| 9,005 | 57.480519 | 502 |
py
|
Unimer
|
Unimer-master/grammars/geo/sql_grammar.py
|
# coding=utf8
ROOT_RULE = 'statement -> [mquery]'
GRAMMAR_DICTIONARY = {}
GRAMMAR_DICTIONARY["statement"] = ['(mquery ws)']
GRAMMAR_DICTIONARY["mquery"] = [
'(ws select_clause ws from_clause ws where_clause ws groupby_clause ws having_clause ws orderby_clause ws limit)',
'(ws select_clause ws from_clause ws where_clause ws groupby_clause ws having_clause ws orderby_clause)',
'(ws select_clause ws from_clause ws where_clause ws groupby_clause ws having_clause)',
'(ws select_clause ws from_clause ws where_clause ws groupby_clause ws orderby_clause ws limit)',
'(ws select_clause ws from_clause ws where_clause ws groupby_clause ws orderby_clause)',
'(ws select_clause ws from_clause ws where_clause ws groupby_clause)',
'(ws select_clause ws from_clause ws where_clause ws orderby_clause ws limit)',
'(ws select_clause ws from_clause ws where_clause ws orderby_clause)',
'(ws select_clause ws from_clause ws where_clause)',
'(ws select_clause ws from_clause ws groupby_clause ws having_clause ws orderby_clause ws limit)',
'(ws select_clause ws from_clause ws groupby_clause ws having_clause ws orderby_clause)',
'(ws select_clause ws from_clause ws groupby_clause ws having_clause)',
'(ws select_clause ws from_clause ws groupby_clause ws orderby_clause ws limit)',
'(ws select_clause ws from_clause ws groupby_clause ws orderby_clause)',
'(ws select_clause ws from_clause ws groupby_clause)',
'(ws select_clause ws from_clause ws orderby_clause ws limit)',
'(ws select_clause ws from_clause ws orderby_clause)',
'(ws select_clause ws from_clause)'
]
# SELECT
GRAMMAR_DICTIONARY["select_clause"] = [
'(select_with_distinct ws select_results)']
GRAMMAR_DICTIONARY["select_with_distinct"] = [
'(ws "select" ws "distinct")', '(ws "select")']
GRAMMAR_DICTIONARY["select_results"] = [
'(ws select_result ws "," ws select_results)', '(ws select_result)']
GRAMMAR_DICTIONARY["select_result"] = [
'(subject ws selectop ws subject)',
'(subject wsp "as" wsp column_alias)',
'subject',
]
# FROM
GRAMMAR_DICTIONARY["from_clause"] = ['(ws "from" ws table_source ws join_clauses)',
'(ws "from" ws source)']
GRAMMAR_DICTIONARY["join_clauses"] = [
'(join_clause ws join_clauses)', 'join_clause']
GRAMMAR_DICTIONARY["join_clause"] = [
'joinop ws table_source ws "on" ws join_condition_clause']
GRAMMAR_DICTIONARY["joinop"] = ['"join"', '"left outer join"']
GRAMMAR_DICTIONARY["join_condition_clause"] = [
'(join_condition ws "and" ws join_condition_clause)', 'join_condition']
GRAMMAR_DICTIONARY["join_condition"] = ['ws col_ref ws "=" ws col_ref']
GRAMMAR_DICTIONARY["source"] = [
'(ws single_source ws "," ws source)', '(ws single_source)']
GRAMMAR_DICTIONARY["single_source"] = ['table_source', 'source_subq']
GRAMMAR_DICTIONARY["source_subq"] = ['("(" ws mquery ws ")" wsp "as" wsp table_alias)',
'("(" ws mquery ws ")" wsp table_alias)', '("(" ws mquery ws ")")']
GRAMMAR_DICTIONARY["table_source"] = [
'(table_name ws "as" ws table_alias)', 'table_name']
# LIMIT
GRAMMAR_DICTIONARY["limit"] = ['("limit" ws non_literal_number)']
# ORDER
GRAMMAR_DICTIONARY["orderby_clause"] = ['ws "order" ws "by" ws order_clause']
GRAMMAR_DICTIONARY["order_clause"] = [
'(ordering_term ws "," ws order_clause)', 'ordering_term']
GRAMMAR_DICTIONARY["ordering_term"] = [
'(ws subject ws ordering)', '(ws subject)']
GRAMMAR_DICTIONARY["ordering"] = ['(ws "asc")', '(ws "desc")']
# WHERE
GRAMMAR_DICTIONARY["where_clause"] = [
'(ws "where" wsp expr ws where_conj)', '(ws "where" wsp expr)']
GRAMMAR_DICTIONARY["where_conj"] = ['(ws "and" wsp expr ws where_conj)', '(ws "and" wsp expr)',
'(ws "or" wsp expr ws where_conj)', '(ws "or" wsp expr)']
# GROUP BY
GRAMMAR_DICTIONARY["groupby_clause"] = ['(ws "group" ws "by" ws group_clause)']
GRAMMAR_DICTIONARY["group_clause"] = [
'(ws subject ws "," ws group_clause)', '(ws subject)']
# HAVING
GRAMMAR_DICTIONARY["having_clause"] = [
'(ws "having" wsp expr ws having_conj)', '(ws "having" wsp expr)']
GRAMMAR_DICTIONARY["having_conj"] = ['(ws "and" wsp expr ws having_conj)', '(ws "and" wsp expr)',
'(ws "or" wsp expr ws having_conj)', '(ws "or" wsp expr)']
GRAMMAR_DICTIONARY["expr"] = [
'(subject wsp "not" wsp "in" wsp "(" ws mquery ws ")")',
'(subject wsp "in" ws "(" ws mquery ws ")")',
'(subject ws binaryop ws "all" ws "(" ws mquery ws ")")',
'(subject ws binaryop ws "any" ws "(" ws mquery ws ")")',
'(subject ws binaryop ws "(" ws mquery ws ")")',
'(subject ws binaryop ws value)',
]
GRAMMAR_DICTIONARY["value"] = ['non_literal_number', 'col_ref', 'string']
GRAMMAR_DICTIONARY["subject"] = ['function', 'col_ref']
GRAMMAR_DICTIONARY["col_ref"] = [
'(table_alias ws "." ws column_name)', 'column_name']
GRAMMAR_DICTIONARY["function"] = ['(fname ws "(" ws "distinct" ws col_ref ws ")")',
'(fname ws "(" ws col_ref ws ")")']
GRAMMAR_DICTIONARY["fname"] = ['"count"',
'"sum"', '"max"', '"min"', '"avg"', '"all"']
# TODO(MARK): This is not tight enough. AND/OR are strictly boolean value operators.
GRAMMAR_DICTIONARY["binaryop"] = ['"="', '"!="', '"<>"',
'">="', '"<="', '">"', '"<"', '"like"', '"not like"']
GRAMMAR_DICTIONARY['selectop'] = ['"/"', '"+"', '"-"']
GRAMMAR_DICTIONARY["ws"] = ['~"\s*"i']
GRAMMAR_DICTIONARY['wsp'] = ['~"\s+"i']
GRAMMAR_DICTIONARY["table_name"] = ['"state"', '"city"',
'"lake"', '"river"', '"border_info"', '"highlow"', '"mountain"']
GRAMMAR_DICTIONARY["table_alias"] = [
'"statealias0"', '"statealias1"', '"statealias2"', '"statealias3"', '"statealias4"', '"statealias5"',
'"cityalias0"', '"cityalias1"', '"cityalias2"',
'"lakealias0"', '"mountainalias0"', '"mountainalias1"',
'"riveralias0"', '"riveralias1"', '"riveralias2"', '"riveralias3"',
'"border_infoalias0"', '"border_infoalias1"', '"border_infoalias2"', '"border_infoalias3"',
'"highlowalias0"', '"highlowalias1"', '"derived_tablealias0"', '"derived_tablealias1"',
'"tmp"',
]
GRAMMAR_DICTIONARY["column_name"] = [
'"*"', '"city_name"', '"population"', '"country_name"', '"state_name"', # city
'"border"', # border_info
'"highest_elevation"', '"lowest_point"', '"highest_point"', '"lowest_elevation"', # highlow
'"lake_name"', '"area"', '"country_name"', # lake
'"mountain_name"', '"mountain_altitude"', # mountain
'"river_name"', '"length"', '"traverse"', # river
'"capital"', '"density"', # state,
'"derived_fieldalias0"', '"derived_fieldalias1"',
]
GRAMMAR_DICTIONARY['column_alias'] = [
'"derived_fieldalias0"', '"derived_fieldalias1"' # custom
]
GRAMMAR_DICTIONARY["non_literal_number"] = [
'"150000"', '"750"', '"0"', '"1"', '"2"', '"3"', '"4"', ]
GRAMMAR_DICTIONARY['string'] = ['"\'usa\'"', '"\'red\'"', '"750"', '"0"', '"150000"', '"\'oregon\'"', '"\'georgia\'"', '"\'wisconsin\'"', '"\'montana\'"', '"\'colorado\'"', '"\'west virginia\'"', '"\'hawaii\'"', '"\'new hampshire\'"', '"\'washington\'"', '"\'florida\'"', '"\'north dakota\'"', '"\'idaho\'"', '"\'minnesota\'"', '"\'tennessee\'"', '"\'vermont\'"', '"\'kentucky\'"', '"\'alabama\'"', '"\'oklahoma\'"', '"\'maryland\'"', '"\'nebraska\'"', '"\'iowa\'"', '"\'kansas\'"', '"\'california\'"', '"\'wyoming\'"',
'"\'massachusetts\'"', '"\'missouri\'"', '"\'nevada\'"', '"\'south dakota\'"', '"\'utah\'"', '"\'rhode island\'"', '"\'new york\'"', '"\'new jersey\'"', '"\'indiana\'"', '"\'new mexico\'"', '"\'maine\'"', '"\'illinois\'"', '"\'louisiana\'"', '"\'michigan\'"', '"\'mississippi\'"', '"\'ohio\'"', '"\'south carolina\'"', '"\'arkansas\'"', '"\'texas\'"', '"\'virginia\'"', '"\'pennsylvania\'"', '"\'north carolina\'"', '"\'alaska\'"', '"\'arizona\'"', '"\'delaware\'"', '"\'north platte\'"',
'"\'chattahoochee\'"', '"\'rio grande\'"', '"\'potomac\'"', '"\'mckinley\'"', '"\'whitney\'"', '"\'death valley\'"', '"\'mount mckinley\'"', '"\'guadalupe peak\'"', '"\'detroit\'"', '"\'plano\'"', '"\'des moines\'"', '"\'boston\'"', '"\'salem\'"', '"\'fort wayne\'"', '"\'houston\'"', '"\'portland\'"', '"\'montgomery\'"', '"\'minneapolis\'"', '"\'tempe\'"', '"\'boulder\'"', '"\'seattle\'"', '"\'columbus\'"', '"\'dover\'"', '"\'indianapolis\'"', '"\'san antonio\'"', '"\'albany\'"', '"\'flint\'"', '"\'chicago\'"', '"\'miami\'"',
'"\'scotts valley\'"', '"\'san francisco\'"', '"\'springfield\'"', '"\'sacramento\'"', '"\'salt lake city\'"', '"\'new orleans\'"', '"\'atlanta\'"', '"\'tucson\'"', '"\'denver\'"', '"\'riverside\'"', '"\'erie\'"', '"\'san jose\'"', '"\'durham\'"', '"\'kalamazoo\'"', '"\'baton rouge\'"', '"\'san diego\'"', '"\'pittsburgh\'"', '"\'spokane\'"', '"\'austin\'"', '"\'rochester\'"', '"\'dallas\'"']
COPY_TERMINAL_SET = {'non_literal_number', 'string'}
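# For illustration, the kind of query this grammar targets (borrowed from the
# extractor tests elsewhere in this repo; the lowercasing and quote style are
# assumptions about the preprocessor, not something this file enforces):
#   select cityalias0.population from city as cityalias0
#   where cityalias0.city_name = 'erie' and cityalias0.state_name = 'pennsylvania'
# A quick standalone check that the entity literals of that example are
# covered by the string rule above:
if __name__ == '__main__':
    string_alternatives = ' '.join(GRAMMAR_DICTIONARY['string'])
    for literal in ("'erie'", "'pennsylvania'"):
        print(literal, 'covered:', literal in string_alternatives)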
| 9,095 | 60.877551 | 563 |
py
|
Unimer
|
Unimer-master/grammars/geo/get_lambda_calculus_terminals.py
|
# coding=utf8
import re
state_pattern = re.compile('\s([a-z|_|.]+?:s)[\s|)]')
city_pattern = re.compile('\s([a-z|_|.]+?:c)[\s|)]')
river_pattern = re.compile('\s([a-z|_|.]+?:r)[\s|)]')
place_pattern = re.compile('\s([a-z|_|.]+?:p)[\s|)]')
lake_pattern = re.compile('\s([a-z|_|.]+?:l)[\s|)]')
location_pattern = re.compile('\s([a-z|_|.]+?:lo)[\s|)]')
mountain_pattern = re.compile('\s([a-z|_|.]+?:m)[\s|)]')
name_pattern = re.compile('\s([a-z|_|.]+?:n)[\s|)]')
if __name__ == '__main__':
test_data = '../../data/geo/geo_lambda_calculus_test.tsv'
train_data = '../../data/geo/geo_lambda_calculus_train.tsv'
logical_forms = list()
with open(test_data, 'r') as f:
for line in f:
line = line.strip()
logical_forms.append(line.split('\t')[1].lower())
with open(train_data, 'r') as f:
for line in f:
line = line.strip()
logical_forms.append(line.split('\t')[1].lower())
state_names = set()
for p in logical_forms:
matches = state_pattern.findall(p)
for m in matches:
state_names.add(m)
print("State Names: ")
print(['"%s"' % c for c in state_names])
print("====\n\n")
city_names = set()
for p in logical_forms:
matches = city_pattern.findall(p)
for m in matches:
city_names.add(m)
print("City Names: ")
print(['"%s"' % c for c in city_names])
print("====\n\n")
river_names = set()
for p in logical_forms:
matches = river_pattern.findall(p)
for m in matches:
river_names.add(m)
print("River Names: ")
print(['"%s"' % c for c in river_names])
print("====\n\n")
lake_names = set()
for p in logical_forms:
matches = lake_pattern.findall(p)
for m in matches:
lake_names.add(m)
print("Lake Names: ")
print(['"%s"' % c for c in lake_names])
print("====\n\n")
location_names = set()
for p in logical_forms:
matches = location_pattern.findall(p)
for m in matches:
location_names.add(m)
print("Location Names: ")
print(['"%s"' % c for c in location_names])
print("====\n\n")
mountain_names = set()
for p in logical_forms:
matches = mountain_pattern.findall(p)
for m in matches:
mountain_names.add(m)
print("Mountain Names: ")
print(['"%s"' % c for c in mountain_names])
print("====\n\n")
names = set()
for p in logical_forms:
matches = name_pattern.findall(p)
for m in matches:
names.add(m)
print("Names: ")
print(['"%s"' % c for c in names])
print("====\n\n")
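    # A hedged illustration on a made-up expression (not data from the files
    # read above): the state pattern extracts typed entity mentions such as
    # 'alaska:s' from a lambda-calculus string.
    sample = '(lambda $0:e (and (major $0) (city $0) (loc $0 alaska:s )))'
    print("Sample state match: ", state_pattern.findall(sample))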
| 2,675 | 28.733333 | 63 |
py
|
Unimer
|
Unimer-master/grammars/atis/atis_sql_entity_matcher.py
|
# coding=utf8
import os
import re
import itertools
import collections
import numpy as np
from typing import List, Dict
from nltk.corpus import stopwords
from allennlp.data.tokenizers import Token
def clean_id(s, id_suffix, strip=None):
true_id = re.sub(' +', ' ', s)
if strip:
for v in strip:
true_id = true_id.replace(v, '').strip()
return '%s' % true_id
def clean_name(s, strip=None, split=None, prefix=None):
if split:
for v in split:
s = s.replace(v, ' ')
if strip:
for v in strip:
s = s.replace(v, '')
if prefix:
s = prefix + s
return s
def read_db(db_path, basename, id_col, name_col, id_suffix,
strip_id=None, strip_name=None, split_name=None, prefix_name=None):
filename = os.path.join(db_path, basename)
data = [] # Pairs of (name, id)
with open(filename) as f:
for line in f:
row = [s[1:-1] for s in re.findall('"[^"]*"|[0-9]+', line.strip())]
cur_name = clean_name(row[name_col].lower(), strip=strip_name,
split=split_name, prefix=prefix_name)
cur_id = clean_id(row[id_col].lower(), id_suffix, strip=strip_id)
data.append((cur_name, cur_id))
return data
def strip_unk(w):
# Strip unk:%06d identifiers
m = re.match('^unk:[0-9]{6,}:(.*)$', w)
if m:
return m.group(1)
else:
return w
class ATISSQLEntityMatcher:
DAYS_OF_WEEK = [
(s, '%s' % s)
for s in ('monday', 'tuesday', 'wednesday', 'thursday',
'friday', 'saturday', 'sunday')
]
# For dates
WORD_NUMBERS = [('one', '1'), ('two', '2'), ('three', '3'), ('four', '4'), ('five', '5'),
('six', '6'), ('seven', '7'), ('eight', '8'), ('nine', '9'), ('ten', '10'),
('eleven', '11'), ('twelve', '12'), ('thirteen', '13'), ('fourteen', '14'),
('fifteen', '15'), ('sixteen', '16'), ('seventeen', '17'), ('eighteen', '18'),
('nineteen', '19'), ('twenty', '20'), ('twenty one', '21'),
('twenty two', '22'),
('twenty three', '23'), ('twenty four', '24'), ('twenty five', '25'),
('twenty six', '26'), ('twenty seven', '27'), ('twenty eight', '28'),
('twenty nine', '29'), ('thirty', '30'), ('thirty one', '31')]
ORDINAL_NUMBERS = [('second', '2'), ('third', '3'), ('fourth', '4'), ('fifth', '5'),
('sixth', '6'), ('seventh', '7'), ('eighth', '8'), ('ninth', '9'),
('tenth', '10'), ('eleventh', '11'), ('twelfth', '12'), ('thirteenth', '13'),
('fourteenth', '14'), ('fifteenth', '15'), ('sixteenth', '16'),
('seventeenth', '17'), ('eighteenth', '18'), ('nineteenth', '19'),
('twentieth', '20'), ('twenty first', '21'), ('twenty second', '22'),
('twenty third', '23'), ('twenty fourth', '24'), ('twenty fifth', '25'),
('twenty sixth', '26'), ('twenty seventh', '27'), ('twenty eighth', '28'),
('twenty ninth', '29'), ('thirtieth', '30'),
('thirty first', '31')]
MONTH_NUMBERS = [('august', '8'), ('april', '4'), ('may', '5'), ('october', '10'),
('june', '6'), ('november', '11'), ('september', '9'), ('february', '2'),
('december', '12'), ('march', '3'), ('july', '7'), ('january', '1')]
MEALS = [(m, '%s' % m) for m in ('breakfast', 'lunch', 'dinner', 'snack')]
ST_CITIES = [(m, "%s" % m) for m in ('st . louis', 'st . petersburg', 'st . paul')]
    MEAL_CODES = [('s/', 's/'), ('sd / d', 'sd/d'), ('d / s', 'd/s')]
def __init__(self, db_path):
self.entries = collections.OrderedDict()
self.handlers = []
self.unique_word_map = collections.OrderedDict()
self.seen_words = set()
self.stop_words = set(stopwords.words('english'))
# CCG Lexicon
filename = os.path.join(db_path, 'lexicon.txt')
entries = []
with open(filename) as f:
for line in f:
x, y = line.strip().split(' :- NP : ')
y = y[:y.index(":")]
entries.append((x, y))
self.add_entries(entries)
# Read DB
city_entries = read_db(db_path, 'CITY.TAB', 1, 1, '', strip_id=['.'])
self.add_entries(city_entries)
self.add_entries(self.DAYS_OF_WEEK)
self.add_entries([(x + 's', y) for x, y in self.DAYS_OF_WEEK]) # Handle "on tuesdays"
self.add_entries(read_db(db_path, 'AIRLINE.TAB', 0, 1, '',
strip_name=[', inc.', ', ltd.']))
self.add_entries(read_db(db_path, 'INTERVAL.TAB', 0, 0, ''))
self.add_entries(read_db(db_path, 'MONTH.TAB', 1, 1, ''))
self.add_entries(read_db(db_path, 'AIRPORT.TAB', 0, 1, '',
strip_name=[], split_name=['/']))
self.add_entries(read_db(db_path, 'COMP_CLS.TAB', 1, 1, ''))
self.add_entries(read_db(db_path, 'CLS_SVC.TAB', 0, 0, '', prefix_name='code '))
self.add_entries(self.MEALS)
self.add_entries(self.WORD_NUMBERS)
self.add_entries(self.ORDINAL_NUMBERS)
self.add_entries(self.ST_CITIES)
self.add_entries(self.MONTH_NUMBERS)
self.add_entries(self.MEAL_CODES)
self.remove_entries(self.DAYS_OF_WEEK)
self.remove_entries([('as', 'as')])
self.handle_times()
self.handle_rc()
self.handle_stop()
self.handle_dollars()
self.handle_flight_numbers()
def handle_times(self):
# Mod 12 deals with 12am/12pm special cases...
self.add_handler('([0-9]{1,2}) am$',
lambda m: '%d00' % (int(m.group(1)) % 12))
self.add_handler('([0-9]{1,2}) pm$',
lambda m: '%d00' % (int(m.group(1)) % 12 + 12))
self.add_handler('([0-9]{1,2})([0-9]{2}) am$',
lambda m: '%d%02d' % (int(m.group(1)) % 12, int(m.group(2))))
self.add_handler('([0-9]{1,2})([0-9]{2}) pm$',
lambda m: '%d%02d' % (int(m.group(1)) % 12 + 12, int(m.group(2))))
self.add_handler("([0-9]{1,2}) o'clock$",
lambda m: '%d00' % (int(m.group(1)) % 12))
self.add_handler("([0-9]{1,2}) o'clock am$",
lambda m: '%d00' % (int(m.group(1)) % 12))
self.add_handler("([0-9]{1,2}) o'clock pm$",
lambda m: '%d00' % (int(m.group(1)) % 12 + 12))
self.add_handler("([0-9]+) hours$",
lambda m: '%d' % (int(m.group(1))))
def handle_flight_numbers(self):
self.add_handler('[0-9]{2,}$', lambda m: '%d' % int(m.group(0)))
def handle_dollars(self):
self.add_handler('([0-9]+)$', lambda m: '%d' % int(m.group(1)))
self.add_handler('([0-9]+) dollars$', lambda m: '%d' % int(m.group(1)))
def handle_rc(self):
self.add_handler(re.compile(r'ap/(\d+)$'), lambda m: 'ap_%d' % int(m.group(1)))
self.add_handler(re.compile(r'ap(\d+)$'), lambda m: 'ap_%d' % int(m.group(1)))
def handle_stop(self):
self.add_handler('([0-9]+) stop$', lambda m: '%d' % int(m.group(1)))
self.add_handler('([0-9]+) stops$', lambda m: '%d' % int(m.group(1)))
def add_entries(self, entries):
for name, entity in entries:
# Update self.entries
if name in self.entries:
if entity not in self.entries[name]:
self.entries[name].append(entity)
else:
self.entries[name] = [entity]
# Update self.unique_word_map
for w in name.split(' '):
if w in self.seen_words or w in self.stop_words:
# This word is not unique!
if w in self.unique_word_map:
del self.unique_word_map[w]
else:
self.unique_word_map[w] = entity
self.seen_words.add(w)
def remove_entries(self, entries):
for name, entity in entries:
if name not in self.entries or entity not in self.entries[name]:
continue
self.entries[name].remove(entity)
def add_handler(self, regex, func):
self.handlers.append((regex, func))
def get_rule_ids(self, entities, rules: List, copy_terminal_set: List) -> List:
rule_ids = list()
if isinstance(entities, str):
entities = [entities]
for entity in entities:
for rule in rules:
if rule.lhs not in copy_terminal_set:
continue
terminal = rule.rhs.strip('[] ').replace("'", "").replace('"', '').replace(" ", "")
if terminal == entity.replace(" ", ""):
rule_ids.append(rule.rule_id)
break
else:
print("Cannot find a corresponding rule for terminal %s" % entity)
return rule_ids
def _match_candidates(self, tokens: List[Token], rules: List, copy_terminal_set: List, ) -> List:
words = [t.text for t in tokens]
entities = [[] for i in range(len(words))]
ind_pairs = sorted(list(itertools.combinations(range(len(words) + 1), 2)),
key=lambda x: x[0] - x[1])
ret_entries = []
words = [strip_unk(w) for w in words] # Strip unk:%06d stuff
# Handlers
for i, j in ind_pairs:
if any(x for x in entities[i:j]): continue
span = ' '.join(words[i:j])
for regex, func in self.handlers:
m = re.match(regex, span)
if m:
entity = func(m)
rule_ids = self.get_rule_ids(entity, rules, copy_terminal_set)
if len(rule_ids) > 0:
for k in range(i, j):
entities[k] += rule_ids
ret_entries.append(((i, j), rule_ids))
# Entries
for i, j in ind_pairs:
if any(x for x in entities[i:j]): continue
span = ' '.join(words[i:j])
if span in self.entries:
entity = self.entries[span]
rule_ids = self.get_rule_ids(entity, rules, copy_terminal_set)
if len(rule_ids) > 0:
for k in range(i, j):
entities[k] = rule_ids
ret_entries.append(((i, j), rule_ids))
# Unique words
for i in range(len(words)):
if any(x for x in entities[i:i+1]): continue
word = words[i]
if entities[i]: continue
if word in self.unique_word_map:
entity = self.unique_word_map[word]
rule_ids = self.get_rule_ids(entity, rules, copy_terminal_set)
if len(rule_ids) > 0:
entities[i] = [entity]
ret_entries.append(((i, i+1), rule_ids))
# Remove One-way
for widx, word in enumerate(words):
if word == 'one' and widx + 1 < len(words) and words[widx + 1] == 'way':
for entry in ret_entries:
if entry[0][0] == widx and entry[0][1] == widx + 1:
ret_entries.remove(entry)
break
return ret_entries
def match(self, tokens: List[Token], rules: List, copy_terminal_set: List, pad_index: int,) -> List[List]:
entity_candidates = self._match_candidates(tokens, rules, copy_terminal_set)
token_rule_map = [list() for i in range(len(tokens))]
for (beg_idx, end_idx), rule_ids in entity_candidates:
for index in range(beg_idx, end_idx):
token_rule_map[index] += rule_ids
for midx, m in enumerate(token_rule_map):
if len(m) == 0:
m.append(pad_index)
token_rule_map[midx] = np.array(list(set(m)), dtype=np.int)
return token_rule_map
if __name__ == '__main__':
matcher = ATISSQLEntityMatcher('../../data/atis/db')
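    # A hedged usage sketch: exercise the regex handlers registered above on a
    # few made-up spans (running this still needs the db directory passed to
    # the constructor).
    for span in ('5 pm', '1030 am', '2 stops'):
        for regex, func in matcher.handlers:
            m = re.match(regex, span)
            if m:
                print(span, '->', func(m))
                break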
| 12,292 | 41.243986 | 110 |
py
|
Unimer
|
Unimer-master/grammars/atis/get_funql_terminals.py
|
# coding=utf8
def read_data():
questions, logical_forms = list(), list()
paths = [
"../../data/atis/atis_funql_train.tsv",
"../../data/atis/atis_funql_dev.tsv",
"../../data/atis/atis_funql_test.tsv"]
for p in paths:
with open(p, "r") as f:
for line in f:
line = line.strip()
splits = line.split('\t')
questions.append(splits[0])
logical_forms.append(splits[1])
return questions, logical_forms
def tokenize_funql(funql):
normalized_lf = funql.replace(" ", "::")
replacements = [
('(', '( '),
(')', ' ) '),
(',', ' , '),
("\\+", " \\+ "),
]
for a, b in replacements:
normalized_lf = normalized_lf.replace(a, b)
tokens = [t if "::" not in t else t.replace("::", " ") for t in normalized_lf.split()]
return tokens
def get_relations(logical_forms):
relations = set()
meta_relations = set()
for lf in logical_forms:
tokens = tokenize_funql(lf)
for token in tokens:
if token.endswith("(") and token[:-1] not in ['intersection', 'or', 'not']:
if token.startswith('argmax') or token.startswith('argmin') \
or token.startswith('_<') or token.startswith('_>') \
or token.startswith('_=') or token.startswith('_equals') \
or token.startswith('sum') or token.startswith('count'):
meta_relations.add(token[:-1])
else:
relations.add(token[:-1])
return sorted(list(relations)), sorted(list(meta_relations))
if __name__ == '__main__':
questions, logical_forms = read_data()
relations, meta_relations = get_relations(logical_forms)
# Meta Relations
print("Binary Relations")
print("""GRAMMAR_DICTIONARY['meta'] = %s""" % (["(%s)" % r for r in meta_relations]))
for r in meta_relations:
print("""GRAMMAR_DICTIONARY['%s'] = ['("%s(" predicate ")")']""" % (r, r))
print("==\n\n")
# Relations
print("Relations")
print("""GRAMMAR_DICTIONARY['relation'] = %s""" % (["(%s)" % r for r in relations]))
for r in relations:
print("""GRAMMAR_DICTIONARY['%s'] = ['("%s(" predicate ")")']""" % (r, r))
print("==\n\n")
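    # A hedged illustration on a made-up FunQL string (purely syntactic, not
    # necessarily a well-formed ATIS query): tokenize_funql keeps multi-word
    # quoted entities together by temporarily joining them with '::'.
    print(tokenize_funql("intersection(flight(all),from_1('new york'))"))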
| 2,337 | 32.884058 | 90 |
py
|
Unimer
|
Unimer-master/grammars/atis/lambda_calculus_grammar.py
|
# coding=utf8
ROOT_RULE = 'statement -> [expression]'
GRAMMAR_DICTIONARY = {}
GRAMMAR_DICTIONARY['statement'] = ['(expression ws)']
GRAMMAR_DICTIONARY['expression'] = [
'(constant)', '(application)', '(abstraction)', '(variable)']
GRAMMAR_DICTIONARY['abstraction'] = [
'("(" ws "_lambda" wsp variable_definition wsp expression ws ")")']
GRAMMAR_DICTIONARY['application'] = ['("(" ws function ws ")")']
GRAMMAR_DICTIONARY['function'] = [
'("_month_arrival" wsp expression wsp expression)',
'("_stop_arrival_time" wsp expression)',
'("_stops" wsp expression wsp expression)',
'("_stops" wsp expression)',
'("_weekday" wsp expression)',
'("_day_number" wsp expression wsp expression)',
'("_meal:_t" wsp expression wsp expression)',
'("_meal:_t" wsp expression)',
'("_meal" wsp expression wsp expression)',
'("_meal" wsp expression)',
'("_approx_return_time" wsp expression wsp expression)',
'("_booking_class:_t" wsp expression)',
'("_booking_class" wsp expression wsp expression)',
'("_booking_class" wsp expression)',
'("_approx_arrival_time" wsp expression wsp expression)',
'("_fare" wsp expression wsp expression)',
'("_fare" wsp expression)',
'("_flight" wsp expression)',
'("_the" wsp expression wsp expression)',
'("_aircraft_basis_type" wsp expression wsp expression)',
'("_aircraft_code:_t" wsp expression)',
'("_aircraft_code" wsp expression wsp expression)',
'("_aircraft_code" wsp expression)',
'("_economy" wsp expression)',
'("_has_stops" wsp expression)',
'("_minutes_distant" wsp expression wsp expression)',
'("_minutes_distant" wsp expression)',
'("_>" wsp expression wsp expression)',
'("_departure_time" wsp expression wsp expression)',
'("_departure_time" wsp expression)',
'("_ground_fare" wsp expression)',
'("_oneway" wsp expression)',
'("_airport" wsp expression wsp expression)',
'("_airport" wsp expression)',
'("_=" wsp expression wsp expression)',
'("_named" wsp expression wsp expression)',
'("_taxi" wsp expression)',
'("_flight_number" wsp expression wsp expression)',
'("_flight_number" wsp expression)',
'("_loc:_t" wsp expression wsp expression)',
'("_airline" wsp expression wsp expression)',
'("_airline" wsp expression)',
'("_during_day" wsp expression wsp expression)',
'("_arrival_month" wsp expression wsp expression)',
'("_manufacturer" wsp expression wsp expression)',
'("_fare_basis_code" wsp expression wsp expression)',
'("_fare_basis_code" wsp expression)',
'("_approx_departure_time" wsp expression wsp expression)',
'("_rapid_transit" wsp expression)',
'("_arrival_time" wsp expression wsp expression)',
'("_arrival_time" wsp expression)',
'("_tonight" wsp expression)',
'("_max" wsp expression wsp expression)',
'("_min" wsp expression wsp expression)',
'("_services" wsp expression wsp expression)',
'("_next_days" wsp expression wsp expression)',
'("_not" wsp application)',
'("_or" wsp application wsp polyvariadic_expression)',
'("_and" wsp application wsp polyvariadic_expression)',
'("_from" wsp expression wsp expression)',
'("_today" wsp expression)',
'("_argmax" wsp variable wsp expression wsp expression)',
'("_argmin" wsp variable wsp expression wsp expression)',
'("_connecting" wsp expression)',
'("_overnight" wsp expression)',
'("_airline:_e" wsp expression)',
'("_restriction_code" wsp expression)',
'("_<" wsp expression wsp expression)',
'("_round_trip" wsp expression)',
'("_stop" wsp expression wsp expression)',
'("_year" wsp expression wsp expression)',
'("_day_after_tomorrow" wsp expression)',
'("_sum" wsp variable wsp expression wsp expression)',
'("_day_return" wsp expression wsp expression)',
'("_discounted" wsp expression)',
'("_time_zone_code" wsp expression)',
'("_equals" wsp expression wsp expression)',
'("_limousine" wsp expression)',
'("_daily" wsp expression)',
'("_class_type" wsp expression wsp expression)',
'("_day_arrival" wsp expression wsp expression)',
'("_during_day_arrival" wsp expression wsp expression)',
'("_days_from_today" wsp expression wsp expression)',
'("_from_airport" wsp expression wsp expression)',
'("_to_city" wsp expression wsp expression)',
'("_has_meal" wsp expression)',
'("_minimum_connection_time" wsp expression wsp expression)',
'("_minimum_connection_time" wsp expression)',
'("_tomorrow" wsp expression)',
'("_tomorrow_arrival" wsp expression wsp expression)',
'("_tomorrow_arrival" wsp expression)',
'("_day_number_arrival" wsp expression wsp expression)',
'("_aircraft" wsp expression wsp expression)',
'("_aircraft" wsp expression)',
'("_capacity" wsp expression)',
'("_month" wsp expression wsp expression)',
'("_cost" wsp expression)',
'("_day_number_return" wsp expression wsp expression)',
'("_rental_car" wsp expression)',
'("_day" wsp expression wsp expression)',
'("_equals:_t" wsp expression wsp expression)',
'("_airline_name" wsp expression)',
'("_before_day" wsp expression wsp expression)',
'("_exists" wsp variable wsp expression)',
'("_jet" wsp expression)',
'("_count" wsp variable wsp expression)',
'("_miles_distant" wsp expression wsp expression)',
'("_miles_distant" wsp expression)',
'("_city" wsp expression)',
'("_class_of_service" wsp expression wsp expression)',
'("_class_of_service" wsp expression)',
'("_turboprop" wsp expression)',
'("_to" wsp expression wsp expression)',
'("_time_elapsed" wsp expression wsp expression)',
'("_time_elapsed" wsp expression)',
'("_abbrev" wsp expression)',
'("_month_return" wsp expression wsp expression)',
'("_ground_transport" wsp expression)',
'("_nonstop" wsp expression)',
'("_after_day" wsp expression wsp expression)',
'("_meal_code" wsp expression wsp expression)',
'("_meal_code" wsp expression)',
'("_air_taxi_operation" wsp expression)',
]
GRAMMAR_DICTIONARY['polyvariadic_expression'] = [
'(application ws polyvariadic_expression)', '""']
GRAMMAR_DICTIONARY['variable_definition'] = [
'(variable ":e")', '(variable ":i")']
GRAMMAR_DICTIONARY['variable'] = ['"$v0"', '"$v1"', '"$v2"', '"$v3"']
GRAMMAR_DICTIONARY['constant'] = ['(do)', '(city)', '(al)', '(time)',
'(meal)', '(fn)', '(ap)', '(rc)', '(cl)', '(ac)', '(da)', '(dn)',
'"9:_hr"', '"boeing:_mf"', '"sa:_dc"', '(mn)', '(yr)', '(pd)', '(fb)', '(st)', '(i)', '(bat)']
GRAMMAR_DICTIONARY['do'] = ['"100:_do"', '"1000:_do"', '"466:_do"', '"416:_do"', '"124:_do"', '"329:_do"', '"1100:_do"', '"415:_do"',
'"200:_do"', '"150:_do"', '"932:_do"', '"500:_do"', '"1288:_do"', '"300:_do"', '"400:_do"', '"1500:_do"']
GRAMMAR_DICTIONARY['city'] = ['"nashville:_ci"', '"indianapolis:_ci"', '"san_diego:_ci"', '"long_beach:_ci"', '"atlanta:_ci"', '"kansas_city:_ci"', '"miami:_ci"', '"st_louis:_ci"', '"columbus:_ci"', '"toronto:_ci"', '"las_vegas:_ci"', '"burbank:_ci"', '"cleveland:_ci"', '"tacoma:_ci"', '"st_petersburg:_ci"', '"memphis:_ci"', '"denver:_ci"', '"dallas:_ci"', '"detroit:_ci"', '"oakland:_ci"', '"baltimore:_ci"', '"pittsburgh:_ci"', '"philadelphia:_ci"', '"milwaukee:_ci"', '"salt_lake_city:_ci"', '"san_jose:_ci"', '"tampa:_ci"', '"orlando:_ci"', '"chicago:_ci"', '"seattle:_ci"', '"new_york:_ci"', '"san_francisco:_ci"', '"boston:_ci"', '"washington:_ci"', '"cincinnati:_ci"', '"charlotte:_ci"', '"newark:_ci"', '"westchester_county:_ci"', '"los_angeles:_ci"', '"fort_worth:_ci"', '"minneapolis:_ci"', '"ontario:_ci"', '"montreal:_ci"', '"st_paul:_ci"', '"houston:_ci"', '"phoenix:_ci"']
GRAMMAR_DICTIONARY['al'] = ['"wn:_al"', '"ml:_al"', '"cp:_al"', '"nw:_al"', '"yx:_al"', '"ac:_al"', '"dl:_al"', '"kw:_al"', '"delta:_al"', '"as:_al"', '"tw:_al"',
'"co:_al"', '"ff:_al"', '"ea:_al"', '"ua:_al"', '"canadian_airlines_international:_al"', '"hp:_al"', '"lh:_al"', '"nx:_al"', '"usair:_al"', '"aa:_al"', '"us:_al"']
GRAMMAR_DICTIONARY['time'] = ['"1200:_ti"', '"1628:_ti"', '"1830:_ti"', '"823:_ti"', '"1245:_ti"', '"1524:_ti"', '"200:_ti"', '"1615:_ti"', '"1230:_ti"', '"705:_ti"', '"1045:_ti"', '"1700:_ti"', '"1115:_ti"', '"1645:_ti"', '"1730:_ti"', '"815:_ti"', '"0:_ti"', '"500:_ti"', '"1205:_ti"', '"1940:_ti"', '"2000:_ti"', '"1400:_ti"', '"1130:_ti"', '"2200:_ti"', '"645:_ti"', '"718:_ti"', '"2220:_ti"', '"600:_ti"', '"630:_ti"', '"800:_ti"', '"838:_ti"', '"1330:_ti"', '"845:_ti"', '"1630:_ti"', '"1715:_ti"', '"2010:_ti"', '"1000:_ti"', '"1619:_ti"',
'"2100:_ti"', '"1505:_ti"', '"2400:_ti"', '"1923:_ti"', '"1:_ti"', '"1145:_ti"', '"2300:_ti"', '"1620:_ti"', '"2023:_ti"', '"2358:_ti"', '"1500:_ti"', '"1815:_ti"', '"1425:_ti"', '"720:_ti"', '"1024:_ti"', '"1600:_ti"', '"100:_ti"', '"1310:_ti"', '"1300:_ti"', '"700:_ti"', '"650:_ti"', '"1800:_ti"', '"1110:_ti"', '"1410:_ti"', '"1030:_ti"', '"1900:_ti"', '"1017:_ti"', '"1430:_ti"', '"1850:_ti"', '"900:_ti"', '"1930:_ti"', '"1133:_ti"', '"1220:_ti"', '"2226:_ti"', '"1100:_ti"', '"819:_ti"', '"755:_ti"', '"2134:_ti"', '"555:_ti"']
GRAMMAR_DICTIONARY['meal'] = ['"snack:_me"',
'"lunch:_me"', '"dinner:_me"', '"breakfast:_me"']
GRAMMAR_DICTIONARY['fn'] = ['"838:_fn"', '"1059:_fn"', '"417:_fn"', '"323:_fn"', '"311:_fn"', '"137338:_fn"', '"315:_fn"', '"825:_fn"', '"345:_fn"', '"270:_fn"', '"271:_fn"', '"4400:_fn"', '"296:_fn"', '"1765:_fn"', '"343:_fn"', '"1222:_fn"', '"217:_fn"', '"459:_fn"', '"279:_fn"', '"1083:_fn"', '"324:_fn"', '"746:_fn"', '"281:_fn"', '"269:_fn"', '"98:_fn"',
'"212:_fn"', '"505:_fn"', '"852:_fn"', '"82:_fn"', '"352:_fn"', '"928:_fn"', '"19:_fn"', '"139:_fn"', '"415:_fn"', '"539:_fn"', '"3357:_fn"', '"813:_fn"', '"257:_fn"', '"297:_fn"', '"1055:_fn"', '"405:_fn"', '"201:_fn"', '"71:_fn"', '"1291:_fn"', '"402:_fn"', '"771:_fn"', '"106:_fn"', '"1039:_fn"', '"210:_fn"', '"2153:_fn"', '"3724:_fn"', '"1209:_fn"', '"21:_fn"']
GRAMMAR_DICTIONARY['ap'] = ['"ewr:_ap"', '"jfk:_ap"', '"pit:_ap"', '"oak:_ap"', '"bur:_ap"', '"las:_ap"', '"lga:_ap"', '"den:_ap"', '"mco:_ap"', '"dallas:_ap"', '"dfw:_ap"', '"phx:_ap"', '"slc:_ap"', '"iad:_ap"', '"sfo:_ap"', '"ont:_ap"',
'"iah:_ap"', '"ord:_ap"', '"mia:_ap"', '"cvg:_ap"', '"phl:_ap"', '"tpa:_ap"', '"dtw:_ap"', '"yyz:_ap"', '"ind:_ap"', '"atl:_ap"', '"mke:_ap"', '"hou:_ap"', '"bos:_ap"', '"dal:_ap"', '"bwi:_ap"', '"bna:_ap"', '"stapelton:_ap"', '"lax:_ap"']
GRAMMAR_DICTIONARY['rc'] = ['"b:_rc"', '"ap_55:_rc"', '"ap_57:_rc"', '"s_:_rc"', '"sd_d:_rc"',
'"ap_80:_rc"', '"d_s:_rc"', '"ap_58:_rc"', '"ls:_rc"', '"ap:_rc"', '"s:_rc"', '"ap_68:_rc"']
GRAMMAR_DICTIONARY['cl'] = ['"thrift:_cl"',
'"business:_cl"', '"first:_cl"', '"coach:_cl"']
GRAMMAR_DICTIONARY['ac'] = ['"dc10:_ac"', '"j31:_ac"', '"734:_ac"', '"73s:_ac"', '"72s:_ac"', '"100:_ac"', '"757:_ac"', '"d9s:_ac"',
'"d10:_ac"', '"727:_ac"', '"m80:_ac"', '"747:_ac"', '"f28:_ac"', '"737:_ac"', '"733:_ac"', '"767:_ac"']
GRAMMAR_DICTIONARY['da'] = ['"monday:_da"', '"thursday:_da"', '"saturday:_da"', '"friday:_da"',
'"sunday:_da"', '"wednesday:_da"', '"tuesday:_da"']
GRAMMAR_DICTIONARY['dn'] = ['"12:_dn"', '"18:_dn"', '"19:_dn"', '"31:_dn"', '"7:_dn"', '"20:_dn"', '"27:_dn"', '"6:_dn"', '"26:_dn"', '"17:_dn"', '"11:_dn"', '"10:_dn"', '"15:_dn"', '"23:_dn"',
'"1:_dn"', '"24:_dn"', '"25:_dn"', '"14:_dn"', '"13:_dn"', '"29:_dn"', '"3:_dn"', '"28:_dn"', '"8:_dn"', '"5:_dn"', '"2:_dn"', '"9:_dn"', '"30:_dn"', '"16:_dn"', '"4:_dn"', '"22:_dn"', '"21:_dn"']
GRAMMAR_DICTIONARY['mn'] = ['"january:_mn"', '"february:_mn"', '"december:_mn"', '"june:_mn"', '"august:_mn"',
'"april:_mn"', '"october:_mn"', '"november:_mn"', '"july:_mn"', '"may:_mn"', '"march:_mn"', '"september:_mn"']
GRAMMAR_DICTIONARY['yr'] = ['"1991:_yr"', '"1993:_yr"', '"1992:_yr"']
GRAMMAR_DICTIONARY['pd'] = ['"mealtime:_pd"', '"breakfast:_pd"', '"late:_pd"', '"afternoon:_pd"', '"late_evening:_pd"',
'"daytime:_pd"', '"pm:_pd"', '"late_night:_pd"', '"evening:_pd"', '"morning:_pd"', '"early:_pd"']
GRAMMAR_DICTIONARY['fb'] = ['"y:_fb"', '"qx:_fb"', '"m:_fb"', '"fn:_fb"', '"b:_fb"', '"q:_fb"',
'"bh:_fb"', '"qo:_fb"', '"h:_fb"', '"c:_fb"', '"qw:_fb"', '"k:_fb"', '"f:_fb"', '"yn:_fb"']
GRAMMAR_DICTIONARY['st'] = ['"minnesota:_st"', '"florida:_st"',
'"nevada:_st"', '"california:_st"', '"arizona:_st"']
GRAMMAR_DICTIONARY['i'] = ['"2:_i"', '"3:_i"', '"1:_i"']
GRAMMAR_DICTIONARY['bat'] = ['"737:_bat"', '"767:_bat"']
GRAMMAR_DICTIONARY["ws"] = ['~"\s*"i']
GRAMMAR_DICTIONARY["wsp"] = ['~"\s+"i']
COPY_TERMINAL_SET = {'do', 'city', 'al', 'time',
'meal', 'fn', 'ap', 'rc', 'cl', 'ac', 'da', 'dn',
'mn', 'yr', 'pd', 'fb', 'st', 'i', 'bat'}
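# --- Hypothetical helper (not part of the original module): a minimal sketch of how the rule
# --- lists above could be flattened into a single parsimonious-style grammar string, joining
# --- the alternatives of each nonterminal with the ordered-choice operator " / ". The repo's
# --- actual grammar-compilation utilities are not shown here.
def _format_grammar_string(grammar_dictionary):
    return '\n'.join('%s = %s' % (lhs, ' / '.join(rhs))
                     for lhs, rhs in grammar_dictionary.items())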
| 13,149 | 68.210526 | 888 |
py
|
Unimer
|
Unimer-master/grammars/atis/atis_seq2seq_sql_entity_matcher.py
|
# coding=utf8
import os
import re
import itertools
import collections
import numpy as np
from typing import List, Dict
from nltk.corpus import stopwords
from allennlp.data.tokenizers import Token
def clean_id(s, id_suffix, strip=None):
true_id = re.sub(' +', ' ', s)
if strip:
for v in strip:
true_id = true_id.replace(v, '').strip()
return '%s' % true_id
def clean_name(s, strip=None, split=None, prefix=None):
if split:
for v in split:
s = s.replace(v, ' ')
if strip:
for v in strip:
s = s.replace(v, '')
if prefix:
s = prefix + s
return s
def read_db(db_path, basename, id_col, name_col, id_suffix,
strip_id=None, strip_name=None, split_name=None, prefix_name=None):
filename = os.path.join(db_path, basename)
data = [] # Pairs of (name, id)
with open(filename) as f:
for line in f:
row = [s[1:-1] for s in re.findall('"[^"]*"|[0-9]+', line.strip())]
cur_name = clean_name(row[name_col].lower(), strip=strip_name,
split=split_name, prefix=prefix_name)
cur_id = clean_id(row[id_col].lower(), id_suffix, strip=strip_id)
data.append((cur_name, cur_id))
return data
def strip_unk(w):
# Strip unk:%06d identifiers
m = re.match('^unk:[0-9]{6,}:(.*)$', w)
if m:
return m.group(1)
else:
return w
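# Example: strip_unk('unk:000123:boston') returns 'boston'; strings without the
# unk:NNNNNN: prefix are returned unchanged.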
class ATISSeq2SeqSQLEntityMatcher:
DAYS_OF_WEEK = [
(s, '%s' % s)
for s in ('monday', 'tuesday', 'wednesday', 'thursday',
'friday', 'saturday', 'sunday')
]
# For dates
WORD_NUMBERS = [('one', '1'), ('two', '2'), ('three', '3'), ('four', '4'), ('five', '5'),
('six', '6'), ('seven', '7'), ('eight', '8'), ('nine', '9'), ('ten', '10'),
('eleven', '11'), ('twelve', '12'), ('thirteen', '13'), ('fourteen', '14'),
('fifteen', '15'), ('sixteen', '16'), ('seventeen', '17'), ('eighteen', '18'),
('nineteen', '19'), ('twenty', '20'), ('twenty one', '21'),
('twenty two', '22'),
('twenty three', '23'), ('twenty four', '24'), ('twenty five', '25'),
('twenty six', '26'), ('twenty seven', '27'), ('twenty eight', '28'),
('twenty nine', '29'), ('thirty', '30'), ('thirty one', '31')]
ORDINAL_NUMBERS = [('second', '2'), ('third', '3'), ('fourth', '4'), ('fifth', '5'),
('sixth', '6'), ('seventh', '7'), ('eighth', '8'), ('ninth', '9'),
('tenth', '10'), ('eleventh', '11'), ('twelfth', '12'), ('thirteenth', '13'),
('fourteenth', '14'), ('fifteenth', '15'), ('sixteenth', '16'),
('seventeenth', '17'), ('eighteenth', '18'), ('nineteenth', '19'),
('twentieth', '20'), ('twenty first', '21'), ('twenty second', '22'),
('twenty third', '23'), ('twenty fourth', '24'), ('twenty fifth', '25'),
('twenty sixth', '26'), ('twenty seventh', '27'), ('twenty eighth', '28'),
('twenty ninth', '29'), ('thirtieth', '30'),
('thirty first', '31')]
MONTH_NUMBERS = [('august', '8'), ('april', '4'), ('may', '5'), ('october', '10'),
('june', '6'), ('november', '11'), ('september', '9'), ('february', '2'),
('december', '12'), ('march', '3'), ('july', '7'), ('january', '1')]
MEALS = [(m, '%s' % m) for m in ('breakfast', 'lunch', 'dinner', 'snack')]
ST_CITIES = [(m, "%s" % m) for m in ('st . louis', 'st . petersburg', 'st . paul')]
    MEAL_CODES = [('s/', 's/'), ('sd / d', 'sd/d'), ('d / s', 'd/s')]
def __init__(self, db_path):
self.entries = collections.OrderedDict()
self.handlers = []
self.unique_word_map = collections.OrderedDict()
self.seen_words = set()
self.stop_words = set(stopwords.words('english'))
# CCG Lexicon
filename = os.path.join(db_path, 'lexicon.txt')
entries = []
with open(filename) as f:
for line in f:
x, y = line.strip().split(' :- NP : ')
y = y[:y.index(":")]
entries.append((x, y))
self.add_entries(entries)
# Read DB
city_entries = read_db(db_path, 'CITY.TAB', 1, 1, '', strip_id=['.'])
self.add_entries(city_entries)
self.add_entries(self.DAYS_OF_WEEK)
self.add_entries([(x + 's', y) for x, y in self.DAYS_OF_WEEK]) # Handle "on tuesdays"
self.add_entries(read_db(db_path, 'AIRLINE.TAB', 0, 1, '',
strip_name=[', inc.', ', ltd.']))
self.add_entries(read_db(db_path, 'INTERVAL.TAB', 0, 0, ''))
self.add_entries(read_db(db_path, 'MONTH.TAB', 1, 1, ''))
self.add_entries(read_db(db_path, 'AIRPORT.TAB', 0, 1, '',
strip_name=[], split_name=['/']))
self.add_entries(read_db(db_path, 'COMP_CLS.TAB', 1, 1, ''))
self.add_entries(read_db(db_path, 'CLS_SVC.TAB', 0, 0, '', prefix_name='code '))
self.add_entries(self.MEALS)
self.add_entries(self.WORD_NUMBERS)
self.add_entries(self.ORDINAL_NUMBERS)
self.add_entries(self.ST_CITIES)
self.add_entries(self.MONTH_NUMBERS)
self.add_entries(self.MEAL_CODES)
self.remove_entries(self.DAYS_OF_WEEK)
self.remove_entries([('as', 'as')])
self.handle_times()
self.handle_rc()
self.handle_stop()
self.handle_dollars()
self.handle_flight_numbers()
def handle_times(self):
# Mod 12 deals with 12am/12pm special cases...
self.add_handler('([0-9]{1,2}) am$',
lambda m: '%d00' % (int(m.group(1)) % 12))
self.add_handler('([0-9]{1,2}) pm$',
lambda m: '%d00' % (int(m.group(1)) % 12 + 12))
self.add_handler('([0-9]{1,2})([0-9]{2}) am$',
lambda m: '%d%02d' % (int(m.group(1)) % 12, int(m.group(2))))
self.add_handler('([0-9]{1,2})([0-9]{2}) pm$',
lambda m: '%d%02d' % (int(m.group(1)) % 12 + 12, int(m.group(2))))
self.add_handler("([0-9]{1,2}) o'clock$",
lambda m: '%d00' % (int(m.group(1)) % 12))
self.add_handler("([0-9]{1,2}) o'clock am$",
lambda m: '%d00' % (int(m.group(1)) % 12))
self.add_handler("([0-9]{1,2}) o'clock pm$",
lambda m: '%d00' % (int(m.group(1)) % 12 + 12))
self.add_handler("([0-9]+) hours$",
lambda m: '%d' % (int(m.group(1))))
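        # Hand-traced examples of the handlers above (illustrative only):
        #   "9 pm"   -> '2100'  (9 % 12 + 12 = 21)
        #   "815 am" -> '815'
        #   "12 am"  -> '000'   (12 % 12 = 0, per the mod-12 note above)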
def handle_flight_numbers(self):
self.add_handler('[0-9]{2,}$', lambda m: '%d' % int(m.group(0)))
def handle_dollars(self):
self.add_handler('([0-9]+)$', lambda m: '%d' % int(m.group(1)))
self.add_handler('([0-9]+) dollars$', lambda m: '%d' % int(m.group(1)))
def handle_rc(self):
self.add_handler(re.compile(r'ap/(\d+)$'), lambda m: 'ap_%d' % int(m.group(1)))
self.add_handler(re.compile(r'ap(\d+)$'), lambda m: 'ap_%d' % int(m.group(1)))
def handle_stop(self):
self.add_handler('([0-9]+) stop$', lambda m: '%d' % int(m.group(1)))
self.add_handler('([0-9]+) stops$', lambda m: '%d' % int(m.group(1)))
def add_entries(self, entries):
for name, entity in entries:
# Update self.entries
if name in self.entries:
                if self.entries[name] != entity:
print("Collision detected: %s -> %s, %s" % (name, self.entries[name], entity))
continue
self.entries[name] = entity
# Update self.unique_word_map
for w in name.split(' '):
if w in self.seen_words:
# This word is not unique!
if w in self.unique_word_map:
del self.unique_word_map[w]
else:
self.unique_word_map[w] = entity
self.seen_words.add(w)
def remove_entries(self, entries):
for name, entity in entries:
if name not in self.entries or entity != self.entries[name]:
continue
del self.entries[name]
def add_handler(self, regex, func):
self.handlers.append((regex, func))
def _match_candidates(self, tokens: List[Token]) -> List[str]:
words = [t.text for t in tokens]
entities = [None for i in range(len(words))]
ind_pairs = sorted(list(itertools.combinations(range(len(words) + 1), 2)),
key=lambda x: x[0] - x[1])
words = [strip_unk(w) for w in words] # Strip unk:%06d stuff
# Entries
for i, j in ind_pairs:
if any(x for x in entities[i:j]): continue
span = ' '.join(words[i:j])
if span in self.entries:
entity = self.entries[span]
for k in range(i, j):
entities[k] = entity
# Handlers
for i, j in ind_pairs:
if any(x for x in entities[i:j]): continue
span = ' '.join(words[i:j])
for regex, func in self.handlers:
m = re.match(regex, span)
if m:
entity = func(m)
for k in range(i, j):
entities[k] = entity
# Unique words
for i in range(len(words)):
            if entities[i]: continue
            word = words[i]
if word in self.unique_word_map:
entity = self.unique_word_map[word]
entities[i] = entity
return entities
def match(self, tokens: List[Token]) -> List[str]:
entity_candidates = self._match_candidates(tokens)
return entity_candidates
if __name__ == '__main__':
    matcher = ATISSeq2SeqSQLEntityMatcher('../../data/atis/db')
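    # Hypothetical usage sketch (not part of the original script): the sentence below is
    # illustrative only. `match` returns one entity string (or None) per input token.
    tokens = [Token(t) for t in 'flights from boston to denver at 9 pm'.split()]
    print(matcher.match(tokens))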
| 10,128 | 39.678715 | 100 |
py
|
Unimer
|
Unimer-master/grammars/atis/lambda_calculus_grammar_3.py
|
# coding=utf8
ROOT_RULE = 'statement -> [expression]'
GRAMMAR_DICTIONARY = {}
GRAMMAR_DICTIONARY['statement'] = ['(expression ws)']
GRAMMAR_DICTIONARY['expression'] = [
'(constant)', '(application)', '(abstraction)', '(variable)']
GRAMMAR_DICTIONARY['abstraction'] = [
'("(" ws "_lambda" wsp variable_definition wsp expression ws ")")']
GRAMMAR_DICTIONARY['application'] = ['("(" ws function ws ")")']
GRAMMAR_DICTIONARY['function'] = ['predicate', 'meta_predicate', 'entity_function']
GRAMMAR_DICTIONARY['predicate'] = [
'city_predicate', 'flight', 'ground_transport', 'flight_geo', 'flight_trip', 'flight_time', 'flight_stop',
'fare', 'aircraft', 'airport', 'airline', 'meal_predicate',
'booking_class', '("_time_zone_code" wsp expression)', 'meta_predicate'
]
GRAMMAR_DICTIONARY['flight'] = [
'("_flight_number" wsp expression wsp expression)',
'("_flight" wsp expression)',
'("_connecting" wsp expression)',
'("_turboprop" wsp expression)',
'("_jet" wsp expression)',
]
GRAMMAR_DICTIONARY['flight_geo'] = [
'("_from" wsp expression wsp expression)',
'("_to" wsp expression wsp expression)',
]
GRAMMAR_DICTIONARY['flight_trip'] = [
'("_oneway" wsp expression)',
'("_round_trip" wsp expression)',
]
GRAMMAR_DICTIONARY['flight_time'] = [
'("_weekday" wsp expression)',
'("_tonight" wsp expression)',
'("_today" wsp expression)',
'("_overnight" wsp expression)',
'("_day_after_tomorrow" wsp expression)',
'("_tomorrow_arrival" wsp expression)',
'("_tomorrow" wsp expression)',
'("_daily" wsp expression)',
'("_month_arrival" wsp expression wsp expression)',
'("_day_number" wsp expression wsp expression)',
'("_approx_return_time" wsp expression wsp expression)',
'("_approx_arrival_time" wsp expression wsp expression)',
'("_departure_time" wsp expression wsp expression)',
'("_during_day" wsp expression wsp expression)',
'("_approx_departure_time" wsp expression wsp expression)',
'("_arrival_time" wsp expression wsp expression)',
'("_next_days" wsp expression wsp expression)',
'("_year" wsp expression wsp expression)',
'("_day_return" wsp expression wsp expression)',
'("_day_arrival" wsp expression wsp expression)',
'("_during_day_arrival" wsp expression wsp expression)',
'("_days_from_today" wsp expression wsp expression)',
'("_day_number_arrival" wsp expression wsp expression)',
'("_month" wsp expression wsp expression)',
'("_day_number_return" wsp expression wsp expression)',
'("_day" wsp expression wsp expression)',
'("_before_day" wsp expression wsp expression)',
'("_time_elapsed" wsp expression wsp expression)',
'("_month_return" wsp expression wsp expression)',
'("_after_day" wsp expression wsp expression)',
]
GRAMMAR_DICTIONARY['flight_stop'] = [
'("_has_stops" wsp expression)',
'("_nonstop" wsp expression)',
'("_stops" wsp expression wsp expression)',
'("_stop" wsp expression wsp expression)',
]
GRAMMAR_DICTIONARY['fare'] = [
'("_fare_basis_code" wsp expression wsp expression)',
'("_fare_basis_code" wsp expression)',
'("_fare" wsp expression wsp expression)',
'("_fare" wsp expression)',
'("_discounted" wsp expression)',
]
GRAMMAR_DICTIONARY['meal_predicate'] = [
'("_has_meal" wsp expression)',
'("_meal_code" wsp expression wsp expression)',
'("_meal_code" wsp expression)',
'("_meal:_t" wsp expression)',
'("_meal" wsp expression wsp expression)',
]
GRAMMAR_DICTIONARY['booking_class'] = [
'("_booking_class:_t" wsp expression)',
'("_economy" wsp expression)',
'("_class_of_service" wsp expression)',
'("_booking_class" wsp expression wsp expression)',
'("_class_type" wsp expression wsp expression)',
]
GRAMMAR_DICTIONARY['aircraft'] = [
'("_aircraft_basis_type" wsp expression wsp expression)',
'("_aircraft_code" wsp expression wsp expression)',
'("_aircraft_code:_t" wsp expression)',
'("_aircraft" wsp expression wsp expression)',
'("_aircraft" wsp expression)',
'("_manufacturer" wsp expression wsp expression)',
]
GRAMMAR_DICTIONARY['airport'] = [
'("_airport" wsp expression wsp expression)',
'("_airport" wsp expression)'
]
GRAMMAR_DICTIONARY['airline'] = [
'("_airline" wsp expression wsp expression)',
'("_airline" wsp expression)',
'("_services" wsp expression wsp expression)',
]
GRAMMAR_DICTIONARY['city_predicate'] = [
'("_city" wsp expression)',
'("_loc:_t" wsp expression wsp expression)',
]
GRAMMAR_DICTIONARY['ground_transport'] = [
'("_taxi" wsp expression)',
'("_rapid_transit" wsp expression)',
'("_limousine" wsp expression)',
'("_air_taxi_operation" wsp expression)',
'("_ground_transport" wsp expression)',
'("_rental_car" wsp expression)',
'("_from_airport" wsp expression wsp expression)',
'("_to_city" wsp expression wsp expression)',
]
GRAMMAR_DICTIONARY['entity_function'] = [
'("_stops" wsp expression)',
'("_stop_arrival_time" wsp expression)',
'("_meal" wsp expression)',
'("_booking_class" wsp expression)',
'("_fare" wsp expression)',
'("_aircraft_code" wsp expression)',
'("_minimum_connection_time" wsp expression)',
'("_minutes_distant" wsp expression wsp expression)',
'("_minutes_distant" wsp expression)',
'("_departure_time" wsp expression)',
'("_ground_fare" wsp expression)',
'("_flight_number" wsp expression)',
'("_arrival_time" wsp expression)',
'("_restriction_code" wsp expression)',
'("_capacity" wsp expression)',
'("_cost" wsp expression)',
'("_airline_name" wsp expression)',
'("_airline:_e" wsp expression)',
'("_miles_distant" wsp expression wsp expression)',
'("_miles_distant" wsp expression)',
'("_time_elapsed" wsp expression)',
'("_abbrev" wsp expression)',
]
GRAMMAR_DICTIONARY['meta_predicate'] = [
'("_the" wsp variable wsp application)',
'("_>" wsp expression wsp expression)',
'("_=" wsp expression wsp expression)',
'("_<" wsp expression wsp expression)',
'("_named" wsp expression wsp expression)',
'("_max" wsp variable wsp application)',
'("_min" wsp variable wsp application)',
'("_not" wsp application)',
'("_or" wsp application wsp polyvariadic_expression)',
'("_and" wsp application wsp polyvariadic_expression)',
'("_argmax" wsp variable wsp application wsp application)',
'("_argmin" wsp variable wsp application wsp application)',
'("_sum" wsp variable wsp application wsp application)',
'("_equals" wsp expression wsp expression)',
'("_equals:_t" wsp expression wsp expression)',
'("_exists" wsp variable wsp application)',
'("_count" wsp variable wsp application)',
]
GRAMMAR_DICTIONARY['polyvariadic_expression'] = [
'(application ws polyvariadic_expression)', '""']
GRAMMAR_DICTIONARY['variable_definition'] = [
'(variable ":e")', '(variable ":i")']
GRAMMAR_DICTIONARY['variable'] = ['"$v0"', '"$v1"', '"$v2"', '"$v3"']
GRAMMAR_DICTIONARY['constant'] = ['(do)', '(city)', '(al)', '(time)',
'(meal)', '(fn)', '(ap)', '(rc)', '(cl)', '(ac)', '(da)', '(dn)',
'"9:_hr"', '"boeing:_mf"', '"sa:_dc"', '(mn)', '(yr)', '(pd)', '(fb)', '(st)', '(i)', '(bat)']
GRAMMAR_DICTIONARY['do'] = ['"100:_do"', '"1000:_do"', '"466:_do"', '"416:_do"', '"124:_do"', '"329:_do"', '"1100:_do"', '"415:_do"',
'"200:_do"', '"150:_do"', '"932:_do"', '"500:_do"', '"1288:_do"', '"300:_do"', '"400:_do"', '"1500:_do"']
GRAMMAR_DICTIONARY['city'] = ['"nashville:_ci"', '"indianapolis:_ci"', '"san_diego:_ci"', '"long_beach:_ci"', '"atlanta:_ci"', '"kansas_city:_ci"', '"miami:_ci"', '"st_louis:_ci"', '"columbus:_ci"', '"toronto:_ci"', '"las_vegas:_ci"', '"burbank:_ci"', '"cleveland:_ci"', '"tacoma:_ci"', '"st_petersburg:_ci"', '"memphis:_ci"', '"denver:_ci"', '"dallas:_ci"', '"detroit:_ci"', '"oakland:_ci"', '"baltimore:_ci"', '"pittsburgh:_ci"', '"philadelphia:_ci"',
'"milwaukee:_ci"', '"salt_lake_city:_ci"', '"san_jose:_ci"', '"tampa:_ci"', '"orlando:_ci"', '"chicago:_ci"', '"seattle:_ci"', '"new_york:_ci"', '"san_francisco:_ci"', '"boston:_ci"', '"washington:_ci"', '"cincinnati:_ci"', '"charlotte:_ci"', '"newark:_ci"', '"westchester_county:_ci"', '"los_angeles:_ci"', '"fort_worth:_ci"', '"minneapolis:_ci"', '"ontario:_ci"', '"montreal:_ci"', '"st_paul:_ci"', '"houston:_ci"', '"phoenix:_ci"']
GRAMMAR_DICTIONARY['al'] = ['"wn:_al"', '"ml:_al"', '"cp:_al"', '"nw:_al"', '"yx:_al"', '"ac:_al"', '"dl:_al"', '"kw:_al"', '"delta:_al"', '"as:_al"', '"tw:_al"',
'"co:_al"', '"ff:_al"', '"ea:_al"', '"ua:_al"', '"canadian_airlines_international:_al"', '"hp:_al"', '"lh:_al"', '"nx:_al"', '"usair:_al"', '"aa:_al"', '"us:_al"']
GRAMMAR_DICTIONARY['time'] = ['"1200:_ti"', '"1628:_ti"', '"1830:_ti"', '"823:_ti"', '"1245:_ti"', '"1524:_ti"', '"200:_ti"', '"1615:_ti"', '"1230:_ti"', '"705:_ti"', '"1045:_ti"', '"1700:_ti"', '"1115:_ti"', '"1645:_ti"', '"1730:_ti"', '"815:_ti"', '"0:_ti"', '"500:_ti"', '"1205:_ti"', '"1940:_ti"', '"2000:_ti"', '"1400:_ti"', '"1130:_ti"', '"2200:_ti"', '"645:_ti"', '"718:_ti"', '"2220:_ti"', '"600:_ti"', '"630:_ti"', '"800:_ti"', '"838:_ti"', '"1330:_ti"', '"845:_ti"', '"1630:_ti"', '"1715:_ti"', '"2010:_ti"', '"1000:_ti"', '"1619:_ti"',
'"2100:_ti"', '"1505:_ti"', '"2400:_ti"', '"1923:_ti"', '"1:_ti"', '"1145:_ti"', '"2300:_ti"', '"1620:_ti"', '"2023:_ti"', '"2358:_ti"', '"1500:_ti"', '"1815:_ti"', '"1425:_ti"', '"720:_ti"', '"1024:_ti"', '"1600:_ti"', '"100:_ti"', '"1310:_ti"', '"1300:_ti"', '"700:_ti"', '"650:_ti"', '"1800:_ti"', '"1110:_ti"', '"1410:_ti"', '"1030:_ti"', '"1900:_ti"', '"1017:_ti"', '"1430:_ti"', '"1850:_ti"', '"900:_ti"', '"1930:_ti"', '"1133:_ti"', '"1220:_ti"', '"2226:_ti"', '"1100:_ti"', '"819:_ti"', '"755:_ti"', '"2134:_ti"', '"555:_ti"']
GRAMMAR_DICTIONARY['meal'] = ['"snack:_me"',
'"lunch:_me"', '"dinner:_me"', '"breakfast:_me"']
GRAMMAR_DICTIONARY['fn'] = ['"838:_fn"', '"1059:_fn"', '"417:_fn"', '"323:_fn"', '"311:_fn"', '"137338:_fn"', '"315:_fn"', '"825:_fn"', '"345:_fn"', '"270:_fn"', '"271:_fn"', '"4400:_fn"', '"296:_fn"', '"1765:_fn"', '"343:_fn"', '"1222:_fn"', '"217:_fn"', '"459:_fn"', '"279:_fn"', '"1083:_fn"', '"324:_fn"', '"746:_fn"', '"281:_fn"', '"269:_fn"', '"98:_fn"',
'"212:_fn"', '"505:_fn"', '"852:_fn"', '"82:_fn"', '"352:_fn"', '"928:_fn"', '"19:_fn"', '"139:_fn"', '"415:_fn"', '"539:_fn"', '"3357:_fn"', '"813:_fn"', '"257:_fn"', '"297:_fn"', '"1055:_fn"', '"405:_fn"', '"201:_fn"', '"71:_fn"', '"1291:_fn"', '"402:_fn"', '"771:_fn"', '"106:_fn"', '"1039:_fn"', '"210:_fn"', '"2153:_fn"', '"3724:_fn"', '"1209:_fn"', '"21:_fn"']
GRAMMAR_DICTIONARY['ap'] = ['"ewr:_ap"', '"jfk:_ap"', '"pit:_ap"', '"oak:_ap"', '"bur:_ap"', '"las:_ap"', '"lga:_ap"', '"den:_ap"', '"mco:_ap"', '"dallas:_ap"', '"dfw:_ap"', '"phx:_ap"', '"slc:_ap"', '"iad:_ap"', '"sfo:_ap"', '"ont:_ap"',
'"iah:_ap"', '"ord:_ap"', '"mia:_ap"', '"cvg:_ap"', '"phl:_ap"', '"tpa:_ap"', '"dtw:_ap"', '"yyz:_ap"', '"ind:_ap"', '"atl:_ap"', '"mke:_ap"', '"hou:_ap"', '"bos:_ap"', '"dal:_ap"', '"bwi:_ap"', '"bna:_ap"', '"stapelton:_ap"', '"lax:_ap"']
GRAMMAR_DICTIONARY['rc'] = ['"b:_rc"', '"ap_55:_rc"', '"ap_57:_rc"', '"s_:_rc"', '"sd_d:_rc"',
'"ap_80:_rc"', '"d_s:_rc"', '"ap_58:_rc"', '"ls:_rc"', '"ap:_rc"', '"s:_rc"', '"ap_68:_rc"']
GRAMMAR_DICTIONARY['cl'] = ['"thrift:_cl"',
'"business:_cl"', '"first:_cl"', '"coach:_cl"']
GRAMMAR_DICTIONARY['ac'] = ['"dc10:_ac"', '"j31:_ac"', '"734:_ac"', '"73s:_ac"', '"72s:_ac"', '"100:_ac"', '"757:_ac"', '"d9s:_ac"',
'"d10:_ac"', '"727:_ac"', '"m80:_ac"', '"747:_ac"', '"f28:_ac"', '"737:_ac"', '"733:_ac"', '"767:_ac"']
GRAMMAR_DICTIONARY['da'] = ['"monday:_da"', '"thursday:_da"', '"saturday:_da"', '"friday:_da"',
'"sunday:_da"', '"wednesday:_da"', '"tuesday:_da"']
GRAMMAR_DICTIONARY['dn'] = ['"12:_dn"', '"18:_dn"', '"19:_dn"', '"31:_dn"', '"7:_dn"', '"20:_dn"', '"27:_dn"', '"6:_dn"', '"26:_dn"', '"17:_dn"', '"11:_dn"', '"10:_dn"', '"15:_dn"', '"23:_dn"',
'"1:_dn"', '"24:_dn"', '"25:_dn"', '"14:_dn"', '"13:_dn"', '"29:_dn"', '"3:_dn"', '"28:_dn"', '"8:_dn"', '"5:_dn"', '"2:_dn"', '"9:_dn"', '"30:_dn"', '"16:_dn"', '"4:_dn"', '"22:_dn"', '"21:_dn"']
GRAMMAR_DICTIONARY['mn'] = ['"january:_mn"', '"february:_mn"', '"december:_mn"', '"june:_mn"', '"august:_mn"',
'"april:_mn"', '"october:_mn"', '"november:_mn"', '"july:_mn"', '"may:_mn"', '"march:_mn"', '"september:_mn"']
GRAMMAR_DICTIONARY['yr'] = ['"1991:_yr"', '"1993:_yr"', '"1992:_yr"']
GRAMMAR_DICTIONARY['pd'] = ['"mealtime:_pd"', '"breakfast:_pd"', '"late:_pd"', '"afternoon:_pd"', '"late_evening:_pd"',
'"daytime:_pd"', '"pm:_pd"', '"late_night:_pd"', '"evening:_pd"', '"morning:_pd"', '"early:_pd"']
GRAMMAR_DICTIONARY['fb'] = ['"y:_fb"', '"qx:_fb"', '"m:_fb"', '"fn:_fb"', '"b:_fb"', '"q:_fb"',
'"bh:_fb"', '"qo:_fb"', '"h:_fb"', '"c:_fb"', '"qw:_fb"', '"k:_fb"', '"f:_fb"', '"yn:_fb"']
GRAMMAR_DICTIONARY['st'] = ['"minnesota:_st"', '"florida:_st"',
'"nevada:_st"', '"california:_st"', '"arizona:_st"']
GRAMMAR_DICTIONARY['i'] = ['"2:_i"', '"3:_i"', '"1:_i"']
GRAMMAR_DICTIONARY['bat'] = ['"737:_bat"', '"767:_bat"']
GRAMMAR_DICTIONARY["ws"] = ['~"\s*"i']
GRAMMAR_DICTIONARY["wsp"] = ['~"\s+"i']
COPY_TERMINAL_SET = {'do', 'city', 'al', 'time',
'meal', 'fn', 'ap', 'rc', 'cl', 'ac', 'da', 'dn',
'mn', 'yr', 'pd', 'fb', 'st', 'i', 'bat'}
| 13,898 | 58.652361 | 564 |
py
|
Unimer
|
Unimer-master/grammars/atis/atis_lambda_calculus_entity_matcher.py
|
# coding=utf8
import os
import re
import itertools
import collections
import numpy as np
import pandas as pd
from typing import List, Dict
from allennlp.data.tokenizers import Token
def clean_id(s, id_suffix, strip=None):
true_id = s.replace(' ', '_')
if strip:
for v in strip:
true_id = true_id.replace(v, '').strip()
return '%s:%s' % (true_id, id_suffix)
def clean_name(s, strip=None, split=None, prefix=None):
if split:
for v in split:
s = s.replace(v, ' ')
if strip:
for v in strip:
s = s.replace(v, '')
if prefix:
s = prefix + s
return s
def read_db(db_path, basename, id_col, name_col, id_suffix,
strip_id=None, strip_name=None, split_name=None, prefix_name=None):
filename = os.path.join(db_path, basename)
data = [] # Pairs of (name, id)
with open(filename) as f:
for line in f:
row = [s[1:-1] for s in re.findall('"[^"]*"|[0-9]+', line.strip())]
cur_name = clean_name(row[name_col].lower(), strip=strip_name,
split=split_name, prefix=prefix_name)
cur_id = clean_id(row[id_col].lower(), id_suffix, strip=strip_id)
data.append((cur_name, cur_id))
return data
def print_aligned(a, b, indent=0):
a_toks = []
b_toks = []
for x, y in zip(a, b):
cur_len = max(len(x), len(y))
a_toks.append(x.ljust(cur_len))
b_toks.append(y.ljust(cur_len))
prefix = ' ' * indent
print('%s%s' % (prefix, ' '.join(a_toks)))
print('%s%s' % (prefix, ' '.join(b_toks)))
def parse_entry(line):
"""Parse an entry from the CCG lexicon."""
return tuple(line.strip().split(' :- NP : '))
def strip_unk(w):
# Strip unk:%06d identifiers
m = re.match('^unk:[0-9]{6,}:(.*)$', w)
if m:
return m.group(1)
else:
return w
class ATISLambdaCalculusEntityMatcher:
TYPE_DICT = {
'ci': 'city',
'da': 'day',
'al': 'airline',
'ti': 'time',
'pd': 'time of day',
'dn': 'date number',
'mn': 'month',
'ap': 'airport',
'cl': 'class',
'fb': 'fare code',
'fn': 'flight number',
'me': 'meal',
'do': 'dollars',
'rc': 'restrictions',
'ac': 'aircraft',
'yr': 'year',
'mf': 'manufacturer',
'dc': 'dc',
'st': 'state',
'hr': 'hour',
'i': 'stop'
}
DAYS_OF_WEEK = [
(s, '%s:_da' % s)
for s in ('monday', 'tuesday', 'wednesday', 'thursday',
'friday', 'saturday', 'sunday')
]
# For dates
WORD_NUMBERS = [('one', '1:_dn'), ('two', '2:_dn'), ('three', '3:_dn'), ('four', '4:_dn'), ('five', '5:_dn'),
('six', '6:_dn'), ('seven', '7:_dn'), ('eight', '8:_dn'), ('nine', '9:_dn'), ('ten', '10:_dn'),
('eleven', '11:_dn'), ('twelve', '12:_dn'), ('thirteen', '13:_dn'), ('fourteen', '14:_dn'),
('fifteen', '15:_dn'), ('sixteen', '16:_dn'), ('seventeen', '17:_dn'), ('eighteen', '18:_dn'),
('nineteen', '19:_dn'), ('twenty', '20:_dn'), ('twenty one', '21:_dn'),
('twenty two', '22:_dn'),
('twenty three', '23:_dn'), ('twenty four', '24:_dn'), ('twenty five', '25:_dn'),
('twenty six', '26:_dn'), ('twenty seven', '27:_dn'), ('twenty eight', '28:_dn'),
('twenty nine', '29:_dn'), ('thirty', '30:_dn'), ('thirty one', '31:_dn')]
ORDINAL_NUMBERS = [('second', '2:_dn'), ('third', '3:_dn'), ('fourth', '4:_dn'), ('fifth', '5:_dn'),
('sixth', '6:_dn'), ('seventh', '7:_dn'), ('eighth', '8:_dn'), ('ninth', '9:_dn'),
('tenth', '10:_dn'), ('eleventh', '11:_dn'), ('twelfth', '12:_dn'), ('thirteenth', '13:_dn'),
('fourteenth', '14:_dn'), ('fifteenth', '15:_dn'), ('sixteenth', '16:_dn'),
('seventeenth', '17:_dn'), ('eighteenth', '18:_dn'), ('nineteenth', '19:_dn'),
('twentieth', '20:_dn'), ('twenty first', '21:_dn'), ('twenty second', '22:_dn'),
('twenty third', '23:_dn'), ('twenty fourth', '24:_dn'), ('twenty fifth', '25:_dn'),
('twenty sixth', '26:_dn'), ('twenty seventh', '27:_dn'), ('twenty eighth', '28:_dn'),
('twenty ninth', '29:_dn'), ('thirtieth', '30:_dn'),
('thirty first', '31:_dn')]
MEALS = [(m, '%s:_me' % m) for m in ('breakfast', 'lunch', 'dinner', 'snack')]
ST_CITIES = [(m, "%s:_ci" % m.replace(" . ", "_")) for m in ('st . louis', 'st . petersburg', 'st . paul')]
def __init__(self, db_path):
self.entries = collections.OrderedDict()
self.handlers = []
self.unique_word_map = collections.OrderedDict()
self.seen_words = set()
# CCG Lexicon
filename = os.path.join(db_path, 'lexicon.txt')
entries = []
with open(filename) as f:
for line in f:
x, y = line.strip().split(' :- NP : ')
y = y.replace(':', ':_').strip()
entries.append((x, y))
self.add_entries(entries)
# Read DB
self.add_entries(read_db(db_path, 'CITY.TAB', 1, 1, '_ci', strip_id=['.']))
self.add_entries(self.DAYS_OF_WEEK)
self.add_entries([(x + 's', y) for x, y in self.DAYS_OF_WEEK]) # Handle "on tuesdays"
self.add_entries(read_db(db_path, 'AIRLINE.TAB', 0, 1, '_al',
strip_name=[', inc.', ', ltd.']))
self.add_entries(read_db(db_path, 'INTERVAL.TAB', 0, 0, '_pd'))
self.add_entries(read_db(db_path, 'MONTH.TAB', 1, 1, '_mn'))
self.add_entries(read_db(db_path, 'AIRPORT.TAB', 0, 1, '_ap',
strip_name=[], split_name=['/']))
self.add_entries(read_db(db_path, 'COMP_CLS.TAB', 1, 1, '_cl'))
self.add_entries(read_db(db_path, 'CLS_SVC.TAB', 0, 0, '_fb', prefix_name='code '))
self.add_entries(self.MEALS)
self.add_entries(self.WORD_NUMBERS)
self.add_entries(self.ORDINAL_NUMBERS)
self.add_entries(self.ST_CITIES)
self.handle_times()
self.handle_rc()
self.handle_stop()
self.handle_dollars()
self.handle_flight_numbers()
def handle_times(self):
# Mod 12 deals with 12am/12pm special cases...
self.add_handler('([0-9]{1,2}) am$',
lambda m: '%d00:_ti' % (int(m.group(1)) % 12))
self.add_handler('([0-9]{1,2}) pm$',
lambda m: '%d00:_ti' % (int(m.group(1)) % 12 + 12))
self.add_handler('([0-9]{1,2})([0-9]{2}) am$',
lambda m: '%d%02d:_ti' % (int(m.group(1)) % 12, int(m.group(2))))
self.add_handler('([0-9]{1,2})([0-9]{2}) pm$',
lambda m: '%d%02d:_ti' % (int(m.group(1)) % 12 + 12, int(m.group(2))))
self.add_handler("([0-9]{1,2}) o'clock$",
lambda m: '%d00:_ti' % (int(m.group(1)) % 12))
self.add_handler("([0-9]{1,2}) o'clock am$",
lambda m: '%d00:_ti' % (int(m.group(1)) % 12))
self.add_handler("([0-9]{1,2}) o'clock pm$",
lambda m: '%d00:_ti' % (int(m.group(1)) % 12 + 12))
self.add_handler("([0-9]+) hours$",
lambda m: '%d:_hr' % (int(m.group(1))))
def handle_flight_numbers(self):
self.add_handler('[0-9]{2,}$', lambda m: '%d:_fn' % int(m.group(0)))
def handle_dollars(self):
self.add_handler('([0-9]+)$', lambda m: '%d:_do' % int(m.group(1)))
self.add_handler('([0-9]+) dollars$', lambda m: '%d:_do' % int(m.group(1)))
def handle_rc(self):
self.add_handler(re.compile(r'ap/(\d+)$'), lambda m: 'ap_%d:_rc' % int(m.group(1)))
self.add_handler(re.compile(r'ap(\d+)$'), lambda m: 'ap_%d:_rc' % int(m.group(1)))
def handle_stop(self):
self.add_handler('([0-9]+) stop$', lambda m: '%d:_i' % int(m.group(1)))
self.add_handler('([0-9]+) stops$', lambda m: '%d:_i' % int(m.group(1)))
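        # Hand-traced examples (illustrative only); the handlers attach typed suffixes:
        #   "ap/57"   -> 'ap_57:_rc'
        #   "2 stops" -> '2:_i'
        #   "9 pm"    -> '2100:_ti'  (via handle_times above)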
def add_entries(self, entries):
for name, entity in entries:
# Update self.entries
if name in self.entries:
                if entity not in self.entries[name]:
self.entries[name].append(entity)
else:
self.entries[name] = [entity]
# Update self.unique_word_map
for w in name.split(' '):
if w in self.seen_words:
# This word is not unique!
if w in self.unique_word_map:
del self.unique_word_map[w]
else:
self.unique_word_map[w] = entity
self.seen_words.add(w)
def add_handler(self, regex, func):
self.handlers.append((regex, func))
def get_rule_ids(self, entities, rules: List, copy_terminal_set: List) -> List:
rule_ids = list()
if isinstance(entities, str):
entities = [entities]
for entity in entities:
for rule in rules:
if rule.lhs not in copy_terminal_set:
continue
terminal = rule.rhs.strip('[] ').replace("'", "").replace('"', '')
if terminal == entity:
rule_ids.append(rule.rule_id)
break
else:
print("Cannot find a corresponding rule for terminal %s" % entity)
return rule_ids
def _match_candidates(self, tokens: List[Token], rules: List, copy_terminal_set: List, ) -> List:
words = [t.text for t in tokens]
entities = [[] for i in range(len(words))]
ind_pairs = sorted(list(itertools.combinations(range(len(words) + 1), 2)),
key=lambda x: x[0] - x[1])
ret_entries = []
words = [strip_unk(w) for w in words] # Strip unk:%06d stuff
# Handlers
for i, j in ind_pairs:
if any(x for x in entities[i:j]): continue
span = ' '.join(words[i:j])
for regex, func in self.handlers:
m = re.match(regex, span)
if m:
entity = func(m)
rule_ids = self.get_rule_ids(entity, rules, copy_terminal_set)
if len(rule_ids) > 0:
for k in range(i, j):
entities[k] += rule_ids
ret_entries.append(((i, j), rule_ids))
# Entries
for i, j in ind_pairs:
if any(x for x in entities[i:j]): continue
span = ' '.join(words[i:j])
if span in self.entries:
entity = self.entries[span]
rule_ids = self.get_rule_ids(entity, rules, copy_terminal_set)
if len(rule_ids) > 0:
for k in range(i, j):
entities[k] = rule_ids
ret_entries.append(((i, j), rule_ids))
# Unique words
for i in range(len(words)):
            if entities[i]: continue
            word = words[i]
if word in self.unique_word_map:
entity = self.unique_word_map[word]
rule_ids = self.get_rule_ids(entity, rules, copy_terminal_set)
if len(rule_ids) > 0:
                    entities[i] = rule_ids
ret_entries.append(((i, i+1), rule_ids))
return ret_entries
def match(self, tokens: List[Token], rules: List, copy_terminal_set: List, pad_index: int,) -> List[List]:
entity_candidates = self._match_candidates(tokens, rules, copy_terminal_set)
token_rule_map = [list() for i in range(len(tokens))]
for (beg_idx, end_idx), rule_ids in entity_candidates:
for index in range(beg_idx, end_idx):
token_rule_map[index] += rule_ids
for midx, m in enumerate(token_rule_map):
if len(m) == 0:
m.append(pad_index)
            token_rule_map[midx] = np.array(list(set(m)), dtype=int)
return token_rule_map
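if __name__ == '__main__':
    # Hypothetical usage sketch (not part of the original module). `rules` only needs objects
    # exposing `lhs`, `rhs` and `rule_id`; the rule ids, rhs strings and question below are
    # made up for illustration.
    Rule = collections.namedtuple('Rule', ['lhs', 'rhs', 'rule_id'])
    matcher = ATISLambdaCalculusEntityMatcher('../../data/atis/db')
    rules = [Rule('city', '["boston:_ci"]', 7), Rule('time', '["2100:_ti"]', 11)]
    tokens = [Token(t) for t in 'flights to boston at 9 pm'.split()]
    print(matcher.match(tokens, rules, copy_terminal_set=['city', 'time'], pad_index=0))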
| 12,328 | 39.960133 | 116 |
py
|
Unimer
|
Unimer-master/grammars/atis/get_prolog_terminals.py
|
# coding=utf8
import re
def read_data():
questions, logical_forms = list(), list()
paths = [
"../../data/atis/atis_prolog_train.tsv",
"../../data/atis/atis_prolog_dev.tsv",
"../../data/atis/atis_prolog_test.tsv"]
for p in paths:
with open(p, "r") as f:
for line in f:
line = line.strip()
splits = line.split('\t')
questions.append(splits[0])
logical_forms.append(splits[1])
return questions, logical_forms
def get_const(logical_forms):
pattern = re.compile("const\([A-Z],(.*?)\(([a-z|A-Z|\d|_|\.]+)\)\)")
object_dict = dict()
for lf in logical_forms:
results = pattern.findall(lf)
if len(results):
for result in results:
if result[0] not in object_dict:
object_dict[result[0]] = list()
object_dict[result[0]].append(result[1])
return object_dict
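# Illustrative note: the pattern above pulls typed constants out of const(...) terms; e.g. a
# hypothetical literal const(A,airline_code(usair)) would add 'usair' under the
# 'airline_code' key of object_dict.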
def get_unit_relations(logical_forms):
pattern = re.compile("[,|\(]([a-z|\d|_]+d?)\([A-Z]\)[,|)]")
unit_relations = set()
for lf in logical_forms:
results = pattern.findall(lf)
if len(results):
for result in results:
unit_relations.add(result)
return unit_relations
def get_binary_relations(logical_forms):
pattern = re.compile("[,|\(]([a-z|\d|_]+d?)\([A-Z],[A-Z]\)[,|)]")
binary_relations = set()
for lf in logical_forms:
results = pattern.findall(lf)
if len(results):
for result in results:
binary_relations.add(result)
return binary_relations
def get_triplet_relations(logical_forms):
pattern = re.compile("[,|\(]([a-z|\d|_]+d?)\([A-Z],[A-Z],[A-Z]\)[,|)]")
triplet_relations = set()
for lf in logical_forms:
results = pattern.findall(lf)
if len(results):
for result in results:
triplet_relations.add(result)
return triplet_relations
def get_arg_predicates(logical_forms):
pattern = re.compile("((argmin|argmax)[a-z|\d|_]+?)\(")
arg_relations = set()
for lf in logical_forms:
results = pattern.findall(lf)
if len(results):
for result in results:
arg_relations.add(result[0])
return arg_relations
if __name__ == '__main__':
questions, logical_forms = read_data()
# Const Objects
print("Const Objects")
const_object_dict = get_const(logical_forms)
object_names = ["(%s)" % k for k in const_object_dict.keys()]
print(object_names)
for key, values in const_object_dict.items():
print("""GRAMMAR_DICTIONARY['%s'] = ['("%s(" %s_value ")")']""" % (key, key, key))
print("GRAMMAR_DICTIONARY['%s_value'] = %s" % (key, str(['"%s"' % v for v in set(values)])))
print("==\n\n")
# Unit Relations
print("Unit Relations")
unit_relations = get_unit_relations(logical_forms)
print(unit_relations)
print("""GRAMMAR_DICTIONARY['unit_relation'] = %s""" % (["(%s)" % r for r in unit_relations]))
for r in unit_relations:
print("""GRAMMAR_DICTIONARY['%s'] = ['("%s(" var ")")']""" % (r, r))
print("==\n\n")
# Binary Relations
binary_relations = get_binary_relations(logical_forms)
print("Binary Relations")
print(binary_relations)
print("""GRAMMAR_DICTIONARY['binary_relation'] = %s""" % (["(%s)" % r for r in binary_relations]))
for r in binary_relations:
print("""GRAMMAR_DICTIONARY['%s'] = ['("%s(" var "," var ")")']""" % (r, r))
print("==\n\n")
triplet_relations = get_triplet_relations(logical_forms)
print("Triplet Relations")
print(triplet_relations)
print("""GRAMMAR_DICTIONARY['triplet_relation'] = %s""" % (["(%s)" % r for r in triplet_relations]))
for r in triplet_relations:
print("""GRAMMAR_DICTIONARY['%s'] = ['("%s(" var "," var "," var ")")']""" % (r, r))
print("==\n\n")
# Arg Predicates
print("Arg Relations")
arg_predicates = get_arg_predicates(logical_forms)
print(arg_predicates)
for r in arg_predicates:
print("""GRAMMAR_DICTIONARY['%s'] = ['("%s(" var "," goal)']""" % (r, r))
| 4,187 | 32.774194 | 104 |
py
|
Unimer
|
Unimer-master/grammars/atis/atis_tokenizer.py
|
# coding=utf8
from typing import List
from overrides import overrides
from allennlp.data.tokenizers import Token, WordTokenizer
from allennlp.data.tokenizers.word_splitter import WordSplitter
class FunQLWordSplitter(WordSplitter):
@overrides
def split_words(self, logical_form: str) -> List[Token]:
normalized_lf = logical_form.replace(" ", "::")
replacements = [
('(', ' ( '),
(')', ' ) '),
(',', ' , '),
("\\+", " \\+ "),
]
for a, b in replacements:
normalized_lf = normalized_lf.replace(a, b)
tokens = [Token(t) if "::" not in t else Token(
t.replace("::", " ")) for t in normalized_lf.split()]
return tokens
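# Hand-traced example (illustrative only): spaces inside a logical form are first protected as
# "::" so that multi-word constants survive whitespace splitting, e.g.
#   "foo('new york')"  ->  [foo] [(] ['new york'] [)]
# where the quoted constant comes back as a single Token with its space restored.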
class FunQLWordSplitter2(WordSplitter):
@overrides
def split_words(self, logical_form: str) -> List[Token]:
normalized_lf = logical_form.replace(" ", "::")
replacements = [
('(', ' ( '),
(')', ' ) '),
(',', ' , '),
("\\+", " \\+ "),
("'", " ' "),
]
for a, b in replacements:
normalized_lf = normalized_lf.replace(a, b)
tokens = list()
for t in normalized_lf.split():
if "::" not in t:
tokens.append(Token(t))
else:
tokens += [Token(_t) for _t in t.replace("::", " ").split()]
return tokens
class PrologWordSplitter(WordSplitter):
@overrides
def split_words(self, logical_form: str) -> List[Token]:
normalized_lf = logical_form.replace(" ", "::")
replacements = [
('(', ' ( '),
(')', ' ) '),
(',', ' , '),
("\\+", " \\+ "),
]
for a, b in replacements:
normalized_lf = normalized_lf.replace(a, b)
tokens = [Token(t) if "::" not in t else Token(
t.replace("::", " ")) for t in normalized_lf.split()]
return tokens
class PrologWordSplitter2(WordSplitter):
@overrides
def split_words(self, logical_form: str) -> List[Token]:
normalized_lf = logical_form.replace(" ", "::")
replacements = [
('(', ' ( '),
(')', ' ) '),
(',', ' , '),
("\\+", " \\+ "),
("'", " ' "),
]
for a, b in replacements:
normalized_lf = normalized_lf.replace(a, b)
tokens = list()
for t in normalized_lf.split():
if "::" not in t:
tokens.append(Token(t))
else:
tokens += [Token(_t) for _t in t.replace("::", " ").split()]
return tokens
class PrologWordSplitter3(WordSplitter):
PREDS = [
'cityid', 'countryid', 'placeid', 'riverid', 'stateid',
'capital', 'city', 'lake', 'major', 'mountain', 'place', 'river',
'state', 'area', 'const', 'density', 'elevation', 'high_point',
'higher', 'loc', 'longer', 'low_point', 'lower', 'len', 'next_to',
'population', 'size', 'traverse',
'answer', 'largest', 'smallest', 'highest', 'lowest', 'longest',
'shortest', 'count', 'most', 'fewest', 'sum']
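    # Note: these predicate names (cityid, stateid, traverse, ...) look like GeoQuery Prolog
    # predicates rather than ATIS ones; this splitter variant is also commented out in
    # get_logical_tokenizer below, so it appears to be unused for ATIS.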
@overrides
def split_words(self, logical_form: str) -> List[Token]:
normalized_lf = logical_form.replace(" ", "::")
replacements = [
('(', ' ( '),
(')', ' ) '),
(',', ' , '),
("\\+", " \\+ "),
("'", " ' "),
]
for a, b in replacements:
normalized_lf = normalized_lf.replace(a, b)
tokens = list()
for t in normalized_lf.split():
if "::" not in t:
if t in self.PREDS:
tokens.append(Token("_%s" % t))
else:
tokens.append(Token(t))
else:
tokens += [Token(_t) for _t in t.replace("::", " ").split()]
return tokens
class SQLWordSplitter(WordSplitter):
@overrides
def split_words(self, logical_form: str) -> List[Token]:
normalized_sql = logical_form
replacements = [
('(', ' ( '),
(')', ' ) '),
(',', ' , '),
("'", " ' "),
('.', ' . '),
("\\+", " \\+ "),
]
for a, b in replacements:
normalized_sql = normalized_sql.replace(a, b)
tokens = [Token(t) for t in normalized_sql.split()]
return tokens
class LambdaCalculusWordSplitter(WordSplitter):
@overrides
def split_words(self, logical_form: str) -> List[Token]:
normalized_lc = logical_form
replacements = [
('(', ' ( '),
(')', ' ) '),
("\\+", " \\+ "),
]
for a, b in replacements:
normalized_lc = normalized_lc.replace(a, b)
tokens = [Token(t) for t in normalized_lc.split()]
return tokens
def get_logical_tokenizer(language: str) -> WordSplitter:
splitter = None
if language == 'funql':
splitter = FunQLWordSplitter()
# splitter = FunQLWordSplitter2()
elif language == 'prolog':
splitter = PrologWordSplitter()
# # splitter = PrologWordSplitter2()
# elif language == 'prolog2':
# splitter = PrologWordSplitter3()
elif language == 'sql':
splitter = SQLWordSplitter()
elif language == 'lambda':
splitter = LambdaCalculusWordSplitter()
assert splitter is not None
return splitter
if __name__ == '__main__':
    splitter = get_logical_tokenizer('sql')
    tokenizer = WordTokenizer(splitter)
from atis_normalization import normalize_lambda_calculus, preprocess_funql, normalize_prolog_variable_names, preprocess_sql
normalized_lf = preprocess_sql("SELECT DISTINCT flight_1.flight_id FROM flight flight_1 , airport airport_1 , airport_service airport_service_1 , city city_1 WHERE flight_1.to_airport = airport_1.airport_code AND airport_1.airport_code = 'MKE' AND flight_1.from_airport = airport_service_1.airport_code AND airport_service_1.city_code = city_1.city_code AND 1 = 1")
print(normalized_lf)
tokens = tokenizer.tokenize(normalized_lf)
print(tokens)
| 6,185 | 33.176796 | 369 |
py
|
Unimer
|
Unimer-master/grammars/atis/funql_grammar.py
|
# coding=utf8
"""
FunQL Grammar for ATIS
"""
ROOT_RULE = 'statement -> [answer]'
GRAMMAR_DICTIONARY = {}
GRAMMAR_DICTIONARY['statement'] = ['(answer ws)']
GRAMMAR_DICTIONARY['answer'] = ['("answer" ws "(" ws predicate ws ")" )']
GRAMMAR_DICTIONARY['predicate'] = [
'meta', 'object', 'collection', 'relation',
'("intersection" ws "(" ws predicate ws "," conjunction ")")',
'("or" ws "(" ws predicate ws "," conjunction ws ")")',
'("not" ws "(" predicate ")")'
]
GRAMMAR_DICTIONARY['conjunction'] = [
'(predicate ws "," ws conjunction)',
'(predicate)'
]
GRAMMAR_DICTIONARY['relation'] = [
# Airline
'(_abbrev)', '(_airline_1)', '(_airline_2)', '(_airline_name)', '(_airline)',
# Aircraft
'(_aircraft)', '(_aircraft_1)', '(_aircraft_2)', '(_aircraft_basis_type_2)',
'(_capacity)', '(_jet)', '(_manufacturer_2)', '(_turboprop)',
# Airport
'(_airport)', '(_airport_1)',
# Flight Property
'(_connecting)', '(_discounted)', '(_economy)', '(_flight_number)', '(_flight_number_2)', '(_flight)',
'(_from_1)', '(_from_2)', '(_has_meal)', '(_has_stops)', '(_nonstop)', '(_oneway)',
'(_round_trip)', '(_to_1)', '(_to_2)',
# Flight Time
'(_after_day_2)', '(_approx_arrival_time_2)', '(_approx_departure_time_2)',
'(_approx_return_time_2)', '(_arrival_time)', '(_arrival_time_2)', '(_before_day_2)',
'(_daily)', '(_day_2)', '(_day_after_tomorrow)', '(_day_arrival_2)', '(_day_number_2)',
'(_day_number_arrival_2)', '(_day_number_return_2)', '(_day_return_2)',
'(_days_from_today_2)', '(_departure_time)', '(_departure_time_2)',
'(_during_day_2)', '(_during_day_arrival_2)',
'(_month_2)', '(_month_arrival_2)', '(_month_return_2)',
'(_next_days_2)', '(_overnight)', '(_today)', '(_tomorrow)',
'(_tomorrow_arrival)', '(_tonight)',
# Flight Fare
'(_fare)', '(_fare_2)', '(_fare_basis_code)', '(_fare_basis_code_2)',
'(_fare_time)',
# Flight Stop
'(_stop_1)', '(_stop_2)', '(_stop_arrival_time)',
'(_stops)', '(_stops_2)',
# Booking Class
'(_booking_class_1)', '(_booking_class_2)', '(_class_of_service)', '(_class_type_2)',
# Ground Transport
'(_air_taxi_operation)', '(_from_airport_2)', '(_ground_fare)', '(_ground_transport)',
'(_limousine)', '(_rapid_transit)', '(_rental_car)', '(_to_city_2)', '(_taxi)',
# City
'(_city)',
# Meal
'(_meal)', '(_meal_2)', '(_meal_code)', '(_meal_code_2)',
# Service
'(_services_1)', '(_services_2)', '(_services)',
# Other
'(_flight_aircraft)', '(_flight_airline)',
'(_flight_fare)', '(_loc_1)', '(_loc_2)',
'(_miles_distant)', '(_minimum_connection_time)', '(_minutes_distant)',
'(_named_1)', '(_restriction_code)',
'(_time_elapsed)', '(_time_elapsed_2)',
'(_time_zone_code)', '(_weekday)', '(_year_2)',
]
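# Note (observation from the rule inventory above, not a statement by the original authors):
# the _1/_2 suffixes on relation names (e.g. _from_1 vs _from_2, _airline_1 vs _airline_2)
# appear to select which argument position of the underlying binary predicate is constrained,
# while the unsuffixed form is the unary or default reading.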
GRAMMAR_DICTIONARY['_abbrev'] = ['("_abbrev(" predicate ")")']
GRAMMAR_DICTIONARY['_after_day_2'] = ['("_after_day_2(" predicate ")")']
GRAMMAR_DICTIONARY['_air_taxi_operation'] = ['("_air_taxi_operation(" predicate ")")']
GRAMMAR_DICTIONARY['_aircraft'] = ['("_aircraft(" predicate ")")']
GRAMMAR_DICTIONARY['_aircraft_1'] = ['("_aircraft_1(" predicate ")")']
GRAMMAR_DICTIONARY['_aircraft_2'] = ['("_aircraft_2(" predicate ")")']
GRAMMAR_DICTIONARY['_aircraft_basis_type_2'] = ['("_aircraft_basis_type_2(" predicate ")")']
GRAMMAR_DICTIONARY['_airline'] = ['("_airline(" predicate ")")']
GRAMMAR_DICTIONARY['_airline_1'] = ['("_airline_1(" predicate ")")']
GRAMMAR_DICTIONARY['_airline_2'] = ['("_airline_2(" predicate ")")']
GRAMMAR_DICTIONARY['_airline_name'] = ['("_airline_name(" predicate ")")']
GRAMMAR_DICTIONARY['_airport'] = ['("_airport(" predicate ")")']
GRAMMAR_DICTIONARY['_airport_1'] = ['("_airport_1(" predicate ")")']
GRAMMAR_DICTIONARY['_approx_arrival_time_2'] = ['("_approx_arrival_time_2(" predicate ")")']
GRAMMAR_DICTIONARY['_approx_departure_time_2'] = ['("_approx_departure_time_2(" predicate ")")']
GRAMMAR_DICTIONARY['_approx_return_time_2'] = ['("_approx_return_time_2(" predicate ")")']
GRAMMAR_DICTIONARY['_arrival_time'] = ['("_arrival_time(" predicate ")")']
GRAMMAR_DICTIONARY['_arrival_time_2'] = ['("_arrival_time_2(" predicate ")")']
GRAMMAR_DICTIONARY['_before_day_2'] = ['("_before_day_2(" predicate ")")']
GRAMMAR_DICTIONARY['_booking_class_1'] = ['("_booking_class_1(" predicate ")")']
GRAMMAR_DICTIONARY['_booking_class_2'] = ['("_booking_class_2(" predicate ")")']
GRAMMAR_DICTIONARY['_capacity'] = ['("_capacity(" predicate ")")']
GRAMMAR_DICTIONARY['_city'] = ['("_city(" predicate ")")']
GRAMMAR_DICTIONARY['_class_of_service'] = ['("_class_of_service(" predicate ")")']
GRAMMAR_DICTIONARY['_class_type_2'] = ['("_class_type_2(" predicate ")")']
GRAMMAR_DICTIONARY['_connecting'] = ['("_connecting(" predicate ")")']
GRAMMAR_DICTIONARY['_daily'] = ['("_daily(" predicate ")")']
GRAMMAR_DICTIONARY['_day_2'] = ['("_day_2(" predicate ")")']
GRAMMAR_DICTIONARY['_day_after_tomorrow'] = ['("_day_after_tomorrow(" predicate ")")']
GRAMMAR_DICTIONARY['_day_arrival_2'] = ['("_day_arrival_2(" predicate ")")']
GRAMMAR_DICTIONARY['_day_number_2'] = ['("_day_number_2(" predicate ")")']
GRAMMAR_DICTIONARY['_day_number_arrival_2'] = ['("_day_number_arrival_2(" predicate ")")']
GRAMMAR_DICTIONARY['_day_number_return_2'] = ['("_day_number_return_2(" predicate ")")']
GRAMMAR_DICTIONARY['_day_return_2'] = ['("_day_return_2(" predicate ")")']
GRAMMAR_DICTIONARY['_days_from_today_2'] = ['("_days_from_today_2(" predicate ")")']
GRAMMAR_DICTIONARY['_departure_time'] = ['("_departure_time(" predicate ")")']
GRAMMAR_DICTIONARY['_departure_time_2'] = ['("_departure_time_2(" predicate ")")']
GRAMMAR_DICTIONARY['_discounted'] = ['("_discounted(" predicate ")")']
GRAMMAR_DICTIONARY['_during_day_2'] = ['("_during_day_2(" predicate ")")']
GRAMMAR_DICTIONARY['_during_day_arrival_2'] = ['("_during_day_arrival_2(" predicate ")")']
GRAMMAR_DICTIONARY['_economy'] = ['("_economy(" predicate ")")']
GRAMMAR_DICTIONARY['_fare'] = ['("_fare(" predicate ")")']
GRAMMAR_DICTIONARY['_fare_2'] = ['("_fare_2(" predicate ")")']
GRAMMAR_DICTIONARY['_fare_basis_code'] = ['("_fare_basis_code(" predicate ")")']
GRAMMAR_DICTIONARY['_fare_basis_code_2'] = ['("_fare_basis_code_2(" predicate ")")']
GRAMMAR_DICTIONARY['_fare_time'] = ['("_fare_time(" predicate ")")']
GRAMMAR_DICTIONARY['_flight'] = ['("_flight(" predicate ")")']
GRAMMAR_DICTIONARY['_flight_aircraft'] = ['("_flight_aircraft(" predicate ")")']
GRAMMAR_DICTIONARY['_flight_airline'] = ['("_flight_airline(" predicate ")")']
GRAMMAR_DICTIONARY['_flight_fare'] = ['("_flight_fare(" predicate ")")']
GRAMMAR_DICTIONARY['_flight_number'] = ['("_flight_number(" predicate ")")']
GRAMMAR_DICTIONARY['_flight_number_2'] = ['("_flight_number_2(" predicate ")")']
GRAMMAR_DICTIONARY['_from_1'] = ['("_from_1(" predicate ")")']
GRAMMAR_DICTIONARY['_from_2'] = ['("_from_2(" predicate ")")']
GRAMMAR_DICTIONARY['_from_airport_2'] = ['("_from_airport_2(" predicate ")")']
GRAMMAR_DICTIONARY['_ground_fare'] = ['("_ground_fare(" predicate ")")']
GRAMMAR_DICTIONARY['_ground_transport'] = ['("_ground_transport(" predicate ")")']
GRAMMAR_DICTIONARY['_has_meal'] = ['("_has_meal(" predicate ")")']
GRAMMAR_DICTIONARY['_has_stops'] = ['("_has_stops(" predicate ")")']
GRAMMAR_DICTIONARY['_jet'] = ['("_jet(" predicate ")")']
GRAMMAR_DICTIONARY['_limousine'] = ['("_limousine(" predicate ")")']
GRAMMAR_DICTIONARY['_loc_1'] = ['("_loc:_t_1(" predicate ")")']
GRAMMAR_DICTIONARY['_loc_2'] = ['("_loc:_t_2(" predicate ")")']
GRAMMAR_DICTIONARY['_manufacturer_2'] = ['("_manufacturer_2(" predicate ")")']
GRAMMAR_DICTIONARY['_max'] = ['("_max(" predicate ")")']
GRAMMAR_DICTIONARY['_meal'] = ['("_meal(" predicate ")")']
GRAMMAR_DICTIONARY['_meal_2'] = ['("_meal_2(" predicate ")")']
GRAMMAR_DICTIONARY['_meal_code'] = ['("_meal_code(" predicate ")")']
GRAMMAR_DICTIONARY['_meal_code_2'] = ['("_meal_code_2(" predicate ")")']
GRAMMAR_DICTIONARY['_miles_distant'] = ['("_miles_distant(" ws predicate ws "," ws predicate ws ")")']
GRAMMAR_DICTIONARY['_min'] = ['("_min(" predicate ")")']
GRAMMAR_DICTIONARY['_minimum_connection_time'] = ['("_minimum_connection_time(" predicate ")")']
GRAMMAR_DICTIONARY['_minutes_distant'] = ['("_minutes_distant(" predicate ")")']
GRAMMAR_DICTIONARY['_month_2'] = ['("_month_2(" predicate ")")']
GRAMMAR_DICTIONARY['_month_arrival_2'] = ['("_month_arrival_2(" predicate ")")']
GRAMMAR_DICTIONARY['_month_return_2'] = ['("_month_return_2(" predicate ")")']
GRAMMAR_DICTIONARY['_named_1'] = ['("_named_1(" predicate ")")']
GRAMMAR_DICTIONARY['_next_days_2'] = ['("_next_days_2(" predicate ")")']
GRAMMAR_DICTIONARY['_nonstop'] = ['("_nonstop(" predicate ")")']
GRAMMAR_DICTIONARY['_oneway'] = ['("_oneway(" predicate ")")']
GRAMMAR_DICTIONARY['_overnight'] = ['("_overnight(" predicate ")")']
GRAMMAR_DICTIONARY['_rapid_transit'] = ['("_rapid_transit(" predicate ")")']
GRAMMAR_DICTIONARY['_rental_car'] = ['("_rental_car(" predicate ")")']
GRAMMAR_DICTIONARY['_restriction_code'] = ['("_restriction_code(" predicate ")")']
GRAMMAR_DICTIONARY['_round_trip'] = ['("_round_trip(" predicate ")")']
GRAMMAR_DICTIONARY['_services'] = ['("_services(" ws predicate ws "," ws predicate ws ")")']
GRAMMAR_DICTIONARY['_services_1'] = ['("_services_1(" predicate ")")']
GRAMMAR_DICTIONARY['_services_2'] = ['("_services_2(" predicate ")")']
GRAMMAR_DICTIONARY['_stop_1'] = ['("_stop_1(" predicate ")")']
GRAMMAR_DICTIONARY['_stop_2'] = ['("_stop_2(" predicate ")")']
GRAMMAR_DICTIONARY['_stop_arrival_time'] = ['("_stop_arrival_time(" predicate ")")']
GRAMMAR_DICTIONARY['_stops'] = ['("_stops(" predicate ")")']
GRAMMAR_DICTIONARY['_stops_2'] = ['("_stops_2(" predicate ")")']
GRAMMAR_DICTIONARY['_taxi'] = ['("_taxi(" predicate ")")']
GRAMMAR_DICTIONARY['_time_elapsed'] = ['("_time_elapsed(" predicate ")")']
GRAMMAR_DICTIONARY['_time_elapsed_2'] = ['("_time_elapsed_2(" predicate ")")']
GRAMMAR_DICTIONARY['_time_zone_code'] = ['("_time_zone_code(" predicate ")")']
GRAMMAR_DICTIONARY['_to_1'] = ['("_to_1(" predicate ")")']
GRAMMAR_DICTIONARY['_to_2'] = ['("_to_2(" predicate ")")']
GRAMMAR_DICTIONARY['_to_city_2'] = ['("_to_city_2(" predicate ")")']
GRAMMAR_DICTIONARY['_today'] = ['("_today(" predicate ")")']
GRAMMAR_DICTIONARY['_tomorrow'] = ['("_tomorrow(" predicate ")")']
GRAMMAR_DICTIONARY['_tomorrow_arrival'] = ['("_tomorrow_arrival(" predicate ")")']
GRAMMAR_DICTIONARY['_tonight'] = ['("_tonight(" predicate ")")']
GRAMMAR_DICTIONARY['_turboprop'] = ['("_turboprop(" predicate ")")']
GRAMMAR_DICTIONARY['_weekday'] = ['("_weekday(" predicate ")")']
GRAMMAR_DICTIONARY['_year_2'] = ['("_year_2(" predicate ")")']
# Meta-Predicates
GRAMMAR_DICTIONARY['meta'] = [
'(_less_than_arrival_time_2)', '(_less_than_departure_time_2)', '(_less_than_fare_2)',
'(_larger_than_arrival_time_2)', '(_larger_than_capacity_2)', '(_larger_than_departure_time_2)',
'(_larger_than_stops_2)', '(_equals)',
'(argmax_arrival_time)', '(argmax_capacity)', '(argmax_count)',
'(argmax_departure_time)', '(argmax_fare)', '(argmax_stops)',
'(argmin_arrival_time)', '(argmin_capacity)', '(argmin_departure_time)',
'(argmin_fare)', '(argmin_miles_distant_2)', '(argmin_stops)',
'(argmin_time_elapsed)', '(count)', '(sum_capacity)', '(sum_stops)',
'(_max)', '(_min)',
]
GRAMMAR_DICTIONARY['_less_than_arrival_time_2'] = ['("_<_arrival_time_2(" predicate ")")']
GRAMMAR_DICTIONARY['_less_than_departure_time_2'] = ['("_<_departure_time_2(" predicate ")")']
GRAMMAR_DICTIONARY['_less_than_fare_2'] = ['("_<_fare_2(" predicate ")")']
GRAMMAR_DICTIONARY['_larger_than_arrival_time_2'] = ['("_>_arrival_time_2(" predicate ")")']
GRAMMAR_DICTIONARY['_larger_than_capacity_2'] = ['("_>_capacity_2(" predicate ")")']
GRAMMAR_DICTIONARY['_larger_than_departure_time_2'] = ['("_>_departure_time_2(" predicate ")")']
GRAMMAR_DICTIONARY['_larger_than_stops_2'] = ['("_>_stops_2(" predicate ")")']
GRAMMAR_DICTIONARY['_equals'] = ['("_equals(" ws predicate ws "," predicate ws ")")']
GRAMMAR_DICTIONARY['argmax_arrival_time'] = ['("argmax_arrival_time(" predicate ")")']
GRAMMAR_DICTIONARY['argmax_capacity'] = ['("argmax_capacity(" predicate ")")']
GRAMMAR_DICTIONARY['argmax_count'] = ['("argmax_count(" predicate ")")']
GRAMMAR_DICTIONARY['argmax_departure_time'] = ['("argmax_departure_time(" predicate ")")']
GRAMMAR_DICTIONARY['argmax_fare'] = ['("argmax_fare(" predicate ")")']
GRAMMAR_DICTIONARY['argmax_stops'] = ['("argmax_stops(" predicate ")")']
GRAMMAR_DICTIONARY['argmin_arrival_time'] = ['("argmin_arrival_time(" predicate ")")']
GRAMMAR_DICTIONARY['argmin_capacity'] = ['("argmin_capacity(" predicate ")")']
GRAMMAR_DICTIONARY['argmin_departure_time'] = ['("argmin_departure_time(" predicate ")")']
GRAMMAR_DICTIONARY['argmin_fare'] = ['("argmin_fare(" predicate ")")']
GRAMMAR_DICTIONARY['argmin_miles_distant_2'] = ['("argmin_miles_distant_2(" predicate ")")']
GRAMMAR_DICTIONARY['argmin_stops'] = ['("argmin_stops(" predicate ")")']
GRAMMAR_DICTIONARY['argmin_time_elapsed'] = ['("argmin_time_elapsed(" predicate ")")']
GRAMMAR_DICTIONARY['count'] = ['("count(" predicate ")")']
GRAMMAR_DICTIONARY['sum_capacity'] = ['("sum_capacity(" predicate ")")']
GRAMMAR_DICTIONARY['sum_stops'] = ['("sum_stops(" predicate ")")']
GRAMMAR_DICTIONARY['_max'] = ['("_max(" predicate ")")']
GRAMMAR_DICTIONARY['_min'] = ['("_min(" predicate ")")']
# Collection
GRAMMAR_DICTIONARY['collection'] = [
'(all_flights)', '(all_booking_classes)', '(all_aircrafts)',
'(all_airlines)', '(all_airports)', '(all_class_of_service)',
'(all_fare_basis_codes)', '(all_ground_transports)', '(all_meal_codes)',
'(all_cities)',
]
GRAMMAR_DICTIONARY['all_flights'] = ['"_flight" ws "(" ws "all" ws ")"']
GRAMMAR_DICTIONARY['all_cities'] = ['"_city" ws "(" ws "all" ws ")"']
GRAMMAR_DICTIONARY['all_booking_classes'] = ['"_booking_class:_t" ws "(" ws "all" ws ")"']
GRAMMAR_DICTIONARY['all_aircrafts'] = ['"_aircraft" ws "(" ws "all" ws ")"']
GRAMMAR_DICTIONARY['all_airlines'] = ['"_airline" ws "(" ws "all" ws ")"']
GRAMMAR_DICTIONARY['all_airports'] = ['"_airport" ws "(" ws "all" ws ")"']
GRAMMAR_DICTIONARY['all_class_of_service'] = ['"_class_of_service" ws "(" ws "all" ws ")"']
GRAMMAR_DICTIONARY['all_fare_basis_codes'] = ['"_fare_basis_code" ws "(" ws "all" ws ")"']
GRAMMAR_DICTIONARY['all_ground_transports'] = ['"_ground_transport" ws "(" ws "all" ws ")"']
GRAMMAR_DICTIONARY['all_meal_codes'] = ['"_meal_code" ws "(" ws "all" ws ")"']
# Object
GRAMMAR_DICTIONARY['object'] = [
'(fare_basis_code)', '(meal_code)', '(airport_code)', '(airline_code)',
'(aircraft_code_object)', '(city_name)', '(time)', '(flight_number_object)',
'(class_description)', '(day_period)', '(state_name)',
'(day_number)', '(month)', '(day)', '(dollar)', '(meal_description)',
'("hour(9)")', '(integer)', '(basis_type)', '(year)',
'("days_code(sa)")', '("manufacturer(boeing)")'
]
GRAMMAR_DICTIONARY['fare_basis_code'] = ['("fare_basis_code(" fare_basis_code_value ")")']
GRAMMAR_DICTIONARY['fare_basis_code_value'] = ['"qx"', '"qw"', '"qo"', '"fn"', '"yn"', '"bh"', '"k"', '"b"', '"h"', '"f"', '"q"', '"c"', '"y"', '"m"',]
GRAMMAR_DICTIONARY['meal_code'] = ['("meal_code(" meal_code_value ")")']
GRAMMAR_DICTIONARY['meal_code_value'] = ['"ap_58"', '"ap_57"', '"d_s"', '"b"', '"ap_55"', '"s_"', '"sd_d"', '"ls"', '"ap_68"', '"ap_80"', '"ap"', '"s"', ]
GRAMMAR_DICTIONARY['airline_code'] = ['("airline_code(" airline_code_value ")")']
GRAMMAR_DICTIONARY['airline_code_value'] = ['"usair"', '"co"', '"ua"', '"delta"', '"as"', '"ff"', '"canadian_airlines_international"', '"us"', '"nx"', '"hp"', '"aa"', '"kw"', '"ml"', '"nw"', '"ac"', '"tw"', '"yx"', '"ea"', '"dl"', '"wn"', '"lh"', '"cp"']
GRAMMAR_DICTIONARY['airport_code'] = ['("airport_code(" airport_code_value ")")']
GRAMMAR_DICTIONARY['airport_code_value'] = ['"dallas"', '"ont"', '"stapelton"', '"bna"', '"bwi"', '"iad"', '"sfo"', '"phl"', '"pit"', '"slc"', '"phx"', '"lax"', '"bur"', '"ind"', '"iah"', '"dtw"', '"las"', '"dal"', '"den"', '"atl"', '"ewr"', '"bos"', '"tpa"', '"jfk"', '"mke"', '"oak"', '"yyz"', '"dfw"', '"cvg"', '"hou"', '"lga"', '"ord"', '"mia"', '"mco"']
GRAMMAR_DICTIONARY['aircraft_code_object'] = ['("aircraft_code(" aircraft_code_value ")")']
GRAMMAR_DICTIONARY['aircraft_code_value'] = ['"m80"', '"dc10"', '"727"', '"d9s"', '"f28"', '"j31"', '"767"', '"734"', '"73s"', '"747"', '"737"', '"733"', '"d10"', '"100"', '"757"', '"72s"']
GRAMMAR_DICTIONARY['city_name'] = ['("city_name(" city_name_value ")")']
GRAMMAR_DICTIONARY['city_name_value'] = ['"cleveland"', '"milwaukee"', '"detroit"', '"los_angeles"', '"miami"', '"salt_lake_city"', '"ontario"', '"tacoma"', '"memphis"', '"denver"', '"san_francisco"', '"new_york"', '"tampa"', '"washington"', '"westchester_county"', '"boston"', '"newark"', '"pittsburgh"', '"charlotte"', '"columbus"', '"atlanta"', '"oakland"', '"kansas_city"', '"st_louis"', '"nashville"', '"chicago"', '"fort_worth"', '"san_jose"', '"dallas"', '"philadelphia"', '"st_petersburg"', '"baltimore"', '"san_diego"', '"cincinnati"', '"long_beach"', '"phoenix"', '"indianapolis"', '"burbank"', '"montreal"', '"seattle"', '"st_paul"', '"minneapolis"', '"houston"', '"orlando"', '"toronto"', '"las_vegas"']
GRAMMAR_DICTIONARY['time'] = ['("time(" time_value ")")']
GRAMMAR_DICTIONARY['time_value'] = [
'"1850"', '"1110"', '"2000"', '"1815"', '"1024"', '"1500"',
'"1900"', '"1600"', '"1300"', '"1800"', '"1200"', '"1628"',
'"1830"', '"823"', '"1245"', '"1524"', '"200"', '"1615"',
'"1230"', '"705"', '"1045"', '"1700"', '"1115"', '"1645"',
'"1730"', '"815"', '"0"', '"500"', '"1205"', '"1940"',
'"1400"', '"1130"', '"2200"', '"645"', '"718"', '"2220"',
'"600"', '"630"', '"800"', '"838"', '"1330"', '"845"', '"1630"',
'"1715"', '"2010"', '"1000"', '"1619"', '"2100"', '"1505"',
'"2400"', '"1923"', '"100"', '"1145"', '"2300"', '"1620"',
'"2023"', '"2358"', '"1425"', '"720"', '"1310"', '"700"', '"650"',
'"1410"', '"1030"', '"1900"', '"1017"', '"1430"', '"900"', '"1930"',
'"1133"', '"1220"', '"2226"', '"1100"', '"819"', '"755"', '"2134"', '"555"', '"1"',
]
GRAMMAR_DICTIONARY['flight_number_object'] = ['("flight_number(" flight_number_value ")")']
GRAMMAR_DICTIONARY['flight_number_value'] = [
'"1291"', '"345"', '"813"', '"71"', '"1059"', '"212"', '"1209"',
'"281"', '"201"', '"324"', '"19"', '"352"', '"137338"', '"4400"',
'"323"', '"505"', '"825"', '"82"', '"279"', '"1055"', '"296"', '"315"',
'"1765"', '"405"', '"771"', '"106"', '"2153"', '"257"', '"402"',
'"343"', '"98"', '"1039"', '"217"', '"539"', '"459"', '"417"',
'"1083"', '"3357"', '"311"', '"210"', '"139"', '"852"', '"838"',
'"415"', '"3724"', '"21"', '"928"', '"269"', '"270"',
'"297"', '"746"', '"1222"', '"271"'
]
GRAMMAR_DICTIONARY['class_description'] = ['("class_description(" class_description_value ")")']
GRAMMAR_DICTIONARY['class_description_value'] = ['"thrift"', '"coach"', '"first"', '"business"']
GRAMMAR_DICTIONARY['day_period'] = ['("day_period(" day_period_value ")")']
GRAMMAR_DICTIONARY['day_period_value'] = ['"early"', '"afternoon"', '"late_evening"', '"late_night"', '"mealtime"', '"evening"', '"pm"', '"daytime"', '"breakfast"', '"morning"', '"late"']
GRAMMAR_DICTIONARY['state_name'] = ['("state_name(" state_name_value ")")']
GRAMMAR_DICTIONARY['state_name_value'] = ['"minnesota"', '"florida"', '"arizona"', '"nevada"', '"california"']
GRAMMAR_DICTIONARY['day_number'] = ['("day_number(" day_number_value ")")']
GRAMMAR_DICTIONARY['day_number_value'] = ['"13"', '"29"', '"28"', '"22"', '"21"', '"16"', '"30"', '"12"', '"18"', '"19"', '"31"', '"20"', '"27"', '"6"', '"26"', '"17"', '"11"', '"10"', '"15"', '"23"', '"24"', '"25"', '"14"', '"1"', '"3"', '"8"', '"5"', '"2"', '"9"', '"4"', '"7"']
GRAMMAR_DICTIONARY['month'] = ['("month(" month_value ")")']
GRAMMAR_DICTIONARY['month_value'] = ['"april"', '"august"', '"may"', '"october"', '"june"', '"november"', '"september"', '"february"', '"december"', '"march"', '"july"', '"january"']
GRAMMAR_DICTIONARY['day'] = ['("day(" day_value ")")']
GRAMMAR_DICTIONARY['day_value'] = ['"monday"', '"wednesday"', '"thursday"', '"tuesday"', '"saturday"', '"friday"', '"sunday"']
GRAMMAR_DICTIONARY['dollar'] = ['("dollar(" dollar_value ")")']
GRAMMAR_DICTIONARY['dollar_value'] = ['"1000"', '"1500"', '"466"', '"1288"', '"300"', '"329"', '"416"', '"124"', '"932"', '"1100"', '"200"', '"500"', '"100"', '"415"', '"150"', '"400"']
GRAMMAR_DICTIONARY['meal_description'] = ['("meal_description(" meal_description_value ")")']
GRAMMAR_DICTIONARY['meal_description_value'] = ['"snack"', '"breakfast"', '"lunch"', '"dinner"']
GRAMMAR_DICTIONARY['integer'] = ['("integer(" integer_value ")")']
GRAMMAR_DICTIONARY['integer_value'] = ['"2"', '"1"', '"3"']
GRAMMAR_DICTIONARY['basis_type'] = ['("basis_type(" basis_type_value ")")']
GRAMMAR_DICTIONARY['basis_type_value'] = ['"737"', '"767"']
GRAMMAR_DICTIONARY['year'] = ['("year(" year_value ")")']
GRAMMAR_DICTIONARY['year_value'] = ['"1991"', '"1993"', '"1992"']
GRAMMAR_DICTIONARY["ws"] = ['~"\s*"i']
GRAMMAR_DICTIONARY["wsp"] = ['~"\s+"i']
COPY_TERMINAL_SET = {
'fare_basis_code_value', 'meal_code_value', 'airport_code_value', 'airline_code_value',
'aircraft_code_value', 'city_name_value', 'time_value', 'flight_number_value',
'class_description_value', 'day_period_value', 'state_name_value',
'day_number_value', 'month_value', 'day_value', 'dollar_value', 'meal_description_value',
'integer_value', 'basis_type_value', 'year_value',
}
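# --------------------------------------------------------------------------
# Illustrative usage sketch (an assumption, not part of the original file):
# the right-hand sides above follow PEG syntax (note the ~"\s*"i regex
# terminals), so one plausible way to exercise GRAMMAR_DICTIONARY is to join
# it into a grammar string for the `parsimonious` library. The local helper
# `format_grammar_string` below is a hypothetical sketch of such glue code.
if __name__ == '__main__':
    from parsimonious.grammar import Grammar
    def format_grammar_string(grammar_dictionary):
        # One "lhs = alt1 / alt2 / ..." line per nonterminal; backslashes are
        # doubled so regex terminals such as ~"\s*"i survive string parsing.
        lines = ['%s = %s' % (lhs, ' / '.join(rhs))
                 for lhs, rhs in grammar_dictionary.items()]
        return '\n'.join(lines).replace('\\', '\\\\')
    grammar = Grammar(format_grammar_string(GRAMMAR_DICTIONARY))
    print('Compiled %d grammar rules' % len(grammar))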
| 21,561 | 67.234177 | 715 |
py
|
Unimer
|
Unimer-master/grammars/atis/atis_entity_matcher.py
|
# coding=utf8
import os
import re
import itertools
import collections
import numpy as np
from typing import List, Dict
from allennlp.data.tokenizers import Token
def clean_id(s, id_suffix, strip=None):
true_id = s.replace(' ', '_')
if strip:
for v in strip:
true_id = true_id.replace(v, '').strip()
return '%s' % true_id
def clean_name(s, strip=None, split=None, prefix=None):
if split:
for v in split:
s = s.replace(v, ' ')
if strip:
for v in strip:
s = s.replace(v, '')
if prefix:
s = prefix + s
return s
def read_db(db_path, basename, id_col, name_col, id_suffix,
strip_id=None, strip_name=None, split_name=None, prefix_name=None):
filename = os.path.join(db_path, basename)
data = [] # Pairs of (name, id)
with open(filename) as f:
for line in f:
row = [s[1:-1] for s in re.findall('"[^"]*"|[0-9]+', line.strip())]
cur_name = clean_name(row[name_col].lower(), strip=strip_name,
split=split_name, prefix=prefix_name)
cur_id = clean_id(row[id_col].lower(), id_suffix, strip=strip_id)
data.append((cur_name, cur_id))
return data
def strip_unk(w):
# Strip unk:%06d identifiers
m = re.match('^unk:[0-9]{6,}:(.*)$', w)
if m:
return m.group(1)
else:
return w
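# Example behaviour of strip_unk (illustrative): strip_unk('unk:000123:boston')
# returns 'boston', while an ordinary token such as 'boston' is returned as-is.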
class ATISEntityMatcher:
DAYS_OF_WEEK = [
(s, '%s' % s)
for s in ('monday', 'tuesday', 'wednesday', 'thursday',
'friday', 'saturday', 'sunday')
]
# For dates
WORD_NUMBERS = [('one', '1'), ('two', '2'), ('three', '3'), ('four', '4'), ('five', '5'),
('six', '6'), ('seven', '7'), ('eight', '8'), ('nine', '9'), ('ten', '10'),
('eleven', '11'), ('twelve', '12'), ('thirteen', '13'), ('fourteen', '14'),
('fifteen', '15'), ('sixteen', '16'), ('seventeen', '17'), ('eighteen', '18'),
('nineteen', '19'), ('twenty', '20'), ('twenty one', '21'),
('twenty two', '22'),
('twenty three', '23'), ('twenty four', '24'), ('twenty five', '25'),
('twenty six', '26'), ('twenty seven', '27'), ('twenty eight', '28'),
('twenty nine', '29'), ('thirty', '30'), ('thirty one', '31')]
ORDINAL_NUMBERS = [('second', '2'), ('third', '3'), ('fourth', '4'), ('fifth', '5'),
('sixth', '6'), ('seventh', '7'), ('eighth', '8'), ('ninth', '9'),
('tenth', '10'), ('eleventh', '11'), ('twelfth', '12'), ('thirteenth', '13'),
('fourteenth', '14'), ('fifteenth', '15'), ('sixteenth', '16'),
('seventeenth', '17'), ('eighteenth', '18'), ('nineteenth', '19'),
('twentieth', '20'), ('twenty first', '21'), ('twenty second', '22'),
('twenty third', '23'), ('twenty fourth', '24'), ('twenty fifth', '25'),
('twenty sixth', '26'), ('twenty seventh', '27'), ('twenty eighth', '28'),
('twenty ninth', '29'), ('thirtieth', '30'),
('thirty first', '31')]
MEALS = [(m, '%s' % m) for m in ('breakfast', 'lunch', 'dinner', 'snack')]
ST_CITIES = [(m, "%s" % m.replace(" . ", "_")) for m in ('st . louis', 'st . petersburg', 'st . paul')]
def __init__(self, db_path):
self.entries = collections.OrderedDict()
self.handlers = []
self.unique_word_map = collections.OrderedDict()
self.seen_words = set()
# CCG Lexicon
filename = os.path.join(db_path, 'lexicon.txt')
entries = []
with open(filename) as f:
for line in f:
x, y = line.strip().split(' :- NP : ')
y = y[:y.index(":")]
entries.append((x, y))
self.add_entries(entries)
# Read DB
city_entries = read_db(db_path, 'CITY.TAB', 1, 1, '', strip_id=['.'])
self.add_entries(city_entries)
self.add_entries(self.DAYS_OF_WEEK)
self.add_entries([(x + 's', y) for x, y in self.DAYS_OF_WEEK]) # Handle "on tuesdays"
self.add_entries(read_db(db_path, 'AIRLINE.TAB', 0, 1, '',
strip_name=[', inc.', ', ltd.']))
self.add_entries(read_db(db_path, 'INTERVAL.TAB', 0, 0, ''))
self.add_entries(read_db(db_path, 'MONTH.TAB', 1, 1, ''))
self.add_entries(read_db(db_path, 'AIRPORT.TAB', 0, 1, '',
strip_name=[], split_name=['/']))
self.add_entries(read_db(db_path, 'COMP_CLS.TAB', 1, 1, ''))
self.add_entries(read_db(db_path, 'CLS_SVC.TAB', 0, 0, '', prefix_name='code '))
self.add_entries(self.MEALS)
self.add_entries(self.WORD_NUMBERS)
self.add_entries(self.ORDINAL_NUMBERS)
self.add_entries(self.ST_CITIES)
self.handle_times()
self.handle_rc()
self.handle_stop()
self.handle_dollars()
self.handle_flight_numbers()
def handle_times(self):
# Mod 12 deals with 12am/12pm special cases...
self.add_handler('([0-9]{1,2}) am$',
lambda m: '%d00' % (int(m.group(1)) % 12))
self.add_handler('([0-9]{1,2}) pm$',
lambda m: '%d00' % (int(m.group(1)) % 12 + 12))
self.add_handler('([0-9]{1,2})([0-9]{2}) am$',
lambda m: '%d%02d' % (int(m.group(1)) % 12, int(m.group(2))))
self.add_handler('([0-9]{1,2})([0-9]{2}) pm$',
lambda m: '%d%02d' % (int(m.group(1)) % 12 + 12, int(m.group(2))))
self.add_handler("([0-9]{1,2}) o'clock$",
lambda m: '%d00' % (int(m.group(1)) % 12))
self.add_handler("([0-9]{1,2}) o'clock am$",
lambda m: '%d00' % (int(m.group(1)) % 12))
self.add_handler("([0-9]{1,2}) o'clock pm$",
lambda m: '%d00' % (int(m.group(1)) % 12 + 12))
self.add_handler("([0-9]+) hours$",
lambda m: '%d' % (int(m.group(1))))
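    # Illustrative handler behaviour (given the regexes above): the span
    # '5 pm' maps to '1700', '9 am' to '900', and '1130 am' to '1130'.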
def handle_flight_numbers(self):
self.add_handler('[0-9]{2,}$', lambda m: '%d' % int(m.group(0)))
def handle_dollars(self):
self.add_handler('([0-9]+)$', lambda m: '%d' % int(m.group(1)))
self.add_handler('([0-9]+) dollars$', lambda m: '%d' % int(m.group(1)))
def handle_rc(self):
self.add_handler(re.compile(r'ap/(\d+)$'), lambda m: 'ap_%d' % int(m.group(1)))
self.add_handler(re.compile(r'ap(\d+)$'), lambda m: 'ap_%d' % int(m.group(1)))
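    # Illustrative: restriction codes written as 'ap/57' or 'ap57' both map to 'ap_57'.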
def handle_stop(self):
self.add_handler('([0-9]+) stop$', lambda m: '%d' % int(m.group(1)))
self.add_handler('([0-9]+) stops$', lambda m: '%d' % int(m.group(1)))
def add_entries(self, entries):
for name, entity in entries:
# Update self.entries
if name in self.entries:
                if entity not in self.entries[name]:
self.entries[name].append(entity)
else:
self.entries[name] = [entity]
# Update self.unique_word_map
for w in name.split(' '):
if w in self.seen_words:
# This word is not unique!
if w in self.unique_word_map:
del self.unique_word_map[w]
else:
self.unique_word_map[w] = entity
self.seen_words.add(w)
def add_handler(self, regex, func):
self.handlers.append((regex, func))
def get_rule_ids(self, entities, rules: List, copy_terminal_set: List) -> List:
rule_ids = list()
if isinstance(entities, str):
entities = [entities]
for entity in entities:
for rule in rules:
if rule.lhs not in copy_terminal_set:
continue
terminal = rule.rhs.strip('[] ').replace("'", "").replace('"', '')
if terminal == entity:
rule_ids.append(rule.rule_id)
break
else:
print("Cannot find a corresponding rule for terminal %s" % entity)
return rule_ids
def _match_candidates(self, tokens: List[Token], rules: List, copy_terminal_set: List, ) -> List:
words = [t.text for t in tokens]
entities = [[] for i in range(len(words))]
ind_pairs = sorted(list(itertools.combinations(range(len(words) + 1), 2)),
key=lambda x: x[0] - x[1])
ret_entries = []
words = [strip_unk(w) for w in words] # Strip unk:%06d stuff
# Handlers
for i, j in ind_pairs:
if any(x for x in entities[i:j]): continue
span = ' '.join(words[i:j])
for regex, func in self.handlers:
m = re.match(regex, span)
if m:
entity = func(m)
rule_ids = self.get_rule_ids(entity, rules, copy_terminal_set)
if len(rule_ids) > 0:
for k in range(i, j):
entities[k] += rule_ids
ret_entries.append(((i, j), rule_ids))
# Entries
for i, j in ind_pairs:
if any(x for x in entities[i:j]): continue
span = ' '.join(words[i:j])
if span in self.entries:
entity = self.entries[span]
rule_ids = self.get_rule_ids(entity, rules, copy_terminal_set)
if len(rule_ids) > 0:
for k in range(i, j):
entities[k] = rule_ids
ret_entries.append(((i, j), rule_ids))
# Unique words
for i in range(len(words)):
            if entities[i]: continue
            word = words[i]
            if word in self.unique_word_map:
                entity = self.unique_word_map[word]
                rule_ids = self.get_rule_ids(entity, rules, copy_terminal_set)
                if len(rule_ids) > 0:
                    entities[i] = rule_ids
ret_entries.append(((i, i+1), rule_ids))
return ret_entries
def match(self, tokens: List[Token], rules: List, copy_terminal_set: List, pad_index: int,) -> List[List]:
entity_candidates = self._match_candidates(tokens, rules, copy_terminal_set)
token_rule_map = [list() for i in range(len(tokens))]
for (beg_idx, end_idx), rule_ids in entity_candidates:
for index in range(beg_idx, end_idx):
token_rule_map[index] += rule_ids
for midx, m in enumerate(token_rule_map):
if len(m) == 0:
m.append(pad_index)
            token_rule_map[midx] = np.array(list(set(m)), dtype=int)
return token_rule_map
if __name__ == '__main__':
matcher = ATISEntityMatcher('../../data/atis/db')
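    # Hedged usage sketch with hypothetical inputs: `rules` would be grammar
    # production objects exposing .lhs/.rhs/.rule_id and `copy_terminal_set`
    # the copyable terminal nonterminals of the grammar file, e.g.
    #   token_rule_map = matcher.match(tokens, rules, copy_terminal_set, pad_index=0)
    # which yields one numpy array of candidate rule ids per input token.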
| 11,052 | 40.867424 | 110 |
py
|
Unimer
|
Unimer-master/grammars/atis/prolog_grammar_2.py
|
# coding=utf8
"""
Prolog Grammar of ATIS
"""
GRAMMAR_DICTIONARY = {}
ROOT_RULE = 'statement -> [answer]'
GRAMMAR_DICTIONARY['statement'] = ['(answer ws)']
GRAMMAR_DICTIONARY['answer'] = [
'("answer_1(" var "," Form ")")',
'("answer_2(" var "," var "," Form ")")',
'("answer_3(" var "," var "," var "," Form ")")',
'("answer_4(" var "," var "," var "," var "," Form ")")',
]
GRAMMAR_DICTIONARY['Form'] = [
'("(" Form conjunction ")")',
'("or(" Form conjunction ")")',
'("not(" Form conjunction ")")',
'("is_flight(" var ")")',
'("is_oneway(" var ")")',
'("is_round_trip(" var ")")',
'("is_daily_flight(" var ")")',
'("is_flight_has_stop(" var ")")',
'("is_non_stop_flight(" var ")")',
'("is_flight_economy(" var ")")',
'("is_flight_has_meal(" var ")")',
'("is_economy(" var ")")',
'("is_discounted_flight(" var ")")',
'("is_flight_overnight(" var ")")',
'("is_connecting_flight(" var ")")',
'("is_meal(" var ")")',
'("is_meal_code(" var ")")',
'("is_airline(" var ")")',
'("is_rapid_transit(" var ")")',
'("is_taxi(" var ")")',
'("is_air_taxi_operation(" var ")")',
'("is_ground_transport_on_weekday(" var ")")',
'("is_ground_transport(" var ")")',
'("is_limousine(" var ")")',
'("is_rental_car(" var ")")',
'("is_flight_turboprop(" var ")")',
'("is_turboprop(" var ")")',
'("aircraft_code(" var ")")',
'("is_aircraft(" var ")")',
'("is_flight_jet(" var ")")',
'("is_day_after_tomorrow_flight(" var ")")',
'("is_flight_tonight(" var ")")',
'("is_today_flight(" var ")")',
'("is_tomorrow_flight(" var ")")',
'("is_flight_on_weekday(" var ")")',
'("is_tomorrow_arrival_flight(" var ")")',
'("is_time_zone_code(" var ")")',
'("is_class_of_service(" var ")")',
'("is_city(" var ")")',
'("is_airport(" var ")")',
'("is_fare_basis_code(" var ")")',
'("is_booking_class_t(" var ")")',
'("_named(" var "," var ")")',
'("is_flight_has_specific_fare_basis_code(" var "," var ")")',
'("is_flight_has_booking_class(" var "," var ")")',
'("is_flight_stop_at_city(" var "," var ")")',
'("is_flight_on_year(" var "," var ")")',
'("is_flight_during_day(" var "," var ")")',
'("is_flight_stops_specify_number_of_times(" var "," var ")")',
'("is_flight_meal_code(" var "," var ")")',
'("is_from(" var "," var ")")',
'("is_flight_day_return(" var "," var ")")',
'("is_flight_day_number_return(" var "," var ")")',
'("is_flight_departure_time(" var "," var ")")',
'("is_flight_month_return(" var "," var ")")',
'("is_flight_month_arrival(" var "," var ")")',
'("is_flight_approx_return_time(" var "," var ")")',
'("is_flight_before_day(" var "," var ")")',
'("is_flight_approx_arrival_time(" var "," var ")")',
'("is_flight_day_number_arrival(" var "," var ")")',
'("is_flight_arrival_time(" var "," var ")")',
'("is_flight_with_specific_aircraft(" var "," var ")")',
'("is_flight_on_day_number(" var "," var ")")',
'("is_flight_on_day(" var "," var ")")',
'("is_flight_manufacturer(" var "," var ")")',
'("is_flight_aircraft(" var "," var ")")',
'("is_flight_stop_at_airport(" var "," var ")")',
'("is_flight_during_day_arrival(" var "," var ")")',
'("is_flight_days_from_today(" var "," var ")")',
'("is_fare_basis_code_class_type(" var "," var ")")',
'("is_flight_after_day(" var "," var ")")',
'("is_flight_day_arrival(" var "," var ")")',
'("is_flight_approx_departure_time(" var "," var ")")',
'("is_flight_has_specific_meal(" var "," var ")")',
'("is_next_days_flight(" var "," var ")")',
'("is_flight_has_class_type(" var "," var ")")',
'("is_to(" var "," var ")")',
'("is_flight_airline(" var "," var ")")',
'("p_flight_fare(" var "," var ")")',
'("is_flight_number(" var "," var ")")',
'("is_airport_of_city(" var "," var ")")',
'("is_airline_services(" var "," var ")")',
'("is_services(" var "," var ")")',
'("is_from_airports_of_city(" var "," var ")")',
'("is_from_airport(" var "," var ")")',
'("is_to_city(" var "," var ")")',
'("is_loc_t_state(" var "," var ")")',
'("is_mf(" var "," var ")")',
'("is_loc_t(" var "," var ")")',
'("is_aircraft_basis_type(" var "," var ")")',
'("is_aircraft_airline(" var "," var ")")',
'("is_flight_cost_fare(" var "," var ")")',
'("is_loc_t_city_time_zone(" var "," var ")")',
'("is_airline_provide_meal(" var "," var ")")',
'("is_airline_has_booking_class(" var "," var ")")',
'("minimum_connection_time(" var "," var ")")',
'("p_flight_stop_arrival_time(" var "," var ")")',
'("p_ground_fare(" var "," var ")")',
'("p_booking_class_fare(" var "," var ")")',
'("airline_name(" var "," var ")")',
'("abbrev(" var "," var ")")',
'("capacity(" var "," var ")")',
'("minutes_distant(" var "," var ")")',
'("is_time_elapsed(" var "," var ")")',
'("p_flight_restriction_code(" var "," var ")")',
'("equals(" var "," var ")")',
'("equals_arrival_time(" var "," var ")")',
'("larger_than_arrival_time(" var "," var ")")',
'("larger_than_capacity(" var "," var ")")',
'("larger_than_departure_time(" var "," var ")")',
'("larger_than_number_of_stops(" var "," var ")")',
'("less_than_flight_cost(" var "," var ")")',
'("less_than_departure_time(" var "," var ")")',
'("less_than_flight_fare(" var "," var ")")',
'("less_than_arrival_time(" var "," var ")")',
'("count(" var "," Form "," var ")")',
'("argmax_capacity(" var "," Form ")")',
'("argmax_arrival_time(" var "," Form ")")',
'("argmax_departure_time(" var "," Form ")")',
'("argmax_get_number_of_stops(" var "," Form ")")',
'("argmax_get_flight_fare(" var "," Form ")")',
'("argmax_count(" var "," Form ")")',
'("argmin_time_elapsed(" var "," Form ")")',
'("argmin_get_number_of_stops(" var "," Form ")")',
'("argmin_time_elapsed(" var "," Form ")")',
'("argmin_arrival_time(" var "," Form ")")',
'("argmin_capacity(" var "," Form ")")',
'("argmin_departure_time(" var "," Form ")")',
'("argmin_get_flight_fare(" var "," Form ")")',
'("argmin_miles_distant(" var "," Form ")")',
'("_max(" var "," Form ")")',
'("_min(" var "," Form ")")',
'("sum_capacity(" var "," Form "," var ")")',
'("sum_get_number_of_stops(" var "," Form "," var ")")',
'("miles_distant_between_city(" var "," var "," var ")")',
'("miles_distant(" var "," var "," var ")")',
'("const(" var "," fare_basis_code ")")',
'("const(" var "," meal_code ")")',
'("const(" var "," airport_code ")")',
'("const(" var "," airline_code ")")',
'("const(" var "," aircraft_code_object ")")',
'("const(" var "," city_name ")")',
'("const(" var "," time ")")',
'("const(" var "," flight_number_object ")")',
'("const(" var "," class_description ")")',
'("const(" var "," day_period ")")',
'("const(" var "," state_name ")")',
'("const(" var "," day_number ")")',
'("const(" var "," month ")")',
'("const(" var "," day ")")',
'("const(" var "," dollar ")")',
'("const(" var "," meal_description ")")',
'("const(" var "," "hour(9)" ")")',
'("const(" var "," integer ")")',
'("const(" var "," basis_type ")")',
'("const(" var "," year ")")',
'("const(" var "," "days_code(sa)" ")")',
'("const(" var "," "manufacturer(boeing)" ")")'
]
GRAMMAR_DICTIONARY['conjunction'] = [
'("," Form conjunction)',
'""'
]
# Variable
GRAMMAR_DICTIONARY['var'] = ['"%s"' % chr(97+i) for i in range(26)]
GRAMMAR_DICTIONARY['fare_basis_code'] = ['("fare_basis_code(" fare_basis_code_value ")")']
GRAMMAR_DICTIONARY['fare_basis_code_value'] = ['"qx"', '"qw"', '"qo"', '"fn"', '"yn"', '"bh"', '"k"', '"b"', '"h"', '"f"', '"q"', '"c"', '"y"', '"m"',]
GRAMMAR_DICTIONARY['meal_code'] = ['("meal_code(" meal_code_value ")")']
GRAMMAR_DICTIONARY['meal_code_value'] = ['"ap_58"', '"ap_57"', '"d_s"', '"b"', '"ap_55"', '"s_"', '"sd_d"', '"ls"', '"ap_68"', '"ap_80"', '"ap"', '"s"', ]
GRAMMAR_DICTIONARY['airline_code'] = ['("airline_code(" airline_code_value ")")']
GRAMMAR_DICTIONARY['airline_code_value'] = ['"usair"', '"co"', '"ua"', '"delta"', '"as"', '"ff"', '"canadian_airlines_international"', '"us"', '"nx"', '"hp"', '"aa"', '"kw"', '"ml"', '"nw"', '"ac"', '"tw"', '"yx"', '"ea"', '"dl"', '"wn"', '"lh"', '"cp"']
GRAMMAR_DICTIONARY['airport_code'] = ['("airport_code(" airport_code_value ")")']
GRAMMAR_DICTIONARY['airport_code_value'] = ['"dallas"', '"ont"', '"stapelton"', '"bna"', '"bwi"', '"iad"', '"sfo"', '"phl"', '"pit"', '"slc"', '"phx"', '"lax"', '"bur"', '"ind"', '"iah"', '"dtw"', '"las"', '"dal"', '"den"', '"atl"', '"ewr"', '"bos"', '"tpa"', '"jfk"', '"mke"', '"oak"', '"yyz"', '"dfw"', '"cvg"', '"hou"', '"lga"', '"ord"', '"mia"', '"mco"']
GRAMMAR_DICTIONARY['aircraft_code_object'] = ['("aircraft_code(" aircraft_code_value ")")']
GRAMMAR_DICTIONARY['aircraft_code_value'] = ['"m80"', '"dc10"', '"727"', '"d9s"', '"f28"', '"j31"', '"767"', '"734"', '"73s"', '"747"', '"737"', '"733"', '"d10"', '"100"', '"757"', '"72s"']
GRAMMAR_DICTIONARY['city_name'] = ['("city_name(" city_name_value ")")']
GRAMMAR_DICTIONARY['city_name_value'] = ['"cleveland"', '"milwaukee"', '"detroit"', '"los_angeles"', '"miami"', '"salt_lake_city"', '"ontario"', '"tacoma"', '"memphis"', '"denver"', '"san_francisco"', '"new_york"', '"tampa"', '"washington"', '"westchester_county"', '"boston"', '"newark"', '"pittsburgh"', '"charlotte"', '"columbus"', '"atlanta"', '"oakland"', '"kansas_city"', '"st_louis"', '"nashville"', '"chicago"', '"fort_worth"', '"san_jose"', '"dallas"', '"philadelphia"', '"st_petersburg"', '"baltimore"', '"san_diego"', '"cincinnati"', '"long_beach"', '"phoenix"', '"indianapolis"', '"burbank"', '"montreal"', '"seattle"', '"st_paul"', '"minneapolis"', '"houston"', '"orlando"', '"toronto"', '"las_vegas"']
GRAMMAR_DICTIONARY['time'] = ['("time(" time_value ")")']
GRAMMAR_DICTIONARY['time_value'] = [
'"1850"', '"1110"', '"2000"', '"1815"', '"1024"', '"1500"',
'"1900"', '"1600"', '"1300"', '"1800"', '"1200"', '"1628"',
'"1830"', '"823"', '"1245"', '"1524"', '"200"', '"1615"',
'"1230"', '"705"', '"1045"', '"1700"', '"1115"', '"1645"',
'"1730"', '"815"', '"0"', '"500"', '"1205"', '"1940"',
'"1400"', '"1130"', '"2200"', '"645"', '"718"', '"2220"',
'"600"', '"630"', '"800"', '"838"', '"1330"', '"845"', '"1630"',
'"1715"', '"2010"', '"1000"', '"1619"', '"2100"', '"1505"',
'"2400"', '"1923"', '"100"', '"1145"', '"2300"', '"1620"',
'"2023"', '"2358"', '"1425"', '"720"', '"1310"', '"700"', '"650"',
'"1410"', '"1030"', '"1900"', '"1017"', '"1430"', '"900"', '"1930"',
'"1133"', '"1220"', '"2226"', '"1100"', '"819"', '"755"', '"2134"', '"555"', '"1"',
]
GRAMMAR_DICTIONARY['flight_number_object'] = ['("flight_number(" flight_number_value ")")']
GRAMMAR_DICTIONARY['flight_number_value'] = [
'"1291"', '"345"', '"813"', '"71"', '"1059"', '"212"', '"1209"',
'"281"', '"201"', '"324"', '"19"', '"352"', '"137338"', '"4400"',
'"323"', '"505"', '"825"', '"82"', '"279"', '"1055"', '"296"', '"315"',
'"1765"', '"405"', '"771"', '"106"', '"2153"', '"257"', '"402"',
'"343"', '"98"', '"1039"', '"217"', '"539"', '"459"', '"417"',
'"1083"', '"3357"', '"311"', '"210"', '"139"', '"852"', '"838"',
'"415"', '"3724"', '"21"', '"928"', '"269"', '"270"',
'"297"', '"746"', '"1222"', '"271"'
]
GRAMMAR_DICTIONARY['class_description'] = ['("class_description(" class_description_value ")")']
GRAMMAR_DICTIONARY['class_description_value'] = ['"thrift"', '"coach"', '"first"', '"business"']
GRAMMAR_DICTIONARY['day_period'] = ['("day_period(" day_period_value ")")']
GRAMMAR_DICTIONARY['day_period_value'] = ['"early"', '"afternoon"', '"late_evening"', '"late_night"', '"mealtime"', '"evening"', '"pm"', '"daytime"', '"breakfast"', '"morning"', '"late"']
GRAMMAR_DICTIONARY['state_name'] = ['("state_name(" state_name_value ")")']
GRAMMAR_DICTIONARY['state_name_value'] = ['"minnesota"', '"florida"', '"arizona"', '"nevada"', '"california"']
GRAMMAR_DICTIONARY['day_number'] = ['("day_number(" day_number_value ")")']
GRAMMAR_DICTIONARY['day_number_value'] = ['"13"', '"29"', '"28"', '"22"', '"21"', '"16"', '"30"', '"12"', '"18"', '"19"', '"31"', '"20"', '"27"', '"6"', '"26"', '"17"', '"11"', '"10"', '"15"', '"23"', '"24"', '"25"', '"14"', '"1"', '"3"', '"8"', '"5"', '"2"', '"9"', '"4"', '"7"']
GRAMMAR_DICTIONARY['month'] = ['("month(" month_value ")")']
GRAMMAR_DICTIONARY['month_value'] = ['"april"', '"august"', '"may"', '"october"', '"june"', '"november"', '"september"', '"february"', '"december"', '"march"', '"july"', '"january"']
GRAMMAR_DICTIONARY['day'] = ['("day(" day_value ")")']
GRAMMAR_DICTIONARY['day_value'] = ['"monday"', '"wednesday"', '"thursday"', '"tuesday"', '"saturday"', '"friday"', '"sunday"']
GRAMMAR_DICTIONARY['dollar'] = ['("dollar(" dollar_value ")")']
GRAMMAR_DICTIONARY['dollar_value'] = ['"1000"', '"1500"', '"466"', '"1288"', '"300"', '"329"', '"416"', '"124"', '"932"', '"1100"', '"200"', '"500"', '"100"', '"415"', '"150"', '"400"']
GRAMMAR_DICTIONARY['meal_description'] = ['("meal_description(" meal_description_value ")")']
GRAMMAR_DICTIONARY['meal_description_value'] = ['"snack"', '"breakfast"', '"lunch"', '"dinner"']
GRAMMAR_DICTIONARY['integer'] = ['("integer(" integer_value ")")']
GRAMMAR_DICTIONARY['integer_value'] = ['"2"', '"1"', '"3"']
GRAMMAR_DICTIONARY['basis_type'] = ['("basis_type(" basis_type_value ")")']
GRAMMAR_DICTIONARY['basis_type_value'] = ['"737"', '"767"']
GRAMMAR_DICTIONARY['year'] = ['("year(" year_value ")")']
GRAMMAR_DICTIONARY['year_value'] = ['"1991"', '"1993"', '"1992"']
GRAMMAR_DICTIONARY["ws"] = ['~"\s*"i']
GRAMMAR_DICTIONARY["wsp"] = ['~"\s+"i']
COPY_TERMINAL_SET = {
'fare_basis_code_value', 'meal_code_value', 'airport_code_value', 'airline_code_value',
'aircraft_code_value', 'city_name_value', 'time_value', 'flight_number_value',
    'class_description_value', 'day_period_value', 'state_name_value',
'day_number_value', 'month_value', 'day_value', 'dollar_value', 'meal_description_value',
'integer_value', 'basis_type_value', 'year_value',
}
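# Illustrative only: one query string derivable from the rules above is
#   answer_1(a,(is_flight(a),is_from(a,b),is_to(a,c),const(b,city_name(boston)),const(c,city_name(denver))))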
| 14,314 | 54.484496 | 715 |
py
|
Unimer
|
Unimer-master/grammars/atis/atis_seq2seq_lambda_calculus_entity_matcher.py
|
# coding=utf8
import os
import re
import itertools
import collections
import numpy as np
from typing import List
from allennlp.data.tokenizers import Token
def clean_id(s, id_suffix, strip=None):
true_id = s.replace(' ', '_')
if strip:
for v in strip:
true_id = true_id.replace(v, '').strip()
return '%s:%s' % (true_id, id_suffix)
def clean_name(s, strip=None, split=None, prefix=None):
if split:
for v in split:
s = s.replace(v, ' ')
if strip:
for v in strip:
s = s.replace(v, '')
if prefix:
s = prefix + s
return s
def read_db(db_path, basename, id_col, name_col, id_suffix,
strip_id=None, strip_name=None, split_name=None, prefix_name=None):
filename = os.path.join(db_path, basename)
data = [] # Pairs of (name, id)
with open(filename) as f:
for line in f:
row = [s[1:-1] for s in re.findall('"[^"]*"|[0-9]+', line.strip())]
cur_name = clean_name(row[name_col].lower(), strip=strip_name,
split=split_name, prefix=prefix_name)
cur_id = clean_id(row[id_col].lower(), id_suffix, strip=strip_id)
data.append((cur_name, cur_id))
return data
def print_aligned(a, b, indent=0):
a_toks = []
b_toks = []
for x, y in zip(a, b):
cur_len = max(len(x), len(y))
a_toks.append(x.ljust(cur_len))
b_toks.append(y.ljust(cur_len))
prefix = ' ' * indent
print('%s%s' % (prefix, ' '.join(a_toks)))
print('%s%s' % (prefix, ' '.join(b_toks)))
def parse_entry(line):
"""Parse an entry from the CCG lexicon."""
return tuple(line.strip().split(' :- NP : '))
def strip_unk(w):
# Strip unk:%06d identifiers
m = re.match('^unk:[0-9]{6,}:(.*)$', w)
if m:
return m.group(1)
else:
return w
class ATISSeq2SeqLambdaCalculusEntityMatcher:
TYPE_DICT = {
'ci': 'city',
'da': 'day',
'al': 'airline',
'ti': 'time',
'pd': 'time of day',
'dn': 'date number',
'mn': 'month',
'ap': 'airport',
'cl': 'class',
'fb': 'fare code',
'fn': 'flight number',
'me': 'meal',
'do': 'dollars',
'rc': 'restrictions',
'ac': 'aircraft',
'yr': 'year',
'mf': 'manufacturer',
'dc': 'dc',
'st': 'state',
'hr': 'hour',
'i': 'stop'
}
DAYS_OF_WEEK = [
(s, '%s:_da' % s)
for s in ('monday', 'tuesday', 'wednesday', 'thursday',
'friday', 'saturday', 'sunday')
]
# For dates
WORD_NUMBERS = [('one', '1:_dn'), ('two', '2:_dn'), ('three', '3:_dn'), ('four', '4:_dn'), ('five', '5:_dn'),
('six', '6:_dn'), ('seven', '7:_dn'), ('eight', '8:_dn'), ('nine', '9:_dn'), ('ten', '10:_dn'),
('eleven', '11:_dn'), ('twelve', '12:_dn'), ('thirteen', '13:_dn'), ('fourteen', '14:_dn'),
('fifteen', '15:_dn'), ('sixteen', '16:_dn'), ('seventeen', '17:_dn'), ('eighteen', '18:_dn'),
('nineteen', '19:_dn'), ('twenty', '20:_dn'), ('twenty one', '21:_dn'),
('twenty two', '22:_dn'),
('twenty three', '23:_dn'), ('twenty four', '24:_dn'), ('twenty five', '25:_dn'),
('twenty six', '26:_dn'), ('twenty seven', '27:_dn'), ('twenty eight', '28:_dn'),
('twenty nine', '29:_dn'), ('thirty', '30:_dn'), ('thirty one', '31:_dn')]
ORDINAL_NUMBERS = [('second', '2:_dn'), ('third', '3:_dn'), ('fourth', '4:_dn'), ('fifth', '5:_dn'),
('sixth', '6:_dn'), ('seventh', '7:_dn'), ('eighth', '8:_dn'), ('ninth', '9:_dn'),
('tenth', '10:_dn'), ('eleventh', '11:_dn'), ('twelfth', '12:_dn'), ('thirteenth', '13:_dn'),
('fourteenth', '14:_dn'), ('fifteenth', '15:_dn'), ('sixteenth', '16:_dn'),
('seventeenth', '17:_dn'), ('eighteenth', '18:_dn'), ('nineteenth', '19:_dn'),
('twentieth', '20:_dn'), ('twenty first', '21:_dn'), ('twenty second', '22:_dn'),
('twenty third', '23:_dn'), ('twenty fourth', '24:_dn'), ('twenty fifth', '25:_dn'),
('twenty sixth', '26:_dn'), ('twenty seventh', '27:_dn'), ('twenty eighth', '28:_dn'),
('twenty ninth', '29:_dn'), ('thirtieth', '30:_dn'),
('thirty first', '31:_dn')]
MEALS = [(m, '%s:_me' % m) for m in ('breakfast', 'lunch', 'dinner', 'snack')]
ST_CITIES = [(m, "%s:_ci" % m.replace(" . ", "_")) for m in ('st . louis', 'st . petersburg', 'st . paul')]
def __init__(self, db_path):
self.entries = collections.OrderedDict()
self.handlers = []
self.unique_word_map = collections.OrderedDict()
self.seen_words = set()
# CCG Lexicon
filename = os.path.join(db_path, 'lexicon.txt')
entries = []
with open(filename) as f:
for line in f:
x, y = line.strip().split(' :- NP : ')
y = y.replace(':', ':_').strip()
entries.append((x, y))
self.add_entries(entries)
# Read DB
self.add_entries(read_db(db_path, 'CITY.TAB', 1, 1, '_ci', strip_id=['.']))
self.add_entries(self.DAYS_OF_WEEK)
self.add_entries([(x + 's', y) for x, y in self.DAYS_OF_WEEK]) # Handle "on tuesdays"
self.add_entries(read_db(db_path, 'AIRLINE.TAB', 0, 1, '_al',
strip_name=[', inc.', ', ltd.']))
self.add_entries(read_db(db_path, 'INTERVAL.TAB', 0, 0, '_pd'))
self.add_entries(read_db(db_path, 'MONTH.TAB', 1, 1, '_mn'))
self.add_entries(read_db(db_path, 'AIRPORT.TAB', 0, 1, '_ap',
strip_name=[], split_name=['/']))
self.add_entries(read_db(db_path, 'COMP_CLS.TAB', 1, 1, '_cl'))
self.add_entries(read_db(db_path, 'CLS_SVC.TAB', 0, 0, '_fb', prefix_name='code '))
self.add_entries(self.MEALS)
self.add_entries(self.WORD_NUMBERS)
self.add_entries(self.ORDINAL_NUMBERS)
self.add_entries(self.ST_CITIES)
self.handle_times()
self.handle_rc()
self.handle_stop()
self.handle_dollars()
self.handle_flight_numbers()
def handle_times(self):
# Mod 12 deals with 12am/12pm special cases...
self.add_handler('([0-9]{1,2}) am$',
lambda m: '%d00:_ti' % (int(m.group(1)) % 12))
self.add_handler('([0-9]{1,2}) pm$',
lambda m: '%d00:_ti' % (int(m.group(1)) % 12 + 12))
self.add_handler('([0-9]{1,2})([0-9]{2}) am$',
lambda m: '%d%02d:_ti' % (int(m.group(1)) % 12, int(m.group(2))))
self.add_handler('([0-9]{1,2})([0-9]{2}) pm$',
lambda m: '%d%02d:_ti' % (int(m.group(1)) % 12 + 12, int(m.group(2))))
self.add_handler("([0-9]{1,2}) o'clock$",
lambda m: '%d00:_ti' % (int(m.group(1)) % 12))
self.add_handler("([0-9]{1,2}) o'clock am$",
lambda m: '%d00:_ti' % (int(m.group(1)) % 12))
self.add_handler("([0-9]{1,2}) o'clock pm$",
lambda m: '%d00:_ti' % (int(m.group(1)) % 12 + 12))
self.add_handler("([0-9]+) hours$",
lambda m: '%d:_hr' % (int(m.group(1))))
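    # Illustrative handler behaviour (given the regexes above): '5 pm' maps to
    # '1700:_ti' and '9 hours' to '9:_hr'.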
def handle_flight_numbers(self):
self.add_handler('[0-9]{2,}$', lambda m: '%d:_fn' % int(m.group(0)))
def handle_dollars(self):
self.add_handler('([0-9]+)$', lambda m: '%d:_do' % int(m.group(1)))
self.add_handler('([0-9]+) dollars$', lambda m: '%d:_do' % int(m.group(1)))
def handle_rc(self):
self.add_handler(re.compile(r'ap/(\d+)$'), lambda m: 'ap_%d:_rc' % int(m.group(1)))
self.add_handler(re.compile(r'ap(\d+)$'), lambda m: 'ap_%d:_rc' % int(m.group(1)))
def handle_stop(self):
self.add_handler('([0-9]+) stop$', lambda m: '%d:_i' % int(m.group(1)))
self.add_handler('([0-9]+) stops$', lambda m: '%d:_i' % int(m.group(1)))
def add_entries(self, entries):
for name, entity in entries:
# Update self.entries
if name in self.entries:
                if self.entries[name] != entity:
print("Collision detected: %s -> %s, %s" % (name, self.entries[name], entity))
continue
self.entries[name] = entity
# Update self.unique_word_map
for w in name.split(' '):
if w in self.seen_words:
# This word is not unique!
if w in self.unique_word_map:
del self.unique_word_map[w]
else:
self.unique_word_map[w] = entity
self.seen_words.add(w)
def add_handler(self, regex, func):
self.handlers.append((regex, func))
def _match_candidates(self, tokens: List[Token]) -> List[str]:
words = [t.text for t in tokens]
entities = [None for i in range(len(words))]
ind_pairs = sorted(list(itertools.combinations(range(len(words) + 1), 2)),
key=lambda x: x[0] - x[1])
words = [strip_unk(w) for w in words] # Strip unk:%06d stuff
# Entries
for i, j in ind_pairs:
if any(x for x in entities[i:j]): continue
span = ' '.join(words[i:j])
if span in self.entries:
entity = self.entries[span]
for k in range(i, j):
entities[k] = entity
# Handlers
for i, j in ind_pairs:
if any(x for x in entities[i:j]): continue
span = ' '.join(words[i:j])
for regex, func in self.handlers:
m = re.match(regex, span)
if m:
entity = func(m)
for k in range(i, j):
entities[k] = entity
# Unique words
for i in range(len(words)):
            if entities[i]: continue
            word = words[i]
if word in self.unique_word_map:
entity = self.unique_word_map[word]
entities[i] = entity
return entities
def match(self, tokens: List[Token]) -> List[str]:
entity_candidates = self._match_candidates(tokens)
return entity_candidates
if __name__ == '__main__':
matcher = ATISSeq2SeqLambdaCalculusEntityMatcher('../../data/atis/db')
print(matcher)
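    # Hedged usage sketch (the sentence is illustrative): each token is mapped
    # to a typed entity string such as 'boston:_ci' or to None.
    tokens = [Token(w) for w in 'flights from boston to denver after 5 pm'.split()]
    print(matcher.match(tokens))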
| 10,685 | 38.431734 | 116 |
py
|
Unimer
|
Unimer-master/grammars/atis/atis_seq2seq_entity_matcher.py
|
# coding=utf8
import os
import re
import itertools
import collections
from typing import List
from allennlp.data.tokenizers import Token
def clean_id(s, id_suffix, strip=None):
true_id = s.replace(' ', '_')
if strip:
for v in strip:
true_id = true_id.replace(v, '').strip()
return '%s' % true_id
def clean_name(s, strip=None, split=None, prefix=None):
if split:
for v in split:
s = s.replace(v, ' ')
if strip:
for v in strip:
s = s.replace(v, '')
if prefix:
s = prefix + s
return s
def read_db(db_path, basename, id_col, name_col, id_suffix,
strip_id=None, strip_name=None, split_name=None, prefix_name=None):
filename = os.path.join(db_path, basename)
data = [] # Pairs of (name, id)
with open(filename) as f:
for line in f:
row = [s[1:-1] for s in re.findall('"[^"]*"|[0-9]+', line.strip())]
cur_name = clean_name(row[name_col].lower(), strip=strip_name,
split=split_name, prefix=prefix_name)
cur_id = clean_id(row[id_col].lower(), id_suffix, strip=strip_id)
data.append((cur_name, cur_id))
return data
def strip_unk(w):
# Strip unk:%06d identifiers
m = re.match('^unk:[0-9]{6,}:(.*)$', w)
if m:
return m.group(1)
else:
return w
class ATISSeq2SeqEntityMatcher:
DAYS_OF_WEEK = [
(s, '%s' % s)
for s in ('monday', 'tuesday', 'wednesday', 'thursday',
'friday', 'saturday', 'sunday')
]
# For dates
WORD_NUMBERS = [('one', '1'), ('two', '2'), ('three', '3'), ('four', '4'), ('five', '5'),
('six', '6'), ('seven', '7'), ('eight', '8'), ('nine', '9'), ('ten', '10'),
('eleven', '11'), ('twelve', '12'), ('thirteen', '13'), ('fourteen', '14'),
('fifteen', '15'), ('sixteen', '16'), ('seventeen', '17'), ('eighteen', '18'),
('nineteen', '19'), ('twenty', '20'), ('twenty one', '21'),
('twenty two', '22'),
('twenty three', '23'), ('twenty four', '24'), ('twenty five', '25'),
('twenty six', '26'), ('twenty seven', '27'), ('twenty eight', '28'),
('twenty nine', '29'), ('thirty', '30'), ('thirty one', '31')]
ORDINAL_NUMBERS = [('second', '2'), ('third', '3'), ('fourth', '4'), ('fifth', '5'),
('sixth', '6'), ('seventh', '7'), ('eighth', '8'), ('ninth', '9'),
('tenth', '10'), ('eleventh', '11'), ('twelfth', '12'), ('thirteenth', '13'),
('fourteenth', '14'), ('fifteenth', '15'), ('sixteenth', '16'),
('seventeenth', '17'), ('eighteenth', '18'), ('nineteenth', '19'),
('twentieth', '20'), ('twenty first', '21'), ('twenty second', '22'),
('twenty third', '23'), ('twenty fourth', '24'), ('twenty fifth', '25'),
('twenty sixth', '26'), ('twenty seventh', '27'), ('twenty eighth', '28'),
('twenty ninth', '29'), ('thirtieth', '30'),
('thirty first', '31')]
MEALS = [(m, '%s' % m) for m in ('breakfast', 'lunch', 'dinner', 'snack')]
ST_CITIES = [(m, "%s" % m.replace(" . ", "_")) for m in ('st . louis', 'st . petersburg', 'st . paul')]
def __init__(self, db_path):
self.entries = collections.OrderedDict()
self.handlers = []
self.unique_word_map = collections.OrderedDict()
self.seen_words = set()
# CCG Lexicon
filename = os.path.join(db_path, 'lexicon.txt')
entries = []
with open(filename) as f:
for line in f:
x, y = line.strip().split(' :- NP : ')
y = y[:y.index(":")]
entries.append((x, y))
self.add_entries(entries)
# Read DB
city_entries = read_db(db_path, 'CITY.TAB', 1, 1, '', strip_id=['.'])
self.add_entries(city_entries)
self.add_entries(self.DAYS_OF_WEEK)
self.add_entries([(x + 's', y) for x, y in self.DAYS_OF_WEEK]) # Handle "on tuesdays"
self.add_entries(read_db(db_path, 'AIRLINE.TAB', 0, 1, '',
strip_name=[', inc.', ', ltd.']))
self.add_entries(read_db(db_path, 'INTERVAL.TAB', 0, 0, ''))
self.add_entries(read_db(db_path, 'MONTH.TAB', 1, 1, ''))
self.add_entries(read_db(db_path, 'AIRPORT.TAB', 0, 1, '',
strip_name=[], split_name=['/']))
self.add_entries(read_db(db_path, 'COMP_CLS.TAB', 1, 1, ''))
self.add_entries(read_db(db_path, 'CLS_SVC.TAB', 0, 0, '', prefix_name='code '))
self.add_entries(self.MEALS)
self.add_entries(self.WORD_NUMBERS)
self.add_entries(self.ORDINAL_NUMBERS)
self.add_entries(self.ST_CITIES)
self.handle_times()
self.handle_rc()
self.handle_stop()
self.handle_dollars()
self.handle_flight_numbers()
def handle_times(self):
# Mod 12 deals with 12am/12pm special cases...
self.add_handler('([0-9]{1,2}) am$',
lambda m: '%d00' % (int(m.group(1)) % 12))
self.add_handler('([0-9]{1,2}) pm$',
lambda m: '%d00' % (int(m.group(1)) % 12 + 12))
self.add_handler('([0-9]{1,2})([0-9]{2}) am$',
lambda m: '%d%02d' % (int(m.group(1)) % 12, int(m.group(2))))
self.add_handler('([0-9]{1,2})([0-9]{2}) pm$',
lambda m: '%d%02d' % (int(m.group(1)) % 12 + 12, int(m.group(2))))
self.add_handler("([0-9]{1,2}) o'clock$",
lambda m: '%d00' % (int(m.group(1)) % 12))
self.add_handler("([0-9]{1,2}) o'clock am$",
lambda m: '%d00' % (int(m.group(1)) % 12))
self.add_handler("([0-9]{1,2}) o'clock pm$",
lambda m: '%d00' % (int(m.group(1)) % 12 + 12))
self.add_handler("([0-9]+) hours$",
lambda m: '%d' % (int(m.group(1))))
def handle_flight_numbers(self):
self.add_handler('[0-9]{2,}$', lambda m: '%d' % int(m.group(0)))
def handle_dollars(self):
self.add_handler('([0-9]+)$', lambda m: '%d' % int(m.group(1)))
self.add_handler('([0-9]+) dollars$', lambda m: '%d' % int(m.group(1)))
def handle_rc(self):
self.add_handler(re.compile(r'ap/(\d+)$'), lambda m: 'ap_%d' % int(m.group(1)))
self.add_handler(re.compile(r'ap(\d+)$'), lambda m: 'ap_%d' % int(m.group(1)))
def handle_stop(self):
self.add_handler('([0-9]+) stop$', lambda m: '%d' % int(m.group(1)))
self.add_handler('([0-9]+) stops$', lambda m: '%d' % int(m.group(1)))
def add_entries(self, entries):
for name, entity in entries:
# Update self.entries
if name in self.entries:
                if self.entries[name] != entity:
print("Collision detected: %s -> %s, %s" % (name, self.entries[name], entity))
continue
self.entries[name] = entity
# Update self.unique_word_map
for w in name.split(' '):
if w in self.seen_words:
# This word is not unique!
if w in self.unique_word_map:
del self.unique_word_map[w]
else:
self.unique_word_map[w] = entity
self.seen_words.add(w)
def add_handler(self, regex, func):
self.handlers.append((regex, func))
def get_rule_ids(self, entities, rules: List, copy_terminal_set: List) -> List:
rule_ids = list()
if isinstance(entities, str):
entities = [entities]
for entity in entities:
for rule in rules:
if rule.lhs not in copy_terminal_set:
continue
terminal = rule.rhs.strip('[] ').replace("'", "").replace('"', '')
if terminal == entity:
rule_ids.append(rule.rule_id)
break
else:
print("Cannot find a corresponding rule for terminal %s" % entity)
return rule_ids
def _match_candidates(self, tokens: List[Token]) -> List[str]:
words = [t.text for t in tokens]
entities = [None for i in range(len(words))]
ind_pairs = sorted(list(itertools.combinations(range(len(words) + 1), 2)),
key=lambda x: x[0] - x[1])
words = [strip_unk(w) for w in words] # Strip unk:%06d stuff
# Entries
for i, j in ind_pairs:
if any(x for x in entities[i:j]): continue
span = ' '.join(words[i:j])
if span in self.entries:
entity = self.entries[span]
for k in range(i, j):
entities[k] = entity
# Handlers
for i, j in ind_pairs:
if any(x for x in entities[i:j]): continue
span = ' '.join(words[i:j])
for regex, func in self.handlers:
m = re.match(regex, span)
if m:
entity = func(m)
for k in range(i, j):
entities[k] = entity
# Unique words
for i in range(len(words)):
            if entities[i]: continue
            word = words[i]
if word in self.unique_word_map:
entity = self.unique_word_map[word]
entities[i] = entity
return entities
def match(self, tokens: List[Token]) -> List[str]:
entity_candidates = self._match_candidates(tokens)
return entity_candidates
if __name__ == '__main__':
matcher = ATISSeq2SeqEntityMatcher('../../data/atis/db')
print(matcher)
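    # Hedged usage sketch (the sentence is illustrative): here entities carry
    # no type suffix, e.g. a '5 pm' span is mapped to '1700'.
    tokens = [Token(w) for w in 'show flights from boston to denver after 5 pm'.split()]
    print(matcher.match(tokens))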
| 9,965 | 39.512195 | 107 |
py
|
Unimer
|
Unimer-master/grammars/atis/lambda_calculus_grammar_2.py
|
# coding=utf8
ROOT_RULE = 'statement -> [expression]'
GRAMMAR_DICTIONARY = {}
GRAMMAR_DICTIONARY['statement'] = ['(expression ws)']
GRAMMAR_DICTIONARY['expression'] = [
'(constant)', '(application)', '(abstraction)', '(variable)']
GRAMMAR_DICTIONARY['abstraction'] = [
'("(" ws "_lambda" wsp variable_definition wsp expression ws ")")']
GRAMMAR_DICTIONARY['application'] = ['("(" ws function ws ")")']
GRAMMAR_DICTIONARY['function'] = ['binary_predicate',
'unit_predicate', 'entity_function', 'meta_predicate', ]
GRAMMAR_DICTIONARY['unit_predicate'] = [
'("_weekday" wsp expression)',
'("_meal:_t" wsp expression)',
'("_booking_class:_t" wsp expression)',
'("_fare" wsp expression)',
'("_flight" wsp expression)',
'("_aircraft_code:_t" wsp expression)',
'("_economy" wsp expression)',
'("_has_stops" wsp expression)',
'("_oneway" wsp expression)',
'("_airport" wsp expression)',
'("_taxi" wsp expression)',
'("_rapid_transit" wsp expression)',
'("_airline" wsp expression)',
'("_fare_basis_code" wsp expression)',
'("_tonight" wsp expression)',
'("_today" wsp expression)',
'("_connecting" wsp expression)',
'("_overnight" wsp expression)',
'("_round_trip" wsp expression)',
'("_day_after_tomorrow" wsp expression)',
'("_discounted" wsp expression)',
'("_time_zone_code" wsp expression)',
'("_limousine" wsp expression)',
'("_daily" wsp expression)',
'("_turboprop" wsp expression)',
'("_air_taxi_operation" wsp expression)',
'("_has_meal" wsp expression)',
'("_minimum_connection_time" wsp expression)',
'("_tomorrow_arrival" wsp expression)',
'("_tomorrow" wsp expression)',
'("_aircraft" wsp expression)',
'("_rental_car" wsp expression)',
'("_jet" wsp expression)',
'("_city" wsp expression)',
'("_class_of_service" wsp expression)',
'("_ground_transport" wsp expression)',
'("_nonstop" wsp expression)',
'("_meal_code" wsp expression)',
]
GRAMMAR_DICTIONARY['binary_predicate'] = [
'("_month_arrival" wsp expression wsp expression)',
'("_stops" wsp expression wsp expression)',
'("_day_number" wsp expression wsp expression)',
'("_meal" wsp expression wsp expression)',
'("_approx_return_time" wsp expression wsp expression)',
'("_booking_class" wsp expression wsp expression)',
'("_approx_arrival_time" wsp expression wsp expression)',
'("_fare" wsp expression wsp expression)',
'("_aircraft_basis_type" wsp expression wsp expression)',
'("_aircraft_code" wsp expression wsp expression)',
'("_departure_time" wsp expression wsp expression)',
'("_airport" wsp expression wsp expression)',
'("_flight_number" wsp expression wsp expression)',
'("_loc:_t" wsp expression wsp expression)',
'("_airline" wsp expression wsp expression)',
'("_during_day" wsp expression wsp expression)',
'("_manufacturer" wsp expression wsp expression)',
'("_fare_basis_code" wsp expression wsp expression)',
'("_approx_departure_time" wsp expression wsp expression)',
'("_arrival_time" wsp expression wsp expression)',
'("_services" wsp expression wsp expression)',
'("_next_days" wsp expression wsp expression)',
'("_from" wsp expression wsp expression)',
'("_stop" wsp expression wsp expression)',
'("_year" wsp expression wsp expression)',
'("_day_return" wsp expression wsp expression)',
'("_class_type" wsp expression wsp expression)',
'("_day_arrival" wsp expression wsp expression)',
'("_during_day_arrival" wsp expression wsp expression)',
'("_days_from_today" wsp expression wsp expression)',
'("_from_airport" wsp expression wsp expression)',
'("_to_city" wsp expression wsp expression)',
'("_day_number_arrival" wsp expression wsp expression)',
'("_aircraft" wsp expression wsp expression)',
'("_month" wsp expression wsp expression)',
'("_day_number_return" wsp expression wsp expression)',
'("_day" wsp expression wsp expression)',
'("_before_day" wsp expression wsp expression)',
'("_to" wsp expression wsp expression)',
'("_time_elapsed" wsp expression wsp expression)',
'("_month_return" wsp expression wsp expression)',
'("_after_day" wsp expression wsp expression)',
'("_meal_code" wsp expression wsp expression)',
]
GRAMMAR_DICTIONARY['entity_function'] = [
'("_stops" wsp expression)',
'("_stop_arrival_time" wsp expression)',
'("_meal" wsp expression)',
'("_booking_class" wsp expression)',
'("_fare" wsp expression)',
'("_aircraft_code" wsp expression)',
'("_minutes_distant" wsp expression wsp expression)',
'("_minutes_distant" wsp expression)',
'("_departure_time" wsp expression)',
'("_ground_fare" wsp expression)',
'("_flight_number" wsp expression)',
'("_arrival_time" wsp expression)',
'("_airline:_e" wsp expression)',
'("_restriction_code" wsp expression)',
'("_capacity" wsp expression)',
'("_cost" wsp expression)',
'("_airline_name" wsp expression)',
'("_miles_distant" wsp expression wsp expression)',
'("_miles_distant" wsp expression)',
'("_time_elapsed" wsp expression)',
'("_abbrev" wsp expression)',
]
GRAMMAR_DICTIONARY['meta_predicate'] = [
'("_the" wsp variable wsp application)',
'("_>" wsp expression wsp expression)',
'("_=" wsp expression wsp expression)',
'("_<" wsp expression wsp expression)',
'("_named" wsp expression wsp expression)',
'("_max" wsp variable wsp application)',
'("_min" wsp variable wsp application)',
'("_not" wsp application)',
'("_or" wsp application wsp polyvariadic_expression)',
'("_and" wsp application wsp polyvariadic_expression)',
'("_argmax" wsp variable wsp application wsp application)',
'("_argmin" wsp variable wsp application wsp application)',
'("_sum" wsp variable wsp application wsp application)',
'("_equals" wsp expression wsp expression)',
'("_equals:_t" wsp expression wsp expression)',
'("_exists" wsp variable wsp application)',
'("_count" wsp variable wsp application)',
]
GRAMMAR_DICTIONARY['polyvariadic_expression'] = [
'(application ws polyvariadic_expression)', '""']
GRAMMAR_DICTIONARY['variable_definition'] = [
'(variable ":e")', '(variable ":i")']
GRAMMAR_DICTIONARY['variable'] = ['"$v0"', '"$v1"', '"$v2"', '"$v3"']
GRAMMAR_DICTIONARY['constant'] = ['(do)', '(city)', '(al)', '(time)',
'(meal)', '(fn)', '(ap)', '(rc)', '(cl)', '(ac)', '(da)', '(dn)',
'"9:_hr"', '"boeing:_mf"', '"sa:_dc"', '(mn)', '(yr)', '(pd)', '(fb)', '(st)', '(i)', '(bat)']
GRAMMAR_DICTIONARY['do'] = ['"100:_do"', '"1000:_do"', '"466:_do"', '"416:_do"', '"124:_do"', '"329:_do"', '"1100:_do"', '"415:_do"',
'"200:_do"', '"150:_do"', '"932:_do"', '"500:_do"', '"1288:_do"', '"300:_do"', '"400:_do"', '"1500:_do"']
GRAMMAR_DICTIONARY['city'] = ['"nashville:_ci"', '"indianapolis:_ci"', '"san_diego:_ci"', '"long_beach:_ci"', '"atlanta:_ci"', '"kansas_city:_ci"', '"miami:_ci"', '"st_louis:_ci"', '"columbus:_ci"', '"toronto:_ci"', '"las_vegas:_ci"', '"burbank:_ci"', '"cleveland:_ci"', '"tacoma:_ci"', '"st_petersburg:_ci"', '"memphis:_ci"', '"denver:_ci"', '"dallas:_ci"', '"detroit:_ci"', '"oakland:_ci"', '"baltimore:_ci"', '"pittsburgh:_ci"', '"philadelphia:_ci"', '"milwaukee:_ci"', '"salt_lake_city:_ci"', '"san_jose:_ci"', '"tampa:_ci"', '"orlando:_ci"', '"chicago:_ci"', '"seattle:_ci"', '"new_york:_ci"', '"san_francisco:_ci"', '"boston:_ci"', '"washington:_ci"', '"cincinnati:_ci"', '"charlotte:_ci"', '"newark:_ci"', '"westchester_county:_ci"', '"los_angeles:_ci"', '"fort_worth:_ci"', '"minneapolis:_ci"', '"ontario:_ci"', '"montreal:_ci"', '"st_paul:_ci"', '"houston:_ci"', '"phoenix:_ci"']
GRAMMAR_DICTIONARY['al'] = ['"wn:_al"', '"ml:_al"', '"cp:_al"', '"nw:_al"', '"yx:_al"', '"ac:_al"', '"dl:_al"', '"kw:_al"', '"delta:_al"', '"as:_al"', '"tw:_al"',
'"co:_al"', '"ff:_al"', '"ea:_al"', '"ua:_al"', '"canadian_airlines_international:_al"', '"hp:_al"', '"lh:_al"', '"nx:_al"', '"usair:_al"', '"aa:_al"', '"us:_al"']
GRAMMAR_DICTIONARY['time'] = ['"1200:_ti"', '"1628:_ti"', '"1830:_ti"', '"823:_ti"', '"1245:_ti"', '"1524:_ti"', '"200:_ti"', '"1615:_ti"', '"1230:_ti"', '"705:_ti"', '"1045:_ti"', '"1700:_ti"', '"1115:_ti"', '"1645:_ti"', '"1730:_ti"', '"815:_ti"', '"0:_ti"', '"500:_ti"', '"1205:_ti"', '"1940:_ti"', '"2000:_ti"', '"1400:_ti"', '"1130:_ti"', '"2200:_ti"', '"645:_ti"', '"718:_ti"', '"2220:_ti"', '"600:_ti"', '"630:_ti"', '"800:_ti"', '"838:_ti"', '"1330:_ti"', '"845:_ti"', '"1630:_ti"', '"1715:_ti"', '"2010:_ti"', '"1000:_ti"', '"1619:_ti"',
'"2100:_ti"', '"1505:_ti"', '"2400:_ti"', '"1923:_ti"', '"1:_ti"', '"1145:_ti"', '"2300:_ti"', '"1620:_ti"', '"2023:_ti"', '"2358:_ti"', '"1500:_ti"', '"1815:_ti"', '"1425:_ti"', '"720:_ti"', '"1024:_ti"', '"1600:_ti"', '"100:_ti"', '"1310:_ti"', '"1300:_ti"', '"700:_ti"', '"650:_ti"', '"1800:_ti"', '"1110:_ti"', '"1410:_ti"', '"1030:_ti"', '"1900:_ti"', '"1017:_ti"', '"1430:_ti"', '"1850:_ti"', '"900:_ti"', '"1930:_ti"', '"1133:_ti"', '"1220:_ti"', '"2226:_ti"', '"1100:_ti"', '"819:_ti"', '"755:_ti"', '"2134:_ti"', '"555:_ti"']
GRAMMAR_DICTIONARY['meal'] = ['"snack:_me"',
'"lunch:_me"', '"dinner:_me"', '"breakfast:_me"']
GRAMMAR_DICTIONARY['fn'] = ['"838:_fn"', '"1059:_fn"', '"417:_fn"', '"323:_fn"', '"311:_fn"', '"137338:_fn"', '"315:_fn"', '"825:_fn"', '"345:_fn"', '"270:_fn"', '"271:_fn"', '"4400:_fn"', '"296:_fn"', '"1765:_fn"', '"343:_fn"', '"1222:_fn"', '"217:_fn"', '"459:_fn"', '"279:_fn"', '"1083:_fn"', '"324:_fn"', '"746:_fn"', '"281:_fn"', '"269:_fn"', '"98:_fn"',
'"212:_fn"', '"505:_fn"', '"852:_fn"', '"82:_fn"', '"352:_fn"', '"928:_fn"', '"19:_fn"', '"139:_fn"', '"415:_fn"', '"539:_fn"', '"3357:_fn"', '"813:_fn"', '"257:_fn"', '"297:_fn"', '"1055:_fn"', '"405:_fn"', '"201:_fn"', '"71:_fn"', '"1291:_fn"', '"402:_fn"', '"771:_fn"', '"106:_fn"', '"1039:_fn"', '"210:_fn"', '"2153:_fn"', '"3724:_fn"', '"1209:_fn"', '"21:_fn"']
GRAMMAR_DICTIONARY['ap'] = ['"ewr:_ap"', '"jfk:_ap"', '"pit:_ap"', '"oak:_ap"', '"bur:_ap"', '"las:_ap"', '"lga:_ap"', '"den:_ap"', '"mco:_ap"', '"dallas:_ap"', '"dfw:_ap"', '"phx:_ap"', '"slc:_ap"', '"iad:_ap"', '"sfo:_ap"', '"ont:_ap"',
'"iah:_ap"', '"ord:_ap"', '"mia:_ap"', '"cvg:_ap"', '"phl:_ap"', '"tpa:_ap"', '"dtw:_ap"', '"yyz:_ap"', '"ind:_ap"', '"atl:_ap"', '"mke:_ap"', '"hou:_ap"', '"bos:_ap"', '"dal:_ap"', '"bwi:_ap"', '"bna:_ap"', '"stapelton:_ap"', '"lax:_ap"']
GRAMMAR_DICTIONARY['rc'] = ['"b:_rc"', '"ap_55:_rc"', '"ap_57:_rc"', '"s_:_rc"', '"sd_d:_rc"',
'"ap_80:_rc"', '"d_s:_rc"', '"ap_58:_rc"', '"ls:_rc"', '"ap:_rc"', '"s:_rc"', '"ap_68:_rc"']
GRAMMAR_DICTIONARY['cl'] = ['"thrift:_cl"',
'"business:_cl"', '"first:_cl"', '"coach:_cl"']
GRAMMAR_DICTIONARY['ac'] = ['"dc10:_ac"', '"j31:_ac"', '"734:_ac"', '"73s:_ac"', '"72s:_ac"', '"100:_ac"', '"757:_ac"', '"d9s:_ac"',
'"d10:_ac"', '"727:_ac"', '"m80:_ac"', '"747:_ac"', '"f28:_ac"', '"737:_ac"', '"733:_ac"', '"767:_ac"']
GRAMMAR_DICTIONARY['da'] = ['"monday:_da"', '"thursday:_da"', '"saturday:_da"', '"friday:_da"',
'"sunday:_da"', '"wednesday:_da"', '"tuesday:_da"']
GRAMMAR_DICTIONARY['dn'] = ['"12:_dn"', '"18:_dn"', '"19:_dn"', '"31:_dn"', '"7:_dn"', '"20:_dn"', '"27:_dn"', '"6:_dn"', '"26:_dn"', '"17:_dn"', '"11:_dn"', '"10:_dn"', '"15:_dn"', '"23:_dn"',
'"1:_dn"', '"24:_dn"', '"25:_dn"', '"14:_dn"', '"13:_dn"', '"29:_dn"', '"3:_dn"', '"28:_dn"', '"8:_dn"', '"5:_dn"', '"2:_dn"', '"9:_dn"', '"30:_dn"', '"16:_dn"', '"4:_dn"', '"22:_dn"', '"21:_dn"']
GRAMMAR_DICTIONARY['mn'] = ['"january:_mn"', '"february:_mn"', '"december:_mn"', '"june:_mn"', '"august:_mn"',
'"april:_mn"', '"october:_mn"', '"november:_mn"', '"july:_mn"', '"may:_mn"', '"march:_mn"', '"september:_mn"']
GRAMMAR_DICTIONARY['yr'] = ['"1991:_yr"', '"1993:_yr"', '"1992:_yr"']
GRAMMAR_DICTIONARY['pd'] = ['"mealtime:_pd"', '"breakfast:_pd"', '"late:_pd"', '"afternoon:_pd"', '"late_evening:_pd"',
'"daytime:_pd"', '"pm:_pd"', '"late_night:_pd"', '"evening:_pd"', '"morning:_pd"', '"early:_pd"']
GRAMMAR_DICTIONARY['fb'] = ['"y:_fb"', '"qx:_fb"', '"m:_fb"', '"fn:_fb"', '"b:_fb"', '"q:_fb"',
'"bh:_fb"', '"qo:_fb"', '"h:_fb"', '"c:_fb"', '"qw:_fb"', '"k:_fb"', '"f:_fb"', '"yn:_fb"']
GRAMMAR_DICTIONARY['st'] = ['"minnesota:_st"', '"florida:_st"',
'"nevada:_st"', '"california:_st"', '"arizona:_st"']
GRAMMAR_DICTIONARY['i'] = ['"2:_i"', '"3:_i"', '"1:_i"']
GRAMMAR_DICTIONARY['bat'] = ['"737:_bat"', '"767:_bat"']
GRAMMAR_DICTIONARY["ws"] = ['~"\s*"i']
GRAMMAR_DICTIONARY["wsp"] = ['~"\s+"i']
COPY_TERMINAL_SET = {'do', 'city', 'al', 'time',
'meal', 'fn', 'ap', 'rc', 'cl', 'ac', 'da', 'dn',
'mn', 'yr', 'pd', 'fb', 'st', 'i', 'bat'}
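# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# Grammar dictionaries in this style are usually flattened into one parsimonious rule
# per nonterminal, with the alternatives joined by "/". The snippet below assumes that
# convention, the third-party `parsimonious` package, and that 'statement' is the
# dictionary's first (default) rule; treat it as a sketch, not this repository's
# actual grammar loader.
if __name__ == '__main__':
    from parsimonious.grammar import Grammar
    grammar_text = "\n".join(
        "%s = %s" % (lhs, " / ".join(alternatives))
        for lhs, alternatives in GRAMMAR_DICTIONARY.items())
    grammar = Grammar(grammar_text)
    # An illustrative expression assembled from the rules above (not taken from the dataset).
    grammar.parse("(_lambda $v0:e (_and (_flight $v0) "
                  "(_from $v0 boston:_ci) (_to $v0 denver:_ci)))")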
| 13,179 | 67.290155 | 888 |
py
|
Unimer
|
Unimer-master/grammars/atis/get_atis_predicates.py
|
# coding=utf8
def read_data():
questions, logical_forms = list(), list()
paths = ["../../data/atis/atis_lambda_train.tsv",
"../../data/atis/atis_lambda_dev.tsv",
"../../data/atis/atis_lambda_test.tsv"]
for p in paths:
with open(p, "r") as f:
for line in f:
line = line.strip()
splits = line.split('\t')
questions.append(splits[0])
logical_forms.append(splits[1])
return questions, logical_forms
def tokenize_logical_form(lf):
return lf.split()
def extract_predicate_and_entity(logical_forms):
predicates = set()
entities = set()
variables = set()
for lf in logical_forms:
tokens = tokenize_logical_form(lf)
for token in tokens:
if token.startswith("_"):
predicates.add(token)
elif ":_" in token:
entities.add(token)
elif token.startswith("$"):
variables.add(token)
return predicates, entities, variables
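# Illustrative classification (hypothetical token stream, not taken from the data
# files): given tokens such as "_flight", "boston:_ci" and "$v0", the loop above
# files "_flight" under predicates, "boston:_ci" under entities (it contains ":_"),
# and "$v0" under variables (it starts with "$").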
if __name__ == '__main__':
questions, logical_forms = read_data()
for q, lf in zip(questions, logical_forms):
print(q)
print(lf)
print("==\n\n")
predicates, entities, variables = extract_predicate_and_entity(logical_forms)
print("Predicates")
print(predicates)
print("Entities: ")
entity_dict = dict()
for entity in entities:
splits = entity.split(":")
etype = splits[1]
if etype not in entity_dict:
entity_dict[etype] = list()
entity_dict[etype].append(entity)
for etype, _entities in entity_dict.items():
print(etype)
print(['"%s"' % e for e in _entities])
print("***\n\n")
print("Variables:")
_vars = ['"%s"' % v for v in variables]
print(_vars)
| 1,850 | 27.045455 | 81 |
py
|
Unimer
|
Unimer-master/grammars/atis/prolog_grammar.py
|
# coding=utf8
"""
Prolog Grammar of ATIS
"""
GRAMMAR_DICTIONARY = {}
ROOT_RULE = 'statement -> [answer]'
GRAMMAR_DICTIONARY['statement'] = ['(answer ws)']
GRAMMAR_DICTIONARY['answer'] = [
'("answer_1(" var "," goal ")")',
'("answer_2(" var "," var "," goal ")")',
'("answer_3(" var "," var "," var "," goal ")")',
'("answer_4(" var "," var "," var "," var "," goal ")")',
]
# Goal
GRAMMAR_DICTIONARY['goal'] = [
'(declaration)',
'(unit_relation)',
'(binary_relation)',
'(triplet_relation)',
'(meta)',
'("(" goal conjunction ")")',
'("or(" goal conjunction ")")',
'("not(" goal conjunction ")")'
]
GRAMMAR_DICTIONARY['conjunction'] = [
'("," goal conjunction)',
'""'
]
# Variable
GRAMMAR_DICTIONARY['var'] = ['"%s"' % chr(97+i) for i in range(26)]
# Declaration
GRAMMAR_DICTIONARY['declaration'] = [
'("const(" var "," object ")")']
# Object
GRAMMAR_DICTIONARY['object'] = [
'(fare_basis_code)', '(meal_code)', '(airport_code)', '(airline_code)',
'(aircraft_code_object)', '(city_name)', '(time)', '(flight_number_object)',
'(class_description)', '(day_period)', '(state_name)',
'(day_number)', '(month)', '(day)', '(dollar)', '(meal_description)',
'("hour(9)")', '(integer)', '(basis_type)', '(year)',
'("days_code(sa)")', '("manufacturer(boeing)")'
]
GRAMMAR_DICTIONARY['fare_basis_code'] = ['("fare_basis_code(" fare_basis_code_value ")")']
GRAMMAR_DICTIONARY['fare_basis_code_value'] = ['"_qx"', '"_qw"', '"_qo"', '"_fn"', '"_yn"', '"_bh"', '"_k"', '"_b"', '"_h"', '"_f"', '"_q"', '"_c"', '"_y"', '"_m"',]
GRAMMAR_DICTIONARY['meal_code'] = ['("meal_code(" meal_code_value ")")']
GRAMMAR_DICTIONARY['meal_code_value'] = ['"ap_58"', '"ap_57"', '"d_s"', '"b"', '"ap_55"', '"s_"', '"sd_d"', '"ls"', '"ap_68"', '"ap_80"', '"ap"', '"s"', ]
GRAMMAR_DICTIONARY['airline_code'] = ['("airline_code(" airline_code_value ")")']
GRAMMAR_DICTIONARY['airline_code_value'] = ['"usair"', '"co"', '"ua"', '"delta"', '"as"', '"ff"', '"canadian_airlines_international"', '"us"', '"nx"', '"hp"', '"aa"', '"kw"', '"ml"', '"nw"', '"ac"', '"tw"', '"yx"', '"ea"', '"dl"', '"wn"', '"lh"', '"cp"']
GRAMMAR_DICTIONARY['airport_code'] = ['("airport_code(" airport_code_value ")")']
GRAMMAR_DICTIONARY['airport_code_value'] = ['"dallas"', '"ont"', '"stapelton"', '"bna"', '"bwi"', '"iad"', '"sfo"', '"phl"', '"pit"', '"slc"', '"phx"', '"lax"', '"bur"', '"ind"', '"iah"', '"dtw"', '"las"', '"dal"', '"den"', '"atl"', '"ewr"', '"bos"', '"tpa"', '"jfk"', '"mke"', '"oak"', '"yyz"', '"dfw"', '"cvg"', '"hou"', '"lga"', '"ord"', '"mia"', '"mco"']
GRAMMAR_DICTIONARY['aircraft_code_object'] = ['("aircraft_code(" aircraft_code_value ")")']
GRAMMAR_DICTIONARY['aircraft_code_value'] = ['"m80"', '"dc10"', '"727"', '"d9s"', '"f28"', '"j31"', '"767"', '"734"', '"73s"', '"747"', '"737"', '"733"', '"d10"', '"100"', '"757"', '"72s"']
GRAMMAR_DICTIONARY['city_name'] = ['("city_name(" city_name_value ")")']
GRAMMAR_DICTIONARY['city_name_value'] = ['"cleveland"', '"milwaukee"', '"detroit"', '"los_angeles"', '"miami"', '"salt_lake_city"', '"ontario"', '"tacoma"', '"memphis"', '"denver"', '"san_francisco"', '"new_york"', '"tampa"', '"washington"', '"westchester_county"', '"boston"', '"newark"', '"pittsburgh"', '"charlotte"', '"columbus"', '"atlanta"', '"oakland"', '"kansas_city"', '"st_louis"', '"nashville"', '"chicago"', '"fort_worth"', '"san_jose"', '"dallas"', '"philadelphia"', '"st_petersburg"', '"baltimore"', '"san_diego"', '"cincinnati"', '"long_beach"', '"phoenix"', '"indianapolis"', '"burbank"', '"montreal"', '"seattle"', '"st_paul"', '"minneapolis"', '"houston"', '"orlando"', '"toronto"', '"las_vegas"']
GRAMMAR_DICTIONARY['time'] = ['("time(" time_value ")")']
GRAMMAR_DICTIONARY['time_value'] = [
'"1850"', '"1110"', '"2000"', '"1815"', '"1024"', '"1500"',
'"1900"', '"1600"', '"1300"', '"1800"', '"1200"', '"1628"',
'"1830"', '"823"', '"1245"', '"1524"', '"200"', '"1615"',
'"1230"', '"705"', '"1045"', '"1700"', '"1115"', '"1645"',
'"1730"', '"815"', '"0"', '"500"', '"1205"', '"1940"',
'"1400"', '"1130"', '"2200"', '"645"', '"718"', '"2220"',
'"600"', '"630"', '"800"', '"838"', '"1330"', '"845"', '"1630"',
'"1715"', '"2010"', '"1000"', '"1619"', '"2100"', '"1505"',
'"2400"', '"1923"', '"100"', '"1145"', '"2300"', '"1620"',
'"2023"', '"2358"', '"1425"', '"720"', '"1310"', '"700"', '"650"',
'"1410"', '"1030"', '"1900"', '"1017"', '"1430"', '"900"', '"1930"',
'"1133"', '"1220"', '"2226"', '"1100"', '"819"', '"755"', '"2134"', '"555"', '"1"',
]
GRAMMAR_DICTIONARY['flight_number_object'] = ['("flight_number(" flight_number_value ")")']
GRAMMAR_DICTIONARY['flight_number_value'] = [
'"1291"', '"345"', '"813"', '"71"', '"1059"', '"212"', '"1209"',
'"281"', '"201"', '"324"', '"19"', '"352"', '"137338"', '"4400"',
'"323"', '"505"', '"825"', '"82"', '"279"', '"1055"', '"296"', '"315"',
'"1765"', '"405"', '"771"', '"106"', '"2153"', '"257"', '"402"',
'"343"', '"98"', '"1039"', '"217"', '"539"', '"459"', '"417"',
'"1083"', '"3357"', '"311"', '"210"', '"139"', '"852"', '"838"',
'"415"', '"3724"', '"21"', '"928"', '"269"', '"270"',
'"297"', '"746"', '"1222"', '"271"'
]
GRAMMAR_DICTIONARY['class_description'] = ['("class_description(" class_description_value ")")']
GRAMMAR_DICTIONARY['class_description_value'] = ['"thrift"', '"coach"', '"first"', '"business"']
GRAMMAR_DICTIONARY['day_period'] = ['("day_period(" day_period_value ")")']
GRAMMAR_DICTIONARY['day_period_value'] = ['"early"', '"afternoon"', '"late_evening"', '"late_night"', '"mealtime"', '"evening"', '"pm"', '"daytime"', '"breakfast"', '"morning"', '"late"']
GRAMMAR_DICTIONARY['state_name'] = ['("state_name(" state_name_value ")")']
GRAMMAR_DICTIONARY['state_name_value'] = ['"minnesota"', '"florida"', '"arizona"', '"nevada"', '"california"']
GRAMMAR_DICTIONARY['day_number'] = ['("day_number(" day_number_value ")")']
GRAMMAR_DICTIONARY['day_number_value'] = ['"13"', '"29"', '"28"', '"22"', '"21"', '"16"', '"30"', '"12"', '"18"', '"19"', '"31"', '"20"', '"27"', '"6"', '"26"', '"17"', '"11"', '"10"', '"15"', '"23"', '"24"', '"25"', '"14"', '"1"', '"3"', '"8"', '"5"', '"2"', '"9"', '"4"', '"7"']
GRAMMAR_DICTIONARY['month'] = ['("month(" month_value ")")']
GRAMMAR_DICTIONARY['month_value'] = ['"april"', '"august"', '"may"', '"october"', '"june"', '"november"', '"september"', '"february"', '"december"', '"march"', '"july"', '"january"']
GRAMMAR_DICTIONARY['day'] = ['("day(" day_value ")")']
GRAMMAR_DICTIONARY['day_value'] = ['"monday"', '"wednesday"', '"thursday"', '"tuesday"', '"saturday"', '"friday"', '"sunday"']
GRAMMAR_DICTIONARY['dollar'] = ['("dollar(" dollar_value ")")']
GRAMMAR_DICTIONARY['dollar_value'] = ['"1000"', '"1500"', '"466"', '"1288"', '"300"', '"329"', '"416"', '"124"', '"932"', '"1100"', '"200"', '"500"', '"100"', '"415"', '"150"', '"400"']
GRAMMAR_DICTIONARY['meal_description'] = ['("meal_description(" meal_description_value ")")']
GRAMMAR_DICTIONARY['meal_description_value'] = ['"snack"', '"breakfast"', '"lunch"', '"dinner"']
GRAMMAR_DICTIONARY['integer'] = ['("integer(" integer_value ")")']
GRAMMAR_DICTIONARY['integer_value'] = ['"2"', '"1"', '"3"']
GRAMMAR_DICTIONARY['basis_type'] = ['("basis_type(" basis_type_value ")")']
GRAMMAR_DICTIONARY['basis_type_value'] = ['"737"', '"767"']
GRAMMAR_DICTIONARY['year'] = ['("year(" year_value ")")']
GRAMMAR_DICTIONARY['year_value'] = ['"1991"', '"1993"', '"1992"']
# Unit Relation
GRAMMAR_DICTIONARY['unit_relation'] = [
# Flight
'(is_flight)', '(is_oneway)', '(is_round_trip)', '(is_daily_flight)',
'(is_flight_has_stop)', '(is_non_stop_flight)', '(is_flight_economy)',
'(is_flight_has_meal)', '(is_economy)', '(is_discounted_flight)',
'(is_flight_overnight)', '(is_connecting_flight)',
# Meal
'(is_meal)', '(is_meal_code)',
# Airline
'(is_airline)',
# Transport way
'(is_rapid_transit)', '(is_taxi)', '(is_air_taxi_operation)', '(is_ground_transport_on_weekday)',
'(is_ground_transport)', '(is_limousine)', '(is_rental_car)',
# Aircraft
'(is_flight_turboprop)', '(is_turboprop)', '(aircraft_code)', '(is_aircraft)', '(is_flight_jet)',
# Time
'(is_day_after_tomorrow_flight)', '(is_flight_tonight)', '(is_today_flight)',
'(is_tomorrow_flight)', '(is_flight_on_weekday)', '(is_tomorrow_arrival_flight)',
# Other
'(is_time_zone_code)', '(is_class_of_service)', '(is_city)',
'(is_airport)', '(is_fare_basis_code)', '(is_booking_class_t)',
]
GRAMMAR_DICTIONARY['is_discounted_flight'] = ['("is_discounted_flight(" var ")")']
GRAMMAR_DICTIONARY['is_taxi'] = ['("is_taxi(" var ")")']
GRAMMAR_DICTIONARY['is_economy'] = ['("is_economy(" var ")")']
GRAMMAR_DICTIONARY['is_flight_on_weekday'] = ['("is_flight_on_weekday(" var ")")']
GRAMMAR_DICTIONARY['is_time_zone_code'] = ['("is_time_zone_code(" var ")")']
GRAMMAR_DICTIONARY['is_air_taxi_operation'] = ['("is_air_taxi_operation(" var ")")']
GRAMMAR_DICTIONARY['is_fare_basis_code'] = ['("is_fare_basis_code(" var ")")']
GRAMMAR_DICTIONARY['is_meal_code'] = ['("is_meal_code(" var ")")']
GRAMMAR_DICTIONARY['is_limousine'] = ['("is_limousine(" var ")")']
GRAMMAR_DICTIONARY['is_flight_tonight'] = ['("is_flight_tonight(" var ")")']
GRAMMAR_DICTIONARY['is_tomorrow_arrival_flight'] = ['("is_tomorrow_arrival_flight(" var ")")']
GRAMMAR_DICTIONARY['is_tomorrow_flight'] = ['("is_tomorrow_flight(" var ")")']
GRAMMAR_DICTIONARY['is_daily_flight'] = ['("is_daily_flight(" var ")")']
GRAMMAR_DICTIONARY['_minutes_distant'] = ['("_minutes_distant(" var ")")']
GRAMMAR_DICTIONARY['is_flight'] = ['("is_flight(" var ")")']
GRAMMAR_DICTIONARY['is_city'] = ['("is_city(" var ")")']
GRAMMAR_DICTIONARY['is_booking_class_t'] = ['("is_booking_class_t(" var ")")']
GRAMMAR_DICTIONARY['is_rapid_transit'] = ['("is_rapid_transit(" var ")")']
GRAMMAR_DICTIONARY['is_oneway'] = ['("is_oneway(" var ")")']
GRAMMAR_DICTIONARY['is_airport'] = ['("is_airport(" var ")")']
GRAMMAR_DICTIONARY['is_flight_has_stop'] = ['("is_flight_has_stop(" var ")")']
GRAMMAR_DICTIONARY['aircraft_code'] = ['("aircraft_code(" var ")")']
GRAMMAR_DICTIONARY['is_day_after_tomorrow_flight'] = ['("is_day_after_tomorrow_flight(" var ")")']
GRAMMAR_DICTIONARY['is_airline'] = ['("is_airline(" var ")")']
GRAMMAR_DICTIONARY['is_flight_economy'] = ['("is_flight_economy(" var ")")']
GRAMMAR_DICTIONARY['is_class_of_service'] = ['("is_class_of_service(" var ")")']
GRAMMAR_DICTIONARY['is_aircraft'] = ['("is_aircraft(" var ")")']
GRAMMAR_DICTIONARY['is_today_flight'] = ['("is_today_flight(" var ")")']
GRAMMAR_DICTIONARY['is_flight_has_meal'] = ['("is_flight_has_meal(" var ")")']
GRAMMAR_DICTIONARY['is_ground_transport'] = ['("is_ground_transport(" var ")")']
GRAMMAR_DICTIONARY['is_non_stop_flight'] = ['("is_non_stop_flight(" var ")")']
GRAMMAR_DICTIONARY['is_flight_turboprop'] = ['("is_flight_turboprop(" var ")")']
GRAMMAR_DICTIONARY['is_meal'] = ['("is_meal(" var ")")']
GRAMMAR_DICTIONARY['is_round_trip'] = ['("is_round_trip(" var ")")']
GRAMMAR_DICTIONARY['is_ground_transport_on_weekday'] = ['("is_ground_transport_on_weekday(" var ")")']
GRAMMAR_DICTIONARY['is_turboprop'] = ['("is_turboprop(" var ")")']
GRAMMAR_DICTIONARY['is_rental_car'] = ['("is_rental_car(" var ")")']
GRAMMAR_DICTIONARY['is_connecting_flight'] = ['("is_connecting_flight(" var ")")']
GRAMMAR_DICTIONARY['is_flight_jet'] = ['("is_flight_jet(" var ")")']
GRAMMAR_DICTIONARY['is_flight_overnight'] = ['("is_flight_overnight(" var ")")']
# Binary Predicate
GRAMMAR_DICTIONARY['binary_relation'] = [
# General
'(_named)',
# Flight property
'(is_flight_has_specific_fare_basis_code)', '(is_flight_has_booking_class)',
'(is_flight_stop_at_city)', '(is_flight_on_year)', '(is_flight_during_day)',
'(is_flight_stops_specify_number_of_times)', '(is_flight_meal_code)',
'(is_from)', '(is_flight_day_return)', '(is_flight_day_number_return)',
'(is_flight_departure_time)', '(is_flight_month_return)',
'(is_flight_month_arrival)', '(is_flight_approx_return_time)',
'(is_flight_before_day)', '(is_flight_approx_arrival_time)',
'(is_flight_day_number_arrival)',
'(is_flight_arrival_time)', '(is_flight_with_specific_aircraft)',
'(is_flight_on_day_number)', '(is_flight_on_day)', '(is_flight_manufacturer)',
'(is_flight_aircraft)', '(is_flight_stop_at_airport)',
'(is_flight_during_day_arrival)', '(is_flight_days_from_today)',
'(is_fare_basis_code_class_type)', '(is_flight_after_day)',
'(is_flight_day_arrival)', '(is_flight_approx_departure_time)',
'(is_flight_has_specific_meal)', '(is_next_days_flight)',
'(is_flight_has_class_type)', '(is_to)', '(is_flight_airline)',
'(p_flight_fare)', '(is_flight_number)',
# Airport
'(is_airport_of_city)', '(is_airline_services)', '(is_services)', '(is_from_airports_of_city)',
# Ground Transport
'(is_from_airport)', '(is_to_city)', '(is_loc_t_state)',
# Aircraft
'(is_mf)', '(is_loc_t)', '(is_aircraft_basis_type)', '(is_aircraft_airline)',
# Other
'(is_flight_cost_fare)',
'(is_loc_t_city_time_zone)',
'(is_airline_provide_meal)',
'(is_airline_has_booking_class)',
# Entity
'(minimum_connection_time)', '(p_flight_stop_arrival_time)',
'(p_ground_fare)', '(p_booking_class_fare)',
'(airline_name)', '(abbrev)', '(capacity)', '(minutes_distant)', '(is_time_elapsed)',
'(p_flight_restriction_code)'
]
GRAMMAR_DICTIONARY['airline_name'] = ['("airline_name(" var "," var ")")']
GRAMMAR_DICTIONARY['_named'] = ['("_named(" var "," var ")")']
GRAMMAR_DICTIONARY['is_time_elapsed'] = ['("is_time_elapsed(" var "," var ")")']
GRAMMAR_DICTIONARY['is_flight_has_specific_fare_basis_code'] = ['("is_flight_has_specific_fare_basis_code(" var "," var ")")']
GRAMMAR_DICTIONARY['abbrev'] = ['("abbrev(" var "," var ")")']
GRAMMAR_DICTIONARY['is_flight_during_day'] = ['("is_flight_during_day(" var "," var ")")']
GRAMMAR_DICTIONARY['is_flight_has_booking_class'] = ['("is_flight_has_booking_class(" var "," var ")")']
GRAMMAR_DICTIONARY['is_airline_has_booking_class'] = ['("is_airline_has_booking_class(" var "," var ")")']
GRAMMAR_DICTIONARY['capacity'] = ['("capacity(" var "," var ")")']
GRAMMAR_DICTIONARY['get_flight_airline_code'] = ['("get_flight_airline_code(" var "," var ")")']
GRAMMAR_DICTIONARY['is_flight_stop_at_city'] = ['("is_flight_stop_at_city(" var "," var ")")']
GRAMMAR_DICTIONARY['is_flight_on_year'] = ['("is_flight_on_year(" var "," var ")")']
GRAMMAR_DICTIONARY['is_flight_stops_specify_number_of_times'] = ['("is_flight_stops_specify_number_of_times(" var "," var ")")']
GRAMMAR_DICTIONARY['is_from_airport'] = ['("is_from_airport(" var "," var ")")']
GRAMMAR_DICTIONARY['is_flight_meal_code'] = ['("is_flight_meal_code(" var "," var ")")']
GRAMMAR_DICTIONARY['p_flight_airline_code'] = ['("p_flight_airline_code(" var "," var ")")']
GRAMMAR_DICTIONARY['is_from'] = ['("is_from(" var "," var ")")']
GRAMMAR_DICTIONARY['is_flight_day_return'] = ['("is_flight_day_return(" var "," var ")")']
GRAMMAR_DICTIONARY['get_flight_fare'] = ['("get_flight_fare(" var "," var ")")']
GRAMMAR_DICTIONARY['is_flight_day_number_return'] = ['("is_flight_day_number_return(" var "," var ")")']
GRAMMAR_DICTIONARY['is_flight_departure_time'] = ['("is_flight_departure_time(" var "," var ")")']
GRAMMAR_DICTIONARY['is_flight_month_return'] = ['("is_flight_month_return(" var "," var ")")']
GRAMMAR_DICTIONARY['is_flight_month_arrival'] = ['("is_flight_month_arrival(" var "," var ")")']
GRAMMAR_DICTIONARY['is_flight_number'] = ['("is_flight_number(" var "," var ")")']
GRAMMAR_DICTIONARY['is_flight_cost_fare'] = ['("is_flight_cost_fare(" var "," var ")")']
GRAMMAR_DICTIONARY['is_flight_approx_return_time'] = ['("is_flight_approx_return_time(" var "," var ")")']
GRAMMAR_DICTIONARY['is_flight_before_day'] = ['("is_flight_before_day(" var "," var ")")']
GRAMMAR_DICTIONARY['is_flight_approx_arrival_time'] = ['("is_flight_approx_arrival_time(" var "," var ")")']
GRAMMAR_DICTIONARY['is_airport_of_city'] = ['("is_airport_of_city(" var "," var ")")']
GRAMMAR_DICTIONARY['is_flight_day_number_arrival'] = ['("is_flight_day_number_arrival(" var "," var ")")']
GRAMMAR_DICTIONARY['is_airline_services'] = ['("is_airline_services(" var "," var ")")']
GRAMMAR_DICTIONARY['is_flight_airline'] = ['("is_flight_airline(" var "," var ")")']
GRAMMAR_DICTIONARY['is_flight_arrival_time'] = ['("is_flight_arrival_time(" var "," var ")")']
GRAMMAR_DICTIONARY['is_flight_with_specific_aircraft'] = ['("is_flight_with_specific_aircraft(" var "," var ")")']
GRAMMAR_DICTIONARY['is_mf'] = ['("is_mf(" var "," var ")")']
GRAMMAR_DICTIONARY['get_flight_aircraft_code'] = ['("get_flight_aircraft_code(" var "," var ")")']
GRAMMAR_DICTIONARY['is_flight_on_day_number'] = ['("is_flight_on_day_number(" var "," var ")")']
GRAMMAR_DICTIONARY['is_loc_t'] = ['("is_loc_t(" var "," var ")")']
GRAMMAR_DICTIONARY['is_flight_on_day'] = ['("is_flight_on_day(" var "," var ")")']
GRAMMAR_DICTIONARY['get_flight_restriction_code'] = ['("get_flight_restriction_code(" var "," var ")")']
GRAMMAR_DICTIONARY['is_to_city'] = ['("is_to_city(" var "," var ")")']
GRAMMAR_DICTIONARY['is_flight_manufacturer'] = ['("is_flight_manufacturer(" var "," var ")")']
GRAMMAR_DICTIONARY['minutes_distant'] = ['("minutes_distant(" var "," var ")")']
GRAMMAR_DICTIONARY['is_services'] = ['("is_services(" var "," var ")")']
GRAMMAR_DICTIONARY['p_booking_class_fare'] = ['("p_booking_class_fare(" var "," var ")")']
GRAMMAR_DICTIONARY['p_flight_aircraft_code'] = ['("p_flight_aircraft_code(" var "," var ")")']
GRAMMAR_DICTIONARY['p_flight_restriction_code'] = ['("p_flight_restriction_code(" var "," var ")")']
GRAMMAR_DICTIONARY['is_flight_aircraft'] = ['("is_flight_aircraft(" var "," var ")")']
GRAMMAR_DICTIONARY['is_flight_stop_at_airport'] = ['("is_flight_stop_at_airport(" var "," var ")")']
GRAMMAR_DICTIONARY['is_flight_during_day_arrival'] = ['("is_flight_during_day_arrival(" var "," var ")")']
GRAMMAR_DICTIONARY['departure_time'] = ['("departure_time(" var "," var ")")']
GRAMMAR_DICTIONARY['arrival_time'] = ['("arrival_time(" var "," var ")")']
GRAMMAR_DICTIONARY['is_fare_basis_code_class_type'] = ['("is_fare_basis_code_class_type(" var "," var ")")']
GRAMMAR_DICTIONARY['is_flight_after_day'] = ['("is_flight_after_day(" var "," var ")")']
GRAMMAR_DICTIONARY['p_flight_booking_class'] = ['("p_flight_booking_class(" var "," var ")")']
GRAMMAR_DICTIONARY['get_number_of_stops'] = ['("get_number_of_stops(" var "," var ")")']
GRAMMAR_DICTIONARY['is_flight_days_from_today'] = ['("is_flight_days_from_today(" var "," var ")")']
GRAMMAR_DICTIONARY['minimum_connection_time'] = ['("minimum_connection_time(" var "," var ")")']
GRAMMAR_DICTIONARY['is_aircraft_basis_type'] = ['("is_aircraft_basis_type(" var "," var ")")']
GRAMMAR_DICTIONARY['is_flight_day_arrival'] = ['("is_flight_day_arrival(" var "," var ")")']
GRAMMAR_DICTIONARY['is_loc_t_state'] = ['("is_loc_t_state(" var "," var ")")']
GRAMMAR_DICTIONARY['is_flight_approx_departure_time'] = ['("is_flight_approx_departure_time(" var "," var ")")']
GRAMMAR_DICTIONARY['is_from_airports_of_city'] = ['("is_from_airports_of_city(" var "," var ")")']
GRAMMAR_DICTIONARY['is_flight_has_specific_meal'] = ['("is_flight_has_specific_meal(" var "," var ")")']
GRAMMAR_DICTIONARY['p_flight_fare'] = ['("p_flight_fare(" var "," var ")")']
GRAMMAR_DICTIONARY['is_next_days_flight'] = ['("is_next_days_flight(" var "," var ")")']
GRAMMAR_DICTIONARY['is_flight_has_class_type'] = ['("is_flight_has_class_type(" var "," var ")")']
GRAMMAR_DICTIONARY['time_elapsed'] = ['("time_elapsed(" var "," var ")")']
GRAMMAR_DICTIONARY['is_to'] = ['("is_to(" var "," var ")")']
GRAMMAR_DICTIONARY['is_loc_t_city_time_zone'] = ['("is_loc_t_city_time_zone(" var "," var ")")']
GRAMMAR_DICTIONARY['is_aircraft_airline'] = ['("is_aircraft_airline(" var "," var ")")']
GRAMMAR_DICTIONARY['p_ground_fare'] = ['("p_ground_fare(" var "," var ")")']
GRAMMAR_DICTIONARY['is_airline_provide_meal'] = ['("is_airline_provide_meal(" var "," var ")")']
GRAMMAR_DICTIONARY['p_flight_meal'] = ['("p_flight_meal(" var "," var ")")']
GRAMMAR_DICTIONARY['p_flight_stop_arrival_time'] = ['("p_flight_stop_arrival_time(" var "," var ")")']
# Triplet Relations
GRAMMAR_DICTIONARY['triplet_relation'] = ['(miles_distant_between_city)', '(miles_distant)']
GRAMMAR_DICTIONARY['miles_distant_between_city'] = ['("miles_distant_between_city(" var "," var "," var ")")']
GRAMMAR_DICTIONARY['miles_distant'] = ['("miles_distant(" var "," var "," var ")")']
# Meta Predicates
GRAMMAR_DICTIONARY['meta'] = [
'(equals)', '(equals_arrival_time)',
'(larger_than_arrival_time)', '(larger_than_capacity)', '(larger_than_departure_time)',
'(larger_than_number_of_stops)', '(less_than_flight_cost)', '(less_than_departure_time)',
'(less_than_flight_fare)', '(less_than_arrival_time)', '(count)', '(argmax_capacity)',
'(argmax_arrival_time)', '(argmax_departure_time)', '(argmax_get_number_of_stops)',
'(argmax_get_flight_fare)', '(argmax_count)', '(argmin_time_elapsed)',
'(argmin_get_number_of_stops)', '(argmin_arrival_time)',
'(argmin_capacity)', '(argmin_departure_time)', '(argmin_get_flight_fare)',
'(argmin_miles_distant)', '(max)', '(min)', '(sum_capacity)', '(sum_get_number_of_stops)'
]
GRAMMAR_DICTIONARY['equals'] = ['("equals(" var "," var ")")']
GRAMMAR_DICTIONARY['equals_arrival_time'] = ['("equals_arrival_time(" var "," var ")")']
GRAMMAR_DICTIONARY['larger_than_arrival_time'] = ['("larger_than_arrival_time(" var "," var ")")']
GRAMMAR_DICTIONARY['larger_than_capacity'] = ['("larger_than_capacity(" var "," var ")")']
GRAMMAR_DICTIONARY['larger_than_departure_time'] = ['("larger_than_departure_time(" var "," var ")")']
GRAMMAR_DICTIONARY['larger_than_number_of_stops'] = ['("larger_than_number_of_stops(" var "," var ")")']
GRAMMAR_DICTIONARY['less_than_flight_cost'] = ['("less_than_flight_cost(" var "," var ")")']
GRAMMAR_DICTIONARY['less_than_departure_time'] = ['("less_than_departure_time(" var "," var ")")']
GRAMMAR_DICTIONARY['less_than_flight_fare'] = ['("less_than_flight_fare(" var "," var ")")']
GRAMMAR_DICTIONARY['less_than_arrival_time'] = ['("less_than_arrival_time(" var "," var ")")']
GRAMMAR_DICTIONARY['count'] = ['("count(" var "," goal "," var ")")']
GRAMMAR_DICTIONARY['max'] = ['("_max(" var "," goal ")")']
GRAMMAR_DICTIONARY['min'] = ['("_min(" var "," goal ")")']
GRAMMAR_DICTIONARY['argmax_capacity'] = ['("argmax_capacity(" var "," goal ")")']
GRAMMAR_DICTIONARY['argmax_arrival_time'] = ['("argmax_arrival_time(" var "," goal ")")']
GRAMMAR_DICTIONARY['argmax_departure_time'] = ['("argmax_departure_time(" var "," goal ")")']
GRAMMAR_DICTIONARY['argmax_get_number_of_stops'] = ['("argmax_get_number_of_stops(" var "," goal ")")']
GRAMMAR_DICTIONARY['argmax_get_flight_fare'] = ['("argmax_get_flight_fare(" var "," goal ")")']
GRAMMAR_DICTIONARY['argmax_count'] = ['("argmax_count(" var "," goal ")")']
GRAMMAR_DICTIONARY['argmin_arrival_time'] = ['("argmin_arrival_time(" var "," goal ")")']
GRAMMAR_DICTIONARY['argmin_capacity'] = ['("argmin_capacity(" var "," goal ")")']
GRAMMAR_DICTIONARY['argmin_departure_time'] = ['("argmin_departure_time(" var "," goal ")")']
GRAMMAR_DICTIONARY['argmin_get_number_of_stops'] = ['("argmin_get_number_of_stops(" var "," goal ")")']
GRAMMAR_DICTIONARY['argmin_get_flight_fare'] = ['("argmin_get_flight_fare(" var "," goal ")")']
GRAMMAR_DICTIONARY['argmin_miles_distant'] = ['("argmin_miles_distant(" var "," goal ")")']
GRAMMAR_DICTIONARY['argmin_time_elapsed'] = ['("argmin_time_elapsed(" var "," goal ")")']
GRAMMAR_DICTIONARY['sum_capacity'] = ['("sum_capacity(" var "," goal "," var ")")']
GRAMMAR_DICTIONARY['sum_get_number_of_stops'] = ['("sum_get_number_of_stops(" var "," goal "," var ")")']
GRAMMAR_DICTIONARY["ws"] = ['~"\s*"i']
GRAMMAR_DICTIONARY["wsp"] = ['~"\s+"i']
COPY_TERMINAL_SET = {
'fare_basis_code_value', 'meal_code_value', 'airport_code_value', 'airline_code_value',
'aircraft_code_value', 'city_name_value', 'time_value', 'flight_number_value',
'class_description_value', 'day_period_value', 'state_name_value',
'day_number_value', 'month_value', 'day_value', 'dollar_value', 'meal_description_value',
'integer_value', 'basis_type_value', 'year_value',
}
| 24,384 | 69.071839 | 715 |
py
|
Unimer
|
Unimer-master/grammars/atis/sql_grammar_3.py
|
# coding=utf8
ROOT_RULE = 'statement -> [mquery]'
GRAMMAR_DICTIONARY = {}
GRAMMAR_DICTIONARY["statement"] = ['(mquery ws)']
GRAMMAR_DICTIONARY["mquery"] = [
'(ws select_clause ws from_clause ws where_clause ws groupby_clause ws orderby_clause)',
'(ws select_clause ws from_clause ws where_clause ws groupby_clause)',
'(ws select_clause ws from_clause ws where_clause ws orderby_clause)',
'(ws select_clause ws from_clause ws where_clause)',
'(ws select_clause ws from_clause ws groupby_clause ws orderby_clause)',
'(ws select_clause ws from_clause ws groupby_clause)',
'(ws select_clause ws from_clause)'
]
# SELECT
GRAMMAR_DICTIONARY["select_clause"] = [
'(select_with_distinct ws select_results)']
GRAMMAR_DICTIONARY["select_with_distinct"] = [
'(ws "select" ws "distinct")', '(ws "select")']
GRAMMAR_DICTIONARY["select_results"] = [
'(ws subject ws "," ws select_results)', '(ws subject)']
# FROM
GRAMMAR_DICTIONARY["from_clause"] = ['(ws "from" ws source)']
GRAMMAR_DICTIONARY["source"] = [
'(ws table_name_alias ws "," ws source)', '(ws table_name_alias)']
# ORDER
GRAMMAR_DICTIONARY["orderby_clause"] = ['ws "order by" ws subject']
# GROUP BY
GRAMMAR_DICTIONARY["groupby_clause"] = ['(ws "group by" ws table_columns)']
# WHERE
GRAMMAR_DICTIONARY["where_clause"] = ['(ws "where" wsp condition)']
GRAMMAR_DICTIONARY["condition"] = ['(ws single ws "and" wsp condition)',
'(ws single ws "or" wsp condition)',
'(single)']
GRAMMAR_DICTIONARY["single"] = ['(expr)',
'("(" ws condition ws ")")',
'("not" ws single)']
GRAMMAR_DICTIONARY["expr"] = [
'(table_columns wsp "between" wsp time_value wsp "and" wsp time_value)',
'(table_columns wsp "not" wsp "between" wsp time_value wsp "and" wsp time_value)',
'(table_columns wsp "is" wsp "not" wsp "null")',
'(table_columns wsp "not" wsp "in" wsp "(" ws mquery ws ")")',
'(table_columns wsp "in" ws "(" ws mquery ws ")")',
'(table_columns ws binaryop ws "all" ws "(" ws mquery ws ")")',
'(table_columns ws binaryop ws "any" ws "(" ws mquery ws ")")',
'(table_columns ws binaryop ws "(" ws mquery ws ")")',
'(concrete_value_expr)',
'(table_columns ws binaryop ws col_ref)',
]
GRAMMAR_DICTIONARY["subject"] = ['function', 'col_ref']
GRAMMAR_DICTIONARY["col_ref"] = ['table_columns', '"*"']
GRAMMAR_DICTIONARY["function"] = ['(fname ws "(" ws "distinct" ws col_ref ws ")")',
'(fname ws "(" ws col_ref ws ")")']
GRAMMAR_DICTIONARY["fname"] = ['"count"', '"sum"', '"max"', '"min"']
# TODO(MARK): This is not tight enough. AND/OR are strictly boolean value operators.
GRAMMAR_DICTIONARY["binaryop"] = ['"="', '"!="', '"<>"',
'">="', '"<="', '">"', '"<"', '"like"', '"not like"']
GRAMMAR_DICTIONARY["ws"] = ['~"\s*"i']
GRAMMAR_DICTIONARY['wsp'] = ['~"\s+"i']
GRAMMAR_DICTIONARY["table_name"] = ['"aircraft"', '"airline"', '"airport_base"', '"airport_service"', '"fare_basis"',
'"city"', '"class_of_service"','"date_day"', '"days"', '"equipment_sequence"', '"fare_base"',
'"flight_base"', '"flight_fare"', '"flight_leg"',
'"flight_stop"', '"food_service"', '"ground_service"', '"restriction"', '"state"',]
GRAMMAR_DICTIONARY["table_name_alias"] = [
'("aircraft" wsp aircraft_alias)',
'("airline" wsp "airline_1")',
'("airport_base" wsp airport_base_alias)',
'("airport_service" wsp airport_service_alias)',
'("fare_basis" wsp fare_basis_alias)',
'("city" wsp city_alias)',
'("class_of_service" wsp "class_of_service_1")',
'("date_day" wsp date_day_alias)',
'("days" wsp days_alias)',
'("equipment_sequence" wsp equipment_sequence_alias)',
'("fare_base" wsp fare_base_alias)',
'("flight_base" wsp flight_base_alias)',
'("flight_fare" wsp flight_fare_alias)',
'("flight_leg" wsp flight_leg_alias)',
'("flight_stop" wsp flight_stop_alias)',
'("food_service" wsp food_service_alias)',
'("ground_service" wsp "ground_service_1")',
'("restriction" wsp "restriction_1")',
'("state" wsp state_alias)'
]
GRAMMAR_DICTIONARY['aircraft_alias'] = [
'"aircraft_4"', '"aircraft_1"', '"aircraft_2"', '"aircraft_3"',
]
GRAMMAR_DICTIONARY['airline_alias'] = ['"airline_1"']
GRAMMAR_DICTIONARY['airport_base_alias'] = ['"airport_4"', '"airport_1"', '"airport_2"', '"airport_3"', ]
GRAMMAR_DICTIONARY['airport_service_alias'] = ['"airport_service_6"', '"airport_service_1"', '"airport_service_2"',
'"airport_service_3"', '"airport_service_4"', '"airport_service_5"',]
GRAMMAR_DICTIONARY['city_alias'] = [
'"city_6"', '"city_1"', '"city_2"', '"city_3"', '"city_4"', '"city_5"',
]
GRAMMAR_DICTIONARY['class_of_service_alias'] = ['"class_of_service_1"']
GRAMMAR_DICTIONARY['date_day_alias'] = [
'"date_day_5"', '"date_day_1"', '"date_day_2"',
'"date_day_3"', '"date_day_4"',
]
GRAMMAR_DICTIONARY['days_alias'] = [
'"days_10"', '"days_1"', '"days_2"',
'"days_3"', '"days_4"', '"days_5"', '"days_6"', '"days_7"', '"days_8"',
'"days_9"',
]
GRAMMAR_DICTIONARY['equipment_sequence_alias'] = [
'"equipment_sequence_3"', '"equipment_sequence_1"',
'"equipment_sequence_2"',
]
GRAMMAR_DICTIONARY['fare_base_alias'] = [
'"fare_5"', '"fare_1"',
'"fare_2"', '"fare_3"', '"fare_4"',
]
GRAMMAR_DICTIONARY['fare_basis_alias'] = [
'"fare_basis_6"', '"fare_basis_1"', '"fare_basis_2"',
'"fare_basis_3"', '"fare_basis_4"', '"fare_basis_5"',
]
GRAMMAR_DICTIONARY['flight_base_alias'] = [
'"flight_1"', '"flight_2"', '"flight_3"', '"flight_4"',
]
GRAMMAR_DICTIONARY['flight_fare_alias'] = [
'"flight_fare_5"', '"flight_fare_2"', '"flight_fare_3"', '"flight_fare_4"', '"flight_fare_1"'
]
GRAMMAR_DICTIONARY['flight_leg_alias'] = [
'"flight_leg_2"', '"flight_leg_1"'
]
GRAMMAR_DICTIONARY['flight_stop_alias'] = [
'"flight_stop_2"', '"flight_stop_1"',
]
GRAMMAR_DICTIONARY['food_service_alias'] = [
'"food_service_2"', '"food_service_1"'
]
# GRAMMAR_DICTIONARY['ground_service_alias'] = ['"ground_service_1"']
GRAMMAR_DICTIONARY['state_alias'] = [
'"state_4"', '"state_1"', '"state_2"', '"state_3"'
]
GRAMMAR_DICTIONARY['restriction_alias'] = ['"restriction_1"']
# Column Name
GRAMMAR_DICTIONARY['table_columns'] = [
'(aircraft_alias ws "." ws aircraft_columns)',
'("airline_1" ws "." ws airline_columns)',
'(airport_base_alias ws "." ws airport_base_columns)',
'(airport_service_alias ws "." ws airport_service_columns)',
'(city_alias ws "." ws city_columns)',
'("class_of_service_1" ws "." ws class_of_service_columns)',
'(date_day_alias ws "." ws date_day_columns)',
'(days_alias ws "." ws days_columns)',
'(equipment_sequence_alias ws "." ws equipment_sequence_columns)',
'(fare_base_alias ws "." ws fare_base_columns)',
'(fare_basis_alias ws "." ws fare_basis_columns)',
'(flight_base_alias ws "." ws flight_base_columns)',
'(flight_fare_alias ws "." ws flight_fare_columns)',
'(flight_leg_alias ws "." ws flight_leg_columns)',
'(flight_stop_alias ws "." ws flight_stop_columns)',
'(food_service_alias ws "." ws food_service_columns)',
'("ground_service_1" ws "." ws ground_service_columns)',
'(state_alias ws "." ws state_colums)',
'("restriction_1" ws "." ws restriction_columns)',
]
GRAMMAR_DICTIONARY['aircraft_columns'] = [
'"capacity"', '"manufacturer"', '"basic_type"', '"propulsion"', '"aircraft_code_base"']
GRAMMAR_DICTIONARY['airline_columns'] = [
'"airline_name"', '"airline_code"', '"note"']
GRAMMAR_DICTIONARY['airport_base_columns'] = ['"state_code"', '"airport_code"', '"airport_location"', '"minimum_connect_time"', '"time_zone_code"', '"country_name"', '"airport_name"']
GRAMMAR_DICTIONARY['airport_service_columns'] = [
'"miles_distant"', '"minutes_distant"', '"airport_code"', '"city_code"']
GRAMMAR_DICTIONARY['city_columns'] = ['"city_code"', '"time_zone_code"', '"country_name"', '"city_name"', '"state_code"']
GRAMMAR_DICTIONARY['class_of_service_columns'] = ['"rank"', '"class_description"', '"booking_class"']
GRAMMAR_DICTIONARY['date_day_columns'] = [
'"day_name"', '"day_number"', '"month_number"', '"year"']
GRAMMAR_DICTIONARY['days_columns'] = ['"days_code"', '"day_name"']
GRAMMAR_DICTIONARY['equipment_sequence_columns'] = [
'"aircraft_code_base"', '"aircraft_code_sequence"']
GRAMMAR_DICTIONARY['flight_base_columns'] = ['"connections"', '"meal_code"', '"flight_days"', '"flight_id"', '"from_airport"', '"flight_number"',
'"airline_code"', '"to_airport"', '"departure_time"', '"aircraft_code_sequence"', '"time_elapsed"', '"stops"', '"arrival_time"']
GRAMMAR_DICTIONARY['fare_base_columns'] = ['"restriction_code"', '"fare_id"', '"from_airport"', '"flight_id"',
'"fare_airline"', '"fare_basis_code"', '"to_airport"', '"one_direction_cost"', '"round_trip_required"', '"round_trip_cost"']
GRAMMAR_DICTIONARY['fare_basis_columns'] = ['"booking_class"', '"economy"',
'"basis_days"', '"fare_basis_code"', '"class_type"', '"discounted"']
GRAMMAR_DICTIONARY['flight_fare_columns'] = ['"fare_id"', '"flight_id"']
GRAMMAR_DICTIONARY['flight_leg_columns'] = ['"leg_flight"', '"flight_id"']
GRAMMAR_DICTIONARY['flight_stop_columns'] = [
'"arrival_time"', '"flight_id"', '"stop_number"', '"stop_airport"']
GRAMMAR_DICTIONARY['food_service_columns'] = ['"meal_code"',
'"meal_description"', '"meal_number"', '"compartment"']
GRAMMAR_DICTIONARY['ground_service_columns'] = [
'"ground_fare"', '"airport_code"', '"transport_type"', '"city_code"']
GRAMMAR_DICTIONARY['state_colums'] = ['"state_code"', '"state_name"']
GRAMMAR_DICTIONARY['restriction_columns'] = ['"advance_purchase"', '"stopovers"', '"minimum_stay"',
'"application"', '"maximum_stay"', '"saturday_stay_required"', '"restriction_code"', '"no_discounts"']
# Column Values
GRAMMAR_DICTIONARY['concrete_value_expr'] = [
'(days_alias ws "." ws "days_code" ws binaryop ws days_code_value)',
'(days_alias ws "." ws "day_name" ws binaryop ws day_name_value)',
'(fare_basis_alias ws "." ws "fare_basis_code" ws binaryop ws fare_basis_code_value)',
'(fare_basis_alias ws "." ws "class_type" ws binaryop ws class_type_value)',
'(fare_basis_alias ws "." ws "economy" ws binaryop ws economy_value)',
'(fare_basis_alias ws "." ws "discounted" ws binaryop ws discounted_value)',
'(fare_basis_alias ws "." ws "booking_class" ws binaryop ws booking_class_value)',
'(fare_base_alias ws "." ws "round_trip_required" ws binaryop ws round_trip_required_value)',
'(fare_base_alias ws "." ws "fare_basis_code" ws binaryop ws fare_basis_code_value)',
'(aircraft_alias ws "." ws "manufacturer" ws binaryop ws manufacturer_value)',
'(aircraft_alias ws "." ws "basic_type" ws binaryop ws basic_type_value)',
'(aircraft_alias ws "." ws "aircraft_code_base" ws binaryop ws aircraft_code_value)',
'(aircraft_alias ws "." ws "propulsion" ws binaryop ws propulsion_value)',
'(airport_base_alias ws "." ws "airport_code" ws binaryop ws airport_code_value)',
'(airport_base_alias ws "." ws "airport_name" ws binaryop ws airport_name_value)',
'(city_alias ws "." ws "city_name" ws binaryop ws city_name_value)',
'(city_alias ws "." ws "country_name" ws binaryop ws country_name_value)',
'(city_alias ws "." ws "state_code" ws binaryop ws state_code_value)',
'(state_alias ws "." ws "state_code" ws binaryop ws state_code_value)',
'(state_alias ws "." ws "state_name" ws binaryop ws state_name_value)',
'(flight_base_alias ws "." ws "airline_code" ws binaryop ws airline_code_value)',
'(flight_base_alias ws "." ws "flight_days" ws binaryop ws flight_days_value)',
'(flight_base_alias ws "." ws "meal_code" ws binaryop ws meal_code_value)',
'("airline_1" ws "." ws "airline_code" ws binaryop ws airline_code_value)',
'("airline_1" ws "." ws "airline_name" ws binaryop ws airline_name_value)',
'("ground_service_1" ws "." ws "transport_type" ws binaryop ws transport_type_value)',
'(food_service_alias ws "." ws "meal_description" ws binaryop ws meal_description_value)',
'(food_service_alias ws "." ws "meal_code" ws binaryop ws meal_code_value)',
'(airport_service_alias ws "." ws "airport_code" ws binaryop ws airport_code_value)',
'("restriction_1" ws "." ws "restriction_code" ws binaryop ws restriction_code_value)',
'("class_of_service_1" ws "." ws "booking_class" ws binaryop ws booking_class_value)',
# Numerical
'(date_day_alias ws "." ws "year" ws binaryop ws year_value)',
'(date_day_alias ws "." ws "month_number" ws binaryop ws month_number_value)',
'(date_day_alias ws "." ws "day_number" ws binaryop ws day_number_value)',
'(flight_stop_alias ws "." ws "arrival_time" ws binaryop ws time_value)',
'(flight_base_alias ws "." ws "flight_number" ws binaryop ws flight_number_value)',
'(flight_base_alias ws "." ws "connections" ws binaryop ws connections_value)',
'(flight_base_alias ws "." ws "arrival_time" ws binaryop ws time_value)',
'(flight_base_alias ws "." ws "departure_time" ws binaryop ws time_value)',
'(flight_base_alias ws "." ws "stops" ws binaryop ws stops_value)',
'(flight_base_alias ws "." ws "time_elapsed" ws binaryop ws time_elapsed_value)',
'(fare_base_alias ws "." ws "one_direction_cost" ws binaryop ws one_direction_cost_value)',
'(fare_base_alias ws "." ws "round_trip_cost" ws binaryop ws round_trip_cost_value)',
]
GRAMMAR_DICTIONARY['airport_code_value'] = ['"\'iah\'"', '"\'sfo\'"', '"\'tpa\'"', '"\'jfk\'"', '"\'cvg\'"', '"\'dfw\'"', '"\'mco\'"', '"\'phl\'"', '"\'lga\'"', '"\'lax\'"', '"\'yyz\'"', '"\'bwi\'"', '"\'oak\'"',
'"\'slc\'"', '"\'ont\'"', '"\'pit\'"', '"\'hou\'"', '"\'mia\'"', '"\'den\'"', '"\'bur\'"', '"\'ord\'"', '"\'dtw\'"', '"\'mke\'"', '"\'bna\'"', '"\'iad\'"', '"\'bos\'"', '"\'atl\'"', '"\'ewr\'"', '"\'dal\'"']
GRAMMAR_DICTIONARY['city_name_value'] = ['"\'salt lake city\'"', '"\'san jose\'"', '"\'newark\'"', '"\'montreal\'"', '"\'st. paul\'"', '"\'ontario\'"', '"\'orlando\'"', '"\'minneapolis\'"', '"\'westchester county\'"', '"\'memphis\'"', '"\'chicago\'"', '"\'tampa\'"', '"\'pittsburgh\'"', '"\'toronto\'"', '"\'houston\'"', '"\'detroit\'"', '"\'new york\'"', '"\'cleveland\'"', '"\'columbus\'"', '"\'nashville\'"', '"\'tacoma\'"', '"\'philadelphia\'"',
'"\'las vegas\'"', '"\'denver\'"', '"\'san diego\'"', '"\'miami\'"', '"\'indianapolis\'"', '"\'burbank\'"', '"\'cincinnati\'"', '"\'fort worth\'"', '"\'milwaukee\'"', '"\'boston\'"', '"\'baltimore\'"', '"\'dallas\'"', '"\'seattle\'"', '"\'atlanta\'"', '"\'kansas city\'"', '"\'los angeles\'"', '"\'phoenix\'"', '"\'oakland\'"', '"\'san francisco\'"', '"\'washington\'"', '"\'st. louis\'"', '"\'charlotte\'"', '"\'st. petersburg\'"', '"\'long beach\'"']
GRAMMAR_DICTIONARY['round_trip_required_value'] = ['"\'no\'"', '"\'yes\'"']
GRAMMAR_DICTIONARY['airline_code_value'] = ['"\'ua\'"', '"\'cp\'"', '"\'ea\'"', '"\'ac\'"', '"\'ml\'"', '"\'as\'"', '"\'lh\'"', '"\'dl\'"',
'"\'nw\'"', '"\'us\'"', '"\'yx\'"', '"\'tw\'"', '"\'wn\'"', '"\'ff\'"', '"\'nx\'"', '"\'kw\'"', '"\'co\'"', '"\'hp\'"', '"\'aa\'"']
GRAMMAR_DICTIONARY['day_name_value'] = ['"\'monday\'"', '"\'friday\'"', '"\'tuesday\'"',
'"\'thursday\'"', '"\'sunday\'"', '"\'saturday\'"', '"\'wednesday\'"']
GRAMMAR_DICTIONARY['aircraft_code_value'] = ['"\'757\'"', '"\'m80\'"', '"\'733\'"', '"\'j31\'"',
'"\'73s\'"', '"\'72s\'"', '"\'734\'"', '"\'d9s\'"', '"\'f28\'"', '"\'100\'"', '"\'d10\'"']
GRAMMAR_DICTIONARY['meal_code_value'] = ['"\'%s/%\'"', '"\'s\'"',
'"\'bb\'"', '"\'b\'"', '"\'s/\'"', '"\'sd/d\'"', '"\'ls\'"', '"\'d/s\'"']
GRAMMAR_DICTIONARY['state_name_value'] = ['"\'nevada\'"', '"\'ohio\'"', '"\'michigan\'"', '"\'minnesota\'"', '"\'new jersey\'"', '"\'colorado\'"', '"\'indiana\'"', '"\'california\'"',
'"\'washington\'"', '"\'georgia\'"', '"\'north carolina\'"', '"\'texas\'"', '"\'new york\'"', '"\'quebec\'"', '"\'utah\'"', '"\'missouri\'"', '"\'arizona\'"', '"\'florida\'"', '"\'tennessee\'"']
GRAMMAR_DICTIONARY['class_type_value'] = ['"\'first\'"',
'"\'coach\'"', '"\'business\'"', '"\'thrift\'"']
GRAMMAR_DICTIONARY['transport_type_value'] = ['"\'rapid transit\'"',
'"\'rental car\'"', '"\'air taxi operation\'"', '"\'taxi\'"', '"\'limousine\'"',
'"\'%limousine%\'"', '"\'%taxi%\'"']
GRAMMAR_DICTIONARY['state_code_value'] = ['"\'tx\'"',
'"\'ca\'"', '"\'asd\'"', '"\'dc\'"', '"\'ga\'"']
GRAMMAR_DICTIONARY['economy_value'] = ['"\'yes\'"', '"\'no\'"']
GRAMMAR_DICTIONARY['fare_basis_code_value'] = ['"\'b\'"', '"\'bh\'"', '"\'m\'"', '"\'c\'"', '"\'qx\'"',
'"\'h\'"', '"\'qw\'"', '"\'y\'"', '"\'q\'"', '"\'qo\'"', '"\'fn\'"', '"\'f\'"', '"\'yn\'"']
GRAMMAR_DICTIONARY['booking_class_value'] = [
'"\'c\'"', '"\'b\'"', '"\'h\'"', '"\'y\'"', '"\'q\'"', '"\'f\'"', '"\'yn\'"']
GRAMMAR_DICTIONARY['meal_description_value'] = [
'"\'lunch\'"', '"\'breakfast\'"', '"\'snack\'"', '"\'dinner\'"']
GRAMMAR_DICTIONARY['basic_type_value'] = ['"\'757\'"', '"\'747\'"',
'"\'767\'"', '"\'727\'"', '"\'dc10\'"', '"\'f28\'"', '"\'737\'"']
GRAMMAR_DICTIONARY['manufacturer_value'] = ['"\'boeing\'"']
GRAMMAR_DICTIONARY['days_code_value'] = ['"\'sa\'"', '"\'su\'"']
GRAMMAR_DICTIONARY['restriction_code_value'] = [
'"\'ap/80\'"', '"\'ap/68\'"', '"\'ap/57\'"', '"\'ap/55\'"', '"\'ap/58\'"', '"\'ap\'"']
GRAMMAR_DICTIONARY['flight_days_value'] = ['"\'daily\'"']
GRAMMAR_DICTIONARY['country_name_value'] = ['"\'canada\'"', '"\'usa\'"']
GRAMMAR_DICTIONARY['airline_name_value'] = [
'"\'united\'"', '"\'continental airlines\'"', '"\'%canadian airlines international%\'"', '"\'%canadian airlines%\'"', '"\'%delta%\'"', '"\'usair\'"']
GRAMMAR_DICTIONARY['propulsion_value'] = ['"\'jet\'"', '"\'turboprop\'"']
GRAMMAR_DICTIONARY['airport_name_value'] = [
'"\'general mitchell international\'"', '"\'%canadian airlines international%\'"', '"\'stapleton international\'"', '"\'%pearson%\'"', '"\'%lester%\'"', '"\'%stapleton%\'"']
GRAMMAR_DICTIONARY['discounted_value'] = ['"\'yes\'"', '"\'no\'"']
# Numerical Value
GRAMMAR_DICTIONARY['year_value'] = [
'"1991"', '"1993"', '"1994"', '"1990"', '"1992"']
GRAMMAR_DICTIONARY['time_value'] = ['"1619"', '"815"', '"2220"', '"2010"', '"1524"', '"1205"', '"1159"', '"1220"', '"1620"', '"705"', '"2330"', '"1045"', '"1401"', '"1024"', '"400"', '"755"', '"838"', '"823"', '"1430"', '"1017"', '"930"',
'"1000"', '"2159"', '"301"', '"2134"', '"645"', '"718"', '"1310"', '"1330"', '"1425"', '"1940"', '"1923"', '"1628"', '"1745"', '"1845"','"830"','"730"','"720"',
'"555"','"500"', '"1505"', '"2226"', '"1759"', '"300"', '"1800"', '"650"', '"601"', '"600"', '"845"', '"819"', '"1200"', '"2200"', '"2400"', '"1930"', '"430"', '"530"', '"41"',
'"2230"', '"2358"', '"2359"', '"2300"', '"1900"', '"1615"', '"1530"', '"1630"', '"2000"', '"1830"', '"630"', '"2100"', '"2030"', '"1130"', '"1715"',
'"1110"', '"1645"', '"800"', '"1230"', '"1730"', '"1700"', '"1030"', '"1850"', '"1500"', '"1600"', '"1400"', '"1300"', '"0"', '"200"', '"2130"', '"1115"',
'"1245"', '"1145"', '"1100"', '"900"', '"1410"', '"700"', '"100"', '"230"', '"30"', '"1"']
GRAMMAR_DICTIONARY['month_number_value'] = ['"12"', '"4"', '"6"',
'"9"', '"8"', '"11"', '"10"', '"1"', '"3"', '"7"', '"5"', '"2"']
GRAMMAR_DICTIONARY['day_number_value'] = ['"26"', '"28"', '"23"', '"24"', '"27"', '"25"', '"29"', '"22"', '"21"', '"20"', '"2"',
'"16"', '"11"','"13"', '"12"', '"14"', '"17"', '"15"', '"19"', '"18"', '"10"', '"1"',
'"31"', '"30"', '"3"', '"4"', '"8"', '"7"', '"5"', '"6"', '"9"']
GRAMMAR_DICTIONARY['stops_value'] = ['"0"', '"3"', '"1"']
GRAMMAR_DICTIONARY['flight_number_value'] = ['"297"', '"271"', '"2153"', '"229"', '"269"', '"270"', '"1222"', '"766"', '"505"', '"402"', '"343"', '"19"', '"417"', '"137338"', '"71"', '"324"', '"139"', '"1039"', '"771"', '"3724"', '"746"', '"217"', '"210"', '"212"', '"21"', '"852"', '"459"',
'"1291"', '"296"', '"311"', '"323"', '"1765"', '"279"', '"315"', '"497"', '"163"', '"1083"', '"1209"', '"98"', '"345"', '"928"', '"106"', '"825"', '"82"', '"4400"', '"352"', '"415"', '"3357"', '"838"', '"539"', '"281"', '"813"', '"257"', '"201"']
GRAMMAR_DICTIONARY['round_trip_cost_value'] = [
'"466"', '"300"', '"932"', '"1288"', '"1100"', '"1500"', '"1000"', '"100"']
GRAMMAR_DICTIONARY['connections_value'] = ['"0"', '"1"']
GRAMMAR_DICTIONARY['one_direction_cost_value'] = [
'"466"', '"400"', '"329"', '"300"', '"150"', '"200"', '"416"', '"500"']
GRAMMAR_DICTIONARY['time_elapsed_value'] = ['"60"', '"540"']
COPY_TERMINAL_SET = {'year_value', 'time_value', 'month_number_value', 'day_number_value', 'stops_value', 'flight_number_value', 'round_trip_cost_value',
'connections_value', 'one_direction_cost_value', 'time_elapsed_value', 'airport_code_value', 'city_name_value', 'round_trip_required_value',
'airline_code_value', 'day_name_value', 'aircraft_code_value', 'meal_code_value', 'state_name_value', 'class_type_value', 'transport_type_value',
'state_code_value', 'economy_value', 'fare_basis_code_value', 'booking_class_value', 'meal_description_value', 'basic_type_value', 'manufacturer_value',
'days_code_value', 'restriction_code_value', 'flight_days_value', 'country_name_value', 'airline_name_value', 'propulsion_value', 'airport_name_value',
'discounted_value'}
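# Illustrative only (not drawn from the dataset): a query of the shape the rules
# above accept, using the fixed table aliases and quoted value terminals:
#   select distinct flight_1.flight_id
#   from flight_base flight_1 , airport_service airport_service_1 , city city_1
#   where flight_1.from_airport = airport_service_1.airport_code
#   and airport_service_1.city_code = city_1.city_code
#   and city_1.city_name = 'boston'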
| 22,697 | 67.781818 | 493 |
py
|
Unimer
|
Unimer-master/grammars/atis/lambda_calculus_grammar_4.py
|
# coding=utf8
ROOT_RULE = 'statement -> [expression]'
GRAMMAR_DICTIONARY = {}
GRAMMAR_DICTIONARY['statement'] = ['(expression ws)']
GRAMMAR_DICTIONARY['expression'] = [
'(constant)', '(application)', '(abstraction)', '(variable)']
GRAMMAR_DICTIONARY['abstraction'] = [
'("(" ws "_lambda" wsp variable_definition wsp expression ws ")")']
GRAMMAR_DICTIONARY['application'] = ['("(" ws function ws ")")']
GRAMMAR_DICTIONARY['function'] = ['binary_predicate',
'unit_predicate', 'entity_function', 'meta_predicate', ]
GRAMMAR_DICTIONARY['unit_predicate'] = [
'("_weekday" wsp expression)',
'("_meal:_t" wsp expression)',
'("_booking_class:_t" wsp expression)',
'("_fare" wsp expression)',
'("_flight" wsp expression)',
'("_aircraft_code:_t" wsp expression)',
'("_economy" wsp expression)',
'("_has_stops" wsp expression)',
'("_oneway" wsp expression)',
'("_airport" wsp expression)',
'("_taxi" wsp expression)',
'("_rapid_transit" wsp expression)',
'("_airline" wsp expression)',
'("_fare_basis_code" wsp expression)',
'("_tonight" wsp expression)',
'("_today" wsp expression)',
'("_connecting" wsp expression)',
'("_overnight" wsp expression)',
'("_round_trip" wsp expression)',
'("_day_after_tomorrow" wsp expression)',
'("_discounted" wsp expression)',
'("_time_zone_code" wsp expression)',
'("_limousine" wsp expression)',
'("_daily" wsp expression)',
'("_turboprop" wsp expression)',
'("_air_taxi_operation" wsp expression)',
'("_has_meal" wsp expression)',
'("_minimum_connection_time" wsp expression)',
'("_tomorrow_arrival" wsp expression)',
'("_tomorrow" wsp expression)',
'("_aircraft" wsp expression)',
'("_rental_car" wsp expression)',
'("_jet" wsp expression)',
'("_city" wsp expression)',
'("_class_of_service" wsp expression)',
'("_ground_transport" wsp expression)',
'("_nonstop" wsp expression)',
'("_meal_code" wsp expression)',
]
GRAMMAR_DICTIONARY['binary_predicate'] = [
'("_month_arrival" wsp expression wsp expression)',
'("_stops" wsp expression wsp expression)',
'("_day_number" wsp expression wsp expression)',
'("_meal" wsp expression wsp expression)',
'("_approx_return_time" wsp expression wsp expression)',
'("_booking_class" wsp expression wsp expression)',
'("_approx_arrival_time" wsp expression wsp expression)',
'("_fare" wsp expression wsp expression)',
'("_aircraft_basis_type" wsp expression wsp expression)',
'("_aircraft_code" wsp expression wsp expression)',
'("_departure_time" wsp expression wsp expression)',
'("_airport" wsp expression wsp expression)',
'("_flight_number" wsp expression wsp expression)',
'("_loc:_t" wsp expression wsp expression)',
'("_airline" wsp expression wsp expression)',
'("_during_day" wsp expression wsp expression)',
'("_manufacturer" wsp expression wsp expression)',
'("_fare_basis_code" wsp expression wsp expression)',
'("_approx_departure_time" wsp expression wsp expression)',
'("_arrival_time" wsp expression wsp expression)',
'("_services" wsp expression wsp expression)',
'("_next_days" wsp expression wsp expression)',
'("_from" wsp expression wsp expression)',
'("_stop" wsp expression wsp expression)',
'("_year" wsp expression wsp expression)',
'("_day_return" wsp expression wsp expression)',
'("_class_type" wsp expression wsp expression)',
'("_day_arrival" wsp expression wsp expression)',
'("_during_day_arrival" wsp expression wsp expression)',
'("_days_from_today" wsp expression wsp expression)',
'("_from_airport" wsp expression wsp expression)',
'("_to_city" wsp expression wsp expression)',
'("_day_number_arrival" wsp expression wsp expression)',
'("_aircraft" wsp expression wsp expression)',
'("_month" wsp expression wsp expression)',
'("_day_number_return" wsp expression wsp expression)',
'("_day" wsp expression wsp expression)',
'("_before_day" wsp expression wsp expression)',
'("_to" wsp expression wsp expression)',
'("_time_elapsed" wsp expression wsp expression)',
'("_month_return" wsp expression wsp expression)',
'("_after_day" wsp expression wsp expression)',
'("_meal_code" wsp expression wsp expression)',
]
GRAMMAR_DICTIONARY['entity_function'] = [
'("_stops" wsp expression)',
'("_stop_arrival_time" wsp expression)',
'("_meal" wsp expression)',
'("_booking_class" wsp expression)',
'("_fare" wsp expression)',
'("_aircraft_code" wsp expression)',
'("_minutes_distant" wsp expression wsp expression)',
'("_minutes_distant" wsp expression)',
'("_departure_time" wsp expression)',
'("_ground_fare" wsp expression)',
'("_flight_number" wsp expression)',
'("_arrival_time" wsp expression)',
'("_airline:_e" wsp expression)',
'("_restriction_code" wsp expression)',
'("_capacity" wsp expression)',
'("_cost" wsp expression)',
'("_airline_name" wsp expression)',
'("_miles_distant" wsp expression wsp expression)',
'("_miles_distant" wsp expression)',
'("_time_elapsed" wsp expression)',
'("_abbrev" wsp expression)',
]
GRAMMAR_DICTIONARY['meta_predicate'] = [
'("_the" wsp variable wsp application)',
'("_>" wsp expression wsp expression)',
'("_=" wsp expression wsp expression)',
'("_<" wsp expression wsp expression)',
'("_named" wsp expression wsp expression)',
'("_max" wsp variable wsp application)',
'("_min" wsp variable wsp application)',
'("_not" wsp application)',
'("_or" wsp application wsp polyvariadic_expression)',
'("_and" wsp application wsp polyvariadic_expression)',
'("_argmax" wsp variable wsp application wsp application)',
'("_argmin" wsp variable wsp application wsp application)',
'("_sum" wsp variable wsp application wsp application)',
'("_equals" wsp expression wsp expression)',
'("_equals:_t" wsp expression wsp expression)',
'("_exists" wsp variable wsp application)',
'("_count" wsp variable wsp application)',
]
GRAMMAR_DICTIONARY['polyvariadic_expression'] = [
'(application ws polyvariadic_expression)', '""']
GRAMMAR_DICTIONARY['variable_definition'] = [
'(variable ":e")', '(variable ":i")']
GRAMMAR_DICTIONARY['variable'] = ['"$v0"', '"$v1"', '"$v2"', '"$v3"']
GRAMMAR_DICTIONARY['constant'] = [
# Dollars
'"100:_do"', '"1000:_do"', '"466:_do"', '"416:_do"', '"124:_do"', '"329:_do"', '"1100:_do"', '"415:_do"',
'"200:_do"', '"150:_do"', '"932:_do"', '"500:_do"', '"1288:_do"', '"300:_do"', '"400:_do"', '"1500:_do"',
# City
'"nashville:_ci"', '"indianapolis:_ci"', '"san_diego:_ci"', '"long_beach:_ci"', '"atlanta:_ci"', '"kansas_city:_ci"',
'"miami:_ci"', '"st_louis:_ci"', '"columbus:_ci"', '"toronto:_ci"', '"las_vegas:_ci"', '"burbank:_ci"', '"cleveland:_ci"',
'"tacoma:_ci"', '"st_petersburg:_ci"', '"memphis:_ci"', '"denver:_ci"', '"dallas:_ci"', '"detroit:_ci"', '"oakland:_ci"',
'"baltimore:_ci"', '"pittsburgh:_ci"', '"philadelphia:_ci"', '"milwaukee:_ci"', '"salt_lake_city:_ci"', '"san_jose:_ci"',
'"tampa:_ci"', '"orlando:_ci"', '"chicago:_ci"', '"seattle:_ci"', '"new_york:_ci"', '"san_francisco:_ci"', '"boston:_ci"',
'"washington:_ci"', '"cincinnati:_ci"', '"charlotte:_ci"', '"newark:_ci"', '"westchester_county:_ci"', '"los_angeles:_ci"',
'"fort_worth:_ci"', '"minneapolis:_ci"', '"ontario:_ci"', '"montreal:_ci"', '"st_paul:_ci"', '"houston:_ci"', '"phoenix:_ci"',
# Airline
'"wn:_al"', '"ml:_al"', '"cp:_al"', '"nw:_al"', '"yx:_al"', '"ac:_al"', '"dl:_al"', '"kw:_al"', '"delta:_al"',
'"as:_al"', '"tw:_al"', '"co:_al"', '"ff:_al"', '"ea:_al"', '"ua:_al"', '"canadian_airlines_international:_al"',
'"hp:_al"', '"lh:_al"', '"nx:_al"', '"usair:_al"', '"aa:_al"', '"us:_al"',
# Time
'"1200:_ti"', '"1628:_ti"', '"1830:_ti"', '"823:_ti"', '"1245:_ti"', '"1524:_ti"', '"200:_ti"', '"1615:_ti"',
'"1230:_ti"', '"705:_ti"', '"1045:_ti"', '"1700:_ti"', '"1115:_ti"', '"1645:_ti"', '"1730:_ti"', '"815:_ti"',
'"0:_ti"', '"500:_ti"', '"1205:_ti"', '"1940:_ti"', '"2000:_ti"', '"1400:_ti"', '"1130:_ti"', '"2200:_ti"',
'"645:_ti"', '"718:_ti"', '"2220:_ti"', '"600:_ti"', '"630:_ti"', '"800:_ti"', '"838:_ti"', '"1330:_ti"',
'"845:_ti"', '"1630:_ti"', '"1715:_ti"', '"2010:_ti"', '"1000:_ti"', '"1619:_ti"', '"2100:_ti"', '"1505:_ti"',
'"2400:_ti"', '"1923:_ti"', '"1:_ti"', '"1145:_ti"', '"2300:_ti"', '"1620:_ti"', '"2023:_ti"', '"2358:_ti"',
'"1500:_ti"', '"1815:_ti"', '"1425:_ti"', '"720:_ti"', '"1024:_ti"', '"1600:_ti"', '"100:_ti"', '"1310:_ti"',
'"1300:_ti"', '"700:_ti"', '"650:_ti"', '"1800:_ti"', '"1110:_ti"', '"1410:_ti"', '"1030:_ti"', '"1900:_ti"',
'"1017:_ti"', '"1430:_ti"', '"1850:_ti"', '"900:_ti"', '"1930:_ti"', '"1133:_ti"', '"1220:_ti"', '"2226:_ti"',
'"1100:_ti"', '"819:_ti"', '"755:_ti"', '"2134:_ti"', '"555:_ti"',
# Meal
'"snack:_me"', '"lunch:_me"', '"dinner:_me"', '"breakfast:_me"',
# Flight Number
'"838:_fn"', '"1059:_fn"', '"417:_fn"', '"323:_fn"', '"311:_fn"', '"137338:_fn"', '"315:_fn"', '"825:_fn"',
'"345:_fn"', '"270:_fn"', '"271:_fn"', '"4400:_fn"', '"296:_fn"', '"1765:_fn"', '"343:_fn"', '"1222:_fn"',
'"217:_fn"', '"459:_fn"', '"279:_fn"', '"1083:_fn"', '"324:_fn"', '"746:_fn"', '"281:_fn"', '"269:_fn"',
'"98:_fn"', '"212:_fn"', '"505:_fn"', '"852:_fn"', '"82:_fn"', '"352:_fn"', '"928:_fn"', '"19:_fn"',
'"139:_fn"', '"415:_fn"', '"539:_fn"', '"3357:_fn"', '"813:_fn"', '"257:_fn"', '"297:_fn"', '"1055:_fn"',
'"405:_fn"', '"201:_fn"', '"71:_fn"', '"1291:_fn"', '"402:_fn"', '"771:_fn"', '"106:_fn"', '"1039:_fn"',
'"210:_fn"', '"2153:_fn"', '"3724:_fn"', '"1209:_fn"', '"21:_fn"',
# Airport
'"ewr:_ap"', '"jfk:_ap"', '"pit:_ap"', '"oak:_ap"', '"bur:_ap"', '"las:_ap"', '"lga:_ap"', '"den:_ap"',
'"mco:_ap"', '"dallas:_ap"', '"dfw:_ap"', '"phx:_ap"', '"slc:_ap"', '"iad:_ap"', '"sfo:_ap"', '"ont:_ap"',
'"iah:_ap"', '"ord:_ap"', '"mia:_ap"', '"cvg:_ap"', '"phl:_ap"', '"tpa:_ap"', '"dtw:_ap"', '"yyz:_ap"',
'"ind:_ap"', '"atl:_ap"', '"mke:_ap"', '"hou:_ap"', '"bos:_ap"', '"dal:_ap"', '"bwi:_ap"', '"bna:_ap"',
'"stapelton:_ap"', '"lax:_ap"',
# rc
'"b:_rc"', '"ap_55:_rc"', '"ap_57:_rc"', '"s_:_rc"', '"sd_d:_rc"', '"ap_80:_rc"', '"d_s:_rc"',
'"ap_58:_rc"', '"ls:_rc"', '"ap:_rc"', '"s:_rc"', '"ap_68:_rc"',
# class type
'"thrift:_cl"', '"business:_cl"', '"first:_cl"', '"coach:_cl"',
# Aircraft Code
'"dc10:_ac"', '"j31:_ac"', '"734:_ac"', '"73s:_ac"', '"72s:_ac"', '"100:_ac"', '"757:_ac"', '"d9s:_ac"',
'"d10:_ac"', '"727:_ac"', '"m80:_ac"', '"747:_ac"', '"f28:_ac"', '"737:_ac"', '"733:_ac"', '"767:_ac"',
# day name
'"monday:_da"', '"thursday:_da"', '"saturday:_da"', '"friday:_da"', '"sunday:_da"', '"wednesday:_da"', '"tuesday:_da"',
# day number
'"12:_dn"', '"18:_dn"', '"19:_dn"', '"31:_dn"', '"7:_dn"', '"20:_dn"', '"27:_dn"', '"6:_dn"', '"26:_dn"',
'"17:_dn"', '"11:_dn"', '"10:_dn"', '"15:_dn"', '"23:_dn"','"1:_dn"', '"24:_dn"', '"25:_dn"', '"14:_dn"',
'"13:_dn"', '"29:_dn"', '"3:_dn"', '"28:_dn"', '"8:_dn"', '"5:_dn"', '"2:_dn"', '"9:_dn"', '"30:_dn"',
'"16:_dn"', '"4:_dn"', '"22:_dn"', '"21:_dn"',
# Month
'"january:_mn"', '"february:_mn"', '"december:_mn"', '"june:_mn"', '"august:_mn"', '"april:_mn"',
'"october:_mn"', '"november:_mn"', '"july:_mn"', '"may:_mn"', '"march:_mn"', '"september:_mn"',
# Year
'"1991:_yr"', '"1993:_yr"', '"1992:_yr"',
# pd
'"mealtime:_pd"', '"breakfast:_pd"', '"late:_pd"', '"afternoon:_pd"', '"late_evening:_pd"',
'"daytime:_pd"', '"pm:_pd"', '"late_night:_pd"', '"evening:_pd"', '"morning:_pd"', '"early:_pd"',
# fare basis code
'"y:_fb"', '"qx:_fb"', '"m:_fb"', '"fn:_fb"', '"b:_fb"', '"q:_fb"', '"bh:_fb"', '"qo:_fb"', '"h:_fb"',
'"c:_fb"', '"qw:_fb"', '"k:_fb"', '"f:_fb"', '"yn:_fb"',
# State
'"minnesota:_st"', '"florida:_st"', '"nevada:_st"', '"california:_st"', '"arizona:_st"',
# Number
'"2:_i"', '"3:_i"', '"1:_i"',
# aircraft basis type
'"737:_bat"', '"767:_bat"',
# Others
'"9:_hr"', '"boeing:_mf"', '"sa:_dc"',
]
GRAMMAR_DICTIONARY["ws"] = ['~"\s*"i']
GRAMMAR_DICTIONARY["wsp"] = ['~"\s+"i']
COPY_TERMINAL_SET = {"constant"}
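# --- Illustrative sketch (not part of the original grammar module) ---
# The rules above use a parsimonious-style PEG notation, so one plausible way
# to consume GRAMMAR_DICTIONARY is to flatten it into a single grammar string,
# one "nonterminal = alt1 / alt2 / ..." rule per key. The helper name
# `format_grammar_string` is an assumption for illustration only.
def format_grammar_string(grammar_dictionary):
    """Join each nonterminal's alternatives into one PEG rule per line."""
    return '\n'.join(
        '{} = {}'.format(nonterminal, ' / '.join(alternatives))
        for nonterminal, alternatives in grammar_dictionary.items()
    )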
| 12,429 | 52.347639 | 130 |
py
|
Unimer
|
Unimer-master/grammars/atis/typed_funql_grammar.py
|
# coding=utf8
"""
Typed FunQL Grammar
Non-terminals are types of objects.
Each predicate is categorized based on what it returns.
"""
ROOT_RULE = 'statement -> [Query]'
GRAMMAR_DICTIONARY = {}
GRAMMAR_DICTIONARY['statement'] = ['(Query ws)']
GRAMMAR_DICTIONARY['Query'] = [
'("answer(" Predicate ")")'
]
GRAMMAR_DICTIONARY['Predicate'] = [
'Flight', 'Time', 'Airline', 'Airport', 'Aircraft',
'Stop', 'FlightNumber', 'Fare', 'City', 'GroundTransport',
'TimeZone', 'Meta', 'other', 'BookingClass', 'Meal',
]
GRAMMAR_DICTIONARY['Time'] = [
'("_arrival_time(" Flight ")")',
'("_departure_time(" Flight ")")',
'("_stop_arrival_time(" Flight ")")',
'("_time_elapsed(" Flight ")")',
'("_minimum_connection_time(" Airport ")")',
'("_minutes_distant(" GroundTransport ")")',
'("days_code(sa)")'
]
GRAMMAR_DICTIONARY['Stop'] = [
'("_stops(" Flight ")")'
]
GRAMMAR_DICTIONARY['FlightNumber'] = [
'("_flight_number(" Flight ")")',
'("flight_number(" flight_number_value ")")'
]
GRAMMAR_DICTIONARY['Flight'] = [
'("_airline_2(" Airline ")")',
'("_aircraft_2(" Aircraft ")")',
'("_flight" ws "(" ws "all" ws ")")',
'("_connecting(" Flight ")")',
'("_discounted(" Flight ")")',
'("_economy(" Flight ")")',
'("_flight_number_2(" FlightNumber ")")',
'("_flight(" Flight ")")',
'("_from_2(" City ")")',
'("_from_2(" Airport ")")',
'("_has_meal(" Flight ")")',
'("_has_stops(" Flight ")")',
'("_nonstop(" Flight ")")',
'("_oneway(" Flight ")")',
'("_round_trip(" Flight ")")',
'("_round_trip(" Flight ")")',
'("_to_2(" City ")")',
'("_to_2(" Airport ")")',
'("_to_2(" state_name ")")',
'("_jet(" Flight ")")',
'("_turboprop(" Flight ")")',
'("_manufacturer_2(manufacturer(boeing))")',
# Time
'("_day_2(" day ")")',
'("_after_day_2(" day ")")',
'("_before_day_2(" day ")")',
'("_day_arrival_2(" day ")")',
'("_approx_arrival_time_2(" time ")")',
'("_approx_return_time_2(" time ")")',
'("_approx_departure_time_2(" time ")")',
'("_arrival_time_2(" time ")")',
'("_departure_time_2(" time ")")',
'("_daily(" Flight ")")',
'("_day_after_tomorrow(" Flight ")")',
'("_day_number_2(" day_number ")")',
'("_day_number_arrival_2(" day_number ")")',
'("_day_number_return_2(" day_number ")")',
'("_day_return_2(" day ")")',
'("_days_from_today_2(" integer ")")',
'("_during_day_2(" day_period ")")',
'("_during_day_arrival_2(" day_period ")")',
'("_month_2(" month ")")',
'("_month_arrival_2(" month ")")',
'("_month_return_2(" month ")")',
'("_next_days_2(" integer ")")',
'("_overnight(" Flight ")")',
'("_today(" Flight ")")',
'("_tomorrow(" Flight ")")',
'("_tomorrow_arrival(" Flight ")")',
'("_tonight(" Flight ")")',
'("_weekday(" Flight ")")',
'("_year_2(" year ")")',
# Fare
'("_fare_2(" dollar ")")',
'("_fare_basis_code_2(" Fare ")")',
'("_stop_2(" City ")")',
'("_stop_2(" Airport ")")',
'("_stops_2(" integer ")")',
'("_booking_class_2(" class_description ")")',
'("_class_type_2(" class_description ")")',
'("_meal_2(" Meal ")")',
'("_meal_code_2(" Meal ")")',
'("_time_elapsed_2(hour(9))")',
# Meta
'("_<_arrival_time_2(" time ")")',
'("_<_departure_time_2(" time ")")',
'("_<_fare_2(" dollar ")")',
'("_>_arrival_time_2(" time ")")',
'("_>_departure_time_2(" time ")")',
'("_>_departure_time_2(" Flight ")")',
'("_>_capacity_2(" Aircraft ")")',
'("_>_stops_2(" integer ")")',
'("argmax_arrival_time(" Flight ")")',
'("argmax_departure_time(" Flight ")")',
'("argmax_fare(" Flight ")")',
'("argmax_stops(" Flight ")")',
'("argmin_capacity(" Flight ")")',
'("argmin_arrival_time(" Flight ")")',
'("argmin_departure_time(" Flight ")")',
'("argmin_fare(" Flight ")")',
'("argmin_stops(" Flight ")")',
'("argmin_time_elapsed(" Flight ")")',
'("intersection" ws "(" ws Flight ws "," Conjunction ")")',
'("or" ws "(" ws Flight ws "," Conjunction ")")',
'("not" ws "(" Flight ")")'
]
GRAMMAR_DICTIONARY['Conjunction'] = [
'(Predicate ws "," ws Conjunction)',
'(Predicate)'
]
GRAMMAR_DICTIONARY['Fare'] = [
'("_fare(" Flight ")")',
'("_fare_basis_code(" Flight ")")',
'("_fare_basis_code" ws "(" ws "all" ws ")")',
'("fare_basis_code(" fare_basis_code_value ")")',
'("_ground_fare(" GroundTransport ")")',
'("or" ws "(" ws Fare ws "," Fare ")")',
]
GRAMMAR_DICTIONARY['Airline'] = [
'("_airline(" Airline ")")',
'("_abbrev(" Airline ")")',
'("_airline_1(" Flight ")")',
'("_airline_name(" Flight ")")',
'("airline_code(" airline_code_value ")")',
'("_services_2(" City ")")',
'("_services_2(" Airport ")")',
'("argmax_count(" Airline ")")',
'("_airline" ws "(" ws "all" ws ")")',
'("intersection" ws "(" ws Airline ws "," Conjunction ")")',
'("or" ws "(" ws Airline ws "," Conjunction ")")',
'("not" ws "(" Airline ")")'
]
GRAMMAR_DICTIONARY['Aircraft'] = [
'("argmin_capacity(" Aircraft ")")',
'("argmax_capacity(" Aircraft ")")',
'("aircraft_code(" aircraft_code_value ")")',
'("_aircraft_1(" Flight ")")',
'("_airline_2(" Airline ")")',
'("_aircraft_basis_type_2(" basis_type ")")',
'("_aircraft(" Aircraft ")")',
'("_jet(" Aircraft ")")',
'("_turboprop(" Aircraft ")")',
'("_manufacturer_2(manufacturer(boeing))")',
'("_aircraft" ws "(" ws "all" ws ")")',
'("intersection" ws "(" ws Aircraft ws "," Conjunction ")")',
'("or" ws "(" ws Aircraft ws "," Conjunction ")")',
'("not" ws "(" Aircraft ")")'
]
GRAMMAR_DICTIONARY['Airport'] = [
'("_airport(" Airport ")")',
'("_airport_1(" City ")")',
'("_from_1(" Flight ")")',
'("_to_1(" Flight ")")',
'("_stop_1(" Flight ")")',
'("airport_code(" airport_code_value ")")',
'("_loc:_t_2(" City ")")',
'("_loc:_t_2(" state_name ")")',
'("argmin_miles_distant_2(" Airport ")")',
'("_airport" ws "(" ws "all" ws ")")',
'("intersection" ws "(" ws Airport ws "," Conjunction ")")',
'("or" ws "(" ws Airport ws "," Conjunction ")")',
'("not" ws "(" Airport ")")',
'("_services_1(" airline_code ")")',
]
GRAMMAR_DICTIONARY['City'] = [
'("_city(" City ")")',
'("city_name(" city_name_value ")")',
'("_city" ws "(" ws "all" ws ")")',
'("_to_1(" Flight ")")',
'("_from_1(" Flight ")")',
'("_services_1(" airline_code ")")',
'("_loc:_t_1(" Airport ")")',
'("intersection" ws "(" ws City ws "," Conjunction ")")',
]
GRAMMAR_DICTIONARY['BookingClass'] = [
'("_booking_class_1(" Flight ")")',
'("_booking_class:_t" ws "(" ws "all" ws ")")',
'("_class_of_service" ws "(" ws "all" ws ")")'
]
GRAMMAR_DICTIONARY['GroundTransport'] = [
'("_air_taxi_operation(" GroundTransport ")")',
'("_limousine(" GroundTransport ")")',
'("_rapid_transit(" GroundTransport ")")',
'("_rental_car(" GroundTransport ")")',
'("_from_airport_2(" Airport ")")',
'("_from_airport_2(" City ")")',
'("_ground_transport(" GroundTransport ")")',
'("_to_city_2(" City ")")',
'("_taxi(" GroundTransport ")")',
'("_ground_transport" ws "(" ws "all" ws ")")',
'("intersection" ws "(" ws GroundTransport ws "," Conjunction ")")',
'("or" ws "(" ws GroundTransport ws "," Conjunction ")")',
'("_weekday(" GroundTransport ")")',
]
GRAMMAR_DICTIONARY['Meal'] = [
'("_meal(" Flight ")")',
'("_meal_code" ws "(" ws "all" ws ")")',
'("meal_code(" meal_code_value ")")',
'("meal_description(" meal_description_value ")")',
'("intersection" ws "(" ws Meal ws "," Conjunction ")")',
]
GRAMMAR_DICTIONARY['TimeZone'] = [
'("_time_zone_code(" TimeZone ")")',
'("_loc:_t_1(" City ")")',
]
GRAMMAR_DICTIONARY['Meta'] = [
'("_equals(" ws Airline ws "," Airline ws ")")',
'("_equals(" ws Airport ws "," Airport ws ")")',
'("_max(" Fare ")")',
'("_min(" Fare ")")',
'("count(" Flight ")")',
'("count(" Airport ")")',
'("count(" Airline ")")',
'("count(" BookingClass ")")',
'("count(" Fare ")")',
'("count(" City ")")',
'("sum_capacity(" Aircraft ")")',
'("sum_stops(" Flight ")")',
]
GRAMMAR_DICTIONARY['other'] = [
'("_services(" Airline "," City ")")',
'("_capacity(" Aircraft ")")',
'("_capacity(" Flight ")")',
'("_restriction_code(" Flight ")")',
'("_flight_airline(" Flight ")")',
'("_flight_fare(" Flight ")")',
'("_flight_aircraft(" Flight ")")',
'("_fare_time(" Flight ")")',
'("_miles_distant(" ws Airport ws "," ws City ws ")")',
'("_named_1(" Airline ")")',
]
GRAMMAR_DICTIONARY['city_name_value'] = ['"cleveland"', '"milwaukee"', '"detroit"', '"los_angeles"', '"miami"', '"salt_lake_city"', '"ontario"', '"tacoma"', '"memphis"', '"denver"', '"san_francisco"', '"new_york"', '"tampa"', '"washington"', '"westchester_county"', '"boston"', '"newark"', '"pittsburgh"', '"charlotte"', '"columbus"', '"atlanta"', '"oakland"', '"kansas_city"', '"st_louis"', '"nashville"', '"chicago"', '"fort_worth"', '"san_jose"', '"dallas"', '"philadelphia"', '"st_petersburg"', '"baltimore"', '"san_diego"', '"cincinnati"', '"long_beach"', '"phoenix"', '"indianapolis"', '"burbank"', '"montreal"', '"seattle"', '"st_paul"', '"minneapolis"', '"houston"', '"orlando"', '"toronto"', '"las_vegas"']
GRAMMAR_DICTIONARY['basis_type'] = ['("basis_type(" basis_type_value ")")']
GRAMMAR_DICTIONARY['basis_type_value'] = ['"737"', '"767"']
GRAMMAR_DICTIONARY['flight_number_object'] = ['("flight_number(" flight_number_value ")")']
GRAMMAR_DICTIONARY['flight_number_value'] = [
'"1291"', '"345"', '"813"', '"71"', '"1059"', '"212"', '"1209"',
'"281"', '"201"', '"324"', '"19"', '"352"', '"137338"', '"4400"',
'"323"', '"505"', '"825"', '"82"', '"279"', '"1055"', '"296"', '"315"',
'"1765"', '"405"', '"771"', '"106"', '"2153"', '"257"', '"402"',
'"343"', '"98"', '"1039"', '"217"', '"539"', '"459"', '"417"',
'"1083"', '"3357"', '"311"', '"210"', '"139"', '"852"', '"838"',
'"415"', '"3724"', '"21"', '"928"', '"269"', '"270"',
'"297"', '"746"', '"1222"', '"271"'
]
GRAMMAR_DICTIONARY['day'] = ['("day(" day_value ")")']
GRAMMAR_DICTIONARY['day_value'] = ['"monday"', '"wednesday"', '"thursday"', '"tuesday"', '"saturday"', '"friday"', '"sunday"']
GRAMMAR_DICTIONARY['time'] = [
'("time(" time_value ")")',
]
GRAMMAR_DICTIONARY['time_value'] = [
'"1850"', '"1110"', '"2000"', '"1815"', '"1024"', '"1500"',
'"1900"', '"1600"', '"1300"', '"1800"', '"1200"', '"1628"',
'"1830"', '"823"', '"1245"', '"1524"', '"200"', '"1615"',
'"1230"', '"705"', '"1045"', '"1700"', '"1115"', '"1645"',
'"1730"', '"815"', '"0"', '"500"', '"1205"', '"1940"',
'"1400"', '"1130"', '"2200"', '"645"', '"718"', '"2220"',
'"600"', '"630"', '"800"', '"838"', '"1330"', '"845"', '"1630"',
'"1715"', '"2010"', '"1000"', '"1619"', '"2100"', '"1505"',
'"2400"', '"1923"', '"100"', '"1145"', '"2300"', '"1620"',
'"2023"', '"2358"', '"1425"', '"720"', '"1310"', '"700"', '"650"',
'"1410"', '"1030"', '"1900"', '"1017"', '"1430"', '"900"', '"1930"',
'"1133"', '"1220"', '"2226"', '"1100"', '"819"', '"755"', '"2134"', '"555"', '"1"',
]
GRAMMAR_DICTIONARY['day_number'] = ['("day_number(" day_number_value ")")']
GRAMMAR_DICTIONARY['day_number_value'] = ['"13"', '"29"', '"28"', '"22"', '"21"', '"16"', '"30"', '"12"', '"18"', '"19"', '"31"', '"20"', '"27"', '"6"', '"26"', '"17"', '"11"', '"10"', '"15"', '"23"', '"24"', '"25"', '"14"', '"1"', '"3"', '"8"', '"5"', '"2"', '"9"', '"4"', '"7"']
GRAMMAR_DICTIONARY['integer'] = ['("integer(" integer_value ")")']
GRAMMAR_DICTIONARY['integer_value'] = ['"2"', '"1"', '"3"']
GRAMMAR_DICTIONARY['day_period'] = ['("day_period(" day_period_value ")")']
GRAMMAR_DICTIONARY['day_period_value'] = ['"early"', '"afternoon"', '"late_evening"', '"late_night"', '"mealtime"', '"evening"', '"pm"', '"daytime"', '"breakfast"', '"morning"', '"late"']
GRAMMAR_DICTIONARY['month'] = ['("month(" month_value ")")']
GRAMMAR_DICTIONARY['month_value'] = ['"april"', '"august"', '"may"', '"october"', '"june"', '"november"', '"september"', '"february"', '"december"', '"march"', '"july"', '"january"']
GRAMMAR_DICTIONARY['dollar'] = ['("dollar(" dollar_value ")")']
GRAMMAR_DICTIONARY['dollar_value'] = ['"1000"', '"1500"', '"466"', '"1288"', '"300"', '"329"', '"416"', '"124"', '"932"', '"1100"', '"200"', '"500"', '"100"', '"415"', '"150"', '"400"']
GRAMMAR_DICTIONARY['fare_basis_code_value'] = ['"qx"', '"qw"', '"qo"', '"fn"', '"yn"', '"bh"', '"k"', '"b"', '"h"', '"f"', '"q"', '"c"', '"y"', '"m"',]
GRAMMAR_DICTIONARY['class_description'] = ['("class_description(" class_description_value ")")']
GRAMMAR_DICTIONARY['class_description_value'] = ['"thrift"', '"coach"', '"first"', '"business"']
GRAMMAR_DICTIONARY['meal_description_value'] = ['"snack"', '"breakfast"', '"lunch"', '"dinner"']
GRAMMAR_DICTIONARY['meal_code_value'] = ['"ap_58"', '"ap_57"', '"d_s"', '"b"', '"ap_55"', '"s_"', '"sd_d"', '"ls"', '"ap_68"', '"ap_80"', '"ap"', '"s"', ]
GRAMMAR_DICTIONARY['airline_code'] = ['("airline_code(" airline_code_value ")")']
GRAMMAR_DICTIONARY['airline_code_value'] = ['"usair"', '"co"', '"ua"', '"delta"', '"as"', '"ff"', '"canadian_airlines_international"', '"us"', '"nx"', '"hp"', '"aa"', '"kw"', '"ml"', '"nw"', '"ac"', '"tw"', '"yx"', '"ea"', '"dl"', '"wn"', '"lh"', '"cp"']
GRAMMAR_DICTIONARY['airport_code_value'] = ['"dallas"', '"ont"', '"stapelton"', '"bna"', '"bwi"', '"iad"', '"sfo"', '"phl"', '"pit"', '"slc"', '"phx"', '"lax"', '"bur"', '"ind"', '"iah"', '"dtw"', '"las"', '"dal"', '"den"', '"atl"', '"ewr"', '"bos"', '"tpa"', '"jfk"', '"mke"', '"oak"', '"yyz"', '"dfw"', '"cvg"', '"hou"', '"lga"', '"ord"', '"mia"', '"mco"']
GRAMMAR_DICTIONARY['year'] = ['("year(" year_value ")")']
GRAMMAR_DICTIONARY['year_value'] = ['"1991"', '"1993"', '"1992"']
GRAMMAR_DICTIONARY['aircraft_code_value'] = ['"m80"', '"dc10"', '"727"', '"d9s"', '"f28"', '"j31"', '"767"', '"734"', '"73s"', '"747"', '"737"', '"733"', '"d10"', '"100"', '"757"', '"72s"']
GRAMMAR_DICTIONARY['state_name'] = ['("state_name(" state_name_value ")")']
GRAMMAR_DICTIONARY['state_name_value'] = ['"minnesota"', '"florida"', '"arizona"', '"nevada"', '"california"']
GRAMMAR_DICTIONARY["ws"] = ['~"\s*"i']
GRAMMAR_DICTIONARY["wsp"] = ['~"\s+"i']
COPY_TERMINAL_SET = {
'fare_basis_code_value', 'meal_code_value', 'airport_code_value', 'airline_code_value',
'aircraft_code_value', 'city_name_value', 'time_value', 'flight_number_value',
'class_description', 'day_period_value', 'state_name_value',
'day_number_value', 'month_value', 'day_value', 'dollar_value', 'meal_description_value',
'integer_value', 'basis_type_value', 'year_value',
}
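# --- Illustrative sketch (not part of the original grammar module) ---
# A few FunQL strings the rules above are intended to derive; the queries and
# the helper name are assumptions chosen only to exercise the
# Query -> Flight -> City chain.
def example_typed_funql_queries():
    return [
        # "flights from boston": Query -> Flight (_from_2) -> City
        'answer(_from_2(city_name(boston)))',
        # conjunction via intersection(Flight, Conjunction); note that the
        # rule places no whitespace after the comma separator
        'answer(intersection(_flight(all),_to_2(city_name(denver))))',
    ]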
| 14,870 | 45.327103 | 715 |
py
|
Unimer
|
Unimer-master/grammars/atis/atis_gnn_entity_matcher.py
|
# coding=utf8
import os
import re
import copy
import itertools
import collections
import numpy as np
from overrides import overrides
from typing import List, Dict
from allennlp.data.tokenizers import Token, WordTokenizer
from allennlp.data.tokenizers.word_splitter import SpacyWordSplitter
from .atis_entity_extractor import lambda_calculus_entity_extractor, funql_entity_extractor, prolog_entity_extractor
def clean_id(s, id_suffix, strip=None):
true_id = s.replace(' ', '_')
if strip:
for v in strip:
true_id = true_id.replace(v, '').strip()
return '%s:%s' % (true_id, id_suffix)
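# e.g. clean_id('salt lake city', '_ci') -> 'salt_lake_city:_ci' and
# clean_id('st. louis', '_ci', strip=['.']) -> 'st_louis:_ci' (illustrative values)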
def clean_name(s, strip=None, split=None, prefix=None):
if split:
for v in split:
s = s.replace(v, ' ')
if strip:
for v in strip:
s = s.replace(v, '')
if prefix:
s = prefix + s
return s
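# e.g. clean_name('delta air lines, inc.', strip=[', inc.']) -> 'delta air lines'
# and clean_name('y', prefix='code ') -> 'code y' (illustrative values)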
def read_db(db_path, basename, id_col, name_col, id_suffix,
strip_id=None, strip_name=None, split_name=None, prefix_name=None):
filename = os.path.join(db_path, basename)
data = [] # Pairs of (name, id)
with open(filename) as f:
for line in f:
row = [s[1:-1] for s in re.findall('"[^"]*"|[0-9]+', line.strip())]
cur_name = clean_name(row[name_col].lower(), strip=strip_name,
split=split_name, prefix=prefix_name)
cur_id = clean_id(row[id_col].lower(), id_suffix, strip=strip_id)
data.append((cur_name, cur_id))
return data
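# read_db returns (name, id) pairs ready for add_entries; e.g. reading CITY.TAB
# with id_suffix='_ci' is expected to yield pairs like ('boston', 'boston:_ci'),
# though the concrete rows depend on the DB files shipped with the data.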
def print_aligned(a, b, indent=0):
a_toks = []
b_toks = []
for x, y in zip(a, b):
cur_len = max(len(x), len(y))
a_toks.append(x.ljust(cur_len))
b_toks.append(y.ljust(cur_len))
prefix = ' ' * indent
print('%s%s' % (prefix, ' '.join(a_toks)))
print('%s%s' % (prefix, ' '.join(b_toks)))
def parse_entry(line):
"""Parse an entry from the CCG lexicon."""
return tuple(line.strip().split(' :- NP : '))
def strip_unk(w):
# Strip unk:%06d identifiers
m = re.match('^unk:[0-9]{6,}:(.*)$', w)
if m:
return m.group(1)
else:
return w
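# e.g. strip_unk('unk:000123:boston') -> 'boston'; strip_unk('boston') -> 'boston'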
class ATISGNNLambdaCalculusEntityMatcher:
TYPE_DICT = {
'ci': 'city',
'da': 'day',
'al': 'airline',
'ti': 'time',
'pd': 'time of day',
'dn': 'date number',
'mn': 'month',
'ap': 'airport',
'cl': 'class',
'fb': 'fare code',
'fn': 'flight number',
'me': 'meal',
'do': 'dollars',
'rc': 'restrictions',
'ac': 'aircraft',
'yr': 'year',
'mf': 'manufacturer',
'dc': 'dc',
'st': 'state',
'hr': 'hour',
'i': 'stop'
}
DAYS_OF_WEEK = [
(s, '%s:_da' % s)
for s in ('monday', 'tuesday', 'wednesday', 'thursday',
'friday', 'saturday', 'sunday')
]
NEXT_DAYS_OF_WEEK = [('day following next wednesday', 'thursday:_da')]
# For dates
WORD_NUMBERS = [('one', '1:_dn'), ('two', '2:_dn'), ('three', '3:_dn'), ('four', '4:_dn'), ('five', '5:_dn'),
('six', '6:_dn'), ('seven', '7:_dn'), ('eight', '8:_dn'), ('nine', '9:_dn'), ('ten', '10:_dn'),
('eleven', '11:_dn'), ('twelve', '12:_dn'), ('thirteen', '13:_dn'), ('fourteen', '14:_dn'),
('fifteen', '15:_dn'), ('sixteen', '16:_dn'), ('seventeen', '17:_dn'), ('eighteen', '18:_dn'),
('nineteen', '19:_dn'), ('twenty', '20:_dn'), ('twenty one', '21:_dn'),
('twenty two', '22:_dn'),
('twenty three', '23:_dn'), ('twenty four', '24:_dn'), ('twenty five', '25:_dn'),
('twenty six', '26:_dn'), ('twenty seven', '27:_dn'), ('twenty eight', '28:_dn'),
('twenty nine', '29:_dn'), ('thirty', '30:_dn'), ('thirty one', '31:_dn')]
ORDINAL_NUMBERS = [('second', '2:_dn'), ('third', '3:_dn'), ('fourth', '4:_dn'), ('fifth', '5:_dn'),
('sixth', '6:_dn'), ('seventh', '7:_dn'), ('eighth', '8:_dn'), ('ninth', '9:_dn'),
('tenth', '10:_dn'), ('eleventh', '11:_dn'), ('twelfth', '12:_dn'), ('thirteenth', '13:_dn'),
('fourteenth', '14:_dn'), ('fifteenth', '15:_dn'), ('sixteenth', '16:_dn'),
('seventeenth', '17:_dn'), ('eighteenth', '18:_dn'), ('nineteenth', '19:_dn'),
('twentieth', '20:_dn'), ('twenty first', '21:_dn'), ('twenty second', '22:_dn'),
('twenty third', '23:_dn'), ('twenty fourth', '24:_dn'), ('twenty fifth', '25:_dn'),
('twenty sixth', '26:_dn'), ('twenty seventh', '27:_dn'), ('twenty eighth', '28:_dn'),
('twenty ninth', '29:_dn'), ('thirtieth', '30:_dn'),
('thirty first', '31:_dn')]
MEALS = [(m, '%s:_me' % m) for m in ('breakfast', 'lunch', 'dinner', 'snack')]
MEAL_CODES = [('s/', 's_:_rc'), ('sd / d', 'sd_d:_rc'), ('d / s', 'd_s:_rc')]
ST_CITIES = [(m, "%s:_ci" % m.replace(" . ", "_")) for m in ('st . louis', 'st . petersburg', 'st . paul')]
BAT_CODES = [('737', '737:_bat'), ('767', '767:_bat')]
AP_CODES = [('mco', 'mco:_ap'), ('ord', 'ord:_ap')]
AL_CODES = [('us air', 'usair:_al'), ('delta', 'delta:_al'), ('ff', 'ff:_al'),
('canadian airlines international', 'canadian_airlines_international:_al')]
def __init__(self, db_path):
self.entries = collections.OrderedDict()
self.handlers = []
self.unique_word_map = collections.OrderedDict()
self.seen_words = set()
# CCG Lexicon
filename = os.path.join(db_path, 'lexicon.txt')
entries = []
with open(filename) as f:
for line in f:
x, y = line.strip().split(' :- NP : ')
y = y.replace(':', ':_').strip()
entries.append((x, y))
self.add_entries(entries)
# Read DB
self.add_entries(read_db(db_path, 'CITY.TAB', 1, 1, '_ci', strip_id=['.']))
self.add_entries(self.DAYS_OF_WEEK)
self.add_entries([(x + 's', y) for x, y in self.DAYS_OF_WEEK]) # Handle "on tuesdays"
self.add_entries(read_db(db_path, 'AIRLINE.TAB', 0, 1, '_al',
strip_name=[', inc.', ', ltd.']))
self.add_entries(read_db(db_path, 'INTERVAL.TAB', 0, 0, '_pd'))
self.add_entries(read_db(db_path, 'MONTH.TAB', 1, 1, '_mn'))
self.add_entries(read_db(db_path, 'AIRPORT.TAB', 0, 1, '_ap',
strip_name=[], split_name=['/']))
self.add_entries(read_db(db_path, 'COMP_CLS.TAB', 1, 1, '_cl'))
self.add_entries(read_db(db_path, 'CLS_SVC.TAB', 0, 0, '_fb', prefix_name='code '))
self.add_entries(self.MEALS)
self.add_entries(self.WORD_NUMBERS)
self.add_entries(self.ORDINAL_NUMBERS)
self.add_entries(self.ST_CITIES)
self.add_entries(self.MEAL_CODES)
self.add_entries(self.BAT_CODES)
self.add_entries(self.AP_CODES)
self.add_entries(self.AL_CODES)
self.add_entries(self.NEXT_DAYS_OF_WEEK)
self.handle_times()
self.handle_rc()
self.handle_stop()
self.handle_dollars()
self.handle_flight_numbers()
def handle_times(self):
# Mod 12 deals with 12am/12pm special cases...
self.add_handler('([0-9]{1,2})\s*am$',
lambda m: '%d00:_ti' % (int(m.group(1)) % 12))
self.add_handler('([0-9]{1,2}) pm$',
lambda m: '%d00:_ti' % (int(m.group(1)) % 12 + 12))
self.add_handler('([0-9]{1,2})([0-9]{2}) am$',
lambda m: '%d%02d:_ti' % (int(m.group(1)) % 12, int(m.group(2))))
self.add_handler('([0-9]{1,2})([0-9]{2}) pm$',
lambda m: '%d%02d:_ti' % (int(m.group(1)) % 12 + 12, int(m.group(2))))
self.add_handler("([0-9]{1,2}) o'clock$",
lambda m: '%d00:_ti' % (int(m.group(1)) % 12))
self.add_handler("([0-9]{1,2}) o'clock am$",
lambda m: '%d00:_ti' % (int(m.group(1)) % 12))
self.add_handler("([0-9]{1,2}) o'clock pm$",
lambda m: '%d00:_ti' % (int(m.group(1)) % 12 + 12))
self.add_handler("([0-9]+) hours$",
lambda m: '%d:_hr' % (int(m.group(1))))
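        # e.g. '9 am' -> '900:_ti', '9 pm' -> '2100:_ti', '12 pm' -> '1200:_ti',
        # and '2 hours' -> '2:_hr' (illustrative spans only)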
def handle_flight_numbers(self):
self.add_handler('[0-9]{2,}$', lambda m: '%d:_fn' % int(m.group(0)))
def handle_dollars(self):
self.add_handler('([0-9]+)$', lambda m: '%d:_do' % int(m.group(1)))
self.add_handler('([0-9]+) dollars$', lambda m: '%d:_do' % int(m.group(1)))
def handle_rc(self):
self.add_handler(re.compile(r'ap/(\d+)$'), lambda m: 'ap_%d:_rc' % int(m.group(1)))
self.add_handler(re.compile(r'ap(\d+)$'), lambda m: 'ap_%d:_rc' % int(m.group(1)))
def handle_stop(self):
self.add_handler('([0-9]+) stop$', lambda m: '%d:_i' % int(m.group(1)))
self.add_handler('([0-9]+) stops$', lambda m: '%d:_i' % int(m.group(1)))
def add_entries(self, entries):
for name, entity in entries:
# Update self.entries
if name in self.entries:
if entity not in self.entries[name]:
self.entries[name].append(entity)
else:
self.entries[name] = [entity]
# Update self.unique_word_map
# for w in name.split(' '):
# if w in self.seen_words:
# # This word is not unique!
# if w in self.unique_word_map:
# del self.unique_word_map[w]
# else:
# self.unique_word_map[w] = entity
# self.seen_words.add(w)
def add_handler(self, regex, func):
self.handlers.append((regex, func))
def _match_candidates(self, tokens: List[Token]) -> List:
words = [t.text for t in tokens]
entities = [[] for i in range(len(words))]
ind_pairs = sorted(list(itertools.combinations(range(len(words) + 1), 2)),
key=lambda x: x[0] - x[1])
ret_entries = []
words = [strip_unk(w) for w in words] # Strip unk:%06d stuff
# Handlers
for i, j in ind_pairs:
if any(x for x in entities[i:j]): continue
span = ' '.join(words[i:j])
for regex, func in self.handlers:
m = re.match(regex, span)
if m:
entity = func(m)
assert isinstance(entity, str)
for k in range(i, j):
entities[k] += [entity]
ret_entries.append(((i, j), [entity]))
# Entries
for i, j in ind_pairs:
# if any(x for x in entities[i:j]): continue
span = ' '.join(words[i:j])
if span in self.entries:
entity = self.entries[span]
assert isinstance(entity, list)
for k in range(i, j):
entities[k] += entity
ret_entries.append(((i, j), entity))
# Unique words
for i in range(len(words)):
if any(x for x in entities[i:i+1]): continue
word = words[i]
if entities[i]: continue
if word in self.unique_word_map:
entity = self.unique_word_map[word]
entities[i] = [entity]
ret_entries.append(((i, i+1), [entity]))
return ret_entries
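        # Each entry pairs a token span with its candidate entities, e.g.
        # ((5, 6), ['boston:_ci']) for a single-token span (illustrative only).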
def match(self, tokens: List[Token]) -> List[Dict]:
entity_candidates = self._match_candidates(tokens)
formatted_entity_candidates = list()
for ((beg_idx, end_idx), entities) in entity_candidates:
fes = list()
for e in entities:
match = re.match("^(.*):_(.*)$", e)
if match is None:
assert re.match('^\d+$', e)
ev = e
et = 'i'
else:
ev = match.group(1)
et = match.group(2)
fes.append({
"value": ev.replace('_', " "),
"beg_idx": beg_idx,
"end_idx": end_idx,
"type": et,
"formatted_value": e
})
formatted_entity_candidates += fes
# Remove Duplicate entities
entity_dict = dict()
for entity in formatted_entity_candidates:
if entity['formatted_value'] not in entity_dict:
entity_dict[entity['formatted_value']] = list()
entity_dict[entity['formatted_value']].append(entity)
results = list()
for _, entities in entity_dict.items():
indices = list()
for e in entities:
beg_idx, end_idx = e['beg_idx'], e['end_idx']
indices += list(range(beg_idx, end_idx))
ne = copy.deepcopy(entities[0])
del ne['beg_idx']
del ne['end_idx']
ne['indices'] = list(set(indices))
results.append(ne)
return results
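        # Each returned dict is expected to look roughly like
        # {'value': 'boston', 'type': 'ci', 'formatted_value': 'boston:_ci',
        #  'indices': [5]}; the values here are illustrative only.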
class ATISGNNEntityMatcher(ATISGNNLambdaCalculusEntityMatcher):
def __init__(self, db_path):
super().__init__(db_path)
self.add_entries([('new york', 'lga:_ap'), ('the airport in dallas', 'dfw:_ap')])
@overrides
def match(self, tokens: List[Token]) -> List[Dict]:
entity_candidates = super().match(tokens)
for entity in entity_candidates:
formatted_value = entity['formatted_value']
match = re.match("^(.*):_(.*)$", formatted_value)
if match is None:
                assert re.match('^\d+$', formatted_value)
ev = formatted_value
et = 'i'
else:
ev = match.group(1)
et = match.group(2)
entity['formatted_value'] = ev
# Remove Duplicate entities
entity_dict = dict()
for entity in entity_candidates:
if entity['formatted_value'] not in entity_dict:
entity_dict[entity['formatted_value']] = list()
entity_dict[entity['formatted_value']].append(entity)
results = list()
for _, entities in entity_dict.items():
types = list()
for e in entities:
types.append(e['type'])
ne = copy.deepcopy(entities[0])
ne['type'] = ';'.join(types)
results.append(ne)
return results
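        # Unlike the parent class, 'formatted_value' is stripped to the bare
        # value (e.g. 'boston' rather than 'boston:_ci'), and duplicates are
        # merged with their types joined by ';' (e.g. 'ci;ap').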
def test_entity_linking():
base_path = os.path.join('../../', 'data', 'atis')
entity_path = os.path.join(base_path, 'db')
matcher = ATISGNNEntityMatcher(entity_path)
    tokenizer = WordTokenizer(SpacyWordSplitter())
import sys
sys.path += ['../../']
from grammars.grammar import get_grammar
from grammars.utils import get_logical_form_preprocessor
preprocessor = get_logical_form_preprocessor('atis', 'prolog')
grammar = get_grammar('atis', 'prolog')
train_data = os.path.join(base_path, 'atis_prolog_test.tsv')
empty_count = 0
max_number_of_candidates = 0
numbers = list()
invalid_count = 0
inst_count = 0
with open(train_data, 'r') as f:
for lidx, line in enumerate(f):
line = line.strip()
sentence, funql = line.split('\t')
            tokens = tokenizer.tokenize(sentence)
candidates = matcher.match(tokens)
inst_count += 1
if len(candidates) > max_number_of_candidates:
max_number_of_candidates = len(candidates)
has_duplicate_entity = False
for cidx1, can1 in enumerate(candidates):
for cidx2, can2 in enumerate(candidates):
if cidx1 == cidx2:
continue
if can1['value'] == can2['value'] and can1['type'] == can2['type']:
has_duplicate_entity = True
break
if has_duplicate_entity:
break
if len(candidates) == 0:
empty_count += 1
numbers.append(len(candidates))
# Validate
processed_funql = preprocessor(funql).lower()
golden_entities = prolog_entity_extractor(grammar, processed_funql)
valid = True
for ge in golden_entities:
for candidate in candidates:
compare_value = candidate['value'] if 'formatted_value' not in candidate \
else candidate['formatted_value']
if compare_value == ge or candidate.get('abbreviation', "") == ge:
break
else:
valid = False
if not valid:
invalid_count += 1
print(lidx)
print(sentence)
print(tokens)
print(funql)
print("Number of Candidates: ", len(candidates))
print("Has Duplicate Candidates: ", has_duplicate_entity)
print(candidates)
print(golden_entities)
print("Is Valid: ", valid)
print('===\n\n')
print("Largest number of candidates: ", max_number_of_candidates)
print("Number of empty candidates: ", empty_count)
print("Averaged candidates: ", np.mean(numbers))
print("Invalid Count: ", invalid_count)
print("Overall Recall: ", (inst_count - invalid_count) / inst_count)
if __name__ == '__main__':
# matcher = ATISGNNLambdaCalculusEntityMatcher('../../data/atis/db')
# from allennlp.data.tokenizers import Token, WordTokenizer
# from allennlp.data.tokenizers.word_splitter import SpacyWordSplitter
# toknerizer = WordTokenizer(SpacyWordSplitter())
#
# question = "i'm looking for flights from pittsburgh to philadelphia leaving before 9am"
# tokens = toknerizer.tokenize(question)
#
# matched_entities = matcher.match(tokens)
# for entity in matched_entities:
# string = " ".join([tokens[idx].text for idx in entity['indices']])
# print(string, entity['value'], entity['type'])
test_entity_linking()
| 18,309 | 38.040512 | 116 |
py
|
Unimer
|
Unimer-master/grammars/atis/sql_grammar_2.py
|
# coding=utf8
ROOT_RULE = 'statement -> [mquery]'
GRAMMAR_DICTIONARY = {}
GRAMMAR_DICTIONARY["statement"] = ['(mquery ws)']
GRAMMAR_DICTIONARY["mquery"] = [
'(ws select_clause ws from_clause ws where_clause ws groupby_clause ws orderby_clause)',
'(ws select_clause ws from_clause ws where_clause ws groupby_clause)',
'(ws select_clause ws from_clause ws where_clause ws orderby_clause)',
'(ws select_clause ws from_clause ws where_clause)',
'(ws select_clause ws from_clause ws groupby_clause ws orderby_clause)',
'(ws select_clause ws from_clause ws groupby_clause)',
'(ws select_clause ws from_clause)'
]
# SELECT
GRAMMAR_DICTIONARY["select_clause"] = [
'(select_with_distinct ws select_results)']
GRAMMAR_DICTIONARY["select_with_distinct"] = [
'(ws "select" ws "distinct")', '(ws "select")']
GRAMMAR_DICTIONARY["select_results"] = [
'(ws subject ws "," ws select_results)', '(ws subject)']
# FROM
GRAMMAR_DICTIONARY["from_clause"] = ['(ws "from" ws source)']
GRAMMAR_DICTIONARY["source"] = [
'(ws table_name ws table_alias ws "," ws source)', '(ws table_name ws table_alias)']
# ORDER
GRAMMAR_DICTIONARY["orderby_clause"] = ['ws "order by" ws subject']
# GROUP BY
GRAMMAR_DICTIONARY["groupby_clause"] = ['(ws "group by" ws subject)']
# WHERE
GRAMMAR_DICTIONARY["where_clause"] = ['(ws "where" wsp condition)']
GRAMMAR_DICTIONARY["condition"] = ['(ws single ws "and" wsp condition)',
'(ws single ws "or" wsp condition)',
'(single)']
GRAMMAR_DICTIONARY["single"] = ['(expr)',
'("(" ws condition ws ")")',
'("not" ws single)']
GRAMMAR_DICTIONARY["expr"] = [
'(subject wsp "between" wsp time_value wsp "and" wsp time_value)',
'(subject wsp "not" wsp "between" wsp time_value wsp "and" wsp time_value)',
'(subject wsp "is" wsp "not" wsp "null")',
'(subject wsp "not" wsp "in" wsp "(" ws mquery ws ")")',
'(subject wsp "in" ws "(" ws mquery ws ")")',
'(subject ws binaryop ws "all" ws "(" ws mquery ws ")")',
'(subject ws binaryop ws "any" ws "(" ws mquery ws ")")',
'(subject ws binaryop ws "(" ws mquery ws ")")',
'(concrete_value_expr)',
'(subject ws binaryop ws col_ref)',
]
GRAMMAR_DICTIONARY["value"] = ['col_ref']
GRAMMAR_DICTIONARY["subject"] = ['function', 'col_ref']
GRAMMAR_DICTIONARY["col_ref"] = ['table_columns', '"*"']
GRAMMAR_DICTIONARY["function"] = ['(fname ws "(" ws "distinct" ws col_ref ws ")")',
'(fname ws "(" ws col_ref ws ")")']
GRAMMAR_DICTIONARY["fname"] = ['"count"', '"sum"', '"max"', '"min"']
# TODO(MARK): This is not tight enough. AND/OR are strictly boolean value operators.
GRAMMAR_DICTIONARY["binaryop"] = ['"="', '"!="', '"<>"',
'">="', '"<="', '">"', '"<"', '"like"', '"not like"']
GRAMMAR_DICTIONARY["ws"] = ['~"\s*"i']
GRAMMAR_DICTIONARY['wsp'] = ['~"\s+"i']
GRAMMAR_DICTIONARY["table_name"] = ['"aircraft"', '"airline"', '"airport_base"', '"airport_service"', '"fare_basis"',
'"city"', '"class_of_service"','"date_day"', '"days"', '"equipment_sequence"', '"fare_base"',
'"flight_base"', '"flight_fare"', '"flight_leg"',
'"flight_stop"', '"food_service"', '"ground_service"', '"restriction"', '"state"',]
GRAMMAR_DICTIONARY["table_alias"] = [
'aircraft_alias', '"airline_1"', 'airport_base_alias', 'airport_service_alias', 'fare_basis_alias',
'city_alias', '"class_of_service_1"', 'date_day_alias', 'days_alias', 'equipment_sequence_alias',
'fare_base_alias', 'flight_base_alias', 'flight_fare_alias', 'flight_leg_alias',
'"restriction_1"', 'state_alias', 'flight_stop_alias', 'food_service_alias', '"ground_service_1"'
]
GRAMMAR_DICTIONARY['aircraft_alias'] = [
'"aircraft_4"', '"aircraft_1"', '"aircraft_2"', '"aircraft_3"',
]
GRAMMAR_DICTIONARY['airline_alias'] = ['"airline_1"']
GRAMMAR_DICTIONARY['airport_base_alias'] = ['"airport_4"', '"airport_1"', '"airport_2"', '"airport_3"', ]
GRAMMAR_DICTIONARY['airport_service_alias'] = ['"airport_service_6"', '"airport_service_1"', '"airport_service_2"',
'"airport_service_3"', '"airport_service_4"', '"airport_service_5"',]
GRAMMAR_DICTIONARY['city_alias'] = [
'"city_6"', '"city_1"', '"city_2"', '"city_3"', '"city_4"', '"city_5"',
]
GRAMMAR_DICTIONARY['class_of_service_alias'] = ['"class_of_service_1"']
GRAMMAR_DICTIONARY['date_day_alias'] = [
'"date_day_5"', '"date_day_1"', '"date_day_2"',
'"date_day_3"', '"date_day_4"',
]
GRAMMAR_DICTIONARY['days_alias'] = [
'"days_10"', '"days_1"', '"days_2"',
'"days_3"', '"days_4"', '"days_5"', '"days_6"', '"days_7"', '"days_8"',
'"days_9"',
]
GRAMMAR_DICTIONARY['equipment_sequence_alias'] = [
'"equipment_sequence_3"', '"equipment_sequence_1"',
'"equipment_sequence_2"',
]
GRAMMAR_DICTIONARY['fare_base_alias'] = [
'"fare_5"', '"fare_1"',
'"fare_2"', '"fare_3"', '"fare_4"',
]
GRAMMAR_DICTIONARY['fare_basis_alias'] = [
'"fare_basis_6"', '"fare_basis_1"', '"fare_basis_2"',
'"fare_basis_3"', '"fare_basis_4"', '"fare_basis_5"',
]
GRAMMAR_DICTIONARY['flight_base_alias'] = [
'"flight_1"', '"flight_2"', '"flight_3"', '"flight_4"',
]
GRAMMAR_DICTIONARY['flight_fare_alias'] = [
'"flight_fare_5"', '"flight_fare_2"', '"flight_fare_3"', '"flight_fare_4"', '"flight_fare_1"'
]
GRAMMAR_DICTIONARY['flight_leg_alias'] = [
'"flight_leg_2"', '"flight_leg_1"'
]
GRAMMAR_DICTIONARY['flight_stop_alias'] = [
'"flight_stop_2"', '"flight_stop_1"',
]
GRAMMAR_DICTIONARY['food_service_alias'] = [
'"food_service_2"', '"food_service_1"'
]
# GRAMMAR_DICTIONARY['ground_service_alias'] = ['"ground_service_1"']
GRAMMAR_DICTIONARY['state_alias'] = [
'"state_4"', '"state_1"', '"state_2"', '"state_3"'
]
GRAMMAR_DICTIONARY['restriction_alias'] = ['"restriction_1"']
# Column Name
GRAMMAR_DICTIONARY['table_columns'] = [
'(aircraft_alias ws "." ws aircraft_columns)',
'("airline_1" ws "." ws airline_columns)',
'(airport_base_alias ws "." ws airport_base_columns)',
'(airport_service_alias ws "." ws airport_service_columns)',
'(city_alias ws "." ws city_columns)',
'("class_of_service_1" ws "." ws class_of_service_columns)',
'(date_day_alias ws "." ws date_day_columns)',
'(days_alias ws "." ws days_columns)',
'(equipment_sequence_alias ws "." ws equipment_sequence_columns)',
'(fare_base_alias ws "." ws fare_base_columns)',
'(fare_basis_alias ws "." ws fare_basis_columns)',
'(flight_base_alias ws "." ws flight_base_columns)',
'(flight_fare_alias ws "." ws flight_fare_columns)',
'(flight_leg_alias ws "." ws flight_leg_columns)',
'(flight_stop_alias ws "." ws flight_stop_columns)',
'(food_service_alias ws "." ws food_service_columns)',
'("ground_service_1" ws "." ws ground_service_columns)',
'(state_alias ws "." ws state_colums)',
'("restriction_1" ws "." ws restriction_columns)',
]
GRAMMAR_DICTIONARY['aircraft_columns'] = [
'"capacity"', '"manufacturer"', '"basic_type"', '"propulsion"', '"aircraft_code_base"']
GRAMMAR_DICTIONARY['airline_columns'] = [
'"airline_name"', '"airline_code"', '"note"']
GRAMMAR_DICTIONARY['airport_base_columns'] = ['"state_code"', '"airport_code"', '"airport_location"', '"minimum_connect_time"', '"time_zone_code"', '"country_name"', '"airport_name"']
GRAMMAR_DICTIONARY['airport_service_columns'] = [
'"miles_distant"', '"minutes_distant"', '"airport_code"', '"city_code"']
GRAMMAR_DICTIONARY['city_columns'] = ['"city_code"', '"time_zone_code"', '"country_name"', '"city_name"', '"state_code"']
GRAMMAR_DICTIONARY['class_of_service_columns'] = ['"rank"', '"class_description"', '"booking_class"']
GRAMMAR_DICTIONARY['date_day_columns'] = [
'"day_name"', '"day_number"', '"month_number"', '"year"']
GRAMMAR_DICTIONARY['days_columns'] = ['"days_code"', '"day_name"']
GRAMMAR_DICTIONARY['equipment_sequence_columns'] = [
'"aircraft_code_base"', '"aircraft_code_sequence"']
GRAMMAR_DICTIONARY['flight_base_columns'] = ['"connections"', '"meal_code"', '"flight_days"', '"flight_id"', '"from_airport"', '"flight_number"',
'"airline_code"', '"to_airport"', '"departure_time"', '"aircraft_code_sequence"', '"time_elapsed"', '"stops"', '"arrival_time"']
GRAMMAR_DICTIONARY['fare_base_columns'] = ['"restriction_code"', '"fare_id"', '"from_airport"', '"flight_id"',
'"fare_airline"', '"fare_basis_code"', '"to_airport"', '"one_direction_cost"', '"round_trip_required"', '"round_trip_cost"']
GRAMMAR_DICTIONARY['fare_basis_columns'] = ['"booking_class"', '"economy"',
'"basis_days"', '"fare_basis_code"', '"class_type"', '"discounted"']
GRAMMAR_DICTIONARY['flight_fare_columns'] = ['"fare_id"', '"flight_id"']
GRAMMAR_DICTIONARY['flight_leg_columns'] = ['"leg_flight"', '"flight_id"']
GRAMMAR_DICTIONARY['flight_stop_columns'] = [
'"arrival_time"', '"flight_id"', '"stop_number"', '"stop_airport"']
GRAMMAR_DICTIONARY['food_service_columns'] = ['"meal_code"',
'"meal_description"', '"meal_number"', '"compartment"']
GRAMMAR_DICTIONARY['ground_service_columns'] = [
'"ground_fare"', '"airport_code"', '"transport_type"', '"city_code"']
GRAMMAR_DICTIONARY['state_colums'] = ['"state_code"', '"state_name"']
GRAMMAR_DICTIONARY['restriction_columns'] = ['"advance_purchase"', '"stopovers"', '"minimum_stay"',
'"application"', '"maximum_stay"', '"saturday_stay_required"', '"restriction_code"', '"no_discounts"']
# Column Values
GRAMMAR_DICTIONARY['concrete_value_expr'] = [
'(days_alias ws "." ws "days_code" ws binaryop ws days_code_value)',
'(days_alias ws "." ws "day_name" ws binaryop ws day_name_value)',
'(fare_basis_alias ws "." ws "fare_basis_code" ws binaryop ws fare_basis_code_value)',
'(fare_basis_alias ws "." ws "class_type" ws binaryop ws class_type_value)',
'(fare_basis_alias ws "." ws "economy" ws binaryop ws economy_value)',
'(fare_basis_alias ws "." ws "discounted" ws binaryop ws discounted_value)',
'(fare_basis_alias ws "." ws "booking_class" ws binaryop ws booking_class_value)',
'(fare_base_alias ws "." ws "round_trip_required" ws binaryop ws round_trip_required_value)',
'(fare_base_alias ws "." ws "fare_basis_code" ws binaryop ws fare_basis_code_value)',
'(aircraft_alias ws "." ws "manufacturer" ws binaryop ws manufacturer_value)',
'(aircraft_alias ws "." ws "basic_type" ws binaryop ws basic_type_value)',
'(aircraft_alias ws "." ws "aircraft_code_base" ws binaryop ws aircraft_code_value)',
'(aircraft_alias ws "." ws "propulsion" ws binaryop ws propulsion_value)',
'(airport_base_alias ws "." ws "airport_code" ws binaryop ws airport_code_value)',
'(airport_base_alias ws "." ws "airport_name" ws binaryop ws airport_name_value)',
'(city_alias ws "." ws "city_name" ws binaryop ws city_name_value)',
'(city_alias ws "." ws "country_name" ws binaryop ws country_name_value)',
'(city_alias ws "." ws "state_code" ws binaryop ws state_code_value)',
'(state_alias ws "." ws "state_code" ws binaryop ws state_code_value)',
'(state_alias ws "." ws "state_name" ws binaryop ws state_name_value)',
'(flight_base_alias ws "." ws "airline_code" ws binaryop ws airline_code_value)',
'(flight_base_alias ws "." ws "flight_days" ws binaryop ws flight_days_value)',
'(flight_base_alias ws "." ws "meal_code" ws binaryop ws meal_code_value)',
'("airline_1" ws "." ws "airline_code" ws binaryop ws airline_code_value)',
'("airline_1" ws "." ws "airline_name" ws binaryop ws airline_name_value)',
'("ground_service_1" ws "." ws "transport_type" ws binaryop ws transport_type_value)',
'(food_service_alias ws "." ws "meal_description" ws binaryop ws meal_description_value)',
'(food_service_alias ws "." ws "meal_code" ws binaryop ws meal_code_value)',
'(airport_service_alias ws "." ws "airport_code" ws binaryop ws airport_code_value)',
'("restriction_1" ws "." ws "restriction_code" ws binaryop ws restriction_code_value)',
'("class_of_service_1" ws "." ws "booking_class" ws binaryop ws booking_class_value)',
# Numerical
'(date_day_alias ws "." ws "year" ws binaryop ws year_value)',
'(date_day_alias ws "." ws "month_number" ws binaryop ws month_number_value)',
'(date_day_alias ws "." ws "day_number" ws binaryop ws day_number_value)',
'(flight_stop_alias ws "." ws "arrival_time" ws binaryop ws time_value)',
'(flight_base_alias ws "." ws "flight_number" ws binaryop ws flight_number_value)',
'(flight_base_alias ws "." ws "connections" ws binaryop ws connections_value)',
'(flight_base_alias ws "." ws "arrival_time" ws binaryop ws time_value)',
'(flight_base_alias ws "." ws "departure_time" ws binaryop ws time_value)',
'(flight_base_alias ws "." ws "stops" ws binaryop ws stops_value)',
'(flight_base_alias ws "." ws "time_elapsed" ws binaryop ws time_elapsed_value)',
'(fare_base_alias ws "." ws "one_direction_cost" ws binaryop ws one_direction_cost_value)',
'(fare_base_alias ws "." ws "round_trip_cost" ws binaryop ws round_trip_cost_value)',
]
GRAMMAR_DICTIONARY['airport_code_value'] = ['"\'iah\'"', '"\'sfo\'"', '"\'tpa\'"', '"\'jfk\'"', '"\'cvg\'"', '"\'dfw\'"', '"\'mco\'"', '"\'phl\'"', '"\'lga\'"', '"\'lax\'"', '"\'yyz\'"', '"\'bwi\'"', '"\'oak\'"',
'"\'slc\'"', '"\'ont\'"', '"\'pit\'"', '"\'hou\'"', '"\'mia\'"', '"\'den\'"', '"\'bur\'"', '"\'ord\'"', '"\'dtw\'"', '"\'mke\'"', '"\'bna\'"', '"\'iad\'"', '"\'bos\'"', '"\'atl\'"', '"\'ewr\'"', '"\'dal\'"']
GRAMMAR_DICTIONARY['city_name_value'] = ['"\'salt lake city\'"', '"\'san jose\'"', '"\'newark\'"', '"\'montreal\'"', '"\'st. paul\'"', '"\'ontario\'"', '"\'orlando\'"', '"\'minneapolis\'"', '"\'westchester county\'"', '"\'memphis\'"', '"\'chicago\'"', '"\'tampa\'"', '"\'pittsburgh\'"', '"\'toronto\'"', '"\'houston\'"', '"\'detroit\'"', '"\'new york\'"', '"\'cleveland\'"', '"\'columbus\'"', '"\'nashville\'"', '"\'tacoma\'"', '"\'philadelphia\'"',
'"\'las vegas\'"', '"\'denver\'"', '"\'san diego\'"', '"\'miami\'"', '"\'indianapolis\'"', '"\'burbank\'"', '"\'cincinnati\'"', '"\'fort worth\'"', '"\'milwaukee\'"', '"\'boston\'"', '"\'baltimore\'"', '"\'dallas\'"', '"\'seattle\'"', '"\'atlanta\'"', '"\'kansas city\'"', '"\'los angeles\'"', '"\'phoenix\'"', '"\'oakland\'"', '"\'san francisco\'"', '"\'washington\'"', '"\'st. louis\'"', '"\'charlotte\'"', '"\'st. petersburg\'"', '"\'long beach\'"']
GRAMMAR_DICTIONARY['round_trip_required_value'] = ['"\'no\'"', '"\'yes\'"']
GRAMMAR_DICTIONARY['airline_code_value'] = ['"\'ua\'"', '"\'cp\'"', '"\'ea\'"', '"\'ac\'"', '"\'ml\'"', '"\'as\'"', '"\'lh\'"', '"\'dl\'"',
'"\'nw\'"', '"\'us\'"', '"\'yx\'"', '"\'tw\'"', '"\'wn\'"', '"\'ff\'"', '"\'nx\'"', '"\'kw\'"', '"\'co\'"', '"\'hp\'"', '"\'aa\'"']
GRAMMAR_DICTIONARY['day_name_value'] = ['"\'monday\'"', '"\'friday\'"', '"\'tuesday\'"',
'"\'thursday\'"', '"\'sunday\'"', '"\'saturday\'"', '"\'wednesday\'"']
GRAMMAR_DICTIONARY['aircraft_code_value'] = ['"\'757\'"', '"\'m80\'"', '"\'733\'"', '"\'j31\'"',
'"\'73s\'"', '"\'72s\'"', '"\'734\'"', '"\'d9s\'"', '"\'f28\'"', '"\'100\'"', '"\'d10\'"']
GRAMMAR_DICTIONARY['meal_code_value'] = ['"\'%s/%\'"', '"\'s\'"',
'"\'bb\'"', '"\'b\'"', '"\'s/\'"', '"\'sd/d\'"', '"\'ls\'"', '"\'d/s\'"']
GRAMMAR_DICTIONARY['state_name_value'] = ['"\'nevada\'"', '"\'ohio\'"', '"\'michigan\'"', '"\'minnesota\'"', '"\'new jersey\'"', '"\'colorado\'"', '"\'indiana\'"', '"\'california\'"',
'"\'washington\'"', '"\'georgia\'"', '"\'north carolina\'"', '"\'texas\'"', '"\'new york\'"', '"\'quebec\'"', '"\'utah\'"', '"\'missouri\'"', '"\'arizona\'"', '"\'florida\'"', '"\'tennessee\'"']
GRAMMAR_DICTIONARY['class_type_value'] = ['"\'first\'"',
'"\'coach\'"', '"\'business\'"', '"\'thrift\'"']
GRAMMAR_DICTIONARY['transport_type_value'] = ['"\'rapid transit\'"',
'"\'rental car\'"', '"\'air taxi operation\'"', '"\'taxi\'"', '"\'limousine\'"',
'"\'%limousine%\'"', '"\'%taxi%\'"']
GRAMMAR_DICTIONARY['state_code_value'] = ['"\'tx\'"',
'"\'ca\'"', '"\'asd\'"', '"\'dc\'"', '"\'ga\'"']
GRAMMAR_DICTIONARY['economy_value'] = ['"\'yes\'"', '"\'no\'"']
GRAMMAR_DICTIONARY['fare_basis_code_value'] = ['"\'b\'"', '"\'bh\'"', '"\'m\'"', '"\'c\'"', '"\'qx\'"',
'"\'h\'"', '"\'qw\'"', '"\'y\'"', '"\'q\'"', '"\'qo\'"', '"\'fn\'"', '"\'f\'"', '"\'yn\'"']
GRAMMAR_DICTIONARY['booking_class_value'] = [
'"\'c\'"', '"\'b\'"', '"\'h\'"', '"\'y\'"', '"\'q\'"', '"\'f\'"', '"\'yn\'"']
GRAMMAR_DICTIONARY['meal_description_value'] = [
'"\'lunch\'"', '"\'breakfast\'"', '"\'snack\'"', '"\'dinner\'"']
GRAMMAR_DICTIONARY['basic_type_value'] = ['"\'757\'"', '"\'747\'"',
'"\'767\'"', '"\'727\'"', '"\'dc10\'"', '"\'f28\'"', '"\'737\'"']
GRAMMAR_DICTIONARY['manufacturer_value'] = ['"\'boeing\'"']
GRAMMAR_DICTIONARY['days_code_value'] = ['"\'sa\'"', '"\'su\'"']
GRAMMAR_DICTIONARY['restriction_code_value'] = [
'"\'ap/80\'"', '"\'ap/68\'"', '"\'ap/57\'"', '"\'ap/55\'"', '"\'ap/58\'"', '"\'ap\'"']
GRAMMAR_DICTIONARY['flight_days_value'] = ['"\'daily\'"']
GRAMMAR_DICTIONARY['country_name_value'] = ['"\'canada\'"', '"\'usa\'"']
GRAMMAR_DICTIONARY['airline_name_value'] = [
'"\'united\'"', '"\'continental airlines\'"', '"\'%canadian airlines international%\'"', '"\'%canadian airlines%\'"', '"\'%delta%\'"', '"\'usair\'"']
GRAMMAR_DICTIONARY['propulsion_value'] = ['"\'jet\'"', '"\'turboprop\'"']
GRAMMAR_DICTIONARY['airport_name_value'] = [
'"\'general mitchell international\'"', '"\'%canadian airlines international%\'"', '"\'stapleton international\'"', '"\'%pearson%\'"', '"\'%lester%\'"', '"\'%stapleton%\'"']
GRAMMAR_DICTIONARY['discounted_value'] = ['"\'yes\'"', '"\'no\'"']
# Numerical Value
GRAMMAR_DICTIONARY['year_value'] = [
'"1991"', '"1993"', '"1994"', '"1990"', '"1992"']
GRAMMAR_DICTIONARY['time_value'] = ['"1619"', '"815"', '"2220"', '"2010"', '"1524"', '"1205"', '"1159"', '"1220"', '"1620"', '"705"', '"2330"', '"1045"', '"1401"', '"1024"', '"400"', '"755"', '"838"', '"823"', '"1430"', '"1017"', '"930"',
'"1000"', '"2159"', '"301"', '"2134"', '"645"', '"718"', '"1310"', '"1330"', '"1425"', '"1940"', '"1923"', '"1628"', '"1745"', '"1845"','"830"','"730"','"720"',
'"555"','"500"', '"1505"', '"2226"', '"1759"', '"300"', '"1800"', '"650"', '"601"', '"600"', '"845"', '"819"', '"1200"', '"2200"', '"2400"', '"1930"', '"430"', '"530"', '"41"',
'"2230"', '"2358"', '"2359"', '"2300"', '"1900"', '"1615"', '"1530"', '"1630"', '"2000"', '"1830"', '"630"', '"2100"', '"2030"', '"1130"', '"1715"',
'"1110"', '"1645"', '"800"', '"1230"', '"1730"', '"1700"', '"1030"', '"1850"', '"1500"', '"1600"', '"1400"', '"1300"', '"0"', '"200"', '"2130"', '"1115"',
'"1245"', '"1145"', '"1100"', '"900"', '"1410"', '"700"', '"100"', '"230"', '"30"', '"1"']
GRAMMAR_DICTIONARY['month_number_value'] = ['"12"', '"4"', '"6"',
'"9"', '"8"', '"11"', '"10"', '"1"', '"3"', '"7"', '"5"', '"2"']
GRAMMAR_DICTIONARY['day_number_value'] = ['"26"', '"28"', '"23"', '"24"', '"27"', '"25"', '"29"', '"22"', '"21"', '"20"', '"2"',
'"16"', '"11"','"13"', '"12"', '"14"', '"17"', '"15"', '"19"', '"18"', '"10"', '"1"',
'"31"', '"30"', '"3"', '"4"', '"8"', '"7"', '"5"', '"6"', '"9"']
GRAMMAR_DICTIONARY['stops_value'] = ['"0"', '"3"', '"1"']
GRAMMAR_DICTIONARY['flight_number_value'] = ['"297"', '"271"', '"2153"', '"229"', '"269"', '"270"', '"1222"', '"766"', '"505"', '"402"', '"343"', '"19"', '"417"', '"137338"', '"71"', '"324"', '"139"', '"1039"', '"771"', '"3724"', '"746"', '"217"', '"210"', '"212"', '"21"', '"852"', '"459"',
'"1291"', '"296"', '"311"', '"323"', '"1765"', '"279"', '"315"', '"497"', '"163"', '"1083"', '"1209"', '"98"', '"345"', '"928"', '"106"', '"825"', '"82"', '"4400"', '"352"', '"415"', '"3357"', '"838"', '"539"', '"281"', '"813"', '"257"', '"201"']
GRAMMAR_DICTIONARY['round_trip_cost_value'] = [
'"466"', '"300"', '"932"', '"1288"', '"1100"', '"1500"', '"1000"', '"100"']
GRAMMAR_DICTIONARY['connections_value'] = ['"0"', '"1"']
GRAMMAR_DICTIONARY['one_direction_cost_value'] = [
'"466"', '"400"', '"329"', '"300"', '"150"', '"200"', '"416"', '"500"']
GRAMMAR_DICTIONARY['time_elapsed_value'] = ['"60"', '"540"']
COPY_TERMINAL_SET = {'year_value', 'time_value', 'month_number_value', 'day_number_value', 'stops_value', 'flight_number_value', 'round_trip_cost_value',
'connections_value', 'one_direction_cost_value', 'time_elapsed_value', 'airport_code_value', 'city_name_value', 'round_trip_required_value',
'airline_code_value', 'day_name_value', 'aircraft_code_value', 'meal_code_value', 'state_name_value', 'class_type_value', 'transport_type_value',
'state_code_value', 'economy_value', 'fare_basis_code_value', 'booking_class_value', 'meal_description_value', 'basic_type_value', 'manufacturer_value',
'days_code_value', 'restriction_code_value', 'flight_days_value', 'country_name_value', 'airline_name_value', 'propulsion_value', 'airport_name_value',
'discounted_value'}
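# --- Illustrative note (not part of the original grammar module) ---
# A minimal query shape the rules above are intended to derive, assuming the
# usual select / from / where expansion (table and value choices are
# illustrative only):
#   select distinct flight_1.flight_id from flight_base flight_1
#   where flight_1.airline_code = 'aa'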
| 22,265 | 69.462025 | 493 |
py
|
Unimer
|
Unimer-master/grammars/atis/sql_grammar.py
|
# coding=utf8
ROOT_RULE = 'statement -> [mquery]'
GRAMMAR_DICTIONARY = {}
GRAMMAR_DICTIONARY["statement"] = ['(mquery ws)']
GRAMMAR_DICTIONARY["mquery"] = [
'(ws select_clause ws from_clause ws where_clause ws groupby_clause ws orderby_clause)',
'(ws select_clause ws from_clause ws where_clause ws groupby_clause)',
'(ws select_clause ws from_clause ws where_clause ws orderby_clause)',
'(ws select_clause ws from_clause ws where_clause)',
'(ws select_clause ws from_clause ws groupby_clause ws orderby_clause)',
'(ws select_clause ws from_clause ws groupby_clause)',
'(ws select_clause ws from_clause)'
]
# SELECT
GRAMMAR_DICTIONARY["select_clause"] = [
'(select_with_distinct ws select_results)']
GRAMMAR_DICTIONARY["select_with_distinct"] = [
'(ws "select" ws "distinct")', '(ws "select")']
GRAMMAR_DICTIONARY["select_results"] = [
'(ws subject ws "," ws select_results)', '(ws subject)']
# FROM
GRAMMAR_DICTIONARY["from_clause"] = ['(ws "from" ws source)']
GRAMMAR_DICTIONARY["source"] = [
'(ws table_name ws table_alias ws "," ws source)', '(ws table_name ws table_alias)']
# ORDER
GRAMMAR_DICTIONARY["orderby_clause"] = ['ws "order by" ws subject']
# GROUP BY
GRAMMAR_DICTIONARY["groupby_clause"] = ['(ws "group by" ws subject)']
# WHERE
GRAMMAR_DICTIONARY["where_clause"] = ['(ws "where" wsp condition)']
GRAMMAR_DICTIONARY["condition"] = ['(ws single ws "and" wsp condition)',
'(ws single ws "or" wsp condition)',
'(single)']
GRAMMAR_DICTIONARY["single"] = ['(expr)',
'("(" ws condition ws ")")',
'("not" ws single)']
GRAMMAR_DICTIONARY["expr"] = [
'(subject wsp "between" wsp value wsp "and" wsp value)',
'(subject wsp "not" wsp "between" wsp value wsp "and" wsp value)',
'(subject wsp "is" wsp "not" wsp "null")',
'(subject wsp "not" wsp "in" wsp "(" ws mquery ws ")")',
'(subject wsp "in" ws "(" ws mquery ws ")")',
'(subject ws binaryop ws "all" ws "(" ws mquery ws ")")',
'(subject ws binaryop ws "any" ws "(" ws mquery ws ")")',
'(subject ws binaryop ws "(" ws mquery ws ")")',
'(subject ws binaryop ws value)',
]
GRAMMAR_DICTIONARY["value"] = ['non_literal_number', 'string', 'col_ref']
GRAMMAR_DICTIONARY["subject"] = ['function', 'col_ref']
GRAMMAR_DICTIONARY["col_ref"] = ['(table_alias ws "." ws column_name)', '"*"']
GRAMMAR_DICTIONARY["function"] = ['(fname ws "(" ws "distinct" ws col_ref ws ")")',
'(fname ws "(" ws col_ref ws ")")']
GRAMMAR_DICTIONARY["fname"] = ['"count"', '"sum"', '"max"', '"min"']
# TODO(MARK): This is not tight enough. AND/OR are strictly boolean value operators.
GRAMMAR_DICTIONARY["binaryop"] = ['"="', '"!="', '"<>"',
'">="', '"<="', '">"', '"<"', '"like"', '"not like"']
GRAMMAR_DICTIONARY["ws"] = ['~"\s*"i']
GRAMMAR_DICTIONARY['wsp'] = ['~"\s+"i']
GRAMMAR_DICTIONARY["table_name"] = ['"aircraft"', '"airline"', '"airport_base"', '"airport_service"',
'"city"', '"class_of_service"', '"code_description"',
'"compartment_class"', '"date_day"', '"days"',
'"dual_carrier"', '"equipment_sequence"', '"fare_base"',
'"fare_basis"', '"flight_base"', '"flight_fare"', '"flight_leg"',
'"flight_stop"', '"food_service"', '"ground_service"',
'"month"', '"restriction"', '"state"', '"time_interval"',
'"time_zone"']
GRAMMAR_DICTIONARY["table_alias"] = [
'"aircraft_4"', '"aircraft_1"', '"aircraft_2"', '"aircraft_3"', '"airline_1"',
'"airport_service_6"', '"airport_service_1"', '"airport_service_2"',
'"airport_service_3"', '"airport_service_4"', '"airport_service_5"',
'"airport_4"', '"airport_1"', '"airport_2"', '"airport_3"', '"city_6"',
'"city_1"', '"city_2"', '"city_3"', '"city_4"', '"city_5"',
'"class_of_service_1"', '"date_day_5"', '"date_day_1"', '"date_day_2"',
'"date_day_3"', '"date_day_4"', '"days_10"', '"days_1"', '"days_2"',
'"days_3"', '"days_4"', '"days_5"', '"days_6"', '"days_7"', '"days_8"',
'"days_9"', '"equipment_sequence_3"', '"equipment_sequence_1"',
'"equipment_sequence_2"', '"fare_basis_6"', '"fare_basis_1"', '"fare_basis_2"',
'"fare_basis_3"', '"fare_basis_4"', '"fare_basis_5"', '"fare_5"', '"fare_1"',
'"fare_2"', '"fare_3"', '"fare_4"', '"flight_fare_5"', '"flight_fare_1"',
'"flight_fare_2"', '"flight_fare_3"', '"flight_fare_4"', '"flight_leg_2"',
'"flight_leg_1"', '"flight_stop_2"', '"flight_stop_1"', '"flight_4"',
'"flight_1"', '"flight_2"', '"flight_3"', '"food_service_2"',
'"food_service_1"', '"ground_service_1"', '"restriction_1"', '"state_4"',
'"state_1"', '"state_2"', '"state_3"'
]
GRAMMAR_DICTIONARY["column_name"] = [
'"*"', '"meal_code"', '"range_miles"', '"departure_flight_number"', '"manufacturer"',
'"aircraft_description"', '"stop_time"', '"stop_airport"', '"fare_airline"', '"no_discounts"',
'"engines"', '"month_name"', '"restriction_code"', '"propulsion"', '"pressurized"',
'"from_airport"', '"wide_body"', '"flight_days"', '"time_zone_name"', '"capacity"', '"fare_id"',
'"class_type"', '"period"', '"minimum_connect_time"', '"stops"', '"service_name"', '"city_code"',
'"begin_time"', '"meal_description"', '"end_time"', '"minutes_distant"', '"round_trip_required"',
'"one_direction_cost"', '"day_number"', '"flight_id"', '"time_zone_code"', '"wing_span"',
'"length"', '"stop_number"', '"pay_load"', '"airport_code"', '"miles_distant"',
'"hours_from_gmt"', '"departure_airline"', '"to_airport"', '"rank"', '"city_name"',
'"dual_airline"', '"saturday_stay_required"', '"economy"', '"weight"', '"premium"',
'"booking_class"', '"day_name"', '"airport_location"', '"ground_fare"', '"days_code"',
'"note"', '"transport_type"', '"basic_type"', '"compartment"', '"leg_flight"',
'"arrival_airline"', '"maximum_stay"', '"month_number"', '"minimum_stay"', '"state_name"',
'"flight_number"', '"year"', '"airline_flight"', '"country_name"', '"arrival_flight_number"',
'"dual_carrier"', '"meal_number"', '"class_description"', '"departure_time"', '"airline_name"',
'"airline_code"', '"application"', '"fare_basis_code"', '"stopovers"', '"high_flight_number"',
'"airport_name"', '"low_flight_number"', '"discounted"', '"season"', '"advance_purchase"',
'"arrival_time"', '"basis_days"', '"leg_number"', '"main_airline"', '"aircraft_code_sequence"',
'"stop_days"', '"time_elapsed"', '"aircraft_code_base"', '"connections"', '"state_code"', '"night"',
'"cruising_speed"', '"direction"', '"round_trip_cost"', '"description"', '"code"', '"aircraft_code"'
]
GRAMMAR_DICTIONARY["non_literal_number"] = ['"137338"', '"1600"', '"1645"', '"2130"', '"1940"', '"1628"', '"1017"', '"2220"', '"2300"', '"1083"', '"1850"', '"1000"', '"1765"', '"1024"', '"1030"', '"1615"', '"1994"', '"1222"', '"1630"', '"1291"', '"1130"', '"2230"', '"2153"', '"1500"', '"1220"', '"1830"', '"1930"', '"1620"', '"1845"', '"1288"', '"1159"', '"1110"', '"1209"', '"1990"', '"1425"', '"1039"', '"1530"', '"2134"', '"1401"', '"1430"', '"1993"', '"1205"', '"3357"', '"1200"', '"1300"', '"1900"', '"1991"', '"1310"', '"1730"', '"2400"', '"1745"', '"1619"', '"1923"', '"2030"', '"1700"', '"1505"', '"2200"', '"2000"', '"1230"', '"4400"', '"1992"', '"2359"', '"1759"', '"1410"', '"2159"', '"2226"', '"1115"', '"2100"', '"2358"', '"2330"', '"2010"', '"1715"', '"1145"', '"1330"', '"1045"', '"1245"', '"1524"', '"1400"', '"1100"', '"3724"', '"1800"', '"727"', '"324"', '"163"', '"540"', '"106"', '"500"', '"329"', '"539"', '"650"',
'"73S"', '"466"', '"645"', '"800"', '"281"', '"200"', '"417"', '"402"', '"767"', '"766"', '"323"', '"343"', '"601"', '"497"', '"720"', '"928"', '"700"', '"737"', '"269"', '"838"', '"747"', '"900"', '"72S"', '"600"', '"459"', '"257"', '"345"', '"746"', '"825"', '"932"', '"139"', '"823"', '"100"', '"400"', '"530"', '"555"', '"815"', '"813"', '"311"', '"315"', '"852"', '"201"', '"301"', '"430"', '"229"', '"930"', '"279"', '"505"', '"755"', '"415"', '"212"', '"705"', '"297"', '"150"', '"210"', '"230"', '"733"', '"352"', '"771"', '"300"', '"845"', '"630"', '"270"', '"757"', '"217"', '"271"', '"718"', '"734"', '"830"', '"730"', '"819"', '"416"', '"296"', '"31"', '"20"', '"17"', '"19"', '"27"', '"22"', '"28"', '"71"', '"82"', '"14"', '"24"', '"29"', '"21"', '"25"', '"18"', '"12"', '"13"', '"11"', '"16"', '"30"', '"98"', '"23"', '"60"', '"26"', '"15"', '"10"', '"41"', '"8"', '"0"', '"2"', '"9"', '"6"', '"5"', '"1"', '"3"', '"4"', '"7"']
GRAMMAR_DICTIONARY["string"] = ['"\'%canadian airlines international%\'"', '"\'general mitchell international\'"', '"\'stapleton international\'"', '"\'continental airlines\'"', '"\'%canadian airlines%\'"', '"\'westchester county\'"', '"\'air taxi operation\'"', '"\'salt lake city\'"', '"\'north carolina\'"', '"\'st. petersburg\'"', '"\'rapid transit\'"', '"\'san francisco\'"', '"\'indianapolis\'"', '"\'philadelphia\'"', '"\'kansas city\'"', '"\'%limousine%\'"', '"\'los angeles\'"', '"\'minneapolis\'"', '"\'%stapleton%\'"', '"\'cincinnati\'"', '"\'washington\'"', '"\'new jersey\'"', '"\'long beach\'"', '"\'fort worth\'"', '"\'rental car\'"', '"\'pittsburgh\'"', '"\'california\'"', '"\'limousine\'"', '"\'cleveland\'"', '"\'st. louis\'"', '"\'minnesota\'"', '"\'san diego\'"', '"\'baltimore\'"', '"\'las vegas\'"', '"\'nashville\'"', '"\'wednesday\'"', '"\'charlotte\'"', '"\'breakfast\'"', '"\'turboprop\'"', '"\'tennessee\'"', '"\'%pearson%\'"', '"\'milwaukee\'"', '"\'thursday\'"', '"\'columbus\'"', '"\'new york\'"', '"\'san jose\'"', '"\'colorado\'"', '"\'michigan\'"', '"\'%lester%\'"', '"\'business\'"', '"\'saturday\'"', '"\'missouri\'"', '"\'montreal\'"', '"\'st. paul\'"', '"\'phoenix\'"', '"\'burbank\'"', '"\'toronto\'"', '"\'orlando\'"', '"\'houston\'"', '"\'florida\'"', '"\'%delta%\'"', '"\'indiana\'"', '"\'arizona\'"', '"\'seattle\'"', '"\'oakland\'"', '"\'chicago\'"', '"\'georgia\'"', '"\'memphis\'"', '"\'detroit\'"', '"\'ontario\'"', '"\'atlanta\'"', '"\'tuesday\'"', '"\'boston\'"', '"\'friday\'"', '"\'newark\'"', '"\'canada\'"', '"\'dinner\'"', '"\'denver\'"',
'"\'quebec\'"', '"\'dallas\'"', '"\'boeing\'"', '"\'tacoma\'"', '"\'monday\'"', '"\'thrift\'"', '"\'sunday\'"', '"\'%taxi%\'"', '"\'united\'"', '"\'nevada\'"', '"\'miami\'"', '"\'daily\'"', '"\'ap/57\'"', '"\'texas\'"', '"\'ap/58\'"', '"\'coach\'"', '"\'snack\'"', '"\'lunch\'"', '"\'first\'"', '"\'ap/80\'"', '"\'usair\'"', '"\'ap/68\'"', '"\'ap/55\'"', '"\'tampa\'"', '"\'asd\'"', '"\'taxi\'"', '"\'sd/d\'"', '"\'utah\'"', '"\'%s/%\'"', '"\'ohio\'"', '"\'dc10\'"', '"\'767\'"', '"\'jfk\'"', '"\'mke\'"', '"\'pit\'"', '"\'jet\'"', '"\'bos\'"', '"\'ont\'"', '"\'mco\'"', '"\'ewr\'"', '"\'j31\'"', '"\'bwi\'"', '"\'tpa\'"', '"\'iah\'"', '"\'iad\'"', '"\'ord\'"', '"\'yyz\'"', '"\'747\'"', '"\'d/s\'"', '"\'hou\'"', '"\'727\'"', '"\'100\'"', '"\'733\'"', '"\'72s\'"', '"\'atl\'"', '"\'cvg\'"', '"\'dfw\'"', '"\'oak\'"', '"\'757\'"', '"\'f28\'"', '"\'phl\'"', '"\'m80\'"', '"\'737\'"', '"\'den\'"', '"\'mia\'"', '"\'lga\'"', '"\'bur\'"', '"\'73s\'"', '"\'slc\'"', '"\'dtw\'"', '"\'d9s\'"', '"\'lax\'"', '"\'sfo\'"', '"\'dal\'"', '"\'yes\'"', '"\'734\'"', '"\'d10\'"', '"\'bna\'"', '"\'yx\'"', '"\'tw\'"', '"\'tx\'"', '"\'qo\'"', '"\'qx\'"', '"\'hp\'"', '"\'ga\'"', '"\'bh\'"', '"\'wn\'"', '"\'ff\'"', '"\'co\'"', '"\'dl\'"', '"\'qw\'"', '"\'bb\'"', '"\'cp\'"', '"\'nx\'"', '"\'lh\'"', '"\'fn\'"', '"\'yn\'"', '"\'ap\'"', '"\'ls\'"', '"\'ca\'"', '"\'no\'"', '"\'nw\'"', '"\'s/\'"', '"\'dc\'"', '"\'sa\'"', '"\'ea\'"', '"\'ml\'"', '"\'kw\'"', '"\'us\'"', '"\'aa\'"', '"\'as\'"', '"\'ac\'"', '"\'ua\'"', '"\'q\'"', '"\'b\'"', '"\'f\'"', '"\'h\'"', '"\'y\'"', '"\'c\'"', '"\'m\'"', '"\'s\'"']
COPY_TERMINAL_SET = {"non_literal_number", "string"}
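# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only): the project builds its parsers via
# grammars.grammar.get_grammar, but the rule dictionary above can also be fed
# directly to the third-party `parsimonious` package, assuming it is installed.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    from parsimonious.grammar import Grammar
    grammar_text = '\n'.join('{} = {}'.format(lhs, ' / '.join(rhs))
                             for lhs, rhs in GRAMMAR_DICTIONARY.items())
    sql_grammar = Grammar(grammar_text)
    # the first rule ("statement") is the default start symbol
    tree = sql_grammar.parse('select distinct flight_1.flight_id from flight_base flight_1')
    print(tree)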
| 12,125 | 93.734375 | 1,617 |
py
|
Unimer
|
Unimer-master/grammars/atis/atis_normalization.py
|
# coding=utf8
import re
def standardize_lambda_calculus_varnames(ans):
toks = ans.split(' ')
varnames = {}
new_toks = []
for t in toks:
if t == 'x' or t.startswith('$'):
if ':' in t:
# var definition
splits = t.split(':')
name, var_type = splits[0], splits[1]
assert name not in varnames
new_name = '$v%d' % len(varnames)
varnames[name] = new_name
new_toks.append(new_name + ":" + var_type)
else:
# t is a variable name
if t in varnames:
new_toks.append(varnames[t])
else:
new_varname = '$v%d' % len(varnames)
varnames[t] = new_varname
new_toks.append(new_varname)
else:
new_toks.append(t)
lf = ' '.join(new_toks)
return lf
def normalize_lambda_calculus(logical_form):
s = standardize_lambda_calculus_varnames(logical_form)
s = s.replace(' :', ":")
s = re.sub(r'\s+', ' ', s)  # collapse runs of whitespace before tightening parentheses
s = s.replace("( ", "(").replace(" )", ")").replace(") )", "))").replace(" )", ")").strip().lower()
variables = ["$v0", "$v1", "$v10", "$1", "$2", "$3", "$y",
"$0", "$v7", "$v3", "$f", "$x", "$v6", "$v14", "$airline", "$v2", "$v5", "x"]
for var in variables:
s = s.replace(var + " e ", "%s:e " % var)
s = s.replace(var + " i ", "%s:i " % var)
return s
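# Illustrative example (hypothetical input):
# normalize_lambda_calculus('( _lambda $0 e ( _flight $0 ) )')
# -> '(_lambda $v0:e (_flight $v0))'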
def to_lisp_tree(logical_form):
expr = logical_form.replace("(", " ( ").replace(")", " ) ").strip()
expr = re.sub(' +', ' ', expr)
toks = expr.split(' ')
def recurse(i):
if toks[i] == '(':
subtrees = []
j = i+1
while True:
subtree, j = recurse(j)
subtrees.append(subtree)
if toks[j] == ')':
return subtrees, j + 1
else:
return toks[i], i+1
try:
lisp_tree, final_ind = recurse(0)
return lisp_tree
except Exception as e:
print('Failed to convert "%s" to lisp tree' % expr)
print(e)
return None
def postprocess_lambda_calculus(logical_form):
lisp_tree = to_lisp_tree(logical_form)
if lisp_tree is None: return logical_form
# Post-order traversal, sorting as we go
def recurse(node):
if isinstance(node, str): return
for child in node:
recurse(child)
if node[0] in ('_and', '_or'):
node[1:] = sorted(node[1:], key=lambda x: str(x))
recurse(lisp_tree)
def tree_to_str(node):
if isinstance(node, str):
return node
else:
return '( %s )' % ' '.join(tree_to_str(child) for child in node)
return tree_to_str(lisp_tree)
def preprocess_sql(logical_form):
s = re.sub(' +', ' ', logical_form).lower()
s = s.replace('and 1 = 1', '')
s = s.replace('where 1 = 1', '')
s = s.replace('max (', 'max(')
s = s.replace('min (', 'min(')
s = s.replace('avg (', 'avg(')
s = s.replace('count (', 'count(')
s = s.replace('sum (', 'sum(')
s = s.replace('< =', '<=')
s = s.replace('> =', '>=')
# Domain
s = s.replace(' flight ', ' flight_base ')
s = s.replace(' airport ', ' airport_base ')
s = s.replace(' fare ', ' fare_base ')
s = s.replace('aircraft_code ', 'aircraft_code_base ')
s = s.replace('aircraft_code)', 'aircraft_code_base)')
s = re.sub(' +', ' ', s)
s = s.replace("( ", "(").replace(" )", ")").replace(";", "").replace('"', "'").replace(' . ', '.').strip()
return s.strip()
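# Illustrative example (hypothetical query): reserved table names are rewritten to
# their *_base aliases so the string matches the SQL grammar, e.g.
# preprocess_sql("SELECT DISTINCT flight_1.flight_id FROM flight flight_1 ;")
# -> "select distinct flight_1.flight_id from flight_base flight_1"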
def postprocess_sql(logical_form):
# Domain
s = logical_form + ';'
s = s.replace(' flight_base ', ' flight ')
s = s.replace(' airport_base ', ' airport ')
s = s.replace(' fare_base ', ' fare ')
s = s.replace('aircraft_code_base ', 'aircraft_code ')
s = s.replace('aircraft_code_base)', 'aircraft_code)')
s = s.replace('aircraft_code_base;', 'aircraft_code;')
return s
def standardize_prolog_varnames(ans):
toks = ans.split(' ')
varnames = {}
new_toks = []
for t in toks:
if re.fullmatch('[A-Ba-b]', t):  # single-letter Prolog variable tokens
if t in varnames:
new_toks.append(varnames[t])
else:
new_varname = chr(ord('A')+len(varnames))
varnames[t] = new_varname
new_toks.append(new_varname)
else:
new_toks.append(t)
lf = ' '.join(new_toks)
return lf
def tokenize_prolog(logical_form):
normalized_lf = logical_form.replace(" ", "::")
replacements = [
('(', ' ( '),
(')', ' ) '),
(',', ' , '),
("\\+", " \\+ "),
]
for a, b in replacements:
normalized_lf = normalized_lf.replace(a, b)
toks = [t if "::" not in t else t.replace(
"::", " ") for t in normalized_lf.split()]
return toks
# Pre/Post Processors of Prolog
def preprocess_prolog(logical_form):
tokens = tokenize_prolog(logical_form)
standardized = standardize_prolog_varnames(" ".join(tokens)).replace(" ", "").lower()
return standardized
def normalize_prolog_variable_names(logical_form):
"""Standardize variable names in Prolog with De Brujin indices."""
toks = tokenize_prolog(logical_form)
# Replace Variable
cur_vars = []
new_toks = []
for w in toks:
if len(w) == 1 and w.isalpha() and re.match('[a-z]', w):
if w in cur_vars:
ind_from_end = len(cur_vars) - cur_vars.index(w) - 1
new_toks.append('V%d' % ind_from_end)
else:
cur_vars.append(w)
new_toks.append('NV')
else:
new_toks.append(w)
return ''.join(new_toks).lower()
def preprocess_funql(lf):
l = re.sub(r"\s*\(\s*", "(", lf)
l = re.sub(r"\s*\)\s*", ")", l)
l = re.sub(r"\s*,\s*", ",", l)
l = l.lower()
return l
if __name__ == '__main__':
processed = preprocess_prolog('answer_1(A,(is_flight(A),is_to(A,B),const(B,airport_code(mke))))')
print(processed)
| 6,133 | 29.979798 | 114 |
py
|
Unimer
|
Unimer-master/grammars/atis/atis_entity_extractor.py
|
# coding=utf8
from allennlp.data import Token
def lambda_calculus_entity_extractor(grammar, lc):
"""
:param grammar: Lambda Calculus Grammar 1
:param lc: lambda calculus logical form
:return:
"""
applied_production_rules = grammar.parse(lc)
entities = set()
for rule in applied_production_rules:
if rule.lhs == 'constant' and len(rule.rhs_nonterminal) == 0:
# country
entities.add(rule.rhs.replace('"', "").replace('(', '').replace(')', '').replace("\'", "").replace('[', "").replace("]", ""))
print(rule)
elif rule.lhs in grammar.copy_terminal_set:
rhs = rule.rhs.replace('"', "").replace("\'", "").replace('[', "").replace("]", "")
if rhs == '_':
continue
entities.add(rhs)
return entities
def funql_entity_extractor(grammar, funql):
"""
:param grammar: FunQL grammar 1
:param funql:
:return:
"""
applied_production_rules = grammar.parse(funql)
entities = set()
for rule in applied_production_rules:
if rule.lhs == 'object' and len(rule.rhs_nonterminal) == 0:
value = rule.rhs.replace('"', "").replace('(', '').replace(')', '').replace("\'", "").replace('[', "").replace("]", "")
if value == 'hour9':
entities.add('9')
elif value == 'days_codesa':
entities.add('sa')
else:
assert value == 'manufacturerboeing'
entities.add('boeing')
elif rule.lhs in grammar.copy_terminal_set:
rhs = rule.rhs.replace('"', "").replace("\'", "").replace('[', "").replace("]", "")
if rhs == '_':
continue
entities.add(rhs)
return entities
def prolog_entity_extractor(grammar, prolog):
"""
:param grammar: Prolog grammar 1
:param prolog: prolog logical form
:return:
"""
applied_production_rules = grammar.parse(prolog)
entities = set()
for rule in applied_production_rules:
if rule.lhs == 'object' and len(rule.rhs_nonterminal) == 0:
value = rule.rhs.replace('"', "").replace('(', '').replace(')', '').replace("\'", "").replace('[', "").replace("]", "")
if value == 'hour9':
entities.add('9')
elif value == 'days_codesa':
entities.add('sa')
else:
assert value == 'manufacturerboeing'
entities.add('boeing')
elif rule.lhs in grammar.copy_terminal_set:
rhs = rule.rhs.replace('"', "").replace("\'", "").replace('[', "").replace("]", "")
if rhs == '_':
continue
entities.add(rhs)
return entities
def replace_lambda_calculus_entity(grammar, lc, lc_tokens, candidates):
entities = lambda_calculus_entity_extractor(grammar, lc)
replaced_tokens = list()
is_valid = True
for token in lc_tokens:
text = token.text.replace("'", "")
if text in entities:
# entity
for candidate in candidates:
if candidate['formatted_value'] == text:
replaced_tokens.append(Token('@entity_%d' % candidate['index']))
break
else:
is_valid = False
replaced_tokens.append(token)
else:
replaced_tokens.append(token)
return is_valid, replaced_tokens
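# Illustrative call (hypothetical candidate list; only the 'formatted_value' and
# 'index' keys are used by the loop above):
# candidates = [{'index': 0, 'formatted_value': 'washington:_ci'}]
# is_valid, tokens = replace_lambda_calculus_entity(grammar, lc, lc_tokens, candidates)
# -> tokens matching an extracted entity are rewritten to Token('@entity_0')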
def replace_funql_entity(grammar, funql, funql_tokens, candidates):
entities = funql_entity_extractor(grammar, funql)
replaced_tokens = list()
is_valid = True
for token in funql_tokens:
text = token.text.replace("'", "")
if text in entities:
# entity
for candidate in candidates:
if candidate['formatted_value'] == text:
replaced_tokens.append(Token('@entity_%d' % candidate['index']))
break
else:
is_valid = False
replaced_tokens.append(token)
else:
replaced_tokens.append(token)
return is_valid, replaced_tokens
def replace_prolog_entity(grammar, prolog, prolog_tokens, candidates):
entities = prolog_entity_extractor(grammar, prolog)
replaced_tokens = list()
is_valid = True
for token in prolog_tokens:
text = token.text.replace("'", "")
if text in entities:
# entity
for candidate in candidates:
if candidate['formatted_value'] == text:
replaced_tokens.append(Token('@entity_%d' % candidate['index']))
break
else:
is_valid = False
replaced_tokens.append(token)
else:
replaced_tokens.append(token)
return is_valid, replaced_tokens
def test_lambda_calculus_entity_extractor():
import sys
sys.path += ['../../']
from grammars.grammar import get_grammar
from grammars.utils import get_logical_form_preprocessor
preprocessor = get_logical_form_preprocessor('atis', 'lambda')
grammar = get_grammar('atis', 'lambda')
lc = preprocessor("( _lambda $v1 e ( _exists $v0 ( _and ( _flight $v0 ) ( _from $v0 washington:_ci ) ( _to $v0 toronto:_ci ) ( _day $v0 saturday:_da ) ( _= ( _fare $v0 ) $v1 ) ) ) )")
entities = lambda_calculus_entity_extractor(grammar, lc)
print(entities)
def test_funql_entity_extractor():
import sys
sys.path += ['../../']
from grammars.grammar import get_grammar
from grammars.utils import get_logical_form_preprocessor
preprocessor = get_logical_form_preprocessor('atis', 'funql')
grammar = get_grammar('atis', 'funql')
lc = preprocessor("answer(intersection(_meal_2(meal_description(snack)),_airline_2(airline_code(ff))))")
entities = funql_entity_extractor(grammar, lc)
print(entities)
if __name__ == '__main__':
test_funql_entity_extractor()
| 5,921 | 35.109756 | 187 |
py
|
Unimer
|
Unimer-master/grammars/job/job_tokenizer.py
|
# coding=utf8
from typing import List
from overrides import overrides
from allennlp.data.tokenizers import Token, WordTokenizer
from allennlp.data.tokenizers.word_splitter import WordSplitter
class PrologWordSplitter(WordSplitter):
@overrides
def split_words(self, logical_form: str) -> List[Token]:
normalized_lf = logical_form.replace(" ", "::")
replacements = [
('(', ' ( '),
(')', ' ) '),
(',', ' , '),
("\\+", " \\+ "),
]
for a, b in replacements:
normalized_lf = normalized_lf.replace(a, b)
tokens = [Token(t) if "::" not in t else Token(
t.replace("::", " ")) for t in normalized_lf.split()]
return tokens
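# Illustrative behaviour (hypothetical input): spaces inside quoted strings are
# protected by the '::' placeholder, so multi-word constants stay single tokens, e.g.
# PrologWordSplitter().split_words("language(A,'visual basic')")
# -> tokens with texts ['language', '(', 'A', ',', "'visual basic'", ')']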
class PrologWordSplitter2(WordSplitter):
@overrides
def split_words(self, logical_form: str) -> List[Token]:
normalized_lf = logical_form.replace(" ", "::")
replacements = [
('(', ' ( '),
(')', ' ) '),
(',', ' , '),
("\\+", " \\+ "),
("'", " ' "),
]
for a, b in replacements:
normalized_lf = normalized_lf.replace(a, b)
tokens = list()
for t in normalized_lf.split():
if "::" not in t:
tokens.append(Token(t))
else:
tokens += [Token(_t) for _t in t.replace("::", " ").split()]
return tokens
class FunQLWordSplitter(WordSplitter):
@overrides
def split_words(self, logical_form: str) -> List[Token]:
normalized_lf = logical_form.replace(" ", "::")
replacements = [
('(', ' ( '),
(')', ' ) '),
(',', ' , '),
("\\+", " \\+ "),
]
for a, b in replacements:
normalized_lf = normalized_lf.replace(a, b)
tokens = [Token(t) if "::" not in t else Token(
t.replace("::", " ")) for t in normalized_lf.split()]
return tokens
class FunQLWordSplitter2(WordSplitter):
@overrides
def split_words(self, logical_form: str) -> List[Token]:
normalized_lf = logical_form.replace(" ", "::")
replacements = [
('(', ' ( '),
(')', ' ) '),
(',', ' , '),
("\\+", " \\+ "),
("'", " ' "),
]
for a, b in replacements:
normalized_lf = normalized_lf.replace(a, b)
tokens = list()
for t in normalized_lf.split():
if "::" not in t:
tokens.append(Token(t))
else:
tokens += [Token(_t) for _t in t.replace("::", " ").split()]
return tokens
class SQLWordSplitter(WordSplitter):
@overrides
def split_words(self, logical_form: str) -> List[Token]:
normalized_sql = logical_form
replacements = [
('(', ' ( '),
(')', ' ) '),
(',', ' , '),
('.', ' . '),
("'", " \\' ")
]
for a, b in replacements:
normalized_sql = normalized_sql.replace(a, b)
tokens = [Token(t) for t in normalized_sql.split()]
return tokens
class LambdaCalculusWordSplitter(WordSplitter):
@overrides
def split_words(self, logical_form: str) -> List[Token]:
normalized_lc = logical_form
replacements = [
('(', ' ( '),
(')', ' ) '),
("'", " \\' ")
]
for a, b in replacements:
normalized_lc = normalized_lc.replace(a, b)
tokens = [Token(t) for t in normalized_lc.split()]
return tokens
def get_logical_tokenizer(language: str) -> WordSplitter:
splitter = None
if language == 'prolog':
splitter = PrologWordSplitter()
elif language == 'prolog2':
splitter = PrologWordSplitter2()
elif language == 'funql':
splitter = FunQLWordSplitter()
elif language == 'funql2':
splitter = FunQLWordSplitter2()
elif language == 'sql':
splitter = SQLWordSplitter()
elif language == 'lambda':
splitter = LambdaCalculusWordSplitter()
assert splitter is not None
return splitter
if __name__ == '__main__':
prolog = "(lambda $0:e (and (job $0) (language $0 perl) (company $0 \'Lockheed Martin Aeronautics\') (loc $0 colorado)))"
spliiter = get_logical_tokenizer('lambda')
tokenizer = WordTokenizer(spliiter)
tokens = tokenizer.tokenize(prolog)
print(tokens)
| 4,463 | 30.216783 | 125 |
py
|
Unimer
|
Unimer-master/grammars/job/get_prolog_terminals.py
|
# coding=utf8
import re
def read_data():
questions, logical_forms = list(), list()
paths = [
"../../data/job/job_prolog_train.tsv",
"../../data/job/job_prolog_test.tsv"]
for p in paths:
with open(p, "r") as f:
for line in f:
line = line.strip()
splits = line.split('\t')
questions.append(splits[0])
logical_forms.append(splits[1])
return questions, logical_forms
def get_binary_predicates(logical_forms):
binary_predicates = set()
binary_pattern = re.compile('(([a-z|_|\d]+?)\(([A-Z]|[_|\d]+),([A-Z]|[_|\d]+)\))')
for lf in logical_forms:
matches = binary_pattern.findall(lf)
for match in matches:
binary_predicates.add(match[1])
return binary_predicates
def get_unary_predicates(logical_forms):
unary_predicates = set()
unary_pattern = re.compile('(([a-z|_|\d]+?)\(([A-Z]|[_|\d]+)\))')
for lf in logical_forms:
matches = unary_pattern.findall(lf)
for match in matches:
unary_predicates.add(match[1])
return unary_predicates
def get_terminals(logical_forms):
terminals = set()
terminal_pattern = re.compile("const\(([A-Z]|[_|\d]+),(.+?)\)")
for lf in logical_forms:
matches = terminal_pattern.findall(lf)
for match in matches:
terminals.add(match[1])
return terminals
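# e.g. a logical form containing const(A,'austin') contributes the terminal "'austin'"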
def get_all_salary_values(logical_forms):
salary = set()
salary_pattern = re.compile('salary_greater_than\(([A-Z]|[_|\d]+),(\d+),([a-z]+)\)')
for lf in logical_forms:
matches = salary_pattern.findall(lf)
for match in matches:
salary.add(match[1])
return salary
if __name__ == '__main__':
questions, logical_forms = read_data()
binary_predicates = get_binary_predicates(logical_forms)
print("Binary Relations")
print(binary_predicates)
print("""GRAMMAR_DICTIONARY['binary_relation'] = %s""" % (["(is_%s)" % r for r in binary_predicates]))
for r in binary_predicates:
print("""GRAMMAR_DICTIONARY['is_%s'] = ['("%s(" var "," var ")")']""" % (r, r))
print("==\n\n")
unary_predicates = get_unary_predicates(logical_forms)
print("Unary Relations")
print(unary_predicates)
for r in unary_predicates:
print("""GRAMMAR_DICTIONARY['%s'] = ['("%s(" var ")")']""" % (r, r))
print("===\n\n")
terminals = get_terminals(logical_forms)
print("Terminals")
formatted_terminals = sorted(['"%s"' % t.replace("'", "\'") for t in terminals], key=lambda x: len(x), reverse=True)
print(formatted_terminals)
salary_values = get_all_salary_values(logical_forms)
print("Salary Values:")
print(['"%s"' % v for v in sorted(list(salary_values), key=lambda x: len(x), reverse=True)])
| 2,823 | 31.090909 | 120 |
py
|
Unimer
|
Unimer-master/grammars/job/funql_grammar.py
|
# coding=utf8
"""
FunQL Grammar
"""
ROOT_RULE = 'statement -> [answer]'
GRAMMAR_DICTIONARY = {}
GRAMMAR_DICTIONARY['statement'] = ['(answer ws)']
GRAMMAR_DICTIONARY['answer'] = ['("answer" ws "(" ws predicate ws ")" )']
GRAMMAR_DICTIONARY['predicate'] = [
'meta', 'object', '"job(all)"', 'relation',
'("intersect" ws "(" ws predicate ws "," ws conjunction ws ")")',
'("or" ws "(" ws predicate ws "," ws predicate ws ")")',
'("not" ws "(" ws predicate ws ")")',
]
GRAMMAR_DICTIONARY['conjunction'] = [
'(predicate ws "," ws conjunction)',
'(predicate)'
]
GRAMMAR_DICTIONARY['object'] = ['("const(" string ")")']
GRAMMAR_DICTIONARY['relation'] = [
'("req_exp_1(" ws predicate ws ")")',
'("req_exp_0(" ws predicate ws ")")',
'("req_deg_1(" ws predicate ws ")")',
'("req_deg_0(" ws predicate ws ")")',
'("platform_1(" ws predicate ws ")")',
'("platform_0(" ws predicate ws ")")',
'("language_1(" ws predicate ws ")")',
'("language_0(" ws predicate ws ")")',
'("application_1(" ws predicate ws ")")',
'("company_1(" ws predicate ws ")")',
'("company_0(" ws predicate ws ")")',
'("recruiter_1(" ws predicate ws ")")',
'("des_deg_1(" ws predicate ws ")")',
'("des_exp_1(" ws predicate ws ")")',
'("des_exp_0(" ws predicate ws ")")',
'("country_1(" ws predicate ws ")")',
'("title_1(" ws predicate ws ")")',
'("title_0(" ws predicate ws ")")',
'("area_1(" ws predicate ws ")")',
'("loc_1(" ws predicate ws ")")',
'("loc_0(" ws predicate ws ")")',
'("des_exp(" ws predicate ws ")")',
'("des_deg(" ws predicate ws ")")',
'("req_exp(" ws predicate ws ")")',
'("req_deg(" ws predicate ws ")")',
'("job(" ws predicate ws ")")',
]
GRAMMAR_DICTIONARY['meta'] = [
'("salary_greater_than(" salary_value "," "year" ")")',
'("salary_greater_than(" salary_value "," "hour" ")")',
'("salary_greater_than(" salary_value "," "month" ")")',
'("salary_less_than(" salary_value "," "year" ")")'
]
GRAMMAR_DICTIONARY['string'] = ['"\'Senior Development Engineer\'"', '"\'Lockheed Martin Aeronautics\'"', '"\'Senior Consulting Engineer\'"', '"\'Senior Software Developer\'"', '"\'oil pipeline modeling\'"', '"\'NetWare Administrator\'"', '"\'Verification Engineer\'"', '"\'Systems Administrator\'"', '"\'Manufacturing Manager\'"', '"\'National Instruments\'"', '"\'System Administrator\'"', '"\'research assistant\'"', '"\'Software Developer\'"', '"\'Ic Design Engineer\'"', '"\'Applied Materials\'"', '"\'Software Engineer\'"', '"telecommunications"', '"\'data warehousing\'"', '"\'silicon graphics\'"', '"\'Systems Analyst\'"', '"\'Project Manager\'"', '"speedy3dgraphics"', '"\'microsoft word\'"', '"\'Web Developer\'"', '"\'Test Engineer\'"', '"\'device driver\'"', '"\'visual basic\'"', '"\'Sql Engineer\'"', '"\'3d graphics\'"', '"\'software qa\'"', '"\'san antonio\'"', '"client/server"', '"\'windows nt\'"', '"\'windows 95\'"', '"\'visual j++\'"', '"\'visual c++\'"', '"\'los alamos\'"', '"\'sql server\'"', '"\'Phil Smith\'"', '"powerbuilder"', '"playstation"', '"california"', '"washington"', '"networking"', '"management"', '"commodores"', '" Microsoft"', '"\'cobol ii\'"', '"\'san jose\'"', '"statistics"', '"Consultant"', '"\'new york\'"', '"Programmer"', '"animation"', '"Developer"', '"nashville"', '"Microsoft"', '"colorado"', '"internet"', '"engineer"', '"graphics"', '"database"', '"ethernet"', '"assembly"', '"Longhorn"', '"network"', '"autocad"', '"Trilogy"', '"houston"', '"seattle"', '"solaris"', '"haskell"', '"windows"', '"fortran"', '"tcp/ip"', '"master"', '"prolog"', '"apache"', '"novell"', '"dallas"', '"Boeing"', '"delphi"', '"oracle"', '"canada"', '"Tivoli"', '"tcl/tk"', '"austin"', '"pascal"', '"boston"', '"Compaq"', '"intern"', '"texas"', '"games"', '"tulsa"', '"cobol"', '"Apple"', '"pdp11"', '"shell"', '"linux"', '"latex"', '"excel"', '"Dell"', '"odbc"', '"BSCS"', '"lisp"', '"perl"', '"MSEE"', '"vc++"', '"unix"', '"cics"', '"html"', '"java"', '"MSCS"', '"BSEE"', '"BACS"', '"IBM"', '"rpg"', '"com"', '"ibm"', '"mfc"', '"usa"', '"LCS"', '"vax"', '"sql"', '"c++"', '"sun"', '"JPL"', '"lan"', '"wan"', '"ole"', '"PhD"', '"web"', '"mvs"', '"ada"', '"mac"', '"MBA"', '"aix"', '"vms"', '"gui"', '"x86"', '"ai"', '"BS"', '"BA"', '"10"', '"HP"', '"MA"', '"pc"', '"vb"', '"c"', '"1"', '"4"', '"3"', '"2"', '"5"']
GRAMMAR_DICTIONARY['salary_value'] = [
'"100000"', '"70000"', '"65000"', '"50000"', '"80000"',
'"30000"', '"90000"', '"60000"', '"40000"', '"10000"', '"5000"', '"20"'
]
GRAMMAR_DICTIONARY["ws"] = ['~"\s*"i']
COPY_TERMINAL_SET = {'string', 'salary_value'}
| 4,627 | 69.121212 | 2,343 |
py
|
Unimer
|
Unimer-master/grammars/job/get_sql_terminals.py
| 0 | 0 | 0 |
py
|
|
Unimer
|
Unimer-master/grammars/job/lambda_grammar.py
|
# coding=utf8
ROOT_RULE = 'statement -> [expression]'
GRAMMAR_DICTIONARY = {}
GRAMMAR_DICTIONARY['statement'] = ['(expression ws)']
GRAMMAR_DICTIONARY['expression'] = ['(application)', '(abstraction)', '(constant)', '(variable)']
GRAMMAR_DICTIONARY['abstraction'] = ['("(" ws "lambda" wsp variable_definition wsp expression ws ")")']
GRAMMAR_DICTIONARY['application'] = ['("(" ws function ws ")")']
GRAMMAR_DICTIONARY['function'] = [
'("company" wsp expression wsp expression)',
'("area" wsp expression wsp expression)',
'("platform" wsp expression wsp expression)',
'("recruiter" wsp expression wsp expression)',
'("language" wsp expression wsp expression)',
'("title" wsp expression wsp expression)',
'("application" wsp expression wsp expression)',
'("loc" wsp expression wsp expression)',
'("country" wsp expression wsp expression)',
'("des_exp" wsp expression wsp expression)',
'("des_exp" wsp expression)',
'("req_exp" wsp expression wsp expression)',
'("req_exp" wsp expression)',
'("des_deg" wsp expression wsp expression)',
'("des_deg" wsp expression)',
'("req_deg" wsp expression wsp expression)',
'("req_deg" wsp expression)',
'("job" wsp expression)',
'("and_" wsp application wsp polyvariadic_expression)',
'("or_" wsp application wsp polyvariadic_expression)',
'("not_" wsp expression)',
'("exists_" wsp variable_definition wsp expression)',
'("salary_greater_than" wsp expression wsp expression wsp expression)',
'("salary_less_than" wsp expression wsp expression wsp expression)',
]
GRAMMAR_DICTIONARY['polyvariadic_expression'] = ['(application ws polyvariadic_expression)', '""']
GRAMMAR_DICTIONARY['variable'] = ['"$0"', '"$1"']
GRAMMAR_DICTIONARY['variable_definition'] = ['"$0:e"', '"$1:e"']
GRAMMAR_DICTIONARY['constant'] = ['salary_value', 'string']
GRAMMAR_DICTIONARY['string'] = [
'"\'Senior Development Engineer\'"', '"\'Lockheed Martin Aeronautics\'"',
'"\'Senior Consulting Engineer\'"', '"\'Senior Software Developer\'"', '"\'oil pipeline modeling\'"', '"\'NetWare Administrator\'"', '"\'Verification Engineer\'"', '"\'Systems Administrator\'"', '"\'Manufacturing Manager\'"', '"\'National Instruments\'"', '"\'System Administrator\'"', '"\'research assistant\'"', '"\'Software Developer\'"', '"\'Ic Design Engineer\'"', '"\'Applied Materials\'"', '"\'Software Engineer\'"', '"telecommunications"', '"\'data warehousing\'"', '"\'silicon graphics\'"', '"\'Systems Analyst\'"', '"\'Project Manager\'"', '"speedy3dgraphics"', '"\'microsoft word\'"', '"\'Web Developer\'"', '"\'Test Engineer\'"', '"\'device driver\'"', '"\'visual basic\'"', '"\'Sql Engineer\'"', '"\'3d graphics\'"', '"\'software qa\'"', '"\'san antonio\'"', '"client/server"', '"\'windows nt\'"', '"\'windows 95\'"', '"\'visual j++\'"', '"\'visual c++\'"', '"\'los alamos\'"', '"\'sql server\'"', '"\'Phil Smith\'"', '"powerbuilder"', '"playstation"', '"california"', '"washington"', '"networking"', '"management"', '"commodores"', '" Microsoft"', '"\'cobol ii\'"', '"\'san jose\'"', '"statistics"', '"Consultant"', '"\'new york\'"', '"Programmer"', '"animation"', '"Developer"', '"nashville"', '"Microsoft"', '"colorado"', '"internet"', '"engineer"', '"graphics"', '"database"', '"ethernet"', '"assembly"', '"Longhorn"', '"network"', '"autocad"', '"Trilogy"', '"houston"', '"seattle"', '"solaris"', '"haskell"', '"windows"', '"fortran"', '"tcp/ip"', '"master"', '"prolog"', '"apache"', '"novell"', '"dallas"', '"Boeing"', '"delphi"', '"oracle"', '"canada"', '"Tivoli"', '"tcl/tk"', '"austin"', '"pascal"', '"boston"', '"Compaq"', '"intern"', '"texas"', '"games"', '"tulsa"', '"cobol"', '"Apple"', '"pdp11"', '"shell"', '"linux"', '"latex"', '"excel"', '"Dell"', '"odbc"', '"BSCS"', '"lisp"', '"perl"', '"MSEE"', '"vc++"', '"unix"', '"cics"', '"html"', '"java"', '"MSCS"', '"BSEE"', '"BACS"', '"IBM"', '"rpg"', '"com"', '"ibm"', '"mfc"', '"usa"', '"LCS"', '"vax"', '"sql"', '"c++"', '"sun"', '"JPL"', '"lan"', '"wan"', '"ole"', '"PhD"', '"web"', '"mvs"', '"ada"', '"mac"', '"MBA"', '"aix"', '"vms"', '"gui"', '"x86"', '"ai"', '"BS"', '"BA"', '"10"', '"HP"', '"MA"', '"pc"', '"vb"', '"hour"', '"year"', '"month"', '"c"', '"1"', '"4"', '"3"', '"2"', '"5"',]
GRAMMAR_DICTIONARY['salary_value'] = [
'"100000"', '"70000"', '"65000"', '"50000"', '"80000"',
'"30000"', '"90000"', '"60000"', '"40000"', '"10000"', '"5000"', '"20"'
]
GRAMMAR_DICTIONARY["ws"] = ['~"\s*"i']
GRAMMAR_DICTIONARY["wsp"] = ['~"\s+"i']
COPY_TERMINAL_SET = {'string', 'salary_value'}
| 4,561 | 86.730769 | 2,273 |
py
|
Unimer
|
Unimer-master/grammars/job/__init__.py
| 0 | 0 | 0 |
py
|
|
Unimer
|
Unimer-master/grammars/job/prolog_grammar.py
|
# coding=utf-8
"""
Prolog Grammar
"""
# First-order logical form
ROOT_RULE = 'statement -> ["answer(" var "," goal ")"]'
GRAMMAR_DICTIONARY = {}
GRAMMAR_DICTIONARY['statement'] = ['("answer(" var "," goal ")")']
# Goal
GRAMMAR_DICTIONARY['goal'] = [
'(declaration)',
'(binary_relation)',
'(unit_relation)',
'(meta)',
'("(" goal "," conjunction ")")', # and
'("(" goal ";" conjunction ")")', # or
'("\+(" goal ")")',
'("\+" unit_relation)'
]
GRAMMAR_DICTIONARY['conjunction'] = [
'(goal "," conjunction)',
'(goal ";" conjunction)',
'(goal)'
]
# Meta Predicates
GRAMMAR_DICTIONARY['meta'] = [
'("salary_greater_than(" var "," salary_value "," "year" ")")',
'("salary_greater_than(" var "," salary_value "," "hour" ")")',
'("salary_greater_than(" var "," salary_value "," "month" ")")',
'("salary_less_than(" var "," salary_value "," "year" ")")'
]
# Relation
GRAMMAR_DICTIONARY['unit_relation'] = [
'(is_job)', '(is_unit_req_exp)', '(is_unit_req_deg)',
'(is_unit_des_deg)','(is_unit_des_exp)'
]
GRAMMAR_DICTIONARY['is_job'] = ['("job(" var ")")']
GRAMMAR_DICTIONARY['is_unit_des_exp'] = ['("des_exp(" var ")")']
GRAMMAR_DICTIONARY['is_unit_req_exp'] = ['("req_exp(" var ")")']
GRAMMAR_DICTIONARY['is_unit_des_deg'] = ['("des_deg(" var ")")']
GRAMMAR_DICTIONARY['is_unit_req_deg'] = ['("req_deg(" var ")")']
GRAMMAR_DICTIONARY['binary_relation'] = [
'(is_company)', '(is_req_deg)', '(is_area)', '(is_platform)',
'(is_recruiter)', '(is_const)', '(is_language)', '(is_title)',
'(is_des_deg)', '(is_application)', '(is_req_exp)', '(is_loc)',
'(is_country)', '(is_des_exp)'
]
GRAMMAR_DICTIONARY['is_company'] = ['("company(" var "," var ")")']
GRAMMAR_DICTIONARY['is_req_deg'] = ['("req_deg(" var "," var ")")']
GRAMMAR_DICTIONARY['is_area'] = ['("area(" var "," var ")")']
GRAMMAR_DICTIONARY['is_platform'] = ['("platform(" var "," var ")")']
GRAMMAR_DICTIONARY['is_recruiter'] = ['("recruiter(" var "," var ")")']
GRAMMAR_DICTIONARY['is_const'] = ['("const(" var "," var ")")']
GRAMMAR_DICTIONARY['is_language'] = ['("language(" var "," var ")")']
GRAMMAR_DICTIONARY['is_title'] = ['("title(" var "," var ")")']
GRAMMAR_DICTIONARY['is_des_deg'] = ['("des_deg(" var "," var ")")']
GRAMMAR_DICTIONARY['is_application'] = ['("application(" var "," var ")")']
GRAMMAR_DICTIONARY['is_req_exp'] = ['("req_exp(" var "," var ")")']
GRAMMAR_DICTIONARY['is_loc'] = ['("loc(" var "," var ")")']
GRAMMAR_DICTIONARY['is_country'] = ['("country(" var "," var ")")']
GRAMMAR_DICTIONARY['is_des_exp'] = ['("des_exp(" var "," var ")")']
# Terminal
# Normalized Variable
GRAMMAR_DICTIONARY['var'] = ['"NV"', '"V0"', '"V1"', '"V2"',
'"V3"', '"V4"', '"V5"', '"V6"', '"V7"']
# Declaration
GRAMMAR_DICTIONARY['declaration'] = [
'("const(" var "," string ")")']
GRAMMAR_DICTIONARY['string'] = ['"\'Senior Development Engineer\'"', '"\'Lockheed Martin Aeronautics\'"', '"\'Senior Consulting Engineer\'"', '"\'Senior Software Developer\'"', '"\'oil pipeline modeling\'"', '"\'NetWare Administrator\'"', '"\'Verification Engineer\'"', '"\'Systems Administrator\'"', '"\'Manufacturing Manager\'"', '"\'National Instruments\'"', '"\'System Administrator\'"', '"\'research assistant\'"', '"\'Software Developer\'"', '"\'Ic Design Engineer\'"', '"\'Applied Materials\'"', '"\'Software Engineer\'"', '"telecommunications"', '"\'data warehousing\'"', '"\'silicon graphics\'"', '"\'Systems Analyst\'"', '"\'Project Manager\'"', '"speedy3dgraphics"', '"\'microsoft word\'"', '"\'Web Developer\'"', '"\'Test Engineer\'"', '"\'device driver\'"', '"\'visual basic\'"', '"\'Sql Engineer\'"', '"\'3d graphics\'"', '"\'software qa\'"', '"\'san antonio\'"', '"client/server"', '"\'windows nt\'"', '"\'windows 95\'"', '"\'visual j++\'"', '"\'visual c++\'"', '"\'los alamos\'"', '"\'sql server\'"', '"\'Phil Smith\'"', '"powerbuilder"', '"playstation"', '"california"', '"washington"', '"networking"', '"management"', '"commodores"', '" Microsoft"', '"\'cobol ii\'"', '"\'san jose\'"', '"statistics"', '"Consultant"', '"\'new york\'"', '"Programmer"', '"animation"', '"Developer"', '"nashville"', '"Microsoft"', '"colorado"', '"internet"', '"engineer"', '"graphics"', '"database"', '"ethernet"', '"assembly"', '"Longhorn"', '"network"', '"autocad"', '"Trilogy"', '"houston"', '"seattle"', '"solaris"', '"haskell"', '"windows"', '"fortran"', '"tcp/ip"', '"master"', '"prolog"', '"apache"', '"novell"', '"dallas"', '"Boeing"', '"delphi"', '"oracle"', '"canada"', '"Tivoli"', '"tcl/tk"', '"austin"', '"pascal"', '"boston"', '"Compaq"', '"intern"', '"texas"', '"games"', '"tulsa"', '"cobol"', '"Apple"', '"pdp11"', '"shell"', '"linux"', '"latex"', '"excel"', '"Dell"', '"odbc"', '"BSCS"', '"lisp"', '"perl"', '"MSEE"', '"vc++"', '"unix"', '"cics"', '"html"', '"java"', '"MSCS"', '"BSEE"', '"BACS"', '"IBM"', '"rpg"', '"com"', '"ibm"', '"mfc"', '"usa"', '"LCS"', '"vax"', '"sql"', '"c++"', '"sun"', '"JPL"', '"lan"', '"wan"', '"ole"', '"PhD"', '"web"', '"mvs"', '"ada"', '"mac"', '"MBA"', '"aix"', '"vms"', '"gui"', '"x86"', '"ai"', '"BS"', '"BA"', '"10"', '"HP"', '"MA"', '"pc"', '"vb"', '"c"', '"1"', '"4"', '"3"', '"2"', '"5"']
GRAMMAR_DICTIONARY['salary_value'] = [
'"100000"', '"70000"', '"65000"', '"50000"', '"80000"',
'"30000"', '"90000"', '"60000"', '"40000"', '"10000"', '"5000"', '"20"'
]
COPY_TERMINAL_SET = {'string', 'salary_value'}
| 5,427 | 64.39759 | 2,343 |
py
|
Unimer
|
Unimer-master/grammars/job/job_normalization.py
|
# coding=utf8
import re
def tokenize_prolog(logical_form):
# Tokenize Prolog
normalized_lf = logical_form.replace(" ", "::")
replacements = [
('(', ' ( '),
(')', ' ) '),
(',', ' , '),
("\\+", " \\+ "),
]
for a, b in replacements:
normalized_lf = normalized_lf.replace(a, b)
toks = [t if "::" not in t else t.replace(
"::", " ") for t in normalized_lf.split()]
return toks
def normalize_prolog_variable_names(logical_form):
"""Standardize variable names in Prolog with De Brujin indices."""
toks = tokenize_prolog(logical_form)
# Replace Variable
cur_vars = []
new_toks = []
for w in toks:
if re.match('[A-Z]', w) or re.match('_\d+', w):
if w in cur_vars:
ind_from_end = len(cur_vars) - cur_vars.index(w) - 1
new_toks.append('V%d' % ind_from_end)
else:
cur_vars.append(w)
new_toks.append('NV')
else:
new_toks.append(w)
return ''.join(new_toks)
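# Illustrative example (hypothetical input): the first occurrence of each variable
# becomes NV and later occurrences get an index counted from the most recent variable, e.g.
# normalize_prolog_variable_names("answer(A,(job(A),company(A,B),const(B,'Boeing')))")
# -> "answer(NV,(job(V0),company(V0,NV),const(V0,'Boeing')))"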
def preprocess_prolog(logical_form):
normalized_prolog = normalize_prolog_variable_names(logical_form)
normalized_prolog = re.sub(r"\s*\(\s*", "(", normalized_prolog)
normalized_prolog = re.sub(r"\s*\)\s*", ")", normalized_prolog)
normalized_prolog = re.sub(r"\s*,\s*", ",", normalized_prolog)
normalized_prolog = normalized_prolog.replace("\+ r", "\+r")
return normalized_prolog
def preprocess_funql(lf):
l = re.sub(r"\s*\(\s*", "(", lf)
l = re.sub(r"\s*\)\s*", ")", l)
l = re.sub(r"\s*,\s*", ",", l)
return l
def postprocess_prolog(logical_form):
normalized_prolog = logical_form.replace("windo nt", "windows nt")
normalized_prolog = normalized_prolog.replace("windo 95", "windows 95")
return normalized_prolog
def postprocess_sql(logical_form):
normalized_sql = logical_form.replace("windo nt", "windows nt")
normalized_sql = normalized_sql.replace("windo 95", "windows 95")
normalized_sql = normalized_sql.replace("\\'", "'")
return normalized_sql
def postprocess_lambda(logical_form):
normalized_lc = logical_form.replace("windo nt", "windows nt")
normalized_lc = normalized_lc.replace("windo 95", "windows 95")
normalized_lc = normalized_lc.replace("\\'", "'")
return normalized_lc
def normalize_sql(logical_form):
s = logical_form.replace("( ", "(").replace(" )", ")").replace(
";", "").replace('"', "'").replace(' . ', '.').strip().lower()
s = s.replace('max (', 'max(')
s = s.replace('min (', 'min(')
s = s.replace('avg (', 'avg(')
s = s.replace('count (', 'count(')
s = s.replace('sum (', 'sum(')
s = s.replace('count(1)', 'count(*)')
return s
def normalize_lambda_calculus(logical_form):
s = re.sub(r'\s+', ' ', logical_form)  # collapse runs of whitespace
s = s.replace("( ", "(").replace(" )", ")").replace(') )', '))').replace(' :', ':').strip()
s = s.replace('"', "'").replace(') )', '))')
return s
if __name__ == '__main__':
lc = '(lambda $0:e (and (job $0) (language $0 perl) (company $0 "Lockheed Martin Aeronautics") (loc $0 colorado)))'
normalized_lc = normalize_lambda_calculus(lc).replace("'", "\\'")
lc_ = postprocess_lambda(normalized_lc)
print(lc)
print(normalized_lc)
print(lc_)
| 3,371 | 31.114286 | 120 |
py
|
Unimer
|
Unimer-master/grammars/job/sql_grammar.py
|
# coding=utf8
ROOT_RULE = 'statement -> [mquery]'
GRAMMAR_DICTIONARY = {}
GRAMMAR_DICTIONARY["statement"] = ['(mquery ws)']
GRAMMAR_DICTIONARY["mquery"] = [
'(ws select_clause ws from_clause ws where_clause)',
'(ws select_clause ws from_clause)'
]
# SELECT
GRAMMAR_DICTIONARY["select_clause"] = [
'("select" ws "distinct" ws col_ref)']
# FROM
GRAMMAR_DICTIONARY["from_clause"] = ['(ws "from" ws table_name ws join_clauses)', '(ws "from" ws table_name)']
GRAMMAR_DICTIONARY["join_clauses"] = [
'(join_clause ws join_clauses)', 'join_clause']
GRAMMAR_DICTIONARY["join_clause"] = [
'"join" ws table_name ws "on" ws join_condition']
GRAMMAR_DICTIONARY["join_condition"] = ['ws col_ref ws "=" ws col_ref']
# WHERE
GRAMMAR_DICTIONARY["where_clause"] = ['(ws "where" wsp condition)']
GRAMMAR_DICTIONARY["condition"] = ['(ws single ws "and" wsp condition)',
'(ws single ws "or" wsp condition)',
'(single)']
GRAMMAR_DICTIONARY["single"] = ['(expr)',
'("(" ws condition ws ")")',
'("not" ws single)']
GRAMMAR_DICTIONARY["expr"] = [
'(col_ref wsp "not" wsp "in" wsp "(" ws mquery ws ")")',
'(col_ref wsp "in" wsp "(" ws mquery ws ")")',
'(col_ref wsp "is" wsp "null")',
'(col_ref wsp "is" wsp "not" wsp "null")',
'(col_ref wsp binaryop ws value)',
]
GRAMMAR_DICTIONARY["value"] = ['non_literal_number', 'string']
GRAMMAR_DICTIONARY["col_ref"] = ['(table_name ws "." ws column_name)']
# TODO(MARK): This is not tight enough. AND/OR are strictly boolean value operators.
GRAMMAR_DICTIONARY["binaryop"] = ['"="', '"!="', '"<>"',
'">="', '"<="', '">"', '"<"', ]
GRAMMAR_DICTIONARY["ws"] = ['~"\s*"i']
GRAMMAR_DICTIONARY['wsp'] = ['~"\s+"i']
GRAMMAR_DICTIONARY["table_name"] = ['"job"', '"salary"', '"country"', '"city"',
'"platform"', '"age"', '"application"',
'"area"', '"language"']
GRAMMAR_DICTIONARY["column_name"] = [
'"job_id"', '"age"', '"time"', '"money"', '"language"',
'"country"', '"platform"', '"application"', '"title"',
'"company"', '"major"', '"req_exp"', '"req_deg"',
'"des_deg"', '"des_exp"', '"recruiter"', '"post_day"',
'"area"', '"city_name"'
]
GRAMMAR_DICTIONARY["non_literal_number"] = ['"100000"', '"90000"', '"80000"', '"70000"', '"65000"', '"60000"', '"50000"', '"40000"', '"30000"', '"10000"', '"5000"', '"20"', '"10"', '"5"', '"4"', '"3"', '"2"', '"1"']
GRAMMAR_DICTIONARY["string"] = ['"\'senior development engineer\'"', '"\'lockheed martin aeronautics\'"', '"\'senior consulting engineer\'"', '"\'senior software developer\'"', '"\'manufacturing manager\'"', '"\'systems administrator\'"', '"\'verification engineer\'"', '"\'oil pipeline modeling\'"', '"\'netware administrator\'"', '"\'national instruments\'"', '"\'system administrator\'"', '"\'software developer\'"', '"\'research assistant\'"', '"\'telecommunications\'"', '"\'ic design engineer\'"', '"\'software engineer\'"', '"\'applied materials\'"', '"\'silicon graphics\'"', '"\'speedy3dgraphics\'"', '"\'data warehousing\'"', '"\'systems analyst\'"', '"\'project manager\'"', '"\'microsoft word\'"', '"\'device driver\'"', '"\'web developer\'"', '"\'test engineer\'"', '"\'client/server\'"', '"\'powerbuilder\'"', '"\'visual basic\'"', '"\'sql engineer\'"', '"\'san antonio\'"', '"\'3d graphics\'"', '"\'software qa\'"', '"\'playstation\'"', '"\'programmer\'"', '"\'networking\'"', '"\'commodores\'"', '"\'visual c++\'"', '"\'sql server\'"', '"\'windows nt\'"', '"\'phil smith\'"', '"\'statistics\'"', '"\'washington\'"', '"\'los alamos\'"', '"\'management\'"', '"\'california\'"', '"\'consultant\'"', '"\'windows 95\'"', '"\'visual j++\'"', '"\'microsoft\'"', '"\'developer\'"', '"\'nashville\'"', '"\'animation\'"', '"\'new york\'"', '"\'engineer\'"', '"\'internet\'"', '"\'assembly\'"', '"\'database\'"', '"\'cobol ii\'"', '"\'san jose\'"', '"\'graphics\'"', '"\'longhorn\'"', '"\'colorado\'"', '"\'ethernet\'"', '"\'windows\'"', '"\'seattle\'"', '"\'autocad\'"', '"\'solaris\'"', '"\'network\'"', '"\'haskell\'"', '"\'trilogy\'"', '"\'fortran\'"', '"\'houston\'"', '"\'dallas\'"', '"\'apache\'"', '"\'boeing\'"', '"\'tcp/ip\'"', '"\'tcl/tk\'"', '"\'canada\'"', '"\'boston\'"', '"\'compaq\'"', '"\'prolog\'"', '"\'intern\'"', '"\'novell\'"', '"\'austin\'"', '"\'pascal\'"', '"\'master\'"', '"\'oracle\'"', '"\'tivoli\'"', '"\'delphi\'"', '"\'texas\'"', '"\'shell\'"', '"\'tulsa\'"', '"\'cobol\'"', '"\'month\'"', '"\'pdp11\'"', '"\'excel\'"', '"\'games\'"', '"\'latex\'"', '"\'apple\'"', '"\'linux\'"', '"\'year\'"', '"\'bscs\'"', '"\'bsee\'"', '"\'dell\'"', '"\'mscs\'"', '"\'html\'"', '"\'hour\'"', '"\'unix\'"', '"\'lisp\'"', '"\'cics\'"', '"\'bacs\'"', '"\'vc++\'"', '"\'msee\'"', '"\'java\'"', '"\'odbc\'"', '"\'perl\'"', '"\'mvs\'"', '"\'ole\'"', '"\'web\'"', '"\'usa\'"', '"\'ada\'"', '"\'sun\'"', '"\'vms\'"', '"\'c++\'"', '"\'mba\'"', '"\'gui\'"', '"\'wan\'"', '"\'lcs\'"', '"\'aix\'"', '"\'mfc\'"', '"\'vax\'"', '"\'x86\'"', '"\'mac\'"', '"\'jpl\'"', '"\'lan\'"', '"\'sql\'"', '"\'phd\'"', '"\'ibm\'"', '"\'rpg\'"', '"\'com\'"', '"\'ma\'"', '"\'vb\'"', '"\'pc\'"', '"\'ai\'"', '"\'ba\'"', '"\'bs\'"', '"\'hp\'"', '"\'c\'"']
COPY_TERMINAL_SET = {"non_literal_number", "string"}
| 5,376 | 83.015625 | 2,746 |
py
|
seld-dcase2023
|
seld-dcase2023-main/visualize_seldnet_output.py
|
#
# A utility script that loads a trained SELDnet checkpoint and plots its predicted
# DOA trajectories against the reference labels for a few files of the test split.
#
import numpy as np
import os
import sys
import cls_data_generator
import seldnet_model
import parameters
import torch
from IPython import embed
import matplotlib
matplotlib.use('Agg')
#matplotlib.use('TkAgg')
import matplotlib.pyplot as plot
plot.rcParams.update({'font.size': 22})
def main(argv):
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
# use parameter set defined by user
task_id = '1' if len(argv) < 2 else argv[1]
params = parameters.get_params(task_id)
print('\nLoading the best model and predicting results on the testing split')
print('\tLoading testing dataset:')
data_gen_test = cls_data_generator.DataGenerator(
params=params, split=1, shuffle=False, is_eval=True if params['mode']=='eval' else False
)
data_in, data_out = data_gen_test.get_data_sizes()
dump_figures = True
# CHOOSE THE MODEL WHOSE OUTPUT YOU WANT TO VISUALIZE
checkpoint_name = "models/1_1_foa_dev_split6_model.h5"
model = seldnet_model.SeldModel(data_in, data_out, params)
model.eval()
model.load_state_dict(torch.load(checkpoint_name, map_location=torch.device('cpu')))
model = model.to(device)
if dump_figures:
dump_folder = os.path.join('dump_dir', os.path.basename(checkpoint_name).split('.')[0])
os.makedirs(dump_folder, exist_ok=True)
with torch.no_grad():
file_cnt = 0
for data, target in data_gen_test.generate():
data, target = torch.tensor(data).to(device).float(), torch.tensor(target).to(device).float()
output = model(data)
# (batch, sequence, max_nb_doas*3) to (batch, sequence, 3, max_nb_doas)
max_nb_doas = output.shape[2]//3
output = output.view(output.shape[0], output.shape[1], 3, max_nb_doas).transpose(-1, -2)
target = target.view(target.shape[0], target.shape[1], 3, max_nb_doas).transpose(-1, -2)
# flatten the batch and sequence dimensions so predictions and references can be compared frame by frame.
output, target = output.view(-1, output.shape[-2], output.shape[-1]), target.view(-1, target.shape[-2], target.shape[-1])
output = output.cpu().detach().numpy()
target = target.cpu().detach().numpy()
use_activity_detector = False
if use_activity_detector:
# NOTE: this branch assumes a model variant that also returns an 'activity_out'
# tensor; the SeldModel loaded above does not, so the flag is left disabled.
activity = (torch.sigmoid(activity_out).cpu().detach().numpy() > 0.5)
mel_spec = data[0][0].cpu()
foa_iv = data[0][-1].cpu()
target[target > 1] = 0
plot.figure(figsize=(20,10))
plot.subplot(321), plot.imshow(torch.transpose(mel_spec, -1, -2))
plot.subplot(322), plot.imshow(torch.transpose(foa_iv, -1, -2))
plot.subplot(323), plot.plot(target[:params['label_sequence_length'], 0, 0], 'r', lw=2)
plot.subplot(323), plot.plot(target[:params['label_sequence_length'], 0, 1], 'g', lw=2)
plot.subplot(323), plot.plot(target[:params['label_sequence_length'], 0, 2], 'b', lw=2)
plot.grid()
plot.ylim([-1.1, 1.1])
plot.subplot(324), plot.plot(target[:params['label_sequence_length'], 1, 0], 'r', lw=2)
plot.subplot(324), plot.plot(target[:params['label_sequence_length'], 1, 1], 'g', lw=2)
plot.subplot(324), plot.plot(target[:params['label_sequence_length'], 1, 2], 'b', lw=2)
plot.grid()
plot.ylim([-1.1, 1.1])
if use_activity_detector:
output[:, 0, 0:3] = activity[:, 0][:, np.newaxis]*output[:, 0, 0:3]
output[:, 1, 0:3] = activity[:, 1][:, np.newaxis]*output[:, 1, 0:3]
plot.subplot(325), plot.plot(output[:params['label_sequence_length'], 0, 0], 'r', lw=2)
plot.subplot(325), plot.plot(output[:params['label_sequence_length'], 0, 1], 'g', lw=2)
plot.subplot(325), plot.plot(output[:params['label_sequence_length'], 0, 2], 'b', lw=2)
plot.grid()
plot.ylim([-1.1, 1.1])
plot.subplot(326), plot.plot(output[:params['label_sequence_length'], 1, 0], 'r', lw=2)
plot.subplot(326), plot.plot(output[:params['label_sequence_length'], 1, 1], 'g', lw=2)
plot.subplot(326), plot.plot(output[:params['label_sequence_length'], 1, 2], 'b', lw=2)
plot.grid()
plot.ylim([-1.1, 1.1])
if dump_figures:
fig_name = '{}'.format(os.path.join(dump_folder, '{}.png'.format(file_cnt)))
print('saving figure : {}'.format(fig_name))
plot.savefig(fig_name, dpi=100)
plot.close()
file_cnt += 1
else:
plot.show()
if file_cnt>2:
break
if __name__ == "__main__":
try:
sys.exit(main(sys.argv))
except (ValueError, IOError) as e:
sys.exit(e)
| 5,100 | 41.157025 | 133 |
py
|
seld-dcase2023
|
seld-dcase2023-main/batch_feature_extraction.py
|
# Extracts the features, labels, and normalizes the development and evaluation split features.
import cls_feature_class
import parameters
import sys
def main(argv):
# Expects one input - task-id - corresponding to the configuration given in the parameter.py file.
# Extracts features and labels relevant for the task-id
# It is enough to compute the feature and labels once.
# use parameter set defined by user
task_id = '1' if len(argv) < 2 else argv[1]
params = parameters.get_params(task_id)
# -------------- Extract features and labels for development set -----------------------------
dev_feat_cls = cls_feature_class.FeatureClass(params)
# # Extract features and normalize them
dev_feat_cls.extract_all_feature()
dev_feat_cls.preprocess_features()
# # Extract labels
dev_feat_cls.extract_all_labels()
if __name__ == "__main__":
try:
sys.exit(main(sys.argv))
except (ValueError, IOError) as e:
sys.exit(e)
| 998 | 28.382353 | 102 |
py
|
seld-dcase2023
|
seld-dcase2023-main/cls_feature_class.py
|
# Contains routines for labels creation, features extraction and normalization
#
import os
import numpy as np
import scipy.io.wavfile as wav
from sklearn import preprocessing
import joblib
from IPython import embed
import matplotlib.pyplot as plot
import librosa
plot.switch_backend('agg')
import shutil
import math
import wave
import contextlib
def nCr(n, r):
return math.factorial(n) // math.factorial(r) // math.factorial(n-r)
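# e.g. nCr(4, 2) == 6, the number of unordered channel pairs used for the GCC features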
class FeatureClass:
def __init__(self, params, is_eval=False):
"""
:param params: parameters dictionary
:param is_eval: if True, does not load dataset labels.
"""
# Input directories
self._feat_label_dir = params['feat_label_dir']
self._dataset_dir = params['dataset_dir']
self._dataset_combination = '{}_{}'.format(params['dataset'], 'eval' if is_eval else 'dev')
self._aud_dir = os.path.join(self._dataset_dir, self._dataset_combination)
self._desc_dir = None if is_eval else os.path.join(self._dataset_dir, 'metadata_dev')
# Output directories
self._label_dir = None
self._feat_dir = None
self._feat_dir_norm = None
# Local parameters
self._is_eval = is_eval
self._fs = params['fs']
self._hop_len_s = params['hop_len_s']
self._hop_len = int(self._fs * self._hop_len_s)
self._label_hop_len_s = params['label_hop_len_s']
self._label_hop_len = int(self._fs * self._label_hop_len_s)
self._label_frame_res = self._fs / float(self._label_hop_len)
self._nb_label_frames_1s = int(self._label_frame_res)
self._win_len = 2 * self._hop_len
self._nfft = self._next_greater_power_of_2(self._win_len)
self._dataset = params['dataset']
self._eps = 1e-8
self._nb_channels = 4
self._multi_accdoa = params['multi_accdoa']
self._use_salsalite = params['use_salsalite']
if self._use_salsalite and self._dataset=='mic':
# Initialize the spatial feature constants
self._lower_bin = int(np.floor(params['fmin_doa_salsalite'] * self._nfft / float(self._fs)))
self._lower_bin = np.max((1, self._lower_bin))
            self._upper_bin = int(np.floor(np.min((params['fmax_doa_salsalite'], self._fs//2)) * self._nfft / float(self._fs)))
# Normalization factor for salsalite
c = 343
self._delta = 2 * np.pi * self._fs / (self._nfft * c)
self._freq_vector = np.arange(self._nfft//2 + 1)
self._freq_vector[0] = 1
self._freq_vector = self._freq_vector[None, :, None] # 1 x n_bins x 1
# Initialize spectral feature constants
            self._cutoff_bin = int(np.floor(params['fmax_spectra_salsalite'] * self._nfft / float(self._fs)))
            assert self._upper_bin <= self._cutoff_bin, 'Upper bin for doa feature {} is higher than cutoff bin for spectrogram {}!'.format(self._upper_bin, self._cutoff_bin)
self._nb_mel_bins = self._cutoff_bin-self._lower_bin
else:
self._nb_mel_bins = params['nb_mel_bins']
self._mel_wts = librosa.filters.mel(sr=self._fs, n_fft=self._nfft, n_mels=self._nb_mel_bins).T
# Sound event classes dictionary
self._nb_unique_classes = params['unique_classes']
self._filewise_frames = {}
def get_frame_stats(self):
if len(self._filewise_frames)!=0:
return
print('Computing frame stats:')
print('\t\taud_dir {}\n\t\tdesc_dir {}\n\t\tfeat_dir {}'.format(
self._aud_dir, self._desc_dir, self._feat_dir))
for sub_folder in os.listdir(self._aud_dir):
loc_aud_folder = os.path.join(self._aud_dir, sub_folder)
for file_cnt, file_name in enumerate(os.listdir(loc_aud_folder)):
wav_filename = '{}.wav'.format(file_name.split('.')[0])
with contextlib.closing(wave.open(os.path.join(loc_aud_folder, wav_filename),'r')) as f:
audio_len = f.getnframes()
nb_feat_frames = int(audio_len / float(self._hop_len))
nb_label_frames = int(audio_len / float(self._label_hop_len))
self._filewise_frames[file_name.split('.')[0]] = [nb_feat_frames, nb_label_frames]
return
def _load_audio(self, audio_path):
fs, audio = wav.read(audio_path)
audio = audio[:, :self._nb_channels] / 32768.0 + self._eps
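        # scipy's wav.read returns int16 samples for 16-bit PCM, so dividing by 32768.0 scales them to roughly [-1, 1);
        # adding _eps avoids exact zeros in later log/division steps.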
return audio, fs
# INPUT FEATURES
@staticmethod
def _next_greater_power_of_2(x):
return 2 ** (x - 1).bit_length()
def _spectrogram(self, audio_input, _nb_frames):
_nb_ch = audio_input.shape[1]
nb_bins = self._nfft // 2
spectra = []
for ch_cnt in range(_nb_ch):
stft_ch = librosa.core.stft(np.asfortranarray(audio_input[:, ch_cnt]), n_fft=self._nfft, hop_length=self._hop_len,
win_length=self._win_len, window='hann')
spectra.append(stft_ch[:, :_nb_frames])
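        # Stacking the per-channel STFTs and transposing yields an array of shape (nb_frames, nfft//2 + 1, nb_channels).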
return np.array(spectra).T
def _get_mel_spectrogram(self, linear_spectra):
mel_feat = np.zeros((linear_spectra.shape[0], self._nb_mel_bins, linear_spectra.shape[-1]))
for ch_cnt in range(linear_spectra.shape[-1]):
mag_spectra = np.abs(linear_spectra[:, :, ch_cnt])**2
mel_spectra = np.dot(mag_spectra, self._mel_wts)
log_mel_spectra = librosa.power_to_db(mel_spectra)
mel_feat[:, :, ch_cnt] = log_mel_spectra
mel_feat = mel_feat.transpose((0, 2, 1)).reshape((linear_spectra.shape[0], -1))
return mel_feat
def _get_foa_intensity_vectors(self, linear_spectra):
W = linear_spectra[:, :, 0]
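        # W (channel 0) is the omnidirectional FOA channel; the intensity vector below is Re{conj(W) * directional channels},
        # normalised by the total energy E.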
I = np.real(np.conj(W)[:, :, np.newaxis] * linear_spectra[:, :, 1:])
E = self._eps + (np.abs(W)**2 + ((np.abs(linear_spectra[:, :, 1:])**2).sum(-1))/3.0 )
I_norm = I/E[:, :, np.newaxis]
I_norm_mel = np.transpose(np.dot(np.transpose(I_norm, (0,2,1)), self._mel_wts), (0,2,1))
foa_iv = I_norm_mel.transpose((0, 2, 1)).reshape((linear_spectra.shape[0], self._nb_mel_bins * 3))
if np.isnan(foa_iv).any():
print('Feature extraction is generating nan outputs')
exit()
return foa_iv
def _get_gcc(self, linear_spectra):
gcc_channels = nCr(linear_spectra.shape[-1], 2)
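        # One GCC-PHAT feature map per unordered microphone pair, i.e. nCr(4, 2) = 6 maps for the 4-channel array.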
gcc_feat = np.zeros((linear_spectra.shape[0], self._nb_mel_bins, gcc_channels))
cnt = 0
for m in range(linear_spectra.shape[-1]):
for n in range(m+1, linear_spectra.shape[-1]):
R = np.conj(linear_spectra[:, :, m]) * linear_spectra[:, :, n]
cc = np.fft.irfft(np.exp(1.j*np.angle(R)))
cc = np.concatenate((cc[:, -self._nb_mel_bins//2:], cc[:, :self._nb_mel_bins//2]), axis=-1)
gcc_feat[:, :, cnt] = cc
cnt += 1
return gcc_feat.transpose((0, 2, 1)).reshape((linear_spectra.shape[0], -1))
def _get_salsalite(self, linear_spectra):
# Adapted from the official SALSA repo- https://github.com/thomeou/SALSA
# spatial features
phase_vector = np.angle(linear_spectra[:, :, 1:] * np.conj(linear_spectra[:, :, 0, None]))
phase_vector = phase_vector / (self._delta * self._freq_vector)
phase_vector = phase_vector[:, self._lower_bin:self._cutoff_bin, :]
phase_vector[:, self._upper_bin:, :] = 0
phase_vector = phase_vector.transpose((0, 2, 1)).reshape((phase_vector.shape[0], -1))
# spectral features
linear_spectra = np.abs(linear_spectra)**2
for ch_cnt in range(linear_spectra.shape[-1]):
linear_spectra[:, :, ch_cnt] = librosa.power_to_db(linear_spectra[:, :, ch_cnt], ref=1.0, amin=1e-10, top_db=None)
linear_spectra = linear_spectra[:, self._lower_bin:self._cutoff_bin, :]
linear_spectra = linear_spectra.transpose((0, 2, 1)).reshape((linear_spectra.shape[0], -1))
return np.concatenate((linear_spectra, phase_vector), axis=-1)
def _get_spectrogram_for_file(self, audio_filename):
audio_in, fs = self._load_audio(audio_filename)
nb_feat_frames = int(len(audio_in) / float(self._hop_len))
nb_label_frames = int(len(audio_in) / float(self._label_hop_len))
self._filewise_frames[os.path.basename(audio_filename).split('.')[0]] = [nb_feat_frames, nb_label_frames]
audio_spec = self._spectrogram(audio_in, nb_feat_frames)
return audio_spec
# OUTPUT LABELS
def get_labels_for_file(self, _desc_file, _nb_label_frames):
"""
Reads description file and returns classification based SED labels and regression based DOA labels
:param _desc_file: metadata description file
        :return: label_mat: of dimension [nb_frames, 4*max_classes], one block of max_classes for the activity and one each for the x, y, z axes
"""
        # If using the Hungarian net, set the default DOA value to a fixed value greater than 1 for all axes. We are choosing a fixed value of 10
        # If not using the Hungarian net, use a default DOA which is a unit vector. We are choosing (x, y, z) = (0, 0, 1)
se_label = np.zeros((_nb_label_frames, self._nb_unique_classes))
x_label = np.zeros((_nb_label_frames, self._nb_unique_classes))
y_label = np.zeros((_nb_label_frames, self._nb_unique_classes))
z_label = np.zeros((_nb_label_frames, self._nb_unique_classes))
for frame_ind, active_event_list in _desc_file.items():
if frame_ind < _nb_label_frames:
for active_event in active_event_list:
se_label[frame_ind, active_event[0]] = 1
x_label[frame_ind, active_event[0]] = active_event[2]
y_label[frame_ind, active_event[0]] = active_event[3]
z_label[frame_ind, active_event[0]] = active_event[4]
label_mat = np.concatenate((se_label, x_label, y_label, z_label), axis=1)
return label_mat
# OUTPUT LABELS
def get_adpit_labels_for_file(self, _desc_file, _nb_label_frames):
"""
Reads description file and returns classification based SED labels and regression based DOA labels
for multi-ACCDOA with Auxiliary Duplicating Permutation Invariant Training (ADPIT)
:param _desc_file: metadata description file
:return: label_mat: of dimension [nb_frames, 6, 4(=act+XYZ), max_classes]
"""
se_label = np.zeros((_nb_label_frames, 6, self._nb_unique_classes)) # [nb_frames, 6, max_classes]
x_label = np.zeros((_nb_label_frames, 6, self._nb_unique_classes))
y_label = np.zeros((_nb_label_frames, 6, self._nb_unique_classes))
z_label = np.zeros((_nb_label_frames, 6, self._nb_unique_classes))
for frame_ind, active_event_list in _desc_file.items():
if frame_ind < _nb_label_frames:
active_event_list.sort(key=lambda x: x[0]) # sort for ov from the same class
active_event_list_per_class = []
for i, active_event in enumerate(active_event_list):
active_event_list_per_class.append(active_event)
if i == len(active_event_list) - 1: # if the last
if len(active_event_list_per_class) == 1: # if no ov from the same class
# a0----
active_event_a0 = active_event_list_per_class[0]
se_label[frame_ind, 0, active_event_a0[0]] = 1
x_label[frame_ind, 0, active_event_a0[0]] = active_event_a0[2]
y_label[frame_ind, 0, active_event_a0[0]] = active_event_a0[3]
z_label[frame_ind, 0, active_event_a0[0]] = active_event_a0[4]
elif len(active_event_list_per_class) == 2: # if ov with 2 sources from the same class
# --b0--
active_event_b0 = active_event_list_per_class[0]
se_label[frame_ind, 1, active_event_b0[0]] = 1
x_label[frame_ind, 1, active_event_b0[0]] = active_event_b0[2]
y_label[frame_ind, 1, active_event_b0[0]] = active_event_b0[3]
z_label[frame_ind, 1, active_event_b0[0]] = active_event_b0[4]
# --b1--
active_event_b1 = active_event_list_per_class[1]
se_label[frame_ind, 2, active_event_b1[0]] = 1
x_label[frame_ind, 2, active_event_b1[0]] = active_event_b1[2]
y_label[frame_ind, 2, active_event_b1[0]] = active_event_b1[3]
z_label[frame_ind, 2, active_event_b1[0]] = active_event_b1[4]
else: # if ov with more than 2 sources from the same class
# ----c0
active_event_c0 = active_event_list_per_class[0]
se_label[frame_ind, 3, active_event_c0[0]] = 1
x_label[frame_ind, 3, active_event_c0[0]] = active_event_c0[2]
y_label[frame_ind, 3, active_event_c0[0]] = active_event_c0[3]
z_label[frame_ind, 3, active_event_c0[0]] = active_event_c0[4]
# ----c1
active_event_c1 = active_event_list_per_class[1]
se_label[frame_ind, 4, active_event_c1[0]] = 1
x_label[frame_ind, 4, active_event_c1[0]] = active_event_c1[2]
y_label[frame_ind, 4, active_event_c1[0]] = active_event_c1[3]
z_label[frame_ind, 4, active_event_c1[0]] = active_event_c1[4]
# ----c2
active_event_c2 = active_event_list_per_class[2]
se_label[frame_ind, 5, active_event_c2[0]] = 1
x_label[frame_ind, 5, active_event_c2[0]] = active_event_c2[2]
y_label[frame_ind, 5, active_event_c2[0]] = active_event_c2[3]
z_label[frame_ind, 5, active_event_c2[0]] = active_event_c2[4]
elif active_event[0] != active_event_list[i + 1][0]: # if the next is not the same class
if len(active_event_list_per_class) == 1: # if no ov from the same class
# a0----
active_event_a0 = active_event_list_per_class[0]
se_label[frame_ind, 0, active_event_a0[0]] = 1
x_label[frame_ind, 0, active_event_a0[0]] = active_event_a0[2]
y_label[frame_ind, 0, active_event_a0[0]] = active_event_a0[3]
z_label[frame_ind, 0, active_event_a0[0]] = active_event_a0[4]
elif len(active_event_list_per_class) == 2: # if ov with 2 sources from the same class
# --b0--
active_event_b0 = active_event_list_per_class[0]
se_label[frame_ind, 1, active_event_b0[0]] = 1
x_label[frame_ind, 1, active_event_b0[0]] = active_event_b0[2]
y_label[frame_ind, 1, active_event_b0[0]] = active_event_b0[3]
z_label[frame_ind, 1, active_event_b0[0]] = active_event_b0[4]
# --b1--
active_event_b1 = active_event_list_per_class[1]
se_label[frame_ind, 2, active_event_b1[0]] = 1
x_label[frame_ind, 2, active_event_b1[0]] = active_event_b1[2]
y_label[frame_ind, 2, active_event_b1[0]] = active_event_b1[3]
z_label[frame_ind, 2, active_event_b1[0]] = active_event_b1[4]
else: # if ov with more than 2 sources from the same class
# ----c0
active_event_c0 = active_event_list_per_class[0]
se_label[frame_ind, 3, active_event_c0[0]] = 1
x_label[frame_ind, 3, active_event_c0[0]] = active_event_c0[2]
y_label[frame_ind, 3, active_event_c0[0]] = active_event_c0[3]
z_label[frame_ind, 3, active_event_c0[0]] = active_event_c0[4]
# ----c1
active_event_c1 = active_event_list_per_class[1]
se_label[frame_ind, 4, active_event_c1[0]] = 1
x_label[frame_ind, 4, active_event_c1[0]] = active_event_c1[2]
y_label[frame_ind, 4, active_event_c1[0]] = active_event_c1[3]
z_label[frame_ind, 4, active_event_c1[0]] = active_event_c1[4]
# ----c2
active_event_c2 = active_event_list_per_class[2]
se_label[frame_ind, 5, active_event_c2[0]] = 1
x_label[frame_ind, 5, active_event_c2[0]] = active_event_c2[2]
y_label[frame_ind, 5, active_event_c2[0]] = active_event_c2[3]
z_label[frame_ind, 5, active_event_c2[0]] = active_event_c2[4]
active_event_list_per_class = []
label_mat = np.stack((se_label, x_label, y_label, z_label), axis=2) # [nb_frames, 6, 4(=act+XYZ), max_classes]
return label_mat
# ------------------------------- EXTRACT FEATURE AND PREPROCESS IT -------------------------------
def extract_file_feature(self, _arg_in):
_file_cnt, _wav_path, _feat_path = _arg_in
spect = self._get_spectrogram_for_file(_wav_path)
#extract mel
if not self._use_salsalite:
mel_spect = self._get_mel_spectrogram(spect)
feat = None
if self._dataset == 'foa':
# extract intensity vectors
foa_iv = self._get_foa_intensity_vectors(spect)
feat = np.concatenate((mel_spect, foa_iv), axis=-1)
elif self._dataset == 'mic':
if self._use_salsalite:
feat = self._get_salsalite(spect)
else:
# extract gcc
gcc = self._get_gcc(spect)
feat = np.concatenate((mel_spect, gcc), axis=-1)
else:
print('ERROR: Unknown dataset format {}'.format(self._dataset))
exit()
if feat is not None:
print('{}: {}, {}'.format(_file_cnt, os.path.basename(_wav_path), feat.shape ))
np.save(_feat_path, feat)
def extract_all_feature(self):
# setting up folders
self._feat_dir = self.get_unnormalized_feat_dir()
create_folder(self._feat_dir)
from multiprocessing import Pool
import time
start_s = time.time()
# extraction starts
print('Extracting spectrogram:')
print('\t\taud_dir {}\n\t\tdesc_dir {}\n\t\tfeat_dir {}'.format(
self._aud_dir, self._desc_dir, self._feat_dir))
arg_list = []
for sub_folder in os.listdir(self._aud_dir):
loc_aud_folder = os.path.join(self._aud_dir, sub_folder)
for file_cnt, file_name in enumerate(os.listdir(loc_aud_folder)):
wav_filename = '{}.wav'.format(file_name.split('.')[0])
wav_path = os.path.join(loc_aud_folder, wav_filename)
feat_path = os.path.join(self._feat_dir, '{}.npy'.format(wav_filename.split('.')[0]))
self.extract_file_feature((file_cnt, wav_path, feat_path))
arg_list.append((file_cnt, wav_path, feat_path))
# with Pool() as pool:
# result = pool.map(self.extract_file_feature, iterable=arg_list)
# pool.close()
# pool.join()
print(time.time()-start_s)
def preprocess_features(self):
# Setting up folders and filenames
self._feat_dir = self.get_unnormalized_feat_dir()
self._feat_dir_norm = self.get_normalized_feat_dir()
create_folder(self._feat_dir_norm)
normalized_features_wts_file = self.get_normalized_wts_file()
spec_scaler = None
# pre-processing starts
if self._is_eval:
spec_scaler = joblib.load(normalized_features_wts_file)
print('Normalized_features_wts_file: {}. Loaded.'.format(normalized_features_wts_file))
else:
print('Estimating weights for normalizing feature files:')
print('\t\tfeat_dir: {}'.format(self._feat_dir))
spec_scaler = preprocessing.StandardScaler()
for file_cnt, file_name in enumerate(os.listdir(self._feat_dir)):
print('{}: {}'.format(file_cnt, file_name))
feat_file = np.load(os.path.join(self._feat_dir, file_name))
spec_scaler.partial_fit(feat_file)
del feat_file
joblib.dump(
spec_scaler,
normalized_features_wts_file
)
print('Normalized_features_wts_file: {}. Saved.'.format(normalized_features_wts_file))
print('Normalizing feature files:')
print('\t\tfeat_dir_norm {}'.format(self._feat_dir_norm))
for file_cnt, file_name in enumerate(os.listdir(self._feat_dir)):
print('{}: {}'.format(file_cnt, file_name))
feat_file = np.load(os.path.join(self._feat_dir, file_name))
feat_file = spec_scaler.transform(feat_file)
np.save(
os.path.join(self._feat_dir_norm, file_name),
feat_file
)
del feat_file
print('normalized files written to {}'.format(self._feat_dir_norm))
# ------------------------------- EXTRACT LABELS AND PREPROCESS IT -------------------------------
def extract_all_labels(self):
self.get_frame_stats()
self._label_dir = self.get_label_dir()
print('Extracting labels:')
print('\t\taud_dir {}\n\t\tdesc_dir {}\n\t\tlabel_dir {}'.format(
self._aud_dir, self._desc_dir, self._label_dir))
create_folder(self._label_dir)
for sub_folder in os.listdir(self._desc_dir):
loc_desc_folder = os.path.join(self._desc_dir, sub_folder)
for file_cnt, file_name in enumerate(os.listdir(loc_desc_folder)):
wav_filename = '{}.wav'.format(file_name.split('.')[0])
nb_label_frames = self._filewise_frames[file_name.split('.')[0]][1]
desc_file_polar = self.load_output_format_file(os.path.join(loc_desc_folder, file_name))
desc_file = self.convert_output_format_polar_to_cartesian(desc_file_polar)
if self._multi_accdoa:
label_mat = self.get_adpit_labels_for_file(desc_file, nb_label_frames)
else:
label_mat = self.get_labels_for_file(desc_file, nb_label_frames)
print('{}: {}, {}'.format(file_cnt, file_name, label_mat.shape))
np.save(os.path.join(self._label_dir, '{}.npy'.format(wav_filename.split('.')[0])), label_mat)
# ------------------------------- DCASE OUTPUT FORMAT FUNCTIONS -------------------------------
def load_output_format_file(self, _output_format_file):
"""
Loads DCASE output format csv file and returns it in dictionary format
:param _output_format_file: DCASE output format CSV
:return: _output_dict: dictionary
"""
_output_dict = {}
_fid = open(_output_format_file, 'r')
# next(_fid)
for _line in _fid:
_words = _line.strip().split(',')
_frame_ind = int(_words[0])
if _frame_ind not in _output_dict:
_output_dict[_frame_ind] = []
if len(_words) == 5: # frame, class idx, source_id, polar coordinates(2) # no distance data, for example in synthetic data fold 1 and 2
_output_dict[_frame_ind].append([int(_words[1]), int(_words[2]), float(_words[3]), float(_words[4])])
if len(_words) == 6: # frame, class idx, source_id, polar coordinates(2), distance
_output_dict[_frame_ind].append([int(_words[1]), int(_words[2]), float(_words[3]), float(_words[4])])
elif len(_words) == 7: # frame, class idx, source_id, cartesian coordinates(3), distance
_output_dict[_frame_ind].append([int(_words[1]), int(_words[2]), float(_words[3]), float(_words[4]), float(_words[5])])
_fid.close()
return _output_dict
def write_output_format_file(self, _output_format_file, _output_format_dict):
"""
Writes DCASE output format csv file, given output format dictionary
:param _output_format_file:
:param _output_format_dict:
:return:
"""
_fid = open(_output_format_file, 'w')
# _fid.write('{},{},{},{}\n'.format('frame number with 20ms hop (int)', 'class index (int)', 'azimuth angle (int)', 'elevation angle (int)'))
for _frame_ind in _output_format_dict.keys():
for _value in _output_format_dict[_frame_ind]:
# Write Cartesian format output. Since baseline does not estimate track count and distance we use fixed values.
_fid.write('{},{},{},{},{},{},{}\n'.format(int(_frame_ind), int(_value[0]), 0, float(_value[1]), float(_value[2]), float(_value[3]), 0))
_fid.close()
def segment_labels(self, _pred_dict, _max_frames):
'''
Collects class-wise sound event location information in segments of length 1s from reference dataset
:param _pred_dict: Dictionary containing frame-wise sound event time and location information. Output of SELD method
:param _max_frames: Total number of frames in the recording
:return: Dictionary containing class-wise sound event location information in each segment of audio
dictionary_name[segment-index][class-index] = list(frame-cnt-within-segment, azimuth, elevation)
'''
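        # With label_hop_len_s = 0.1 s there are 10 label frames per 1 s segment, so the recording is split into
        # ceil(max_frames / 10) blocks.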
nb_blocks = int(np.ceil(_max_frames/float(self._nb_label_frames_1s)))
output_dict = {x: {} for x in range(nb_blocks)}
for frame_cnt in range(0, _max_frames, self._nb_label_frames_1s):
# Collect class-wise information for each block
# [class][frame] = <list of doa values>
            # Data structure supports multi-instance occurrence of the same class
block_cnt = frame_cnt // self._nb_label_frames_1s
loc_dict = {}
for audio_frame in range(frame_cnt, frame_cnt+self._nb_label_frames_1s):
if audio_frame not in _pred_dict:
continue
for value in _pred_dict[audio_frame]:
if value[0] not in loc_dict:
loc_dict[value[0]] = {}
block_frame = audio_frame - frame_cnt
if block_frame not in loc_dict[value[0]]:
loc_dict[value[0]][block_frame] = []
loc_dict[value[0]][block_frame].append(value[1:])
# Update the block wise details collected above in a global structure
for class_cnt in loc_dict:
if class_cnt not in output_dict[block_cnt]:
output_dict[block_cnt][class_cnt] = []
keys = [k for k in loc_dict[class_cnt]]
values = [loc_dict[class_cnt][k] for k in loc_dict[class_cnt]]
output_dict[block_cnt][class_cnt].append([keys, values])
return output_dict
def regression_label_format_to_output_format(self, _sed_labels, _doa_labels):
"""
Converts the sed (classification) and doa labels predicted in regression format to dcase output format.
:param _sed_labels: SED labels matrix [nb_frames, nb_classes]
:param _doa_labels: DOA labels matrix [nb_frames, 2*nb_classes] or [nb_frames, 3*nb_classes]
:return: _output_dict: returns a dict containing dcase output format
"""
_nb_classes = self._nb_unique_classes
_is_polar = _doa_labels.shape[-1] == 2*_nb_classes
_azi_labels, _ele_labels = None, None
_x, _y, _z = None, None, None
if _is_polar:
_azi_labels = _doa_labels[:, :_nb_classes]
_ele_labels = _doa_labels[:, _nb_classes:]
else:
_x = _doa_labels[:, :_nb_classes]
_y = _doa_labels[:, _nb_classes:2*_nb_classes]
_z = _doa_labels[:, 2*_nb_classes:]
_output_dict = {}
for _frame_ind in range(_sed_labels.shape[0]):
_tmp_ind = np.where(_sed_labels[_frame_ind, :])
if len(_tmp_ind[0]):
_output_dict[_frame_ind] = []
for _tmp_class in _tmp_ind[0]:
if _is_polar:
_output_dict[_frame_ind].append([_tmp_class, _azi_labels[_frame_ind, _tmp_class], _ele_labels[_frame_ind, _tmp_class]])
else:
_output_dict[_frame_ind].append([_tmp_class, _x[_frame_ind, _tmp_class], _y[_frame_ind, _tmp_class], _z[_frame_ind, _tmp_class]])
return _output_dict
def convert_output_format_polar_to_cartesian(self, in_dict):
out_dict = {}
for frame_cnt in in_dict.keys():
if frame_cnt not in out_dict:
out_dict[frame_cnt] = []
for tmp_val in in_dict[frame_cnt]:
ele_rad = tmp_val[3]*np.pi/180.
azi_rad = tmp_val[2]*np.pi/180
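                # Unit-vector conversion: x = cos(el)*cos(az), y = cos(el)*sin(az), z = sin(el).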
tmp_label = np.cos(ele_rad)
x = np.cos(azi_rad) * tmp_label
y = np.sin(azi_rad) * tmp_label
z = np.sin(ele_rad)
out_dict[frame_cnt].append([tmp_val[0], tmp_val[1], x, y, z])
return out_dict
def convert_output_format_cartesian_to_polar(self, in_dict):
out_dict = {}
for frame_cnt in in_dict.keys():
if frame_cnt not in out_dict:
out_dict[frame_cnt] = []
for tmp_val in in_dict[frame_cnt]:
x, y, z = tmp_val[2], tmp_val[3], tmp_val[4]
# in degrees
azimuth = np.arctan2(y, x) * 180 / np.pi
elevation = np.arctan2(z, np.sqrt(x**2 + y**2)) * 180 / np.pi
r = np.sqrt(x**2 + y**2 + z**2)
out_dict[frame_cnt].append([tmp_val[0], tmp_val[1], azimuth, elevation])
return out_dict
# ------------------------------- Misc public functions -------------------------------
def get_normalized_feat_dir(self):
return os.path.join(
self._feat_label_dir,
'{}_norm'.format('{}_salsa'.format(self._dataset_combination) if (self._dataset=='mic' and self._use_salsalite) else self._dataset_combination)
)
def get_unnormalized_feat_dir(self):
return os.path.join(
self._feat_label_dir,
'{}'.format('{}_salsa'.format(self._dataset_combination) if (self._dataset=='mic' and self._use_salsalite) else self._dataset_combination)
)
def get_label_dir(self):
if self._is_eval:
return None
else:
return os.path.join(
self._feat_label_dir,
'{}_label'.format('{}_adpit'.format(self._dataset_combination) if self._multi_accdoa else self._dataset_combination)
)
def get_normalized_wts_file(self):
return os.path.join(
self._feat_label_dir,
'{}_wts'.format(self._dataset)
)
def get_nb_channels(self):
return self._nb_channels
def get_nb_classes(self):
return self._nb_unique_classes
def nb_frames_1s(self):
return self._nb_label_frames_1s
def get_hop_len_sec(self):
return self._hop_len_s
def get_nb_mel_bins(self):
return self._nb_mel_bins
def create_folder(folder_name):
if not os.path.exists(folder_name):
print('{} folder does not exist, creating it.'.format(folder_name))
os.makedirs(folder_name)
def delete_and_create_folder(folder_name):
if os.path.exists(folder_name) and os.path.isdir(folder_name):
shutil.rmtree(folder_name)
os.makedirs(folder_name, exist_ok=True)
| 32,805 | 49.238897 | 155 |
py
|
seld-dcase2023
|
seld-dcase2023-main/cls_data_generator.py
|
#
# Data generator for training the SELDnet
#
import os
import numpy as np
import cls_feature_class
from IPython import embed
from collections import deque
import random
class DataGenerator(object):
def __init__(
self, params, split=1, shuffle=True, per_file=False, is_eval=False
):
self._per_file = per_file
self._is_eval = is_eval
self._splits = np.array(split)
self._batch_size = params['batch_size']
self._feature_seq_len = params['feature_sequence_length']
self._label_seq_len = params['label_sequence_length']
self._shuffle = shuffle
self._feat_cls = cls_feature_class.FeatureClass(params=params, is_eval=self._is_eval)
self._label_dir = self._feat_cls.get_label_dir()
self._feat_dir = self._feat_cls.get_normalized_feat_dir()
self._multi_accdoa = params['multi_accdoa']
self._filenames_list = list()
        self._nb_frames_file = 0     # Using a fixed number of frames in feat files. Updated in _get_filenames_list_and_feat_label_sizes()
self._nb_mel_bins = self._feat_cls.get_nb_mel_bins()
self._nb_ch = None
self._label_len = None # total length of label - DOA + SED
self._doa_len = None # DOA label length
self._nb_classes = self._feat_cls.get_nb_classes()
self._circ_buf_feat = None
self._circ_buf_label = None
self._get_filenames_list_and_feat_label_sizes()
print(
'\tDatagen_mode: {}, nb_files: {}, nb_classes:{}\n'
'\tnb_frames_file: {}, feat_len: {}, nb_ch: {}, label_len:{}\n'.format(
'eval' if self._is_eval else 'dev', len(self._filenames_list), self._nb_classes,
self._nb_frames_file, self._nb_mel_bins, self._nb_ch, self._label_len
)
)
print(
'\tDataset: {}, split: {}\n'
'\tbatch_size: {}, feat_seq_len: {}, label_seq_len: {}, shuffle: {}\n'
'\tTotal batches in dataset: {}\n'
'\tlabel_dir: {}\n '
'\tfeat_dir: {}\n'.format(
params['dataset'], split,
self._batch_size, self._feature_seq_len, self._label_seq_len, self._shuffle,
self._nb_total_batches,
self._label_dir, self._feat_dir
)
)
def get_data_sizes(self):
feat_shape = (self._batch_size, self._nb_ch, self._feature_seq_len, self._nb_mel_bins)
if self._is_eval:
label_shape = None
else:
if self._multi_accdoa is True:
label_shape = (self._batch_size, self._label_seq_len, self._nb_classes*3*3)
else:
label_shape = (self._batch_size, self._label_seq_len, self._nb_classes*3)
return feat_shape, label_shape
def get_total_batches_in_data(self):
return self._nb_total_batches
def _get_filenames_list_and_feat_label_sizes(self):
print('Computing some stats about the dataset')
max_frames, total_frames, temp_feat = -1, 0, []
for filename in os.listdir(self._feat_dir):
if int(filename[4]) in self._splits: # check which split the file belongs to
self._filenames_list.append(filename)
temp_feat = np.load(os.path.join(self._feat_dir, filename))
total_frames += (temp_feat.shape[0] - (temp_feat.shape[0] % self._feature_seq_len))
if temp_feat.shape[0]>max_frames:
max_frames = temp_feat.shape[0]
if len(temp_feat)!=0:
self._nb_frames_file = max_frames if self._per_file else temp_feat.shape[0]
self._nb_ch = temp_feat.shape[1] // self._nb_mel_bins
else:
print('Loading features failed')
exit()
if not self._is_eval:
temp_label = np.load(os.path.join(self._label_dir, self._filenames_list[0]))
if self._multi_accdoa is True:
self._num_track_dummy = temp_label.shape[-3]
self._num_axis = temp_label.shape[-2]
self._num_class = temp_label.shape[-1]
else:
self._label_len = temp_label.shape[-1]
self._doa_len = 3 # Cartesian
if self._per_file:
self._batch_size = int(np.ceil(max_frames/float(self._feature_seq_len)))
print('\tWARNING: Resetting batch size to {}. To accommodate the inference of longest file of {} frames in a single batch'.format(self._batch_size, max_frames))
self._nb_total_batches = len(self._filenames_list)
else:
self._nb_total_batches = int(np.floor(total_frames / (self._batch_size*self._feature_seq_len)))
self._feature_batch_seq_len = self._batch_size*self._feature_seq_len
self._label_batch_seq_len = self._batch_size*self._label_seq_len
return
def generate(self):
"""
Generates batches of samples
:return:
"""
if self._shuffle:
random.shuffle(self._filenames_list)
        # Ideally this should have been outside the while loop. But while generating the test data we want the data
        # to be exactly the same for all epochs, hence we keep it here.
self._circ_buf_feat = deque()
self._circ_buf_label = deque()
file_cnt = 0
if self._is_eval:
for i in range(self._nb_total_batches):
                # load feat and label to circular buffer. Always maintain at least one batch worth of feat and label in the
                # circular buffer. If not, keep refilling it.
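                # The deques act as frame-level FIFO buffers, so a batch can span file boundaries when per_file is False.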
while len(self._circ_buf_feat) < self._feature_batch_seq_len:
temp_feat = np.load(os.path.join(self._feat_dir, self._filenames_list[file_cnt]))
for row_cnt, row in enumerate(temp_feat):
self._circ_buf_feat.append(row)
# If self._per_file is True, this returns the sequences belonging to a single audio recording
if self._per_file:
extra_frames = self._feature_batch_seq_len - temp_feat.shape[0]
extra_feat = np.ones((extra_frames, temp_feat.shape[1])) * 1e-6
for row_cnt, row in enumerate(extra_feat):
self._circ_buf_feat.append(row)
file_cnt = file_cnt + 1
# Read one batch size from the circular buffer
feat = np.zeros((self._feature_batch_seq_len, self._nb_mel_bins * self._nb_ch))
for j in range(self._feature_batch_seq_len):
feat[j, :] = self._circ_buf_feat.popleft()
feat = np.reshape(feat, (self._feature_batch_seq_len, self._nb_ch, self._nb_mel_bins))
# Split to sequences
feat = self._split_in_seqs(feat, self._feature_seq_len)
feat = np.transpose(feat, (0, 2, 1, 3))
yield feat
else:
for i in range(self._nb_total_batches):
                # load feat and label to circular buffer. Always maintain at least one batch worth of feat and label in the
                # circular buffer. If not, keep refilling it.
while len(self._circ_buf_feat) < self._feature_batch_seq_len:
temp_feat = np.load(os.path.join(self._feat_dir, self._filenames_list[file_cnt]))
temp_label = np.load(os.path.join(self._label_dir, self._filenames_list[file_cnt]))
if not self._per_file:
                        # In order to support variable-length features and labels of different resolutions,
                        # we remove all frames in the feature and label matrices that fall outside
                        # a multiple of self._label_seq_len and self._feature_seq_len. Further, we do this only during training.
temp_label = temp_label[:temp_label.shape[0] - (temp_label.shape[0] % self._label_seq_len)]
temp_mul = temp_label.shape[0]//self._label_seq_len
temp_feat = temp_feat[:temp_mul*self._feature_seq_len, :]
for f_row in temp_feat:
self._circ_buf_feat.append(f_row)
for l_row in temp_label:
self._circ_buf_label.append(l_row)
# If self._per_file is True, this returns the sequences belonging to a single audio recording
if self._per_file:
feat_extra_frames = self._feature_batch_seq_len - temp_feat.shape[0]
extra_feat = np.ones((feat_extra_frames, temp_feat.shape[1])) * 1e-6
label_extra_frames = self._label_batch_seq_len - temp_label.shape[0]
if self._multi_accdoa is True:
extra_labels = np.zeros((label_extra_frames, self._num_track_dummy, self._num_axis, self._num_class))
else:
extra_labels = np.zeros((label_extra_frames, temp_label.shape[1]))
for f_row in extra_feat:
self._circ_buf_feat.append(f_row)
for l_row in extra_labels:
self._circ_buf_label.append(l_row)
file_cnt = file_cnt + 1
# Read one batch size from the circular buffer
feat = np.zeros((self._feature_batch_seq_len, self._nb_mel_bins * self._nb_ch))
for j in range(self._feature_batch_seq_len):
feat[j, :] = self._circ_buf_feat.popleft()
feat = np.reshape(feat, (self._feature_batch_seq_len, self._nb_ch, self._nb_mel_bins))
if self._multi_accdoa is True:
label = np.zeros((self._label_batch_seq_len, self._num_track_dummy, self._num_axis, self._num_class))
for j in range(self._label_batch_seq_len):
label[j, :, :, :] = self._circ_buf_label.popleft()
else:
label = np.zeros((self._label_batch_seq_len, self._label_len))
for j in range(self._label_batch_seq_len):
label[j, :] = self._circ_buf_label.popleft()
# Split to sequences
feat = self._split_in_seqs(feat, self._feature_seq_len)
feat = np.transpose(feat, (0, 2, 1, 3))
label = self._split_in_seqs(label, self._label_seq_len)
if self._multi_accdoa is True:
pass
else:
mask = label[:, :, :self._nb_classes]
mask = np.tile(mask, 3)
label = mask * label[:, :, self._nb_classes:]
yield feat, label
def _split_in_seqs(self, data, _seq_len):
if len(data.shape) == 1:
if data.shape[0] % _seq_len:
                data = data[:-(data.shape[0] % _seq_len)]
data = data.reshape((data.shape[0] // _seq_len, _seq_len, 1))
elif len(data.shape) == 2:
if data.shape[0] % _seq_len:
data = data[:-(data.shape[0] % _seq_len), :]
data = data.reshape((data.shape[0] // _seq_len, _seq_len, data.shape[1]))
elif len(data.shape) == 3:
if data.shape[0] % _seq_len:
data = data[:-(data.shape[0] % _seq_len), :, :]
data = data.reshape((data.shape[0] // _seq_len, _seq_len, data.shape[1], data.shape[2]))
elif len(data.shape) == 4: # for multi-ACCDOA with ADPIT
if data.shape[0] % _seq_len:
data = data[:-(data.shape[0] % _seq_len), :, :, :]
data = data.reshape((data.shape[0] // _seq_len, _seq_len, data.shape[1], data.shape[2], data.shape[3]))
else:
print('ERROR: Unknown data dimensions: {}'.format(data.shape))
exit()
return data
@staticmethod
def split_multi_channels(data, num_channels):
tmp = None
in_shape = data.shape
if len(in_shape) == 3:
            hop = in_shape[2] // num_channels
tmp = np.zeros((in_shape[0], num_channels, in_shape[1], hop))
for i in range(num_channels):
tmp[:, i, :, :] = data[:, :, i * hop:(i + 1) * hop]
elif len(in_shape) == 4 and num_channels == 1:
tmp = np.zeros((in_shape[0], 1, in_shape[1], in_shape[2], in_shape[3]))
tmp[:, 0, :, :, :] = data
else:
print('ERROR: The input should be a 3D matrix but it seems to have dimensions: {}'.format(in_shape))
exit()
return tmp
def get_nb_classes(self):
return self._nb_classes
def nb_frames_1s(self):
return self._feat_cls.nb_frames_1s()
def get_hop_len_sec(self):
return self._feat_cls.get_hop_len_sec()
def get_filelist(self):
return self._filenames_list
def get_frame_per_file(self):
return self._label_batch_seq_len
def get_nb_frames(self):
return self._feat_cls.get_nb_frames()
def get_data_gen_mode(self):
return self._is_eval
def write_output_format_file(self, _out_file, _out_dict):
return self._feat_cls.write_output_format_file(_out_file, _out_dict)
| 13,414 | 44.941781 | 172 |
py
|
seld-dcase2023
|
seld-dcase2023-main/parameters.py
|
# Parameters used in the feature extraction, neural network model, and training the SELDnet can be changed here.
#
# Ideally, do not change the values of the default parameters. Create separate cases with unique <task-id> as seen in
# the code below (if-else loop) and use them. This way you can easily reproduce a configuration at a later time.
def get_params(argv='1'):
print("SET: {}".format(argv))
# ########### default parameters ##############
params = dict(
quick_test=True, # To do quick test. Trains/test on small subset of dataset, and # of epochs
finetune_mode = False, # Finetune on existing model, requires the pretrained model path set - pretrained_model_weights
pretrained_model_weights='models/1_1_foa_dev_split6_model.h5',
# INPUT PATH
# dataset_dir='DCASE2020_SELD_dataset/', # Base folder containing the foa/mic and metadata folders
dataset_dir='/scratch/asignal/partha/DCASE2023/DCASE2023_SELD_dataset',
# OUTPUT PATHS
# feat_label_dir='DCASE2020_SELD_dataset/feat_label_hnet/', # Directory to dump extracted features and labels
feat_label_dir='/scratch/asignal/partha/DCASE2023/DCASE2023_SELD_dataset/seld_feat_label',
model_dir='models/', # Dumps the trained models and training curves in this folder
dcase_output_dir='results/', # recording-wise results are dumped in this path.
# DATASET LOADING PARAMETERS
mode='dev', # 'dev' - development or 'eval' - evaluation dataset
dataset='foa', # 'foa' - ambisonic or 'mic' - microphone signals
#FEATURE PARAMS
fs=24000,
hop_len_s=0.02,
label_hop_len_s=0.1,
max_audio_len_s=60,
nb_mel_bins=64,
use_salsalite = False, # Used for MIC dataset only. If true use salsalite features, else use GCC features
fmin_doa_salsalite = 50,
fmax_doa_salsalite = 2000,
fmax_spectra_salsalite = 9000,
# MODEL TYPE
multi_accdoa=False, # False - Single-ACCDOA or True - Multi-ACCDOA
thresh_unify=15, # Required for Multi-ACCDOA only. Threshold of unification for inference in degrees.
# DNN MODEL PARAMETERS
label_sequence_length=50, # Feature sequence length
batch_size=128, # Batch size
dropout_rate=0.05, # Dropout rate, constant for all layers
nb_cnn2d_filt=64, # Number of CNN nodes, constant for each layer
f_pool_size=[4, 4, 2], # CNN frequency pooling, length of list = number of CNN layers, list value = pooling per layer
self_attn=True,
nb_heads=8,
nb_self_attn_layers=2,
nb_rnn_layers=2,
rnn_size=128,
nb_fnn_layers=1,
        fnn_size=128,                # Number of nodes in each fully connected layer
nb_epochs=100, # Train for maximum epochs
lr=1e-3,
# METRIC
average='macro', # Supports 'micro': sample-wise average and 'macro': class-wise average
lad_doa_thresh=20
)
# ########### User defined parameters ##############
if argv == '1':
print("USING DEFAULT PARAMETERS\n")
elif argv == '2':
print("FOA + ACCDOA\n")
params['quick_test'] = False
params['dataset'] = 'foa'
params['multi_accdoa'] = False
elif argv == '3':
print("FOA + multi ACCDOA\n")
params['quick_test'] = False
params['dataset'] = 'foa'
params['multi_accdoa'] = True
elif argv == '4':
print("MIC + GCC + ACCDOA\n")
params['quick_test'] = False
params['dataset'] = 'mic'
params['use_salsalite'] = False
params['multi_accdoa'] = False
elif argv == '5':
print("MIC + SALSA + ACCDOA\n")
params['quick_test'] = False
params['dataset'] = 'mic'
params['use_salsalite'] = True
params['multi_accdoa'] = False
elif argv == '6':
print("MIC + GCC + multi ACCDOA\n")
params['quick_test'] = False
params['dataset'] = 'mic'
params['use_salsalite'] = False
params['multi_accdoa'] = True
elif argv == '7':
print("MIC + SALSA + multi ACCDOA\n")
params['quick_test'] = False
params['dataset'] = 'mic'
params['use_salsalite'] = True
params['multi_accdoa'] = True
elif argv == '999':
print("QUICK TEST MODE\n")
params['quick_test'] = True
else:
print('ERROR: unknown argument {}'.format(argv))
exit()
feature_label_resolution = int(params['label_hop_len_s'] // params['hop_len_s'])
params['feature_sequence_length'] = params['label_sequence_length'] * feature_label_resolution
params['t_pool_size'] = [feature_label_resolution, 1, 1] # CNN time pooling
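    # With the defaults above: 0.1 s label hop / 0.02 s feature hop = 5, so feature_sequence_length = 50 * 5 = 250
    # and t_pool_size = [5, 1, 1].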
params['patience'] = int(params['nb_epochs']) # Stop training if patience is reached
if '2020' in params['dataset_dir']:
params['unique_classes'] = 14
elif '2021' in params['dataset_dir']:
params['unique_classes'] = 12
elif '2022' in params['dataset_dir']:
params['unique_classes'] = 13
elif '2023' in params['dataset_dir']:
params['unique_classes'] = 13
for key, value in params.items():
print("\t{}: {}".format(key, value))
return params
| 5,433 | 37.267606 | 130 |
py
|
seld-dcase2023
|
seld-dcase2023-main/seldnet_model.py
|
# The SELDnet architecture
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from IPython import embed
class MSELoss_ADPIT(object):
def __init__(self):
super().__init__()
self._each_loss = nn.MSELoss(reduction='none')
def _each_calc(self, output, target):
return self._each_loss(output, target).mean(dim=(2)) # class-wise frame-level
def __call__(self, output, target):
"""
Auxiliary Duplicating Permutation Invariant Training (ADPIT) for 13 (=1+6+6) possible combinations
Args:
output: [batch_size, frames, num_track*num_axis*num_class=3*3*12]
target: [batch_size, frames, num_track_dummy=6, num_axis=4, num_class=12]
Return:
loss: scalar
"""
target_A0 = target[:, :, 0, 0:1, :] * target[:, :, 0, 1:, :] # A0, no ov from the same class, [batch_size, frames, num_axis(act)=1, num_class=12] * [batch_size, frames, num_axis(XYZ)=3, num_class=12]
target_B0 = target[:, :, 1, 0:1, :] * target[:, :, 1, 1:, :] # B0, ov with 2 sources from the same class
target_B1 = target[:, :, 2, 0:1, :] * target[:, :, 2, 1:, :] # B1
target_C0 = target[:, :, 3, 0:1, :] * target[:, :, 3, 1:, :] # C0, ov with 3 sources from the same class
target_C1 = target[:, :, 4, 0:1, :] * target[:, :, 4, 1:, :] # C1
target_C2 = target[:, :, 5, 0:1, :] * target[:, :, 5, 1:, :] # C2
target_A0A0A0 = torch.cat((target_A0, target_A0, target_A0), 2) # 1 permutation of A (no ov from the same class), [batch_size, frames, num_track*num_axis=3*3, num_class=12]
target_B0B0B1 = torch.cat((target_B0, target_B0, target_B1), 2) # 6 permutations of B (ov with 2 sources from the same class)
target_B0B1B0 = torch.cat((target_B0, target_B1, target_B0), 2)
target_B0B1B1 = torch.cat((target_B0, target_B1, target_B1), 2)
target_B1B0B0 = torch.cat((target_B1, target_B0, target_B0), 2)
target_B1B0B1 = torch.cat((target_B1, target_B0, target_B1), 2)
target_B1B1B0 = torch.cat((target_B1, target_B1, target_B0), 2)
target_C0C1C2 = torch.cat((target_C0, target_C1, target_C2), 2) # 6 permutations of C (ov with 3 sources from the same class)
target_C0C2C1 = torch.cat((target_C0, target_C2, target_C1), 2)
target_C1C0C2 = torch.cat((target_C1, target_C0, target_C2), 2)
target_C1C2C0 = torch.cat((target_C1, target_C2, target_C0), 2)
target_C2C0C1 = torch.cat((target_C2, target_C0, target_C1), 2)
target_C2C1C0 = torch.cat((target_C2, target_C1, target_C0), 2)
output = output.reshape(output.shape[0], output.shape[1], target_A0A0A0.shape[2], target_A0A0A0.shape[3]) # output is set the same shape of target, [batch_size, frames, num_track*num_axis=3*3, num_class=12]
pad4A = target_B0B0B1 + target_C0C1C2
pad4B = target_A0A0A0 + target_C0C1C2
pad4C = target_A0A0A0 + target_B0B0B1
loss_0 = self._each_calc(output, target_A0A0A0 + pad4A) # padded with target_B0B0B1 and target_C0C1C2 in order to avoid to set zero as target
loss_1 = self._each_calc(output, target_B0B0B1 + pad4B) # padded with target_A0A0A0 and target_C0C1C2
loss_2 = self._each_calc(output, target_B0B1B0 + pad4B)
loss_3 = self._each_calc(output, target_B0B1B1 + pad4B)
loss_4 = self._each_calc(output, target_B1B0B0 + pad4B)
loss_5 = self._each_calc(output, target_B1B0B1 + pad4B)
loss_6 = self._each_calc(output, target_B1B1B0 + pad4B)
loss_7 = self._each_calc(output, target_C0C1C2 + pad4C) # padded with target_A0A0A0 and target_B0B0B1
loss_8 = self._each_calc(output, target_C0C2C1 + pad4C)
loss_9 = self._each_calc(output, target_C1C0C2 + pad4C)
loss_10 = self._each_calc(output, target_C1C2C0 + pad4C)
loss_11 = self._each_calc(output, target_C2C0C1 + pad4C)
loss_12 = self._each_calc(output, target_C2C1C0 + pad4C)
loss_min = torch.min(
torch.stack((loss_0,
loss_1,
loss_2,
loss_3,
loss_4,
loss_5,
loss_6,
loss_7,
loss_8,
loss_9,
loss_10,
loss_11,
loss_12), dim=0),
dim=0).indices
loss = (loss_0 * (loss_min == 0) +
loss_1 * (loss_min == 1) +
loss_2 * (loss_min == 2) +
loss_3 * (loss_min == 3) +
loss_4 * (loss_min == 4) +
loss_5 * (loss_min == 5) +
loss_6 * (loss_min == 6) +
loss_7 * (loss_min == 7) +
loss_8 * (loss_min == 8) +
loss_9 * (loss_min == 9) +
loss_10 * (loss_min == 10) +
loss_11 * (loss_min == 11) +
loss_12 * (loss_min == 12)).mean()
return loss
class ConvBlock(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)):
super().__init__()
self.conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding)
self.bn = nn.BatchNorm2d(out_channels)
def forward(self, x):
x = F.relu(self.bn(self.conv(x)))
return x
class PositionalEmbedding(nn.Module): # Not used in the baseline
def __init__(self, d_model, max_len=512):
super().__init__()
# Compute the positional encodings once in log space.
pe = torch.zeros(max_len, d_model).float()
        pe.requires_grad = False
position = torch.arange(0, max_len).float().unsqueeze(1)
div_term = (torch.arange(0, d_model, 2).float() * -(math.log(10000.0) / d_model)).exp()
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0)
self.register_buffer('pe', pe)
def forward(self, x):
return self.pe[:, :x.size(1)]
class SeldModel(torch.nn.Module):
def __init__(self, in_feat_shape, out_shape, params):
super().__init__()
self.nb_classes = params['unique_classes']
self.params=params
self.conv_block_list = nn.ModuleList()
if len(params['f_pool_size']):
for conv_cnt in range(len(params['f_pool_size'])):
self.conv_block_list.append(ConvBlock(in_channels=params['nb_cnn2d_filt'] if conv_cnt else in_feat_shape[1], out_channels=params['nb_cnn2d_filt']))
self.conv_block_list.append(nn.MaxPool2d((params['t_pool_size'][conv_cnt], params['f_pool_size'][conv_cnt])))
self.conv_block_list.append(nn.Dropout2d(p=params['dropout_rate']))
self.gru_input_dim = params['nb_cnn2d_filt'] * int(np.floor(in_feat_shape[-1] / np.prod(params['f_pool_size'])))
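        # e.g. with 64 mel bins and f_pool_size = [4, 4, 2] the frequency axis shrinks to 64 / 32 = 2 bins,
        # giving gru_input_dim = 64 * 2 = 128.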
self.gru = torch.nn.GRU(input_size=self.gru_input_dim, hidden_size=params['rnn_size'],
num_layers=params['nb_rnn_layers'], batch_first=True,
dropout=params['dropout_rate'], bidirectional=True)
# self.pos_embedder = PositionalEmbedding(self.params['rnn_size'])
self.mhsa_block_list = nn.ModuleList()
self.layer_norm_list = nn.ModuleList()
for mhsa_cnt in range(params['nb_self_attn_layers']):
self.mhsa_block_list.append(nn.MultiheadAttention(embed_dim=self.params['rnn_size'], num_heads=params['nb_heads'], dropout=params['dropout_rate'], batch_first=True))
self.layer_norm_list.append(nn.LayerNorm(self.params['rnn_size']))
self.fnn_list = torch.nn.ModuleList()
if params['nb_fnn_layers']:
for fc_cnt in range(params['nb_fnn_layers']):
self.fnn_list.append(nn.Linear(params['fnn_size'] if fc_cnt else self.params['rnn_size'], params['fnn_size'], bias=True))
self.fnn_list.append(nn.Linear(params['fnn_size'] if params['nb_fnn_layers'] else self.params['rnn_size'], out_shape[-1], bias=True))
def forward(self, x):
"""input: (batch_size, mic_channels, time_steps, mel_bins)"""
for conv_cnt in range(len(self.conv_block_list)):
x = self.conv_block_list[conv_cnt](x)
x = x.transpose(1, 2).contiguous()
x = x.view(x.shape[0], x.shape[1], -1).contiguous()
(x, _) = self.gru(x)
x = torch.tanh(x)
x = x[:, :, x.shape[-1]//2:] * x[:, :, :x.shape[-1]//2]
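        # The two directions of the bidirectional GRU are gated against each other element-wise,
        # halving the feature dimension back to rnn_size.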
# pos_embedding = self.pos_embedder(x)
# x = x + pos_embedding
for mhsa_cnt in range(len(self.mhsa_block_list)):
x_attn_in = x
x, _ = self.mhsa_block_list[mhsa_cnt](x_attn_in, x_attn_in, x_attn_in)
x = x + x_attn_in
x = self.layer_norm_list[mhsa_cnt](x)
for fnn_cnt in range(len(self.fnn_list) - 1):
x = self.fnn_list[fnn_cnt](x)
doa = torch.tanh(self.fnn_list[-1](x))
return doa
| 9,178 | 48.085561 | 215 |
py
|
seld-dcase2023
|
seld-dcase2023-main/train_seldnet.py
|
#
# A wrapper script that trains the SELDnet. The training stops when the early stopping metric - SELD error stops improving.
#
import os
import sys
import numpy as np
import matplotlib.pyplot as plot
import cls_feature_class
import cls_data_generator
import seldnet_model
import parameters
import time
from time import gmtime, strftime
import torch
import torch.nn as nn
import torch.optim as optim
plot.switch_backend('agg')
from IPython import embed
from cls_compute_seld_results import ComputeSELDResults, reshape_3Dto2D
from SELD_evaluation_metrics import distance_between_cartesian_coordinates
import seldnet_model
def get_accdoa_labels(accdoa_in, nb_classes):
x, y, z = accdoa_in[:, :, :nb_classes], accdoa_in[:, :, nb_classes:2*nb_classes], accdoa_in[:, :, 2*nb_classes:]
sed = np.sqrt(x**2 + y**2 + z**2) > 0.5
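    # A class counts as active when the norm of its ACCDOA vector exceeds 0.5, i.e. half the length of a unit DOA vector.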
return sed, accdoa_in
def get_multi_accdoa_labels(accdoa_in, nb_classes):
"""
Args:
accdoa_in: [batch_size, frames, num_track*num_axis*num_class=3*3*12]
nb_classes: scalar
Return:
sedX: [batch_size, frames, num_class=12]
doaX: [batch_size, frames, num_axis*num_class=3*12]
"""
x0, y0, z0 = accdoa_in[:, :, :1*nb_classes], accdoa_in[:, :, 1*nb_classes:2*nb_classes], accdoa_in[:, :, 2*nb_classes:3*nb_classes]
sed0 = np.sqrt(x0**2 + y0**2 + z0**2) > 0.5
doa0 = accdoa_in[:, :, :3*nb_classes]
x1, y1, z1 = accdoa_in[:, :, 3*nb_classes:4*nb_classes], accdoa_in[:, :, 4*nb_classes:5*nb_classes], accdoa_in[:, :, 5*nb_classes:6*nb_classes]
sed1 = np.sqrt(x1**2 + y1**2 + z1**2) > 0.5
doa1 = accdoa_in[:, :, 3*nb_classes: 6*nb_classes]
x2, y2, z2 = accdoa_in[:, :, 6*nb_classes:7*nb_classes], accdoa_in[:, :, 7*nb_classes:8*nb_classes], accdoa_in[:, :, 8*nb_classes:]
sed2 = np.sqrt(x2**2 + y2**2 + z2**2) > 0.5
doa2 = accdoa_in[:, :, 6*nb_classes:]
return sed0, doa0, sed1, doa1, sed2, doa2
def determine_similar_location(sed_pred0, sed_pred1, doa_pred0, doa_pred1, class_cnt, thresh_unify, nb_classes):
if (sed_pred0 == 1) and (sed_pred1 == 1):
if distance_between_cartesian_coordinates(doa_pred0[class_cnt], doa_pred0[class_cnt+1*nb_classes], doa_pred0[class_cnt+2*nb_classes],
doa_pred1[class_cnt], doa_pred1[class_cnt+1*nb_classes], doa_pred1[class_cnt+2*nb_classes]) < thresh_unify:
return 1
else:
return 0
else:
return 0
def test_epoch(data_generator, model, criterion, dcase_output_folder, params, device):
# Number of frames for a 60 second audio with 100ms hop length = 600 frames
# Number of frames in one batch (batch_size* sequence_length) consists of all the 600 frames above with zero padding in the remaining frames
test_filelist = data_generator.get_filelist()
nb_test_batches, test_loss = 0, 0.
model.eval()
file_cnt = 0
with torch.no_grad():
for data, target in data_generator.generate():
# load one batch of data
data, target = torch.tensor(data).to(device).float(), torch.tensor(target).to(device).float()
# process the batch of data based on chosen mode
output = model(data)
loss = criterion(output, target)
if params['multi_accdoa'] is True:
sed_pred0, doa_pred0, sed_pred1, doa_pred1, sed_pred2, doa_pred2 = get_multi_accdoa_labels(output.detach().cpu().numpy(), params['unique_classes'])
sed_pred0 = reshape_3Dto2D(sed_pred0)
doa_pred0 = reshape_3Dto2D(doa_pred0)
sed_pred1 = reshape_3Dto2D(sed_pred1)
doa_pred1 = reshape_3Dto2D(doa_pred1)
sed_pred2 = reshape_3Dto2D(sed_pred2)
doa_pred2 = reshape_3Dto2D(doa_pred2)
else:
sed_pred, doa_pred = get_accdoa_labels(output.detach().cpu().numpy(), params['unique_classes'])
sed_pred = reshape_3Dto2D(sed_pred)
doa_pred = reshape_3Dto2D(doa_pred)
            # dump SELD results to the corresponding file
output_file = os.path.join(dcase_output_folder, test_filelist[file_cnt].replace('.npy', '.csv'))
file_cnt += 1
output_dict = {}
if params['multi_accdoa'] is True:
for frame_cnt in range(sed_pred0.shape[0]):
for class_cnt in range(sed_pred0.shape[1]):
# determine whether track0 is similar to track1
flag_0sim1 = determine_similar_location(sed_pred0[frame_cnt][class_cnt], sed_pred1[frame_cnt][class_cnt], doa_pred0[frame_cnt], doa_pred1[frame_cnt], class_cnt, params['thresh_unify'], params['unique_classes'])
flag_1sim2 = determine_similar_location(sed_pred1[frame_cnt][class_cnt], sed_pred2[frame_cnt][class_cnt], doa_pred1[frame_cnt], doa_pred2[frame_cnt], class_cnt, params['thresh_unify'], params['unique_classes'])
flag_2sim0 = determine_similar_location(sed_pred2[frame_cnt][class_cnt], sed_pred0[frame_cnt][class_cnt], doa_pred2[frame_cnt], doa_pred0[frame_cnt], class_cnt, params['thresh_unify'], params['unique_classes'])
# unify or not unify according to flag
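                        # No similar pair -> keep every active track as a separate event; exactly one similar pair -> average
                        # that pair (and keep the third track if active); two or more -> average all three tracks.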
if flag_0sim1 + flag_1sim2 + flag_2sim0 == 0:
if sed_pred0[frame_cnt][class_cnt]>0.5:
if frame_cnt not in output_dict:
output_dict[frame_cnt] = []
output_dict[frame_cnt].append([class_cnt, doa_pred0[frame_cnt][class_cnt], doa_pred0[frame_cnt][class_cnt+params['unique_classes']], doa_pred0[frame_cnt][class_cnt+2*params['unique_classes']]])
if sed_pred1[frame_cnt][class_cnt]>0.5:
if frame_cnt not in output_dict:
output_dict[frame_cnt] = []
output_dict[frame_cnt].append([class_cnt, doa_pred1[frame_cnt][class_cnt], doa_pred1[frame_cnt][class_cnt+params['unique_classes']], doa_pred1[frame_cnt][class_cnt+2*params['unique_classes']]])
if sed_pred2[frame_cnt][class_cnt]>0.5:
if frame_cnt not in output_dict:
output_dict[frame_cnt] = []
output_dict[frame_cnt].append([class_cnt, doa_pred2[frame_cnt][class_cnt], doa_pred2[frame_cnt][class_cnt+params['unique_classes']], doa_pred2[frame_cnt][class_cnt+2*params['unique_classes']]])
elif flag_0sim1 + flag_1sim2 + flag_2sim0 == 1:
if frame_cnt not in output_dict:
output_dict[frame_cnt] = []
if flag_0sim1:
if sed_pred2[frame_cnt][class_cnt]>0.5:
output_dict[frame_cnt].append([class_cnt, doa_pred2[frame_cnt][class_cnt], doa_pred2[frame_cnt][class_cnt+params['unique_classes']], doa_pred2[frame_cnt][class_cnt+2*params['unique_classes']]])
doa_pred_fc = (doa_pred0[frame_cnt] + doa_pred1[frame_cnt]) / 2
output_dict[frame_cnt].append([class_cnt, doa_pred_fc[class_cnt], doa_pred_fc[class_cnt+params['unique_classes']], doa_pred_fc[class_cnt+2*params['unique_classes']]])
elif flag_1sim2:
if sed_pred0[frame_cnt][class_cnt]>0.5:
output_dict[frame_cnt].append([class_cnt, doa_pred0[frame_cnt][class_cnt], doa_pred0[frame_cnt][class_cnt+params['unique_classes']], doa_pred0[frame_cnt][class_cnt+2*params['unique_classes']]])
doa_pred_fc = (doa_pred1[frame_cnt] + doa_pred2[frame_cnt]) / 2
output_dict[frame_cnt].append([class_cnt, doa_pred_fc[class_cnt], doa_pred_fc[class_cnt+params['unique_classes']], doa_pred_fc[class_cnt+2*params['unique_classes']]])
elif flag_2sim0:
if sed_pred1[frame_cnt][class_cnt]>0.5:
output_dict[frame_cnt].append([class_cnt, doa_pred1[frame_cnt][class_cnt], doa_pred1[frame_cnt][class_cnt+params['unique_classes']], doa_pred1[frame_cnt][class_cnt+2*params['unique_classes']]])
doa_pred_fc = (doa_pred2[frame_cnt] + doa_pred0[frame_cnt]) / 2
output_dict[frame_cnt].append([class_cnt, doa_pred_fc[class_cnt], doa_pred_fc[class_cnt+params['unique_classes']], doa_pred_fc[class_cnt+2*params['unique_classes']]])
elif flag_0sim1 + flag_1sim2 + flag_2sim0 >= 2:
if frame_cnt not in output_dict:
output_dict[frame_cnt] = []
doa_pred_fc = (doa_pred0[frame_cnt] + doa_pred1[frame_cnt] + doa_pred2[frame_cnt]) / 3
output_dict[frame_cnt].append([class_cnt, doa_pred_fc[class_cnt], doa_pred_fc[class_cnt+params['unique_classes']], doa_pred_fc[class_cnt+2*params['unique_classes']]])
else:
for frame_cnt in range(sed_pred.shape[0]):
for class_cnt in range(sed_pred.shape[1]):
if sed_pred[frame_cnt][class_cnt]>0.5:
if frame_cnt not in output_dict:
output_dict[frame_cnt] = []
output_dict[frame_cnt].append([class_cnt, doa_pred[frame_cnt][class_cnt], doa_pred[frame_cnt][class_cnt+params['unique_classes']], doa_pred[frame_cnt][class_cnt+2*params['unique_classes']]])
data_generator.write_output_format_file(output_file, output_dict)
test_loss += loss.item()
nb_test_batches += 1
if params['quick_test'] and nb_test_batches == 4:
break
test_loss /= nb_test_batches
return test_loss
def train_epoch(data_generator, optimizer, model, criterion, params, device):
nb_train_batches, train_loss = 0, 0.
model.train()
for data, target in data_generator.generate():
# load one batch of data
data, target = torch.tensor(data).to(device).float(), torch.tensor(target).to(device).float()
optimizer.zero_grad()
# process the batch of data based on chosen mode
output = model(data)
loss = criterion(output, target)
loss.backward()
optimizer.step()
train_loss += loss.item()
nb_train_batches += 1
if params['quick_test'] and nb_train_batches == 4:
break
train_loss /= nb_train_batches
return train_loss
def main(argv):
"""
Main wrapper for training sound event localization and detection network.
:param argv: expects two optional inputs.
        first input: task_id - (optional) To choose the system configuration in parameters.py.
(default) 1 - uses default parameters
second input: job_id - (optional) all the output files will be uniquely represented with this.
(default) 1
"""
print(argv)
if len(argv) != 3:
print('\n\n')
print('-------------------------------------------------------------------------------------------------------')
print('The code expected two optional inputs')
print('\t>> python seld.py <task-id> <job-id>')
        print('\t\t<task-id> is used to choose the user-defined parameter set from parameters.py')
print('Using default inputs for now')
print('\t\t<job-id> is a unique identifier which is used for output filenames (models, training plots). '
'You can use any number or string for this.')
print('-------------------------------------------------------------------------------------------------------')
print('\n\n')
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
torch.autograd.set_detect_anomaly(True)
# use parameter set defined by user
task_id = '1' if len(argv) < 2 else argv[1]
params = parameters.get_params(task_id)
job_id = 1 if len(argv) < 3 else argv[-1]
# Training setup
train_splits, val_splits, test_splits = None, None, None
if params['mode'] == 'dev':
if '2020' in params['dataset_dir']:
test_splits = [1]
val_splits = [2]
train_splits = [[3, 4, 5, 6]]
elif '2021' in params['dataset_dir']:
test_splits = [6]
val_splits = [5]
train_splits = [[1, 2, 3, 4]]
elif '2022' in params['dataset_dir']:
test_splits = [[4]]
val_splits = [[4]]
train_splits = [[1, 2, 3]]
elif '2023' in params['dataset_dir']:
test_splits = [[4]]
val_splits = [[4]]
train_splits = [[1, 2, 3]]
else:
print('ERROR: Unknown dataset splits')
exit()
for split_cnt, split in enumerate(test_splits):
print('\n\n---------------------------------------------------------------------------------------------------')
print('------------------------------------ SPLIT {} -----------------------------------------------'.format(split))
print('---------------------------------------------------------------------------------------------------')
# Unique name for the run
loc_feat = params['dataset']
if params['dataset'] == 'mic':
if params['use_salsalite']:
loc_feat = '{}_salsa'.format(params['dataset'])
else:
loc_feat = '{}_gcc'.format(params['dataset'])
loc_output = 'multiaccdoa' if params['multi_accdoa'] else 'accdoa'
cls_feature_class.create_folder(params['model_dir'])
unique_name = '{}_{}_{}_split{}_{}_{}'.format(
task_id, job_id, params['mode'], split_cnt, loc_output, loc_feat
)
model_name = '{}_model.h5'.format(os.path.join(params['model_dir'], unique_name))
print("unique_name: {}\n".format(unique_name))
# Load train and validation data
print('Loading training dataset:')
data_gen_train = cls_data_generator.DataGenerator(
params=params, split=train_splits[split_cnt]
)
print('Loading validation dataset:')
data_gen_val = cls_data_generator.DataGenerator(
params=params, split=val_splits[split_cnt], shuffle=False, per_file=True
)
# Collect i/o data size and load model configuration
data_in, data_out = data_gen_train.get_data_sizes()
model = seldnet_model.SeldModel(data_in, data_out, params).to(device)
if params['finetune_mode']:
print('Running in finetuning mode. Initializing the model to the weights - {}'.format(params['pretrained_model_weights']))
model.load_state_dict(torch.load(params['pretrained_model_weights'], map_location='cpu'))
print('---------------- SELD-net -------------------')
print('FEATURES:\n\tdata_in: {}\n\tdata_out: {}\n'.format(data_in, data_out))
        print('MODEL:\n\tdropout_rate: {}\n\tCNN: nb_cnn_filt: {}, f_pool_size: {}, t_pool_size: {}\n\trnn_size: {}, nb_attention_blocks: {}, fnn_size: {}\n'.format(
            params['dropout_rate'], params['nb_cnn2d_filt'], params['f_pool_size'], params['t_pool_size'], params['rnn_size'], params['nb_self_attn_layers'],
            params['fnn_size']))
print(model)
# Dump results in DCASE output format for calculating final scores
dcase_output_val_folder = os.path.join(params['dcase_output_dir'], '{}_{}_val'.format(unique_name, strftime("%Y%m%d%H%M%S", gmtime())))
cls_feature_class.delete_and_create_folder(dcase_output_val_folder)
print('Dumping recording-wise val results in: {}'.format(dcase_output_val_folder))
# Initialize evaluation metric class
score_obj = ComputeSELDResults(params)
# start training
best_val_epoch = -1
best_ER, best_F, best_LE, best_LR, best_seld_scr = 1., 0., 180., 0., 9999
patience_cnt = 0
nb_epoch = 2 if params['quick_test'] else params['nb_epochs']
optimizer = optim.Adam(model.parameters(), lr=params['lr'])
if params['multi_accdoa'] is True:
criterion = seldnet_model.MSELoss_ADPIT()
else:
criterion = nn.MSELoss()
for epoch_cnt in range(nb_epoch):
# ---------------------------------------------------------------------
# TRAINING
# ---------------------------------------------------------------------
start_time = time.time()
train_loss = train_epoch(data_gen_train, optimizer, model, criterion, params, device)
train_time = time.time() - start_time
# ---------------------------------------------------------------------
# VALIDATION
# ---------------------------------------------------------------------
start_time = time.time()
val_loss = test_epoch(data_gen_val, model, criterion, dcase_output_val_folder, params, device)
# Calculate the DCASE 2021 metrics - Location-aware detection and Class-aware localization scores
val_ER, val_F, val_LE, val_LR, val_seld_scr, classwise_val_scr = score_obj.get_SELD_Results(dcase_output_val_folder)
val_time = time.time() - start_time
# Save model if loss is good
if val_seld_scr <= best_seld_scr:
best_val_epoch, best_ER, best_F, best_LE, best_LR, best_seld_scr = epoch_cnt, val_ER, val_F, val_LE, val_LR, val_seld_scr
torch.save(model.state_dict(), model_name)
# Print stats
print(
'epoch: {}, time: {:0.2f}/{:0.2f}, '
# 'train_loss: {:0.2f}, val_loss: {:0.2f}, '
'train_loss: {:0.4f}, val_loss: {:0.4f}, '
'ER/F/LE/LR/SELD: {}, '
'best_val_epoch: {} {}'.format(
epoch_cnt, train_time, val_time,
train_loss, val_loss,
'{:0.2f}/{:0.2f}/{:0.2f}/{:0.2f}/{:0.2f}'.format(val_ER, val_F, val_LE, val_LR, val_seld_scr),
best_val_epoch, '({:0.2f}/{:0.2f}/{:0.2f}/{:0.2f}/{:0.2f})'.format(best_ER, best_F, best_LE, best_LR, best_seld_scr))
)
patience_cnt += 1
if patience_cnt > params['patience']:
break
# ---------------------------------------------------------------------
# Evaluate on unseen test data
# ---------------------------------------------------------------------
print('Load best model weights')
model.load_state_dict(torch.load(model_name, map_location='cpu'))
print('Loading unseen test dataset:')
data_gen_test = cls_data_generator.DataGenerator(
params=params, split=test_splits[split_cnt], shuffle=False, per_file=True
)
# Dump results in DCASE output format for calculating final scores
dcase_output_test_folder = os.path.join(params['dcase_output_dir'], '{}_{}_test'.format(unique_name, strftime("%Y%m%d%H%M%S", gmtime())))
cls_feature_class.delete_and_create_folder(dcase_output_test_folder)
print('Dumping recording-wise test results in: {}'.format(dcase_output_test_folder))
test_loss = test_epoch(data_gen_test, model, criterion, dcase_output_test_folder, params, device)
use_jackknife=True
test_ER, test_F, test_LE, test_LR, test_seld_scr, classwise_test_scr = score_obj.get_SELD_Results(dcase_output_test_folder, is_jackknife=use_jackknife )
print('\nTest Loss')
print('SELD score (early stopping metric): {:0.2f} {}'.format(test_seld_scr[0] if use_jackknife else test_seld_scr, '[{:0.2f}, {:0.2f}]'.format(test_seld_scr[1][0], test_seld_scr[1][1]) if use_jackknife else ''))
print('SED metrics: Error rate: {:0.2f} {}, F-score: {:0.1f} {}'.format(test_ER[0] if use_jackknife else test_ER, '[{:0.2f}, {:0.2f}]'.format(test_ER[1][0], test_ER[1][1]) if use_jackknife else '', 100* test_F[0] if use_jackknife else 100* test_F, '[{:0.2f}, {:0.2f}]'.format(100* test_F[1][0], 100* test_F[1][1]) if use_jackknife else ''))
print('DOA metrics: Localization error: {:0.1f} {}, Localization Recall: {:0.1f} {}'.format(test_LE[0] if use_jackknife else test_LE, '[{:0.2f} , {:0.2f}]'.format(test_LE[1][0], test_LE[1][1]) if use_jackknife else '', 100*test_LR[0] if use_jackknife else 100*test_LR,'[{:0.2f}, {:0.2f}]'.format(100*test_LR[1][0], 100*test_LR[1][1]) if use_jackknife else ''))
if params['average']=='macro':
print('Classwise results on unseen test data')
print('Class\tER\tF\tLE\tLR\tSELD_score')
for cls_cnt in range(params['unique_classes']):
print('{}\t{:0.2f} {}\t{:0.2f} {}\t{:0.2f} {}\t{:0.2f} {}\t{:0.2f} {}'.format(
cls_cnt,
classwise_test_scr[0][0][cls_cnt] if use_jackknife else classwise_test_scr[0][cls_cnt], '[{:0.2f}, {:0.2f}]'.format(classwise_test_scr[1][0][cls_cnt][0], classwise_test_scr[1][0][cls_cnt][1]) if use_jackknife else '',
classwise_test_scr[0][1][cls_cnt] if use_jackknife else classwise_test_scr[1][cls_cnt], '[{:0.2f}, {:0.2f}]'.format(classwise_test_scr[1][1][cls_cnt][0], classwise_test_scr[1][1][cls_cnt][1]) if use_jackknife else '',
classwise_test_scr[0][2][cls_cnt] if use_jackknife else classwise_test_scr[2][cls_cnt], '[{:0.2f}, {:0.2f}]'.format(classwise_test_scr[1][2][cls_cnt][0], classwise_test_scr[1][2][cls_cnt][1]) if use_jackknife else '',
classwise_test_scr[0][3][cls_cnt] if use_jackknife else classwise_test_scr[3][cls_cnt], '[{:0.2f}, {:0.2f}]'.format(classwise_test_scr[1][3][cls_cnt][0], classwise_test_scr[1][3][cls_cnt][1]) if use_jackknife else '',
classwise_test_scr[0][4][cls_cnt] if use_jackknife else classwise_test_scr[4][cls_cnt], '[{:0.2f}, {:0.2f}]'.format(classwise_test_scr[1][4][cls_cnt][0], classwise_test_scr[1][4][cls_cnt][1]) if use_jackknife else ''))
if __name__ == "__main__":
try:
sys.exit(main(sys.argv))
except (ValueError, IOError) as e:
sys.exit(e)
| 22,604 | 55.5125 | 369 |
py
|
seld-dcase2023
|
seld-dcase2023-main/SELD_evaluation_metrics.py
|
# Implements the localization and detection metrics proposed in [1] with extensions to support multi-instance of the same class from [2].
#
# [1] Joint Measurement of Localization and Detection of Sound Events
# Annamaria Mesaros, Sharath Adavanne, Archontis Politis, Toni Heittola, Tuomas Virtanen
# WASPAA 2019
#
# [2] Overview and Evaluation of Sound Event Localization and Detection in DCASE 2019
# Politis, Archontis, Annamaria Mesaros, Sharath Adavanne, Toni Heittola, and Tuomas Virtanen.
# IEEE/ACM Transactions on Audio, Speech, and Language Processing (2020).
#
# This script has MIT license
#
import numpy as np
eps = np.finfo(float).eps
from scipy.optimize import linear_sum_assignment
from IPython import embed
class SELDMetrics(object):
def __init__(self, doa_threshold=20, nb_classes=11, average='macro'):
'''
This class implements both the class-sensitive localization and location-sensitive detection metrics.
Additionally, based on the user input, the corresponding averaging is performed within the segment.
:param nb_classes: Number of sound classes. In the paper, nb_classes = 11
:param doa_thresh: DOA threshold for location sensitive detection.
'''
self._nb_classes = nb_classes
        # Variables for Location-sensitive detection performance
self._TP = np.zeros(self._nb_classes)
self._FP = np.zeros(self._nb_classes)
self._FP_spatial = np.zeros(self._nb_classes)
self._FN = np.zeros(self._nb_classes)
self._Nref = np.zeros(self._nb_classes)
self._spatial_T = doa_threshold
self._S = 0
self._D = 0
self._I = 0
# Variables for Class-sensitive localization performance
self._total_DE = np.zeros(self._nb_classes)
self._DE_TP = np.zeros(self._nb_classes)
self._DE_FP = np.zeros(self._nb_classes)
self._DE_FN = np.zeros(self._nb_classes)
self._average = average
def early_stopping_metric(self, _er, _f, _le, _lr):
"""
Compute early stopping metric from sed and doa errors.
        :param _er: error rate (0 to 1 range)
        :param _f: F-score (0 to 1 range)
        :param _le: localization error (in degrees)
        :param _lr: localization recall (0 to 1 range)
:return: early stopping metric result
"""
seld_metric = np.mean([
_er,
1 - _f,
_le / 180,
1 - _lr
], 0)
return seld_metric
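    # Illustrative check (values chosen for clarity, not from the original code):
    # with _er=0.5, _f=0.5, _le=90 degrees and _lr=0.5 every averaged term equals
    # 0.5, so early_stopping_metric(0.5, 0.5, 90., 0.5) evaluates to 0.5.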
def compute_seld_scores(self):
'''
Collect the final SELD scores
:return: returns both location-sensitive detection scores and class-sensitive localization scores
'''
ER = (self._S + self._D + self._I) / (self._Nref.sum() + eps)
classwise_results = []
if self._average == 'micro':
# Location-sensitive detection performance
F = self._TP.sum() / (eps + self._TP.sum() + self._FP_spatial.sum() + 0.5 * (self._FP.sum() + self._FN.sum()))
# Class-sensitive localization performance
LE = self._total_DE.sum() / float(self._DE_TP.sum() + eps) if self._DE_TP.sum() else 180
LR = self._DE_TP.sum() / (eps + self._DE_TP.sum() + self._DE_FN.sum())
SELD_scr = self.early_stopping_metric(ER, F, LE, LR)
elif self._average == 'macro':
# Location-sensitive detection performance
F = self._TP / (eps + self._TP + self._FP_spatial + 0.5 * (self._FP + self._FN))
# Class-sensitive localization performance
LE = self._total_DE / (self._DE_TP + eps)
LE[self._DE_TP==0] = 180.0
LR = self._DE_TP / (eps + self._DE_TP + self._DE_FN)
SELD_scr = self.early_stopping_metric(np.repeat(ER, self._nb_classes), F, LE, LR)
classwise_results = np.array([np.repeat(ER, self._nb_classes), F, LE, LR, SELD_scr])
F, LE, LR, SELD_scr = F.mean(), LE.mean(), LR.mean(), SELD_scr.mean()
return ER, F, LE, LR, SELD_scr, classwise_results
def update_seld_scores(self, pred, gt):
'''
Implements the spatial error averaging according to equation 5 in the paper [1] (see papers in the title of the code).
Adds the multitrack extensions proposed in paper [2]
The input pred/gt can either both be Cartesian or Degrees
:param pred: dictionary containing class-wise prediction results for each N-seconds segment block
:param gt: dictionary containing class-wise groundtruth for each N-seconds segment block
'''
for block_cnt in range(len(gt.keys())):
loc_FN, loc_FP = 0, 0
for class_cnt in range(self._nb_classes):
                # Counting the number of reference tracks for each class in the segment
nb_gt_doas = max([len(val) for val in gt[block_cnt][class_cnt][0][1]]) if class_cnt in gt[block_cnt] else None
nb_pred_doas = max([len(val) for val in pred[block_cnt][class_cnt][0][1]]) if class_cnt in pred[block_cnt] else None
if nb_gt_doas is not None:
self._Nref[class_cnt] += nb_gt_doas
if class_cnt in gt[block_cnt] and class_cnt in pred[block_cnt]:
# True positives or False positive case
# NOTE: For multiple tracks per class, associate the predicted DOAs to corresponding reference
# DOA-tracks using hungarian algorithm and then compute the average spatial distance between
# the associated reference-predicted tracks.
# Reference and predicted track matching
matched_track_dist = {}
matched_track_cnt = {}
gt_ind_list = gt[block_cnt][class_cnt][0][0]
pred_ind_list = pred[block_cnt][class_cnt][0][0]
for gt_ind, gt_val in enumerate(gt_ind_list):
if gt_val in pred_ind_list:
gt_arr = np.array(gt[block_cnt][class_cnt][0][1][gt_ind])
gt_ids = np.arange(len(gt_arr[:, -1])) #TODO if the reference has track IDS use here - gt_arr[:, -1]
gt_doas = gt_arr[:, 1:]
pred_ind = pred_ind_list.index(gt_val)
pred_arr = np.array(pred[block_cnt][class_cnt][0][1][pred_ind])
pred_doas = pred_arr[:, 1:]
if gt_doas.shape[-1] == 2: # convert DOAs to radians, if the input is in degrees
gt_doas = gt_doas * np.pi / 180.
pred_doas = pred_doas * np.pi / 180.
dist_list, row_inds, col_inds = least_distance_between_gt_pred(gt_doas, pred_doas)
# Collect the frame-wise distance between matched ref-pred DOA pairs
for dist_cnt, dist_val in enumerate(dist_list):
matched_gt_track = gt_ids[row_inds[dist_cnt]]
if matched_gt_track not in matched_track_dist:
matched_track_dist[matched_gt_track], matched_track_cnt[matched_gt_track] = [], []
matched_track_dist[matched_gt_track].append(dist_val)
matched_track_cnt[matched_gt_track].append(pred_ind)
# Update evaluation metrics based on the distance between ref-pred tracks
if len(matched_track_dist) == 0:
# if no tracks are found. This occurs when the predicted DOAs are not aligned frame-wise to the reference DOAs
loc_FN += nb_pred_doas
self._FN[class_cnt] += nb_pred_doas
self._DE_FN[class_cnt] += nb_pred_doas
else:
# for the associated ref-pred tracks compute the metrics
for track_id in matched_track_dist:
total_spatial_dist = sum(matched_track_dist[track_id])
total_framewise_matching_doa = len(matched_track_cnt[track_id])
avg_spatial_dist = total_spatial_dist / total_framewise_matching_doa
# Class-sensitive localization performance
self._total_DE[class_cnt] += avg_spatial_dist
self._DE_TP[class_cnt] += 1
# Location-sensitive detection performance
if avg_spatial_dist <= self._spatial_T:
self._TP[class_cnt] += 1
else:
loc_FP += 1
self._FP_spatial[class_cnt] += 1
# in the multi-instance of same class scenario, if the number of predicted tracks are greater
# than reference tracks count as FP, if it less than reference count as FN
if nb_pred_doas > nb_gt_doas:
# False positive
loc_FP += (nb_pred_doas-nb_gt_doas)
self._FP[class_cnt] += (nb_pred_doas-nb_gt_doas)
self._DE_FP[class_cnt] += (nb_pred_doas-nb_gt_doas)
elif nb_pred_doas < nb_gt_doas:
# False negative
loc_FN += (nb_gt_doas-nb_pred_doas)
self._FN[class_cnt] += (nb_gt_doas-nb_pred_doas)
self._DE_FN[class_cnt] += (nb_gt_doas-nb_pred_doas)
elif class_cnt in gt[block_cnt] and class_cnt not in pred[block_cnt]:
# False negative
loc_FN += nb_gt_doas
self._FN[class_cnt] += nb_gt_doas
self._DE_FN[class_cnt] += nb_gt_doas
elif class_cnt not in gt[block_cnt] and class_cnt in pred[block_cnt]:
# False positive
loc_FP += nb_pred_doas
self._FP[class_cnt] += nb_pred_doas
self._DE_FP[class_cnt] += nb_pred_doas
self._S += np.minimum(loc_FP, loc_FN)
self._D += np.maximum(0, loc_FN - loc_FP)
self._I += np.maximum(0, loc_FP - loc_FN)
return
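# Minimal usage sketch (illustrative; the segment-level label dictionaries are the
# ones produced by cls_feature_class.FeatureClass.segment_labels, as used in
# cls_compute_seld_results.py):
#   metrics = SELDMetrics(doa_threshold=20, nb_classes=13, average='macro')
#   metrics.update_seld_scores(pred_segment_labels, gt_segment_labels)
#   ER, F, LE, LR, seld_scr, classwise = metrics.compute_seld_scores()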
def distance_between_spherical_coordinates_rad(az1, ele1, az2, ele2):
"""
    Angular distance between two spherical coordinates (azimuth and elevation in radians)
    MORE: https://en.wikipedia.org/wiki/Great-circle_distance
    :return: angular distance in degrees
"""
dist = np.sin(ele1) * np.sin(ele2) + np.cos(ele1) * np.cos(ele2) * np.cos(np.abs(az1 - az2))
# Making sure the dist values are in -1 to 1 range, else np.arccos kills the job
dist = np.clip(dist, -1, 1)
dist = np.arccos(dist) * 180 / np.pi
return dist
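# Example (illustrative): two directions 30 degrees apart in azimuth at zero
# elevation, given in radians, are 30 degrees apart on the great circle:
#   distance_between_spherical_coordinates_rad(0., 0., np.pi / 6, 0.)  # ~30.0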
def distance_between_cartesian_coordinates(x1, y1, z1, x2, y2, z2):
"""
Angular distance between two cartesian coordinates
MORE: https://en.wikipedia.org/wiki/Great-circle_distance
Check 'From chord length' section
:return: angular distance in degrees
"""
# Normalize the Cartesian vectors
N1 = np.sqrt(x1**2 + y1**2 + z1**2 + 1e-10)
N2 = np.sqrt(x2**2 + y2**2 + z2**2 + 1e-10)
x1, y1, z1, x2, y2, z2 = x1/N1, y1/N1, z1/N1, x2/N2, y2/N2, z2/N2
#Compute the distance
dist = x1*x2 + y1*y2 + z1*z2
dist = np.clip(dist, -1, 1)
dist = np.arccos(dist) * 180 / np.pi
return dist
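# Example (illustrative): orthogonal unit vectors are 90 degrees apart:
#   distance_between_cartesian_coordinates(1., 0., 0., 0., 1., 0.)  # ~90.0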
def least_distance_between_gt_pred(gt_list, pred_list):
"""
Shortest distance between two sets of DOA coordinates. Given a set of groundtruth coordinates,
and its respective predicted coordinates, we calculate the distance between each of the
coordinate pairs resulting in a matrix of distances, where one axis represents the number of groundtruth
coordinates and the other the predicted coordinates. The number of estimated peaks need not be the same as in
groundtruth, thus the distance matrix is not always a square matrix. We use the hungarian algorithm to find the
least cost in this distance matrix.
    :param gt_list: ground-truth Cartesian or polar coordinates (polar angles in radians)
    :param pred_list: predicted Cartesian or polar coordinates (polar angles in radians)
    :return: cost - distances of the matched reference-prediction DOA pairs
    :return: row_ind - indices of the matched ground-truth DOAs
    :return: col_ind - indices of the matched predicted DOAs
"""
gt_len, pred_len = gt_list.shape[0], pred_list.shape[0]
ind_pairs = np.array([[x, y] for y in range(pred_len) for x in range(gt_len)])
cost_mat = np.zeros((gt_len, pred_len))
if gt_len and pred_len:
if len(gt_list[0]) == 3: #Cartesian
x1, y1, z1, x2, y2, z2 = gt_list[ind_pairs[:, 0], 0], gt_list[ind_pairs[:, 0], 1], gt_list[ind_pairs[:, 0], 2], pred_list[ind_pairs[:, 1], 0], pred_list[ind_pairs[:, 1], 1], pred_list[ind_pairs[:, 1], 2]
cost_mat[ind_pairs[:, 0], ind_pairs[:, 1]] = distance_between_cartesian_coordinates(x1, y1, z1, x2, y2, z2)
else:
az1, ele1, az2, ele2 = gt_list[ind_pairs[:, 0], 0], gt_list[ind_pairs[:, 0], 1], pred_list[ind_pairs[:, 1], 0], pred_list[ind_pairs[:, 1], 1]
cost_mat[ind_pairs[:, 0], ind_pairs[:, 1]] = distance_between_spherical_coordinates_rad(az1, ele1, az2, ele2)
row_ind, col_ind = linear_sum_assignment(cost_mat)
cost = cost_mat[row_ind, col_ind]
return cost, row_ind, col_ind
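# Illustrative usage (values chosen for clarity): one reference DOA at
# (azimuth=0, elevation=0) and one prediction 30 degrees away in azimuth,
# both in radians, are matched with a cost of ~30 degrees:
#   cost, row_ind, col_ind = least_distance_between_gt_pred(
#       np.array([[0., 0.]]), np.array([[np.pi / 6, 0.]]))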
| 13,630 | 48.930403 | 215 |
py
|
seld-dcase2023
|
seld-dcase2023-main/cls_compute_seld_results.py
|
import os
import SELD_evaluation_metrics
import cls_feature_class
import parameters
import numpy as np
from scipy import stats
from IPython import embed
def jackknife_estimation(global_value, partial_estimates, significance_level=0.05):
"""
Compute jackknife statistics from a global value and partial estimates.
Original function by Nicolas Turpault
:param global_value: Value calculated using all (N) examples
:param partial_estimates: Partial estimates using N-1 examples at a time
:param significance_level: Significance value used for t-test
:return:
estimate: estimated value using partial estimates
bias: Bias computed between global value and the partial estimates
std_err: Standard deviation of partial estimates
conf_interval: Confidence interval obtained after t-test
"""
mean_jack_stat = np.mean(partial_estimates)
n = len(partial_estimates)
bias = (n - 1) * (mean_jack_stat - global_value)
std_err = np.sqrt(
(n - 1) * np.mean((partial_estimates - mean_jack_stat) * (partial_estimates - mean_jack_stat), axis=0)
)
# bias-corrected "jackknifed estimate"
estimate = global_value - bias
# jackknife confidence interval
if not (0 < significance_level < 1):
raise ValueError("confidence level must be in (0, 1).")
t_value = stats.t.ppf(1 - significance_level / 2, n - 1)
# t-test
conf_interval = estimate + t_value * np.array((-std_err, std_err))
return estimate, bias, std_err, conf_interval
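# Minimal usage sketch (illustrative numbers, not taken from any experiment):
# given a metric computed on all files and its leave-one-out partial estimates,
# the call returns the bias-corrected estimate and a 95% confidence interval.
#   est, bias, std_err, ci = jackknife_estimation(
#       global_value=0.42,
#       partial_estimates=np.array([0.40, 0.41, 0.43, 0.44]),
#       significance_level=0.05)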
class ComputeSELDResults(object):
def __init__(
self, params, ref_files_folder=None, use_polar_format=True
):
self._use_polar_format = use_polar_format
self._desc_dir = ref_files_folder if ref_files_folder is not None else os.path.join(params['dataset_dir'], 'metadata_dev')
self._doa_thresh = params['lad_doa_thresh']
# Load feature class
self._feat_cls = cls_feature_class.FeatureClass(params)
# collect reference files
self._ref_labels = {}
for split in os.listdir(self._desc_dir):
for ref_file in os.listdir(os.path.join(self._desc_dir, split)):
# Load reference description file
gt_dict = self._feat_cls.load_output_format_file(os.path.join(self._desc_dir, split, ref_file))
if not self._use_polar_format:
gt_dict = self._feat_cls.convert_output_format_polar_to_cartesian(gt_dict)
nb_ref_frames = max(list(gt_dict.keys()))
self._ref_labels[ref_file] = [self._feat_cls.segment_labels(gt_dict, nb_ref_frames), nb_ref_frames]
self._nb_ref_files = len(self._ref_labels)
self._average = params['average']
@staticmethod
def get_nb_files(file_list, tag='all'):
'''
Given the file_list, this function returns a subset of files corresponding to the tag.
        Tags supported
        'all' - all predicted files
        'room' - files grouped by the room identifier in the filename
        :param file_list: complete list of predicted files
        :param tag: Supports two tags 'all', 'room'
:return: Subset of files according to chosen tag
'''
_group_ind = {'room': 10}
_cnt_dict = {}
for _filename in file_list:
if tag == 'all':
_ind = 0
else:
_ind = int(_filename[_group_ind[tag]])
if _ind not in _cnt_dict:
_cnt_dict[_ind] = []
_cnt_dict[_ind].append(_filename)
return _cnt_dict
def get_SELD_Results(self, pred_files_path, is_jackknife=False):
# collect predicted files info
pred_files = os.listdir(pred_files_path)
pred_labels_dict = {}
eval = SELD_evaluation_metrics.SELDMetrics(nb_classes=self._feat_cls.get_nb_classes(), doa_threshold=self._doa_thresh, average=self._average)
for pred_cnt, pred_file in enumerate(pred_files):
# Load predicted output format file
pred_dict = self._feat_cls.load_output_format_file(os.path.join(pred_files_path, pred_file))
if self._use_polar_format:
pred_dict = self._feat_cls.convert_output_format_cartesian_to_polar(pred_dict)
pred_labels = self._feat_cls.segment_labels(pred_dict, self._ref_labels[pred_file][1])
# Calculated scores
eval.update_seld_scores(pred_labels, self._ref_labels[pred_file][0])
if is_jackknife:
pred_labels_dict[pred_file] = pred_labels
# Overall SED and DOA scores
ER, F, LE, LR, seld_scr, classwise_results = eval.compute_seld_scores()
if is_jackknife:
global_values = [ER, F, LE, LR, seld_scr]
if len(classwise_results):
global_values.extend(classwise_results.reshape(-1).tolist())
partial_estimates = []
# Calculate partial estimates by leave-one-out method
for leave_file in pred_files:
leave_one_out_list = pred_files[:]
leave_one_out_list.remove(leave_file)
eval = SELD_evaluation_metrics.SELDMetrics(nb_classes=self._feat_cls.get_nb_classes(), doa_threshold=self._doa_thresh, average=self._average)
for pred_cnt, pred_file in enumerate(leave_one_out_list):
# Calculated scores
eval.update_seld_scores(pred_labels_dict[pred_file], self._ref_labels[pred_file][0])
ER, F, LE, LR, seld_scr, classwise_results = eval.compute_seld_scores()
leave_one_out_est = [ER, F, LE, LR, seld_scr]
if len(classwise_results):
leave_one_out_est.extend(classwise_results.reshape(-1).tolist())
# Overall SED and DOA scores
partial_estimates.append(leave_one_out_est)
partial_estimates = np.array(partial_estimates)
estimate, bias, std_err, conf_interval = [-1]*len(global_values), [-1]*len(global_values), [-1]*len(global_values), [-1]*len(global_values)
for i in range(len(global_values)):
estimate[i], bias[i], std_err[i], conf_interval[i] = jackknife_estimation(
global_value=global_values[i],
partial_estimates=partial_estimates[:, i],
significance_level=0.05
)
return [ER, conf_interval[0]], [F, conf_interval[1]], [LE, conf_interval[2]], [LR, conf_interval[3]], [seld_scr, conf_interval[4]], [classwise_results, np.array(conf_interval)[5:].reshape(5,13,2) if len(classwise_results) else []]
else:
return ER, F, LE, LR, seld_scr, classwise_results
def get_consolidated_SELD_results(self, pred_files_path, score_type_list=['all', 'room']):
'''
Get all categories of results.
;score_type_list: Supported
'all' - all the predicted files
'room' - for individual rooms
'''
# collect predicted files info
pred_files = os.listdir(pred_files_path)
nb_pred_files = len(pred_files)
# Calculate scores for different splits, overlapping sound events, and impulse responses (reverberant scenes)
print('Number of predicted files: {}\nNumber of reference files: {}'.format(nb_pred_files, self._nb_ref_files))
        print('\nCalculating {} scores for {}'.format(score_type_list, os.path.basename(pred_files_path)))
for score_type in score_type_list:
print('\n\n---------------------------------------------------------------------------------------------------')
print('------------------------------------ {} ---------------------------------------------'.format('Total score' if score_type=='all' else 'score per {}'.format(score_type)))
print('---------------------------------------------------------------------------------------------------')
split_cnt_dict = self.get_nb_files(pred_files, tag=score_type) # collect files corresponding to score_type
# Calculate scores across files for a given score_type
for split_key in np.sort(list(split_cnt_dict)):
# Load evaluation metric class
eval = SELD_evaluation_metrics.SELDMetrics(nb_classes=self._feat_cls.get_nb_classes(), doa_threshold=self._doa_thresh, average=self._average)
for pred_cnt, pred_file in enumerate(split_cnt_dict[split_key]):
# Load predicted output format file
                    pred_dict = self._feat_cls.load_output_format_file(os.path.join(pred_files_path, pred_file))
if self._use_polar_format:
pred_dict = self._feat_cls.convert_output_format_cartesian_to_polar(pred_dict)
pred_labels = self._feat_cls.segment_labels(pred_dict, self._ref_labels[pred_file][1])
# Calculated scores
eval.update_seld_scores(pred_labels, self._ref_labels[pred_file][0])
# Overall SED and DOA scores
ER, F, LE, LR, seld_scr, classwise_results = eval.compute_seld_scores()
print('\nAverage score for {} {} data using {} coordinates'.format(score_type, 'fold' if score_type=='all' else split_key, 'Polar' if self._use_polar_format else 'Cartesian' ))
print('SELD score (early stopping metric): {:0.2f}'.format(seld_scr))
print('SED metrics: Error rate: {:0.2f}, F-score:{:0.1f}'.format(ER, 100*F))
print('DOA metrics: Localization error: {:0.1f}, Localization Recall: {:0.1f}'.format(LE, 100*LR))
def reshape_3Dto2D(A):
return A.reshape(A.shape[0] * A.shape[1], A.shape[2])
if __name__ == "__main__":
    pred_output_format_files = 'results/3_11553814_dev_split0_multiaccdoa_foa_20220429142557_test' # Path of the DCASE output format files
params = parameters.get_params()
# Compute just the DCASE final results
score_obj = ComputeSELDResults(params)
use_jackknife=False
ER, F, LE, LR, seld_scr, classwise_test_scr = score_obj.get_SELD_Results(pred_output_format_files,is_jackknife=use_jackknife )
print('SELD score (early stopping metric): {:0.2f} {}'.format(seld_scr[0] if use_jackknife else seld_scr, '[{:0.2f}, {:0.2f}]'.format(seld_scr[1][0], seld_scr[1][1]) if use_jackknife else ''))
print('SED metrics: Error rate: {:0.2f} {}, F-score: {:0.1f} {}'.format(ER[0] if use_jackknife else ER, '[{:0.2f}, {:0.2f}]'.format(ER[1][0], ER[1][1]) if use_jackknife else '', 100*F[0] if use_jackknife else 100*F, '[{:0.2f}, {:0.2f}]'.format(100*F[1][0], 100*F[1][1]) if use_jackknife else ''))
print('DOA metrics: Localization error: {:0.1f} {}, Localization Recall: {:0.1f} {}'.format(LE[0] if use_jackknife else LE, '[{:0.2f}, {:0.2f}]'.format(LE[1][0], LE[1][1]) if use_jackknife else '', 100*LR[0] if use_jackknife else 100*LR,'[{:0.2f}, {:0.2f}]'.format(100*LR[1][0], 100*LR[1][1]) if use_jackknife else ''))
if params['average']=='macro':
print('Classwise results on unseen test data')
print('Class\tER\tF\tLE\tLR\tSELD_score')
for cls_cnt in range(params['unique_classes']):
print('{}\t{:0.2f} {}\t{:0.2f} {}\t{:0.2f} {}\t{:0.2f} {}\t{:0.2f} {}'.format(
cls_cnt,
classwise_test_scr[0][0][cls_cnt] if use_jackknife else classwise_test_scr[0][cls_cnt], '[{:0.2f}, {:0.2f}]'.format(classwise_test_scr[1][0][cls_cnt][0], classwise_test_scr[1][0][cls_cnt][1]) if use_jackknife else '',
classwise_test_scr[0][1][cls_cnt] if use_jackknife else classwise_test_scr[1][cls_cnt], '[{:0.2f}, {:0.2f}]'.format(classwise_test_scr[1][1][cls_cnt][0], classwise_test_scr[1][1][cls_cnt][1]) if use_jackknife else '',
classwise_test_scr[0][2][cls_cnt] if use_jackknife else classwise_test_scr[2][cls_cnt], '[{:0.2f}, {:0.2f}]'.format(classwise_test_scr[1][2][cls_cnt][0], classwise_test_scr[1][2][cls_cnt][1]) if use_jackknife else '',
classwise_test_scr[0][3][cls_cnt] if use_jackknife else classwise_test_scr[3][cls_cnt], '[{:0.2f}, {:0.2f}]'.format(classwise_test_scr[1][3][cls_cnt][0], classwise_test_scr[1][3][cls_cnt][1]) if use_jackknife else '',
classwise_test_scr[0][4][cls_cnt] if use_jackknife else classwise_test_scr[4][cls_cnt], '[{:0.2f}, {:0.2f}]'.format(classwise_test_scr[1][4][cls_cnt][0], classwise_test_scr[1][4][cls_cnt][1]) if use_jackknife else ''))
# UNCOMMENT to Compute DCASE results along with room-wise performance
# score_obj.get_consolidated_SELD_results(pred_output_format_files)
| 12,633 | 54.170306 | 324 |
py
|
api
|
api-master/api/DBFunctions.py
|
import psycopg2
import time
from configparser import ConfigParser
config = ConfigParser()
config.read ('credentials.ini')
class pgdb:
def __init__(self):
self.connect()
def connect(self):
DB_PASSWORD = config.get('database','password')
DB_USER = config.get('database','user')
self.db = psycopg2.connect("dbname='reddit' user='" + DB_USER + "' host='jupiter' password='" + DB_PASSWORD + "'")
self.db.set_session(autocommit=True)
def execute(self,sql,params):
retries = 5
while True:
retries -= 1
try:
cur = self.db.cursor()
cur.execute(sql,(params,))
rows = cur.fetchall()
cur.close()
return rows
except:
if retries <= 0:
raise
try:
time.sleep(1)
self.connect()
except:
raise
pgdb = pgdb()
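# Usage sketch (illustrative ids; mirrors how Helpers.getSubmissionsFromPg calls it):
#   rows = pgdb.execute(
#       "SELECT * FROM submission WHERE (json->>'id')::int IN %s LIMIT 5000",
#       (1234, 5678))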
| 1,008 | 24.225 | 122 |
py
|
api
|
api-master/api/Helpers.py
|
from collections import defaultdict
import requests
import json
import DBFunctions
def LooksLikeInt(s):
try:
int(s)
return True
except ValueError:
return False
def base36encode(number, alphabet='0123456789abcdefghijklmnopqrstuvwxyz'):
"""Converts an integer to a base36 string."""
    if not isinstance(number, int):
raise TypeError('number must be an integer')
base36 = ''
sign = ''
if number < 0:
sign = '-'
number = -number
if 0 <= number < len(alphabet):
return sign + alphabet[number]
while number != 0:
number, i = divmod(number, len(alphabet))
base36 = alphabet[i] + base36
return sign + base36
def base36decode(number):
return int(number, 36)
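# Illustrative round trip (example values):
#   base36encode(1300)   # -> '104'
#   base36decode('104')  # -> 1300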
def getSubmissionsFromES(ids):
nested_dict = lambda: defaultdict(nested_dict)
if not isinstance(ids, (list, tuple)):
ids = [ids]
ids_to_get = []
q = nested_dict()
q["query"]["terms"]["id"] = ids
q["size"] = 1000
response = requests.get("http://mars:9200/rs/submissions/_search", data=json.dumps(q))
s = json.loads(response.text)
results = {}
for hit in s["hits"]["hits"]:
source = hit["_source"]
base_10_id = source["id"]
source["id"] = base36encode(int(hit["_id"]))
results[base_10_id] = source
return results
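# Illustrative usage (the ids are hypothetical base-10 submission ids):
#   submissions = getSubmissionsFromES([1234, 5678])
#   # keys are the original base-10 ids; each value carries a base-36 'id' field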
def getSubmissionsFromPg(ids):
if not isinstance(ids, (list, tuple)):
ids = [ids]
ids_to_get_from_db = []
rows = DBFunctions.pgdb.execute("SELECT * FROM submission WHERE (json->>'id')::int IN %s LIMIT 5000",tuple(ids))
results = {}
data = {}
if rows:
for row in rows:
submission = row[0]
base_10_id = submission['id']
submission['id'] = base36encode(submission['id'])
if 'subreddit_id' in submission:
submission['subreddit_id'] = "t5_" + base36encode(submission['subreddit_id'])
submission.pop('name', None)
results[base_10_id] = submission
return results
| 2,063 | 27.666667 | 116 |
py
|