def write_temporal_network(gtfs, output_filename, start_time_ut=None, end_time_ut=None):
"""
Parameters
----------
gtfs : gtfspy.GTFS
output_filename : str
path of the CSV file to which the temporal network is written
start_time_ut: int | None
start time of the extract in unixtime (seconds after epoch)
end_time_ut: int | None
end time of the extract in unixtime (seconds after epoch)
"""
util.makedirs(os.path.dirname(os.path.abspath(output_filename)))
pandas_data_frame = temporal_network(gtfs, start_time_ut=start_time_ut, end_time_ut=end_time_ut)
pandas_data_frame.to_csv(output_filename, encoding='utf-8', index=False)
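# Usage sketch (not part of the source; the database path and time range are hypothetical):
# extract the temporal network of one time span to a CSV file.
from gtfspy.gtfs import GTFS
gtfs = GTFS("data/helsinki.sqlite")  # hypothetical, previously imported database
write_temporal_network(gtfs,
                       "extracts/temporal_network.csv",
                       start_time_ut=1475438400,  # example start (unixtime)
                       end_time_ut=1475524800)    # example end (unixtime)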
|
def _write_stop_to_stop_network_edges(net, file_name, data=True, fmt=None):
"""
Write out a network
Parameters
----------
net: networkx.DiGraph
base_name: str
path to the filename (without extension)
data: bool, optional
whether or not to write out any edge data present
fmt: str, optional
If "csv" write out the network in csv format.
"""
if fmt is None:
fmt = "edg"
if fmt == "edg":
if data:
networkx.write_edgelist(net, file_name, data=True)
else:
networkx.write_edgelist(net, file_name)
elif fmt == "csv":
with open(file_name, 'w') as f:
# writing out the header
edge_iter = net.edges_iter(data=True)
_, _, edg_data = next(edge_iter)
edg_data_keys = list(sorted(edg_data.keys()))
header = ";".join(["from_stop_I", "to_stop_I"] + edg_data_keys)
f.write(header)
for from_node_I, to_node_I, data in net.edges_iter(data=True):
f.write("\n")
values = [str(from_node_I), str(to_node_I)]
data_values = []
for key in edg_data_keys:
if key == "route_I_counts":
route_I_counts_string = str(data[key]).replace(" ", "")[1:-1]
data_values.append(route_I_counts_string)
else:
data_values.append(str(data[key]))
all_values = values + data_values
f.write(";".join(all_values))
|
def write_gtfs(gtfs, output):
"""
Write out the database according to the GTFS format.
Parameters
----------
gtfs: gtfspy.GTFS
output: str
Path where to put the GTFS files
if output ends with ".zip" a ZIP-file is created instead.
Returns
-------
None
"""
output = os.path.abspath(output)
uuid_str = "tmp_" + str(uuid.uuid1())
if output[-4:] == '.zip':
zip = True
out_basepath = os.path.dirname(os.path.abspath(output))
if not os.path.exists(out_basepath):
raise IOError(out_basepath + " does not exist, cannot write gtfs as a zip")
tmp_dir = os.path.join(out_basepath, str(uuid_str))
# the final archive will be written to output (i.e. out_basepath + ".zip")
else:
zip = False
out_basepath = output
tmp_dir = os.path.join(out_basepath + "_" + str(uuid_str))
os.makedirs(tmp_dir, exist_ok=True)
gtfs_table_to_writer = {
"agency": _write_gtfs_agencies,
"calendar": _write_gtfs_calendar,
"calendar_dates": _write_gtfs_calendar_dates,
# fare_attributes and fare_rules omitted (seldom used)
"feed_info": _write_gtfs_feed_info,
# "frequencies": not written, as they are incorporated into trips and routes,
# Frequencies table is expanded into other tables on initial import. -> Thus frequencies.txt is not created
"routes": _write_gtfs_routes,
"shapes": _write_gtfs_shapes,
"stops": _write_gtfs_stops,
"stop_times": _write_gtfs_stop_times,
"transfers": _write_gtfs_transfers,
"trips": _write_gtfs_trips,
}
for table, writer in gtfs_table_to_writer.items():
fname_to_write = os.path.join(tmp_dir, table + '.txt')
print(fname_to_write)
writer(gtfs, open(fname_to_write, 'w'))
if zip:
shutil.make_archive(output[:-4], 'zip', tmp_dir)
shutil.rmtree(tmp_dir)
else:
print("moving " + str(tmp_dir) + " to " + out_basepath)
os.rename(tmp_dir, out_basepath)
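# Usage sketch (hypothetical paths): export an imported feed back to GTFS text files.
# If the output path ends with ".zip", a single archive is created instead of a directory.
from gtfspy.gtfs import GTFS
gtfs = GTFS("data/helsinki.sqlite")            # hypothetical database
write_gtfs(gtfs, "exports/helsinki_gtfs.zip")  # writes exports/helsinki_gtfs.zip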
|
def _remove_I_columns(df):
"""
Remove columns ending with "_I" from a pandas.DataFrame
Parameters
----------
df: pandas.DataFrame
Returns
-------
None
"""
all_columns = list(filter(lambda el: el[-2:] == "_I", df.columns))
for column in all_columns:
del df[column]
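# Example (illustrative data): the helper drops gtfspy's internal "_I" id columns in place.
import pandas as pd
df = pd.DataFrame({"stop_I": [1, 2], "stop_id": ["A", "B"], "lat": [60.1, 60.2]})
_remove_I_columns(df)
print(list(df.columns))  # -> ['stop_id', 'lat']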
|
def _scan_footpaths_to_departure_stop(self, connection_dep_stop, connection_dep_time, arrival_time_target):
""" A helper method for scanning the footpaths. Updates self._stop_profiles accordingly"""
for _, neighbor, data in self._walk_network.edges_iter(nbunch=[connection_dep_stop],
data=True):
d_walk = data['d_walk']
neighbor_dep_time = connection_dep_time - d_walk / self._walk_speed
pt = LabelTimeSimple(departure_time=neighbor_dep_time, arrival_time_target=arrival_time_target)
self._stop_profiles[neighbor].update_pareto_optimal_tuples(pt)
|
def plot_route_network_from_gtfs(g, ax=None, spatial_bounds=None, map_alpha=0.8, scalebar=True, legend=True,
return_smopy_map=False, map_style=None):
"""
Parameters
----------
g: gtfspy.gtfs.GTFS
the GTFS object from which the route shapes are read
ax: matplotlib.Axes object, optional
If None, a new figure and an axis are created
spatial_bounds: dict, optional
with str keys: lon_min, lon_max, lat_min, lat_max
return_smopy_map: bool, optional
whether to also return the underlying smopy map, defaults to False
Returns
-------
ax: matplotlib.axes.Axes
"""
assert(isinstance(g, GTFS))
route_shapes = g.get_all_route_shapes()
if spatial_bounds is None:
spatial_bounds = get_spatial_bounds(g, as_dict=True)
if ax is not None:
bbox = ax.get_window_extent().transformed(ax.figure.dpi_scale_trans.inverted())
width, height = bbox.width, bbox.height
spatial_bounds = _expand_spatial_bounds_to_fit_axes(spatial_bounds, width, height)
return plot_as_routes(route_shapes,
ax=ax,
spatial_bounds=spatial_bounds,
map_alpha=map_alpha,
plot_scalebar=scalebar,
legend=legend,
return_smopy_map=return_smopy_map,
map_style=map_style)
|
def plot_as_routes(route_shapes, ax=None, spatial_bounds=None, map_alpha=0.8, plot_scalebar=True, legend=True,
return_smopy_map=False, line_width_attribute=None, line_width_scale=1.0, map_style=None):
"""
Parameters
----------
route_shapes: list of dicts
each dict should have the keys
name (str), type (int), agency (str), lats (list), lons (list)
ax: matplotlib.Axes object, optional
spatial_bounds: dict
with str keys: lon_min, lon_max, lat_min, lat_max
map_alpha: float
alpha (transparency) of the background map
plot_scalebar: bool
legend: bool
whether to add a legend mapping route types to colors
return_smopy_map: bool
whether to also return the underlying smopy map
line_width_attribute: str, optional
key in the route_shape dicts used for scaling the line width
line_width_scale: float
Returns
-------
ax: matplotlib.axes object
"""
lon_min = spatial_bounds['lon_min']
lon_max = spatial_bounds['lon_max']
lat_min = spatial_bounds['lat_min']
lat_max = spatial_bounds['lat_max']
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111)
smopy_map = get_smopy_map(lon_min, lon_max, lat_min, lat_max, map_style=map_style)
ax = smopy_map.show_mpl(figsize=None, ax=ax, alpha=map_alpha)
bound_pixel_xs, bound_pixel_ys = smopy_map.to_pixels(numpy.array([lat_min, lat_max]),
numpy.array([lon_min, lon_max]))
route_types_to_lines = {}
for shape in route_shapes:
route_type = ROUTE_TYPE_CONVERSION[shape['type']]
lats = numpy.array(shape['lats'])
lons = numpy.array(shape['lons'])
if line_width_attribute:
line_width = line_width_scale * shape[line_width_attribute]
else:
line_width = 1
xs, ys = smopy_map.to_pixels(lats, lons)
line, = ax.plot(xs, ys, linewidth=line_width, color=ROUTE_TYPE_TO_COLOR[route_type], zorder=ROUTE_TYPE_TO_ZORDER[route_type])
route_types_to_lines[route_type] = line
if legend:
lines = list(route_types_to_lines.values())
labels = [ROUTE_TYPE_TO_SHORT_DESCRIPTION[route_type] for route_type in route_types_to_lines.keys()]
ax.legend(lines, labels, loc="upper left")
if plot_scalebar:
_add_scale_bar(ax, lat_max, lon_min, lon_max, bound_pixel_xs.max() - bound_pixel_xs.min())
ax.set_xticks([])
ax.set_yticks([])
ax.set_xlim(bound_pixel_xs.min(), bound_pixel_xs.max())
ax.set_ylim(bound_pixel_ys.max(), bound_pixel_ys.min())
if return_smopy_map:
return ax, smopy_map
else:
return ax
|
def _expand_spatial_bounds_to_fit_axes(bounds, ax_width, ax_height):
"""
Parameters
----------
bounds: dict
ax_width: float
ax_height: float
Returns
-------
spatial_bounds
"""
b = bounds
height_meters = util.wgs84_distance(b['lat_min'], b['lon_min'], b['lat_max'], b['lon_min'])
width_meters = util.wgs84_distance(b['lat_min'], b['lon_min'], b['lat_min'], b['lon_max'])
x_per_y_meters = width_meters / height_meters
x_per_y_axes = ax_width / ax_height
if x_per_y_axes > x_per_y_meters: # x-axis
# axis x_axis has slack -> the spatial longitude bounds need to be extended
width_meters_new = (height_meters * x_per_y_axes)
d_lon_new = ((b['lon_max'] - b['lon_min']) / width_meters) * width_meters_new
mean_lon = (b['lon_min'] + b['lon_max'])/2.
lon_min = mean_lon - d_lon_new / 2.
lon_max = mean_lon + d_lon_new / 2.
spatial_bounds = {
"lon_min": lon_min,
"lon_max": lon_max,
"lat_min": b['lat_min'],
"lat_max": b['lat_max']
}
else:
# axis y_axis has slack -> the spatial latitude bounds need to be extended
height_meters_new = (width_meters / x_per_y_axes)
d_lat_new = ((b['lat_max'] - b['lat_min']) / height_meters) * height_meters_new
mean_lat = (b['lat_min'] + b['lat_max']) / 2.
lat_min = mean_lat - d_lat_new / 2.
lat_max = mean_lat + d_lat_new / 2.
spatial_bounds = {
"lon_min": b['lon_min'],
"lon_max": b['lon_max'],
"lat_min": lat_min,
"lat_max": lat_max
}
return spatial_bounds
|
def plot_all_stops(g, ax=None, scalebar=False):
"""
Parameters
----------
g: A gtfspy.gtfs.GTFS object
ax: matplotlib.Axes object, optional
If None, a new figure and an axis are created, otherwise results are plotted on the given axis.
scalebar: bool, optional
Whether to include a scalebar in the plot.
Returns
-------
ax: matplotlib.Axes
"""
assert(isinstance(g, GTFS))
lon_min, lon_max, lat_min, lat_max = get_spatial_bounds(g)
smopy_map = get_smopy_map(lon_min, lon_max, lat_min, lat_max)
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111)
ax = smopy_map.show_mpl(figsize=None, ax=ax, alpha=0.8)
stops = g.stops()
lats = numpy.array(stops['lat'])
lons = numpy.array(stops['lon'])
xs, ys = smopy_map.to_pixels(lats, lons)
ax.scatter(xs, ys, color="red", s=10)
ax.set_xlim(min(xs), max(xs))
ax.set_ylim(max(ys), min(ys))
return ax
|
def set_process_timezone(TZ):
"""
Parameters
----------
TZ: string
"""
try:
prev_timezone = os.environ['TZ']
except KeyError:
prev_timezone = None
os.environ['TZ'] = TZ
time.tzset() # Cause C-library functions to notice the update.
return prev_timezone
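# Usage sketch: temporarily switch the process timezone (requires time.tzset(),
# i.e. a non-Windows platform) and restore the previous setting afterwards.
previous_tz = set_process_timezone("Europe/Helsinki")
try:
    pass  # ... code that relies on local time being Helsinki time ...
finally:
    if previous_tz is not None:
        set_process_timezone(previous_tz)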
|
def wgs84_distance(lat1, lon1, lat2, lon2):
"""Distance (in meters) between two points in WGS84 coord system."""
dLat = math.radians(lat2 - lat1)
dLon = math.radians(lon2 - lon1)
a = (math.sin(dLat / 2) * math.sin(dLat / 2) +
math.cos(math.radians(lat1)) * math.cos(math.radians(lat2)) *
math.sin(dLon / 2) * math.sin(dLon / 2))
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
d = EARTH_RADIUS * c
return d
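# Worked example (approximate coordinates): distance from central Helsinki to central
# Tampere, which should come out at roughly 160 km.
d = wgs84_distance(60.1699, 24.9384, 61.4978, 23.7610)
print(round(d / 1000.0), "km")  # ~160 km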
|
def create_file(fname=None, fname_tmp=None, tmpdir=None,
save_tmpfile=False, keepext=False):
"""Context manager for making files with possibility of failure.
If you are creating a file, it is possible that the code will fail
and leave a corrupt intermediate file. This is especially damaging
if this is used as automatic input to another process. This context
manager helps by creating a temporary filename, your code runs and
creates that temporary file, and then if no exceptions are raised,
the context manager will move the temporary file to the original
filename you intended to open.
Parameters
----------
fname : str
Target filename, this file will be created if all goes well
fname_tmp : str
If given, this is used as the temporary filename.
tmpdir : str or bool
If given, put temporary files in this directory. If `True`,
then find a good tmpdir that is not on local filesystem.
save_tmpfile : bool
If true, the temporary file is not deleted if an exception
is raised.
keepext : bool, default False
If true, have tmpfile have same extension as final file.
Returns (as context manager value)
----------------------------------
fname_tmp: str
Temporary filename to be used. Same as `fname_tmp`
if given as an argument.
Raises
------
Re-raises any exception occurring during the context block.
"""
# Do nothing if requesting sqlite memory DB.
if fname == ':memory:':
yield fname
return
if fname_tmp is None:
# no tmpfile name given - compute some basic info
basename = os.path.basename(fname)
root, ext = os.path.splitext(basename)
dir_ = this_dir = os.path.dirname(fname)
# If the extension is not to be kept separately, fold it into the
# root so that the temporary file gets no suffix of its own.
if not keepext:
root = root + ext
ext = ''
if tmpdir:
# we should use a different temporary directory
if tmpdir is True:
# Find a directory ourself, searching some common
# places.
for dir__ in possible_tmpdirs:
if os.access(dir__, os.F_OK):
dir_ = dir__
break
# Make the actual tmpfile, with our chosen tmpdir, directory,
# extension. Set it to not delete automatically, since on
# success we will move it to elsewhere.
tmpfile = tempfile.NamedTemporaryFile(
prefix='tmp-' + root + '-', suffix=ext, dir=dir_, delete=False)
fname_tmp = tmpfile.name
try:
yield fname_tmp
except Exception as e:
if save_tmpfile:
print("Temporary file is '%s'" % fname_tmp)
else:
os.unlink(fname_tmp)
raise
# Move the file back to the original location.
try:
os.rename(fname_tmp, fname)
# We have to manually set permissions. tempfile does not use
# umask, for obvious reasons.
os.chmod(fname, 0o777 & ~current_umask)
# 'Invalid cross-device link' - you can't rename files across
# filesystems. So, we have to fallback to moving it. But, we
# want to move it using tmpfiles also, so that the final file
# appearing is atomic. We use... tmpfiles.
except OSError as e:
# New temporary file in same directory
tmpfile2 = tempfile.NamedTemporaryFile(
prefix='tmp-' + root + '-', suffix=ext, dir=this_dir, delete=False)
# Copy contents over
shutil.copy(fname_tmp, tmpfile2.name)
# Rename new tmpfile, unlink old one on other filesystem.
os.rename(tmpfile2.name, fname)
os.chmod(fname, 0o666 & ~current_umask)
os.unlink(fname_tmp)
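# Usage sketch: assuming create_file is used as a contextlib.contextmanager (the
# decorator is not shown in this excerpt), the target file appears only if the
# with-block completes without raising. The path below is hypothetical.
with create_file("output/stats.csv") as tmp_fname:
    with open(tmp_fname, "w") as f:
        f.write("stop_I;count\n")
# on success, "output/stats.csv" now exists; on failure the temporary file is removed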
|
def execute(cur, *args):
"""Utility function to print sqlite queries before executing.
Use instead of cur.execute(). First argument is cursor.
cur.execute(stmt)
becomes
util.execute(cur, stmt)
"""
stmt = args[0]
if len(args) > 1:
stmt = stmt.replace('%', '%%').replace('?', '%r')
print(stmt % (args[1]))
return cur.execute(*args)
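# Example (in-memory database): the statement is echoed with its parameters substituted
# before being executed on the cursor.
import sqlite3
conn = sqlite3.connect(":memory:")
cur = conn.cursor()
cur.execute("CREATE TABLE stops (stop_I INTEGER)")
execute(cur, "SELECT count(*) FROM stops WHERE stop_I=?", (1,))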
|
def str_time_to_day_seconds(time):
"""
Converts time strings to integer seconds
:param time: %H:%M:%S string
:return: integer seconds
"""
t = str(time).split(':')
seconds = int(t[0]) * 3600 + int(t[1]) * 60 + int(t[2])
return seconds
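# Examples: GTFS allows times past midnight, so the hour field may exceed 23.
print(str_time_to_day_seconds("08:15:30"))  # 29730
print(str_time_to_day_seconds("25:30:00"))  # 91800 (i.e. 01:30 on the following day)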
|
def makedirs(path):
"""
Create directories if they do not exist, otherwise do nothing.
Return path for convenience
"""
if not os.path.isdir(path):
os.makedirs(path)
return path
|
def source_csv_to_pandas(path, table, read_csv_args=None):
"""
Parameters
----------
path: str
path to directory or zipfile
table: str
name of table
read_csv_args: dict, optional
keyword arguments passed on to the pandas read_csv function
Returns
-------
df: pandas:DataFrame
"""
if '.txt' not in table:
table += '.txt'
if isinstance(path, dict):
data_obj = path[table]
f = data_obj.split("\n")
else:
if os.path.isdir(path):
f = open(os.path.join(path, table))
else:
z = zipfile.ZipFile(path)
for path in z.namelist():
if table in path:
table = path
break
try:
f = zip_open(z, table)
except KeyError as e:
return pd.DataFrame()
if read_csv_args:
df = pd.read_csv(**read_csv_args)
else:
df = pd.read_csv(f)
return df
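# Usage sketch (hypothetical path): read a single GTFS table from a zip file or a
# directory of .txt files into a DataFrame.
stops_df = source_csv_to_pandas("data/helsinki_gtfs.zip", "stops")
print(stops_df.columns.tolist())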
|
def write_shapefile(data, shapefile_path):
"""
:param data: list of dicts where each dictionary contains the keys lons and lats
:param shapefile_path: path where the shapefile is saved
:return: None
"""
from numpy import int64
w = shp.Writer(shp.POLYLINE) # shapeType=3)
fields = []
encode_strings = []
# This makes sure every geom has all the attributes
w.autoBalance = 1
# Create all attribute fields except for lats and lons. The field names are also
# saved for the record-writing phase below.
if not fields:
for key, value in data[0].items():
if key != u'lats' and key != u'lons':
fields.append(key)
if type(value) == float:
w.field(key.encode('ascii'), fieldType='N', size=11, decimal=3)
print("float", type(value))
elif type(value) == int or type(value) == int64:
print("int", type(value))
# encode_strings.append(".encode('ascii')")
w.field(key.encode('ascii'), fieldType='N', size=6, decimal=0)
else:
print("other type", type(value))
w.field(key.encode('ascii'))
for dict_item in data:
line = []
lineparts = []
records = []
records_string = ''
for lat, lon in zip(dict_item[u'lats'], dict_item[u'lons']):
line.append([float(lon), float(lat)])
lineparts.append(line)
w.line(parts=lineparts)
# Write one record with a value for each attribute field
# (the number of columns may differ between datasets).
w.record(*[dict_item[field] for field in fields])
w.save(shapefile_path)
|
def draw_net_using_node_coords(net):
"""
Plot a networkx.Graph by using the lat and lon attributes of nodes.
Parameters
----------
net : networkx.Graph
Returns
-------
fig : matplotlib.figure
the figure object where the network is plotted
"""
import matplotlib.pyplot as plt
fig = plt.figure()
node_coords = {}
for node, data in net.nodes(data=True):
node_coords[node] = (data['lon'], data['lat'])
ax = fig.add_subplot(111)
networkx.draw(net, pos=node_coords, ax=ax, node_size=50)
return fig
|
def difference_of_pandas_dfs(df_self, df_other, col_names=None):
"""
Returns a dataframe with all rows of df_other that are not in df_self, considering only the columns specified in col_names.
:param df_self: pandas Dataframe
:param df_other: pandas Dataframe
:param col_names: list of column names
:return:
"""
df = pd.concat([df_self, df_other])
df = df.reset_index(drop=True)
df_gpby = df.groupby(col_names)
idx = [x[0] for x in list(df_gpby.groups.values()) if len(x) == 1]
df_sym_diff = df.reindex(idx)
df_diff = pd.concat([df_other, df_sym_diff])
df_diff = df_diff.reset_index(drop=True)
df_gpby = df_diff.groupby(col_names)
idx = [x[0] for x in list(df_gpby.groups.values()) if len(x) == 2]
df_diff = df_diff.reindex(idx)
return df_diff
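# Example (illustrative data): rows of df_other that do not appear in df_self when
# compared on the given columns.
import pandas as pd
df_self = pd.DataFrame({"trip_id": ["a", "b"], "route_id": ["r1", "r1"]})
df_other = pd.DataFrame({"trip_id": ["b", "c"], "route_id": ["r1", "r2"]})
diff = difference_of_pandas_dfs(df_self, df_other, col_names=["trip_id", "route_id"])
print(diff)  # expected: the single row ("c", "r2")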
|
def _finalize_profiles(self):
"""
Deal with the first walks by joining profiles to other stops within walking distance.
"""
for stop, stop_profile in self._stop_profiles.items():
assert (isinstance(stop_profile, NodeProfileMultiObjective))
neighbor_label_bags = []
walk_durations_to_neighbors = []
departure_arrival_stop_pairs = []
if stop_profile.get_walk_to_target_duration() != 0 and stop in self._walk_network.node:
neighbors = networkx.all_neighbors(self._walk_network, stop)
for neighbor in neighbors:
neighbor_profile = self._stop_profiles[neighbor]
assert (isinstance(neighbor_profile, NodeProfileMultiObjective))
neighbor_real_connection_labels = neighbor_profile.get_labels_for_real_connections()
neighbor_label_bags.append(neighbor_real_connection_labels)
walk_durations_to_neighbors.append(int(self._walk_network.get_edge_data(stop, neighbor)["d_walk"] /
self._walk_speed))
departure_arrival_stop_pairs.append((stop, neighbor))
stop_profile.finalize(neighbor_label_bags, walk_durations_to_neighbors, departure_arrival_stop_pairs)
|
def import_gtfs(gtfs_sources, output, preserve_connection=False,
print_progress=True, location_name=None, **kwargs):
"""Import a GTFS database
gtfs_sources: str, dict, list
Paths to the gtfs zip file or to the directory containing the GTFS data.
Alternatively, a dict can be provide that maps gtfs filenames
(like 'stops.txt' and 'agencies.txt') to their string presentations.
output: str or sqlite3.Connection
path to the new database to be created, or an existing
sqlite3 connection
preserve_connection: bool, optional
Whether to close the connection in the end, or not.
print_progress: bool, optional
Whether to print progress output
location_name: str, optional
set the location of this database
"""
if isinstance(output, sqlite3.Connection):
conn = output
else:
# if os.path.isfile(output):
# raise RuntimeError('File already exists')
conn = sqlite3.connect(output)
if not isinstance(gtfs_sources, list):
gtfs_sources = [gtfs_sources]
cur = conn.cursor()
time_import_start = time.time()
# These are a bit unsafe, but make importing much faster,
# especially on scratch.
cur.execute('PRAGMA page_size = 4096;')
cur.execute('PRAGMA mmap_size = 1073741824;')
cur.execute('PRAGMA cache_size = -2000000;')
cur.execute('PRAGMA temp_store=2;')
# Changes of isolation level are python3.6 workarounds -
# eventually will probably be fixed and this can be removed.
conn.isolation_level = None # change to autocommit mode (former default)
cur.execute('PRAGMA journal_mode = OFF;')
#cur.execute('PRAGMA journal_mode = WAL;')
cur.execute('PRAGMA synchronous = OFF;')
conn.isolation_level = '' # change back to python default.
# end python3.6 workaround
# Do the actual importing.
loaders = [L(gtfssource=gtfs_sources, print_progress=print_progress, **kwargs) for L in Loaders]
for loader in loaders:
loader.assert_exists_if_required()
# Do initial import. This consists of making tables, raw insert
# of the CSVs, and then indexing.
for loader in loaders:
loader.import_(conn)
# Do any operations that require all tables present.
for loader in loaders:
loader.post_import_round2(conn)
# Make any views
for loader in loaders:
loader.make_views(conn)
# Run any postprocessing steps
for F in postprocessors:
F(conn)
# Set up some basic metadata.
from gtfspy import gtfs as mod_gtfs
G = mod_gtfs.GTFS(output)
G.meta['gen_time_ut'] = time.time()
G.meta['gen_time'] = time.ctime()
G.meta['import_seconds'] = time.time() - time_import_start
G.meta['download_date'] = ''
G.meta['location_name'] = ''
G.meta['n_gtfs_sources'] = len(gtfs_sources)
# Extract things from GTFS
download_date_strs = []
for i, source in enumerate(gtfs_sources):
if len(gtfs_sources) == 1:
prefix = ""
else:
prefix = "feed_" + str(i) + "_"
if isinstance(source, string_types):
G.meta[prefix + 'original_gtfs'] = decode_six(source) if source else None
# Extract GTFS date. Last date pattern in filename.
filename_date_list = re.findall(r'\d{4}-\d{2}-\d{2}', source)
if filename_date_list:
date_str = filename_date_list[-1]
G.meta[prefix + 'download_date'] = date_str
download_date_strs.append(date_str)
if location_name:
G.meta['location_name'] = location_name
else:
location_name_list = re.findall(r'/([^/]+)/\d{4}-\d{2}-\d{2}', source)
if location_name_list:
G.meta[prefix + 'location_name'] = location_name_list[-1]
else:
try:
G.meta[prefix + 'location_name'] = source.split("/")[-4]
except:
G.meta[prefix + 'location_name'] = source
if G.meta['download_date'] == "":
unique_download_dates = list(set(download_date_strs))
if len(unique_download_dates) == 1:
G.meta['download_date'] = unique_download_dates[0]
G.meta['timezone'] = cur.execute('SELECT timezone FROM agencies LIMIT 1').fetchone()[0]
stats.update_stats(G)
del G
if print_progress:
print("Vacuuming...")
# Next 3 lines are python 3.6 work-arounds again.
conn.isolation_level = None # former default of autocommit mode
cur.execute('VACUUM;')
conn.isolation_level = '' # back to python default
# end python3.6 workaround
if print_progress:
print("Analyzing...")
cur.execute('ANALYZE')
if not (preserve_connection is True):
conn.close()
|
def validate_day_start_ut(conn):
"""This validates the day_start_ut of the days table."""
G = GTFS(conn)
cur = conn.execute('SELECT date, day_start_ut FROM days')
for date, day_start_ut in cur:
#print date, day_start_ut
assert day_start_ut == G.get_day_start_ut(date)
|
def main_make_views(gtfs_fname):
"""Re-create all views.
"""
print("creating views")
conn = GTFS(fname_or_conn=gtfs_fname).conn
for L in Loaders:
L(None).make_views(conn)
conn.commit()
|
def _validate_table_row_counts(self):
"""
Reads the source .txt files, counts their rows, and compares the row counts with those in the GTFS database.
:return:
"""
for db_table_name in DB_TABLE_NAME_TO_SOURCE_FILE.keys():
table_name_source_file = DB_TABLE_NAME_TO_SOURCE_FILE[db_table_name]
row_warning_str = DB_TABLE_NAME_TO_ROWS_MISSING_WARNING[db_table_name]
# Row count in GTFS object:
database_row_count = self.gtfs.get_row_count(db_table_name)
# Row counts in source files:
source_row_count = 0
for gtfs_source in self.gtfs_sources:
frequencies_in_source = source_csv_to_pandas(gtfs_source, 'frequencies.txt')
try:
if table_name_source_file == 'trips' and not frequencies_in_source.empty:
source_row_count += self._frequency_generated_trips_rows(gtfs_source)
elif table_name_source_file == 'stop_times' and not frequencies_in_source.empty:
source_row_count += self._compute_number_of_frequency_generated_stop_times(gtfs_source)
else:
df = source_csv_to_pandas(gtfs_source, table_name_source_file)
source_row_count += len(df.index)
except IOError as e:
if hasattr(e, "filename") and db_table_name in e.filename:
pass
else:
raise e
if source_row_count == database_row_count and self.verbose:
print("Row counts match for " + table_name_source_file + " between the source and database ("
+ str(database_row_count) + ")")
else:
difference = database_row_count - source_row_count
print('Row counts do not match for ' + str(table_name_source_file) + ': (source=' + str(source_row_count) +
', database=' + str(database_row_count) + ")")
if table_name_source_file == "calendar" and difference > 0:
query = "SELECT count(*) FROM (SELECT * FROM calendar ORDER BY service_I DESC LIMIT " \
+ str(int(difference)) + \
") WHERE start_date=end_date AND m=0 AND t=0 AND w=0 AND th=0 AND f=0 AND s=0 AND su=0"
number_of_entries_added_by_calendar_dates_loader = self.gtfs.execute_custom_query(query).fetchone()[
0]
if number_of_entries_added_by_calendar_dates_loader == difference and self.verbose:
print(" But don't worry, the extra entries seem to just dummy entries due to calendar_dates")
else:
if self.verbose:
print(" Reason for this is unknown.")
self.warnings_container.add_warning(row_warning_str, self.location, difference)
else:
self.warnings_container.add_warning(row_warning_str, self.location, difference)
|
def _validate_no_null_values(self):
"""
Loads the tables from the gtfs object and counts the number of rows that have null values in
fields that should not be null. Stores the number of null rows in warnings_container
"""
for table in DB_TABLE_NAMES:
null_not_ok_warning = "Null values in must-have columns in table {table}".format(table=table)
null_warn_warning = "Null values in good-to-have columns in table {table}".format(table=table)
null_not_ok_fields = DB_TABLE_NAME_TO_FIELDS_WHERE_NULL_NOT_OK[table]
null_warn_fields = DB_TABLE_NAME_TO_FIELDS_WHERE_NULL_OK_BUT_WARN[table]
# CW, TODO: make this validation source by source
df = self.gtfs.get_table(table)
for warning, fields in zip([null_not_ok_warning, null_warn_warning], [null_not_ok_fields, null_warn_fields]):
null_unwanted_df = df[fields]
rows_having_null = null_unwanted_df.isnull().any(1)
if sum(rows_having_null) > 0:
rows_having_unwanted_null = df[rows_having_null.values]
self.warnings_container.add_warning(warning, rows_having_unwanted_null, len(rows_having_unwanted_null))
|
def _validate_danglers(self):
"""
Checks for rows that are not referenced in the tables that should be linked
stops <> stop_times using stop_I
stop_times <> trips <> days, using trip_I
trips <> routes, using route_I
:return:
"""
for query, warning in zip(DANGLER_QUERIES, DANGLER_WARNINGS):
dangler_count = self.gtfs.execute_custom_query(query).fetchone()[0]
if dangler_count > 0:
if self.verbose:
print(str(dangler_count) + " " + warning)
self.warnings_container.add_warning(warning, self.location, count=dangler_count)
|
def _frequency_generated_trips_rows(self, gtfs_soure_path, return_df_freq=False):
"""
This function calculates the equivalent rowcounts for trips when
taking into account the generated rows in the gtfs object
Parameters
----------
gtfs_soure_path: path to the source file
param txt: txt file in question
:return: sum of all trips
"""
df_freq = source_csv_to_pandas(gtfs_soure_path, 'frequencies')
df_trips = source_csv_to_pandas(gtfs_soure_path, "trips")
df_freq['n_trips'] = df_freq.apply(lambda row: len(range(str_time_to_day_seconds(row['start_time']),
str_time_to_day_seconds(row['end_time']),
row['headway_secs'])), axis=1)
df_trips_freq = pd.merge(df_freq, df_trips, how='outer', on='trip_id')
n_freq_generated_trips = int(df_trips_freq['n_trips'].fillna(1).sum(axis=0))
if return_df_freq:
return df_trips_freq
else:
return n_freq_generated_trips
|
def _compute_number_of_frequency_generated_stop_times(self, gtfs_source_path):
"""
Parameters
----------
Same as for "_frequency_generated_trips_rows" but for stop times table
gtfs_source_path:
table_name:
Return
------
"""
df_freq = self._frequency_generated_trips_rows(gtfs_source_path, return_df_freq=True)
df_stop_times = source_csv_to_pandas(gtfs_source_path, "stop_times")
df_stop_freq = pd.merge(df_freq, df_stop_times, how='outer', on='trip_id')
return int(df_stop_freq['n_trips'].fillna(1).sum(axis=0))
|
def update_pareto_optimal_tuples(self, new_label):
"""
Parameters
----------
new_label: LabelTime
Returns
-------
updated: bool
"""
assert (isinstance(new_label, LabelTime))
if self._labels:
assert (new_label.departure_time <= self._labels[-1].departure_time)
best_later_departing_arrival_time = self._labels[-1].arrival_time_target
else:
best_later_departing_arrival_time = float('inf')
walk_to_target_arrival_time = new_label.departure_time + self._walk_to_target_duration
best_arrival_time = min(walk_to_target_arrival_time,
best_later_departing_arrival_time,
new_label.arrival_time_target)
# this should be changed to get constant time insertions / additions
# (with time-indexing)
if (new_label.arrival_time_target < walk_to_target_arrival_time and
new_label.arrival_time_target < best_later_departing_arrival_time):
self._labels.append(LabelTime(new_label.departure_time, best_arrival_time))
return True
else:
return False
|
def print_coords(rows, prefix=''):
"""Print coordinates within a sequence.
This is only used for debugging. Printed in a form that can be
pasted into Python for visualization."""
lat = [row['lat'] for row in rows]
lon = [row['lon'] for row in rows]
print('COORDS'+'-' * 5)
print("%slat, %slon = %r, %r" % (prefix, prefix, lat, lon))
print('-'*5)
|
def find_segments(stops, shape):
"""Find corresponding shape points for a list of stops and create shape break points.
Parameters
----------
stops: stop-sequence (list)
List of stop points
shape: list of shape points
shape-sequence of shape points
Returns
-------
break_points: list[int]
stops[i] corresponds to shape[break_points[i]]. This list can
be used to partition the shape points into segments between
one stop and the next.
badness: float
Lower values indicate a better fit to the shape. This is the sum of
distances (in meters) between each stop and its closest
shape point. It is not needed in normal use, but when you must
determine the best-fitting shape for a stop-sequence, use this.
"""
if not shape:
return [], 0
break_points = []
last_i = 0
cumul_d = 0
badness = 0
d_last_stop = float('inf')
lstlat, lstlon = None, None
break_shape_points = []
for stop in stops:
stlat, stlon = stop['lat'], stop['lon']
best_d = float('inf')
# print stop
if badness > 500 and badness > 30 * len(break_points):
return [], badness
for i in range(last_i, len(shape)):
d = wgs84_distance(stlat, stlon, shape[i]['lat'], shape[i]['lon'])
if lstlat:
d_last_stop = wgs84_distance(lstlat, lstlon, shape[i]['lat'], shape[i]['lon'])
# If we are getting closer to next stop, record this as
# the best stop so far.continue
if d < best_d:
best_d = d
best_i = i
# print best_d, i, last_i, len(shape)
cumul_d += d
# We have to be very careful about our stop condition.
# This is trial and error, basically.
if (d_last_stop < d) or (d > 500) or (i < best_i + 100):
continue
# We have decided our best stop, stop looking and continue
# the outer loop.
else:
badness += best_d
break_points.append(best_i)
last_i = best_i
lstlat, lstlon = stlat, stlon
break_shape_points.append(shape[best_i])
break
else:
# Executed if we did *not* break the inner loop
badness += best_d
break_points.append(best_i)
last_i = best_i
lstlat, lstlon = stlat, stlon
break_shape_points.append(shape[best_i])
pass
# print "Badness:", badness
# print_coords(stops, 'stop')
# print_coords(shape, 'shape')
# print_coords(break_shape_points, 'break')
return break_points, badness
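# Usage sketch (hypothetical coordinates): match a two-stop sequence onto a three-point
# shape; the expected break points are roughly [0, 2] with a small badness value.
stops = [{'lat': 60.1700, 'lon': 24.9400}, {'lat': 60.1710, 'lon': 24.9450}]
shape = [{'lat': 60.1700, 'lon': 24.9400},
         {'lat': 60.1705, 'lon': 24.9425},
         {'lat': 60.1710, 'lon': 24.9450}]
break_points, badness = find_segments(stops, shape)
print(break_points, badness)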
|
def find_best_segments(cur, stops, shape_ids, route_id=None,
breakpoints_cache=None):
"""Finds the best shape_id for a stop-sequence.
This is used in cases like when you have GPS data with a route
name, but you don't know the route direction. It tries shapes
going both directions and returns the shape that best matches.
Could be used in other cases as well.
Parameters
----------
cur : sqlite3.Cursor
database cursor
stops : list
shape_ids : list of shape_ids
route_id : route_id to search for stops
breakpoints_cache : dict
If given, use this to cache results from this function.
"""
cache_key = None
if breakpoints_cache is not None:
# Calculate a cache key for this sequence. If shape_id and
# all stop_Is are the same, then we assume that it is the same
# route and re-use existing breakpoints.
cache_key = (route_id, tuple(x['stop_I'] for x in stops))
if cache_key in breakpoints_cache:
print('found in cache')
return breakpoints_cache[cache_key]
if route_id is not None:
cur.execute('''SELECT DISTINCT shape_id
FROM routes
LEFT JOIN trips
USING (route_I)
WHERE route_id=?''',
(route_id,))
data = cur.fetchall()
# If not data, then route_id didn't match anything, or there
# were no shapes defined. We have to exit in this case.
if not data:
print("No data for route_id=%s" % route_id)
return [], None, None, None
#
shape_ids = list(zip(*data))[0]  # list() needed on Python 3, where zip returns an iterator
# print 'find_best_segments:', shape_ids
results = []
for shape_id in shape_ids:
shape = get_shape_points(cur, shape_id)
breakpoints, badness = find_segments(stops, shape)
results.append([badness, breakpoints, shape, shape_id])
if len(stops) > 5 and badness < 5*(len(stops)):
break
best = np.argmin(list(zip(*results))[0])  # list() needed on Python 3
# print 'best', best
badness = results[best][0]
breakpoints = results[best][1]
shape = results[best][2]
shape_id = results[best][3]
if breakpoints_cache is not None:
print("storing in cache", cache_key[0], hash(cache_key[1:]))
breakpoints_cache[cache_key] = breakpoints, badness, shape, shape_id
return breakpoints, badness, shape, shape_id
|
def return_segments(shape, break_points):
"""Break a shape into segments between stops using break_points.
This function can use the `break_points` outputs from
`find_segments`, and cuts the shape-sequence into pieces
corresponding to each stop.
"""
# print 'xxx'
# print stops
# print shape
# print break_points
# assert len(stops) == len(break_points)
segs = []
bp = 0 # not used
bp2 = 0
for i in range(len(break_points)-1):
bp = break_points[i] if break_points[i] is not None else bp2
bp2 = break_points[i+1] if break_points[i+1] is not None else bp
segs.append(shape[bp:bp2+1])
segs.append([])
return segs
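# Example: with break points [0, 2, 4], a five-point shape is cut into the overlapping
# segments shape[0:3] and shape[2:5], followed by a trailing empty segment.
shape = [{'seq': i} for i in range(5)]
print(return_segments(shape, [0, 2, 4]))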
|
def gen_cumulative_distances(stops):
"""
Add a 'd' key for distances to a stop/shape-sequence.
This takes a shape-sequence or stop-sequence, and adds an extra
'd' key that is cumulative, geographic distances between each
point. This uses `wgs84_distance` from the util module. The
distances are in meters. Distances are rounded to the nearest
integer, because otherwise JSON size increases greatly.
Parameters
----------
stops: list
elements are dicts with 'lat' and 'lon' keys
and the function adds the 'd' key ('d' stands for distance)
to the dictionaries
"""
stops[0]['d'] = 0.0
for i in range(1, len(stops)):
stops[i]['d'] = stops[i-1]['d'] + wgs84_distance(
stops[i-1]['lat'], stops[i-1]['lon'],
stops[i]['lat'], stops[i]['lon'],
)
for stop in stops:
stop['d'] = int(stop['d'])
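# Example (hypothetical coordinates): cumulative distances are added in place under the
# 'd' key and rounded to integer meters (roughly [0, 300, 600] here).
stops = [{'lat': 60.170, 'lon': 24.940},
         {'lat': 60.171, 'lon': 24.945},
         {'lat': 60.172, 'lon': 24.950}]
gen_cumulative_distances(stops)
print([stop['d'] for stop in stops])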
|
def get_shape_points(cur, shape_id):
"""
Given a shape_id, return its shape-sequence.
Parameters
----------
cur: sqlite3.Cursor
cursor to a GTFS database
shape_id: str
id of the shape
Returns
-------
shape_points: list
elements are dictionaries containing the 'seq', 'lat', and 'lon' of the shape
"""
cur.execute('''SELECT seq, lat, lon, d FROM shapes where shape_id=?
ORDER BY seq''', (shape_id,))
shape_points = [dict(seq=row[0], lat=row[1], lon=row[2], d=row[3])
for row in cur]
return shape_points
|
def get_shape_points2(cur, shape_id):
"""
Given a shape_id, return its shape-sequence (as a dict of lists).
get_shape_points function returns them as a list of dicts
Parameters
----------
cur: sqlite3.Cursor
cursor to a GTFS database
shape_id: str
id of the shape
Returns
-------
shape_points: dict of lists
dict contains the keys 'seqs', 'lats', 'lons', and 'd' (cumulative distance) of the shape
"""
cur.execute('''SELECT seq, lat, lon, d FROM shapes where shape_id=?
ORDER BY seq''', (shape_id,))
shape_points = {'seqs': [], 'lats': [], 'lons': [], 'd': []}
for row in cur:
shape_points['seqs'].append(row[0])
shape_points['lats'].append(row[1])
shape_points['lons'].append(row[2])
shape_points['d'].append(row[3])
return shape_points
|
def get_route_shape_segments(cur, route_id):
"""
Given a route_id, return the shape-sequence of one of its trips.
Parameters
----------
cur: sqlite3.Cursor
cursor to a GTFS database
route_id: str
id of the route
Returns
-------
shape_points: list
elements are dictionaries containing the 'seq', 'lat', and 'lon' of the shape
"""
cur.execute('''SELECT seq, lat, lon
FROM (
SELECT shape_id
FROM routes
LEFT JOIN trips
USING (route_I)
WHERE route_id=? limit 1
)
JOIN shapes
USING (shape_id)
ORDER BY seq''', (route_id,))
shape_points = [dict(seq=row[0], lat=row[1], lon=row[2]) for row in cur]
return shape_points
|
def get_shape_between_stops(cur, trip_I, seq_stop1=None, seq_stop2=None, shape_breaks=None):
"""
Given a trip_I (shortened id), return shape points between two stops
(seq_stop1 and seq_stop2).
trip_I is used for obtaining the full shape of one trip (route).
From the resulting shape we then keep only the shape points between
seq_stop1 and seq_stop2
trip_I---(trips)--->shape_id
trip_I, seq_stop1----(stop_times)---> shape_break1
trip_I, seq_stop2----(stop_times)---> shape_break2
shapes_id+shape_break1+shape_break2 --(shapes)--> result
Parameters
----------
cur : sqlite3.Cursor
cursor to sqlite3 DB containing GTFS
trip_I : int
transformed trip_id (i.e. a new column that is created when
GTFS is imported to a DB)
seq_stop1: int
a positive integer: the sequence number (as in the stop_times table)
of the first stop
seq_stop2: int
a positive integer: the sequence number (as in the stop_times table)
of the second stop
shape_breaks: list, optional
a precomputed pair of shape break indices; an alternative to giving
seq_stop1 and seq_stop2
Returns
-------
shapedict: dict
Dictionary containing the latitudes and longitudes:
lats=shapedict['lat']
lons=shapedict['lon']
"""
assert (seq_stop1 and seq_stop2) or shape_breaks
if not shape_breaks:
shape_breaks = []
for seq_stop in [seq_stop1, seq_stop2]:
query = """SELECT shape_break FROM stop_times
WHERE trip_I=%d AND seq=%d
""" % (trip_I, seq_stop)
for row in cur.execute(query):
shape_breaks.append(row[0])
assert len(shape_breaks) == 2
query = """SELECT seq, lat, lon
FROM (SELECT shape_id FROM trips WHERE trip_I=%d)
JOIN shapes USING (shape_id)
WHERE seq>=%d AND seq <= %d;
""" % (trip_I, shape_breaks[0], shape_breaks[1])
shapedict = {'lat': [], 'lon': [], 'seq': []}
for row in cur.execute(query):
shapedict['seq'].append(row[0])
shapedict['lat'].append(row[1])
shapedict['lon'].append(row[2])
return shapedict
|
def get_trip_points(cur, route_id, offset=0, tripid_glob=''):
"""Get all scheduled stops on a particular route_id.
Given a route_id, return the trip-stop-list with
latitude/longitudes. This is a bit more tricky than it seems,
because we have to go from table route->trips->stop_times. This
function finds an arbitrary trip (in the trips table) with this route ID,
and then returns all stop points for that trip.
Parameters
----------
cur : sqlite3.Cursor
cursor to sqlite3 DB containing GTFS
route_id : string or any
route_id to get stop points of
offset : int
LIMIT offset if you don't want the first trip returned.
tripid_glob : string
If given, allows you to limit tripids which can be selected.
Mainly useful in debugging.
Returns
-------
stop-list
List of stops in stop-seq format.
"""
extra_where = ''
if tripid_glob:
extra_where = "AND trip_id GLOB '%s'" % tripid_glob
cur.execute('SELECT seq, lat, lon '
'FROM (select trip_I from routes '
' LEFT JOIN trips USING (route_I) '
' WHERE route_id=? %s limit 1 offset ? ) '
'JOIN stop_times USING (trip_I) '
'LEFT JOIN stops USING (stop_I) '
'ORDER BY seq' % extra_where, (route_id, offset))
stop_points = [dict(seq=row[0], lat=row[1], lon=row[2]) for row in cur]
return stop_points
|
def interpolate_shape_times(shape_distances, shape_breaks, stop_times):
"""
Interpolate passage times for shape points.
Parameters
----------
shape_distances: list
list of cumulative distances along the shape
shape_breaks: list
list of shape_breaks
stop_times: list
list of stop_times
Returns
-------
shape_times: list of floats (seconds)
interpolated shape passage times
Shape points before the first shape break are given the first stop
time, and any shape points after the last break point are given the
value of the last stop time.
"""
shape_times = np.zeros(len(shape_distances))
shape_times[:shape_breaks[0]] = stop_times[0]
for i in range(len(shape_breaks)-1):
cur_break = shape_breaks[i]
cur_time = stop_times[i]
next_break = shape_breaks[i+1]
next_time = stop_times[i+1]
if cur_break == next_break:
shape_times[cur_break] = stop_times[i]
else:
cur_distances = shape_distances[cur_break:next_break+1]
norm_distances = ((np.array(cur_distances)-float(cur_distances[0])) /
float(cur_distances[-1] - cur_distances[0]))
times = (1.-norm_distances)*cur_time+norm_distances*next_time
shape_times[cur_break:next_break] = times[:-1]
# deal final ones separately:
shape_times[shape_breaks[-1]:] = stop_times[-1]
return list(shape_times)
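# Worked example: two stops at shape breaks 0 and 3 with passage times 0 s and 90 s;
# the intermediate shape points are interpolated along the cumulative distance.
shape_distances = [0, 100, 200, 300]
shape_breaks = [0, 3]
stop_times = [0, 90]
print(interpolate_shape_times(shape_distances, shape_breaks, stop_times))
# -> [0.0, 30.0, 60.0, 90.0]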
|
def update_pareto_optimal_tuples(self, new_pareto_tuple):
"""
# this function should be optimized
Parameters
----------
new_pareto_tuple: LabelTimeSimple
Returns
-------
added: bool
whether new_pareto_tuple was added to the set of pareto-optimal tuples
"""
if new_pareto_tuple.duration() > self._walk_to_target_duration:
direct_walk_label = self._label_class.direct_walk_label(new_pareto_tuple.departure_time,
self._walk_to_target_duration)
if not direct_walk_label.dominates(new_pareto_tuple):
raise AssertionError("a tuple longer than the direct walk should be dominated by the direct walk label")
direct_walk_label = self._label_class.direct_walk_label(new_pareto_tuple.departure_time, self._walk_to_target_duration)
if direct_walk_label.dominates(new_pareto_tuple):
return False
if self._new_paretotuple_is_dominated_by_old_tuples(new_pareto_tuple):
return False
else:
self._remove_old_tuples_dominated_by_new_and_insert_new_paretotuple(new_pareto_tuple)
return True
|
def evaluate_earliest_arrival_time_at_target(self, dep_time, transfer_margin):
"""
Get the earliest arrival time at the target, given a departure time.
Parameters
----------
dep_time : float, int
time in unix seconds
transfer_margin: float, int
transfer margin in seconds
Returns
-------
arrival_time : float
Arrival time (in seconds after the Unix epoch).
"""
minimum = dep_time + self._walk_to_target_duration
dep_time_plus_transfer_margin = dep_time + transfer_margin
for label in self._labels:
if label.departure_time >= dep_time_plus_transfer_margin and label.arrival_time_target < minimum:
minimum = label.arrival_time_target
return float(minimum)
|
def _run(self):
"""
Run the actual simulation.
"""
if self._has_run:
raise RuntimeError("This spreader instance has already been run: "
"create a new Spreader object for a new run.")
i = 1
while self.event_heap.size() > 0 and len(self._uninfected_stops) > 0:
event = self.event_heap.pop_next_event()
this_stop = self._stop_I_to_spreading_stop[event.from_stop_I]
if event.arr_time_ut > self.start_time_ut + self.max_duration_ut:
break
if this_stop.can_infect(event):
target_stop = self._stop_I_to_spreading_stop[event.to_stop_I]
already_visited = target_stop.has_been_visited()
target_stop.visit(event)
if not already_visited:
self._uninfected_stops.remove(event.to_stop_I)
print(i, self.event_heap.size())
transfer_distances = self.gtfs.get_straight_line_transfer_distances(event.to_stop_I)
self.event_heap.add_walk_events_to_heap(transfer_distances, event, self.start_time_ut,
self.walk_speed, self._uninfected_stops,
self.max_duration_ut)
i += 1
self._has_run = True
|
def add_walk_distances_to_db_python(gtfs, osm_path, cutoff_distance_m=1000):
"""
Computes the walk paths between stops, and updates these to the gtfs database.
Parameters
----------
gtfs: gtfspy.GTFS or str
A GTFS object, or a path to a GTFS sqlite database.
osm_path: str
path to the OpenStreetMap file
cutoff_distance_m: number
maximum allowed distance in meters
Returns
-------
None
See Also
--------
gtfspy.calc_transfers
compute_walk_paths_java
"""
if isinstance(gtfs, str):
gtfs = GTFS(gtfs)
assert (isinstance(gtfs, GTFS))
print("Reading in walk network")
walk_network = create_walk_network_from_osm(osm_path)
print("Matching stops to the OSM network")
stop_I_to_nearest_osm_node, stop_I_to_nearest_osm_node_distance = match_stops_to_nodes(gtfs, walk_network)
transfers = gtfs.get_straight_line_transfer_distances()
from_I_to_to_stop_Is = {stop_I: set() for stop_I in stop_I_to_nearest_osm_node}
for transfer_tuple in transfers.itertuples():
from_I = transfer_tuple.from_stop_I
to_I = transfer_tuple.to_stop_I
from_I_to_to_stop_Is[from_I].add(to_I)
print("Computing walking distances")
for from_I, to_stop_Is in from_I_to_to_stop_Is.items():
from_node = stop_I_to_nearest_osm_node[from_I]
from_dist = stop_I_to_nearest_osm_node_distance[from_I]
shortest_paths = networkx.single_source_dijkstra_path_length(walk_network,
from_node,
cutoff=cutoff_distance_m - from_dist,
weight="distance")
for to_I in to_stop_Is:
to_distance = stop_I_to_nearest_osm_node_distance[to_I]
to_node = stop_I_to_nearest_osm_node[to_I]
osm_distance = shortest_paths.get(to_node, float('inf'))
total_distance = from_dist + osm_distance + to_distance
from_stop_I_transfers = transfers[transfers['from_stop_I'] == from_I]
straight_distance = from_stop_I_transfers[from_stop_I_transfers["to_stop_I"] == to_I]["d"].values[0]
assert (straight_distance < total_distance + 2)  # allow up to 2 meters of rounding error
if total_distance <= cutoff_distance_m:
gtfs.conn.execute("UPDATE stop_distances "
"SET d_walk = " + str(int(total_distance)) +
" WHERE from_stop_I=" + str(from_I) + " AND to_stop_I=" + str(to_I))
gtfs.conn.commit()
|
def match_stops_to_nodes(gtfs, walk_network):
"""
Parameters
----------
gtfs : a GTFS object
walk_network : networkx.Graph
Returns
-------
stop_I_to_node: dict
maps stop_I to closest walk_network node
stop_I_to_dist: dict
maps stop_I to the distance to the closest walk_network node
"""
network_nodes = walk_network.nodes(data=True)
stop_Is = set(gtfs.get_straight_line_transfer_distances()['from_stop_I'])
stops_df = gtfs.stops()
geo_index = GeoGridIndex(precision=6)
for net_node, data in network_nodes:
geo_index.add_point(GeoPoint(data['lat'], data['lon'], ref=net_node))
stop_I_to_node = {}
stop_I_to_dist = {}
for stop_I in stop_Is:
stop_lat = float(stops_df[stops_df.stop_I == stop_I].lat)
stop_lon = float(stops_df[stops_df.stop_I == stop_I].lon)
geo_point = GeoPoint(stop_lat, stop_lon)
min_dist = float('inf')
min_dist_node = None
search_distances_km = [0.100, 0.500]  # search radii in kilometers (100 m, then 500 m)
for search_distance_km in search_distances_km:
for point, distance in geo_index.get_nearest_points(geo_point, search_distance_km, "km"):
if distance < min_dist:
min_dist = distance * 1000
min_dist_node = point.ref
if min_dist_node is not None:
break
if min_dist_node is None:
warn("No OSM node found for stop: " + str(stops_df[stops_df.stop_I == stop_I]))
stop_I_to_node[stop_I] = min_dist_node
stop_I_to_dist[stop_I] = min_dist
return stop_I_to_node, stop_I_to_dist
|
def walk_transfer_stop_to_stop_network(gtfs, max_link_distance=None):
"""
Construct the walk network.
If OpenStreetMap-based walking distances have been computed, then those are used as the distance.
Otherwise, the great circle distance ("d") is used.
Parameters
----------
gtfs: gtfspy.GTFS
max_link_distance: int, optional
If given, all walking transfers with great circle distance longer
than this limit (expressed in meters) will be omitted.
Returns
-------
net: networkx.DiGraph
edges have attributes
d:
straight-line distance between stops
d_walk:
distance along the road/tracks/..
"""
if max_link_distance is None:
max_link_distance = 1000
net = networkx.Graph()
_add_stops_to_net(net, gtfs.get_table("stops"))
stop_distances = gtfs.get_table("stop_distances")
if stop_distances["d_walk"][0] is None:
osm_distances_available = False
warn("Warning: OpenStreetMap-based walking distances have not been computed, using euclidean distances instead."
"Ignore this warning if running unit tests.")
else:
osm_distances_available = True
for stop_distance_tuple in stop_distances.itertuples():
from_node = stop_distance_tuple.from_stop_I
to_node = stop_distance_tuple.to_stop_I
if osm_distances_available:
if stop_distance_tuple.d_walk > max_link_distance or isnan(stop_distance_tuple.d_walk):
continue
data = {'d': stop_distance_tuple.d, 'd_walk': stop_distance_tuple.d_walk}
else:
if stop_distance_tuple.d > max_link_distance:
continue
data = {'d': stop_distance_tuple.d}
net.add_edge(from_node, to_node, data)
return net
|
def stop_to_stop_network_for_route_type(gtfs,
route_type,
link_attributes=None,
start_time_ut=None,
end_time_ut=None):
"""
Get a stop-to-stop network describing a single mode of travel.
Parameters
----------
gtfs : gtfspy.GTFS
route_type : int
See gtfspy.route_types.TRANSIT_ROUTE_TYPES for the list of possible types.
link_attributes: list[str], optional
defaulting to use the following link attributes:
"n_vehicles" : Number of vehicles passed
"duration_min" : minimum travel time between stops
"duration_max" : maximum travel time between stops
"duration_median" : median travel time between stops
"duration_avg" : average travel time between stops
"d" : distance along straight line (wgs84_distance)
"distance_shape" : minimum distance along shape
"capacity_estimate" : approximate capacity passed through the stop
"route_I_counts" : dict from route_I to counts
start_time_ut: int
start time of the time span (in unix time)
end_time_ut: int
end time of the time span (in unix time)
Returns
-------
net: networkx.DiGraph
A directed graph describing the stop-to-stop network for the given route_type
"""
if link_attributes is None:
link_attributes = DEFAULT_STOP_TO_STOP_LINK_ATTRIBUTES
assert(route_type in route_types.TRANSIT_ROUTE_TYPES)
stops_dataframe = gtfs.get_stops_for_route_type(route_type)
net = networkx.DiGraph()
_add_stops_to_net(net, stops_dataframe)
events_df = gtfs.get_transit_events(start_time_ut=start_time_ut,
end_time_ut=end_time_ut,
route_type=route_type)
if len(net.nodes()) < 2:
assert events_df.shape[0] == 0
# group events by links, and loop over them (i.e. each link):
link_event_groups = events_df.groupby(['from_stop_I', 'to_stop_I'], sort=False)
for key, link_events in link_event_groups:
from_stop_I, to_stop_I = key
assert isinstance(link_events, pd.DataFrame)
# 'dep_time_ut' 'arr_time_ut' 'shape_id' 'route_type' 'trip_I' 'duration' 'from_seq' 'to_seq'
if link_attributes is None:
net.add_edge(from_stop_I, to_stop_I)
else:
link_data = {}
if "duration_min" in link_attributes:
link_data['duration_min'] = float(link_events['duration'].min())
if "duration_max" in link_attributes:
link_data['duration_max'] = float(link_events['duration'].max())
if "duration_median" in link_attributes:
link_data['duration_median'] = float(link_events['duration'].median())
if "duration_avg" in link_attributes:
link_data['duration_avg'] = float(link_events['duration'].mean())
# statistics on numbers of vehicles:
if "n_vehicles" in link_attributes:
link_data['n_vehicles'] = int(link_events.shape[0])
if "capacity_estimate" in link_attributes:
link_data['capacity_estimate'] = route_types.ROUTE_TYPE_TO_APPROXIMATE_CAPACITY[route_type] \
* int(link_events.shape[0])
if "d" in link_attributes:
from_lat = net.node[from_stop_I]['lat']
from_lon = net.node[from_stop_I]['lon']
to_lat = net.node[to_stop_I]['lat']
to_lon = net.node[to_stop_I]['lon']
distance = wgs84_distance(from_lat, from_lon, to_lat, to_lon)
link_data['d'] = int(distance)
if "distance_shape" in link_attributes:
assert "shape_id" in link_events.columns.values
found = None
for i, shape_id in enumerate(link_events["shape_id"].values):
if shape_id is not None:
found = i
break
if found is None:
link_data["distance_shape"] = None
else:
link_event = link_events.iloc[found]
distance = gtfs.get_shape_distance_between_stops(
link_event["trip_I"],
int(link_event["from_seq"]),
int(link_event["to_seq"])
)
link_data['distance_shape'] = distance
if "route_I_counts" in link_attributes:
link_data["route_I_counts"] = link_events.groupby("route_I").size().to_dict()
net.add_edge(from_stop_I, to_stop_I, attr_dict=link_data)
return net
|
def stop_to_stop_networks_by_type(gtfs):
"""
Compute stop-to-stop networks for all travel modes (route_types).
Parameters
----------
gtfs: gtfspy.GTFS
Returns
-------
dict: dict[int, networkx.DiGraph]
keys are route types from route_types.ALL_ROUTE_TYPES (i.e. GTFS route_types), values are the corresponding networks
"""
route_type_to_network = dict()
for route_type in route_types.ALL_ROUTE_TYPES:
if route_type == route_types.WALK:
net = walk_transfer_stop_to_stop_network(gtfs)
else:
net = stop_to_stop_network_for_route_type(gtfs, route_type)
route_type_to_network[route_type] = net
assert len(route_type_to_network) == len(route_types.ALL_ROUTE_TYPES)
return route_type_to_network
|
def combined_stop_to_stop_transit_network(gtfs, start_time_ut=None, end_time_ut=None):
"""
Compute stop-to-stop networks for all transit modes and combine them into a single network.
Each link carries its route_type, so parallel links between the same pair of stops
(one per mode) can coexist in the multigraph. The walk mode is not included.
Parameters
----------
gtfs: gtfspy.GTFS
start_time_ut: int | None
start time of the time span (in unix time)
end_time_ut: int | None
end time of the time span (in unix time)
Returns
-------
net: networkx.MultiDiGraph
edge data contains the key 'route_type', with values from route_types.TRANSIT_ROUTE_TYPES
"""
multi_di_graph = networkx.MultiDiGraph()
for route_type in route_types.TRANSIT_ROUTE_TYPES:
graph = stop_to_stop_network_for_route_type(gtfs, route_type,
start_time_ut=start_time_ut, end_time_ut=end_time_ut)
for from_node, to_node, data in graph.edges(data=True):
data['route_type'] = route_type
multi_di_graph.add_edges_from(graph.edges(data=True))
multi_di_graph.add_nodes_from(graph.nodes(data=True))
return multi_di_graph
|
def _add_stops_to_net(net, stops):
"""
Add nodes to the network from the pandas dataframe describing (a part of the) stops table in the GTFS database.
Parameters
----------
net: networkx.Graph
stops: pandas.DataFrame
"""
for stop in stops.itertuples():
data = {
"lat": stop.lat,
"lon": stop.lon,
"name": stop.name
}
net.add_node(stop.stop_I, data)
|
def temporal_network(gtfs,
start_time_ut=None,
end_time_ut=None,
route_type=None):
"""
Compute the temporal network of the data, and return it as a pandas.DataFrame
Parameters
----------
gtfs : gtfspy.GTFS
start_time_ut: int | None
start time of the time span (in unix time)
end_time_ut: int | None
end time of the time span (in unix time)
route_type: int | None
Specifies which mode of public transport is included, or whether all modes should be included.
The int should be one of the standard GTFS route_types:
(see also gtfspy.route_types.TRANSIT_ROUTE_TYPES )
If route_type is not specified, all modes are included.
Returns
-------
events_df: pandas.DataFrame
Columns: departure_stop, arrival_stop, departure_time_ut, arrival_time_ut, route_type, route_I, trip_I
"""
events_df = gtfs.get_transit_events(start_time_ut=start_time_ut,
end_time_ut=end_time_ut,
route_type=route_type)
events_df.drop('to_seq', 1, inplace=True)
events_df.drop('shape_id', 1, inplace=True)
events_df.drop('duration', 1, inplace=True)
events_df.drop('route_id', 1, inplace=True)
events_df.rename(
columns={
'from_seq': "seq"
},
inplace=True
)
return events_df
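# Usage sketch (hypothetical database path): extract all transit events of a time span
# as a DataFrame, including every mode.
from gtfspy.gtfs import GTFS
gtfs = GTFS("data/helsinki.sqlite")  # hypothetical, previously imported database
events = temporal_network(gtfs, start_time_ut=None, end_time_ut=None)
print(events.columns.tolist())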
|
def route_to_route_network(gtfs, walking_threshold, start_time, end_time):
"""
Creates a networkx graph where the nodes are routes and an edge indicates that it is
possible to transfer between two routes
:param gtfs:
:param walking_threshold:
:param start_time:
:param end_time:
:return:
"""
graph = networkx.Graph()
routes = gtfs.get_table("routes")
for i in routes.itertuples():
graph.add_node(i.route_id, attr_dict={"type": i.type, "color": route_types.ROUTE_TYPE_TO_COLOR[i.type]})
query = """SELECT stop1.route_id AS route_id1, stop1.type, stop2.route_id AS route_id2, stop2.type FROM
(SELECT * FROM stop_distances WHERE d_walk < %s) sd,
(SELECT * FROM stop_times, trips, routes
WHERE stop_times.trip_I=trips.trip_I AND trips.route_I=routes.route_I
AND stop_times.dep_time_ds > %s AND stop_times.dep_time_ds < %s) stop1,
(SELECT * FROM stop_times, trips, routes
WHERE stop_times.trip_I=trips.trip_I AND trips.route_I=routes.route_I
AND stop_times.dep_time_ds > %s AND stop_times.dep_time_ds < %s) stop2
WHERE sd.from_stop_I = stop1.stop_I AND sd.to_stop_I = stop2.stop_I AND stop1.route_id != stop2.route_id
GROUP BY stop1.route_id, stop2.route_id""" % (walking_threshold, start_time, end_time, start_time,
end_time)
df = gtfs.execute_custom_query_pandas(query)
for items in df.itertuples():
graph.add_edge(items.route_id1, items.route_id2)
    # list() is needed because networkx 2.x returns a generator from isolates()
    graph.remove_nodes_from(list(networkx.isolates(graph)))
return graph
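
# A usage sketch (hypothetical values; start_time and end_time are day seconds, matching
# the dep_time_ds comparisons in the query above): build the route-to-route transfer
# graph for the morning peak with a 200 m walking threshold and list the most
# connected routes.
def _example_route_to_route_network(gtfs_path="data/helsinki.sqlite"):
    from gtfspy.gtfs import GTFS
    g = GTFS(gtfs_path)
    transfer_graph = route_to_route_network(g, walking_threshold=200,
                                            start_time=7 * 3600, end_time=10 * 3600)
    degrees = dict(transfer_graph.degree())
    return sorted(degrees.items(), key=lambda item: -item[1])[:10]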
|
def mean_temporal_distance(self):
"""
Get mean temporal distance (in seconds) to the target.
Returns
-------
mean_temporal_distance : float
"""
total_width = self.end_time_dep - self.start_time_dep
total_area = sum([block.area() for block in self._profile_blocks])
return total_area / total_width
|
def plot_temporal_distance_cdf(self):
"""
    Plot the temporal distance cumulative distribution function.
Returns
-------
fig: matplotlib.Figure
"""
xvalues, cdf = self.profile_block_analyzer._temporal_distance_cdf()
fig = plt.figure()
ax = fig.add_subplot(111)
xvalues = numpy.array(xvalues) / 60.0
ax.plot(xvalues, cdf, "-k")
ax.fill_between(xvalues, cdf, color="red", alpha=0.2)
ax.set_ylabel("CDF(t)")
ax.set_xlabel("Temporal distance t (min)")
return fig
|
def plot_temporal_distance_pdf(self, use_minutes=True, color="green", ax=None):
"""
Plot the temporal distance probability density function.
Returns
-------
fig: matplotlib.Figure
"""
from matplotlib import pyplot as plt
plt.rc('text', usetex=True)
temporal_distance_split_points_ordered, densities, delta_peaks = self._temporal_distance_pdf()
xs = []
for i, x in enumerate(temporal_distance_split_points_ordered):
xs.append(x)
xs.append(x)
xs = numpy.array(xs)
ys = [0]
for y in densities:
ys.append(y)
ys.append(y)
ys.append(0)
ys = numpy.array(ys)
# convert data to minutes:
xlabel = "Temporal distance (s)"
ylabel = "Probability density (t)"
if use_minutes:
xs /= 60.0
ys *= 60.0
xlabel = "Temporal distance (min)"
delta_peaks = {peak / 60.0: mass for peak, mass in delta_peaks.items()}
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(xs, ys, "k-")
ax.fill_between(xs, ys, color="green", alpha=0.2)
if delta_peaks:
peak_height = max(ys) * 1.4
max_x = max(xs)
min_x = min(xs)
now_max_x = max(xs) + 0.3 * (max_x - min_x)
now_min_x = min_x - 0.1 * (max_x - min_x)
text_x_offset = 0.1 * (now_max_x - max_x)
for loc, mass in delta_peaks.items():
ax.plot([loc, loc], [0, peak_height], color="green", lw=5)
ax.text(loc + text_x_offset, peak_height * 0.99, "$P(\\mathrm{walk}) = %.2f$" % (mass), color="green")
ax.set_xlim(now_min_x, now_max_x)
tot_delta_peak_mass = sum(delta_peaks.values())
transit_text_x = (min_x + max_x) / 2
transit_text_y = min(ys[ys > 0]) / 2.
ax.text(transit_text_x,
transit_text_y,
"$P(mathrm{PT}) = %.2f$" % (1 - tot_delta_peak_mass),
color="green",
va="center",
ha="center")
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.set_ylim(bottom=0)
return ax.figure
|
def plot_temporal_distance_pdf_horizontal(self, use_minutes=True,
color="green",
ax=None,
duration_divider=60.0,
legend_font_size=None,
legend_loc=None):
"""
Plot the temporal distance probability density function.
Returns
-------
fig: matplotlib.Figure
"""
from matplotlib import pyplot as plt
plt.rc('text', usetex=True)
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111)
temporal_distance_split_points_ordered, densities, delta_peaks = self._temporal_distance_pdf()
xs = []
for i, x in enumerate(temporal_distance_split_points_ordered):
xs.append(x)
xs.append(x)
xs = numpy.array(xs)
ys = [0]
for y in densities:
ys.append(y)
ys.append(y)
ys.append(0)
ys = numpy.array(ys)
# convert data to minutes:
xlabel = "Temporal distance (s)"
ylabel = "Probability density $P(\\tau)$"
if use_minutes:
xs /= duration_divider
ys *= duration_divider
xlabel = "Temporal distance (min)"
delta_peaks = {peak / 60.0: mass for peak, mass in delta_peaks.items()}
if delta_peaks:
peak_height = max(ys) * 1.4
max_x = max(xs)
min_x = min(xs)
now_max_x = max(xs) + 0.3 * (max_x - min_x)
now_min_x = min_x - 0.1 * (max_x - min_x)
text_x_offset = 0.1 * (now_max_x - max_x)
for loc, mass in delta_peaks.items():
text = "$P(\\mathrm{walk}) = " + ("%.2f$" % (mass))
ax.plot([0, peak_height], [loc, loc], color=color, lw=5, label=text)
ax.plot(ys, xs, "k-")
if delta_peaks:
tot_delta_peak_mass = sum(delta_peaks.values())
fill_label = "$P(\\mathrm{PT}) = %.2f$" % (1-tot_delta_peak_mass)
else:
fill_label = None
ax.fill_betweenx(xs, ys, color=color, alpha=0.2, label=fill_label)
ax.set_ylabel(xlabel)
ax.set_xlabel(ylabel)
ax.set_xlim(left=0, right=max(ys) * 1.2)
if delta_peaks:
if legend_font_size is None:
legend_font_size = 12
if legend_loc is None:
legend_loc = "best"
ax.legend(loc=legend_loc, prop={'size': legend_font_size})
if True:
line_tyles = ["-.", "--", "-"][::-1]
to_plot_funcs = [self.max_temporal_distance, self.mean_temporal_distance, self.min_temporal_distance]
xmin, xmax = ax.get_xlim()
for to_plot_func, ls in zip(to_plot_funcs, line_tyles):
y = to_plot_func() / duration_divider
assert y < float('inf')
# factor of 10 just to be safe that the lines cover the whole region.
ax.plot([xmin, xmax*10], [y, y], color="black", ls=ls, lw=1)
return ax.figure
|
def plot_temporal_distance_profile(self,
timezone=None,
color="black",
alpha=0.15,
ax=None,
lw=2,
label="",
plot_tdist_stats=False,
plot_trip_stats=False,
format_string="%Y-%m-%d %H:%M:%S",
plot_journeys=False,
duration_divider=60.0,
fill_color="green",
journey_letters=None,
return_letters=False):
"""
Parameters
----------
timezone: str
color: color
format_string: str, None
if None, the original values are used
plot_journeys: bool, optional
if True, small dots are plotted at the departure times
"""
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111)
if timezone is None:
warnings.warn("Warning: No timezone specified, defaulting to UTC")
timezone = pytz.timezone("Etc/UTC")
def _ut_to_unloc_datetime(ut):
dt = datetime.datetime.fromtimestamp(ut, timezone)
return dt.replace(tzinfo=None)
if format_string:
x_axis_formatter = md.DateFormatter(format_string)
ax.xaxis.set_major_formatter(x_axis_formatter)
else:
_ut_to_unloc_datetime = lambda x: x
ax.set_xlim(
_ut_to_unloc_datetime(self.start_time_dep),
_ut_to_unloc_datetime(self.end_time_dep)
)
if plot_tdist_stats:
line_tyles = ["-.", "--", "-"][::-1]
# to_plot_labels = ["maximum temporal distance", "mean temporal distance", "minimum temporal distance"]
to_plot_labels = ["$\\tau_\\mathrm{max} \\;$ = ", "$\\tau_\\mathrm{mean}$ = ", "$\\tau_\\mathrm{min} \\:\\:$ = "]
to_plot_funcs = [self.max_temporal_distance, self.mean_temporal_distance, self.min_temporal_distance]
xmin, xmax = ax.get_xlim()
for to_plot_label, to_plot_func, ls in zip(to_plot_labels, to_plot_funcs, line_tyles):
y = to_plot_func() / duration_divider
assert y < float('inf'), to_plot_label
to_plot_label = to_plot_label + "%.1f min" % (y)
ax.plot([xmin, xmax], [y, y], color="black", ls=ls, lw=1, label=to_plot_label)
if plot_trip_stats:
assert (not plot_tdist_stats)
line_tyles = ["-", "-.", "--"]
to_plot_labels = ["min journey duration", "max journey duration", "mean journey duration"]
to_plot_funcs = [self.min_trip_duration, self.max_trip_duration, self.mean_trip_duration]
xmin, xmax = ax.get_xlim()
for to_plot_label, to_plot_func, ls in zip(to_plot_labels, to_plot_funcs, line_tyles):
y = to_plot_func() / duration_divider
            if not numpy.isnan(y):
ax.plot([xmin, xmax], [y, y], color="red", ls=ls, lw=2)
txt = to_plot_label + "\n = %.1f min" % y
ax.text(xmax + 0.01 * (xmax - xmin), y, txt, color="red", va="center", ha="left")
old_xmax = xmax
xmax += (xmax - xmin) * 0.3
ymin, ymax = ax.get_ylim()
ax.fill_between([old_xmax, xmax], ymin, ymax, color="gray", alpha=0.1)
ax.set_xlim(xmin, xmax)
# plot the actual profile
vertical_lines, slopes = self.profile_block_analyzer.get_vlines_and_slopes_for_plotting()
for i, line in enumerate(slopes):
xs = [_ut_to_unloc_datetime(x) for x in line['x']]
        if i == 0:
label = u"profile"
else:
label = None
ax.plot(xs, numpy.array(line['y']) / duration_divider, "-", color=color, lw=lw, label=label)
for line in vertical_lines:
xs = [_ut_to_unloc_datetime(x) for x in line['x']]
ax.plot(xs, numpy.array(line['y']) / duration_divider, ":", color=color) # , lw=lw)
assert (isinstance(ax, plt.Axes))
if plot_journeys:
xs = [_ut_to_unloc_datetime(x) for x in self.trip_departure_times]
ys = self.trip_durations
ax.plot(xs, numpy.array(ys) / duration_divider, "o", color="black", ms=8, label="journeys")
if journey_letters is None:
journey_letters = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def cycle_journey_letters(journey_letters):
# cycle('ABCD') --> A B C D A B C D A B C D ...
saved = []
for element in journey_letters:
yield element
saved.append(element)
count = 1
while saved:
for element in saved:
yield element + str(count)
count += 1
        journey_letters_iterator = cycle_journey_letters(journey_letters)
        time_letters = {int(time): letter for letter, time in zip(journey_letters_iterator, self.trip_departure_times)}
        # use a fresh iterator so that the plotted letters match those stored in time_letters
        for x, y, letter in zip(xs, ys, cycle_journey_letters(journey_letters)):
walking = - self._walk_time_to_target / 30 if numpy.isfinite(self._walk_time_to_target) else 0
ax.text(x + datetime.timedelta(seconds=(self.end_time_dep - self.start_time_dep) / 60),
(y + walking) / duration_divider, letter, va="top", ha="left")
fill_between_x = []
fill_between_y = []
for line in slopes:
xs = [_ut_to_unloc_datetime(x) for x in line['x']]
fill_between_x.extend(xs)
fill_between_y.extend(numpy.array(line["y"]) / duration_divider)
ax.fill_between(fill_between_x, y1=fill_between_y, color=fill_color, alpha=alpha, label=label)
ax.set_ylim(bottom=0)
ax.set_ylim(ax.get_ylim()[0], ax.get_ylim()[1] * 1.05)
if rcParams['text.usetex']:
ax.set_xlabel(r"Departure time $t_{\mathrm{dep}}$")
else:
ax.set_xlabel("Departure time")
ax.set_ylabel(r"Temporal distance $\tau$ (min)")
if plot_journeys and return_letters:
return ax, time_letters
else:
return ax
|
def add_leg(self, leg):
"""
Parameters
----------
leg: Connection
"""
assert(isinstance(leg, Connection))
if not self.legs:
self.departure_time = leg.departure_time
self.arrival_time = leg.arrival_time
if leg.trip_id and (not self.legs or (leg.trip_id != self.legs[-1].trip_id)):
self.n_boardings += 1
self.arrival_time = leg.arrival_time
self.legs.append(leg)
|
def get_transfer_stop_pairs(self):
"""
Get stop pairs through which transfers take place
Returns
-------
transfer_stop_pairs: list
"""
transfer_stop_pairs = []
previous_arrival_stop = None
current_trip_id = None
for leg in self.legs:
if leg.trip_id is not None and leg.trip_id != current_trip_id and previous_arrival_stop is not None:
transfer_stop_pair = (previous_arrival_stop, leg.departure_stop)
transfer_stop_pairs.append(transfer_stop_pair)
previous_arrival_stop = leg.arrival_stop
current_trip_id = leg.trip_id
return transfer_stop_pairs
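
# Illustration only: the transfer-detection logic above can be exercised with lightweight
# mock objects (hypothetical namedtuples standing in for the journey and its Connection
# legs). Two consecutive vehicle legs on different trips yield one transfer stop pair.
def _example_transfer_stop_pairs():
    from collections import namedtuple
    MockLeg = namedtuple("MockLeg", ["departure_stop", "arrival_stop", "trip_id"])
    MockJourney = namedtuple("MockJourney", ["legs"])
    journey = MockJourney(legs=[
        MockLeg(departure_stop=1, arrival_stop=2, trip_id="trip_A"),
        MockLeg(departure_stop=3, arrival_stop=4, trip_id="trip_B"),  # boarded after transferring 2 -> 3
    ])
    return get_transfer_stop_pairs(journey)  # expected result: [(2, 3)]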
|
def _truncate_colormap(cmap, minval=0.0, maxval=1.0, n=100):
"""
    Truncate a colormap to the range [minval, maxval].
    Code originally from http://stackoverflow.com/questions/18926031/how-to-extract-a-subset-of-a-colormap-as-a-new-colormap-in-matplotlib
"""
new_cmap = LinearSegmentedColormap.from_list(
'trunc({n},{a:.2f},{b:.2f})'.format(n=cmap.name, a=minval, b=maxval),
cmap(numpy.linspace(minval, maxval, n))
)
return new_cmap
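
# Usage sketch: keep only the upper 60 % of matplotlib's "viridis" colormap, e.g. to avoid
# its darkest colors, and use the truncated map in a plot.
def _example_truncate_colormap():
    import numpy
    from matplotlib import pyplot as plt
    truncated = _truncate_colormap(plt.get_cmap("viridis"), minval=0.4, maxval=1.0)
    fig, ax = plt.subplots()
    mappable = ax.imshow(numpy.random.rand(10, 10), cmap=truncated)
    fig.colorbar(mappable)
    return fig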
|
def get_time_profile_analyzer(self, max_n_boardings=None):
"""
Parameters
----------
max_n_boardings: int
The maximum number of boardings allowed for the labels used to construct the "temporal distance profile"
Returns
-------
analyzer: NodeProfileAnalyzerTime
"""
if max_n_boardings is None:
max_n_boardings = self.max_trip_n_boardings()
# compute only if not yet computed
    if max_n_boardings not in self._n_boardings_to_simple_time_analyzers:
if max_n_boardings == 0:
valids = []
else:
candidate_labels = [LabelTimeSimple(label.departure_time, label.arrival_time_target)
for label in self._node_profile_final_labels if
((self.start_time_dep <= label.departure_time)
and label.n_boardings <= max_n_boardings)]
valids = compute_pareto_front(candidate_labels)
valids.sort(key=lambda label: -label.departure_time)
profile = NodeProfileSimple(self._walk_to_target_duration)
for valid in valids:
profile.update_pareto_optimal_tuples(valid)
npat = NodeProfileAnalyzerTime.from_profile(profile, self.start_time_dep, self.end_time_dep)
self._n_boardings_to_simple_time_analyzers[max_n_boardings] = npat
return self._n_boardings_to_simple_time_analyzers[max_n_boardings]
|
def median_temporal_distances(self, min_n_boardings=None, max_n_boardings=None):
"""
Returns
-------
mean_temporal_distances: list
list indices encode the number of vehicle legs each element
in the list tells gets the mean temporal distance
"""
if min_n_boardings is None:
min_n_boardings = 0
if max_n_boardings is None:
max_n_boardings = self.max_trip_n_boardings()
if max_n_boardings is None:
max_n_boardings = 0
median_temporal_distances = [float('inf') for _ in range(min_n_boardings, max_n_boardings + 1)]
for n_boardings in range(min_n_boardings, max_n_boardings + 1):
simple_analyzer = self.get_time_profile_analyzer(n_boardings)
median_temporal_distances[n_boardings] = simple_analyzer.median_temporal_distance()
return median_temporal_distances
|
def from_directory_as_inmemory_db(cls, gtfs_directory):
"""
    Instantiate a GTFS object with an in-memory database by importing a GTFS directory.
Parameters
----------
gtfs_directory: str
path to the directory for importing the database
"""
# this import is here to avoid circular imports (which turned out to be a problem)
from gtfspy.import_gtfs import import_gtfs
conn = sqlite3.connect(":memory:")
import_gtfs(gtfs_directory,
conn,
preserve_connection=True,
print_progress=False)
return cls(conn)
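
# Usage sketch (hypothetical directory): build a throwaway in-memory database directly
# from a raw GTFS directory, without writing a .sqlite file to disk, and query it.
def _example_inmemory_db(gtfs_directory="data/helsinki_gtfs/"):
    from gtfspy.gtfs import GTFS
    g = GTFS.from_directory_as_inmemory_db(gtfs_directory)
    return g.get_timezone_name()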
|
def get_main_database_path(self):
"""
    Return the path to the main database file.
Returns
-------
path : unicode
path to the database, empty string for in-memory databases
"""
cur = self.conn.cursor()
cur.execute("PRAGMA database_list")
rows = cur.fetchall()
for row in rows:
if row[1] == str("main"):
return row[2]
|
def get_shape_distance_between_stops(self, trip_I, from_stop_seq, to_stop_seq):
"""
Get the distance along a shape between stops
Parameters
----------
trip_I : int
trip_ID along which we travel
from_stop_seq : int
the sequence number of the 'origin' stop
to_stop_seq : int
the sequence number of the 'destination' stop
Returns
-------
distance : float, None
If the shape calculation succeeded, return a float, otherwise return None
(i.e. in the case where the shapes table is empty)
"""
query_template = "SELECT shape_break FROM stop_times WHERE trip_I={trip_I} AND seq={seq} "
stop_seqs = [from_stop_seq, to_stop_seq]
shape_breaks = []
for seq in stop_seqs:
q = query_template.format(seq=seq, trip_I=trip_I)
shape_breaks.append(self.conn.execute(q).fetchone())
query_template = "SELECT max(d) - min(d) " \
"FROM shapes JOIN trips ON(trips.shape_id=shapes.shape_id) " \
"WHERE trip_I={trip_I} AND shapes.seq>={from_stop_seq} AND shapes.seq<={to_stop_seq};"
distance_query = query_template.format(trip_I=trip_I, from_stop_seq=from_stop_seq, to_stop_seq=to_stop_seq)
return self.conn.execute(distance_query).fetchone()[0]
|
def get_directly_accessible_stops_within_distance(self, stop, distance):
"""
Returns stops that are accessible without transfer from the stops that are within a specific walking distance
:param stop: int
:param distance: int
:return:
"""
query = """SELECT stop.* FROM
(SELECT st2.* FROM
(SELECT * FROM stop_distances
WHERE from_stop_I = %s) sd,
(SELECT * FROM stop_times) st1,
(SELECT * FROM stop_times) st2
WHERE sd.d < %s AND sd.to_stop_I = st1.stop_I AND st1.trip_I = st2.trip_I
GROUP BY st2.stop_I) sq,
(SELECT * FROM stops) stop
WHERE sq.stop_I = stop.stop_I""" % (stop, distance)
return pd.read_sql_query(query, self.conn)
|
def get_timezone_name(self):
"""
Get name of the GTFS timezone
Returns
-------
timezone_name : str
name of the time zone, e.g. "Europe/Helsinki"
"""
tz_name = self.conn.execute('SELECT timezone FROM agencies LIMIT 1').fetchone()
if tz_name is None:
raise ValueError("This database does not have a timezone defined.")
return tz_name[0]
|
def get_timezone_string(self, dt=None):
"""
Return the timezone of the GTFS database object as a string.
The assumed time when the timezone (difference) is computed
is the download date of the file.
This might not be optimal in all cases.
So this function should return values like:
"+0200" or "-1100"
Parameters
----------
dt : datetime.datetime, optional
The (unlocalized) date when the timezone should be computed.
Defaults first to download_date, and then to the runtime date.
Returns
-------
timezone_string : str
"""
if dt is None:
download_date = self.meta.get('download_date')
if download_date:
dt = datetime.datetime.strptime(download_date, '%Y-%m-%d')
else:
dt = datetime.datetime.today()
loc_dt = self._timezone.localize(dt)
# get the timezone
timezone_string = loc_dt.strftime("%z")
return timezone_string
|
def unlocalized_datetime_to_ut_seconds(self, unlocalized_datetime):
"""
Convert datetime (in GTFS timezone) to unixtime
Parameters
----------
unlocalized_datetime : datetime.datetime
(tz coerced to GTFS timezone, should NOT be UTC.)
Returns
-------
output : int (unixtime)
"""
loc_dt = self._timezone.localize(unlocalized_datetime)
unixtime_seconds = calendar.timegm(loc_dt.utctimetuple())
return unixtime_seconds
|
def get_day_start_ut(self, date):
"""
Get day start time (as specified by GTFS) as unix time in seconds
Parameters
----------
date : str | unicode | datetime.datetime
something describing the date
Returns
-------
day_start_ut : int
start time of the day in unixtime
"""
if isinstance(date, string_types):
date = datetime.datetime.strptime(date, '%Y-%m-%d')
date_noon = datetime.datetime(date.year, date.month, date.day, 12, 0, 0)
ut_noon = self.unlocalized_datetime_to_ut_seconds(date_noon)
return ut_noon - 12 * 60 * 60
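
# A self-contained sketch of the "noon minus 12 hours" convention used above (no GTFS
# database needed; the timezone is chosen only for illustration). On a DST-change day,
# such as 2018-03-25 in the EU, the result differs from plain local midnight.
def _example_day_start_noon_minus_12h(date_str="2018-03-25", tz_name="Europe/Helsinki"):
    import calendar
    import datetime
    import pytz
    tz = pytz.timezone(tz_name)
    local_noon = tz.localize(datetime.datetime.strptime(date_str, "%Y-%m-%d").replace(hour=12))
    return calendar.timegm(local_noon.utctimetuple()) - 12 * 60 * 60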
|
def get_trip_trajectories_within_timespan(self, start, end, use_shapes=True, filter_name=None):
"""
Get complete trip data for visualizing public transport operation based on gtfs.
Parameters
----------
start: number
Earliest position data to return (in unix time)
end: number
Latest position data to return (in unix time)
use_shapes: bool, optional
Whether or not shapes should be included
filter_name: str
Pick only routes having this name.
Returns
-------
trips: dict
trips['trips'] is a list whose each element (e.g. el = trips['trips'][0])
is a dict with the following properties:
el['lats'] -- list of latitudes
el['lons'] -- list of longitudes
el['times'] -- list of passage_times
el['route_type'] -- type of vehicle as specified by GTFS
el['name'] -- name of the route
"""
trips = []
trip_df = self.get_tripIs_active_in_range(start, end)
print("gtfs_viz.py: fetched " + str(len(trip_df)) + " trip ids")
shape_cache = {}
# loop over all trips:
for row in trip_df.itertuples():
trip_I = row.trip_I
day_start_ut = row.day_start_ut
shape_id = row.shape_id
trip = {}
name, route_type = self.get_route_name_and_type_of_tripI(trip_I)
trip['route_type'] = int(route_type)
trip['name'] = str(name)
if filter_name and (name != filter_name):
continue
stop_lats = []
stop_lons = []
stop_dep_times = []
shape_breaks = []
stop_seqs = []
# get stop_data and store it:
stop_time_df = self.get_trip_stop_time_data(trip_I, day_start_ut)
for stop_row in stop_time_df.itertuples():
stop_lats.append(float(stop_row.lat))
stop_lons.append(float(stop_row.lon))
stop_dep_times.append(float(stop_row.dep_time_ut))
try:
stop_seqs.append(int(stop_row.seq))
except TypeError:
stop_seqs.append(None)
if use_shapes:
try:
shape_breaks.append(int(stop_row.shape_break))
except (TypeError, ValueError):
shape_breaks.append(None)
if use_shapes:
# get shape data (from cache, if possible)
if shape_id not in shape_cache:
shape_cache[shape_id] = shapes.get_shape_points2(self.conn.cursor(), shape_id)
shape_data = shape_cache[shape_id]
# noinspection PyBroadException
try:
trip['times'] = shapes.interpolate_shape_times(shape_data['d'], shape_breaks, stop_dep_times)
trip['lats'] = shape_data['lats']
trip['lons'] = shape_data['lons']
start_break = shape_breaks[0]
end_break = shape_breaks[-1]
trip['times'] = trip['times'][start_break:end_break + 1]
trip['lats'] = trip['lats'][start_break:end_break + 1]
trip['lons'] = trip['lons'][start_break:end_break + 1]
except:
# In case interpolation fails:
trip['times'] = stop_dep_times
trip['lats'] = stop_lats
trip['lons'] = stop_lons
else:
trip['times'] = stop_dep_times
trip['lats'] = stop_lats
trip['lons'] = stop_lons
trips.append(trip)
return {"trips": trips}
|
def get_stop_count_data(self, start_ut, end_ut):
"""
Get stop count data.
Parameters
----------
start_ut : int
start time in unixtime
end_ut : int
end time in unixtime
Returns
-------
stopData : pandas.DataFrame
each row in the stopData dataFrame is a dictionary with the following elements
stop_I, count, lat, lon, name
with data types
(int, int, float, float, str)
"""
# TODO! this function could perhaps be made a single sql query now with the new tables?
trips_df = self.get_tripIs_active_in_range(start_ut, end_ut)
# stop_I -> count, lat, lon, name
stop_counts = Counter()
# loop over all trips:
for row in trips_df.itertuples():
# get stop_data and store it:
stops_seq = self.get_trip_stop_time_data(row.trip_I, row.day_start_ut)
for stop_time_row in stops_seq.itertuples(index=False):
if (stop_time_row.dep_time_ut >= start_ut) and (stop_time_row.dep_time_ut <= end_ut):
stop_counts[stop_time_row.stop_I] += 1
all_stop_data = self.stops()
counts = [stop_counts[stop_I] for stop_I in all_stop_data["stop_I"].values]
all_stop_data.loc[:, "count"] = pd.Series(counts, index=all_stop_data.index)
return all_stop_data
|
def get_segment_count_data(self, start, end, use_shapes=True):
"""
Get segment data including PTN vehicle counts per segment that are
fully _contained_ within the interval (start, end)
Parameters
----------
start : int
start time of the simulation in unix time
end : int
end time of the simulation in unix time
use_shapes : bool, optional
whether to include shapes (if available)
Returns
-------
seg_data : list
each element in the list is a dict containing keys:
"trip_I", "lats", "lons", "shape_id", "stop_seqs", "shape_breaks"
"""
cur = self.conn.cursor()
# get all possible trip_ids that take place between start and end
trips_df = self.get_tripIs_active_in_range(start, end)
# stop_I -> count, lat, lon, name
segment_counts = Counter()
seg_to_info = {}
# tripI_to_seq = "inverted segToShapeData"
tripI_to_seq = defaultdict(list)
# loop over all trips:
for row in trips_df.itertuples():
# get stop_data and store it:
stops_df = self.get_trip_stop_time_data(row.trip_I, row.day_start_ut)
for i in range(len(stops_df) - 1):
(stop_I, dep_time_ut, s_lat, s_lon, s_seq, shape_break) = stops_df.iloc[i]
(stop_I_n, dep_time_ut_n, s_lat_n, s_lon_n, s_seq_n, shape_break_n) = stops_df.iloc[i + 1]
# test if _contained_ in the interval
# overlap would read:
# (dep_time_ut <= end) and (start <= dep_time_ut_n)
if (dep_time_ut >= start) and (dep_time_ut_n <= end):
seg = (stop_I, stop_I_n)
segment_counts[seg] += 1
if seg not in seg_to_info:
seg_to_info[seg] = {
u"trip_I": row.trip_I,
u"lats": [s_lat, s_lat_n],
u"lons": [s_lon, s_lon_n],
u"shape_id": row.shape_id,
u"stop_seqs": [s_seq, s_seq_n],
u"shape_breaks": [shape_break, shape_break_n]
}
tripI_to_seq[row.trip_I].append(seg)
stop_names = {}
for (stop_I, stop_J) in segment_counts.keys():
for s in [stop_I, stop_J]:
if s not in stop_names:
stop_names[s] = self.stop(s)[u'name'].values[0]
seg_data = []
for seg, count in segment_counts.items():
segInfo = seg_to_info[seg]
shape_breaks = segInfo[u"shape_breaks"]
seg_el = {}
if use_shapes and shape_breaks and shape_breaks[0] and shape_breaks[1]:
shape = shapes.get_shape_between_stops(
cur,
segInfo[u'trip_I'],
shape_breaks=shape_breaks
)
seg_el[u'lats'] = segInfo[u'lats'][:1] + shape[u'lat'] + segInfo[u'lats'][1:]
seg_el[u'lons'] = segInfo[u'lons'][:1] + shape[u'lon'] + segInfo[u'lons'][1:]
else:
seg_el[u'lats'] = segInfo[u'lats']
seg_el[u'lons'] = segInfo[u'lons']
seg_el[u'name'] = stop_names[seg[0]] + u"-" + stop_names[seg[1]]
seg_el[u'count'] = count
seg_data.append(seg_el)
return seg_data
|
def get_all_route_shapes(self, use_shapes=True):
"""
Get the shapes of all routes.
Parameters
----------
use_shapes : bool, optional
        by default True (i.e. use shapes, as the name of the function indicates);
        if False, fall back to the stop latitudes and longitudes
Returns
-------
routeShapes: list of dicts that should have the following keys
name, type, agency, lats, lons
with types
list, list, str, list, list
"""
cur = self.conn.cursor()
# all shape_id:s corresponding to a route_I:
# query = "SELECT DISTINCT name, shape_id, trips.route_I, route_type
# FROM trips LEFT JOIN routes USING(route_I)"
# data1 = pd.read_sql_query(query, self.conn)
# one (arbitrary) shape_id per route_I ("one direction") -> less than half of the routes
query = "SELECT routes.name as name, shape_id, route_I, trip_I, routes.type, " \
" agency_id, agencies.name as agency_name, max(end_time_ds-start_time_ds) as trip_duration " \
"FROM trips " \
"LEFT JOIN routes " \
"USING(route_I) " \
"LEFT JOIN agencies " \
"USING(agency_I) " \
"GROUP BY routes.route_I"
data = pd.read_sql_query(query, self.conn)
routeShapes = []
for i, row in enumerate(data.itertuples()):
datum = {"name": str(row.name), "type": int(row.type), "route_I": row.route_I, "agency": str(row.agency_id),
"agency_name": str(row.agency_name)}
# this function should be made also non-shape friendly (at this point)
if use_shapes and row.shape_id:
shape = shapes.get_shape_points2(cur, row.shape_id)
lats = shape['lats']
lons = shape['lons']
else:
stop_shape = self.get_trip_stop_coordinates(row.trip_I)
lats = list(stop_shape['lat'])
lons = list(stop_shape['lon'])
datum['lats'] = [float(lat) for lat in lats]
datum['lons'] = [float(lon) for lon in lons]
routeShapes.append(datum)
return routeShapes
|
def get_tripIs_active_in_range(self, start, end):
"""
Obtain from the (standard) GTFS database, list of trip_IDs (and other trip_related info)
that are active between given 'start' and 'end' times.
    The start time of a trip is determined by the departure time at the first stop of the trip.
The end time of a trip is determined by the arrival time at the last stop of the trip.
Parameters
----------
start, end : int
the start and end of the time interval in unix time seconds
Returns
-------
active_trips : pandas.DataFrame with columns
trip_I, day_start_ut, start_time_ut, end_time_ut, shape_id
"""
to_select = "trip_I, day_start_ut, start_time_ut, end_time_ut, shape_id "
query = "SELECT " + to_select + \
"FROM day_trips " \
"WHERE " \
"(end_time_ut > {start_ut} AND start_time_ut < {end_ut})".format(start_ut=start, end_ut=end)
return pd.read_sql_query(query, self.conn)
|
def get_trip_counts_per_day(self):
"""
Get trip counts per day between the start and end day of the feed.
Returns
-------
trip_counts : pandas.DataFrame
Has columns "date_str" (dtype str) "trip_counts" (dtype int)
"""
query = "SELECT date, count(*) AS number_of_trips FROM day_trips GROUP BY date"
# this yields the actual data
trip_counts_per_day = pd.read_sql_query(query, self.conn, index_col="date")
# the rest is simply code for filling out "gaps" in the time span
# (necessary for some visualizations)
max_day = trip_counts_per_day.index.max()
min_day = trip_counts_per_day.index.min()
min_date = datetime.datetime.strptime(min_day, '%Y-%m-%d')
max_date = datetime.datetime.strptime(max_day, '%Y-%m-%d')
num_days = (max_date - min_date).days
dates = [min_date + datetime.timedelta(days=x) for x in range(num_days + 1)]
trip_counts = []
date_strings = []
for date in dates:
date_string = date.strftime("%Y-%m-%d")
date_strings.append(date_string)
try:
value = trip_counts_per_day.loc[date_string, 'number_of_trips']
except KeyError:
            # set value to 0 if the date is not present, i.e. when no trips
# take place on that day
value = 0
trip_counts.append(value)
# check that all date_strings are included (move this to tests?)
for date_string in trip_counts_per_day.index:
assert date_string in date_strings
data = {"date": dates, "date_str": date_strings, "trip_counts": trip_counts}
return pd.DataFrame(data)
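
# Usage sketch (hypothetical path): plot the daily trip counts returned above to spot
# holidays and other atypical days before selecting an extract date.
def _example_plot_trip_counts(gtfs_path="data/helsinki.sqlite"):
    from matplotlib import pyplot as plt
    from gtfspy.gtfs import GTFS
    g = GTFS(gtfs_path)
    trip_counts = g.get_trip_counts_per_day()
    fig, ax = plt.subplots()
    ax.plot(trip_counts["date"], trip_counts["trip_counts"])
    ax.set_ylabel("trips per day")
    return fig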
|
def get_suitable_date_for_daily_extract(self, date=None, ut=False):
"""
Parameters
----------
date : str
ut : bool
Whether to return the date as a string or as a an int (seconds after epoch).
Returns
-------
Selects suitable date for daily extract
Iterates trough the available dates forward and backward from the download date accepting the first day that has
at least 90 percent of the number of trips of the maximum date. The condition can be changed to something else.
If the download date is out of range, the process will look through the dates from first to last.
"""
daily_trips = self.get_trip_counts_per_day()
max_daily_trips = daily_trips[u'trip_counts'].max(axis=0)
    # ".values" is needed because "in" on a pandas Series tests the index, not the values
    if date in daily_trips[u'date_str'].values:
start_index = daily_trips[daily_trips[u'date_str'] == date].index.tolist()[0]
daily_trips[u'old_index'] = daily_trips.index
daily_trips[u'date_dist'] = abs(start_index - daily_trips.index)
daily_trips = daily_trips.sort_values(by=[u'date_dist', u'old_index']).reindex()
for row in daily_trips.itertuples():
if row.trip_counts >= 0.9 * max_daily_trips:
if ut:
return self.get_day_start_ut(row.date_str)
else:
return row.date_str
|
def get_weekly_extract_start_date(self, ut=False, weekdays_at_least_of_max=0.9,
verbose=False, download_date_override=None):
"""
Find a suitable weekly extract start date (monday).
The goal is to obtain as 'usual' week as possible.
The weekdays of the weekly extract week should contain
at least 0.9 of the total maximum of trips.
Parameters
----------
ut: return unixtime?
weekdays_at_least_of_max: float
download_date_override: str, semi-optional
Download-date in format %Y-%m-%d, weeks close to this.
Overrides the (possibly) recorded downloaded date in the database
Returns
-------
date: int or str
Raises
------
error: RuntimeError
If no download date could be found.
"""
daily_trip_counts = self.get_trip_counts_per_day()
if isinstance(download_date_override, str):
search_start_date = datetime.datetime.strptime(download_date_override, "%Y-%m-%d")
elif isinstance(download_date_override, datetime.datetime):
search_start_date = download_date_override
else:
assert download_date_override is None
download_date_str = self.meta['download_date']
if download_date_str == "":
warnings.warn("Download date is not speficied in the database. "
"Download date used in GTFS." + self.get_weekly_extract_start_date.__name__ +
"() defaults to the smallest date when any operations take place.")
search_start_date = daily_trip_counts['date'].min()
else:
search_start_date = datetime.datetime.strptime(download_date_str, "%Y-%m-%d")
feed_min_date = daily_trip_counts['date'].min()
feed_max_date = daily_trip_counts['date'].max()
assert (feed_max_date - feed_min_date >= datetime.timedelta(days=7)), \
"Dataset is not long enough for providing week long extracts"
# get first a valid monday where the search for the week can be started:
next_monday_from_search_start_date = search_start_date + timedelta(days=(7 - search_start_date.weekday()))
if not (feed_min_date <= next_monday_from_search_start_date <= feed_max_date):
warnings.warn("The next monday after the (possibly user) specified download date is not present in the database."
"Resorting to first monday after the beginning of operations instead.")
next_monday_from_search_start_date = feed_min_date + timedelta(days=(7 - feed_min_date.weekday()))
max_trip_count = daily_trip_counts['trip_counts'].quantile(0.95)
# Take 95th percentile to omit special days, if any exist.
threshold = weekdays_at_least_of_max * max_trip_count
threshold_fulfilling_days = daily_trip_counts['trip_counts'] > threshold
# look forward first
# get the index of the trip:
search_start_monday_index = daily_trip_counts[daily_trip_counts['date'] == next_monday_from_search_start_date].index[0]
# get starting point
while_loop_monday_index = search_start_monday_index
while len(daily_trip_counts.index) >= while_loop_monday_index + 7:
if all(threshold_fulfilling_days[while_loop_monday_index:while_loop_monday_index + 5]):
row = daily_trip_counts.iloc[while_loop_monday_index]
if ut:
return self.get_day_start_ut(row.date_str)
else:
return row['date']
while_loop_monday_index += 7
while_loop_monday_index = search_start_monday_index - 7
# then backwards
while while_loop_monday_index >= 0:
if all(threshold_fulfilling_days[while_loop_monday_index:while_loop_monday_index + 5]):
row = daily_trip_counts.iloc[while_loop_monday_index]
if ut:
return self.get_day_start_ut(row.date_str)
else:
return row['date']
while_loop_monday_index -= 7
raise RuntimeError("No suitable weekly extract start date could be determined!")
|
def get_spreading_trips(self, start_time_ut, lat, lon,
max_duration_ut=4 * 3600,
min_transfer_time=30,
use_shapes=False):
"""
Starting from a specific point and time, get complete single source
shortest path spreading dynamics as trips, or "events".
Parameters
----------
start_time_ut: number
Start time of the spreading.
lat: float
latitude of the spreading seed location
lon: float
longitude of the spreading seed location
max_duration_ut: int
maximum duration of the spreading process (in seconds)
min_transfer_time : int
minimum transfer time in seconds
use_shapes : bool
whether to include shapes
Returns
-------
trips: dict
trips['trips'] is a list whose each element (e.g. el = trips['trips'][0])
is a dict with the following properties:
el['lats'] : list of latitudes
el['lons'] : list of longitudes
el['times'] : list of passage_times
el['route_type'] : type of vehicle as specified by GTFS, or -1 if walking
el['name'] : name of the route
"""
from gtfspy.spreading.spreader import Spreader
spreader = Spreader(self, start_time_ut, lat, lon, max_duration_ut, min_transfer_time, use_shapes)
return spreader.spread()
|
def get_closest_stop(self, lat, lon):
"""
Get closest stop to a given location.
Parameters
----------
lat: float
latitude coordinate of the location
lon: float
longitude coordinate of the location
Returns
-------
stop_I: int
the index of the stop in the database
"""
cur = self.conn.cursor()
min_dist = float("inf")
min_stop_I = None
rows = cur.execute("SELECT stop_I, lat, lon FROM stops")
for stop_I, lat_s, lon_s in rows:
dist_now = wgs84_distance(lat, lon, lat_s, lon_s)
if dist_now < min_dist:
min_dist = dist_now
min_stop_I = stop_I
return min_stop_I
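
# Usage sketch (hypothetical coordinates and path): find the stop closest to a location
# and list the stops reachable from it without a transfer, within 500 m of walking.
def _example_closest_stop(gtfs_path="data/helsinki.sqlite", lat=60.17, lon=24.94):
    from gtfspy.gtfs import GTFS
    g = GTFS(gtfs_path)
    stop_I = g.get_closest_stop(lat, lon)
    return g.get_directly_accessible_stops_within_distance(stop_I, 500)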
|
def get_route_name_and_type_of_tripI(self, trip_I):
"""
Get route short name and type
Parameters
----------
trip_I: int
short trip index created when creating the database
Returns
-------
name: str
        short name of the route, e.g. 195N
type: int
route_type according to the GTFS standard
"""
cur = self.conn.cursor()
results = cur.execute("SELECT name, type FROM routes JOIN trips USING(route_I) WHERE trip_I={trip_I}"
.format(trip_I=trip_I))
name, rtype = results.fetchone()
return u"%s" % str(name), int(rtype)
|
def get_route_name_and_type(self, route_I):
"""
Get route short name and type
Parameters
----------
route_I: int
route index (database specific)
Returns
-------
name: str
        short name of the route, e.g. 195N
type: int
route_type according to the GTFS standard
"""
cur = self.conn.cursor()
results = cur.execute("SELECT name, type FROM routes WHERE route_I=(?)", (route_I,))
name, rtype = results.fetchone()
return name, int(rtype)
|
def get_trip_stop_coordinates(self, trip_I):
"""
Get coordinates for a given trip_I
Parameters
----------
trip_I : int
the integer id of the trip
Returns
-------
stop_coords : pandas.DataFrame
with columns "lats" and "lons"
"""
query = """SELECT lat, lon
FROM stop_times
JOIN stops
USING(stop_I)
WHERE trip_I={trip_I}
ORDER BY stop_times.seq""".format(trip_I=trip_I)
stop_coords = pd.read_sql(query, self.conn)
return stop_coords
|
def get_trip_stop_time_data(self, trip_I, day_start_ut):
"""
Obtain from the (standard) GTFS database, trip stop data
(departure time in ut, lat, lon, seq, shape_break) as a pandas DataFrame
Some filtering could be applied here, if only e.g. departure times
corresponding within some time interval should be considered.
Parameters
----------
trip_I : int
integer index of the trip
day_start_ut : int
the start time of the day in unix time (seconds)
Returns
-------
df: pandas.DataFrame
df has the following columns
        'stop_I', 'dep_time_ut', 'lat', 'lon', 'seq', 'shape_break'
"""
to_select = "stop_I, " + str(day_start_ut) + "+dep_time_ds AS dep_time_ut, lat, lon, seq, shape_break"
str_to_run = "SELECT " + to_select + """
FROM stop_times JOIN stops USING(stop_I)
WHERE (trip_I ={trip_I}) ORDER BY seq
"""
str_to_run = str_to_run.format(trip_I=trip_I)
return pd.read_sql_query(str_to_run, self.conn)
|
def get_events_by_tripI_and_dsut(self, trip_I, day_start_ut,
start_ut=None, end_ut=None):
"""
Get trip data as a list of events (i.e. dicts).
Parameters
----------
trip_I : int
shorthand index of the trip.
day_start_ut : int
the start time of the day in unix time (seconds)
start_ut : int, optional
consider only events that start after this time
If not specified, this filtering is not applied.
end_ut : int, optional
Consider only events that end before this time
If not specified, this filtering is not applied.
Returns
-------
events: list of dicts
each element contains the following data:
from_stop: int (stop_I)
to_stop: int (stop_I)
dep_time_ut: int (in unix time)
arr_time_ut: int (in unix time)
"""
    # sanity checks for the input (start_ut and end_ut are optional):
    if start_ut is not None:
        assert day_start_ut <= start_ut
    if end_ut is not None:
        assert day_start_ut <= end_ut
    if (start_ut is not None) and (end_ut is not None):
        assert start_ut <= end_ut
events = []
# check that trip takes place on that day:
if not self.tripI_takes_place_on_dsut(trip_I, day_start_ut):
return events
query = """SELECT stop_I, arr_time_ds+?, dep_time_ds+?
FROM stop_times JOIN stops USING(stop_I)
WHERE
(trip_I = ?)
"""
params = [day_start_ut, day_start_ut,
trip_I]
if start_ut:
query += "AND (dep_time_ds > ?-?)"
params += [start_ut, day_start_ut]
if end_ut:
query += "AND (arr_time_ds < ?-?)"
params += [end_ut, day_start_ut]
query += "ORDER BY arr_time_ds"
cur = self.conn.cursor()
rows = cur.execute(query, params)
stop_data = list(rows)
for i in range(len(stop_data) - 1):
event = {
"from_stop": stop_data[i][0],
"to_stop": stop_data[i + 1][0],
"dep_time_ut": stop_data[i][2],
"arr_time_ut": stop_data[i + 1][1]
}
events.append(event)
return events
|
def tripI_takes_place_on_dsut(self, trip_I, day_start_ut):
"""
Check that a trip takes place during a day
Parameters
----------
trip_I : int
index of the trip in the gtfs data base
day_start_ut : int
the starting time of the day in unix time (seconds)
Returns
-------
takes_place: bool
boolean value describing whether the trip takes place during
the given day or not
"""
query = "SELECT * FROM days WHERE trip_I=? AND day_start_ut=?"
params = (trip_I, day_start_ut)
cur = self.conn.cursor()
rows = list(cur.execute(query, params))
if len(rows) == 0:
return False
else:
assert len(rows) == 1, 'On a day, a trip_I should be present at most once'
return True
|
def day_start_ut(self, ut):
"""
Convert unixtime to unixtime on GTFS start-of-day.
GTFS defines the start of a day as "noon minus 12 hours" to solve
most DST-related problems. This means that on DST-changing days,
the day start isn't midnight. This function isn't idempotent.
Running it twice on the "move clocks backwards" day will result in
being one day too early.
Parameters
----------
ut: int
Unixtime
Returns
-------
ut: int
Unixtime corresponding to start of day
"""
# set timezone to the one of gtfs
old_tz = self.set_current_process_time_zone()
ut = time.mktime(time.localtime(ut)[:3] + (12, 00, 0, 0, 0, -1)) - 43200
set_process_timezone(old_tz)
return ut
|
def increment_day_start_ut(self, day_start_ut, n_days=1):
"""Increment the GTFS-definition of "day start".
Parameters
----------
day_start_ut : int
        unixtime of the previous start of day. If this time is at
        12:00 noon or later, there *will* be bugs. To avoid this, run the
        input through day_start_ut first.
n_days: int
number of days to increment
"""
old_tz = self.set_current_process_time_zone()
day0 = time.localtime(day_start_ut + 43200) # time of noon
dayN = time.mktime(day0[:2] + # YYYY, MM
(day0[2] + n_days,) + # DD
(12, 00, 0, 0, 0, -1)) - 43200 # HHMM, etc. Minus 12 hours.
set_process_timezone(old_tz)
return dayN
|
def _get_possible_day_starts(self, start_ut, end_ut, max_time_overnight=None):
"""
Get all possible day start times between start_ut and end_ut
Currently this function is used only by get_tripIs_within_range_by_dsut
Parameters
----------
    start_ut : int
        start time in unix time
    end_ut : int
        end time in unix time
    max_time_overnight : int, optional
        the maximum duration (in seconds) by which a trip of the previous day can
        extend into the next day (i.e. after-midnight run times such as 25:35)
    Returns
    -------
    day_start_times_ut : list
        list of ints (unix times in seconds) giving all possible day
        start times
    start_times_ds : list
        list of ints stating the valid start times in
        day seconds
    end_times_ds : list
        list of ints stating the valid end times in
        day seconds
"""
if max_time_overnight is None:
# 7 hours:
max_time_overnight = 7 * 60 * 60
# sanity checks for the timezone parameter
# assert timezone < 14
# assert timezone > -14
# tz_seconds = int(timezone*3600)
assert start_ut < end_ut
start_day_ut = self.day_start_ut(start_ut)
# start_day_ds = int(start_ut+tz_seconds) % seconds_in_a_day #??? needed?
start_day_ds = start_ut - start_day_ut
# assert (start_day_ut+tz_seconds) % seconds_in_a_day == 0
end_day_ut = self.day_start_ut(end_ut)
# end_day_ds = int(end_ut+tz_seconds) % seconds_in_a_day #??? needed?
# end_day_ds = end_ut - end_day_ut
# assert (end_day_ut+tz_seconds) % seconds_in_a_day == 0
# If we are early enough in a day that we might have trips from
# the previous day still running, decrement the start day.
if start_day_ds < max_time_overnight:
start_day_ut = self.increment_day_start_ut(start_day_ut, n_days=-1)
# day_start_times_ut = range(start_day_ut, end_day_ut+seconds_in_a_day, seconds_in_a_day)
# Create a list of all possible day start times. This is roughly
# range(day_start_ut, day_end_ut+1day, 1day).
day_start_times_ut = [start_day_ut]
while day_start_times_ut[-1] < end_day_ut:
day_start_times_ut.append(self.increment_day_start_ut(day_start_times_ut[-1]))
start_times_ds = []
end_times_ds = []
# For every possible day start:
for dsut in day_start_times_ut:
# start day_seconds starts at either zero, or time - daystart
day_start_ut = max(0, start_ut - dsut)
start_times_ds.append(day_start_ut)
# end day_seconds is time-day_start
day_end_ut = end_ut - dsut
end_times_ds.append(day_end_ut)
# Return three tuples which can be zip:ped together.
return day_start_times_ut, start_times_ds, end_times_ds
|
def get_tripIs_within_range_by_dsut(self,
start_time_ut,
end_time_ut):
"""
Obtain a list of trip_Is that take place during a time interval.
The trip needs to be only partially overlapping with the given time interval.
The grouping by dsut (day_start_ut) is required as same trip_I could
take place on multiple days.
Parameters
----------
start_time_ut : int
start of the time interval in unix time (seconds)
end_time_ut: int
end of the time interval in unix time (seconds)
Returns
-------
trip_I_dict: dict
keys: day_start_times to list of integers (trip_Is)
"""
cur = self.conn.cursor()
assert start_time_ut <= end_time_ut
dst_ut, st_ds, et_ds = \
self._get_possible_day_starts(start_time_ut, end_time_ut, 7)
# noinspection PyTypeChecker
assert len(dst_ut) >= 0
trip_I_dict = {}
for day_start_ut, start_ds, end_ds in \
zip(dst_ut, st_ds, et_ds):
query = """
SELECT distinct(trip_I)
FROM days
JOIN trips
USING(trip_I)
WHERE
(days.day_start_ut == ?)
AND (
(trips.start_time_ds <= ?)
AND
(trips.end_time_ds >= ?)
)
"""
params = (day_start_ut, end_ds, start_ds)
trip_Is = [el[0] for el in cur.execute(query, params)]
if len(trip_Is) > 0:
trip_I_dict[day_start_ut] = trip_Is
return trip_I_dict
|
def stop(self, stop_I):
"""
    Get the data of an individual stop as a one-row pandas DataFrame.
Parameters
----------
stop_I : int
stop index
Returns
-------
stop: pandas.DataFrame
"""
return pd.read_sql_query("SELECT * FROM stops WHERE stop_I={stop_I}".format(stop_I=stop_I), self.conn)
|
def get_stops_for_route_type(self, route_type):
"""
Parameters
----------
route_type: int
Returns
-------
stops: pandas.DataFrame
"""
    if route_type == WALK:
return self.stops()
else:
return pd.read_sql_query("SELECT DISTINCT stops.* "
"FROM stops JOIN stop_times ON stops.stop_I == stop_times.stop_I "
" JOIN trips ON stop_times.trip_I = trips.trip_I"
" JOIN routes ON trips.route_I == routes.route_I "
"WHERE routes.type=(?)", self.conn, params=(route_type,))
|
def generate_routable_transit_events(self, start_time_ut=None, end_time_ut=None, route_type=None):
"""
Generates events that take place during a time interval [start_time_ut, end_time_ut].
Each event needs to be only partially overlap the given time interval.
Does not include walking events.
    This is a quick-and-dirty implementation that provides a simple way of
    generating events compatible with the routing algorithm.
Parameters
----------
start_time_ut: int
end_time_ut: int
route_type: ?
Yields
------
event: namedtuple
containing:
dep_time_ut: int
arr_time_ut: int
from_stop_I: int
to_stop_I: int
trip_I : int
route_type : int
seq: int
"""
from gtfspy.networks import temporal_network
df = temporal_network(self, start_time_ut=start_time_ut, end_time_ut=end_time_ut, route_type=route_type)
df.sort_values("dep_time_ut", ascending=False, inplace=True)
for row in df.itertuples():
yield row
|
def get_transit_events(self, start_time_ut=None, end_time_ut=None, route_type=None):
"""
Obtain a list of events that take place during a time interval.
Each event needs to be only partially overlap the given time interval.
Does not include walking events.
Parameters
----------
start_time_ut : int
start of the time interval in unix time (seconds)
end_time_ut: int
end of the time interval in unix time (seconds)
route_type: int
consider only events for this route_type
Returns
-------
events: pandas.DataFrame
with the following columns and types
dep_time_ut: int
arr_time_ut: int
from_stop_I: int
to_stop_I: int
trip_I : int
shape_id : int
route_type : int
See also
--------
get_transit_events_in_time_span : an older version of the same thing
"""
table_name = self._get_day_trips_table_name()
event_query = "SELECT stop_I, seq, trip_I, route_I, routes.route_id AS route_id, routes.type AS route_type, " \
"shape_id, day_start_ut+dep_time_ds AS dep_time_ut, day_start_ut+arr_time_ds AS arr_time_ut " \
"FROM " + table_name + " " \
"JOIN trips USING(trip_I) " \
"JOIN routes USING(route_I) " \
"JOIN stop_times USING(trip_I)"
where_clauses = []
if end_time_ut:
where_clauses.append(table_name + ".start_time_ut< {end_time_ut}".format(end_time_ut=end_time_ut))
where_clauses.append("dep_time_ut <={end_time_ut}".format(end_time_ut=end_time_ut))
if start_time_ut:
where_clauses.append(table_name + ".end_time_ut > {start_time_ut}".format(start_time_ut=start_time_ut))
where_clauses.append("arr_time_ut >={start_time_ut}".format(start_time_ut=start_time_ut))
if route_type is not None:
assert route_type in ALL_ROUTE_TYPES
where_clauses.append("routes.type={route_type}".format(route_type=route_type))
if len(where_clauses) > 0:
event_query += " WHERE "
for i, where_clause in enumerate(where_clauses):
            if i != 0:
event_query += " AND "
event_query += where_clause
# ordering is required for later stages
event_query += " ORDER BY trip_I, day_start_ut+dep_time_ds;"
events_result = pd.read_sql_query(event_query, self.conn)
# 'filter' results so that only real "events" are taken into account
from_indices = numpy.nonzero(
(events_result['trip_I'][:-1].values == events_result['trip_I'][1:].values) *
(events_result['seq'][:-1].values < events_result['seq'][1:].values)
)[0]
to_indices = from_indices + 1
# these should have same trip_ids
assert (events_result['trip_I'][from_indices].values == events_result['trip_I'][to_indices].values).all()
trip_Is = events_result['trip_I'][from_indices]
from_stops = events_result['stop_I'][from_indices]
to_stops = events_result['stop_I'][to_indices]
shape_ids = events_result['shape_id'][from_indices]
dep_times = events_result['dep_time_ut'][from_indices]
arr_times = events_result['arr_time_ut'][to_indices]
route_types = events_result['route_type'][from_indices]
route_ids = events_result['route_id'][from_indices]
route_Is = events_result['route_I'][from_indices]
durations = arr_times.values - dep_times.values
assert (durations >= 0).all()
from_seqs = events_result['seq'][from_indices]
to_seqs = events_result['seq'][to_indices]
data_tuples = zip(from_stops, to_stops, dep_times, arr_times,
shape_ids, route_types, route_ids, trip_Is,
durations, from_seqs, to_seqs, route_Is)
columns = ["from_stop_I", "to_stop_I", "dep_time_ut", "arr_time_ut",
"shape_id", "route_type", "route_id", "trip_I",
"duration", "from_seq", "to_seq", "route_I"]
df = pd.DataFrame.from_records(data_tuples, columns=columns)
return df
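
# Usage sketch (hypothetical path): fetch all transit events of one full day and count
# them by GTFS route_type.
def _example_transit_events(gtfs_path="data/helsinki.sqlite"):
    from gtfspy.gtfs import GTFS
    g = GTFS(gtfs_path)
    day_start_ut = g.get_suitable_date_for_daily_extract(ut=True)
    events = g.get_transit_events(start_time_ut=day_start_ut,
                                  end_time_ut=day_start_ut + 24 * 3600)
    return events.groupby("route_type").size()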
|
def get_route_difference_with_other_db(self, other_gtfs, start_time, end_time, uniqueness_threshold=None,
uniqueness_ratio=None):
"""
Compares the routes based on stops in the schedule with the routes in another db and returns the ones without match.
Uniqueness thresholds or ratio can be used to allow small differences
:param uniqueness_threshold:
:param uniqueness_ratio:
:return:
"""
from gtfspy.stats import frequencies_by_generated_route
this_df = frequencies_by_generated_route(self, start_time, end_time)
other_df = frequencies_by_generated_route(other_gtfs, start_time, end_time)
this_routes = {x: set(x.split(',')) for x in this_df["route"]}
other_routes = {x: set(x.split(',')) for x in other_df["route"]}
# this_df["route_set"] = this_df.apply(lambda x: set(x.route.split(',')), axis=1)
# other_df["route_set"] = other_df.apply(lambda x: set(x.route.split(',')), axis=1)
this_uniques = list(this_routes.keys())
other_uniques = list(other_routes.keys())
print("initial routes A:", len(this_uniques))
print("initial routes B:", len(other_uniques))
for i_key, i in this_routes.items():
for j_key, j in other_routes.items():
union = i | j
intersection = i & j
symmetric_difference = i ^ j
if uniqueness_ratio:
if len(intersection) / len(union) >= uniqueness_ratio:
try:
this_uniques.remove(i_key)
this_df = this_df[this_df["route"] != i_key]
except ValueError:
pass
try:
other_uniques.remove(j_key)
other_df = other_df[other_df["route"] != j_key]
except ValueError:
pass
print("unique routes A", len(this_df))
print("unique routes B", len(other_df))
return this_df, other_df
|
def get_straight_line_transfer_distances(self, stop_I=None):
"""
Get (straight line) distances to stations that can be transferred to.
Parameters
----------
stop_I : int, optional
If not specified return all possible transfer distances
Returns
-------
distances: pandas.DataFrame
each row has the following items
from_stop_I: int
to_stop_I: int
d: float or int #distance in meters
"""
if stop_I is not None:
query = u""" SELECT from_stop_I, to_stop_I, d
FROM stop_distances
WHERE
from_stop_I=?
"""
params = (u"{stop_I}".format(stop_I=stop_I),)
else:
query = """ SELECT from_stop_I, to_stop_I, d
FROM stop_distances
"""
params = None
stop_data_df = pd.read_sql_query(query, self.conn, params=params)
return stop_data_df
|
def get_day_start_ut_span(self):
"""
Return the first and last day_start_ut
Returns
-------
first_day_start_ut: int
last_day_start_ut: int
"""
cur = self.conn.cursor()
first_day_start_ut, last_day_start_ut = \
cur.execute("SELECT min(day_start_ut), max(day_start_ut) FROM days;").fetchone()
return first_day_start_ut, last_day_start_ut
|
def homogenize_stops_table_with_other_db(self, source):
"""
This function takes an external database, looks of common stops and adds the missing stops to both databases.
In addition the stop_pair_I column is added. This id links the stops between these two sources.
:param source: directory of external database
:return:
"""
cur = self.conn.cursor()
self.attach_gtfs_database(source)
query_inner_join = """SELECT t1.*
FROM stops t1
INNER JOIN other.stops t2
ON t1.stop_id=t2.stop_id
AND find_distance(t1.lon, t1.lat, t2.lon, t2.lat) <= 50"""
df_inner_join = self.execute_custom_query_pandas(query_inner_join)
print("number of common stops: ", len(df_inner_join.index))
df_not_in_other = self.execute_custom_query_pandas("SELECT * FROM stops EXCEPT " + query_inner_join)
print("number of stops missing in second feed: ", len(df_not_in_other.index))
df_not_in_self = self.execute_custom_query_pandas("SELECT * FROM other.stops EXCEPT " +
query_inner_join.replace("t1.*", "t2.*"))
print("number of stops missing in first feed: ", len(df_not_in_self.index))
try:
self.execute_custom_query("""ALTER TABLE stops ADD COLUMN stop_pair_I INT """)
self.execute_custom_query("""ALTER TABLE other.stops ADD COLUMN stop_pair_I INT """)
except sqlite3.OperationalError:
pass
stop_id_stub = "added_stop_"
counter = 0
rows_to_update_self = []
rows_to_update_other = []
rows_to_add_to_self = []
rows_to_add_to_other = []
for items in df_inner_join.itertuples(index=False):
rows_to_update_self.append((counter, items[1]))
rows_to_update_other.append((counter, items[1]))
counter += 1
for items in df_not_in_other.itertuples(index=False):
rows_to_update_self.append((counter, items[1]))
rows_to_add_to_other.append((stop_id_stub + str(counter),) + tuple(items[x] for x in [2, 3, 4, 5, 6, 8, 9])
+ (counter,))
counter += 1
for items in df_not_in_self.itertuples(index=False):
rows_to_update_other.append((counter, items[1]))
rows_to_add_to_self.append((stop_id_stub + str(counter),) + tuple(items[x] for x in [2, 3, 4, 5, 6, 8, 9])
+ (counter,))
counter += 1
query_add_row = """INSERT INTO stops(
stop_id,
code,
name,
desc,
lat,
lon,
location_type,
wheelchair_boarding,
stop_pair_I) VALUES (%s) """ % (", ".join(["?" for x in range(9)]))
query_update_row = """UPDATE stops SET stop_pair_I=? WHERE stop_id=?"""
print("adding rows to databases")
cur.executemany(query_add_row, rows_to_add_to_self)
cur.executemany(query_update_row, rows_to_update_self)
cur.executemany(query_add_row.replace("stops", "other.stops"), rows_to_add_to_other)
cur.executemany(query_update_row.replace("stops", "other.stops"), rows_to_update_other)
self.conn.commit()
print("finished")
|
def read_data_as_dataframe(self,
travel_impedance_measure,
from_stop_I=None,
to_stop_I=None,
statistic=None):
"""
    Recover pre-computed travel impedance values between OD pairs from the database.
    Returns
    -------
    df: pandas.DataFrame
"""
to_select = []
where_clauses = []
to_select.append("from_stop_I")
to_select.append("to_stop_I")
if from_stop_I is not None:
where_clauses.append("from_stop_I=" + str(int(from_stop_I)))
if to_stop_I is not None:
where_clauses.append("to_stop_I=" + str(int(to_stop_I)))
where_clause = ""
if len(where_clauses) > 0:
where_clause = " WHERE " + " AND ".join(where_clauses)
if not statistic:
to_select.extend(["min", "mean", "median", "max"])
else:
to_select.append(statistic)
to_select_clause = ",".join(to_select)
if not to_select_clause:
to_select_clause = "*"
sql = "SELECT " + to_select_clause + " FROM " + travel_impedance_measure + where_clause + ";"
df = pd.read_sql(sql, self.conn)
return df
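
# Usage sketch (hypothetical table name and stop id; assumes this method belongs to a
# travel-impedance store object wrapping an sqlite connection): read back the mean
# travel impedance from one origin stop to all destinations.
def _example_read_travel_impedance(store, from_stop_I=123):
    return store.read_data_as_dataframe("temporal_distance",
                                        from_stop_I=from_stop_I,
                                        statistic="mean")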
|
def insert_data(self, travel_impedance_measure_name, data):
"""
Parameters
----------
travel_impedance_measure_name: str
data: list[dict]
Each list element must contain keys:
"from_stop_I", "to_stop_I", "min", "max", "median" and "mean"
"""
f = float
data_tuple = [(int(x["from_stop_I"]), int(x["to_stop_I"]), f(x["min"]), f(x["max"]), f(x["median"]), f(x["mean"])) for
x in data]
insert_stmt = '''INSERT OR REPLACE INTO ''' + travel_impedance_measure_name + ''' (
from_stop_I,
to_stop_I,
min,
max,
median,
mean) VALUES (?, ?, ?, ?, ?, ?) '''
self.conn.executemany(insert_stmt, data_tuple)
self.conn.commit()
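
# Usage sketch (hypothetical measure name and values; the target table with the columns
# listed above must already exist): insert precomputed statistics for one OD pair and
# read them back with read_data_as_dataframe().
def _example_insert_travel_impedance(store):
    rows = [{"from_stop_I": 1, "to_stop_I": 2,
             "min": 300.0, "max": 900.0, "median": 540.0, "mean": 560.0}]
    store.insert_data("temporal_distance", rows)
    return store.read_data_as_dataframe("temporal_distance", from_stop_I=1, to_stop_I=2)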
|