repo | path | func_name | code | language | sha | url | partition |
---|---|---|---|---|---|---|---|
CI-WATER/gsshapy | gsshapy/grid/grid_to_gssha.py | GRIDtoGSSHA._set_subset_indices | def _set_subset_indices(self, y_min, y_max, x_min, x_max):
"""
load subset based on extent
"""
y_coords, x_coords = self.xd.lsm.coords
dx = self.xd.lsm.dx
dy = self.xd.lsm.dy
lsm_y_indices_from_y, lsm_x_indices_from_y = \
np.where((y_coords >= (y_min - 2*dy)) &
(y_coords <= (y_max + 2*dy)))
lsm_y_indices_from_x, lsm_x_indices_from_x = \
np.where((x_coords >= (x_min - 2*dx)) &
(x_coords <= (x_max + 2*dx)))
lsm_y_indices = np.intersect1d(lsm_y_indices_from_y,
lsm_y_indices_from_x)
lsm_x_indices = np.intersect1d(lsm_x_indices_from_y,
lsm_x_indices_from_x)
self.xslice = slice(np.amin(lsm_x_indices),
np.amax(lsm_x_indices)+1)
self.yslice = slice(np.amin(lsm_y_indices),
np.amax(lsm_y_indices)+1) | python | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/grid/grid_to_gssha.py#L608-L631 | train |
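A minimal, self-contained sketch of the same two-cell-buffer subsetting logic, using plain numpy arrays in place of the `xd.lsm` accessor (all inputs below are synthetic):

```python
import numpy as np

def subset_slices(y_coords, x_coords, dy, dx, y_min, y_max, x_min, x_max):
    """Return (yslice, xslice) covering the extent plus a two-cell buffer."""
    rows_y, cols_y = np.where((y_coords >= y_min - 2*dy) & (y_coords <= y_max + 2*dy))
    rows_x, cols_x = np.where((x_coords >= x_min - 2*dx) & (x_coords <= x_max + 2*dx))
    rows = np.intersect1d(rows_y, rows_x)  # rows satisfying both coordinate tests
    cols = np.intersect1d(cols_y, cols_x)  # columns satisfying both coordinate tests
    return (slice(np.amin(rows), np.amax(rows) + 1),
            slice(np.amin(cols), np.amax(cols) + 1))

# 5x5 grid of cell-center coordinates with unit spacing
y_coords, x_coords = np.meshgrid(np.arange(5.0), np.arange(5.0), indexing='ij')
yslice, xslice = subset_slices(y_coords, x_coords, dy=1.0, dx=1.0,
                               y_min=2.0, y_max=2.0, x_min=2.0, x_max=2.0)
print(yslice, xslice)  # the two-cell buffer spans the whole grid here
```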
CI-WATER/gsshapy | gsshapy/grid/grid_to_gssha.py | GRIDtoGSSHA._time_to_string | def _time_to_string(self, dt, conversion_string="%Y %m %d %H %M"):
"""
This converts a UTC datetime to a string
"""
if self.output_timezone is not None:
dt = dt.replace(tzinfo=utc) \
.astimezone(self.output_timezone)
return dt.strftime(conversion_string) | python | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/grid/grid_to_gssha.py#L651-L658 | train |
CI-WATER/gsshapy | gsshapy/grid/grid_to_gssha.py | GRIDtoGSSHA._load_lsm_data | def _load_lsm_data(self, data_var,
conversion_factor=1,
calc_4d_method=None,
calc_4d_dim=None,
time_step=None):
"""
This extracts the LSM data from a folder of netcdf files
"""
data = self.xd.lsm.getvar(data_var,
yslice=self.yslice,
xslice=self.xslice,
calc_4d_method=calc_4d_method,
calc_4d_dim=calc_4d_dim)
if isinstance(time_step, datetime):
data = data.loc[{self.lsm_time_dim: [pd.to_datetime(time_step)]}]
elif time_step is not None:
data = data[{self.lsm_time_dim: [time_step]}]
data = data.fillna(0)
data.values *= conversion_factor
return data | python | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/grid/grid_to_gssha.py#L660-L679 | train |
CI-WATER/gsshapy | gsshapy/grid/grid_to_gssha.py | GRIDtoGSSHA._check_lsm_input | def _check_lsm_input(self, data_var_map_array):
"""
This function checks the input var map array
to ensure the required input variables exist
"""
REQUIRED_HMET_VAR_LIST = ['Prcp', 'Pres', 'Temp', 'Clod',
'RlHm', 'Drad', 'Grad', 'WndS']
# make sure all required variables exist
given_hmet_var_list = []
for gssha_data_var, lsm_data_var in data_var_map_array:
gssha_data_hmet_name = self.netcdf_attributes[gssha_data_var]['hmet_name']
if gssha_data_hmet_name in given_hmet_var_list:
raise ValueError("Duplicate parameter for HMET variable {0}"
.format(gssha_data_hmet_name))
else:
given_hmet_var_list.append(gssha_data_hmet_name)
for REQUIRED_HMET_VAR in REQUIRED_HMET_VAR_LIST:
if REQUIRED_HMET_VAR not in given_hmet_var_list:
raise ValueError("ERROR: HMET param is required to continue "
"{0} ...".format(REQUIRED_HMET_VAR)) | python | def _check_lsm_input(self, data_var_map_array):
"""
This function checks the input var map array
to ensure the required input variables exist
"""
REQUIRED_HMET_VAR_LIST = ['Prcp', 'Pres', 'Temp', 'Clod',
'RlHm', 'Drad', 'Grad', 'WndS']
# make sure all required variables exist
given_hmet_var_list = []
for gssha_data_var, lsm_data_var in data_var_map_array:
gssha_data_hmet_name = self.netcdf_attributes[gssha_data_var]['hmet_name']
if gssha_data_hmet_name in given_hmet_var_list:
raise ValueError("Duplicate parameter for HMET variable {0}"
.format(gssha_data_hmet_name))
else:
given_hmet_var_list.append(gssha_data_hmet_name)
for REQUIRED_HMET_VAR in REQUIRED_HMET_VAR_LIST:
if REQUIRED_HMET_VAR not in given_hmet_var_list:
raise ValueError("ERROR: HMET param is required to continue "
"{0} ...".format(REQUIRED_HMET_VAR)) | [
"def",
"_check_lsm_input",
"(",
"self",
",",
"data_var_map_array",
")",
":",
"REQUIRED_HMET_VAR_LIST",
"=",
"[",
"'Prcp'",
",",
"'Pres'",
",",
"'Temp'",
",",
"'Clod'",
",",
"'RlHm'",
",",
"'Drad'",
",",
"'Grad'",
",",
"'WndS'",
"]",
"# make sure all required variables exist",
"given_hmet_var_list",
"=",
"[",
"]",
"for",
"gssha_data_var",
",",
"lsm_data_var",
"in",
"data_var_map_array",
":",
"gssha_data_hmet_name",
"=",
"self",
".",
"netcdf_attributes",
"[",
"gssha_data_var",
"]",
"[",
"'hmet_name'",
"]",
"if",
"gssha_data_hmet_name",
"in",
"given_hmet_var_list",
":",
"raise",
"ValueError",
"(",
"\"Duplicate parameter for HMET variable {0}\"",
".",
"format",
"(",
"gssha_data_hmet_name",
")",
")",
"else",
":",
"given_hmet_var_list",
".",
"append",
"(",
"gssha_data_hmet_name",
")",
"for",
"REQUIRED_HMET_VAR",
"in",
"REQUIRED_HMET_VAR_LIST",
":",
"if",
"REQUIRED_HMET_VAR",
"not",
"in",
"given_hmet_var_list",
":",
"raise",
"ValueError",
"(",
"\"ERROR: HMET param is required to continue \"",
"\"{0} ...\"",
".",
"format",
"(",
"REQUIRED_HMET_VAR",
")",
")"
] | This function checks the input var map array
to ensure the required input variables exist | [
"This",
"function",
"checks",
"the",
"input",
"var",
"map",
"array",
"to",
"ensure",
"the",
"required",
"input",
"variables",
"exist"
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/grid/grid_to_gssha.py#L808-L830 | train |
CI-WATER/gsshapy | gsshapy/grid/grid_to_gssha.py | GRIDtoGSSHA._resample_data | def _resample_data(self, gssha_var):
"""
This function resamples the data to match the GSSHA grid
IN TESTING MODE
"""
self.data = self.data.lsm.resample(gssha_var, self.gssha_grid) | python | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/grid/grid_to_gssha.py#L832-L837 | train |
CI-WATER/gsshapy | gsshapy/grid/grid_to_gssha.py | GRIDtoGSSHA._convert_data_to_hourly | def _convert_data_to_hourly(self, gssha_data_var):
"""
This function converts the data to hourly data
and then puts it into the data_np_array
USED WHEN GENERATING HMET DATA ONLY
"""
time_step_hours = np.diff(self.data.time)[0]/np.timedelta64(1, 'h')
calc_function = self._get_calc_function(gssha_data_var)
resampled_data = None
if time_step_hours < 1:
resampled_data = self.data.resample('1H', dim='time',
how=calc_function,
keep_attrs=True)
elif time_step_hours > 1:
resampled_data = self.data.resample('1H', dim='time',
keep_attrs=True)
for time_idx in range(self.data.dims['time']):
if time_idx+1 < self.data.dims['time']:
# interpolate between time steps
start_time = self.data.time[time_idx].values
end_time = self.data.time[time_idx+1].values
slope_timeslice = slice(str(start_time), str(end_time))
slice_size = resampled_data.sel(time=slope_timeslice).dims['time'] - 1
first_timestep = resampled_data.sel(time=str(start_time))[gssha_data_var]
slope = (resampled_data.sel(time=str(end_time))[gssha_data_var]
- first_timestep)/float(slice_size)
data_timeslice = slice(str(start_time+np.timedelta64(1, 'm')),
str(end_time-np.timedelta64(1, 'm')))
data_subset = resampled_data.sel(time=data_timeslice)
for xidx in range(data_subset.dims['time']):
data_subset[gssha_data_var][xidx] = first_timestep + slope * (xidx+1)
else:
# just continue to repeat the timestep
start_time = self.data.time[time_idx].values
end_time = resampled_data.time[-1].values
if end_time > start_time:
first_timestep = resampled_data.sel(time=str(start_time))[gssha_data_var]
data_timeslice = slice(str(start_time), str(end_time))
data_subset = resampled_data.sel(time=data_timeslice)
slice_size = 1
if calc_function == "mean":
slice_size = data_subset.dims['time']
for xidx in range(data_subset.dims['time']):
data_subset[gssha_data_var][xidx] = first_timestep/float(slice_size)
if resampled_data is not None:
# make sure coordinates copied
if self.data.lsm.x_var not in resampled_data.coords:
resampled_data.coords[self.data.lsm.x_var] = self.data.coords[self.data.lsm.x_var]
if self.data.lsm.y_var not in resampled_data.coords:
resampled_data.coords[self.data.lsm.y_var] = self.data.coords[self.data.lsm.y_var]
self.data = resampled_data | python | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/grid/grid_to_gssha.py#L853-L908 | train |
CI-WATER/gsshapy | gsshapy/grid/grid_to_gssha.py | GRIDtoGSSHA.lsm_var_to_grid | def lsm_var_to_grid(self, out_grid_file, lsm_data_var, gssha_convert_var, time_step=0, ascii_format='grass'):
"""This function takes array data and writes out a GSSHA ascii grid.
Parameters:
out_grid_file(str): Location of ASCII file to generate.
lsm_data_var(str or list): This is the name of the variable in the LSM files to convert.
gssha_convert_var(str): This is the name of the variable used in GRIDtoGSSHA to convert data with.
time_step(Optional[int, datetime]): Time step in file to export data from. Default is the initial time step.
ascii_format(Optional[str]): Default is 'grass' for GRASS ASCII. If you want Arc ASCII, use 'arc'.
GRIDtoGSSHA Example:
.. code:: python
from gsshapy.grid import GRIDtoGSSHA
# STEP 1: Initialize class
g2g = GRIDtoGSSHA(gssha_project_folder='/path/to/gssha_project',
gssha_project_file_name='gssha_project.prj',
lsm_input_folder_path='/path/to/wrf-data',
lsm_search_card='*.nc',
lsm_lat_var='XLAT',
lsm_lon_var='XLONG',
lsm_time_var='Times',
lsm_lat_dim='south_north',
lsm_lon_dim='west_east',
lsm_time_dim='Time',
)
# STEP 2: Generate init snow grid (from LSM)
# NOTE: Card is INIT_SWE_DEPTH
g2g.lsm_var_to_grid(out_grid_file="E:/GSSHA/swe_grid.asc",
lsm_data_var='SWE_inst',
gssha_convert_var='swe')
"""
self._load_converted_gssha_data_from_lsm(gssha_convert_var, lsm_data_var, 'grid', time_step)
gssha_data_var_name = self.netcdf_attributes[gssha_convert_var]['gssha_name']
self.data = self.data.lsm.to_projection(gssha_data_var_name,
projection=self.gssha_grid.projection)
self._resample_data(gssha_data_var_name)
arr_grid = ArrayGrid(in_array=self.data[gssha_data_var_name].values,
wkt_projection=self.data.lsm.projection.ExportToWkt(),
geotransform=self.data.lsm.geotransform)
if ascii_format.strip().lower() == 'grass':
arr_grid.to_grass_ascii(out_grid_file)
elif ascii_format.strip().lower() == 'arc':
arr_grid.to_arc_ascii(out_grid_file)
else:
raise ValueError("Invalid argument for 'ascii_format'. Only 'grass' or 'arc' allowed.") | python | def lsm_var_to_grid(self, out_grid_file, lsm_data_var, gssha_convert_var, time_step=0, ascii_format='grass'):
"""This function takes array data and writes out a GSSHA ascii grid.
Parameters:
out_grid_file(str): Location of ASCII file to generate.
lsm_data_var(str or list): This is the variable name for precipitation in the LSM files.
gssha_convert_var(str): This is the name of the variable used in GRIDtoGSSHA to convert data with.
time_step(Optional[int, datetime]): Time step in file to export data from. Default is the initial time step.
ascii_format(Optional[str]): Default is 'grass' for GRASS ASCII. If you want Arc ASCII, use 'arc'.
GRIDtoGSSHA Example:
.. code:: python
from gsshapy.grid import GRIDtoGSSHA
# STEP 1: Initialize class
g2g = GRIDtoGSSHA(gssha_project_folder='/path/to/gssha_project',
gssha_project_file_name='gssha_project.prj',
lsm_input_folder_path='/path/to/wrf-data',
lsm_search_card='*.nc',
lsm_lat_var='XLAT',
lsm_lon_var='XLONG',
lsm_time_var='Times',
lsm_lat_dim='south_north',
lsm_lon_dim='west_east',
lsm_time_dim='Time',
)
# STEP 2: Generate init snow grid (from LSM)
# NOTE: Card is INIT_SWE_DEPTH
g2g.lsm_var_to_grid(out_grid_file="E:/GSSHA/swe_grid.asc",
lsm_data_var='SWE_inst',
gssha_convert_var='swe')
"""
self._load_converted_gssha_data_from_lsm(gssha_convert_var, lsm_data_var, 'grid', time_step)
gssha_data_var_name = self.netcdf_attributes[gssha_convert_var]['gssha_name']
self.data = self.data.lsm.to_projection(gssha_data_var_name,
projection=self.gssha_grid.projection)
self._resample_data(gssha_data_var_name)
arr_grid = ArrayGrid(in_array=self.data[gssha_data_var_name].values,
wkt_projection=self.data.lsm.projection.ExportToWkt(),
geotransform=self.data.lsm.geotransform)
if ascii_format.strip().lower() == 'grass':
arr_grid.to_grass_ascii(out_grid_file)
elif ascii_format.strip().lower() == 'arc':
arr_grid.to_arc_ascii(out_grid_file)
else:
raise ValueError("Invalid argument for 'ascii_format'. Only 'grass' or 'arc' allowed.") | [
"def",
"lsm_var_to_grid",
"(",
"self",
",",
"out_grid_file",
",",
"lsm_data_var",
",",
"gssha_convert_var",
",",
"time_step",
"=",
"0",
",",
"ascii_format",
"=",
"'grass'",
")",
":",
"self",
".",
"_load_converted_gssha_data_from_lsm",
"(",
"gssha_convert_var",
",",
"lsm_data_var",
",",
"'grid'",
",",
"time_step",
")",
"gssha_data_var_name",
"=",
"self",
".",
"netcdf_attributes",
"[",
"gssha_convert_var",
"]",
"[",
"'gssha_name'",
"]",
"self",
".",
"data",
"=",
"self",
".",
"data",
".",
"lsm",
".",
"to_projection",
"(",
"gssha_data_var_name",
",",
"projection",
"=",
"self",
".",
"gssha_grid",
".",
"projection",
")",
"self",
".",
"_resample_data",
"(",
"gssha_data_var_name",
")",
"arr_grid",
"=",
"ArrayGrid",
"(",
"in_array",
"=",
"self",
".",
"data",
"[",
"gssha_data_var_name",
"]",
".",
"values",
",",
"wkt_projection",
"=",
"self",
".",
"data",
".",
"lsm",
".",
"projection",
".",
"ExportToWkt",
"(",
")",
",",
"geotransform",
"=",
"self",
".",
"data",
".",
"lsm",
".",
"geotransform",
")",
"if",
"ascii_format",
".",
"strip",
"(",
")",
".",
"lower",
"(",
")",
"==",
"'grass'",
":",
"arr_grid",
".",
"to_grass_ascii",
"(",
"out_grid_file",
")",
"elif",
"ascii_format",
".",
"strip",
"(",
")",
".",
"lower",
"(",
")",
"==",
"'arc'",
":",
"arr_grid",
".",
"to_arc_ascii",
"(",
"out_grid_file",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Invalid argument for 'ascii_format'. Only 'grass' or 'arc' allowed.\"",
")"
] | This function takes array data and writes out a GSSHA ascii grid.
Parameters:
out_grid_file(str): Location of ASCII file to generate.
lsm_data_var(str or list): This is the variable name for precipitation in the LSM files.
gssha_convert_var(str): This is the name of the variable used in GRIDtoGSSHA to convert data with.
time_step(Optional[int, datetime]): Time step in file to export data from. Default is the initial time step.
ascii_format(Optional[str]): Default is 'grass' for GRASS ASCII. If you want Arc ASCII, use 'arc'.
GRIDtoGSSHA Example:
.. code:: python
from gsshapy.grid import GRIDtoGSSHA
# STEP 1: Initialize class
g2g = GRIDtoGSSHA(gssha_project_folder='/path/to/gssha_project',
gssha_project_file_name='gssha_project.prj',
lsm_input_folder_path='/path/to/wrf-data',
lsm_search_card='*.nc',
lsm_lat_var='XLAT',
lsm_lon_var='XLONG',
lsm_time_var='Times',
lsm_lat_dim='south_north',
lsm_lon_dim='west_east',
lsm_time_dim='Time',
)
# STEP 2: Generate init snow grid (from LSM)
# NOTE: Card is INIT_SWE_DEPTH
g2g.lsm_var_to_grid(out_grid_file="E:/GSSHA/swe_grid.asc",
lsm_data_var='SWE_inst',
gssha_convert_var='swe') | [
"This",
"function",
"takes",
"array",
"data",
"and",
"writes",
"out",
"a",
"GSSHA",
"ascii",
"grid",
"."
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/grid/grid_to_gssha.py#L911-L962 | train |
CI-WATER/gsshapy | gsshapy/grid/grid_to_gssha.py | GRIDtoGSSHA._write_hmet_card_file | def _write_hmet_card_file(self, hmet_card_file_path, main_output_folder):
"""
This function writes the HMET_ASCII card file
with ASCII file list for input to GSSHA
"""
with io_open(hmet_card_file_path, 'w') as out_hmet_list_file:
for hour_time in self.data.lsm.datetime:
date_str = self._time_to_string(hour_time, "%Y%m%d%H")
out_hmet_list_file.write(u"{0}\n".format(path.join(main_output_folder, date_str))) | python | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/grid/grid_to_gssha.py#L1061-L1069 | train |
CI-WATER/gsshapy | gsshapy/grid/grid_to_gssha.py | GRIDtoGSSHA.lsm_data_to_arc_ascii | def lsm_data_to_arc_ascii(self, data_var_map_array,
main_output_folder=""):
"""Writes extracted data to Arc ASCII file format into folder
to be read in by GSSHA. Also generates the HMET_ASCII card file
for GSSHA, named 'hmet_file_list.txt', in the same folder.
.. warning:: This method is for GSSHA 6 versions; for GSSHA 7 or greater, use lsm_data_to_subset_netcdf.
.. note::
GSSHA CARDS:
* HMET_ASCII pointing to the hmet_file_list.txt
* LONG_TERM (see: http://www.gsshawiki.com/Long-term_Simulations:Global_parameters)
Parameters:
data_var_map_array(list): Array to map the variables in the LSM file to the
matching required GSSHA data.
main_output_folder(Optional[str]): This is the path to place the generated ASCII files.
If not included, it defaults to
os.path.join(self.gssha_project_folder, "hmet_ascii_data").
GRIDtoGSSHA Example:
.. code:: python
from gsshapy.grid import GRIDtoGSSHA
#STEP 1: Initialize class
g2g = GRIDtoGSSHA(gssha_project_folder='/path/to/gssha_project',
gssha_project_file_name='gssha_project.prj',
lsm_input_folder_path='/path/to/wrf-data',
lsm_search_card='*.nc',
lsm_lat_var='XLAT',
lsm_lon_var='XLONG',
lsm_time_var='Times',
lsm_lat_dim='south_north',
lsm_lon_dim='west_east',
lsm_time_dim='Time',
)
#STEP 2: Generate ASCII DATA
#SEE: http://www.meteo.unican.es/wiki/cordexwrf/OutputVariables
#EXAMPLE DATA ARRAY 1: WRF GRID DATA BASED
data_var_map_array = [
['precipitation_acc', ['RAINC', 'RAINNC']],
['pressure', 'PSFC'],
['relative_humidity', ['Q2', 'PSFC', 'T2']], #MUST BE IN ORDER: ['SPECIFIC HUMIDITY', 'PRESSURE', 'TEMPERATURE']
['wind_speed', ['U10', 'V10']], #['U_VELOCITY', 'V_VELOCITY']
['direct_radiation', ['SWDOWN', 'DIFFUSE_FRAC']], #MUST BE IN ORDER: ['GLOBAL RADIATION', 'DIFFUSIVE FRACTION']
['diffusive_radiation', ['SWDOWN', 'DIFFUSE_FRAC']], #MUST BE IN ORDER: ['GLOBAL RADIATION', 'DIFFUSIVE FRACTION']
['temperature', 'T2'],
['cloud_cover' , 'CLDFRA'], #'CLOUD_FRACTION'
]
g2g.lsm_data_to_arc_ascii(data_var_map_array)
HRRRtoGSSHA Example:
.. code:: python
from gsshapy.grid import HRRRtoGSSHA
#STEP 1: Initialize class
h2g = HRRRtoGSSHA(
#YOUR INIT PARAMETERS HERE
)
#STEP 2: Generate ASCII DATA
#EXAMPLE DATA ARRAY 1: HRRR GRID DATA BASED
data_var_map_array = [
['precipitation_rate', 'prate'],
['pressure', 'sp'],
['relative_humidity', '2r'],
['wind_speed', ['10u', '10v']],
['direct_radiation_cc', ['dswrf', 'tcc']],
['diffusive_radiation_cc', ['dswrf', 'tcc']],
['temperature', 't'],
['cloud_cover_pc' , 'tcc'],
]
h2g.lsm_data_to_arc_ascii(data_var_map_array)
"""
self._check_lsm_input(data_var_map_array)
if not main_output_folder:
main_output_folder = path.join(self.gssha_project_folder, "hmet_ascii_data")
try:
mkdir(main_output_folder)
except OSError:
pass
log.info("Outputting HMET data to {0}".format(main_output_folder))
#PART 2: DATA
for data_var_map in data_var_map_array:
gssha_data_var, lsm_data_var = data_var_map
gssha_data_hmet_name = self.netcdf_attributes[gssha_data_var]['hmet_name']
gssha_data_var_name = self.netcdf_attributes[gssha_data_var]['gssha_name']
self._load_converted_gssha_data_from_lsm(gssha_data_var, lsm_data_var, 'ascii')
self._convert_data_to_hourly(gssha_data_var_name)
self.data = self.data.lsm.to_projection(gssha_data_var_name,
projection=self.gssha_grid.projection)
for time_idx in range(self.data.dims['time']):
arr_grid = ArrayGrid(in_array=self.data[gssha_data_var_name][time_idx].values,
wkt_projection=self.data.lsm.projection.ExportToWkt(),
geotransform=self.data.lsm.geotransform,
nodata_value=-9999)
date_str = self._time_to_string(self.data.lsm.datetime[time_idx], "%Y%m%d%H")
ascii_file_path = path.join(main_output_folder, "{0}_{1}.asc".format(date_str, gssha_data_hmet_name))
arr_grid.to_arc_ascii(ascii_file_path)
#PART 3: HMET_ASCII card input file with ASCII file list
hmet_card_file_path = path.join(main_output_folder, 'hmet_file_list.txt')
self._write_hmet_card_file(hmet_card_file_path, main_output_folder) | python | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/grid/grid_to_gssha.py#L1072-L1191 | train |
CI-WATER/gsshapy | gsshapy/grid/grid_to_gssha.py | GRIDtoGSSHA.lsm_data_to_subset_netcdf | def lsm_data_to_subset_netcdf(self, netcdf_file_path,
data_var_map_array,
resample_method=None):
"""Writes extracted data to the NetCDF file format
.. todo:: NetCDF output data time is always in UTC time. Need to convert to local timezone for GSSHA.
.. warning:: The NetCDF GSSHA file is only supported in GSSHA 7 or greater.
.. note::
GSSHA CARDS:
* HMET_NETCDF pointing to the netcdf_file_path
* LONG_TERM (see: http://www.gsshawiki.com/Long-term_Simulations:Global_parameters)
Parameters:
netcdf_file_path(string): Path to output the NetCDF file for GSSHA.
data_var_map_array(list): Array to map the variables in the LSM file to the
matching required GSSHA data.
resample_method(Optional[gdalconst]): Resample input method to match hmet data to GSSHA grid for NetCDF output. Default is None.
GRIDtoGSSHA Example:
.. code:: python
from gsshapy.grid import GRIDtoGSSHA
#STEP 1: Initialize class
g2g = GRIDtoGSSHA(gssha_project_folder='/path/to/gssha_project',
gssha_project_file_name='gssha_project.prj',
lsm_input_folder_path='/path/to/wrf-data',
lsm_search_card='*.nc',
lsm_lat_var='XLAT',
lsm_lon_var='XLONG',
lsm_time_var='Times',
lsm_lat_dim='south_north',
lsm_lon_dim='west_east',
lsm_time_dim='Time',
)
#STEP 2: Generate NetCDF DATA
#EXAMPLE DATA ARRAY 1: WRF GRID DATA BASED
#SEE: http://www.meteo.unican.es/wiki/cordexwrf/OutputVariables
data_var_map_array = [
['precipitation_acc', ['RAINC', 'RAINNC']],
['pressure', 'PSFC'],
['relative_humidity', ['Q2', 'PSFC', 'T2']], #MUST BE IN ORDER: ['SPECIFIC HUMIDITY', 'PRESSURE', 'TEMPERATURE']
['wind_speed', ['U10', 'V10']], #['U_VELOCITY', 'V_VELOCITY']
['direct_radiation', ['SWDOWN', 'DIFFUSE_FRAC']], #MUST BE IN ORDER: ['GLOBAL RADIATION', 'DIFFUSIVE FRACTION']
['diffusive_radiation', ['SWDOWN', 'DIFFUSE_FRAC']], #MUST BE IN ORDER: ['GLOBAL RADIATION', 'DIFFUSIVE FRACTION']
['temperature', 'T2'],
['cloud_cover' , 'CLDFRA'], #'CLOUD_FRACTION'
]
g2g.lsm_data_to_subset_netcdf("E:/GSSHA/gssha_wrf_data.nc",
data_var_map_array)
HRRRtoGSSHA Example:
.. code:: python
from gsshapy.grid import HRRRtoGSSHA
#STEP 1: Initialize class
h2g = HRRRtoGSSHA(
#YOUR INIT PARAMETERS HERE
)
#STEP 2: Generate NetCDF DATA
#EXAMPLE DATA ARRAY 2: HRRR GRID DATA BASED
data_var_map_array = [
['precipitation_rate', 'prate'],
['pressure', 'sp'],
['relative_humidity', '2r'],
['wind_speed', ['10u', '10v']],
['direct_radiation_cc', ['dswrf', 'tcc']],
['diffusive_radiation_cc', ['dswrf', 'tcc']],
['temperature', 't'],
['cloud_cover_pc' , 'tcc'],
]
h2g.lsm_data_to_subset_netcdf("E:/GSSHA/gssha_wrf_data.nc",
data_var_map_array)
"""
self._check_lsm_input(data_var_map_array)
output_datasets = []
#DATA
for gssha_var, lsm_var in data_var_map_array:
if gssha_var in self.netcdf_attributes:
self._load_converted_gssha_data_from_lsm(gssha_var, lsm_var, 'netcdf')
#previously just added data, but needs to be hourly
gssha_data_var_name = self.netcdf_attributes[gssha_var]['gssha_name']
self._convert_data_to_hourly(gssha_data_var_name)
if resample_method:
self._resample_data(gssha_data_var_name)
else:
self.data = self.data.lsm.to_projection(gssha_data_var_name,
projection=self.gssha_grid.projection)
output_datasets.append(self.data)
else:
raise ValueError("Invalid GSSHA variable name: {0} ...".format(gssha_var))
output_dataset = xr.merge(output_datasets)
#add global attributes
output_dataset.attrs['Conventions'] = 'CF-1.6'
output_dataset.attrs['title'] = 'GSSHA LSM Input'
output_dataset.attrs['history'] = 'date_created: {0}'.format(datetime.utcnow())
output_dataset.attrs['proj4'] = self.data.attrs['proj4']
output_dataset.attrs['geotransform'] = self.data.attrs['geotransform']
output_dataset.to_netcdf(netcdf_file_path) | python | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/grid/grid_to_gssha.py#L1193-L1307 | train |
Losant/losant-rest-python | losantrest/data.py | Data.export | def export(self, **kwargs):
"""
Creates a csv file from a query of devices and attributes over a time range.
Authentication:
The client must be configured with a valid api
access token to call this action. The token
must include at least one of the following scopes:
all.Application, all.Application.read, all.Device, all.Device.read, all.Organization, all.Organization.read, all.User, all.User.read, data.*, or data.export.
Parameters:
* {string} applicationId - ID associated with the application
* {hash} query - The query parameters (https://api.losant.com/#/definitions/dataExport)
* {string} losantdomain - Domain scope of request (rarely needed)
* {boolean} _actions - Return resource actions in response
* {boolean} _links - Return resource link in response
* {boolean} _embedded - Return embedded resources in response
Responses:
* 200 - If command was successfully sent (https://api.losant.com/#/definitions/success)
Errors:
* 400 - Error if malformed request (https://api.losant.com/#/definitions/error)
* 404 - Error if application was not found (https://api.losant.com/#/definitions/error)
"""
query_params = {"_actions": "false", "_links": "true", "_embedded": "true"}
path_params = {}
headers = {}
body = None
if "applicationId" in kwargs:
path_params["applicationId"] = kwargs["applicationId"]
if "query" in kwargs:
body = kwargs["query"]
if "losantdomain" in kwargs:
headers["losantdomain"] = kwargs["losantdomain"]
if "_actions" in kwargs:
query_params["_actions"] = kwargs["_actions"]
if "_links" in kwargs:
query_params["_links"] = kwargs["_links"]
if "_embedded" in kwargs:
query_params["_embedded"] = kwargs["_embedded"]
path = "/applications/{applicationId}/data/export".format(**path_params)
return self.client.request("POST", path, params=query_params, headers=headers, body=body) | python |
* 404 - Error if application was not found (https://api.losant.com/#/definitions/error) | [
"Creates",
"a",
"csv",
"file",
"from",
"a",
"query",
"of",
"devices",
"and",
"attributes",
"over",
"a",
"time",
"range",
"."
] | 75b20decda0e999002f21811c3508f087e7f13b5 | https://github.com/Losant/losant-rest-python/blob/75b20decda0e999002f21811c3508f087e7f13b5/losantrest/data.py#L34-L80 | train |
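A minimal usage sketch for Data.export follows; the Client construction and the query fields are assumptions, since only export() itself is documented in the record above.
from losantrest import Client

client = Client('my-api-access-token')         # assumed token-based constructor
result = client.data.export(
    applicationId='575ec8687ae143cd83dc4a97',  # hypothetical application ID
    query={'email': 'user@example.com'})       # hypothetical dataExport body
print(result)  # a success envelope on HTTP 200, per the responses listed above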
gooofy/py-marytts | marytts/__init__.py | MaryTTS._generate | def _generate(self, message):
"""Given a message in message,
return a response in the appropriate
format."""
raw_params = {"INPUT_TEXT" : message.encode('UTF8'),
"INPUT_TYPE" : self.input_type,
"OUTPUT_TYPE" : self.output_type,
"LOCALE" : self._locale,
"AUDIO" : self.audio,
"VOICE" : self._voice,
}
params = urlencode(raw_params)
headers = {}
logging.debug('maryclient: generate, raw_params=%s' % repr(raw_params))
# Open connection to self._host, self._port.
conn = httplib.HTTPConnection(self._host, self._port)
#conn.set_debuglevel(5)
conn.request("POST", "/process", params, headers)
response = conn.getresponse()
if response.status != 200:
logging.error(response.getheaders())
raise Exception ("{0}: {1}".format(response.status, response.reason))
return response.read() | python | def _generate(self, message):
"""Given a message in message,
return a response in the appropriate
format."""
raw_params = {"INPUT_TEXT" : message.encode('UTF8'),
"INPUT_TYPE" : self.input_type,
"OUTPUT_TYPE" : self.output_type,
"LOCALE" : self._locale,
"AUDIO" : self.audio,
"VOICE" : self._voice,
}
params = urlencode(raw_params)
headers = {}
logging.debug('maryclient: generate, raw_params=%s' % repr(raw_params))
# Open connection to self._host, self._port.
conn = httplib.HTTPConnection(self._host, self._port)
#conn.set_debuglevel(5)
conn.request("POST", "/process", params, headers)
response = conn.getresponse()
if response.status != 200:
logging.error(response.getheaders())
raise Exception ("{0}: {1}".format(response.status, response.reason))
return response.read() | [
"def",
"_generate",
"(",
"self",
",",
"message",
")",
":",
"raw_params",
"=",
"{",
"\"INPUT_TEXT\"",
":",
"message",
".",
"encode",
"(",
"'UTF8'",
")",
",",
"\"INPUT_TYPE\"",
":",
"self",
".",
"input_type",
",",
"\"OUTPUT_TYPE\"",
":",
"self",
".",
"output_type",
",",
"\"LOCALE\"",
":",
"self",
".",
"_locale",
",",
"\"AUDIO\"",
":",
"self",
".",
"audio",
",",
"\"VOICE\"",
":",
"self",
".",
"_voice",
",",
"}",
"params",
"=",
"urlencode",
"(",
"raw_params",
")",
"headers",
"=",
"{",
"}",
"logging",
".",
"debug",
"(",
"'maryclient: generate, raw_params=%s'",
"%",
"repr",
"(",
"raw_params",
")",
")",
"# Open connection to self._host, self._port.",
"conn",
"=",
"httplib",
".",
"HTTPConnection",
"(",
"self",
".",
"_host",
",",
"self",
".",
"_port",
")",
"#conn.set_debuglevel(5)",
"conn",
".",
"request",
"(",
"\"POST\"",
",",
"\"/process\"",
",",
"params",
",",
"headers",
")",
"response",
"=",
"conn",
".",
"getresponse",
"(",
")",
"if",
"response",
".",
"status",
"!=",
"200",
":",
"logging",
".",
"error",
"(",
"response",
".",
"getheaders",
"(",
")",
")",
"raise",
"Exception",
"(",
"\"{0}: {1}\"",
".",
"format",
"(",
"response",
".",
"status",
",",
"response",
".",
"reason",
")",
")",
"return",
"response",
".",
"read",
"(",
")"
] | Given a message in message,
return a response in the appropriate
format. | [
"Given",
"a",
"message",
"in",
"message",
"return",
"a",
"response",
"in",
"the",
"appropriate",
"format",
"."
] | f2693531ea841d21a7b94be0304c3dc8f1d9e5f7 | https://github.com/gooofy/py-marytts/blob/f2693531ea841d21a7b94be0304c3dc8f1d9e5f7/marytts/__init__.py#L84-L110 | train |
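A hedged sketch of driving MaryTTS against a running MARY server; the no-argument constructor and the writable input_type/output_type/audio attributes are assumptions inferred from the fields _generate() reads above.
from marytts import MaryTTS

tts = MaryTTS()                    # assumed defaults: local server, port 59125
tts.input_type = 'TEXT'            # attributes consumed by _generate()
tts.output_type = 'AUDIO'
tts.audio = 'WAVE_FILE'
wav_bytes = tts._generate(u'Hello world')  # POST /process, raw response body
with open('hello.wav', 'wb') as f:
    f.write(wav_bytes)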
pedrotgn/pyactor | pyactor/thread/actor.py | Actor.receive | def receive(self, msg):
'''
The message received from the queue specifies a method of the
class the actor represents. This invokes it. If the
communication is an ASK, sends the result back
to the channel included in the message as an
ASKRESPONSE.
If it is a FUTURE, generates a FUTURERESPONSE
to send the result to the manager.
:param msg: The message is a dictionary using the constants
defined in util.py (:mod:`pyactor.util`).
'''
if msg[TYPE] == TELL and msg[METHOD] == 'stop':
self.running = False
self.future_manager.stop()
else:
result = None
try:
invoke = getattr(self._obj, msg[METHOD])
params = msg[PARAMS]
result = invoke(*params[0], **params[1])
except Exception, e:
if msg[TYPE] == TELL:
print e
return
result = e
self.send_response(result, msg) | python | def receive(self, msg):
'''
The message received from the queue specifies a method of the
class the actor represents. This invokes it. If the
communication is an ASK, sends the result back
to the channel included in the message as an
ASKRESPONSE.
If it is a FUTURE, generates a FUTURERESPONSE
to send the result to the manager.
:param msg: The message is a dictionary using the constants
defined in util.py (:mod:`pyactor.util`).
'''
if msg[TYPE] == TELL and msg[METHOD] == 'stop':
self.running = False
self.future_manager.stop()
else:
result = None
try:
invoke = getattr(self._obj, msg[METHOD])
params = msg[PARAMS]
result = invoke(*params[0], **params[1])
except Exception, e:
if msg[TYPE] == TELL:
print e
return
result = e
self.send_response(result, msg) | [
"def",
"receive",
"(",
"self",
",",
"msg",
")",
":",
"if",
"msg",
"[",
"TYPE",
"]",
"==",
"TELL",
"and",
"msg",
"[",
"METHOD",
"]",
"==",
"'stop'",
":",
"self",
".",
"running",
"=",
"False",
"self",
".",
"future_manager",
".",
"stop",
"(",
")",
"else",
":",
"result",
"=",
"None",
"try",
":",
"invoke",
"=",
"getattr",
"(",
"self",
".",
"_obj",
",",
"msg",
"[",
"METHOD",
"]",
")",
"params",
"=",
"msg",
"[",
"PARAMS",
"]",
"result",
"=",
"invoke",
"(",
"*",
"params",
"[",
"0",
"]",
",",
"*",
"*",
"params",
"[",
"1",
"]",
")",
"except",
"Exception",
",",
"e",
":",
"if",
"msg",
"[",
"TYPE",
"]",
"==",
"TELL",
":",
"print",
"e",
"return",
"result",
"=",
"e",
"self",
".",
"send_response",
"(",
"result",
",",
"msg",
")"
] | The message received from the queue specifies a method of the
class the actor represents. This invokes it. If the
communication is an ASK, sends the result back
to the channel included in the message as an
ASKRESPONSE.
If it is a FUTURE, generates a FUTURERESPONSE
to send the result to the manager.
:param msg: The message is a dictionary using the constants
defined in util.py (:mod:`pyactor.util`). | [
"The",
"message",
"received",
"from",
"the",
"queue",
"specify",
"a",
"method",
"of",
"the",
"class",
"the",
"actor",
"represents",
".",
"This",
"invokes",
"it",
".",
"If",
"the",
"communication",
"is",
"an",
"ASK",
"sends",
"the",
"result",
"back",
"to",
"the",
"channel",
"included",
"in",
"the",
"message",
"as",
"an",
"ASKRESPONSE",
"."
] | 24d98d134dd4228f2ba38e83611e9c3f50ec2fd4 | https://github.com/pedrotgn/pyactor/blob/24d98d134dd4228f2ba38e83611e9c3f50ec2fd4/pyactor/thread/actor.py#L100-L128 | train |
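A sketch of the message shape receive() expects, assuming the TYPE/METHOD/PARAMS/TELL constants are importable from pyactor.util as the docstring indicates; the 'echo' method name is hypothetical.
from pyactor.util import TYPE, METHOD, PARAMS, TELL

msg = {
    TYPE: TELL,                # fire-and-forget; ASK/FUTURE would send a reply
    METHOD: 'echo',            # method looked up on the wrapped object
    PARAMS: (('hello',), {}),  # (args, kwargs), applied as invoke(*a, **kw)
}
# actor.receive(msg) then calls wrapped_obj.echo('hello')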
timofurrer/ramlient | ramlient/utils.py | download_file | def download_file(url):
"""
Downloads a file from the specified URL.
:param str url: The URL to the file to be downloaded
:returns: the downloaded file's content
:rtype: str
"""
response = requests.get(url)
if response.status_code != 200:
return None
return response.text | python | def download_file(url):
"""
Downloads a file from the specified URL.
:param str url: The URL to the file to be downloaded
:returns: the downloaded file's content
:rtype: str
"""
response = requests.get(url)
if response.status_code != 200:
return None
return response.text | [
"def",
"download_file",
"(",
"url",
")",
":",
"response",
"=",
"requests",
".",
"get",
"(",
"url",
")",
"if",
"response",
".",
"status_code",
"is",
"not",
"200",
":",
"return",
"None",
"return",
"response",
".",
"text"
] | Downloads a file from the specified URL.
:param str url: The URL to the file to be downloaded
:returns: the downloaded file's content
:rtype: str | [
"Downloads",
"a",
"file",
"from",
"the",
"specified",
"URL",
"."
] | e93092252635a6b3b0aca2c390b9f820368b791c | https://github.com/timofurrer/ramlient/blob/e93092252635a6b3b0aca2c390b9f820368b791c/ramlient/utils.py#L34-L46 | train |
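Usage sketch: download_file() returns the response body on HTTP 200 and None otherwise; the URL is hypothetical.
from ramlient.utils import download_file

raml = download_file('https://example.com/api.raml')
if raml is None:
    print('download failed (non-200 status)')
else:
    print(raml.splitlines()[0])  # e.g. a '#%RAML 0.8' header line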
vinci1it2000/schedula | schedula/dispatcher.py | Dispatcher.get_sub_dsp | def get_sub_dsp(self, nodes_bunch, edges_bunch=None):
"""
Returns the sub-dispatcher induced by given node and edge bunches.
The induced sub-dispatcher contains the available nodes in nodes_bunch
and edges between those nodes, excluding those that are in edges_bunch.
The available nodes are non isolated nodes and function nodes that have
all inputs and at least one output.
:param nodes_bunch:
A container of node ids which will be iterated through once.
:type nodes_bunch: list[str], iterable
:param edges_bunch:
A container of edge ids that will be removed.
:type edges_bunch: list[(str, str)], iterable, optional
:return:
A dispatcher.
:rtype: Dispatcher
.. seealso:: :func:`get_sub_dsp_from_workflow`
.. note::
The sub-dispatcher edge or node attributes just point to the
original dispatcher. So changes to the node or edge structure
will not be reflected in the original dispatcher map while changes
to the attributes will.
**--------------------------------------------------------------------**
**Example**:
A dispatcher with two functions `fun1` and `fun2`:
.. dispatcher:: dsp
:opt: graph_attr={'ratio': '1'}
>>> dsp = Dispatcher(name='Dispatcher')
>>> dsp.add_function(function_id='fun1', inputs=['a', 'b'],
... outputs=['c', 'd'])
'fun1'
>>> dsp.add_function(function_id='fun2', inputs=['a', 'd'],
... outputs=['c', 'e'])
'fun2'
Get the sub-dispatcher induced by given nodes bunch::
>>> sub_dsp = dsp.get_sub_dsp(['a', 'c', 'd', 'e', 'fun2'])
.. dispatcher:: sub_dsp
:opt: graph_attr={'ratio': '1'}
>>> sub_dsp.name = 'Sub-Dispatcher'
"""
# Get real paths.
nodes_bunch = [self.get_node(u)[1][0] for u in nodes_bunch]
# Define an empty dispatcher.
sub_dsp = self.copy_structure(
dmap=self.dmap.subgraph(nodes_bunch).copy()
)
# Namespace shortcuts for speed.
nodes, dmap_out_degree = sub_dsp.nodes, sub_dsp.dmap.out_degree
dmap_dv, dmap_rm_edge = self.default_values, sub_dsp.dmap.remove_edge
dmap_rm_node = sub_dsp.dmap.remove_node
# Remove function nodes that do not have all their inputs available.
for u in nodes_bunch:
n = nodes[u].get('inputs', None) # Function inputs.
# Not all inputs available.
if n is not None and not set(n).issubset(nodes_bunch):
dmap_rm_node(u) # Remove function node.
# Remove edges that are not in edges_bunch.
if edges_bunch is not None:
for e in edges_bunch: # Iterate sub-graph edges.
dmap_rm_edge(*e) # Remove edge.
# Remove function node with no outputs.
for u in [u for u, n in sub_dsp.dmap.nodes.items()
if n['type'] == 'function']:
# noinspection PyCallingNonCallable
if not dmap_out_degree(u): # No outputs.
dmap_rm_node(u) # Remove function node.
from networkx import isolates
# Remove isolate nodes from sub-graph.
sub_dsp.dmap.remove_nodes_from(list(isolates(sub_dsp.dmap)))
# Set default values.
sub_dsp.default_values = {k: dmap_dv[k] for k in dmap_dv if k in nodes}
return sub_dsp | python | def get_sub_dsp(self, nodes_bunch, edges_bunch=None):
"""
Returns the sub-dispatcher induced by given node and edge bunches.
The induced sub-dispatcher contains the available nodes in nodes_bunch
and edges between those nodes, excluding those that are in edges_bunch.
The available nodes are non isolated nodes and function nodes that have
all inputs and at least one output.
:param nodes_bunch:
A container of node ids which will be iterated through once.
:type nodes_bunch: list[str], iterable
:param edges_bunch:
A container of edge ids that will be removed.
:type edges_bunch: list[(str, str)], iterable, optional
:return:
A dispatcher.
:rtype: Dispatcher
.. seealso:: :func:`get_sub_dsp_from_workflow`
.. note::
The sub-dispatcher edge or node attributes just point to the
original dispatcher. So changes to the node or edge structure
will not be reflected in the original dispatcher map while changes
to the attributes will.
**--------------------------------------------------------------------**
**Example**:
A dispatcher with two functions `fun1` and `fun2`:
.. dispatcher:: dsp
:opt: graph_attr={'ratio': '1'}
>>> dsp = Dispatcher(name='Dispatcher')
>>> dsp.add_function(function_id='fun1', inputs=['a', 'b'],
... outputs=['c', 'd'])
'fun1'
>>> dsp.add_function(function_id='fun2', inputs=['a', 'd'],
... outputs=['c', 'e'])
'fun2'
Get the sub-dispatcher induced by given nodes bunch::
>>> sub_dsp = dsp.get_sub_dsp(['a', 'c', 'd', 'e', 'fun2'])
.. dispatcher:: sub_dsp
:opt: graph_attr={'ratio': '1'}
>>> sub_dsp.name = 'Sub-Dispatcher'
"""
# Get real paths.
nodes_bunch = [self.get_node(u)[1][0] for u in nodes_bunch]
# Define an empty dispatcher.
sub_dsp = self.copy_structure(
dmap=self.dmap.subgraph(nodes_bunch).copy()
)
# Namespace shortcuts for speed.
nodes, dmap_out_degree = sub_dsp.nodes, sub_dsp.dmap.out_degree
dmap_dv, dmap_rm_edge = self.default_values, sub_dsp.dmap.remove_edge
dmap_rm_node = sub_dsp.dmap.remove_node
# Remove function nodes that do not have all their inputs available.
for u in nodes_bunch:
n = nodes[u].get('inputs', None) # Function inputs.
# Not all inputs available.
if n is not None and not set(n).issubset(nodes_bunch):
dmap_rm_node(u) # Remove function node.
# Remove edges that are not in edges_bunch.
if edges_bunch is not None:
for e in edges_bunch: # Iterate sub-graph edges.
dmap_rm_edge(*e) # Remove edge.
# Remove function node with no outputs.
for u in [u for u, n in sub_dsp.dmap.nodes.items()
if n['type'] == 'function']:
# noinspection PyCallingNonCallable
if not dmap_out_degree(u): # No outputs.
dmap_rm_node(u) # Remove function node.
from networkx import isolates
# Remove isolate nodes from sub-graph.
sub_dsp.dmap.remove_nodes_from(list(isolates(sub_dsp.dmap)))
# Set default values.
sub_dsp.default_values = {k: dmap_dv[k] for k in dmap_dv if k in nodes}
return sub_dsp | [
"def",
"get_sub_dsp",
"(",
"self",
",",
"nodes_bunch",
",",
"edges_bunch",
"=",
"None",
")",
":",
"# Get real paths.",
"nodes_bunch",
"=",
"[",
"self",
".",
"get_node",
"(",
"u",
")",
"[",
"1",
"]",
"[",
"0",
"]",
"for",
"u",
"in",
"nodes_bunch",
"]",
"# Define an empty dispatcher.",
"sub_dsp",
"=",
"self",
".",
"copy_structure",
"(",
"dmap",
"=",
"self",
".",
"dmap",
".",
"subgraph",
"(",
"nodes_bunch",
")",
".",
"copy",
"(",
")",
")",
"# Namespace shortcuts for speed.",
"nodes",
",",
"dmap_out_degree",
"=",
"sub_dsp",
".",
"nodes",
",",
"sub_dsp",
".",
"dmap",
".",
"out_degree",
"dmap_dv",
",",
"dmap_rm_edge",
"=",
"self",
".",
"default_values",
",",
"sub_dsp",
".",
"dmap",
".",
"remove_edge",
"dmap_rm_node",
"=",
"sub_dsp",
".",
"dmap",
".",
"remove_node",
"# Remove function nodes that has not whole inputs available.",
"for",
"u",
"in",
"nodes_bunch",
":",
"n",
"=",
"nodes",
"[",
"u",
"]",
".",
"get",
"(",
"'inputs'",
",",
"None",
")",
"# Function inputs.",
"# No all inputs",
"if",
"n",
"is",
"not",
"None",
"and",
"not",
"set",
"(",
"n",
")",
".",
"issubset",
"(",
"nodes_bunch",
")",
":",
"dmap_rm_node",
"(",
"u",
")",
"# Remove function node.",
"# Remove edges that are not in edges_bunch.",
"if",
"edges_bunch",
"is",
"not",
"None",
":",
"for",
"e",
"in",
"edges_bunch",
":",
"# Iterate sub-graph edges.",
"dmap_rm_edge",
"(",
"*",
"e",
")",
"# Remove edge.",
"# Remove function node with no outputs.",
"for",
"u",
"in",
"[",
"u",
"for",
"u",
",",
"n",
"in",
"sub_dsp",
".",
"dmap",
".",
"nodes",
".",
"items",
"(",
")",
"if",
"n",
"[",
"'type'",
"]",
"==",
"'function'",
"]",
":",
"# noinspection PyCallingNonCallable",
"if",
"not",
"dmap_out_degree",
"(",
"u",
")",
":",
"# No outputs.",
"dmap_rm_node",
"(",
"u",
")",
"# Remove function node.",
"from",
"networkx",
"import",
"isolates",
"# Remove isolate nodes from sub-graph.",
"sub_dsp",
".",
"dmap",
".",
"remove_nodes_from",
"(",
"list",
"(",
"isolates",
"(",
"sub_dsp",
".",
"dmap",
")",
")",
")",
"# Set default values.",
"sub_dsp",
".",
"default_values",
"=",
"{",
"k",
":",
"dmap_dv",
"[",
"k",
"]",
"for",
"k",
"in",
"dmap_dv",
"if",
"k",
"in",
"nodes",
"}",
"return",
"sub_dsp"
] | Returns the sub-dispatcher induced by given node and edge bunches.
The induced sub-dispatcher contains the available nodes in nodes_bunch
and edges between those nodes, excluding those that are in edges_bunch.
The available nodes are non isolated nodes and function nodes that have
all inputs and at least one output.
:param nodes_bunch:
A container of node ids which will be iterated through once.
:type nodes_bunch: list[str], iterable
:param edges_bunch:
A container of edge ids that will be removed.
:type edges_bunch: list[(str, str)], iterable, optional
:return:
A dispatcher.
:rtype: Dispatcher
.. seealso:: :func:`get_sub_dsp_from_workflow`
.. note::
The sub-dispatcher edge or node attributes just point to the
original dispatcher. So changes to the node or edge structure
will not be reflected in the original dispatcher map while changes
to the attributes will.
**--------------------------------------------------------------------**
**Example**:
A dispatcher with two functions `fun1` and `fun2`:
.. dispatcher:: dsp
:opt: graph_attr={'ratio': '1'}
>>> dsp = Dispatcher(name='Dispatcher')
>>> dsp.add_function(function_id='fun1', inputs=['a', 'b'],
... outputs=['c', 'd'])
'fun1'
>>> dsp.add_function(function_id='fun2', inputs=['a', 'd'],
... outputs=['c', 'e'])
'fun2'
Get the sub-dispatcher induced by given nodes bunch::
>>> sub_dsp = dsp.get_sub_dsp(['a', 'c', 'd', 'e', 'fun2'])
.. dispatcher:: sub_dsp
:opt: graph_attr={'ratio': '1'}
>>> sub_dsp.name = 'Sub-Dispatcher' | [
"Returns",
"the",
"sub",
"-",
"dispatcher",
"induced",
"by",
"given",
"node",
"and",
"edge",
"bunches",
"."
] | addb9fd685be81544b796c51383ac00a31543ce9 | https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/dispatcher.py#L1048-L1146 | train |
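Extending the doctest above with edges_bunch: removing fun2's edge to 'c' leaves 'c' isolated, so it is pruned, while fun2 survives through its remaining output 'e'.
import schedula as sh

dsp = sh.Dispatcher(name='Dispatcher')
dsp.add_function(function_id='fun1', inputs=['a', 'b'], outputs=['c', 'd'])
dsp.add_function(function_id='fun2', inputs=['a', 'd'], outputs=['c', 'e'])
sub = dsp.get_sub_dsp(['a', 'c', 'd', 'e', 'fun2'],
                      edges_bunch=[('fun2', 'c')])
print(sorted(sub.nodes))  # ['a', 'd', 'e', 'fun2'] -- 'c' dropped as isolated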
vinci1it2000/schedula | schedula/dispatcher.py | Dispatcher.data_nodes | def data_nodes(self):
"""
Returns all data nodes of the dispatcher.
:return:
All data nodes of the dispatcher.
:rtype: dict[str, dict]
"""
return {k: v for k, v in self.nodes.items() if v['type'] == 'data'} | python | def data_nodes(self):
"""
Returns all data nodes of the dispatcher.
:return:
All data nodes of the dispatcher.
:rtype: dict[str, dict]
"""
return {k: v for k, v in self.nodes.items() if v['type'] == 'data'} | [
"def",
"data_nodes",
"(",
"self",
")",
":",
"return",
"{",
"k",
":",
"v",
"for",
"k",
",",
"v",
"in",
"self",
".",
"nodes",
".",
"items",
"(",
")",
"if",
"v",
"[",
"'type'",
"]",
"==",
"'data'",
"}"
] | Returns all data nodes of the dispatcher.
:return:
All data nodes of the dispatcher.
:rtype: dict[str, dict] | [
"Returns",
"all",
"data",
"nodes",
"of",
"the",
"dispatcher",
"."
] | addb9fd685be81544b796c51383ac00a31543ce9 | https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/dispatcher.py#L1384-L1393 | train |
vinci1it2000/schedula | schedula/dispatcher.py | Dispatcher.function_nodes | def function_nodes(self):
"""
Returns all function nodes of the dispatcher.
:return:
All function nodes of the dispatcher.
:rtype: dict[str, dict]
"""
return {k: v for k, v in self.nodes.items() if v['type'] == 'function'} | python | def function_nodes(self):
"""
Returns all function nodes of the dispatcher.
:return:
All function nodes of the dispatcher.
:rtype: dict[str, dict]
"""
return {k: v for k, v in self.nodes.items() if v['type'] == 'function'} | [
"def",
"function_nodes",
"(",
"self",
")",
":",
"return",
"{",
"k",
":",
"v",
"for",
"k",
",",
"v",
"in",
"self",
".",
"nodes",
".",
"items",
"(",
")",
"if",
"v",
"[",
"'type'",
"]",
"==",
"'function'",
"}"
] | Returns all function nodes of the dispatcher.
:return:
All function nodes of the dispatcher.
:rtype: dict[str, dict] | [
"Returns",
"all",
"function",
"nodes",
"of",
"the",
"dispatcher",
"."
] | addb9fd685be81544b796c51383ac00a31543ce9 | https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/dispatcher.py#L1396-L1405 | train |
vinci1it2000/schedula | schedula/dispatcher.py | Dispatcher.sub_dsp_nodes | def sub_dsp_nodes(self):
"""
Returns all sub-dispatcher nodes of the dispatcher.
:return:
All sub-dispatcher nodes of the dispatcher.
:rtype: dict[str, dict]
"""
return {k: v for k, v in self.nodes.items() if
v['type'] == 'dispatcher'} | python | def sub_dsp_nodes(self):
"""
Returns all sub-dispatcher nodes of the dispatcher.
:return:
All sub-dispatcher nodes of the dispatcher.
:rtype: dict[str, dict]
"""
return {k: v for k, v in self.nodes.items() if
v['type'] == 'dispatcher'} | [
"def",
"sub_dsp_nodes",
"(",
"self",
")",
":",
"return",
"{",
"k",
":",
"v",
"for",
"k",
",",
"v",
"in",
"self",
".",
"nodes",
".",
"items",
"(",
")",
"if",
"v",
"[",
"'type'",
"]",
"==",
"'dispatcher'",
"}"
] | Returns all sub-dispatcher nodes of the dispatcher.
:return:
All sub-dispatcher nodes of the dispatcher.
:rtype: dict[str, dict] | [
"Returns",
"all",
"sub",
"-",
"dispatcher",
"nodes",
"of",
"the",
"dispatcher",
"."
] | addb9fd685be81544b796c51383ac00a31543ce9 | https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/dispatcher.py#L1408-L1418 | train |
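The three properties above (data_nodes, function_nodes, sub_dsp_nodes) are plain filters over Dispatcher.nodes on the 'type' attribute, as a quick check shows:
import schedula as sh

dsp = sh.Dispatcher()
dsp.add_function(function_id='min', function=min, inputs=['a', 'b'],
                 outputs=['c'])
print(sorted(dsp.data_nodes))      # ['a', 'b', 'c']
print(sorted(dsp.function_nodes))  # ['min']
print(dsp.sub_dsp_nodes)           # {} -- no sub-dispatcher nodes registered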
vinci1it2000/schedula | schedula/dispatcher.py | Dispatcher.blue | def blue(self, memo=None):
"""
Constructs a BlueDispatcher out of the current object.
:param memo:
A dictionary to cache Blueprints.
:type memo: dict[T,schedula.utils.blue.Blueprint]
:return:
A BlueDispatcher of the current object.
:rtype: schedula.utils.blue.BlueDispatcher
"""
memo = {} if memo is None else memo
if self in memo:
return memo[self]
from .utils.dsp import map_list
from .utils.blue import BlueDispatcher, _parent_blue
memo[self] = blue = BlueDispatcher(
executor=self.executor, name=self.name, raises=self.raises,
description=self.__doc__
)
dfl = self.default_values
key_map_data = ['data_id', {'value': 'default_value'}]
pred, succ = self.dmap.pred, self.dmap.succ
def _set_weight(n, r, d):
d = {i: j['weight'] for i, j in d.items() if 'weight' in j}
if d:
r[n] = d
for k, v in sorted(self.nodes.items(), key=lambda x: x[1]['index']):
v = v.copy()
t = v.pop('type')
del v['index']
if t == 'data':
method = 'add_data'
combine_dicts(map_list(key_map_data, k, dfl.get(k, {})), base=v)
elif t in ('function', 'dispatcher'):
method = 'add_%s' % t
if t == 'dispatcher':
t = 'dsp'
v['%s_id' % t] = k
del v['wait_inputs']
_set_weight('inp_weight', v, pred[k])
_set_weight('out_weight', v, succ[k])
if 'function' in v:
v[t] = _parent_blue(v.pop('function'), memo)
blue.deferred.append((method, v))
return blue | python | def blue(self, memo=None):
"""
Constructs a BlueDispatcher out of the current object.
:param memo:
A dictionary to cache Blueprints.
:type memo: dict[T,schedula.utils.blue.Blueprint]
:return:
A BlueDispatcher of the current object.
:rtype: schedula.utils.blue.BlueDispatcher
"""
memo = {} if memo is None else memo
if self in memo:
return memo[self]
from .utils.dsp import map_list
from .utils.blue import BlueDispatcher, _parent_blue
memo[self] = blue = BlueDispatcher(
executor=self.executor, name=self.name, raises=self.raises,
description=self.__doc__
)
dfl = self.default_values
key_map_data = ['data_id', {'value': 'default_value'}]
pred, succ = self.dmap.pred, self.dmap.succ
def _set_weight(n, r, d):
d = {i: j['weight'] for i, j in d.items() if 'weight' in j}
if d:
r[n] = d
for k, v in sorted(self.nodes.items(), key=lambda x: x[1]['index']):
v = v.copy()
t = v.pop('type')
del v['index']
if t == 'data':
method = 'add_data'
combine_dicts(map_list(key_map_data, k, dfl.get(k, {})), base=v)
elif t in ('function', 'dispatcher'):
method = 'add_%s' % t
if t == 'dispatcher':
t = 'dsp'
v['%s_id' % t] = k
del v['wait_inputs']
_set_weight('inp_weight', v, pred[k])
_set_weight('out_weight', v, succ[k])
if 'function' in v:
v[t] = _parent_blue(v.pop('function'), memo)
blue.deferred.append((method, v))
return blue | [
"def",
"blue",
"(",
"self",
",",
"memo",
"=",
"None",
")",
":",
"memo",
"=",
"{",
"}",
"if",
"memo",
"is",
"None",
"else",
"memo",
"if",
"self",
"in",
"memo",
":",
"return",
"memo",
"[",
"self",
"]",
"from",
".",
"utils",
".",
"dsp",
"import",
"map_list",
"from",
".",
"utils",
".",
"blue",
"import",
"BlueDispatcher",
",",
"_parent_blue",
"memo",
"[",
"self",
"]",
"=",
"blue",
"=",
"BlueDispatcher",
"(",
"executor",
"=",
"self",
".",
"executor",
",",
"name",
"=",
"self",
".",
"name",
",",
"raises",
"=",
"self",
".",
"raises",
",",
"description",
"=",
"self",
".",
"__doc__",
")",
"dfl",
"=",
"self",
".",
"default_values",
"key_map_data",
"=",
"[",
"'data_id'",
",",
"{",
"'value'",
":",
"'default_value'",
"}",
"]",
"pred",
",",
"succ",
"=",
"self",
".",
"dmap",
".",
"pred",
",",
"self",
".",
"dmap",
".",
"succ",
"def",
"_set_weight",
"(",
"n",
",",
"r",
",",
"d",
")",
":",
"d",
"=",
"{",
"i",
":",
"j",
"[",
"'weight'",
"]",
"for",
"i",
",",
"j",
"in",
"d",
".",
"items",
"(",
")",
"if",
"'weight'",
"in",
"j",
"}",
"if",
"d",
":",
"r",
"[",
"n",
"]",
"=",
"d",
"for",
"k",
",",
"v",
"in",
"sorted",
"(",
"self",
".",
"nodes",
".",
"items",
"(",
")",
",",
"key",
"=",
"lambda",
"x",
":",
"x",
"[",
"1",
"]",
"[",
"'index'",
"]",
")",
":",
"v",
"=",
"v",
".",
"copy",
"(",
")",
"t",
"=",
"v",
".",
"pop",
"(",
"'type'",
")",
"del",
"v",
"[",
"'index'",
"]",
"if",
"t",
"==",
"'data'",
":",
"method",
"=",
"'add_data'",
"combine_dicts",
"(",
"map_list",
"(",
"key_map_data",
",",
"k",
",",
"dfl",
".",
"get",
"(",
"k",
",",
"{",
"}",
")",
")",
",",
"base",
"=",
"v",
")",
"elif",
"t",
"in",
"(",
"'function'",
",",
"'dispatcher'",
")",
":",
"method",
"=",
"'add_%s'",
"%",
"t",
"if",
"t",
"==",
"'dispatcher'",
":",
"t",
"=",
"'dsp'",
"v",
"[",
"'%s_id'",
"%",
"t",
"]",
"=",
"k",
"del",
"v",
"[",
"'wait_inputs'",
"]",
"_set_weight",
"(",
"'inp_weight'",
",",
"v",
",",
"pred",
"[",
"k",
"]",
")",
"_set_weight",
"(",
"'out_weight'",
",",
"v",
",",
"succ",
"[",
"k",
"]",
")",
"if",
"'function'",
"in",
"v",
":",
"v",
"[",
"t",
"]",
"=",
"_parent_blue",
"(",
"v",
".",
"pop",
"(",
"'function'",
")",
",",
"memo",
")",
"blue",
".",
"deferred",
".",
"append",
"(",
"(",
"method",
",",
"v",
")",
")",
"return",
"blue"
] | Constructs a BlueDispatcher out of the current object.
:param memo:
A dictionary to cache Blueprints.
:type memo: dict[T,schedula.utils.blue.Blueprint]
:return:
A BlueDispatcher of the current object.
:rtype: schedula.utils.blue.BlueDispatcher | [
"Constructs",
"a",
"BlueDispatcher",
"out",
"of",
"the",
"current",
"object",
"."
] | addb9fd685be81544b796c51383ac00a31543ce9 | https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/dispatcher.py#L1437-L1485 | train |
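A round-trip sketch for blue(): dump the dispatcher to a BlueDispatcher and rebuild it; calling register() with no arguments is an assumption about schedula.utils.blue.Blueprint.
import schedula as sh

dsp = sh.Dispatcher(name='Dispatcher')
dsp.add_function(function_id='max', function=max, inputs=['a', 'b'],
                 outputs=['c'])
blue = dsp.blue()        # holds one deferred ('add_function', {...}) operation
clone = blue.register()  # assumed: replays the deferred calls into a fresh
                         # Dispatcher
print(sorted(clone.function_nodes))  # ['max']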
vinci1it2000/schedula | schedula/dispatcher.py | Dispatcher.extend | def extend(self, *blues, memo=None):
"""
Extends Dispatcher calling each deferred operation of given Blueprints.
:param blues:
Blueprints or Dispatchers to extend deferred operations.
:type blues: Blueprint | schedula.dispatcher.Dispatcher
:param memo:
A dictionary to cache Blueprints and Dispatchers.
:type memo: dict[T,schedula.utils.blue.Blueprint|Dispatcher]
:return:
Self.
:rtype: Dispatcher
**--------------------------------------------------------------------**
**Example**:
.. dispatcher:: dsp
:opt: graph_attr={'ratio': '1'}
:code:
>>> import schedula as sh
>>> dsp = sh.Dispatcher()
>>> dsp.add_func(callable, ['is_callable'])
'callable'
>>> blue = sh.BlueDispatcher().add_func(len, ['length'])
>>> dsp = sh.Dispatcher().extend(dsp, blue)
"""
from .utils.blue import BlueDispatcher as Blue
return Blue().extend(*blues, memo=memo).register(self, memo=memo) | python | def extend(self, *blues, memo=None):
"""
Extends Dispatcher calling each deferred operation of given Blueprints.
:param blues:
Blueprints or Dispatchers to extend deferred operations.
:type blues: Blueprint | schedula.dispatcher.Dispatcher
:param memo:
A dictionary to cache Blueprints and Dispatchers.
:type memo: dict[T,schedula.utils.blue.Blueprint|Dispatcher]
:return:
Self.
:rtype: Dispatcher
**--------------------------------------------------------------------**
**Example**:
.. dispatcher:: dsp
:opt: graph_attr={'ratio': '1'}
:code:
>>> import schedula as sh
>>> dsp = sh.Dispatcher()
>>> dsp.add_func(callable, ['is_callable'])
'callable'
>>> blue = sh.BlueDispatcher().add_func(len, ['length'])
>>> dsp = sh.Dispatcher().extend(dsp, blue)
"""
from .utils.blue import BlueDispatcher as Blue
return Blue().extend(*blues, memo=memo).register(self, memo=memo) | [
"def",
"extend",
"(",
"self",
",",
"*",
"blues",
",",
"memo",
"=",
"None",
")",
":",
"from",
".",
"utils",
".",
"blue",
"import",
"BlueDispatcher",
"as",
"Blue",
"return",
"Blue",
"(",
")",
".",
"extend",
"(",
"*",
"blues",
",",
"memo",
"=",
"memo",
")",
".",
"register",
"(",
"self",
",",
"memo",
"=",
"memo",
")"
] | Extends Dispatcher calling each deferred operation of given Blueprints.
:param blues:
Blueprints or Dispatchers to extend deferred operations.
:type blues: Blueprint | schedula.dispatcher.Dispatcher
:param memo:
A dictionary to cache Blueprints and Dispatchers.
:type memo: dict[T,schedula.utils.blue.Blueprint|Dispatcher]
:return:
Self.
:rtype: Dispatcher
**--------------------------------------------------------------------**
**Example**:
.. dispatcher:: dsp
:opt: graph_attr={'ratio': '1'}
:code:
>>> import schedula as sh
>>> dsp = sh.Dispatcher()
>>> dsp.add_func(callable, ['is_callable'])
'callable'
>>> blue = sh.BlueDispatcher().add_func(len, ['length'])
>>> dsp = sh.Dispatcher().extend(dsp, blue) | [
"Extends",
"Dispatcher",
"calling",
"each",
"deferred",
"operation",
"of",
"given",
"Blueprints",
"."
] | addb9fd685be81544b796c51383ac00a31543ce9 | https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/dispatcher.py#L1487-L1519 | train |
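Continuing the docstring example, the extended dispatcher contains the function nodes contributed by both sources:
import schedula as sh

dsp = sh.Dispatcher()
dsp.add_func(callable, ['is_callable'])
blue = sh.BlueDispatcher().add_func(len, ['length'])
merged = sh.Dispatcher().extend(dsp, blue)
print(sorted(merged.function_nodes))  # ['callable', 'len']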
vinci1it2000/schedula | schedula/dispatcher.py | Dispatcher.dispatch | def dispatch(self, inputs=None, outputs=None, cutoff=None, inputs_dist=None,
wildcard=False, no_call=False, shrink=False,
rm_unused_nds=False, select_output_kw=None, _wait_in=None,
stopper=None, executor=False, sol_name=()):
"""
Evaluates the minimum workflow and data outputs of the dispatcher
model from given inputs.
:param inputs:
Input data values.
:type inputs: dict[str, T], list[str], iterable, optional
:param outputs:
Ending data nodes.
:type outputs: list[str], iterable, optional
:param cutoff:
Depth to stop the search.
:type cutoff: float, int, optional
:param inputs_dist:
Initial distances of input data nodes.
:type inputs_dist: dict[str, int | float], optional
:param wildcard:
If True, when the data node is used as input and target in the
ArciDispatch algorithm, the input value will be used as input for
the connected functions, but not as output.
:type wildcard: bool, optional
:param no_call:
If True data node estimation function is not used and the input
values are not used.
:type no_call: bool, optional
:param shrink:
If True, the dispatcher is shrunk before dispatching.
.. seealso:: :func:`shrink_dsp`
:type shrink: bool, optional
:param rm_unused_nds:
If True unused function and sub-dispatcher nodes are removed from
workflow.
:type rm_unused_nds: bool, optional
:param select_output_kw:
Kwargs of selector function to select specific outputs.
:type select_output_kw: dict, optional
:param _wait_in:
Override wait inputs.
:type _wait_in: dict, optional
:param stopper:
A semaphore to abort the dispatching.
:type stopper: multiprocess.Event, optional
:param executor:
A pool executor id to dispatch asynchronously or in parallel.
:type executor: str, optional
:param sol_name:
Solution name.
:type sol_name: tuple[str], optional
:return:
Dictionary of estimated data node outputs.
:rtype: schedula.utils.sol.Solution
**--------------------------------------------------------------------**
**Example**:
A dispatcher with a function :math:`log(b - a)` and three data nodes `a`,
`b`, and `d` with default values:
.. dispatcher:: dsp
:opt: graph_attr={'ratio': '1'}
>>> dsp = Dispatcher(name='Dispatcher')
>>> dsp.add_data(data_id='a', default_value=0)
'a'
>>> dsp.add_data(data_id='b', default_value=5)
'b'
>>> dsp.add_data(data_id='d', default_value=1)
'd'
>>> from math import log
>>> def my_log(a, b):
... return log(b - a)
>>> def my_domain(a, b):
... return a < b
>>> dsp.add_function('log(b - a)', function=my_log,
... inputs=['c', 'd'],
... outputs=['e'], input_domain=my_domain)
'log(b - a)'
>>> dsp.add_function('min', function=min, inputs=['a', 'b'],
... outputs=['c'])
'min'
Dispatch without inputs. The default values are used as inputs:
.. dispatcher:: outputs
:opt: graph_attr={'ratio': '1'}
:code:
>>> outputs = dsp.dispatch()
>>> outputs
Solution([('a', 0), ('b', 5), ('d', 1), ('c', 0), ('e', 0.0)])
Dispatch until data node `c` is estimated:
.. dispatcher:: outputs
:opt: graph_attr={'ratio': '1'}
:code:
>>> outputs = dsp.dispatch(outputs=['c'])
>>> outputs
Solution([('a', 0), ('b', 5), ('c', 0)])
Dispatch with one input. The default value of `a` is not used as
an input:
.. dispatcher:: outputs
:opt: graph_attr={'ratio': '1'}
:code:
>>> outputs = dsp.dispatch(inputs={'a': 3})
>>> outputs
Solution([('a', 3), ('b', 5), ('d', 1), ('c', 3)])
"""
dsp = self
if not no_call:
if shrink: # Pre shrink.
dsp = self.shrink_dsp(
inputs, outputs, cutoff, inputs_dist, wildcard
)
elif outputs:
dsp = self.get_sub_dsp_from_workflow(
outputs, self.dmap, reverse=True, blockers=inputs,
wildcard=wildcard
)
# Initialize.
self.solution = sol = self.solution.__class__(
dsp, inputs, outputs, wildcard, cutoff, inputs_dist, no_call,
rm_unused_nds, _wait_in, full_name=sol_name
)
# Dispatch.
sol._run(stopper=stopper, executor=executor)
if select_output_kw:
return selector(dictionary=sol, **select_output_kw)
# Return the evaluated data outputs.
return sol | python | def dispatch(self, inputs=None, outputs=None, cutoff=None, inputs_dist=None,
wildcard=False, no_call=False, shrink=False,
rm_unused_nds=False, select_output_kw=None, _wait_in=None,
stopper=None, executor=False, sol_name=()):
"""
Evaluates the minimum workflow and data outputs of the dispatcher
model from given inputs.
:param inputs:
Input data values.
:type inputs: dict[str, T], list[str], iterable, optional
:param outputs:
Ending data nodes.
:type outputs: list[str], iterable, optional
:param cutoff:
Depth to stop the search.
:type cutoff: float, int, optional
:param inputs_dist:
Initial distances of input data nodes.
:type inputs_dist: dict[str, int | float], optional
:param wildcard:
If True, when the data node is used as input and target in the
ArciDispatch algorithm, the input value will be used as input for
the connected functions, but not as output.
:type wildcard: bool, optional
:param no_call:
If True data node estimation function is not used and the input
values are not used.
:type no_call: bool, optional
:param shrink:
If True, the dispatcher is shrunk before dispatching.
.. seealso:: :func:`shrink_dsp`
:type shrink: bool, optional
:param rm_unused_nds:
If True unused function and sub-dispatcher nodes are removed from
workflow.
:type rm_unused_nds: bool, optional
:param select_output_kw:
Kwargs of selector function to select specific outputs.
:type select_output_kw: dict, optional
:param _wait_in:
Override wait inputs.
:type _wait_in: dict, optional
:param stopper:
A semaphore to abort the dispatching.
:type stopper: multiprocess.Event, optional
:param executor:
A pool executor id to dispatch asynchronously or in parallel.
:type executor: str, optional
:param sol_name:
Solution name.
:type sol_name: tuple[str], optional
:return:
Dictionary of estimated data node outputs.
:rtype: schedula.utils.sol.Solution
**--------------------------------------------------------------------**
**Example**:
A dispatcher with a function :math:`log(b - a)` and three data nodes `a`,
`b`, and `d` with default values:
.. dispatcher:: dsp
:opt: graph_attr={'ratio': '1'}
>>> dsp = Dispatcher(name='Dispatcher')
>>> dsp.add_data(data_id='a', default_value=0)
'a'
>>> dsp.add_data(data_id='b', default_value=5)
'b'
>>> dsp.add_data(data_id='d', default_value=1)
'd'
>>> from math import log
>>> def my_log(a, b):
... return log(b - a)
>>> def my_domain(a, b):
... return a < b
>>> dsp.add_function('log(b - a)', function=my_log,
... inputs=['c', 'd'],
... outputs=['e'], input_domain=my_domain)
'log(b - a)'
>>> dsp.add_function('min', function=min, inputs=['a', 'b'],
... outputs=['c'])
'min'
Dispatch without inputs. The default values are used as inputs:
.. dispatcher:: outputs
:opt: graph_attr={'ratio': '1'}
:code:
>>> outputs = dsp.dispatch()
>>> outputs
Solution([('a', 0), ('b', 5), ('d', 1), ('c', 0), ('e', 0.0)])
Dispatch until data node `c` is estimated:
.. dispatcher:: outputs
:opt: graph_attr={'ratio': '1'}
:code:
>>> outputs = dsp.dispatch(outputs=['c'])
>>> outputs
Solution([('a', 0), ('b', 5), ('c', 0)])
Dispatch with one input. The default value of `a` is not used as
an input:
.. dispatcher:: outputs
:opt: graph_attr={'ratio': '1'}
:code:
>>> outputs = dsp.dispatch(inputs={'a': 3})
>>> outputs
Solution([('a', 3), ('b', 5), ('d', 1), ('c', 3)])
"""
dsp = self
if not no_call:
if shrink: # Pre shrink.
dsp = self.shrink_dsp(
inputs, outputs, cutoff, inputs_dist, wildcard
)
elif outputs:
dsp = self.get_sub_dsp_from_workflow(
outputs, self.dmap, reverse=True, blockers=inputs,
wildcard=wildcard
)
# Initialize.
self.solution = sol = self.solution.__class__(
dsp, inputs, outputs, wildcard, cutoff, inputs_dist, no_call,
rm_unused_nds, _wait_in, full_name=sol_name
)
# Dispatch.
sol._run(stopper=stopper, executor=executor)
if select_output_kw:
return selector(dictionary=sol, **select_output_kw)
# Return the evaluated data outputs.
return sol | [
"def",
"dispatch",
"(",
"self",
",",
"inputs",
"=",
"None",
",",
"outputs",
"=",
"None",
",",
"cutoff",
"=",
"None",
",",
"inputs_dist",
"=",
"None",
",",
"wildcard",
"=",
"False",
",",
"no_call",
"=",
"False",
",",
"shrink",
"=",
"False",
",",
"rm_unused_nds",
"=",
"False",
",",
"select_output_kw",
"=",
"None",
",",
"_wait_in",
"=",
"None",
",",
"stopper",
"=",
"None",
",",
"executor",
"=",
"False",
",",
"sol_name",
"=",
"(",
")",
")",
":",
"dsp",
"=",
"self",
"if",
"not",
"no_call",
":",
"if",
"shrink",
":",
"# Pre shrink.",
"dsp",
"=",
"self",
".",
"shrink_dsp",
"(",
"inputs",
",",
"outputs",
",",
"cutoff",
",",
"inputs_dist",
",",
"wildcard",
")",
"elif",
"outputs",
":",
"dsp",
"=",
"self",
".",
"get_sub_dsp_from_workflow",
"(",
"outputs",
",",
"self",
".",
"dmap",
",",
"reverse",
"=",
"True",
",",
"blockers",
"=",
"inputs",
",",
"wildcard",
"=",
"wildcard",
")",
"# Initialize.",
"self",
".",
"solution",
"=",
"sol",
"=",
"self",
".",
"solution",
".",
"__class__",
"(",
"dsp",
",",
"inputs",
",",
"outputs",
",",
"wildcard",
",",
"cutoff",
",",
"inputs_dist",
",",
"no_call",
",",
"rm_unused_nds",
",",
"_wait_in",
",",
"full_name",
"=",
"sol_name",
")",
"# Dispatch.",
"sol",
".",
"_run",
"(",
"stopper",
"=",
"stopper",
",",
"executor",
"=",
"executor",
")",
"if",
"select_output_kw",
":",
"return",
"selector",
"(",
"dictionary",
"=",
"sol",
",",
"*",
"*",
"select_output_kw",
")",
"# Return the evaluated data outputs.",
"return",
"sol"
] | Evaluates the minimum workflow and data outputs of the dispatcher
model from given inputs.
:param inputs:
Input data values.
:type inputs: dict[str, T], list[str], iterable, optional
:param outputs:
Ending data nodes.
:type outputs: list[str], iterable, optional
:param cutoff:
Depth to stop the search.
:type cutoff: float, int, optional
:param inputs_dist:
Initial distances of input data nodes.
:type inputs_dist: dict[str, int | float], optional
:param wildcard:
If True, when the data node is used as input and target in the
ArciDispatch algorithm, the input value will be used as input for
the connected functions, but not as output.
:type wildcard: bool, optional
:param no_call:
If True data node estimation function is not used and the input
values are not used.
:type no_call: bool, optional
:param shrink:
If True, the dispatcher is shrunk before dispatching.
.. seealso:: :func:`shrink_dsp`
:type shrink: bool, optional
:param rm_unused_nds:
If True unused function and sub-dispatcher nodes are removed from
workflow.
:type rm_unused_nds: bool, optional
:param select_output_kw:
Kwargs of selector function to select specific outputs.
:type select_output_kw: dict, optional
:param _wait_in:
Override wait inputs.
:type _wait_in: dict, optional
:param stopper:
A semaphore to abort the dispatching.
:type stopper: multiprocess.Event, optional
:param executor:
A pool executor id to dispatch asynchronously or in parallel.
:type executor: str, optional
:param sol_name:
Solution name.
:type sol_name: tuple[str], optional
:return:
Dictionary of estimated data node outputs.
:rtype: schedula.utils.sol.Solution
**--------------------------------------------------------------------**
**Example**:
A dispatcher with a function :math:`log(b - a)` and three data nodes `a`,
`b`, and `d` with default values:
.. dispatcher:: dsp
:opt: graph_attr={'ratio': '1'}
>>> dsp = Dispatcher(name='Dispatcher')
>>> dsp.add_data(data_id='a', default_value=0)
'a'
>>> dsp.add_data(data_id='b', default_value=5)
'b'
>>> dsp.add_data(data_id='d', default_value=1)
'd'
>>> from math import log
>>> def my_log(a, b):
... return log(b - a)
>>> def my_domain(a, b):
... return a < b
>>> dsp.add_function('log(b - a)', function=my_log,
... inputs=['c', 'd'],
... outputs=['e'], input_domain=my_domain)
'log(b - a)'
>>> dsp.add_function('min', function=min, inputs=['a', 'b'],
... outputs=['c'])
'min'
Dispatch without inputs. The default values are used as inputs:
.. dispatcher:: outputs
:opt: graph_attr={'ratio': '1'}
:code:
>>> outputs = dsp.dispatch()
>>> outputs
Solution([('a', 0), ('b', 5), ('d', 1), ('c', 0), ('e', 0.0)])
Dispatch until data node `c` is estimated:
.. dispatcher:: outputs
:opt: graph_attr={'ratio': '1'}
:code:
>>> outputs = dsp.dispatch(outputs=['c'])
>>> outputs
Solution([('a', 0), ('b', 5), ('c', 0)])
Dispatch with one input. The default value of `a` is not used as
an input:
.. dispatcher:: outputs
:opt: graph_attr={'ratio': '1'}
:code:
>>> outputs = dsp.dispatch(inputs={'a': 3})
>>> outputs
Solution([('a', 3), ('b', 5), ('d', 1), ('c', 3)]) | [
"Evaluates",
"the",
"minimum",
"workflow",
"and",
"data",
"outputs",
"of",
"the",
"dispatcher",
"model",
"from",
"given",
"inputs",
"."
] | addb9fd685be81544b796c51383ac00a31543ce9 | https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/dispatcher.py#L1521-L1679 | train |
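Supplementing the doctests above: the returned Solution behaves like a dict, so single outputs are plain lookups (the lambda stands in for the my_log helper shown above).
import schedula as sh
from math import log

dsp = sh.Dispatcher(name='Dispatcher')
dsp.add_data(data_id='a', default_value=0)
dsp.add_data(data_id='b', default_value=5)
dsp.add_function('log(b - a)', function=lambda a, b: log(b - a),
                 inputs=['a', 'b'], outputs=['e'])
sol = dsp.dispatch(inputs={'a': 1}, outputs=['e'])
print(sol['e'])  # log(5 - 1) = 1.3862943611198906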
vinci1it2000/schedula | schedula/dispatcher.py | Dispatcher.shrink_dsp | def shrink_dsp(self, inputs=None, outputs=None, cutoff=None,
inputs_dist=None, wildcard=True):
"""
Returns a reduced dispatcher.
:param inputs:
Input data nodes.
:type inputs: list[str], iterable, optional
:param outputs:
Ending data nodes.
:type outputs: list[str], iterable, optional
:param cutoff:
Depth to stop the search.
:type cutoff: float, int, optional
:param inputs_dist:
Initial distances of input data nodes.
:type inputs_dist: dict[str, int | float], optional
:param wildcard:
If True, when the data node is used as input and target in the
ArciDispatch algorithm, the input value will be used as input for
the connected functions, but not as output.
:type wildcard: bool, optional
:return:
A sub-dispatcher.
:rtype: Dispatcher
.. seealso:: :func:`dispatch`
**--------------------------------------------------------------------**
**Example**:
A dispatcher like this:
.. dispatcher:: dsp
:opt: graph_attr={'ratio': '1'}
>>> dsp = Dispatcher(name='Dispatcher')
>>> functions = [
... {
... 'function_id': 'fun1',
... 'inputs': ['a', 'b'],
... 'outputs': ['c']
... },
... {
... 'function_id': 'fun2',
... 'inputs': ['b', 'd'],
... 'outputs': ['e']
... },
... {
... 'function_id': 'fun3',
... 'function': min,
... 'inputs': ['d', 'f'],
... 'outputs': ['g']
... },
... {
... 'function_id': 'fun4',
... 'function': max,
... 'inputs': ['a', 'b'],
... 'outputs': ['g']
... },
... {
... 'function_id': 'fun5',
... 'function': max,
... 'inputs': ['d', 'e'],
... 'outputs': ['c', 'f']
... },
... ]
>>> dsp.add_from_lists(fun_list=functions)
([], [...])
Get the sub-dispatcher induced by dispatching with no calls from inputs
`a`, `b`, and `d` to outputs `c` and `f`::
>>> shrink_dsp = dsp.shrink_dsp(inputs=['a', 'b', 'd'],
... outputs=['c', 'f'])
.. dispatcher:: shrink_dsp
:opt: graph_attr={'ratio': '1'}
>>> shrink_dsp.name = 'Sub-Dispatcher'
"""
bfs = None
if inputs:
# Get all data nodes no wait inputs.
wait_in = self._get_wait_in(flag=False)
# Evaluate the workflow graph without invoking functions.
o = self.dispatch(
inputs, outputs, cutoff, inputs_dist, wildcard, True, False,
True, _wait_in=wait_in
)
data_nodes = self.data_nodes # Get data nodes.
from .utils.alg import _union_workflow, _convert_bfs
bfs = _union_workflow(o) # bfs edges.
# Set minimum initial distances.
if inputs_dist:
inputs_dist = combine_dicts(o.dist, inputs_dist)
else:
inputs_dist = o.dist
# Set data nodes to wait inputs.
wait_in = self._get_wait_in(flag=True)
while True: # Start shrinking loop.
# Evaluate the workflow graph without invoking functions.
o = self.dispatch(
inputs, outputs, cutoff, inputs_dist, wildcard, True, False,
False, _wait_in=wait_in
)
_union_workflow(o, bfs=bfs) # Update bfs.
n_d, status = o._remove_wait_in() # Remove wait input flags.
if not status:
break # Stop iteration.
# Update inputs.
inputs = n_d.intersection(data_nodes).union(inputs)
# Update outputs and convert bfs into DiGraphs.
outputs, bfs = outputs or o, _convert_bfs(bfs)
elif not outputs:
return self.copy_structure() # Empty Dispatcher.
# Get sub dispatcher breadth-first-search graph.
dsp = self._get_dsp_from_bfs(outputs, bfs_graphs=bfs)
return dsp | python | def shrink_dsp(self, inputs=None, outputs=None, cutoff=None,
inputs_dist=None, wildcard=True):
"""
Returns a reduced dispatcher.
:param inputs:
Input data nodes.
:type inputs: list[str], iterable, optional
:param outputs:
Ending data nodes.
:type outputs: list[str], iterable, optional
:param cutoff:
Depth to stop the search.
:type cutoff: float, int, optional
:param inputs_dist:
Initial distances of input data nodes.
:type inputs_dist: dict[str, int | float], optional
:param wildcard:
If True, when the data node is used as input and target in the
ArciDispatch algorithm, the input value will be used as input for
the connected functions, but not as output.
:type wildcard: bool, optional
:return:
A sub-dispatcher.
:rtype: Dispatcher
.. seealso:: :func:`dispatch`
**--------------------------------------------------------------------**
**Example**:
A dispatcher like this:
.. dispatcher:: dsp
:opt: graph_attr={'ratio': '1'}
>>> dsp = Dispatcher(name='Dispatcher')
>>> functions = [
... {
... 'function_id': 'fun1',
... 'inputs': ['a', 'b'],
... 'outputs': ['c']
... },
... {
... 'function_id': 'fun2',
... 'inputs': ['b', 'd'],
... 'outputs': ['e']
... },
... {
... 'function_id': 'fun3',
... 'function': min,
... 'inputs': ['d', 'f'],
... 'outputs': ['g']
... },
... {
... 'function_id': 'fun4',
... 'function': max,
... 'inputs': ['a', 'b'],
... 'outputs': ['g']
... },
... {
... 'function_id': 'fun5',
... 'function': max,
... 'inputs': ['d', 'e'],
... 'outputs': ['c', 'f']
... },
... ]
>>> dsp.add_from_lists(fun_list=functions)
([], [...])
Get the sub-dispatcher induced by dispatching with no calls from inputs
`a`, `b`, and `d` to outputs `c` and `f`::
>>> shrink_dsp = dsp.shrink_dsp(inputs=['a', 'b', 'd'],
... outputs=['c', 'f'])
.. dispatcher:: shrink_dsp
:opt: graph_attr={'ratio': '1'}
>>> shrink_dsp.name = 'Sub-Dispatcher'
"""
bfs = None
if inputs:
# Get all data nodes no wait inputs.
wait_in = self._get_wait_in(flag=False)
# Evaluate the workflow graph without invoking functions.
o = self.dispatch(
inputs, outputs, cutoff, inputs_dist, wildcard, True, False,
True, _wait_in=wait_in
)
data_nodes = self.data_nodes # Get data nodes.
from .utils.alg import _union_workflow, _convert_bfs
bfs = _union_workflow(o) # bfs edges.
# Set minimum initial distances.
if inputs_dist:
inputs_dist = combine_dicts(o.dist, inputs_dist)
else:
inputs_dist = o.dist
# Set data nodes to wait inputs.
wait_in = self._get_wait_in(flag=True)
while True: # Start shrinking loop.
# Evaluate the workflow graph without invoking functions.
o = self.dispatch(
inputs, outputs, cutoff, inputs_dist, wildcard, True, False,
False, _wait_in=wait_in
)
_union_workflow(o, bfs=bfs) # Update bfs.
n_d, status = o._remove_wait_in() # Remove wait input flags.
if not status:
break # Stop iteration.
# Update inputs.
inputs = n_d.intersection(data_nodes).union(inputs)
# Update outputs and convert bfs into DiGraphs.
outputs, bfs = outputs or o, _convert_bfs(bfs)
elif not outputs:
return self.copy_structure() # Empty Dispatcher.
# Get sub dispatcher breadth-first-search graph.
dsp = self._get_dsp_from_bfs(outputs, bfs_graphs=bfs)
return dsp | [
"def",
"shrink_dsp",
"(",
"self",
",",
"inputs",
"=",
"None",
",",
"outputs",
"=",
"None",
",",
"cutoff",
"=",
"None",
",",
"inputs_dist",
"=",
"None",
",",
"wildcard",
"=",
"True",
")",
":",
"bfs",
"=",
"None",
"if",
"inputs",
":",
"# Get all data nodes no wait inputs.",
"wait_in",
"=",
"self",
".",
"_get_wait_in",
"(",
"flag",
"=",
"False",
")",
"# Evaluate the workflow graph without invoking functions.",
"o",
"=",
"self",
".",
"dispatch",
"(",
"inputs",
",",
"outputs",
",",
"cutoff",
",",
"inputs_dist",
",",
"wildcard",
",",
"True",
",",
"False",
",",
"True",
",",
"_wait_in",
"=",
"wait_in",
")",
"data_nodes",
"=",
"self",
".",
"data_nodes",
"# Get data nodes.",
"from",
".",
"utils",
".",
"alg",
"import",
"_union_workflow",
",",
"_convert_bfs",
"bfs",
"=",
"_union_workflow",
"(",
"o",
")",
"# bfg edges.",
"# Set minimum initial distances.",
"if",
"inputs_dist",
":",
"inputs_dist",
"=",
"combine_dicts",
"(",
"o",
".",
"dist",
",",
"inputs_dist",
")",
"else",
":",
"inputs_dist",
"=",
"o",
".",
"dist",
"# Set data nodes to wait inputs.",
"wait_in",
"=",
"self",
".",
"_get_wait_in",
"(",
"flag",
"=",
"True",
")",
"while",
"True",
":",
"# Start shrinking loop.",
"# Evaluate the workflow graph without invoking functions.",
"o",
"=",
"self",
".",
"dispatch",
"(",
"inputs",
",",
"outputs",
",",
"cutoff",
",",
"inputs_dist",
",",
"wildcard",
",",
"True",
",",
"False",
",",
"False",
",",
"_wait_in",
"=",
"wait_in",
")",
"_union_workflow",
"(",
"o",
",",
"bfs",
"=",
"bfs",
")",
"# Update bfs.",
"n_d",
",",
"status",
"=",
"o",
".",
"_remove_wait_in",
"(",
")",
"# Remove wait input flags.",
"if",
"not",
"status",
":",
"break",
"# Stop iteration.",
"# Update inputs.",
"inputs",
"=",
"n_d",
".",
"intersection",
"(",
"data_nodes",
")",
".",
"union",
"(",
"inputs",
")",
"# Update outputs and convert bfs in DiGraphs.",
"outputs",
",",
"bfs",
"=",
"outputs",
"or",
"o",
",",
"_convert_bfs",
"(",
"bfs",
")",
"elif",
"not",
"outputs",
":",
"return",
"self",
".",
"copy_structure",
"(",
")",
"# Empty Dispatcher.",
"# Get sub dispatcher breadth-first-search graph.",
"dsp",
"=",
"self",
".",
"_get_dsp_from_bfs",
"(",
"outputs",
",",
"bfs_graphs",
"=",
"bfs",
")",
"return",
"dsp"
] | Returns a reduced dispatcher.
:param inputs:
Input data nodes.
:type inputs: list[str], iterable, optional
:param outputs:
Ending data nodes.
:type outputs: list[str], iterable, optional
:param cutoff:
Depth to stop the search.
:type cutoff: float, int, optional
:param inputs_dist:
Initial distances of input data nodes.
:type inputs_dist: dict[str, int | float], optional
:param wildcard:
If True, when the data node is used as input and target in the
ArciDispatch algorithm, the input value will be used as input for
the connected functions, but not as output.
:type wildcard: bool, optional
:return:
A sub-dispatcher.
:rtype: Dispatcher
.. seealso:: :func:`dispatch`
**--------------------------------------------------------------------**
**Example**:
A dispatcher like this:
.. dispatcher:: dsp
:opt: graph_attr={'ratio': '1'}
>>> dsp = Dispatcher(name='Dispatcher')
>>> functions = [
... {
... 'function_id': 'fun1',
... 'inputs': ['a', 'b'],
... 'outputs': ['c']
... },
... {
... 'function_id': 'fun2',
... 'inputs': ['b', 'd'],
... 'outputs': ['e']
... },
... {
... 'function_id': 'fun3',
... 'function': min,
... 'inputs': ['d', 'f'],
... 'outputs': ['g']
... },
... {
... 'function_id': 'fun4',
... 'function': max,
... 'inputs': ['a', 'b'],
... 'outputs': ['g']
... },
... {
... 'function_id': 'fun5',
... 'function': max,
... 'inputs': ['d', 'e'],
... 'outputs': ['c', 'f']
... },
... ]
>>> dsp.add_from_lists(fun_list=functions)
([], [...])
Get the sub-dispatcher induced by dispatching with no calls from inputs
`a`, `b`, and `d` to outputs `c` and `f`::
>>> shrink_dsp = dsp.shrink_dsp(inputs=['a', 'b', 'd'],
... outputs=['c', 'f'])
.. dispatcher:: shrink_dsp
:opt: graph_attr={'ratio': '1'}
>>> shrink_dsp.name = 'Sub-Dispatcher' | [
"Returns",
"a",
"reduced",
"dispatcher",
"."
] | addb9fd685be81544b796c51383ac00a31543ce9 | https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/dispatcher.py#L1684-L1823 | train |
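A smaller self-contained shrink example: with inputs ['a'] and outputs ['c'], the unreachable branch through 'b' is pruned away.
import schedula as sh

dsp = sh.Dispatcher()
dsp.add_function('f1', function=lambda a: a + 1, inputs=['a'], outputs=['c'])
dsp.add_function('f2', function=lambda b: b * 2, inputs=['b'], outputs=['d'])
shrunk = dsp.shrink_dsp(inputs=['a'], outputs=['c'])
print(sorted(shrunk.nodes))  # ['a', 'c', 'f1'] -- 'b', 'd', 'f2' removed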
vinci1it2000/schedula | schedula/dispatcher.py | Dispatcher._get_dsp_from_bfs | def _get_dsp_from_bfs(self, outputs, bfs_graphs=None):
"""
Returns the sub-dispatcher induced by the workflow from outputs.
:param outputs:
Ending data nodes.
:type outputs: list[str], iterable, optional
:param bfs_graphs:
A dictionary of directed graphs on which the breadth-first
search is evaluated.
:type bfs_graphs: dict[str | Token, networkx.DiGraph | dict], optional
:return:
A sub-dispatcher
:rtype: Dispatcher
"""
bfs = bfs_graphs[NONE] if bfs_graphs is not None else self.dmap
# Get sub dispatcher breadth-first-search graph.
dsp = self.get_sub_dsp_from_workflow(
sources=outputs, graph=bfs, reverse=True, _update_links=False
)
# Namespace shortcuts.
succ, nodes, pred = dsp.dmap.succ, dsp.nodes, dsp.dmap.pred
rm_edges, nds = dsp.dmap.remove_edges_from, dsp.data_nodes
from .utils.alg import _nodes, _get_sub_out, _update_io
for n in dsp.sub_dsp_nodes:
a = nodes[n] = nodes[n].copy()
bfs = bfs_graphs[n] if bfs_graphs is not None else None
out = _get_sub_out(a, succ[n])
if 'input_domain' in a:
out.update(_nodes(a['inputs'].values()))
a['function'] = a['function']._get_dsp_from_bfs(out, bfs)
i, o = _update_io(a, pred[n], succ[n]) # Unreachable nodes.
rm_edges({(u, n) for u in i}.union(((n, u) for u in o)))
return dsp | python | def _get_dsp_from_bfs(self, outputs, bfs_graphs=None):
"""
Returns the sub-dispatcher induced by the workflow from outputs.
:param outputs:
Ending data nodes.
:type outputs: list[str], iterable, optional
:param bfs_graphs:
A dictionary of directed graphs in which the
breadth-first search is evaluated.
:type bfs_graphs: dict[str | Token, networkx.DiGraph | dict], optional
:return:
A sub-dispatcher
:rtype: Dispatcher
"""
bfs = bfs_graphs[NONE] if bfs_graphs is not None else self.dmap
# Get sub dispatcher breadth-first-search graph.
dsp = self.get_sub_dsp_from_workflow(
sources=outputs, graph=bfs, reverse=True, _update_links=False
)
# Namespace shortcuts.
succ, nodes, pred = dsp.dmap.succ, dsp.nodes, dsp.dmap.pred
rm_edges, nds = dsp.dmap.remove_edges_from, dsp.data_nodes
from .utils.alg import _nodes, _get_sub_out, _update_io
for n in dsp.sub_dsp_nodes:
a = nodes[n] = nodes[n].copy()
bfs = bfs_graphs[n] if bfs_graphs is not None else None
out = _get_sub_out(a, succ[n])
if 'input_domain' in a:
out.update(_nodes(a['inputs'].values()))
a['function'] = a['function']._get_dsp_from_bfs(out, bfs)
i, o = _update_io(a, pred[n], succ[n]) # Unreachable nodes.
rm_edges({(u, n) for u in i}.union(((n, u) for u in o)))
return dsp | [
"def",
"_get_dsp_from_bfs",
"(",
"self",
",",
"outputs",
",",
"bfs_graphs",
"=",
"None",
")",
":",
"bfs",
"=",
"bfs_graphs",
"[",
"NONE",
"]",
"if",
"bfs_graphs",
"is",
"not",
"None",
"else",
"self",
".",
"dmap",
"# Get sub dispatcher breadth-first-search graph.",
"dsp",
"=",
"self",
".",
"get_sub_dsp_from_workflow",
"(",
"sources",
"=",
"outputs",
",",
"graph",
"=",
"bfs",
",",
"reverse",
"=",
"True",
",",
"_update_links",
"=",
"False",
")",
"# Namespace shortcuts.",
"succ",
",",
"nodes",
",",
"pred",
"=",
"dsp",
".",
"dmap",
".",
"succ",
",",
"dsp",
".",
"nodes",
",",
"dsp",
".",
"dmap",
".",
"pred",
"rm_edges",
",",
"nds",
"=",
"dsp",
".",
"dmap",
".",
"remove_edges_from",
",",
"dsp",
".",
"data_nodes",
"from",
".",
"utils",
".",
"alg",
"import",
"_nodes",
",",
"_get_sub_out",
",",
"_update_io",
"for",
"n",
"in",
"dsp",
".",
"sub_dsp_nodes",
":",
"a",
"=",
"nodes",
"[",
"n",
"]",
"=",
"nodes",
"[",
"n",
"]",
".",
"copy",
"(",
")",
"bfs",
"=",
"bfs_graphs",
"[",
"n",
"]",
"if",
"bfs_graphs",
"is",
"not",
"None",
"else",
"None",
"out",
"=",
"_get_sub_out",
"(",
"a",
",",
"succ",
"[",
"n",
"]",
")",
"if",
"'input_domain'",
"in",
"a",
":",
"out",
".",
"update",
"(",
"_nodes",
"(",
"a",
"[",
"'inputs'",
"]",
".",
"values",
"(",
")",
")",
")",
"a",
"[",
"'function'",
"]",
"=",
"a",
"[",
"'function'",
"]",
".",
"_get_dsp_from_bfs",
"(",
"out",
",",
"bfs",
")",
"i",
",",
"o",
"=",
"_update_io",
"(",
"a",
",",
"pred",
"[",
"n",
"]",
",",
"succ",
"[",
"n",
"]",
")",
"# Unreachable nodes.",
"rm_edges",
"(",
"{",
"(",
"u",
",",
"n",
")",
"for",
"u",
"in",
"i",
"}",
".",
"union",
"(",
"(",
"(",
"n",
",",
"u",
")",
"for",
"u",
"in",
"o",
")",
")",
")",
"return",
"dsp"
] | Returns the sub-dispatcher induced by the workflow from outputs.
:param outputs:
Ending data nodes.
:type outputs: list[str], iterable, optional
:param bfs_graphs:
A dictionary of directed graphs in which the
breadth-first search is evaluated.
:type bfs_graphs: dict[str | Token, networkx.DiGraph | dict], optional
:return:
A sub-dispatcher
:rtype: Dispatcher | [
"Returns",
"the",
"sub",
"-",
"dispatcher",
"induced",
"by",
"the",
"workflow",
"from",
"outputs",
"."
] | addb9fd685be81544b796c51383ac00a31543ce9 | https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/schedula/dispatcher.py#L1825-L1868 | train |
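The helper above is private, but the reverse breadth-first extraction it builds on is public API (its `sources`/`graph`/`reverse` keywords appear in the record's own code). A hedged sketch, reusing the `dsp` built in the previous example:

# Walk dmap backwards from c and f to collect everything that can feed them.
sub = dsp.get_sub_dsp_from_workflow(sources=['c', 'f'], graph=dsp.dmap,
                                    reverse=True)
print(sorted(sub.data_nodes))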
pedrotgn/pyactor | pyactor/thread/future.py | Future.add_callback | def add_callback(self, method):
"""
Attaches a method that will be called when the future finishes.
:param method: A callable from an actor that will be called
when the future completes. The only argument for that
method must be the future itself, from which you can get the
result through :meth:`future.result()`. If the future has
already completed, then the callable will be called
immediately.
.. note:: This functionality only works when called from an actor,
specifying a method from the same actor.
"""
from_actor = get_current()
if from_actor is not None:
callback = (method, from_actor.channel, from_actor.url)
with self.__condition:
if self.__state is not FINISHED:
self.__callbacks.append(callback)
return
# Invoke the callback directly
# msg = TellRequest(TELL, method, [self], from_actor.url)
msg = {TYPE: TELL, METHOD: method, PARAMS: ([self], {}),
TO: from_actor.url}
from_actor.channel.send(msg)
else:
raise FutureError("add_callback only works when called " +
"from inside an actor") | python | def add_callback(self, method):
"""
Attaches a method that will be called when the future finishes.
:param method: A callable from an actor that will be called
when the future completes. The only argument for that
method must be the future itself, from which you can get the
result through :meth:`future.result()`. If the future has
already completed, then the callable will be called
immediately.
.. note:: This functionality only works when called from an actor,
specifying a method from the same actor.
"""
from_actor = get_current()
if from_actor is not None:
callback = (method, from_actor.channel, from_actor.url)
with self.__condition:
if self.__state is not FINISHED:
self.__callbacks.append(callback)
return
# Invoke the callback directly
# msg = TellRequest(TELL, method, [self], from_actor.url)
msg = {TYPE: TELL, METHOD: method, PARAMS: ([self], {}),
TO: from_actor.url}
from_actor.channel.send(msg)
else:
raise FutureError("add_callback only works when called " +
"from inside an actor") | [
"def",
"add_callback",
"(",
"self",
",",
"method",
")",
":",
"from_actor",
"=",
"get_current",
"(",
")",
"if",
"from_actor",
"is",
"not",
"None",
":",
"callback",
"=",
"(",
"method",
",",
"from_actor",
".",
"channel",
",",
"from_actor",
".",
"url",
")",
"with",
"self",
".",
"__condition",
":",
"if",
"self",
".",
"__state",
"is",
"not",
"FINISHED",
":",
"self",
".",
"__callbacks",
".",
"append",
"(",
"callback",
")",
"return",
"# Invoke the callback directly",
"# msg = TellRequest(TELL, method, [self], from_actor.url)",
"msg",
"=",
"{",
"TYPE",
":",
"TELL",
",",
"METHOD",
":",
"method",
",",
"PARAMS",
":",
"(",
"[",
"self",
"]",
",",
"{",
"}",
")",
",",
"TO",
":",
"from_actor",
".",
"url",
"}",
"from_actor",
".",
"channel",
".",
"send",
"(",
"msg",
")",
"else",
":",
"raise",
"FutureError",
"(",
"\"add_callback only works when called \"",
"+",
"\"from inside an actor\"",
")"
] | Attaches a method that will be called when the future finishes.
:param method: A callable from an actor that will be called
when the future completes. The only argument for that
method must be the future itself, from which you can get the
result through :meth:`future.result()`. If the future has
already completed, then the callable will be called
immediately.
.. note:: This functionality only works when called from an actor,
specifying a method from the same actor. | [
"Attaches",
"a",
"mehtod",
"that",
"will",
"be",
"called",
"when",
"the",
"future",
"finishes",
"."
] | 24d98d134dd4228f2ba38e83611e9c3f50ec2fd4 | https://github.com/pedrotgn/pyactor/blob/24d98d134dd4228f2ba38e83611e9c3f50ec2fd4/pyactor/thread/future.py#L60-L88 | train |
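A hedged usage sketch for `add_callback`: inside an actor, ask another actor for a future and register one of your own methods by name. The `compute` method and the `future=True` ask flag are illustrative of pyactor's proxy conventions, not verbatim API from this record.

class Listener(object):
    _tell = ['run_query', 'res_handler']
    _ask = []

    def run_query(self, worker):
        # Illustrative: asking through the proxy with future=True is assumed
        # to return a Future instead of blocking on the result.
        future = worker.compute(future=True)
        future.add_callback('res_handler')   # fires when the future finishes

    def res_handler(self, future):
        print('result ready:', future.result())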
pedrotgn/pyactor | pyactor/thread/future.py | Future.send_work | def send_work(self):
'''Sends the query to the actor for it to start executing the
work.
It is possible to execute a finished future once again
if necessary (overwriting the results), but only one execution
can run at a time.
'''
if self.__set_running():
# msg = FutureRequest(FUTURE, self.__method, self.__params,
# self.__channel, self.__target, self.__id)
msg = {TYPE: FUTURE, METHOD: self.__method, PARAMS: self.__params,
CHANNEL: self.__channel, TO: self.__target,
RPC_ID: self.__id}
self.__actor_channel.send(msg)
else:
raise FutureError("Future already running.") | python | def send_work(self):
'''Sends the query to the actor for it to start executing the
work.
It is possible to execute a finished future once again
if necessary (overwriting the results), but only one execution
can run at a time.
'''
if self.__set_running():
# msg = FutureRequest(FUTURE, self.__method, self.__params,
# self.__channel, self.__target, self.__id)
msg = {TYPE: FUTURE, METHOD: self.__method, PARAMS: self.__params,
CHANNEL: self.__channel, TO: self.__target,
RPC_ID: self.__id}
self.__actor_channel.send(msg)
else:
raise FutureError("Future already running.") | [
"def",
"send_work",
"(",
"self",
")",
":",
"if",
"self",
".",
"__set_running",
"(",
")",
":",
"# msg = FutureRequest(FUTURE, self.__method, self.__params,",
"# self.__channel, self.__target, self.__id)",
"msg",
"=",
"{",
"TYPE",
":",
"FUTURE",
",",
"METHOD",
":",
"self",
".",
"__method",
",",
"PARAMS",
":",
"self",
".",
"__params",
",",
"CHANNEL",
":",
"self",
".",
"__channel",
",",
"TO",
":",
"self",
".",
"__target",
",",
"RPC_ID",
":",
"self",
".",
"__id",
"}",
"self",
".",
"__actor_channel",
".",
"send",
"(",
"msg",
")",
"else",
":",
"raise",
"FutureError",
"(",
"\"Future already running.\"",
")"
] | Sends the query to the actor for it to start executing the
work.
It is possible to execute a finished future once again
if necessary (overwriting the results), but only one execution
can run at a time. | [
"Sends",
"the",
"query",
"to",
"the",
"actor",
"for",
"it",
"to",
"start",
"executing",
"the",
"work",
"."
] | 24d98d134dd4228f2ba38e83611e9c3f50ec2fd4 | https://github.com/pedrotgn/pyactor/blob/24d98d134dd4228f2ba38e83611e9c3f50ec2fd4/pyactor/thread/future.py#L144-L160 | train |
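A short sketch of the re-execution behaviour the docstring describes (proxy setup elided; `worker.compute` is illustrative, and the first dispatch is assumed to happen when the proxy creates the future):

future = worker.compute(future=True)  # first dispatch assumed on creation
first = future.result()               # block until the first run finishes
future.send_work()                    # re-dispatch the same query
second = future.result()              # result is overwritten by the re-run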
pedrotgn/pyactor | pyactor/thread/future.py | Future.set_result | def set_result(self, result):
"""Sets the return value of work associated with the future.
Only called internally.
"""
with self.__condition:
self.__result = result
self.__state = FINISHED
self.__condition.notify_all()
self._invoke_callbacks() | python | def set_result(self, result):
"""Sets the return value of work associated with the future.
Only called internally.
"""
with self.__condition:
self.__result = result
self.__state = FINISHED
self.__condition.notify_all()
self._invoke_callbacks() | [
"def",
"set_result",
"(",
"self",
",",
"result",
")",
":",
"with",
"self",
".",
"__condition",
":",
"self",
".",
"__result",
"=",
"result",
"self",
".",
"__state",
"=",
"FINISHED",
"self",
".",
"__condition",
".",
"notify_all",
"(",
")",
"self",
".",
"_invoke_callbacks",
"(",
")"
] | Sets the return value of work associated with the future.
Only called internally. | [
"Sets",
"the",
"return",
"value",
"of",
"work",
"associated",
"with",
"the",
"future",
".",
"Only",
"called",
"internally",
"."
] | 24d98d134dd4228f2ba38e83611e9c3f50ec2fd4 | https://github.com/pedrotgn/pyactor/blob/24d98d134dd4228f2ba38e83611e9c3f50ec2fd4/pyactor/thread/future.py#L173-L181 | train |
pedrotgn/pyactor | pyactor/thread/future.py | Future.set_exception | def set_exception(self, exception):
"""Sets the result of the future as being the given exception.
Only called internally.
"""
with self.__condition:
self.__exception = exception
self.__state = FINISHED
self.__condition.notify_all()
self._invoke_callbacks() | python | def set_exception(self, exception):
"""Sets the result of the future as being the given exception.
Only called internally.
"""
with self.__condition:
self.__exception = exception
self.__state = FINISHED
self.__condition.notify_all()
self._invoke_callbacks() | [
"def",
"set_exception",
"(",
"self",
",",
"exception",
")",
":",
"with",
"self",
".",
"__condition",
":",
"self",
".",
"__exception",
"=",
"exception",
"self",
".",
"__state",
"=",
"FINISHED",
"self",
".",
"__condition",
".",
"notify_all",
"(",
")",
"self",
".",
"_invoke_callbacks",
"(",
")"
] | Sets the result of the future as being the given exception.
Only called internally. | [
"Sets",
"the",
"result",
"of",
"the",
"future",
"as",
"being",
"the",
"given",
"exception",
".",
"Only",
"called",
"internally",
"."
] | 24d98d134dd4228f2ba38e83611e9c3f50ec2fd4 | https://github.com/pedrotgn/pyactor/blob/24d98d134dd4228f2ba38e83611e9c3f50ec2fd4/pyactor/thread/future.py#L183-L191 | train |
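Both setters release the condition that `result()` blocks on. Consumer-side, the usual pattern looks like this (the `timeout` argument is an assumption, following the concurrent.futures convention this class mirrors):

try:
    value = future.result(timeout=5)   # assumed to re-raise what set_exception stored
    print('ok:', value)
except Exception as err:
    print('remote call failed:', err)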
theodoregoetz/wernher | sandbox/Flight.py | angle_between_vectors | def angle_between_vectors(x, y):
""" Compute the angle between vector x and y """
dp = dot_product(x, y)
if dp == 0:
return 0
xm = magnitude(x)
ym = magnitude(y)
return math.acos(dp / (xm*ym)) * (180. / math.pi) | python | def angle_between_vectors(x, y):
""" Compute the angle between vector x and y """
dp = dot_product(x, y)
if dp == 0:
return 0
xm = magnitude(x)
ym = magnitude(y)
return math.acos(dp / (xm*ym)) * (180. / math.pi) | [
"def",
"angle_between_vectors",
"(",
"x",
",",
"y",
")",
":",
"dp",
"=",
"dot_product",
"(",
"x",
",",
"y",
")",
"if",
"dp",
"==",
"0",
":",
"return",
"0",
"xm",
"=",
"magnitude",
"(",
"x",
")",
"ym",
"=",
"magnitude",
"(",
"y",
")",
"return",
"math",
".",
"acos",
"(",
"dp",
"/",
"(",
"xm",
"*",
"ym",
")",
")",
"*",
"(",
"180.",
"/",
"math",
".",
"pi",
")"
] | Compute the angle between vectors x and y | [
"Compute",
"the",
"angle",
"between",
"vector",
"x",
"and",
"y"
] | ef5d3aabe24e532b5eab33cd0212b2dbc2c9022e | https://github.com/theodoregoetz/wernher/blob/ef5d3aabe24e532b5eab33cd0212b2dbc2c9022e/sandbox/Flight.py#L31-L38 | train |
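A quick check of the helper above (`dot_product` and `magnitude` are assumed from the same module). Note that the early `dp == 0` return reports 0 for orthogonal vectors, where the geometric angle is 90 degrees:

print(angle_between_vectors((1, 0, 0), (0.5, 0.5, 0)))  # ~45.0
print(angle_between_vectors((1, 0, 0), (0, 1, 0)))      # 0, not 90 (early return)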
dsoprea/PySecure | pysecure/adapters/ssha.py | _ssh_forward_accept | def _ssh_forward_accept(ssh_session, timeout_ms):
"""Waiting for an incoming connection from a reverse forwarded port. Note
that this results in a kernel block until a connection is received.
"""
ssh_channel = c_ssh_forward_accept(c_void_p(ssh_session),
c_int(timeout_ms))
if ssh_channel is None:
raise SshTimeoutException()
return ssh_channel | python | def _ssh_forward_accept(ssh_session, timeout_ms):
"""Waiting for an incoming connection from a reverse forwarded port. Note
that this results in a kernel block until a connection is received.
"""
ssh_channel = c_ssh_forward_accept(c_void_p(ssh_session),
c_int(timeout_ms))
if ssh_channel is None:
raise SshTimeoutException()
return ssh_channel | [
"def",
"_ssh_forward_accept",
"(",
"ssh_session",
",",
"timeout_ms",
")",
":",
"ssh_channel",
"=",
"c_ssh_forward_accept",
"(",
"c_void_p",
"(",
"ssh_session",
")",
",",
"c_int",
"(",
"timeout_ms",
")",
")",
"if",
"ssh_channel",
"is",
"None",
":",
"raise",
"SshTimeoutException",
"(",
")",
"return",
"ssh_channel"
] | Waiting for an incoming connection from a reverse forwarded port. Note
that this results in a kernel block until a connection is received. | [
"Waiting",
"for",
"an",
"incoming",
"connection",
"from",
"a",
"reverse",
"forwarded",
"port",
".",
"Note",
"that",
"this",
"results",
"in",
"a",
"kernel",
"block",
"until",
"a",
"connection",
"is",
"received",
"."
] | ff7e01a0a77e79564cb00b6e38b4e6f9f88674f0 | https://github.com/dsoprea/PySecure/blob/ff7e01a0a77e79564cb00b6e38b4e6f9f88674f0/pysecure/adapters/ssha.py#L249-L260 | train |
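A hedged sketch of the polling loop this helper supports; `ssh_session` setup and `handle_connection` are illustrative placeholders:

while True:
    try:
        channel = _ssh_forward_accept(ssh_session, timeout_ms=60000)
    except SshTimeoutException:
        continue                 # nothing connected within a minute; poll again
    handle_connection(channel)   # hypothetical handler for the new channel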
dsoprea/PySecure | pysecure/adapters/ssha.py | SshSession.execute | def execute(self, cmd, block_size=DEFAULT_EXECUTE_READ_BLOCK_SIZE):
"""Execute a remote command. This functionality does not support more
than one command to be executed on the same channel, so we create a
dedicated channel at the session level rather than allowing direct access at
the channel level.
"""
with SshChannel(self) as sc:
self.__log.debug("Executing command: %s" % (cmd))
sc.open_session()
sc.request_exec(cmd)
buffer_ = bytearray()
while 1:
bytes = sc.read(block_size)
yield bytes
if len(bytes) < block_size:
break | python | def execute(self, cmd, block_size=DEFAULT_EXECUTE_READ_BLOCK_SIZE):
"""Execute a remote command. This functionality does not support more
than one command to be executed on the same channel, so we create a
dedicated channel at the session level rather than allowing direct access at
the channel level.
"""
with SshChannel(self) as sc:
self.__log.debug("Executing command: %s" % (cmd))
sc.open_session()
sc.request_exec(cmd)
buffer_ = bytearray()
while 1:
bytes = sc.read(block_size)
yield bytes
if len(bytes) < block_size:
break | [
"def",
"execute",
"(",
"self",
",",
"cmd",
",",
"block_size",
"=",
"DEFAULT_EXECUTE_READ_BLOCK_SIZE",
")",
":",
"with",
"SshChannel",
"(",
"self",
")",
"as",
"sc",
":",
"self",
".",
"__log",
".",
"debug",
"(",
"\"Executing command: %s\"",
"%",
"(",
"cmd",
")",
")",
"sc",
".",
"open_session",
"(",
")",
"sc",
".",
"request_exec",
"(",
"cmd",
")",
"buffer_",
"=",
"bytearray",
"(",
")",
"while",
"1",
":",
"bytes",
"=",
"sc",
".",
"read",
"(",
"block_size",
")",
"yield",
"bytes",
"if",
"len",
"(",
"bytes",
")",
"<",
"block_size",
":",
"break"
] | Execute a remote command. This functionality does not support more
than one command to be executed on the same channel, so we create a
dedicated channel at the session level rather than allowing direct access at
the channel level. | [
"Execute",
"a",
"remote",
"command",
".",
"This",
"functionality",
"does",
"not",
"support",
"more",
"than",
"one",
"command",
"to",
"be",
"executed",
"on",
"the",
"same",
"channel",
"so",
"we",
"create",
"a",
"dedicated",
"channel",
"at",
"the",
"session",
"level",
"than",
"allowing",
"direct",
"access",
"at",
"the",
"channel",
"level",
"."
] | ff7e01a0a77e79564cb00b6e38b4e6f9f88674f0 | https://github.com/dsoprea/PySecure/blob/ff7e01a0a77e79564cb00b6e38b4e6f9f88674f0/pysecure/adapters/ssha.py#L476-L495 | train |
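Because `execute` yields blocks, callers can stream remote output as it arrives; a minimal sketch with session construction elided:

output = bytearray()
for block in session.execute('uname -a'):
    output.extend(block)         # each block is at most block_size bytes
print(output.decode('utf-8', errors='replace'))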
CI-WATER/gsshapy | gsshapy/orm/prj.py | ProjectFile._read | def _read(self, directory, filename, session, path, name, extension,
spatial, spatialReferenceID, replaceParamFile,
force_relative=True):
"""
Project File Read from File Method
"""
self.project_directory = directory
with tmp_chdir(directory):
# Headers to ignore
HEADERS = ('GSSHAPROJECT',)
# WMS Cards to include (don't discount as comments)
WMS_CARDS = ('#INDEXGRID_GUID', '#PROJECTION_FILE', '#LandSoil',
'#CHANNEL_POINT_INPUT_WMS')
GSSHAPY_CARDS = ('#GSSHAPY_EVENT_YML', )
with open(path, 'r') as f:
for line in f:
if not line.strip():
# Skip empty lines
continue
elif '#' in line.split()[0] and line.split()[0] \
not in WMS_CARDS + GSSHAPY_CARDS:
# Skip comments designated by the hash symbol
# (with the exception of WMS_CARDS and GSSHAPY_CARDS)
continue
try:
card = self._extractCard(line, force_relative)
except:
card = self._extractDirectoryCard(line, force_relative)
# Now that the cardName and cardValue are separated
# load them into the gsshapy objects
if card['name'] not in HEADERS:
# Create GSSHAPY Project Card object
prjCard = ProjectCard(name=card['name'], value=card['value'])
# Associate ProjectCard with ProjectFile
prjCard.projectFile = self
# Extract MAP_TYPE card value for convenience working
# with output maps
if card['name'] == 'MAP_TYPE':
self.mapType = int(card['value'])
# Assign properties
self.srid = spatialReferenceID
self.name = name
self.fileExtension = extension | python | def _read(self, directory, filename, session, path, name, extension,
spatial, spatialReferenceID, replaceParamFile,
force_relative=True):
"""
Project File Read from File Method
"""
self.project_directory = directory
with tmp_chdir(directory):
# Headers to ignore
HEADERS = ('GSSHAPROJECT',)
# WMS Cards to include (don't discount as comments)
WMS_CARDS = ('#INDEXGRID_GUID', '#PROJECTION_FILE', '#LandSoil',
'#CHANNEL_POINT_INPUT_WMS')
GSSHAPY_CARDS = ('#GSSHAPY_EVENT_YML', )
with open(path, 'r') as f:
for line in f:
if not line.strip():
# Skip empty lines
continue
elif '#' in line.split()[0] and line.split()[0] \
not in WMS_CARDS + GSSHAPY_CARDS:
# Skip comments designated by the hash symbol
# (with the exception of WMS_CARDS and GSSHAPY_CARDS)
continue
try:
card = self._extractCard(line, force_relative)
except:
card = self._extractDirectoryCard(line, force_relative)
# Now that the cardName and cardValue are separated
# load them into the gsshapy objects
if card['name'] not in HEADERS:
# Create GSSHAPY Project Card object
prjCard = ProjectCard(name=card['name'], value=card['value'])
# Associate ProjectCard with ProjectFile
prjCard.projectFile = self
# Extract MAP_TYPE card value for convenience working
# with output maps
if card['name'] == 'MAP_TYPE':
self.mapType = int(card['value'])
# Assign properties
self.srid = spatialReferenceID
self.name = name
self.fileExtension = extension | [
"def",
"_read",
"(",
"self",
",",
"directory",
",",
"filename",
",",
"session",
",",
"path",
",",
"name",
",",
"extension",
",",
"spatial",
",",
"spatialReferenceID",
",",
"replaceParamFile",
",",
"force_relative",
"=",
"True",
")",
":",
"self",
".",
"project_directory",
"=",
"directory",
"with",
"tmp_chdir",
"(",
"directory",
")",
":",
"# Headers to ignore",
"HEADERS",
"=",
"(",
"'GSSHAPROJECT'",
",",
")",
"# WMS Cards to include (don't discount as comments)",
"WMS_CARDS",
"=",
"(",
"'#INDEXGRID_GUID'",
",",
"'#PROJECTION_FILE'",
",",
"'#LandSoil'",
",",
"'#CHANNEL_POINT_INPUT_WMS'",
")",
"GSSHAPY_CARDS",
"=",
"(",
"'#GSSHAPY_EVENT_YML'",
",",
")",
"with",
"open",
"(",
"path",
",",
"'r'",
")",
"as",
"f",
":",
"for",
"line",
"in",
"f",
":",
"if",
"not",
"line",
".",
"strip",
"(",
")",
":",
"# Skip empty lines",
"continue",
"elif",
"'#'",
"in",
"line",
".",
"split",
"(",
")",
"[",
"0",
"]",
"and",
"line",
".",
"split",
"(",
")",
"[",
"0",
"]",
"not",
"in",
"WMS_CARDS",
"+",
"GSSHAPY_CARDS",
":",
"# Skip comments designated by the hash symbol",
"# (with the exception of WMS_CARDS and GSSHAPY_CARDS)",
"continue",
"try",
":",
"card",
"=",
"self",
".",
"_extractCard",
"(",
"line",
",",
"force_relative",
")",
"except",
":",
"card",
"=",
"self",
".",
"_extractDirectoryCard",
"(",
"line",
",",
"force_relative",
")",
"# Now that the cardName and cardValue are separated",
"# load them into the gsshapy objects",
"if",
"card",
"[",
"'name'",
"]",
"not",
"in",
"HEADERS",
":",
"# Create GSSHAPY Project Card object",
"prjCard",
"=",
"ProjectCard",
"(",
"name",
"=",
"card",
"[",
"'name'",
"]",
",",
"value",
"=",
"card",
"[",
"'value'",
"]",
")",
"# Associate ProjectCard with ProjectFile",
"prjCard",
".",
"projectFile",
"=",
"self",
"# Extract MAP_TYPE card value for convenience working",
"# with output maps",
"if",
"card",
"[",
"'name'",
"]",
"==",
"'MAP_TYPE'",
":",
"self",
".",
"mapType",
"=",
"int",
"(",
"card",
"[",
"'value'",
"]",
")",
"# Assign properties",
"self",
".",
"srid",
"=",
"spatialReferenceID",
"self",
".",
"name",
"=",
"name",
"self",
".",
"fileExtension",
"=",
"extension"
] | Project File Read from File Method | [
"Project",
"File",
"Read",
"from",
"File",
"Method"
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/prj.py#L244-L296 | train |
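The reader above splits each project-file line into a card name and value; a plain-Python sketch of that split (illustrative, not gsshapy API):

def split_card(line):
    name, _, value = line.strip().partition(' ')
    return {'name': name, 'value': value.strip() or None}

print(split_card('MAP_TYPE  1'))                    # value-bearing card
print(split_card('WATERSHED_MASK "parkcity.msk"'))  # quoted file path
print(split_card('QUIET'))                          # flag card, value None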
CI-WATER/gsshapy | gsshapy/orm/prj.py | ProjectFile._write | def _write(self, session, openFile, replaceParamFile):
"""
Project File Write to File Method
"""
# Enforce cards that must be written in certain order
PRIORITY_CARDS = ('WMS', 'MASK_WATERSHED', 'REPLACE_LINE',
'REPLACE_PARAMS', 'REPLACE_VALS', 'REPLACE_FOLDER')
filename = os.path.split(openFile.name)[1]
name = filename.split('.')[0]
# Write lines
openFile.write('GSSHAPROJECT\n')
# Write priority lines
for card_key in PRIORITY_CARDS:
card = self.getCard(card_key)
# Write the card
if card is not None:
openFile.write(card.write(originalPrefix=self.name, newPrefix=name))
# Initiate write on each ProjectCard that belongs to this ProjectFile
for card in self.projectCards:
if card.name not in PRIORITY_CARDS:
openFile.write(card.write(originalPrefix=self.name, newPrefix=name)) | python | def _write(self, session, openFile, replaceParamFile):
"""
Project File Write to File Method
"""
# Enforce cards that must be written in certain order
PRIORITY_CARDS = ('WMS', 'MASK_WATERSHED', 'REPLACE_LINE',
'REPLACE_PARAMS', 'REPLACE_VALS', 'REPLACE_FOLDER')
filename = os.path.split(openFile.name)[1]
name = filename.split('.')[0]
# Write lines
openFile.write('GSSHAPROJECT\n')
# Write priority lines
for card_key in PRIORITY_CARDS:
card = self.getCard(card_key)
# Write the card
if card is not None:
openFile.write(card.write(originalPrefix=self.name, newPrefix=name))
# Initiate write on each ProjectCard that belongs to this ProjectFile
for card in self.projectCards:
if card.name not in PRIORITY_CARDS:
openFile.write(card.write(originalPrefix=self.name, newPrefix=name)) | [
"def",
"_write",
"(",
"self",
",",
"session",
",",
"openFile",
",",
"replaceParamFile",
")",
":",
"# Enforce cards that must be written in certain order",
"PRIORITY_CARDS",
"=",
"(",
"'WMS'",
",",
"'MASK_WATERSHED'",
",",
"'REPLACE_LINE'",
",",
"'REPLACE_PARAMS'",
",",
"'REPLACE_VALS'",
",",
"'REPLACE_FOLDER'",
")",
"filename",
"=",
"os",
".",
"path",
".",
"split",
"(",
"openFile",
".",
"name",
")",
"[",
"1",
"]",
"name",
"=",
"filename",
".",
"split",
"(",
"'.'",
")",
"[",
"0",
"]",
"# Write lines",
"openFile",
".",
"write",
"(",
"'GSSHAPROJECT\\n'",
")",
"# Write priority lines",
"for",
"card_key",
"in",
"PRIORITY_CARDS",
":",
"card",
"=",
"self",
".",
"getCard",
"(",
"card_key",
")",
"# Write the card",
"if",
"card",
"is",
"not",
"None",
":",
"openFile",
".",
"write",
"(",
"card",
".",
"write",
"(",
"originalPrefix",
"=",
"self",
".",
"name",
",",
"newPrefix",
"=",
"name",
")",
")",
"# Initiate write on each ProjectCard that belongs to this ProjectFile",
"for",
"card",
"in",
"self",
".",
"projectCards",
":",
"if",
"card",
".",
"name",
"not",
"in",
"PRIORITY_CARDS",
":",
"openFile",
".",
"write",
"(",
"card",
".",
"write",
"(",
"originalPrefix",
"=",
"self",
".",
"name",
",",
"newPrefix",
"=",
"name",
")",
")"
] | Project File Write to File Method | [
"Project",
"File",
"Write",
"to",
"File",
"Method"
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/prj.py#L298-L323 | train |
CI-WATER/gsshapy | gsshapy/orm/prj.py | ProjectFile.appendDirectory | def appendDirectory(self, directory, projectFilePath):
"""
Append directory to relative paths in project file. By default, the project file paths are read and written as
relative paths. Use this method to prepend a directory to all the paths in the project file.
Args:
directory (str): Directory path to prepend to file paths in project file.
projectFilePath (str): Path to project file that will be modified.
"""
lines = []
with open(projectFilePath, 'r') as original:
for l in original:
lines.append(l)
with open(projectFilePath, 'w') as new:
for line in lines:
card = {}
try:
card = self._extractCard(line)
except:
card = self._extractDirectoryCard(line)
# Determine number of spaces between card and value for nice alignment
numSpaces = max(2, 25 - len(card['name']))
if card['value'] is None:
rewriteLine = '%s\n' % (card['name'])
else:
if card['name'] == 'WMS':
rewriteLine = '%s %s\n' % (card['name'], card['value'])
elif card['name'] == 'PROJECT_PATH':
filePath = '"%s"' % os.path.normpath(directory)
rewriteLine = '%s%s%s\n' % (card['name'], ' ' * numSpaces, filePath)
elif '"' in card['value']:
filename = card['value'].strip('"')
filePath = '"%s"' % os.path.join(directory, filename)
rewriteLine = '%s%s%s\n' % (card['name'], ' ' * numSpaces, filePath)
else:
rewriteLine = '%s%s%s\n' % (card['name'], ' ' * numSpaces, card['value'])
new.write(rewriteLine) | python | def appendDirectory(self, directory, projectFilePath):
"""
Append directory to relative paths in project file. By default, the project file paths are read and written as
relative paths. Use this method to prepend a directory to all the paths in the project file.
Args:
directory (str): Directory path to prepend to file paths in project file.
projectFilePath (str): Path to project file that will be modified.
"""
lines = []
with open(projectFilePath, 'r') as original:
for l in original:
lines.append(l)
with open(projectFilePath, 'w') as new:
for line in lines:
card = {}
try:
card = self._extractCard(line)
except:
card = self._extractDirectoryCard(line)
# Determine number of spaces between card and value for nice alignment
numSpaces = max(2, 25 - len(card['name']))
if card['value'] is None:
rewriteLine = '%s\n' % (card['name'])
else:
if card['name'] == 'WMS':
rewriteLine = '%s %s\n' % (card['name'], card['value'])
elif card['name'] == 'PROJECT_PATH':
filePath = '"%s"' % os.path.normpath(directory)
rewriteLine = '%s%s%s\n' % (card['name'], ' ' * numSpaces, filePath)
elif '"' in card['value']:
filename = card['value'].strip('"')
filePath = '"%s"' % os.path.join(directory, filename)
rewriteLine = '%s%s%s\n' % (card['name'], ' ' * numSpaces, filePath)
else:
rewriteLine = '%s%s%s\n' % (card['name'], ' ' * numSpaces, card['value'])
new.write(rewriteLine) | [
"def",
"appendDirectory",
"(",
"self",
",",
"directory",
",",
"projectFilePath",
")",
":",
"lines",
"=",
"[",
"]",
"with",
"open",
"(",
"projectFilePath",
",",
"'r'",
")",
"as",
"original",
":",
"for",
"l",
"in",
"original",
":",
"lines",
".",
"append",
"(",
"l",
")",
"with",
"open",
"(",
"projectFilePath",
",",
"'w'",
")",
"as",
"new",
":",
"for",
"line",
"in",
"lines",
":",
"card",
"=",
"{",
"}",
"try",
":",
"card",
"=",
"self",
".",
"_extractCard",
"(",
"line",
")",
"except",
":",
"card",
"=",
"self",
".",
"_extractDirectoryCard",
"(",
"line",
")",
"# Determine number of spaces between card and value for nice alignment",
"numSpaces",
"=",
"max",
"(",
"2",
",",
"25",
"-",
"len",
"(",
"card",
"[",
"'name'",
"]",
")",
")",
"if",
"card",
"[",
"'value'",
"]",
"is",
"None",
":",
"rewriteLine",
"=",
"'%s\\n'",
"%",
"(",
"card",
"[",
"'name'",
"]",
")",
"else",
":",
"if",
"card",
"[",
"'name'",
"]",
"==",
"'WMS'",
":",
"rewriteLine",
"=",
"'%s %s\\n'",
"%",
"(",
"card",
"[",
"'name'",
"]",
",",
"card",
"[",
"'value'",
"]",
")",
"elif",
"card",
"[",
"'name'",
"]",
"==",
"'PROJECT_PATH'",
":",
"filePath",
"=",
"'\"%s\"'",
"%",
"os",
".",
"path",
".",
"normpath",
"(",
"directory",
")",
"rewriteLine",
"=",
"'%s%s%s\\n'",
"%",
"(",
"card",
"[",
"'name'",
"]",
",",
"' '",
"*",
"numSpaces",
",",
"filePath",
")",
"elif",
"'\"'",
"in",
"card",
"[",
"'value'",
"]",
":",
"filename",
"=",
"card",
"[",
"'value'",
"]",
".",
"strip",
"(",
"'\"'",
")",
"filePath",
"=",
"'\"%s\"'",
"%",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"filename",
")",
"rewriteLine",
"=",
"'%s%s%s\\n'",
"%",
"(",
"card",
"[",
"'name'",
"]",
",",
"' '",
"*",
"numSpaces",
",",
"filePath",
")",
"else",
":",
"rewriteLine",
"=",
"'%s%s%s\\n'",
"%",
"(",
"card",
"[",
"'name'",
"]",
",",
"' '",
"*",
"numSpaces",
",",
"card",
"[",
"'value'",
"]",
")",
"new",
".",
"write",
"(",
"rewriteLine",
")"
] | Append directory to relative paths in project file. By default, the project file paths are read and written as
relative paths. Use this method to prepend a directory to all the paths in the project file.
Args:
directory (str): Directory path to prepend to file paths in project file.
projectFilePath (str): Path to project file that will be modified. | [
"Append",
"directory",
"to",
"relative",
"paths",
"in",
"project",
"file",
".",
"By",
"default",
"the",
"project",
"file",
"paths",
"are",
"read",
"and",
"written",
"as",
"relative",
"paths",
".",
"Use",
"this",
"method",
"to",
"prepend",
"a",
"directory",
"to",
"all",
"the",
"paths",
"in",
"the",
"project",
"file",
"."
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/prj.py#L325-L369 | train |
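A hedged usage sketch for `appendDirectory`: rewrite an existing project file in place so its relative paths become absolute (paths are illustrative):

from gsshapy.orm import ProjectFile

project = ProjectFile()
project.appendDirectory('/models/parkcity', '/models/parkcity/parkcity.prj')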
CI-WATER/gsshapy | gsshapy/orm/prj.py | ProjectFile.readProject | def readProject(self, directory, projectFileName, session, spatial=False, spatialReferenceID=None):
"""
Read all files for a GSSHA project into the database.
This method will read all the files, both input and output files, that are supported by GsshaPy into a database.
To use GsshaPy more efficiently, it is recommended that you use the readInput method when performing
pre-processing tasks and readOutput when performing post-processing tasks.
Args:
directory (str): Directory containing all GSSHA model files. This method assumes that all files are located
in the same directory.
projectFileName (str): Name of the project file for the GSSHA model which will be read (e.g.: 'example.prj').
session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database
spatial (bool, optional): If True, spatially enabled objects will be read in as PostGIS spatial objects.
Defaults to False.
spatialReferenceID (int, optional): Integer id of spatial reference system for the model. If no id is
provided GsshaPy will attempt to automatically lookup the spatial reference ID. If this process fails,
default srid will be used (4326 for WGS 84).
"""
self.project_directory = directory
with tmp_chdir(directory):
# Add project file to session
session.add(self)
# First read self
self.read(directory, projectFileName, session, spatial=spatial, spatialReferenceID=spatialReferenceID)
# Get the batch directory for output
batchDirectory = self._getBatchDirectory(directory)
# Automatically derive the spatial reference system, if possible
if spatialReferenceID is None:
spatialReferenceID = self._automaticallyDeriveSpatialReferenceId(directory)
# Read in replace param file
replaceParamFile = self._readReplacementFiles(directory, session, spatial, spatialReferenceID)
# Read Input Files
self._readXput(self.INPUT_FILES, directory, session, spatial=spatial, spatialReferenceID=spatialReferenceID, replaceParamFile=replaceParamFile)
# Read Output Files
self._readXput(self.OUTPUT_FILES, batchDirectory, session, spatial=spatial, spatialReferenceID=spatialReferenceID, replaceParamFile=replaceParamFile)
# Read Input Map Files
self._readXputMaps(self.INPUT_MAPS, directory, session, spatial=spatial, spatialReferenceID=spatialReferenceID, replaceParamFile=replaceParamFile)
# Read WMS Dataset Files
self._readWMSDatasets(self.WMS_DATASETS, batchDirectory, session, spatial=spatial, spatialReferenceID=spatialReferenceID)
# Commit to database
self._commit(session, self.COMMIT_ERROR_MESSAGE) | python | def readProject(self, directory, projectFileName, session, spatial=False, spatialReferenceID=None):
"""
Read all files for a GSSHA project into the database.
This method will read all the files, both input and output files, that are supported by GsshaPy into a database.
To use GsshaPy more efficiently, it is recommended that you use the readInput method when performing
pre-processing tasks and readOutput when performing post-processing tasks.
Args:
directory (str): Directory containing all GSSHA model files. This method assumes that all files are located
in the same directory.
projectFileName (str): Name of the project file for the GSSHA model which will be read (e.g.: 'example.prj').
session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database
spatial (bool, optional): If True, spatially enabled objects will be read in as PostGIS spatial objects.
Defaults to False.
spatialReferenceID (int, optional): Integer id of spatial reference system for the model. If no id is
provided GsshaPy will attempt to automatically lookup the spatial reference ID. If this process fails,
default srid will be used (4326 for WGS 84).
"""
self.project_directory = directory
with tmp_chdir(directory):
# Add project file to session
session.add(self)
# First read self
self.read(directory, projectFileName, session, spatial=spatial, spatialReferenceID=spatialReferenceID)
# Get the batch directory for output
batchDirectory = self._getBatchDirectory(directory)
# Automatically derive the spatial reference system, if possible
if spatialReferenceID is None:
spatialReferenceID = self._automaticallyDeriveSpatialReferenceId(directory)
# Read in replace param file
replaceParamFile = self._readReplacementFiles(directory, session, spatial, spatialReferenceID)
# Read Input Files
self._readXput(self.INPUT_FILES, directory, session, spatial=spatial, spatialReferenceID=spatialReferenceID, replaceParamFile=replaceParamFile)
# Read Output Files
self._readXput(self.OUTPUT_FILES, batchDirectory, session, spatial=spatial, spatialReferenceID=spatialReferenceID, replaceParamFile=replaceParamFile)
# Read Input Map Files
self._readXputMaps(self.INPUT_MAPS, directory, session, spatial=spatial, spatialReferenceID=spatialReferenceID, replaceParamFile=replaceParamFile)
# Read WMS Dataset Files
self._readWMSDatasets(self.WMS_DATASETS, batchDirectory, session, spatial=spatial, spatialReferenceID=spatialReferenceID)
# Commit to database
self._commit(session, self.COMMIT_ERROR_MESSAGE) | [
"def",
"readProject",
"(",
"self",
",",
"directory",
",",
"projectFileName",
",",
"session",
",",
"spatial",
"=",
"False",
",",
"spatialReferenceID",
"=",
"None",
")",
":",
"self",
".",
"project_directory",
"=",
"directory",
"with",
"tmp_chdir",
"(",
"directory",
")",
":",
"# Add project file to session",
"session",
".",
"add",
"(",
"self",
")",
"# First read self",
"self",
".",
"read",
"(",
"directory",
",",
"projectFileName",
",",
"session",
",",
"spatial",
"=",
"spatial",
",",
"spatialReferenceID",
"=",
"spatialReferenceID",
")",
"# Get the batch directory for output",
"batchDirectory",
"=",
"self",
".",
"_getBatchDirectory",
"(",
"directory",
")",
"# Automatically derive the spatial reference system, if possible",
"if",
"spatialReferenceID",
"is",
"None",
":",
"spatialReferenceID",
"=",
"self",
".",
"_automaticallyDeriveSpatialReferenceId",
"(",
"directory",
")",
"# Read in replace param file",
"replaceParamFile",
"=",
"self",
".",
"_readReplacementFiles",
"(",
"directory",
",",
"session",
",",
"spatial",
",",
"spatialReferenceID",
")",
"# Read Input Files",
"self",
".",
"_readXput",
"(",
"self",
".",
"INPUT_FILES",
",",
"directory",
",",
"session",
",",
"spatial",
"=",
"spatial",
",",
"spatialReferenceID",
"=",
"spatialReferenceID",
",",
"replaceParamFile",
"=",
"replaceParamFile",
")",
"# Read Output Files",
"self",
".",
"_readXput",
"(",
"self",
".",
"OUTPUT_FILES",
",",
"batchDirectory",
",",
"session",
",",
"spatial",
"=",
"spatial",
",",
"spatialReferenceID",
"=",
"spatialReferenceID",
",",
"replaceParamFile",
"=",
"replaceParamFile",
")",
"# Read Input Map Files",
"self",
".",
"_readXputMaps",
"(",
"self",
".",
"INPUT_MAPS",
",",
"directory",
",",
"session",
",",
"spatial",
"=",
"spatial",
",",
"spatialReferenceID",
"=",
"spatialReferenceID",
",",
"replaceParamFile",
"=",
"replaceParamFile",
")",
"# Read WMS Dataset Files",
"self",
".",
"_readWMSDatasets",
"(",
"self",
".",
"WMS_DATASETS",
",",
"batchDirectory",
",",
"session",
",",
"spatial",
"=",
"spatial",
",",
"spatialReferenceID",
"=",
"spatialReferenceID",
")",
"# Commit to database",
"self",
".",
"_commit",
"(",
"session",
",",
"self",
".",
"COMMIT_ERROR_MESSAGE",
")"
] | Read all files for a GSSHA project into the database.
This method will read all the files, both input and output files, that are supported by GsshaPy into a database.
To use GsshaPy more efficiently, it is recommended that you use the readInput method when performing
pre-processing tasks and readOutput when performing post-processing tasks.
Args:
directory (str): Directory containing all GSSHA model files. This method assumes that all files are located
in the same directory.
projectFileName (str): Name of the project file for the GSSHA model which will be read (e.g.: 'example.prj').
session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database
spatial (bool, optional): If True, spatially enabled objects will be read in as PostGIS spatial objects.
Defaults to False.
spatialReferenceID (int, optional): Integer id of spatial reference system for the model. If no id is
provided GsshaPy will attempt to automatically lookup the spatial reference ID. If this process fails,
default srid will be used (4326 for WGS 84). | [
"Read",
"all",
"files",
"for",
"a",
"GSSHA",
"project",
"into",
"the",
"database",
"."
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/prj.py#L371-L421 | train |
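An end-to-end sketch for `readProject`. The `db_tools` helper names are assumptions about gsshapy's session utilities, and the model path is illustrative:

from gsshapy.lib import db_tools as dbt   # session helpers; names assumed
from gsshapy.orm import ProjectFile

sqlalchemy_url = dbt.init_sqlite_memory()       # assumed in-memory db helper
session = dbt.create_session(sqlalchemy_url)    # assumed session factory

project = ProjectFile()
project.readProject(directory='/models/parkcity',
                    projectFileName='parkcity.prj',
                    session=session)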
CI-WATER/gsshapy | gsshapy/orm/prj.py | ProjectFile.readInput | def readInput(self, directory, projectFileName, session, spatial=False, spatialReferenceID=None):
"""
Read only input files for a GSSHA project into the database.
Use this method to read a project when only pre-processing tasks need to be performed.
Args:
directory (str): Directory containing all GSSHA model files. This method assumes that all files are located
in the same directory.
projectFileName (str): Name of the project file for the GSSHA model which will be read (e.g.: 'example.prj').
session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database
spatial (bool, optional): If True, spatially enabled objects will be read in as PostGIS spatial objects.
Defaults to False.
spatialReferenceID (int, optional): Integer id of spatial reference system for the model. If no id is
provided GsshaPy will attempt to automatically lookup the spatial reference ID. If this process fails,
default srid will be used (4326 for WGS 84).
"""
self.project_directory = directory
with tmp_chdir(directory):
# Add project file to session
session.add(self)
# Read Project File
self.read(directory, projectFileName, session, spatial, spatialReferenceID)
# Automatically derive the spatial reference system, if possible
if spatialReferenceID is None:
spatialReferenceID = self._automaticallyDeriveSpatialReferenceId(directory)
# Read in replace param file
replaceParamFile = self._readReplacementFiles(directory, session, spatial, spatialReferenceID)
# Read Input Files
self._readXput(self.INPUT_FILES, directory, session, spatial=spatial, spatialReferenceID=spatialReferenceID, replaceParamFile=replaceParamFile)
# Read Input Map Files
self._readXputMaps(self.INPUT_MAPS, directory, session, spatial=spatial, spatialReferenceID=spatialReferenceID, replaceParamFile=replaceParamFile)
# Commit to database
self._commit(session, self.COMMIT_ERROR_MESSAGE) | python | def readInput(self, directory, projectFileName, session, spatial=False, spatialReferenceID=None):
"""
Read only input files for a GSSHA project into the database.
Use this method to read a project when only pre-processing tasks need to be performed.
Args:
directory (str): Directory containing all GSSHA model files. This method assumes that all files are located
in the same directory.
projectFileName (str): Name of the project file for the GSSHA model which will be read (e.g.: 'example.prj').
session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database
spatial (bool, optional): If True, spatially enabled objects will be read in as PostGIS spatial objects.
Defaults to False.
spatialReferenceID (int, optional): Integer id of spatial reference system for the model. If no id is
provided GsshaPy will attempt to automatically lookup the spatial reference ID. If this process fails,
default srid will be used (4326 for WGS 84).
"""
self.project_directory = directory
with tmp_chdir(directory):
# Add project file to session
session.add(self)
# Read Project File
self.read(directory, projectFileName, session, spatial, spatialReferenceID)
# Automatically derive the spatial reference system, if possible
if spatialReferenceID is None:
spatialReferenceID = self._automaticallyDeriveSpatialReferenceId(directory)
# Read in replace param file
replaceParamFile = self._readReplacementFiles(directory, session, spatial, spatialReferenceID)
# Read Input Files
self._readXput(self.INPUT_FILES, directory, session, spatial=spatial, spatialReferenceID=spatialReferenceID, replaceParamFile=replaceParamFile)
# Read Input Map Files
self._readXputMaps(self.INPUT_MAPS, directory, session, spatial=spatial, spatialReferenceID=spatialReferenceID, replaceParamFile=replaceParamFile)
# Commit to database
self._commit(session, self.COMMIT_ERROR_MESSAGE) | [
"def",
"readInput",
"(",
"self",
",",
"directory",
",",
"projectFileName",
",",
"session",
",",
"spatial",
"=",
"False",
",",
"spatialReferenceID",
"=",
"None",
")",
":",
"self",
".",
"project_directory",
"=",
"directory",
"with",
"tmp_chdir",
"(",
"directory",
")",
":",
"# Add project file to session",
"session",
".",
"add",
"(",
"self",
")",
"# Read Project File",
"self",
".",
"read",
"(",
"directory",
",",
"projectFileName",
",",
"session",
",",
"spatial",
",",
"spatialReferenceID",
")",
"# Automatically derive the spatial reference system, if possible",
"if",
"spatialReferenceID",
"is",
"None",
":",
"spatialReferenceID",
"=",
"self",
".",
"_automaticallyDeriveSpatialReferenceId",
"(",
"directory",
")",
"# Read in replace param file",
"replaceParamFile",
"=",
"self",
".",
"_readReplacementFiles",
"(",
"directory",
",",
"session",
",",
"spatial",
",",
"spatialReferenceID",
")",
"# Read Input Files",
"self",
".",
"_readXput",
"(",
"self",
".",
"INPUT_FILES",
",",
"directory",
",",
"session",
",",
"spatial",
"=",
"spatial",
",",
"spatialReferenceID",
"=",
"spatialReferenceID",
",",
"replaceParamFile",
"=",
"replaceParamFile",
")",
"# Read Input Map Files",
"self",
".",
"_readXputMaps",
"(",
"self",
".",
"INPUT_MAPS",
",",
"directory",
",",
"session",
",",
"spatial",
"=",
"spatial",
",",
"spatialReferenceID",
"=",
"spatialReferenceID",
",",
"replaceParamFile",
"=",
"replaceParamFile",
")",
"# Commit to database",
"self",
".",
"_commit",
"(",
"session",
",",
"self",
".",
"COMMIT_ERROR_MESSAGE",
")"
] | Read only input files for a GSSHA project into the database.
Use this method to read a project when only pre-processing tasks need to be performed.
Args:
directory (str): Directory containing all GSSHA model files. This method assumes that all files are located
in the same directory.
projectFileName (str): Name of the project file for the GSSHA model which will be read (e.g.: 'example.prj').
session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database
spatial (bool, optional): If True, spatially enabled objects will be read in as PostGIS spatial objects.
Defaults to False.
spatialReferenceID (int, optional): Integer id of spatial reference system for the model. If no id is
provided GsshaPy will attempt to automatically lookup the spatial reference ID. If this process fails,
default srid will be used (4326 for WGS 84). | [
"Read",
"only",
"input",
"files",
"for",
"a",
"GSSHA",
"project",
"into",
"the",
"database",
"."
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/prj.py#L423-L462 | train |
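The pre-processing round trip this method is meant for, sketched with assumed counterparts (`setCard` and `writeInput` are taken on trust from the same class; session as in the readProject sketch above):

project = ProjectFile()
project.readInput('/models/parkcity', 'parkcity.prj', session)
project.setCard('TOT_TIME', '180')        # tweak a card before a run (assumed API)
project.writeInput(session=session,
                   directory='/models/parkcity_run',
                   name='parkcity')       # assumed write counterpart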
CI-WATER/gsshapy | gsshapy/orm/prj.py | ProjectFile.readOutput | def readOutput(self, directory, projectFileName, session, spatial=False, spatialReferenceID=None):
"""
Read only output files for a GSSHA project into the database.
Use this method to read a project when only post-processing tasks need to be performed.
Args:
directory (str): Directory containing all GSSHA model files. This method assumes that all files are located
in the same directory.
projectFileName (str): Name of the project file for the GSSHA model which will be read (e.g.: 'example.prj').
session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database
spatial (bool, optional): If True, spatially enabled objects will be read in as PostGIS spatial objects.
Defaults to False.
spatialReferenceID (int, optional): Integer id of spatial reference system for the model. If no id is
provided GsshaPy will attempt to automatically lookup the spatial reference ID. If this process fails,
default srid will be used (4326 for WGS 84).
"""
self.project_directory = directory
with tmp_chdir(directory):
# Add project file to session
session.add(self)
# Read Project File
self.read(directory, projectFileName, session, spatial, spatialReferenceID)
# Get the batch directory for output
batchDirectory = self._getBatchDirectory(directory)
# Read Mask (dependency of some output files)
maskMap = WatershedMaskFile()
maskMapFilename = self.getCard('WATERSHED_MASK').value.strip('"')
maskMap.read(session=session, directory=directory, filename=maskMapFilename, spatial=spatial)
maskMap.projectFile = self
# Automatically derive the spatial reference system, if possible
if spatialReferenceID is None:
spatialReferenceID = self._automaticallyDeriveSpatialReferenceId(directory)
# Read Output Files
self._readXput(self.OUTPUT_FILES, batchDirectory, session, spatial=spatial, spatialReferenceID=spatialReferenceID)
# Read WMS Dataset Files
self._readWMSDatasets(self.WMS_DATASETS, batchDirectory, session, spatial=spatial, spatialReferenceID=spatialReferenceID)
# Commit to database
self._commit(session, self.COMMIT_ERROR_MESSAGE) | python | def readOutput(self, directory, projectFileName, session, spatial=False, spatialReferenceID=None):
"""
Read only output files for a GSSHA project into the database.
Use this method to read a project when only post-processing tasks need to be performed.
Args:
directory (str): Directory containing all GSSHA model files. This method assumes that all files are located
in the same directory.
projectFileName (str): Name of the project file for the GSSHA model which will be read (e.g.: 'example.prj').
session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database
spatial (bool, optional): If True, spatially enabled objects will be read in as PostGIS spatial objects.
Defaults to False.
spatialReferenceID (int, optional): Integer id of spatial reference system for the model. If no id is
provided GsshaPy will attempt to automatically lookup the spatial reference ID. If this process fails,
default srid will be used (4326 for WGS 84).
"""
self.project_directory = directory
with tmp_chdir(directory):
# Add project file to session
session.add(self)
# Read Project File
self.read(directory, projectFileName, session, spatial, spatialReferenceID)
# Get the batch directory for output
batchDirectory = self._getBatchDirectory(directory)
# Read Mask (dependency of some output files)
maskMap = WatershedMaskFile()
maskMapFilename = self.getCard('WATERSHED_MASK').value.strip('"')
maskMap.read(session=session, directory=directory, filename=maskMapFilename, spatial=spatial)
maskMap.projectFile = self
# Automatically derive the spatial reference system, if possible
if spatialReferenceID is None:
spatialReferenceID = self._automaticallyDeriveSpatialReferenceId(directory)
# Read Output Files
self._readXput(self.OUTPUT_FILES, batchDirectory, session, spatial=spatial, spatialReferenceID=spatialReferenceID)
# Read WMS Dataset Files
self._readWMSDatasets(self.WMS_DATASETS, batchDirectory, session, spatial=spatial, spatialReferenceID=spatialReferenceID)
# Commit to database
self._commit(session, self.COMMIT_ERROR_MESSAGE) | [
"def",
"readOutput",
"(",
"self",
",",
"directory",
",",
"projectFileName",
",",
"session",
",",
"spatial",
"=",
"False",
",",
"spatialReferenceID",
"=",
"None",
")",
":",
"self",
".",
"project_directory",
"=",
"directory",
"with",
"tmp_chdir",
"(",
"directory",
")",
":",
"# Add project file to session",
"session",
".",
"add",
"(",
"self",
")",
"# Read Project File",
"self",
".",
"read",
"(",
"directory",
",",
"projectFileName",
",",
"session",
",",
"spatial",
",",
"spatialReferenceID",
")",
"# Get the batch directory for output",
"batchDirectory",
"=",
"self",
".",
"_getBatchDirectory",
"(",
"directory",
")",
"# Read Mask (dependency of some output files)",
"maskMap",
"=",
"WatershedMaskFile",
"(",
")",
"maskMapFilename",
"=",
"self",
".",
"getCard",
"(",
"'WATERSHED_MASK'",
")",
".",
"value",
".",
"strip",
"(",
"'\"'",
")",
"maskMap",
".",
"read",
"(",
"session",
"=",
"session",
",",
"directory",
"=",
"directory",
",",
"filename",
"=",
"maskMapFilename",
",",
"spatial",
"=",
"spatial",
")",
"maskMap",
".",
"projectFile",
"=",
"self",
"# Automatically derive the spatial reference system, if possible",
"if",
"spatialReferenceID",
"is",
"None",
":",
"spatialReferenceID",
"=",
"self",
".",
"_automaticallyDeriveSpatialReferenceId",
"(",
"directory",
")",
"# Read Output Files",
"self",
".",
"_readXput",
"(",
"self",
".",
"OUTPUT_FILES",
",",
"batchDirectory",
",",
"session",
",",
"spatial",
"=",
"spatial",
",",
"spatialReferenceID",
"=",
"spatialReferenceID",
")",
"# Read WMS Dataset Files",
"self",
".",
"_readWMSDatasets",
"(",
"self",
".",
"WMS_DATASETS",
",",
"batchDirectory",
",",
"session",
",",
"spatial",
"=",
"spatial",
",",
"spatialReferenceID",
"=",
"spatialReferenceID",
")",
"# Commit to database",
"self",
".",
"_commit",
"(",
"session",
",",
"self",
".",
"COMMIT_ERROR_MESSAGE",
")"
] | Read only output files for a GSSHA project into the database.
Use this method to read a project when only post-processing tasks need to be performed.
Args:
directory (str): Directory containing all GSSHA model files. This method assumes that all files are located
in the same directory.
projectFileName (str): Name of the project file for the GSSHA model which will be read (e.g.: 'example.prj').
session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database
spatial (bool, optional): If True, spatially enabled objects will be read in as PostGIS spatial objects.
Defaults to False.
spatialReferenceID (int, optional): Integer id of spatial reference system for the model. If no id is
provided GsshaPy will attempt to automatically lookup the spatial reference ID. If this process fails,
default srid will be used (4326 for WGS 84). | [
"Read",
"only",
"output",
"files",
"for",
"a",
"GSSHA",
"project",
"to",
"the",
"database",
"."
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/prj.py#L464-L509 | train |
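A post-processing sketch for `readOutput`; session as above. `projectCards` is real (it is iterated in `_write` earlier in this section), the paths are illustrative:

project = ProjectFile()
project.readOutput('/models/parkcity', 'parkcity.prj', session)
for card in project.projectCards:
    print(card.name, card.value)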
CI-WATER/gsshapy | gsshapy/orm/prj.py | ProjectFile._readXputFile | def _readXputFile(self, file_cards, card_name, directory, session,
spatial=False, spatialReferenceID=None,
replaceParamFile=None, **kwargs):
"""
Read a specific IO file for a GSSHA project into the database.
"""
# Automatically derive the spatial reference system, if possible
if spatialReferenceID is None:
spatialReferenceID = self._automaticallyDeriveSpatialReferenceId(directory)
card = self.getCard(card_name)
if card:
fileIO = file_cards[card.name]
filename = card.value.strip('"').strip("'")
# Invoke read method on each file
return self._invokeRead(fileIO=fileIO,
directory=directory,
filename=filename,
session=session,
spatial=spatial,
spatialReferenceID=spatialReferenceID,
replaceParamFile=replaceParamFile,
**kwargs) | python | def _readXputFile(self, file_cards, card_name, directory, session,
spatial=False, spatialReferenceID=None,
replaceParamFile=None, **kwargs):
"""
Read a specific IO file for a GSSHA project into the database.
"""
# Automatically derive the spatial reference system, if possible
if spatialReferenceID is None:
spatialReferenceID = self._automaticallyDeriveSpatialReferenceId(directory)
card = self.getCard(card_name)
if card:
fileIO = file_cards[card.name]
filename = card.value.strip('"').strip("'")
# Invoke read method on each file
return self._invokeRead(fileIO=fileIO,
directory=directory,
filename=filename,
session=session,
spatial=spatial,
spatialReferenceID=spatialReferenceID,
replaceParamFile=replaceParamFile,
**kwargs) | [
"def",
"_readXputFile",
"(",
"self",
",",
"file_cards",
",",
"card_name",
",",
"directory",
",",
"session",
",",
"spatial",
"=",
"False",
",",
"spatialReferenceID",
"=",
"None",
",",
"replaceParamFile",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"# Automatically derive the spatial reference system, if possible",
"if",
"spatialReferenceID",
"is",
"None",
":",
"spatialReferenceID",
"=",
"self",
".",
"_automaticallyDeriveSpatialReferenceId",
"(",
"directory",
")",
"card",
"=",
"self",
".",
"getCard",
"(",
"card_name",
")",
"if",
"card",
":",
"fileIO",
"=",
"file_cards",
"[",
"card",
".",
"name",
"]",
"filename",
"=",
"card",
".",
"value",
".",
"strip",
"(",
"'\"'",
")",
".",
"strip",
"(",
"\"'\"",
")",
"# Invoke read method on each file",
"return",
"self",
".",
"_invokeRead",
"(",
"fileIO",
"=",
"fileIO",
",",
"directory",
"=",
"directory",
",",
"filename",
"=",
"filename",
",",
"session",
"=",
"session",
",",
"spatial",
"=",
"spatial",
",",
"spatialReferenceID",
"=",
"spatialReferenceID",
",",
"replaceParamFile",
"=",
"replaceParamFile",
",",
"*",
"*",
"kwargs",
")"
] | Read specific IO file for a GSSHA project to the database. | [
"Read",
"specific",
"IO",
"file",
"for",
"a",
"GSSHA",
"project",
"to",
"the",
"database",
"."
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/prj.py#L511-L534 | train |
CI-WATER/gsshapy | gsshapy/orm/prj.py | ProjectFile.writeProject | def writeProject(self, session, directory, name):
"""
Write all files for a project from the database to file.
Use this method to write all GsshaPy supported files back into their native file formats. If writing to execute
the model, increase efficiency by using the writeInput method to write only the files needed to run the model.
Args:
session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database
directory (str): Directory where the files will be written.
name (str): Name that will be given to project when written (e.g.: 'example'). Files that follow the project
naming convention will be given this name with the appropriate extension (e.g.: 'example.prj',
'example.cmt', and 'example.gag'). Files that do not follow this convention will retain their original
file names.
"""
self.project_directory = directory
with tmp_chdir(directory):
# Get the batch directory for output
batchDirectory = self._getBatchDirectory(directory)
# Get param file for writing
replaceParamFile = self.replaceParamFile
# Write the replacement files
self._writeReplacementFiles(session=session, directory=directory, name=name)
# Write Project File
self.write(session=session, directory=directory, name=name)
# Write input files
self._writeXput(session=session, directory=directory, fileCards=self.INPUT_FILES, name=name, replaceParamFile=replaceParamFile)
# Write output files
self._writeXput(session=session, directory=batchDirectory, fileCards=self.OUTPUT_FILES, name=name)
# Write input map files
self._writeXputMaps(session=session, directory=directory, mapCards=self.INPUT_MAPS, name=name, replaceParamFile=replaceParamFile)
# Write WMS Dataset Files
self._writeWMSDatasets(session=session, directory=batchDirectory, wmsDatasetCards=self.WMS_DATASETS, name=name) | python | def writeProject(self, session, directory, name):
"""
Write all files for a project from the database to file.
Use this method to write all GsshaPy supported files back into their native file formats. If writing to execute
the model, increase efficiency by using the writeInput method to write only the files needed to run the model.
Args:
session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database
directory (str): Directory where the files will be written.
name (str): Name that will be given to project when written (e.g.: 'example'). Files that follow the project
naming convention will be given this name with the appropriate extension (e.g.: 'example.prj',
'example.cmt', and 'example.gag'). Files that do not follow this convention will retain their original
file names.
"""
self.project_directory = directory
with tmp_chdir(directory):
# Get the batch directory for output
batchDirectory = self._getBatchDirectory(directory)
# Get param file for writing
replaceParamFile = self.replaceParamFile
# Write the replacement files
self._writeReplacementFiles(session=session, directory=directory, name=name)
# Write Project File
self.write(session=session, directory=directory, name=name)
# Write input files
self._writeXput(session=session, directory=directory, fileCards=self.INPUT_FILES, name=name, replaceParamFile=replaceParamFile)
# Write output files
self._writeXput(session=session, directory=batchDirectory, fileCards=self.OUTPUT_FILES, name=name)
# Write input map files
self._writeXputMaps(session=session, directory=directory, mapCards=self.INPUT_MAPS, name=name, replaceParamFile=replaceParamFile)
# Write WMS Dataset Files
self._writeWMSDatasets(session=session, directory=batchDirectory, wmsDatasetCards=self.WMS_DATASETS, name=name) | [
"def",
"writeProject",
"(",
"self",
",",
"session",
",",
"directory",
",",
"name",
")",
":",
"self",
".",
"project_directory",
"=",
"directory",
"with",
"tmp_chdir",
"(",
"directory",
")",
":",
"# Get the batch directory for output",
"batchDirectory",
"=",
"self",
".",
"_getBatchDirectory",
"(",
"directory",
")",
"# Get param file for writing",
"replaceParamFile",
"=",
"self",
".",
"replaceParamFile",
"# Write the replacement files",
"self",
".",
"_writeReplacementFiles",
"(",
"session",
"=",
"session",
",",
"directory",
"=",
"directory",
",",
"name",
"=",
"name",
")",
"# Write Project File",
"self",
".",
"write",
"(",
"session",
"=",
"session",
",",
"directory",
"=",
"directory",
",",
"name",
"=",
"name",
")",
"# Write input files",
"self",
".",
"_writeXput",
"(",
"session",
"=",
"session",
",",
"directory",
"=",
"directory",
",",
"fileCards",
"=",
"self",
".",
"INPUT_FILES",
",",
"name",
"=",
"name",
",",
"replaceParamFile",
"=",
"replaceParamFile",
")",
"# Write output files",
"self",
".",
"_writeXput",
"(",
"session",
"=",
"session",
",",
"directory",
"=",
"batchDirectory",
",",
"fileCards",
"=",
"self",
".",
"OUTPUT_FILES",
",",
"name",
"=",
"name",
")",
"# Write input map files",
"self",
".",
"_writeXputMaps",
"(",
"session",
"=",
"session",
",",
"directory",
"=",
"directory",
",",
"mapCards",
"=",
"self",
".",
"INPUT_MAPS",
",",
"name",
"=",
"name",
",",
"replaceParamFile",
"=",
"replaceParamFile",
")",
"# Write WMS Dataset Files",
"self",
".",
"_writeWMSDatasets",
"(",
"session",
"=",
"session",
",",
"directory",
"=",
"batchDirectory",
",",
"wmsDatasetCards",
"=",
"self",
".",
"WMS_DATASETS",
",",
"name",
"=",
"name",
")"
] | Write all files for a project from the database to file.
Use this method to write all GsshaPy supported files back into their native file formats. If writing to execute
the model, increase efficiency by using the writeInput method to write only the files needed to run the model.
Args:
session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database
directory (str): Directory where the files will be written.
name (str): Name that will be given to project when written (e.g.: 'example'). Files that follow the project
naming convention will be given this name with the appropriate extension (e.g.: 'example.prj',
'example.cmt', and 'example.gag'). Files that do not follow this convention will retain their original
file names. | [
"Write",
"all",
"files",
"for",
"a",
"project",
"from",
"the",
"database",
"to",
"file",
"."
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/prj.py#L587-L626 | train |
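A hedged round-trip sketch for `writeProject`, reusing the `session` from the `readOutput` sketch above; `writeInput` and `writeOutput` (next entries) take the same `(session, directory, name)` arguments:

```python
from gsshapy.orm import ProjectFile

# Fetch a project previously read into the database (standard SQLAlchemy
# query; the id filter is a placeholder).
project_file = session.query(ProjectFile).filter(ProjectFile.id == 1).one()

# Writes the project file, inputs, outputs, maps, and WMS datasets to disk.
project_file.writeProject(session=session,
                          directory='/path/to/write',
                          name='example')
```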
CI-WATER/gsshapy | gsshapy/orm/prj.py | ProjectFile.writeInput | def writeInput(self, session, directory, name):
"""
Write only input files for a GSSHA project from the database to file.
Args:
session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database
directory (str): Directory where the files will be written.
name (str): Name that will be given to project when written (e.g.: 'example'). Files that follow the project
naming convention will be given this name with the appropriate extension (e.g.: 'example.prj',
'example.cmt', and 'example.gag'). Files that do not follow this convention will retain their original
file names.
"""
self.project_directory = directory
with tmp_chdir(directory):
# Get param file for writing
replaceParamFile = self.replaceParamFile
# Write Project File
self.write(session=session, directory=directory, name=name)
# Write input files
self._writeXput(session=session, directory=directory, fileCards=self.INPUT_FILES, name=name, replaceParamFile=replaceParamFile)
# Write input map files
self._writeXputMaps(session=session, directory=directory, mapCards=self.INPUT_MAPS, name=name, replaceParamFile=replaceParamFile) | python | def writeInput(self, session, directory, name):
"""
Write only input files for a GSSHA project from the database to file.
Args:
session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database
directory (str): Directory where the files will be written.
name (str): Name that will be given to project when written (e.g.: 'example'). Files that follow the project
naming convention will be given this name with the appropriate extension (e.g.: 'example.prj',
'example.cmt', and 'example.gag'). Files that do not follow this convention will retain their original
file names.
"""
self.project_directory = directory
with tmp_chdir(directory):
# Get param file for writing
replaceParamFile = self.replaceParamFile
# Write Project File
self.write(session=session, directory=directory, name=name)
# Write input files
self._writeXput(session=session, directory=directory, fileCards=self.INPUT_FILES, name=name, replaceParamFile=replaceParamFile)
# Write input map files
self._writeXputMaps(session=session, directory=directory, mapCards=self.INPUT_MAPS, name=name, replaceParamFile=replaceParamFile) | [
"def",
"writeInput",
"(",
"self",
",",
"session",
",",
"directory",
",",
"name",
")",
":",
"self",
".",
"project_directory",
"=",
"directory",
"with",
"tmp_chdir",
"(",
"directory",
")",
":",
"# Get param file for writing",
"replaceParamFile",
"=",
"self",
".",
"replaceParamFile",
"# Write Project File",
"self",
".",
"write",
"(",
"session",
"=",
"session",
",",
"directory",
"=",
"directory",
",",
"name",
"=",
"name",
")",
"# Write input files",
"self",
".",
"_writeXput",
"(",
"session",
"=",
"session",
",",
"directory",
"=",
"directory",
",",
"fileCards",
"=",
"self",
".",
"INPUT_FILES",
",",
"name",
"=",
"name",
",",
"replaceParamFile",
"=",
"replaceParamFile",
")",
"# Write input map files",
"self",
".",
"_writeXputMaps",
"(",
"session",
"=",
"session",
",",
"directory",
"=",
"directory",
",",
"mapCards",
"=",
"self",
".",
"INPUT_MAPS",
",",
"name",
"=",
"name",
",",
"replaceParamFile",
"=",
"replaceParamFile",
")"
] | Write only input files for a GSSHA project from the database to file.
Args:
session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database
directory (str): Directory where the files will be written.
name (str): Name that will be given to project when written (e.g.: 'example'). Files that follow the project
naming convention will be given this name with the appropriate extension (e.g.: 'example.prj',
'example.cmt', and 'example.gag'). Files that do not follow this convention will retain their original
file names. | [
"Write",
"only",
"input",
"files",
"for",
"a",
"GSSHA",
"project",
"from",
"the",
"database",
"to",
"file",
"."
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/prj.py#L628-L652 | train |
CI-WATER/gsshapy | gsshapy/orm/prj.py | ProjectFile.writeOutput | def writeOutput(self, session, directory, name):
"""
Write only output files for a GSSHA project from the database to file.
Args:
session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database
directory (str): Directory where the files will be written.
name (str): Name that will be given to project when written (e.g.: 'example'). Files that follow the project
naming convention will be given this name with the appropriate extension (e.g.: 'example.prj',
'example.cmt', and 'example.gag'). Files that do not follow this convention will retain their original
file names.
"""
self.project_directory = directory
with tmp_chdir(directory):
# Get the batch directory for output
batchDirectory = self._getBatchDirectory(directory)
# Write the replacement files
self._writeReplacementFiles(session=session, directory=directory, name=name)
# Write Project File
self.write(session=session, directory=directory, name=name)
# Write output files
self._writeXput(session=session, directory=batchDirectory, fileCards=self.OUTPUT_FILES, name=name)
# Write WMS Dataset Files
self._writeWMSDatasets(session=session, directory=batchDirectory, wmsDatasetCards=self.WMS_DATASETS, name=name) | python | def writeOutput(self, session, directory, name):
"""
Write only output files for a GSSHA project from the database to file.
Args:
session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database
directory (str): Directory where the files will be written.
name (str): Name that will be given to project when written (e.g.: 'example'). Files that follow the project
naming convention will be given this name with the appropriate extension (e.g.: 'example.prj',
'example.cmt', and 'example.gag'). Files that do not follow this convention will retain their original
file names.
"""
self.project_directory = directory
with tmp_chdir(directory):
# Get the batch directory for output
batchDirectory = self._getBatchDirectory(directory)
# Write the replacement files
self._writeReplacementFiles(session=session, directory=directory, name=name)
# Write Project File
self.write(session=session, directory=directory, name=name)
# Write output files
self._writeXput(session=session, directory=batchDirectory, fileCards=self.OUTPUT_FILES, name=name)
# Write WMS Dataset Files
self._writeWMSDatasets(session=session, directory=batchDirectory, wmsDatasetCards=self.WMS_DATASETS, name=name) | [
"def",
"writeOutput",
"(",
"self",
",",
"session",
",",
"directory",
",",
"name",
")",
":",
"self",
".",
"project_directory",
"=",
"directory",
"with",
"tmp_chdir",
"(",
"directory",
")",
":",
"# Get the batch directory for output",
"batchDirectory",
"=",
"self",
".",
"_getBatchDirectory",
"(",
"directory",
")",
"# Write the replacement files",
"self",
".",
"_writeReplacementFiles",
"(",
"session",
"=",
"session",
",",
"directory",
"=",
"directory",
",",
"name",
"=",
"name",
")",
"# Write Project File",
"self",
".",
"write",
"(",
"session",
"=",
"session",
",",
"directory",
"=",
"directory",
",",
"name",
"=",
"name",
")",
"# Write output files",
"self",
".",
"_writeXput",
"(",
"session",
"=",
"session",
",",
"directory",
"=",
"batchDirectory",
",",
"fileCards",
"=",
"self",
".",
"OUTPUT_FILES",
",",
"name",
"=",
"name",
")",
"# Write WMS Dataset Files",
"self",
".",
"_writeWMSDatasets",
"(",
"session",
"=",
"session",
",",
"directory",
"=",
"batchDirectory",
",",
"wmsDatasetCards",
"=",
"self",
".",
"WMS_DATASETS",
",",
"name",
"=",
"name",
")"
] | Write only output files for a GSSHA project from the database to file.
Args:
session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database
directory (str): Directory where the files will be written.
name (str): Name that will be given to project when written (e.g.: 'example'). Files that follow the project
naming convention will be given this name with the appropriate extension (e.g.: 'example.prj',
'example.cmt', and 'example.gag'). Files that do not follow this convention will retain their original
file names. | [
"Write",
"only",
"output",
"files",
"for",
"a",
"GSSHA",
"project",
"from",
"the",
"database",
"to",
"file",
"."
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/prj.py#L656-L683 | train |
CI-WATER/gsshapy | gsshapy/orm/prj.py | ProjectFile.getFileKeys | def getFileKeys(self):
"""
Retrieve a list of file keys that have been read into the database.
This is a utility method that can be used to programmatically access the GsshaPy file objects. Use these keys
in conjunction with the dictionary returned by the getFileObjects method.
Returns:
list: List of keys representing file objects that have been read into the database.
"""
files = self.getFileObjects()
files_list = []
for key, value in files.items():
if value:
files_list.append(key)
return files_list | python | def getFileKeys(self):
"""
Retrieve a list of file keys that have been read into the database.
This is a utility method that can be used to programmatically access the GsshaPy file objects. Use these keys
in conjunction with the dictionary returned by the getFileObjects method.
Returns:
list: List of keys representing file objects that have been read into the database.
"""
files = self.getFileObjects()
files_list = []
for key, value in files.items():
if value:
files_list.append(key)
return files_list | [
"def",
"getFileKeys",
"(",
"self",
")",
":",
"files",
"=",
"self",
".",
"getFileObjects",
"(",
")",
"files_list",
"=",
"[",
"]",
"for",
"key",
",",
"value",
"in",
"files",
".",
"iteritems",
"(",
")",
":",
"if",
"value",
":",
"files_list",
".",
"append",
"(",
"key",
")",
"return",
"files_list"
] | Retrieve a list of file keys that have been read into the database.
This is a utility method that can be used to programmatically access the GsshaPy file objects. Use these keys
in conjunction with the dictionary returned by the getFileObjects method.
Returns:
list: List of keys representing file objects that have been read into the database. | [
"Retrieve",
"a",
"list",
"of",
"file",
"keys",
"that",
"have",
"been",
"read",
"into",
"the",
"database",
"."
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/prj.py#L685-L703 | train |
CI-WATER/gsshapy | gsshapy/orm/prj.py | ProjectFile.getFileObjects | def getFileObjects(self):
"""
Retrieve a dictionary of file objects.
This is a utility method that can be used to programmatically access the GsshaPy file objects. Use this method
in conjunction with the getFileKeys method to access only files that have been read into the database.
Returns:
dict: Dictionary with human readable keys and values of GsshaPy file object instances. Files that have not
been read into the database will have a value of None.
"""
files = {'project-file': self,
'mapping-table-file': self.mapTableFile,
'channel-input-file': self.channelInputFile,
'precipitation-file': self.precipFile,
'storm-pipe-network-file': self.stormPipeNetworkFile,
'hmet-file': self.hmetFile,
'nwsrfs-file': self.nwsrfsFile,
'orographic-gage-file': self.orographicGageFile,
'grid-pipe-file': self.gridPipeFile,
'grid-stream-file': self.gridStreamFile,
'time-series-file': self.timeSeriesFiles,
'projection-file': self.projectionFile,
'replace-parameters-file': self.replaceParamFile,
'replace-value-file': self.replaceValFile,
'output-location-file': self.outputLocationFiles,
'maps': self.maps,
'link-node-datasets-file': self.linkNodeDatasets}
return files | python | def getFileObjects(self):
"""
Retrieve a dictionary of file objects.
This is a utility method that can be used to programmatically access the GsshaPy file objects. Use this method
in conjunction with the getFileKeys method to access only files that have been read into the database.
Returns:
dict: Dictionary with human readable keys and values of GsshaPy file object instances. Files that have not
been read into the database will have a value of None.
"""
files = {'project-file': self,
'mapping-table-file': self.mapTableFile,
'channel-input-file': self.channelInputFile,
'precipitation-file': self.precipFile,
'storm-pipe-network-file': self.stormPipeNetworkFile,
'hmet-file': self.hmetFile,
'nwsrfs-file': self.nwsrfsFile,
'orographic-gage-file': self.orographicGageFile,
'grid-pipe-file': self.gridPipeFile,
'grid-stream-file': self.gridStreamFile,
'time-series-file': self.timeSeriesFiles,
'projection-file': self.projectionFile,
'replace-parameters-file': self.replaceParamFile,
'replace-value-file': self.replaceValFile,
'output-location-file': self.outputLocationFiles,
'maps': self.maps,
'link-node-datasets-file': self.linkNodeDatasets}
return files | [
"def",
"getFileObjects",
"(",
"self",
")",
":",
"files",
"=",
"{",
"'project-file'",
":",
"self",
",",
"'mapping-table-file'",
":",
"self",
".",
"mapTableFile",
",",
"'channel-input-file'",
":",
"self",
".",
"channelInputFile",
",",
"'precipitation-file'",
":",
"self",
".",
"precipFile",
",",
"'storm-pipe-network-file'",
":",
"self",
".",
"stormPipeNetworkFile",
",",
"'hmet-file'",
":",
"self",
".",
"hmetFile",
",",
"'nwsrfs-file'",
":",
"self",
".",
"nwsrfsFile",
",",
"'orographic-gage-file'",
":",
"self",
".",
"orographicGageFile",
",",
"'grid-pipe-file'",
":",
"self",
".",
"gridPipeFile",
",",
"'grid-stream-file'",
":",
"self",
".",
"gridStreamFile",
",",
"'time-series-file'",
":",
"self",
".",
"timeSeriesFiles",
",",
"'projection-file'",
":",
"self",
".",
"projectionFile",
",",
"'replace-parameters-file'",
":",
"self",
".",
"replaceParamFile",
",",
"'replace-value-file'",
":",
"self",
".",
"replaceValFile",
",",
"'output-location-file'",
":",
"self",
".",
"outputLocationFiles",
",",
"'maps'",
":",
"self",
".",
"maps",
",",
"'link-node-datasets-file'",
":",
"self",
".",
"linkNodeDatasets",
"}",
"return",
"files"
] | Retrieve a dictionary of file objects.
This is a utility method that can be used to programmatically access the GsshaPy file objects. Use this method
in conjunction with the getFileKeys method to access only files that have been read into the database.
Returns:
dict: Dictionary with human readable keys and values of GsshaPy file object instances. Files that have not
been read into the database will have a value of None. | [
"Retrieve",
"a",
"dictionary",
"of",
"file",
"objects",
"."
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/prj.py#L705-L735 | train |
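The two lookup methods above pair naturally: `getFileObjects` returns every supported slot (with `None` for files never read), and `getFileKeys` filters that dictionary down to the populated slots. A small sketch, assuming `project_file` from the earlier setup:

```python
files = project_file.getFileObjects()        # all slots, populated or None
for key in project_file.getFileKeys():       # only slots that were actually read
    print(key, type(files[key]).__name__)

precip = files['precipitation-file']         # None if no precipitation file was read
```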
CI-WATER/gsshapy | gsshapy/orm/prj.py | ProjectFile.getCard | def getCard(self, name):
"""
Retrieve card object for given card name.
Args:
name (str): Name of card to be retrieved.
Returns:
:class:`.ProjectCard` or None: Project card object. Will return None if the card is not available.
"""
cards = self.projectCards
for card in cards:
if card.name.upper() == name.upper():
return card
return None | python | def getCard(self, name):
"""
Retrieve card object for given card name.
Args:
name (str): Name of card to be retrieved.
Returns:
:class:`.ProjectCard` or None: Project card object. Will return None if the card is not available.
"""
cards = self.projectCards
for card in cards:
if card.name.upper() == name.upper():
return card
return None | [
"def",
"getCard",
"(",
"self",
",",
"name",
")",
":",
"cards",
"=",
"self",
".",
"projectCards",
"for",
"card",
"in",
"cards",
":",
"if",
"card",
".",
"name",
".",
"upper",
"(",
")",
"==",
"name",
".",
"upper",
"(",
")",
":",
"return",
"card",
"return",
"None"
] | Retrieve card object for given card name.
Args:
name (str): Name of card to be retrieved.
Returns:
:class:`.ProjectCard` or None: Project card object. Will return None if the card is not available. | [
"Retrieve",
"card",
"object",
"for",
"given",
"card",
"name",
"."
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/prj.py#L737-L753 | train |
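`getCard` does a case-insensitive match against the project cards and returns `None` on a miss, so guard before dereferencing; the card names below all appear elsewhere in this module:

```python
grid_size_card = project_file.getCard('GRIDSIZE')
if grid_size_card is not None:
    cell_size = float(grid_size_card.value)

pro_card = project_file.getCard('#projection_file')   # lookup is case-insensitive
print(pro_card.value.strip('"').strip("'") if pro_card else 'no projection file')
```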
CI-WATER/gsshapy | gsshapy/orm/prj.py | ProjectFile.deleteCard | def deleteCard(self, card_name, db_session):
"""
Removes a card from the GSSHA project file
"""
card_name = card_name.upper()
gssha_card = self.getCard(card_name)
if gssha_card is not None:
db_session.delete(gssha_card)
db_session.commit() | python | def deleteCard(self, card_name, db_session):
"""
Removes a card from the GSSHA project file
"""
card_name = card_name.upper()
gssha_card = self.getCard(card_name)
if gssha_card is not None:
db_session.delete(gssha_card)
db_session.commit() | [
"def",
"deleteCard",
"(",
"self",
",",
"card_name",
",",
"db_session",
")",
":",
"card_name",
"=",
"card_name",
".",
"upper",
"(",
")",
"gssha_card",
"=",
"self",
".",
"getCard",
"(",
"card_name",
")",
"if",
"gssha_card",
"is",
"not",
"None",
":",
"db_session",
".",
"delete",
"(",
"gssha_card",
")",
"db_session",
".",
"commit",
"(",
")"
] | Removes a card from the GSSHA project file | [
"Removes",
"card",
"from",
"gssha",
"project",
"file"
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/prj.py#L776-L784 | train |
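`deleteCard` removes the card (if present) and commits immediately; for example, dropping a batch-output redirect (the card name here is just an illustration):

```python
project_file.deleteCard('REPLACE_FOLDER', session)  # no-op if the card is absent
```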
CI-WATER/gsshapy | gsshapy/orm/prj.py | ProjectFile.getGridByCard | def getGridByCard(self, gssha_card_name):
"""
Returns GDALGrid object of GSSHA grid
Parameters:
gssha_card_name(str): Name of GSSHA project card for grid.
Returns:
GDALGrid
"""
with tmp_chdir(self.project_directory):
if gssha_card_name not in (self.INPUT_MAPS+self.WMS_DATASETS):
raise ValueError("Card {0} not found in valid grid cards ..."
.format(gssha_card_name))
gssha_grid_card = self.getCard(gssha_card_name)
if gssha_grid_card is None:
raise ValueError("{0} card not found ...".format(gssha_card_name))
gssha_pro_card = self.getCard("#PROJECTION_FILE")
if gssha_pro_card is None:
raise ValueError("#PROJECTION_FILE card not found ...")
# return gssha grid
return GDALGrid(gssha_grid_card.value.strip('"').strip("'"),
gssha_pro_card.value.strip('"').strip("'")) | python | def getGridByCard(self, gssha_card_name):
"""
Returns GDALGrid object of GSSHA grid
Parameters:
gssha_card_name(str): Name of GSSHA project card for grid.
Returns:
GDALGrid
"""
with tmp_chdir(self.project_directory):
if gssha_card_name not in (self.INPUT_MAPS+self.WMS_DATASETS):
raise ValueError("Card {0} not found in valid grid cards ..."
.format(gssha_card_name))
gssha_grid_card = self.getCard(gssha_card_name)
if gssha_grid_card is None:
raise ValueError("{0} card not found ...".format(gssha_card_name))
gssha_pro_card = self.getCard("#PROJECTION_FILE")
if gssha_pro_card is None:
raise ValueError("#PROJECTION_FILE card not found ...")
# return gssha grid
return GDALGrid(gssha_grid_card.value.strip('"').strip("'"),
gssha_pro_card.value.strip('"').strip("'")) | [
"def",
"getGridByCard",
"(",
"self",
",",
"gssha_card_name",
")",
":",
"with",
"tmp_chdir",
"(",
"self",
".",
"project_directory",
")",
":",
"if",
"gssha_card_name",
"not",
"in",
"(",
"self",
".",
"INPUT_MAPS",
"+",
"self",
".",
"WMS_DATASETS",
")",
":",
"raise",
"ValueError",
"(",
"\"Card {0} not found in valid grid cards ...\"",
".",
"format",
"(",
"gssha_card_name",
")",
")",
"gssha_grid_card",
"=",
"self",
".",
"getCard",
"(",
"gssha_card_name",
")",
"if",
"gssha_grid_card",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"{0} card not found ...\"",
".",
"format",
"(",
"gssha_card_name",
")",
")",
"gssha_pro_card",
"=",
"self",
".",
"getCard",
"(",
"\"#PROJECTION_FILE\"",
")",
"if",
"gssha_pro_card",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"#PROJECTION_FILE card not found ...\"",
")",
"# return gssha grid",
"return",
"GDALGrid",
"(",
"gssha_grid_card",
".",
"value",
".",
"strip",
"(",
"'\"'",
")",
".",
"strip",
"(",
"\"'\"",
")",
",",
"gssha_pro_card",
".",
"value",
".",
"strip",
"(",
"'\"'",
")",
".",
"strip",
"(",
"\"'\"",
")",
")"
] | Returns GDALGrid object of GSSHA grid
Parameters:
gssha_card_name(str): Name of GSSHA project card for grid.
Returns:
GDALGrid | [
"Returns",
"GDALGrid",
"object",
"of",
"GSSHA",
"grid"
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/prj.py#L1136-L1161 | train |
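`getGridByCard` resolves any grid-backed card into a `GDALGrid` (from the gazar dependency, as assumed here). Combined with the watershed mask, that enables the same masked-array arithmetic used later in this module:

```python
import numpy as np

elev = project_file.getGridByCard('ELEVATION').np_array()
mask = project_file.getGrid().np_array()   # getGrid() uses WATERSHED_MASK by default

in_basin = np.ma.array(elev, mask=(mask == 0))
print(elev.shape, float(in_basin.min()), float(in_basin.mean()))
```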
CI-WATER/gsshapy | gsshapy/orm/prj.py | ProjectFile.getGrid | def getGrid(self, use_mask=True):
"""
Returns GDALGrid object of GSSHA model bounds
Parameters:
use_mask(bool): If True, uses watershed mask. Otherwise, it uses the elevation grid.
Returns:
GDALGrid
"""
grid_card_name = "WATERSHED_MASK"
if not use_mask:
grid_card_name = "ELEVATION"
return self.getGridByCard(grid_card_name) | python | def getGrid(self, use_mask=True):
"""
Returns GDALGrid object of GSSHA model bounds
Parameters:
use_mask(bool): If True, uses watershed mask. Otherwise, it uses the elevation grid.
Returns:
GDALGrid
"""
grid_card_name = "WATERSHED_MASK"
if not use_mask:
grid_card_name = "ELEVATION"
return self.getGridByCard(grid_card_name) | [
"def",
"getGrid",
"(",
"self",
",",
"use_mask",
"=",
"True",
")",
":",
"grid_card_name",
"=",
"\"WATERSHED_MASK\"",
"if",
"not",
"use_mask",
":",
"grid_card_name",
"=",
"\"ELEVATION\"",
"return",
"self",
".",
"getGridByCard",
"(",
"grid_card_name",
")"
] | Returns GDALGrid object of GSSHA model bounds
Parameters:
use_mask(bool): If True, uses watershed mask. Otherwise, it uses the elevation grid.
Returns:
GDALGrid | [
"Returns",
"GDALGrid",
"object",
"of",
"GSSHA",
"model",
"bounds"
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/prj.py#L1163-L1178 | train |
CI-WATER/gsshapy | gsshapy/orm/prj.py | ProjectFile.getIndexGrid | def getIndexGrid(self, name):
"""
Returns GDALGrid object of index map
Parameters:
name(str): Name of index map in 'cmt' file.
Returns:
GDALGrid
"""
index_map = self.mapTableFile.indexMaps.filter_by(name=name).one()
gssha_pro_card = self.getCard("#PROJECTION_FILE")
if gssha_pro_card is None:
raise ValueError("#PROJECTION_FILE card not found ...")
with tmp_chdir(self.project_directory):
# return gssha grid
return GDALGrid(index_map.filename,
gssha_pro_card.value.strip('"').strip("'")) | python | def getIndexGrid(self, name):
"""
Returns GDALGrid object of index map
Parameters:
name(str): Name of index map in 'cmt' file.
Returns:
GDALGrid
"""
index_map = self.mapTableFile.indexMaps.filter_by(name=name).one()
gssha_pro_card = self.getCard("#PROJECTION_FILE")
if gssha_pro_card is None:
raise ValueError("#PROJECTION_FILE card not found ...")
with tmp_chdir(self.project_directory):
# return gssha grid
return GDALGrid(index_map.filename,
gssha_pro_card.value.strip('"').strip("'")) | [
"def",
"getIndexGrid",
"(",
"self",
",",
"name",
")",
":",
"index_map",
"=",
"self",
".",
"mapTableFile",
".",
"indexMaps",
".",
"filter_by",
"(",
"name",
"=",
"name",
")",
".",
"one",
"(",
")",
"gssha_pro_card",
"=",
"self",
".",
"getCard",
"(",
"\"#PROJECTION_FILE\"",
")",
"if",
"gssha_pro_card",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"#PROJECTION_FILE card not found ...\"",
")",
"with",
"tmp_chdir",
"(",
"self",
".",
"project_directory",
")",
":",
"# return gssha grid",
"return",
"GDALGrid",
"(",
"index_map",
".",
"filename",
",",
"gssha_pro_card",
".",
"value",
".",
"strip",
"(",
"'\"'",
")",
".",
"strip",
"(",
"\"'\"",
")",
")"
] | Returns GDALGrid object of index map
Parameters:
name(str): Name of index map in 'cmt' file.
Returns:
GDALGrid | [
"Returns",
"GDALGrid",
"object",
"of",
"index",
"map"
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/prj.py#L1180-L1199 | train |
CI-WATER/gsshapy | gsshapy/orm/prj.py | ProjectFile.getWkt | def getWkt(self):
"""
Returns GSSHA projection WKT string
"""
gssha_pro_card = self.getCard("#PROJECTION_FILE")
if gssha_pro_card is None:
raise ValueError("#PROJECTION_FILE card not found ...")
with tmp_chdir(self.project_directory):
gssha_prj_file = gssha_pro_card.value.strip('"').strip("'")
with open(gssha_prj_file) as pro_file:
wkt_string = pro_file.read()
return wkt_string | python | def getWkt(self):
"""
Returns GSSHA projection WKT string
"""
gssha_pro_card = self.getCard("#PROJECTION_FILE")
if gssha_pro_card is None:
raise ValueError("#PROJECTION_FILE card not found ...")
with tmp_chdir(self.project_directory):
gssha_prj_file = gssha_pro_card.value.strip('"').strip("'")
with open(gssha_prj_file) as pro_file:
wkt_string = pro_file.read()
return wkt_string | [
"def",
"getWkt",
"(",
"self",
")",
":",
"gssha_pro_card",
"=",
"self",
".",
"getCard",
"(",
"\"#PROJECTION_FILE\"",
")",
"if",
"gssha_pro_card",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"#PROJECTION_FILE card not found ...\"",
")",
"with",
"tmp_chdir",
"(",
"self",
".",
"project_directory",
")",
":",
"gssha_prj_file",
"=",
"gssha_pro_card",
".",
"value",
".",
"strip",
"(",
"'\"'",
")",
".",
"strip",
"(",
"\"'\"",
")",
"with",
"open",
"(",
"gssha_prj_file",
")",
"as",
"pro_file",
":",
"wkt_string",
"=",
"pro_file",
".",
"read",
"(",
")",
"return",
"wkt_string"
] | Returns GSSHA projection WKT string | [
"Returns",
"GSSHA",
"projection",
"WKT",
"string"
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/prj.py#L1201-L1213 | train |
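`getWkt` simply returns the contents of the `#PROJECTION_FILE`, which is handy for passing the model's spatial reference to other tools:

```python
wkt = project_file.getWkt()
print(wkt[:60], '...')   # e.g. 'PROJCS["NAD_1983_UTM_Zone_12N", ...' for a UTM model
```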
CI-WATER/gsshapy | gsshapy/orm/prj.py | ProjectFile.getOutlet | def getOutlet(self):
"""
Gets the outlet latitude and longitude.
Returns:
latitude(float): Latitude of grid cell center.
longitude(float): Longitude of grid cell center.
"""
# OUTROW, OUTCOL
outrow = int(self.getCard(name='OUTROW').value)-1
outcol = int(self.getCard(name='OUTCOL').value)-1
gssha_grid = self.getGrid()
return gssha_grid.pixel2lonlat(outcol, outrow) | python | def getOutlet(self):
"""
Gets the outlet latitude and longitude.
Returns:
latitude(float): Latitude of grid cell center.
longitude(float): Longitude of grid cell center.
"""
# OUTROW, OUTCOL
outrow = int(self.getCard(name='OUTROW').value)-1
outcol = int(self.getCard(name='OUTCOL').value)-1
gssha_grid = self.getGrid()
return gssha_grid.pixel2lonlat(outcol, outrow) | [
"def",
"getOutlet",
"(",
"self",
")",
":",
"# OUTROW, OUTCOL",
"outrow",
"=",
"int",
"(",
"self",
".",
"getCard",
"(",
"name",
"=",
"'OUTROW'",
")",
".",
"value",
")",
"-",
"1",
"outcol",
"=",
"int",
"(",
"self",
".",
"getCard",
"(",
"name",
"=",
"'OUTCOL'",
")",
".",
"value",
")",
"-",
"1",
"gssha_grid",
"=",
"self",
".",
"getGrid",
"(",
")",
"return",
"gssha_grid",
".",
"pixel2lonlat",
"(",
"outcol",
",",
"outrow",
")"
] | Gets the outlet latitude and longitude.
Returns:
latitude(float): Latitude of grid cell center.
longitude(float): Longitude of grid cell center. | [
"Gets",
"the",
"outlet",
"latitude",
"and",
"longitude",
"."
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/prj.py#L1215-L1227 | train |
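Usage sketch for `getOutlet`; note the docstring lists latitude first, while the underlying `pixel2lonlat` call suggests `(longitude, latitude)` ordering, so verify the order against your gsshapy/gazar version before relying on it:

```python
x, y = project_file.getOutlet()   # (lon, lat) per pixel2lonlat; check your version
print('outlet at', x, y)
```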
CI-WATER/gsshapy | gsshapy/orm/prj.py | ProjectFile.setOutlet | def setOutlet(self, col, row, outslope=None):
"""
Sets the outlet grid cell information in the project file.
Parameters:
col(float): 1-based column index.
row(float): 1-based row index.
outslope(Optional[float]): River slope at outlet.
"""
#OUTROW, OUTCOL, OUTSLOPE
gssha_grid = self.getGrid()
# col, row = gssha_grid.lonlat2pixel(longitude, latitude)
# add 1 to row & col becasue GSSHA is 1-based
self.setCard(name='OUTROW', value=str(row))
self.setCard(name='OUTCOL', value=str(col))
if outslope is None:
self.calculateOutletSlope()
else:
self.setCard(name='OUTSLOPE', value=str(outslope)) | python | def setOutlet(self, col, row, outslope=None):
"""
Sets the outlet grid cell information in the project file.
Parameters:
col(float): 1-based column index.
row(float): 1-based row index.
outslope(Optional[float]): River slope at outlet.
"""
#OUTROW, OUTCOL, OUTSLOPE
gssha_grid = self.getGrid()
# col, row = gssha_grid.lonlat2pixel(longitude, latitude)
# add 1 to row & col becasue GSSHA is 1-based
self.setCard(name='OUTROW', value=str(row))
self.setCard(name='OUTCOL', value=str(col))
if outslope is None:
self.calculateOutletSlope()
else:
self.setCard(name='OUTSLOPE', value=str(outslope)) | [
"def",
"setOutlet",
"(",
"self",
",",
"col",
",",
"row",
",",
"outslope",
"=",
"None",
")",
":",
"#OUTROW, OUTCOL, OUTSLOPE",
"gssha_grid",
"=",
"self",
".",
"getGrid",
"(",
")",
"# col, row = gssha_grid.lonlat2pixel(longitude, latitude)",
"# add 1 to row & col becasue GSSHA is 1-based",
"self",
".",
"setCard",
"(",
"name",
"=",
"'OUTROW'",
",",
"value",
"=",
"str",
"(",
"row",
")",
")",
"self",
".",
"setCard",
"(",
"name",
"=",
"'OUTCOL'",
",",
"value",
"=",
"str",
"(",
"col",
")",
")",
"if",
"outslope",
"is",
"None",
":",
"self",
".",
"calculateOutletSlope",
"(",
")",
"else",
":",
"self",
".",
"setCard",
"(",
"name",
"=",
"'OUTSLOPE'",
",",
"value",
"=",
"str",
"(",
"outslope",
")",
")"
] | Sets the outlet grid cell information in the project file.
Parameters:
col(float): 1-based column index.
row(float): 1-based row index.
outslope(Optional[float]): River slope at outlet. | [
"Sets",
"the",
"outlet",
"grid",
"cell",
"information",
"in",
"the",
"project",
"file",
"."
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/prj.py#L1229-L1247 | train |
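`setOutlet` takes 1-based indices (the GSSHA convention) and, when `outslope` is omitted, falls back to `calculateOutletSlope` below; the cell indices here are hypothetical:

```python
project_file.setOutlet(col=42, row=17)                  # slope derived automatically
project_file.setOutlet(col=42, row=17, outslope=0.005)  # or pin the slope explicitly
```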
CI-WATER/gsshapy | gsshapy/orm/prj.py | ProjectFile.findOutlet | def findOutlet(self, shapefile_path):
"""
Calculate outlet location
"""
# determine outlet from shapefile
# by getting outlet from first point in polygon
# make sure the boundary geometry is valid
check_watershed_boundary_geometry(shapefile_path)
shapefile = ogr.Open(shapefile_path)
source_layer = shapefile.GetLayer(0)
source_lyr_proj = source_layer.GetSpatialRef()
osr_geographic_proj = osr.SpatialReference()
osr_geographic_proj.ImportFromEPSG(4326)
proj_transform = osr.CoordinateTransformation(source_lyr_proj,
osr_geographic_proj)
boundary_feature = source_layer.GetFeature(0)
feat_geom = boundary_feature.GetGeometryRef()
feat_geom.Transform(proj_transform)
polygon = shapely_loads(feat_geom.ExportToWkb())
# make the lowest point on the boundary the outlet
mask_grid = self.getGrid()
elevation_grid = self.getGrid(use_mask=False)
elevation_array = elevation_grid.np_array()
ma_elevation_array = np.ma.array(elevation_array,
mask=mask_grid.np_array()==0)
min_elevation = sys.maxsize
outlet_pt = None
for coord in list(polygon.exterior.coords):
try:
col, row = mask_grid.lonlat2pixel(*coord)
except IndexError:
# out of bounds
continue
elevation_value = ma_elevation_array[row, col]
if elevation_value is np.ma.masked:
# search for closest value in mask to this point
# elevation within 5 pixels in any direction
actual_value = elevation_array[row, col]
max_diff = sys.maxsize
nrow = None
ncol = None
nval = None
for row_ix in range(max(row-5, 0), min(row+5, mask_grid.y_size)):
for col_ix in range(max(col-5, 0), min(col+5, mask_grid.x_size)):
val = ma_elevation_array[row_ix, col_ix]
if not val is np.ma.masked:
val_diff = abs(val-actual_value)
if val_diff < max_diff:
max_diff = val_diff
nval = val
nrow = row_ix
ncol = col_ix
if None not in (nrow, ncol, nval):
row = nrow
col = ncol
elevation_value = nval
if elevation_value < min_elevation:
min_elevation = elevation_value
outlet_pt = (col, row)
if outlet_pt is None:
raise IndexError('No valid outlet points found on boundary ...')
outcol, outrow = outlet_pt
self.setOutlet(col=outcol+1, row=outrow+1) | python | def findOutlet(self, shapefile_path):
"""
Calculate outlet location
"""
# determine outlet from shapefile
# by getting outlet from first point in polygon
# make sure the boundary geometry is valid
check_watershed_boundary_geometry(shapefile_path)
shapefile = ogr.Open(shapefile_path)
source_layer = shapefile.GetLayer(0)
source_lyr_proj = source_layer.GetSpatialRef()
osr_geographic_proj = osr.SpatialReference()
osr_geographic_proj.ImportFromEPSG(4326)
proj_transform = osr.CoordinateTransformation(source_lyr_proj,
osr_geographic_proj)
boundary_feature = source_layer.GetFeature(0)
feat_geom = boundary_feature.GetGeometryRef()
feat_geom.Transform(proj_transform)
polygon = shapely_loads(feat_geom.ExportToWkb())
# make the lowest point on the boundary the outlet
mask_grid = self.getGrid()
elevation_grid = self.getGrid(use_mask=False)
elevation_array = elevation_grid.np_array()
ma_elevation_array = np.ma.array(elevation_array,
mask=mask_grid.np_array()==0)
min_elevation = sys.maxsize
outlet_pt = None
for coord in list(polygon.exterior.coords):
try:
col, row = mask_grid.lonlat2pixel(*coord)
except IndexError:
# out of bounds
continue
elevation_value = ma_elevation_array[row, col]
if elevation_value is np.ma.masked:
# search for closest value in mask to this point
# elevation within 5 pixels in any direction
actual_value = elevation_array[row, col]
max_diff = sys.maxsize
nrow = None
ncol = None
nval = None
for row_ix in range(max(row-5, 0), min(row+5, mask_grid.y_size)):
for col_ix in range(max(col-5, 0), min(col+5, mask_grid.x_size)):
val = ma_elevation_array[row_ix, col_ix]
if not val is np.ma.masked:
val_diff = abs(val-actual_value)
if val_diff < max_diff:
max_diff = val_diff
nval = val
nrow = row_ix
ncol = col_ix
if None not in (nrow, ncol, nval):
row = nrow
col = ncol
elevation_value = nval
if elevation_value < min_elevation:
min_elevation = elevation_value
outlet_pt = (col, row)
if outlet_pt is None:
raise IndexError('No valid outlet points found on boundary ...')
outcol, outrow = outlet_pt
self.setOutlet(col=outcol+1, row=outrow+1) | [
"def",
"findOutlet",
"(",
"self",
",",
"shapefile_path",
")",
":",
"# determine outlet from shapefile",
"# by getting outlet from first point in polygon",
"# make sure the boundary geometry is valid",
"check_watershed_boundary_geometry",
"(",
"shapefile_path",
")",
"shapefile",
"=",
"ogr",
".",
"Open",
"(",
"shapefile_path",
")",
"source_layer",
"=",
"shapefile",
".",
"GetLayer",
"(",
"0",
")",
"source_lyr_proj",
"=",
"source_layer",
".",
"GetSpatialRef",
"(",
")",
"osr_geographic_proj",
"=",
"osr",
".",
"SpatialReference",
"(",
")",
"osr_geographic_proj",
".",
"ImportFromEPSG",
"(",
"4326",
")",
"proj_transform",
"=",
"osr",
".",
"CoordinateTransformation",
"(",
"source_lyr_proj",
",",
"osr_geographic_proj",
")",
"boundary_feature",
"=",
"source_layer",
".",
"GetFeature",
"(",
"0",
")",
"feat_geom",
"=",
"boundary_feature",
".",
"GetGeometryRef",
"(",
")",
"feat_geom",
".",
"Transform",
"(",
"proj_transform",
")",
"polygon",
"=",
"shapely_loads",
"(",
"feat_geom",
".",
"ExportToWkb",
"(",
")",
")",
"# make lowest point on boundary outlet",
"mask_grid",
"=",
"self",
".",
"getGrid",
"(",
")",
"elevation_grid",
"=",
"self",
".",
"getGrid",
"(",
"use_mask",
"=",
"False",
")",
"elevation_array",
"=",
"elevation_grid",
".",
"np_array",
"(",
")",
"ma_elevation_array",
"=",
"np",
".",
"ma",
".",
"array",
"(",
"elevation_array",
",",
"mask",
"=",
"mask_grid",
".",
"np_array",
"(",
")",
"==",
"0",
")",
"min_elevation",
"=",
"sys",
".",
"maxsize",
"outlet_pt",
"=",
"None",
"for",
"coord",
"in",
"list",
"(",
"polygon",
".",
"exterior",
".",
"coords",
")",
":",
"try",
":",
"col",
",",
"row",
"=",
"mask_grid",
".",
"lonlat2pixel",
"(",
"*",
"coord",
")",
"except",
"IndexError",
":",
"# out of bounds",
"continue",
"elevation_value",
"=",
"ma_elevation_array",
"[",
"row",
",",
"col",
"]",
"if",
"elevation_value",
"is",
"np",
".",
"ma",
".",
"masked",
":",
"# search for closest value in mask to this point",
"# elevation within 5 pixels in any direction",
"actual_value",
"=",
"elevation_array",
"[",
"row",
",",
"col",
"]",
"max_diff",
"=",
"sys",
".",
"maxsize",
"nrow",
"=",
"None",
"ncol",
"=",
"None",
"nval",
"=",
"None",
"for",
"row_ix",
"in",
"range",
"(",
"max",
"(",
"row",
"-",
"5",
",",
"0",
")",
",",
"min",
"(",
"row",
"+",
"5",
",",
"mask_grid",
".",
"y_size",
")",
")",
":",
"for",
"col_ix",
"in",
"range",
"(",
"max",
"(",
"col",
"-",
"5",
",",
"0",
")",
",",
"min",
"(",
"col",
"+",
"5",
",",
"mask_grid",
".",
"x_size",
")",
")",
":",
"val",
"=",
"ma_elevation_array",
"[",
"row_ix",
",",
"col_ix",
"]",
"if",
"not",
"val",
"is",
"np",
".",
"ma",
".",
"masked",
":",
"val_diff",
"=",
"abs",
"(",
"val",
"-",
"actual_value",
")",
"if",
"val_diff",
"<",
"max_diff",
":",
"max_diff",
"=",
"val_diff",
"nval",
"=",
"val",
"nrow",
"=",
"row_ix",
"ncol",
"=",
"col_ix",
"if",
"None",
"not",
"in",
"(",
"nrow",
",",
"ncol",
",",
"nval",
")",
":",
"row",
"=",
"nrow",
"col",
"=",
"ncol",
"elevation_value",
"=",
"nval",
"if",
"elevation_value",
"<",
"min_elevation",
":",
"min_elevation",
"=",
"elevation_value",
"outlet_pt",
"=",
"(",
"col",
",",
"row",
")",
"if",
"outlet_pt",
"is",
"None",
":",
"raise",
"IndexError",
"(",
"'No valid outlet points found on boundary ...'",
")",
"outcol",
",",
"outrow",
"=",
"outlet_pt",
"self",
".",
"setOutlet",
"(",
"col",
"=",
"outcol",
"+",
"1",
",",
"row",
"=",
"outrow",
"+",
"1",
")"
] | Calculate outlet location | [
"Calculate",
"outlet",
"location"
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/prj.py#L1249-L1319 | train |
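At its core, `findOutlet` scans the boundary polygon's vertices for the lowest in-basin elevation, falling back to a 5-pixel neighborhood search when a vertex lands on a masked cell. A self-contained toy sketch of the main loop (GDAL/Shapely omitted; the grids and boundary cells are fabricated):

```python
import numpy as np

elev = np.array([[12.0, 11.0, 10.0],
                 [11.5,  9.0,  8.5],
                 [11.0,  9.5,  8.0]])
mask = np.array([[1, 1, 0],
                 [1, 1, 1],
                 [1, 1, 1]])                  # 0 = outside the watershed
ma_elev = np.ma.array(elev, mask=(mask == 0))

boundary_cells = [(0, 0), (0, 2), (2, 0), (2, 2)]  # (row, col) from polygon vertices
outlet, min_elev = None, np.inf
for row, col in boundary_cells:
    val = ma_elev[row, col]
    if val is np.ma.masked:
        continue          # the real method searches nearby unmasked cells instead
    if val < min_elev:
        min_elev, outlet = float(val), (col, row)

print(outlet, min_elev)   # -> (2, 2) 8.0; GSSHA wants 1-based, hence the +1 upstream
```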
CI-WATER/gsshapy | gsshapy/orm/prj.py | ProjectFile.calculateOutletSlope | def calculateOutletSlope(self):
"""
Attempt to determine the slope at the OUTLET
"""
try:
mask_grid = self.getGrid()
elevation_grid = self.getGrid(use_mask=False)
outrow = int(self.getCard("OUTROW").value)-1
outcol = int(self.getCard("OUTCOL").value)-1
cell_size = float(self.getCard("GRIDSIZE").value)
min_row = max(0, outrow-1)
max_row = min(mask_grid.y_size, outrow+2)
min_col = max(0, outcol-1)
max_col = min(mask_grid.x_size, outcol+2)
mask_array = mask_grid.np_array()
mask_array[outrow, outcol] = 0
mask_array = mask_array[min_row:max_row, min_col:max_col]
mask_array = (mask_array==0)
elevation_array = elevation_grid.np_array()
original_elevation = elevation_array[outrow, outcol]
elevation_array = elevation_array[min_row:max_row, min_col:max_col]
slope_calc_array = (elevation_array-original_elevation)/cell_size
#NOTE: Ignoring distance to cells at angles. Assuming too small to matter
mask_array[slope_calc_array<=0] = True
slope_mask_array = np.ma.array(slope_calc_array, mask=mask_array)
outslope = slope_mask_array.mean()
if outslope is np.ma.masked or outslope < 0.001:
outslope = 0.001
except ValueError:
outslope = 0.001
self.setCard("OUTSLOPE", str(outslope)) | python | def calculateOutletSlope(self):
"""
Attempt to determine the slope at the OUTLET
"""
try:
mask_grid = self.getGrid()
elevation_grid = self.getGrid(use_mask=False)
outrow = int(self.getCard("OUTROW").value)-1
outcol = int(self.getCard("OUTCOL").value)-1
cell_size = float(self.getCard("GRIDSIZE").value)
min_row = max(0, outrow-1)
max_row = min(mask_grid.y_size, outrow+2)
min_col = max(0, outcol-1)
max_col = min(mask_grid.x_size, outcol+2)
mask_array = mask_grid.np_array()
mask_array[outrow, outcol] = 0
mask_array = mask_array[min_row:max_row, min_col:max_col]
mask_array = (mask_array==0)
elevation_array = elevation_grid.np_array()
original_elevation = elevation_array[outrow, outcol]
elevation_array = elevation_array[min_row:max_row, min_col:max_col]
slope_calc_array = (elevation_array-original_elevation)/cell_size
#NOTE: Ignoring distance to cells at angles. Assuming too small to matter
mask_array[slope_calc_array<=0] = True
slope_mask_array = np.ma.array(slope_calc_array, mask=mask_array)
outslope = slope_mask_array.mean()
if outslope is np.ma.masked or outslope < 0.001:
outslope = 0.001
except ValueError:
outslope = 0.001
self.setCard("OUTSLOPE", str(outslope)) | [
"def",
"calculateOutletSlope",
"(",
"self",
")",
":",
"try",
":",
"mask_grid",
"=",
"self",
".",
"getGrid",
"(",
")",
"elevation_grid",
"=",
"self",
".",
"getGrid",
"(",
"use_mask",
"=",
"False",
")",
"outrow",
"=",
"int",
"(",
"self",
".",
"getCard",
"(",
"\"OUTROW\"",
")",
".",
"value",
")",
"-",
"1",
"outcol",
"=",
"int",
"(",
"self",
".",
"getCard",
"(",
"\"OUTCOL\"",
")",
".",
"value",
")",
"-",
"1",
"cell_size",
"=",
"float",
"(",
"self",
".",
"getCard",
"(",
"\"GRIDSIZE\"",
")",
".",
"value",
")",
"min_row",
"=",
"max",
"(",
"0",
",",
"outrow",
"-",
"1",
")",
"max_row",
"=",
"min",
"(",
"mask_grid",
".",
"x_size",
",",
"outrow",
"+",
"2",
")",
"min_col",
"=",
"max",
"(",
"0",
",",
"outcol",
"-",
"1",
")",
"max_col",
"=",
"min",
"(",
"mask_grid",
".",
"y_size",
",",
"outcol",
"+",
"2",
")",
"mask_array",
"=",
"mask_grid",
".",
"np_array",
"(",
")",
"mask_array",
"[",
"outrow",
",",
"outcol",
"]",
"=",
"0",
"mask_array",
"=",
"mask_array",
"[",
"min_row",
":",
"max_row",
",",
"min_col",
":",
"max_col",
"]",
"mask_array",
"=",
"(",
"mask_array",
"==",
"0",
")",
"elevation_array",
"=",
"elevation_grid",
".",
"np_array",
"(",
")",
"original_elevation",
"=",
"elevation_array",
"[",
"outrow",
",",
"outcol",
"]",
"elevation_array",
"=",
"elevation_array",
"[",
"min_row",
":",
"max_row",
",",
"min_col",
":",
"max_col",
"]",
"slope_calc_array",
"=",
"(",
"elevation_array",
"-",
"original_elevation",
")",
"/",
"cell_size",
"#NOTE: Ignoring distance to cells at angles. Assuming to small to matter",
"mask_array",
"[",
"slope_calc_array",
"<=",
"0",
"]",
"=",
"True",
"slope_mask_array",
"=",
"np",
".",
"ma",
".",
"array",
"(",
"slope_calc_array",
",",
"mask",
"=",
"mask_array",
")",
"outslope",
"=",
"slope_mask_array",
".",
"mean",
"(",
")",
"if",
"outslope",
"is",
"np",
".",
"ma",
".",
"masked",
"or",
"outslope",
"<",
"0.001",
":",
"outslope",
"=",
"0.001",
"except",
"ValueError",
":",
"outslope",
"=",
"0.001",
"self",
".",
"setCard",
"(",
"\"OUTSLOPE\"",
",",
"str",
"(",
"outslope",
")",
")"
] | Attempt to determine the slope at the OUTLET | [
"Attempt",
"to",
"determine",
"the",
"slope",
"at",
"the",
"OUTLET"
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/prj.py#L1321-L1359 | train |
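The slope calculation above reduces to: difference each neighbor's elevation against the outlet cell, divide by the cell size, mask out non-uphill and out-of-basin neighbors, and average, flooring the result at 0.001. A standalone sketch with fabricated numbers:

```python
import numpy as np

elev = np.array([[10.0, 9.5, 9.0],
                 [ 9.8, 8.0, 9.2],
                 [ 9.9, 7.9, 9.1]])          # outlet cell at the centre (8.0 m)
mask = np.array([[1, 1, 1],
                 [1, 0, 1],                  # the outlet itself is excluded
                 [1, 1, 1]])
cell_size = 30.0                             # GRIDSIZE, same units as elev

slopes = (elev - elev[1, 1]) / cell_size     # rise over run to each neighbour
hidden = (mask == 0) | (slopes <= 0)         # also drop flat/downhill neighbours
outslope = np.ma.array(slopes, mask=hidden).mean()
if outslope is np.ma.masked or outslope < 0.001:
    outslope = 0.001                         # same fallback floor as the method
print(float(outslope))                       # -> 0.05 for this toy window
```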
CI-WATER/gsshapy | gsshapy/orm/prj.py | ProjectFile.timezone | def timezone(self):
"""
timezone of GSSHA model
"""
if self._tz is None:
# GET CENTROID FROM GSSHA GRID
cen_lat, cen_lon = self.centerLatLon()
# update time zone
tf = TimezoneFinder()
tz_name = tf.timezone_at(lng=cen_lon, lat=cen_lat)
self._tz = timezone(tz_name)
return self._tz | python | def timezone(self):
"""
timezone of GSSHA model
"""
if self._tz is None:
# GET CENTROID FROM GSSHA GRID
cen_lat, cen_lon = self.centerLatLon()
# update time zone
tf = TimezoneFinder()
tz_name = tf.timezone_at(lng=cen_lon, lat=cen_lat)
self._tz = timezone(tz_name)
return self._tz | [
"def",
"timezone",
"(",
"self",
")",
":",
"if",
"self",
".",
"_tz",
"is",
"None",
":",
"# GET CENTROID FROM GSSHA GRID",
"cen_lat",
",",
"cen_lon",
"=",
"self",
".",
"centerLatLon",
"(",
")",
"# update time zone",
"tf",
"=",
"TimezoneFinder",
"(",
")",
"tz_name",
"=",
"tf",
".",
"timezone_at",
"(",
"lng",
"=",
"cen_lon",
",",
"lat",
"=",
"cen_lat",
")",
"self",
".",
"_tz",
"=",
"timezone",
"(",
"tz_name",
")",
"return",
"self",
".",
"_tz"
] | timezone of GSSHA model | [
"timezone",
"of",
"GSSHA",
"model"
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/prj.py#L1362-L1374 | train |
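`timezone` (exposed as a property in gsshapy) resolves the grid centroid to a pytz timezone via `timezonefinder` and caches the result; a brief sketch, assuming `project_file` from earlier:

```python
from datetime import datetime

tz = project_file.timezone                      # e.g. <DstTzInfo 'America/Denver' ...>
local = tz.localize(datetime(2002, 8, 30, 12))  # attach the model's local time zone
print(local.isoformat())
```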
CI-WATER/gsshapy | gsshapy/orm/prj.py | ProjectFile._getBatchDirectory | def _getBatchDirectory(self, projectRootDirectory):
"""
Check the project file for the REPLACE_FOLDER card. If it exists, append its value to create the batch directory path.
This is the directory output is written to when run in batch mode.
"""
# Set output directory to main directory as default
batchDirectory = projectRootDirectory
# Get the replace folder card
replaceFolderCard = self.getCard('REPLACE_FOLDER')
if replaceFolderCard:
replaceDir = replaceFolderCard.value.strip('"')
batchDirectory = os.path.join(batchDirectory, replaceDir)
# Create directory if it doesn't exist
if not os.path.isdir(batchDirectory):
os.mkdir(batchDirectory)
log.info('Creating directory for batch output: {0}'.format(batchDirectory))
return batchDirectory | python | def _getBatchDirectory(self, projectRootDirectory):
"""
Check the project file for the REPLACE_FOLDER card. If it exists, append its value to create the batch directory path.
This is the directory output is written to when run in batch mode.
"""
# Set output directory to main directory as default
batchDirectory = projectRootDirectory
# Get the replace folder card
replaceFolderCard = self.getCard('REPLACE_FOLDER')
if replaceFolderCard:
replaceDir = replaceFolderCard.value.strip('"')
batchDirectory = os.path.join(batchDirectory, replaceDir)
# Create directory if it doesn't exist
if not os.path.isdir(batchDirectory):
os.mkdir(batchDirectory)
log.info('Creating directory for batch output: {0}'.format(batchDirectory))
return batchDirectory | [
"def",
"_getBatchDirectory",
"(",
"self",
",",
"projectRootDirectory",
")",
":",
"# Set output directory to main directory as default",
"batchDirectory",
"=",
"projectRootDirectory",
"# Get the replace folder card",
"replaceFolderCard",
"=",
"self",
".",
"getCard",
"(",
"'REPLACE_FOLDER'",
")",
"if",
"replaceFolderCard",
":",
"replaceDir",
"=",
"replaceFolderCard",
".",
"value",
".",
"strip",
"(",
"'\"'",
")",
"batchDirectory",
"=",
"os",
".",
"path",
".",
"join",
"(",
"batchDirectory",
",",
"replaceDir",
")",
"# Create directory if it doesn't exist",
"if",
"not",
"os",
".",
"path",
".",
"isdir",
"(",
"batchDirectory",
")",
":",
"os",
".",
"mkdir",
"(",
"batchDirectory",
")",
"log",
".",
"info",
"(",
"'Creating directory for batch output: {0}'",
".",
"format",
"(",
"batchDirectory",
")",
")",
"return",
"batchDirectory"
] | Check the project file for the REPLACE_FOLDER card. If it exists, append its value to create the batch directory path.
This is the directory output is written to when run in batch mode. | [
"Check",
"the",
"project",
"file",
"for",
"the",
"REPLACE_FOLDER",
"card",
".",
"If",
"it",
"exists",
"append",
"it",
"s",
"value",
"to",
"create",
"the",
"batch",
"directory",
"path",
".",
"This",
"is",
"the",
"directory",
"output",
"is",
"written",
"to",
"when",
"run",
"in",
"batch",
"mode",
"."
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/prj.py#L1426-L1446 | train |
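`_getBatchDirectory` is internal, but its behavior is driven by the `REPLACE_FOLDER` card; a hypothetical way to redirect batch output using the public `setCard`/`writeOutput` API (paths and card value are placeholders):

```python
project_file.setCard(name='REPLACE_FOLDER', value='"batch_out"')  # quoted like other paths
project_file.writeOutput(session=session,
                         directory='/path/to/model',
                         name='example')
# output files land in /path/to/model/batch_out, created on demand
```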
CI-WATER/gsshapy | gsshapy/orm/prj.py | ProjectFile._readXput | def _readXput(self, fileCards, directory, session, spatial=False, spatialReferenceID=4326, replaceParamFile=None):
"""
GSSHAPY Project Read Files from File Method
"""
## NOTE: This function is dependent on the project file being read first
# Read Input/Output Files
for card in self.projectCards:
if (card.name in fileCards) and self._noneOrNumValue(card.value) and fileCards[card.name]:
fileIO = fileCards[card.name]
filename = card.value.strip('"')
# Invoke read method on each file
self._invokeRead(fileIO=fileIO,
directory=directory,
filename=filename,
session=session,
spatial=spatial,
spatialReferenceID=spatialReferenceID,
replaceParamFile=replaceParamFile) | python | def _readXput(self, fileCards, directory, session, spatial=False, spatialReferenceID=4326, replaceParamFile=None):
"""
GSSHAPY Project Read Files from File Method
"""
## NOTE: This function is dependent on the project file being read first
# Read Input/Output Files
for card in self.projectCards:
if (card.name in fileCards) and self._noneOrNumValue(card.value) and fileCards[card.name]:
fileIO = fileCards[card.name]
filename = card.value.strip('"')
# Invoke read method on each file
self._invokeRead(fileIO=fileIO,
directory=directory,
filename=filename,
session=session,
spatial=spatial,
spatialReferenceID=spatialReferenceID,
replaceParamFile=replaceParamFile) | [
"def",
"_readXput",
"(",
"self",
",",
"fileCards",
",",
"directory",
",",
"session",
",",
"spatial",
"=",
"False",
",",
"spatialReferenceID",
"=",
"4236",
",",
"replaceParamFile",
"=",
"None",
")",
":",
"## NOTE: This function is dependent on the project file being read first",
"# Read Input/Output Files",
"for",
"card",
"in",
"self",
".",
"projectCards",
":",
"if",
"(",
"card",
".",
"name",
"in",
"fileCards",
")",
"and",
"self",
".",
"_noneOrNumValue",
"(",
"card",
".",
"value",
")",
"and",
"fileCards",
"[",
"card",
".",
"name",
"]",
":",
"fileIO",
"=",
"fileCards",
"[",
"card",
".",
"name",
"]",
"filename",
"=",
"card",
".",
"value",
".",
"strip",
"(",
"'\"'",
")",
"# Invoke read method on each file",
"self",
".",
"_invokeRead",
"(",
"fileIO",
"=",
"fileIO",
",",
"directory",
"=",
"directory",
",",
"filename",
"=",
"filename",
",",
"session",
"=",
"session",
",",
"spatial",
"=",
"spatial",
",",
"spatialReferenceID",
"=",
"spatialReferenceID",
",",
"replaceParamFile",
"=",
"replaceParamFile",
")"
] | GSSHAPY Project Read Files from File Method | [
"GSSHAPY",
"Project",
"Read",
"Files",
"from",
"File",
"Method"
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/prj.py#L1448-L1466 | train |
CI-WATER/gsshapy | gsshapy/orm/prj.py | ProjectFile._readXputMaps | def _readXputMaps(self, mapCards, directory, session, spatial=False, spatialReferenceID=4326, replaceParamFile=None):
"""
GSSHA Project Read Map Files from File Method
"""
if self.mapType in self.MAP_TYPES_SUPPORTED:
for card in self.projectCards:
if (card.name in mapCards) and self._noneOrNumValue(card.value):
filename = card.value.strip('"')
# Invoke read method on each map
self._invokeRead(fileIO=RasterMapFile,
directory=directory,
filename=filename,
session=session,
spatial=spatial,
spatialReferenceID=spatialReferenceID,
replaceParamFile=replaceParamFile)
else:
for card in self.projectCards:
if (card.name in mapCards) and self._noneOrNumValue(card.value):
filename = card.value.strip('"')
fileExtension = filename.split('.')[1]
if fileExtension in self.ALWAYS_READ_AND_WRITE_MAPS:
# Invoke read method on each map
self._invokeRead(fileIO=RasterMapFile,
directory=directory,
filename=filename,
session=session,
spatial=spatial,
spatialReferenceID=spatialReferenceID,
replaceParamFile=replaceParamFile)
log.warning('Could not read map files. '
'MAP_TYPE {0} not supported.'.format(self.mapType)) | python | def _readXputMaps(self, mapCards, directory, session, spatial=False, spatialReferenceID=4236, replaceParamFile=None):
"""
GSSHA Project Read Map Files from File Method
"""
if self.mapType in self.MAP_TYPES_SUPPORTED:
for card in self.projectCards:
if (card.name in mapCards) and self._noneOrNumValue(card.value):
filename = card.value.strip('"')
# Invoke read method on each map
self._invokeRead(fileIO=RasterMapFile,
directory=directory,
filename=filename,
session=session,
spatial=spatial,
spatialReferenceID=spatialReferenceID,
replaceParamFile=replaceParamFile)
else:
for card in self.projectCards:
if (card.name in mapCards) and self._noneOrNumValue(card.value):
filename = card.value.strip('"')
fileExtension = filename.split('.')[1]
if fileExtension in self.ALWAYS_READ_AND_WRITE_MAPS:
# Invoke read method on each map
self._invokeRead(fileIO=RasterMapFile,
directory=directory,
filename=filename,
session=session,
spatial=spatial,
spatialReferenceID=spatialReferenceID,
replaceParamFile=replaceParamFile)
log.warning('Could not read map files. '
'MAP_TYPE {0} not supported.'.format(self.mapType)) | [
"def",
"_readXputMaps",
"(",
"self",
",",
"mapCards",
",",
"directory",
",",
"session",
",",
"spatial",
"=",
"False",
",",
"spatialReferenceID",
"=",
"4236",
",",
"replaceParamFile",
"=",
"None",
")",
":",
"if",
"self",
".",
"mapType",
"in",
"self",
".",
"MAP_TYPES_SUPPORTED",
":",
"for",
"card",
"in",
"self",
".",
"projectCards",
":",
"if",
"(",
"card",
".",
"name",
"in",
"mapCards",
")",
"and",
"self",
".",
"_noneOrNumValue",
"(",
"card",
".",
"value",
")",
":",
"filename",
"=",
"card",
".",
"value",
".",
"strip",
"(",
"'\"'",
")",
"# Invoke read method on each map",
"self",
".",
"_invokeRead",
"(",
"fileIO",
"=",
"RasterMapFile",
",",
"directory",
"=",
"directory",
",",
"filename",
"=",
"filename",
",",
"session",
"=",
"session",
",",
"spatial",
"=",
"spatial",
",",
"spatialReferenceID",
"=",
"spatialReferenceID",
",",
"replaceParamFile",
"=",
"replaceParamFile",
")",
"else",
":",
"for",
"card",
"in",
"self",
".",
"projectCards",
":",
"if",
"(",
"card",
".",
"name",
"in",
"mapCards",
")",
"and",
"self",
".",
"_noneOrNumValue",
"(",
"card",
".",
"value",
")",
":",
"filename",
"=",
"card",
".",
"value",
".",
"strip",
"(",
"'\"'",
")",
"fileExtension",
"=",
"filename",
".",
"split",
"(",
"'.'",
")",
"[",
"1",
"]",
"if",
"fileExtension",
"in",
"self",
".",
"ALWAYS_READ_AND_WRITE_MAPS",
":",
"# Invoke read method on each map",
"self",
".",
"_invokeRead",
"(",
"fileIO",
"=",
"RasterMapFile",
",",
"directory",
"=",
"directory",
",",
"filename",
"=",
"filename",
",",
"session",
"=",
"session",
",",
"spatial",
"=",
"spatial",
",",
"spatialReferenceID",
"=",
"spatialReferenceID",
",",
"replaceParamFile",
"=",
"replaceParamFile",
")",
"log",
".",
"warning",
"(",
"'Could not read map files. '",
"'MAP_TYPE {0} not supported.'",
".",
"format",
"(",
"self",
".",
"mapType",
")",
")"
] | GSSHA Project Read Map Files from File Method | [
"GSSHA",
"Project",
"Read",
"Map",
"Files",
"from",
"File",
"Method"
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/prj.py#L1468-L1501 | train |
CI-WATER/gsshapy | gsshapy/orm/prj.py | ProjectFile._readWMSDatasets | def _readWMSDatasets(self, datasetCards, directory, session, spatial=False, spatialReferenceID=4236):
"""
Method to handle the special case of WMS Dataset Files. WMS Dataset Files
cannot be read in independently as other types of file can. They rely on
the Mask Map file for some parameters.
"""
if self.mapType in self.MAP_TYPES_SUPPORTED:
# Get Mask Map dependency
maskMap = session.query(RasterMapFile).\
filter(RasterMapFile.projectFile == self).\
filter(RasterMapFile.fileExtension == 'msk').\
one()
for card in self.projectCards:
if (card.name in datasetCards) and self._noneOrNumValue(card.value):
# Get filename from project file
filename = card.value.strip('"')
path = os.path.join(directory, filename)
if os.path.isfile(path):
wmsDatasetFile = WMSDatasetFile()
wmsDatasetFile.projectFile = self
wmsDatasetFile.read(directory=directory,
filename=filename,
session=session,
maskMap=maskMap,
spatial=spatial,
spatialReferenceID=spatialReferenceID)
else:
self._readBatchOutputForFile(directory, WMSDatasetFile, filename, session, spatial,
spatialReferenceID, maskMap=maskMap) | python | def _readWMSDatasets(self, datasetCards, directory, session, spatial=False, spatialReferenceID=4236):
"""
Method to handle the special case of WMS Dataset Files. WMS Dataset Files
cannot be read in independently as other types of file can. They rely on
the Mask Map file for some parameters.
"""
if self.mapType in self.MAP_TYPES_SUPPORTED:
# Get Mask Map dependency
maskMap = session.query(RasterMapFile).\
filter(RasterMapFile.projectFile == self).\
filter(RasterMapFile.fileExtension == 'msk').\
one()
for card in self.projectCards:
if (card.name in datasetCards) and self._noneOrNumValue(card.value):
# Get filename from project file
filename = card.value.strip('"')
path = os.path.join(directory, filename)
if os.path.isfile(path):
wmsDatasetFile = WMSDatasetFile()
wmsDatasetFile.projectFile = self
wmsDatasetFile.read(directory=directory,
filename=filename,
session=session,
maskMap=maskMap,
spatial=spatial,
spatialReferenceID=spatialReferenceID)
else:
self._readBatchOutputForFile(directory, WMSDatasetFile, filename, session, spatial,
spatialReferenceID, maskMap=maskMap) | [
"def",
"_readWMSDatasets",
"(",
"self",
",",
"datasetCards",
",",
"directory",
",",
"session",
",",
"spatial",
"=",
"False",
",",
"spatialReferenceID",
"=",
"4236",
")",
":",
"if",
"self",
".",
"mapType",
"in",
"self",
".",
"MAP_TYPES_SUPPORTED",
":",
"# Get Mask Map dependency",
"maskMap",
"=",
"session",
".",
"query",
"(",
"RasterMapFile",
")",
".",
"filter",
"(",
"RasterMapFile",
".",
"projectFile",
"==",
"self",
")",
".",
"filter",
"(",
"RasterMapFile",
".",
"fileExtension",
"==",
"'msk'",
")",
".",
"one",
"(",
")",
"for",
"card",
"in",
"self",
".",
"projectCards",
":",
"if",
"(",
"card",
".",
"name",
"in",
"datasetCards",
")",
"and",
"self",
".",
"_noneOrNumValue",
"(",
"card",
".",
"value",
")",
":",
"# Get filename from project file",
"filename",
"=",
"card",
".",
"value",
".",
"strip",
"(",
"'\"'",
")",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"filename",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"path",
")",
":",
"wmsDatasetFile",
"=",
"WMSDatasetFile",
"(",
")",
"wmsDatasetFile",
".",
"projectFile",
"=",
"self",
"wmsDatasetFile",
".",
"read",
"(",
"directory",
"=",
"directory",
",",
"filename",
"=",
"filename",
",",
"session",
"=",
"session",
",",
"maskMap",
"=",
"maskMap",
",",
"spatial",
"=",
"spatial",
",",
"spatialReferenceID",
"=",
"spatialReferenceID",
")",
"else",
":",
"self",
".",
"_readBatchOutputForFile",
"(",
"directory",
",",
"WMSDatasetFile",
",",
"filename",
",",
"session",
",",
"spatial",
",",
"spatialReferenceID",
",",
"maskMap",
"=",
"maskMap",
")"
] | Method to handle the special case of WMS Dataset Files. WMS Dataset Files
cannot be read in independently as other types of file can. They rely on
the Mask Map file for some parameters. | [
"Method",
"to",
"handle",
"the",
"special",
"case",
"of",
"WMS",
"Dataset",
"Files",
".",
"WMS",
"Dataset",
"Files",
"cannot",
"be",
"read",
"in",
"independently",
"as",
"other",
"types",
"of",
"file",
"can",
".",
"They",
"rely",
"on",
"the",
"Mask",
"Map",
"file",
"for",
"some",
"parameters",
"."
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/prj.py#L1503-L1533 | train |
CI-WATER/gsshapy | gsshapy/orm/prj.py | ProjectFile._readBatchOutputForFile | def _readBatchOutputForFile(self, directory, fileIO, filename, session, spatial, spatialReferenceID,
replaceParamFile=None, maskMap=None):
"""
When batch mode is run in GSSHA, the files of the same type are
prepended with an integer to avoid filename conflicts.
This will attempt to read files in this format and
throw warnings if the files aren't found.
"""
# Get contents of directory
directoryList = os.listdir(directory)
        # Compile a list of files that include the filename
batchFiles = []
for thing in directoryList:
if filename in thing:
batchFiles.append(thing)
numFilesRead = 0
for batchFile in batchFiles:
instance = fileIO()
instance.projectFile = self
if isinstance(instance, WMSDatasetFile):
instance.read(directory=directory, filename=batchFile, session=session, maskMap=maskMap, spatial=spatial,
spatialReferenceID=spatialReferenceID)
else:
instance.read(directory, batchFile, session, spatial=spatial, spatialReferenceID=spatialReferenceID,
replaceParamFile=replaceParamFile)
# Increment runCounter for next file
numFilesRead += 1
# Issue warnings
if '[' in filename or ']' in filename:
log.info('A file cannot be read, because the path to the '
'file in the project file has been replaced with '
'replacement variable {0}.'.format(filename))
elif numFilesRead == 0:
log.warning('{0} listed in project file, but no such '
'file exists.'.format(filename))
else:
log.info('Batch mode output detected. {0} files read '
'for file {1}'.format(numFilesRead, filename)) | python | def _readBatchOutputForFile(self, directory, fileIO, filename, session, spatial, spatialReferenceID,
replaceParamFile=None, maskMap=None):
"""
When batch mode is run in GSSHA, the files of the same type are
prepended with an integer to avoid filename conflicts.
This will attempt to read files in this format and
throw warnings if the files aren't found.
"""
# Get contents of directory
directoryList = os.listdir(directory)
        # Compile a list of files that include the filename
batchFiles = []
for thing in directoryList:
if filename in thing:
batchFiles.append(thing)
numFilesRead = 0
for batchFile in batchFiles:
instance = fileIO()
instance.projectFile = self
if isinstance(instance, WMSDatasetFile):
instance.read(directory=directory, filename=batchFile, session=session, maskMap=maskMap, spatial=spatial,
spatialReferenceID=spatialReferenceID)
else:
instance.read(directory, batchFile, session, spatial=spatial, spatialReferenceID=spatialReferenceID,
replaceParamFile=replaceParamFile)
# Increment runCounter for next file
numFilesRead += 1
# Issue warnings
if '[' in filename or ']' in filename:
log.info('A file cannot be read, because the path to the '
'file in the project file has been replaced with '
'replacement variable {0}.'.format(filename))
elif numFilesRead == 0:
log.warning('{0} listed in project file, but no such '
'file exists.'.format(filename))
else:
log.info('Batch mode output detected. {0} files read '
'for file {1}'.format(numFilesRead, filename)) | [
"def",
"_readBatchOutputForFile",
"(",
"self",
",",
"directory",
",",
"fileIO",
",",
"filename",
",",
"session",
",",
"spatial",
",",
"spatialReferenceID",
",",
"replaceParamFile",
"=",
"None",
",",
"maskMap",
"=",
"None",
")",
":",
"# Get contents of directory",
"directoryList",
"=",
"os",
".",
"listdir",
"(",
"directory",
")",
"# Compile a list of files with that include the filename in them",
"batchFiles",
"=",
"[",
"]",
"for",
"thing",
"in",
"directoryList",
":",
"if",
"filename",
"in",
"thing",
":",
"batchFiles",
".",
"append",
"(",
"thing",
")",
"numFilesRead",
"=",
"0",
"for",
"batchFile",
"in",
"batchFiles",
":",
"instance",
"=",
"fileIO",
"(",
")",
"instance",
".",
"projectFile",
"=",
"self",
"if",
"isinstance",
"(",
"instance",
",",
"WMSDatasetFile",
")",
":",
"instance",
".",
"read",
"(",
"directory",
"=",
"directory",
",",
"filename",
"=",
"batchFile",
",",
"session",
"=",
"session",
",",
"maskMap",
"=",
"maskMap",
",",
"spatial",
"=",
"spatial",
",",
"spatialReferenceID",
"=",
"spatialReferenceID",
")",
"else",
":",
"instance",
".",
"read",
"(",
"directory",
",",
"batchFile",
",",
"session",
",",
"spatial",
"=",
"spatial",
",",
"spatialReferenceID",
"=",
"spatialReferenceID",
",",
"replaceParamFile",
"=",
"replaceParamFile",
")",
"# Increment runCounter for next file",
"numFilesRead",
"+=",
"1",
"# Issue warnings",
"if",
"'['",
"in",
"filename",
"or",
"']'",
"in",
"filename",
":",
"log",
".",
"info",
"(",
"'A file cannot be read, because the path to the '",
"'file in the project file has been replaced with '",
"'replacement variable {0}.'",
".",
"format",
"(",
"filename",
")",
")",
"elif",
"numFilesRead",
"==",
"0",
":",
"log",
".",
"warning",
"(",
"'{0} listed in project file, but no such '",
"'file exists.'",
".",
"format",
"(",
"filename",
")",
")",
"else",
":",
"log",
".",
"info",
"(",
"'Batch mode output detected. {0} files read '",
"'for file {1}'",
".",
"format",
"(",
"numFilesRead",
",",
"filename",
")",
")"
] | When batch mode is run in GSSHA, the files of the same type are
prepended with an integer to avoid filename conflicts.
This will attempt to read files in this format and
throw warnings if the files aren't found. | [
"When",
"batch",
"mode",
"is",
"run",
"in",
"GSSHA",
"the",
"files",
"of",
"the",
"same",
"type",
"are",
"prepended",
"with",
"an",
"integer",
"to",
"avoid",
"filename",
"conflicts",
".",
"This",
"will",
"attempt",
"to",
"read",
"files",
"in",
"this",
"format",
"and",
"throw",
"warnings",
"if",
"the",
"files",
"aren",
"t",
"found",
"."
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/prj.py#L1577-L1621 | train |
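The substring test in _readBatchOutputForFile ('if filename in thing') is what sweeps up GSSHA batch-mode outputs, since each run's copy is the base filename with an integer prepended. A small illustration over an invented directory listing:

filename = 'run.cdp'
# Hypothetical listing from a three-run batch job plus unrelated files.
directoryList = ['1_run.cdp', '2_run.cdp', '3_run.cdp', 'run.prj', 'notes.txt']
batchFiles = [thing for thing in directoryList if filename in thing]
print(batchFiles)  # ['1_run.cdp', '2_run.cdp', '3_run.cdp']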
CI-WATER/gsshapy | gsshapy/orm/prj.py | ProjectFile._invokeRead | def _invokeRead(self, fileIO, directory, filename, session, spatial=False,
spatialReferenceID=4236, replaceParamFile=None, **kwargs):
"""
Invoke File Read Method on Other Files
"""
path = os.path.join(directory, filename)
if os.path.isfile(path):
instance = fileIO()
instance.projectFile = self
instance.read(directory, filename, session, spatial=spatial,
spatialReferenceID=spatialReferenceID,
replaceParamFile=replaceParamFile, **kwargs)
return instance
else:
self._readBatchOutputForFile(directory, fileIO, filename, session,
spatial, spatialReferenceID, replaceParamFile) | python | def _invokeRead(self, fileIO, directory, filename, session, spatial=False,
spatialReferenceID=4236, replaceParamFile=None, **kwargs):
"""
Invoke File Read Method on Other Files
"""
path = os.path.join(directory, filename)
if os.path.isfile(path):
instance = fileIO()
instance.projectFile = self
instance.read(directory, filename, session, spatial=spatial,
spatialReferenceID=spatialReferenceID,
replaceParamFile=replaceParamFile, **kwargs)
return instance
else:
self._readBatchOutputForFile(directory, fileIO, filename, session,
spatial, spatialReferenceID, replaceParamFile) | [
"def",
"_invokeRead",
"(",
"self",
",",
"fileIO",
",",
"directory",
",",
"filename",
",",
"session",
",",
"spatial",
"=",
"False",
",",
"spatialReferenceID",
"=",
"4236",
",",
"replaceParamFile",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"filename",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"path",
")",
":",
"instance",
"=",
"fileIO",
"(",
")",
"instance",
".",
"projectFile",
"=",
"self",
"instance",
".",
"read",
"(",
"directory",
",",
"filename",
",",
"session",
",",
"spatial",
"=",
"spatial",
",",
"spatialReferenceID",
"=",
"spatialReferenceID",
",",
"replaceParamFile",
"=",
"replaceParamFile",
",",
"*",
"*",
"kwargs",
")",
"return",
"instance",
"else",
":",
"self",
".",
"_readBatchOutputForFile",
"(",
"directory",
",",
"fileIO",
",",
"filename",
",",
"session",
",",
"spatial",
",",
"spatialReferenceID",
",",
"replaceParamFile",
")"
] | Invoke File Read Method on Other Files | [
"Invoke",
"File",
"Read",
"Method",
"on",
"Other",
"Files"
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/prj.py#L1623-L1639 | train |
CI-WATER/gsshapy | gsshapy/orm/prj.py | ProjectFile._writeXput | def _writeXput(self, session, directory, fileCards,
name=None, replaceParamFile=None):
"""
GSSHA Project Write Files to File Method
"""
for card in self.projectCards:
if (card.name in fileCards) and self._noneOrNumValue(card.value) \
and fileCards[card.name]:
fileIO = fileCards[card.name]
filename = card.value.strip('"')
# Check for replacement variables
if '[' in filename or ']' in filename:
log.info('The file for project card {0} cannot be '
'written, because the path has been replaced '
'with replacement variable {1}.'.format(card.name, filename))
return
# Determine new filename
filename = self._replaceNewFilename(filename=filename,
name=name)
# Invoke write method on each file
self._invokeWrite(fileIO=fileIO,
session=session,
directory=directory,
filename=filename,
replaceParamFile=replaceParamFile) | python | def _writeXput(self, session, directory, fileCards,
name=None, replaceParamFile=None):
"""
GSSHA Project Write Files to File Method
"""
for card in self.projectCards:
if (card.name in fileCards) and self._noneOrNumValue(card.value) \
and fileCards[card.name]:
fileIO = fileCards[card.name]
filename = card.value.strip('"')
# Check for replacement variables
if '[' in filename or ']' in filename:
log.info('The file for project card {0} cannot be '
'written, because the path has been replaced '
'with replacement variable {1}.'.format(card.name, filename))
return
# Determine new filename
filename = self._replaceNewFilename(filename=filename,
name=name)
# Invoke write method on each file
self._invokeWrite(fileIO=fileIO,
session=session,
directory=directory,
filename=filename,
replaceParamFile=replaceParamFile) | [
"def",
"_writeXput",
"(",
"self",
",",
"session",
",",
"directory",
",",
"fileCards",
",",
"name",
"=",
"None",
",",
"replaceParamFile",
"=",
"None",
")",
":",
"for",
"card",
"in",
"self",
".",
"projectCards",
":",
"if",
"(",
"card",
".",
"name",
"in",
"fileCards",
")",
"and",
"self",
".",
"_noneOrNumValue",
"(",
"card",
".",
"value",
")",
"and",
"fileCards",
"[",
"card",
".",
"name",
"]",
":",
"fileIO",
"=",
"fileCards",
"[",
"card",
".",
"name",
"]",
"filename",
"=",
"card",
".",
"value",
".",
"strip",
"(",
"'\"'",
")",
"# Check for replacement variables",
"if",
"'['",
"in",
"filename",
"or",
"']'",
"in",
"filename",
":",
"log",
".",
"info",
"(",
"'The file for project card {0} cannot be '",
"'written, because the path has been replaced '",
"'with replacement variable {1}.'",
".",
"format",
"(",
"card",
".",
"name",
",",
"filename",
")",
")",
"return",
"# Determine new filename",
"filename",
"=",
"self",
".",
"_replaceNewFilename",
"(",
"filename",
"=",
"filename",
",",
"name",
"=",
"name",
")",
"# Invoke write method on each file",
"self",
".",
"_invokeWrite",
"(",
"fileIO",
"=",
"fileIO",
",",
"session",
"=",
"session",
",",
"directory",
"=",
"directory",
",",
"filename",
"=",
"filename",
",",
"replaceParamFile",
"=",
"replaceParamFile",
")"
] | GSSHA Project Write Files to File Method | [
"GSSHA",
"Project",
"Write",
"Files",
"to",
"File",
"Method"
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/prj.py#L1642-L1669 | train |
CI-WATER/gsshapy | gsshapy/orm/prj.py | ProjectFile._writeXputMaps | def _writeXputMaps(self, session, directory, mapCards,
name=None, replaceParamFile=None):
"""
GSSHAPY Project Write Map Files to File Method
"""
if self.mapType in self.MAP_TYPES_SUPPORTED:
for card in self.projectCards:
if (card.name in mapCards) and self._noneOrNumValue(card.value):
filename = card.value.strip('"')
# Determine new filename
filename = self._replaceNewFilename(filename, name)
# Write map file
self._invokeWrite(fileIO=RasterMapFile,
session=session,
directory=directory,
filename=filename,
replaceParamFile=replaceParamFile)
else:
for card in self.projectCards:
if (card.name in mapCards) and self._noneOrNumValue(card.value):
filename = card.value.strip('"')
fileExtension = filename.split('.')[1]
if fileExtension in self.ALWAYS_READ_AND_WRITE_MAPS:
# Determine new filename
filename = self._replaceNewFilename(filename, name)
# Write map file
self._invokeWrite(fileIO=RasterMapFile,
session=session,
directory=directory,
filename=filename,
replaceParamFile=replaceParamFile)
log.error('Could not write map files. MAP_TYPE {0} '
'not supported.'.format(self.mapType)) | python | def _writeXputMaps(self, session, directory, mapCards,
name=None, replaceParamFile=None):
"""
GSSHAPY Project Write Map Files to File Method
"""
if self.mapType in self.MAP_TYPES_SUPPORTED:
for card in self.projectCards:
if (card.name in mapCards) and self._noneOrNumValue(card.value):
filename = card.value.strip('"')
# Determine new filename
filename = self._replaceNewFilename(filename, name)
# Write map file
self._invokeWrite(fileIO=RasterMapFile,
session=session,
directory=directory,
filename=filename,
replaceParamFile=replaceParamFile)
else:
for card in self.projectCards:
if (card.name in mapCards) and self._noneOrNumValue(card.value):
filename = card.value.strip('"')
fileExtension = filename.split('.')[1]
if fileExtension in self.ALWAYS_READ_AND_WRITE_MAPS:
# Determine new filename
filename = self._replaceNewFilename(filename, name)
# Write map file
self._invokeWrite(fileIO=RasterMapFile,
session=session,
directory=directory,
filename=filename,
replaceParamFile=replaceParamFile)
log.error('Could not write map files. MAP_TYPE {0} '
'not supported.'.format(self.mapType)) | [
"def",
"_writeXputMaps",
"(",
"self",
",",
"session",
",",
"directory",
",",
"mapCards",
",",
"name",
"=",
"None",
",",
"replaceParamFile",
"=",
"None",
")",
":",
"if",
"self",
".",
"mapType",
"in",
"self",
".",
"MAP_TYPES_SUPPORTED",
":",
"for",
"card",
"in",
"self",
".",
"projectCards",
":",
"if",
"(",
"card",
".",
"name",
"in",
"mapCards",
")",
"and",
"self",
".",
"_noneOrNumValue",
"(",
"card",
".",
"value",
")",
":",
"filename",
"=",
"card",
".",
"value",
".",
"strip",
"(",
"'\"'",
")",
"# Determine new filename",
"filename",
"=",
"self",
".",
"_replaceNewFilename",
"(",
"filename",
",",
"name",
")",
"# Write map file",
"self",
".",
"_invokeWrite",
"(",
"fileIO",
"=",
"RasterMapFile",
",",
"session",
"=",
"session",
",",
"directory",
"=",
"directory",
",",
"filename",
"=",
"filename",
",",
"replaceParamFile",
"=",
"replaceParamFile",
")",
"else",
":",
"for",
"card",
"in",
"self",
".",
"projectCards",
":",
"if",
"(",
"card",
".",
"name",
"in",
"mapCards",
")",
"and",
"self",
".",
"_noneOrNumValue",
"(",
"card",
".",
"value",
")",
":",
"filename",
"=",
"card",
".",
"value",
".",
"strip",
"(",
"'\"'",
")",
"fileExtension",
"=",
"filename",
".",
"split",
"(",
"'.'",
")",
"[",
"1",
"]",
"if",
"fileExtension",
"in",
"self",
".",
"ALWAYS_READ_AND_WRITE_MAPS",
":",
"# Determine new filename",
"filename",
"=",
"self",
".",
"_replaceNewFilename",
"(",
"filename",
",",
"name",
")",
"# Write map file",
"self",
".",
"_invokeWrite",
"(",
"fileIO",
"=",
"RasterMapFile",
",",
"session",
"=",
"session",
",",
"directory",
"=",
"directory",
",",
"filename",
"=",
"filename",
",",
"replaceParamFile",
"=",
"replaceParamFile",
")",
"log",
".",
"error",
"(",
"'Could not write map files. MAP_TYPE {0} '",
"'not supported.'",
".",
"format",
"(",
"self",
".",
"mapType",
")",
")"
] | GSSHAPY Project Write Map Files to File Method | [
"GSSHAPY",
"Project",
"Write",
"Map",
"Files",
"to",
"File",
"Method"
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/prj.py#L1671-L1709 | train |
CI-WATER/gsshapy | gsshapy/orm/prj.py | ProjectFile._writeWMSDatasets | def _writeWMSDatasets(self, session, directory, wmsDatasetCards, name=None):
"""
GSSHAPY Project Write WMS Datasets to File Method
"""
if self.mapType in self.MAP_TYPES_SUPPORTED:
for card in self.projectCards:
if (card.name in wmsDatasetCards) and self._noneOrNumValue(card.value):
filename = card.value.strip('"')
# Determine new filename
filename = self._replaceNewFilename(filename, name)
# Handle case where fileIO interfaces with multiple files
# Retrieve File using FileIO and file extension
extension = filename.split('.')[1]
# Get mask map file
maskMap = session.query(RasterMapFile).\
filter(RasterMapFile.projectFile == self).\
filter(RasterMapFile.fileExtension == 'msk').\
one()
# Default wms dataset
wmsDataset = None
try:
wmsDataset = session.query(WMSDatasetFile). \
filter(WMSDatasetFile.projectFile == self). \
filter(WMSDatasetFile.fileExtension == extension). \
one()
except NoResultFound:
# Handle case when there is no file in database but
# the card is listed in the project file
log.warning('{0} listed as card in project file, '
'but the file is not found in the database.'.format(filename))
except MultipleResultsFound:
# Write all instances
self._invokeWriteForMultipleOfType(directory, extension,
WMSDatasetFile, filename,
session, maskMap=maskMap)
return
# Initiate Write Method on File
if wmsDataset is not None and maskMap is not None:
wmsDataset.write(session=session, directory=directory,
name=filename, maskMap=maskMap)
else:
log.error('Could not write WMS Dataset files. '
'MAP_TYPE {0} not supported.'.format(self.mapType)) | python | def _writeWMSDatasets(self, session, directory, wmsDatasetCards, name=None):
"""
GSSHAPY Project Write WMS Datasets to File Method
"""
if self.mapType in self.MAP_TYPES_SUPPORTED:
for card in self.projectCards:
if (card.name in wmsDatasetCards) and self._noneOrNumValue(card.value):
filename = card.value.strip('"')
# Determine new filename
filename = self._replaceNewFilename(filename, name)
# Handle case where fileIO interfaces with multiple files
# Retrieve File using FileIO and file extension
extension = filename.split('.')[1]
# Get mask map file
maskMap = session.query(RasterMapFile).\
filter(RasterMapFile.projectFile == self).\
filter(RasterMapFile.fileExtension == 'msk').\
one()
# Default wms dataset
wmsDataset = None
try:
wmsDataset = session.query(WMSDatasetFile). \
filter(WMSDatasetFile.projectFile == self). \
filter(WMSDatasetFile.fileExtension == extension). \
one()
except NoResultFound:
# Handle case when there is no file in database but
# the card is listed in the project file
log.warning('{0} listed as card in project file, '
'but the file is not found in the database.'.format(filename))
except MultipleResultsFound:
# Write all instances
self._invokeWriteForMultipleOfType(directory, extension,
WMSDatasetFile, filename,
session, maskMap=maskMap)
return
# Initiate Write Method on File
if wmsDataset is not None and maskMap is not None:
wmsDataset.write(session=session, directory=directory,
name=filename, maskMap=maskMap)
else:
log.error('Could not write WMS Dataset files. '
'MAP_TYPE {0} not supported.'.format(self.mapType)) | [
"def",
"_writeWMSDatasets",
"(",
"self",
",",
"session",
",",
"directory",
",",
"wmsDatasetCards",
",",
"name",
"=",
"None",
")",
":",
"if",
"self",
".",
"mapType",
"in",
"self",
".",
"MAP_TYPES_SUPPORTED",
":",
"for",
"card",
"in",
"self",
".",
"projectCards",
":",
"if",
"(",
"card",
".",
"name",
"in",
"wmsDatasetCards",
")",
"and",
"self",
".",
"_noneOrNumValue",
"(",
"card",
".",
"value",
")",
":",
"filename",
"=",
"card",
".",
"value",
".",
"strip",
"(",
"'\"'",
")",
"# Determine new filename",
"filename",
"=",
"self",
".",
"_replaceNewFilename",
"(",
"filename",
",",
"name",
")",
"# Handle case where fileIO interfaces with multiple files",
"# Retrieve File using FileIO and file extension",
"extension",
"=",
"filename",
".",
"split",
"(",
"'.'",
")",
"[",
"1",
"]",
"# Get mask map file",
"maskMap",
"=",
"session",
".",
"query",
"(",
"RasterMapFile",
")",
".",
"filter",
"(",
"RasterMapFile",
".",
"projectFile",
"==",
"self",
")",
".",
"filter",
"(",
"RasterMapFile",
".",
"fileExtension",
"==",
"'msk'",
")",
".",
"one",
"(",
")",
"# Default wms dataset",
"wmsDataset",
"=",
"None",
"try",
":",
"wmsDataset",
"=",
"session",
".",
"query",
"(",
"WMSDatasetFile",
")",
".",
"filter",
"(",
"WMSDatasetFile",
".",
"projectFile",
"==",
"self",
")",
".",
"filter",
"(",
"WMSDatasetFile",
".",
"fileExtension",
"==",
"extension",
")",
".",
"one",
"(",
")",
"except",
"NoResultFound",
":",
"# Handle case when there is no file in database but",
"# the card is listed in the project file",
"log",
".",
"warning",
"(",
"'{0} listed as card in project file, '",
"'but the file is not found in the database.'",
".",
"format",
"(",
"filename",
")",
")",
"except",
"MultipleResultsFound",
":",
"# Write all instances",
"self",
".",
"_invokeWriteForMultipleOfType",
"(",
"directory",
",",
"extension",
",",
"WMSDatasetFile",
",",
"filename",
",",
"session",
",",
"maskMap",
"=",
"maskMap",
")",
"return",
"# Initiate Write Method on File",
"if",
"wmsDataset",
"is",
"not",
"None",
"and",
"maskMap",
"is",
"not",
"None",
":",
"wmsDataset",
".",
"write",
"(",
"session",
"=",
"session",
",",
"directory",
"=",
"directory",
",",
"name",
"=",
"filename",
",",
"maskMap",
"=",
"maskMap",
")",
"else",
":",
"log",
".",
"error",
"(",
"'Could not write WMS Dataset files. '",
"'MAP_TYPE {0} not supported.'",
".",
"format",
"(",
"self",
".",
"mapType",
")",
")"
] | GSSHAPY Project Write WMS Datasets to File Method | [
"GSSHAPY",
"Project",
"Write",
"WMS",
"Datasets",
"to",
"File",
"Method"
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/prj.py#L1711-L1761 | train |
CI-WATER/gsshapy | gsshapy/orm/prj.py | ProjectFile._writeReplacementFiles | def _writeReplacementFiles(self, session, directory, name):
"""
Write the replacement files
"""
if self.replaceParamFile:
self.replaceParamFile.write(session=session, directory=directory,
name=name)
if self.replaceValFile:
self.replaceValFile.write(session=session, directory=directory,
name=name) | python | def _writeReplacementFiles(self, session, directory, name):
"""
Write the replacement files
"""
if self.replaceParamFile:
self.replaceParamFile.write(session=session, directory=directory,
name=name)
if self.replaceValFile:
self.replaceValFile.write(session=session, directory=directory,
name=name) | [
"def",
"_writeReplacementFiles",
"(",
"self",
",",
"session",
",",
"directory",
",",
"name",
")",
":",
"if",
"self",
".",
"replaceParamFile",
":",
"self",
".",
"replaceParamFile",
".",
"write",
"(",
"session",
"=",
"session",
",",
"directory",
"=",
"directory",
",",
"name",
"=",
"name",
")",
"if",
"self",
".",
"replaceValFile",
":",
"self",
".",
"replaceValFile",
".",
"write",
"(",
"session",
"=",
"session",
",",
"directory",
"=",
"directory",
",",
"name",
"=",
"name",
")"
] | Write the replacement files | [
"Write",
"the",
"replacement",
"files"
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/prj.py#L1763-L1773 | train |
CI-WATER/gsshapy | gsshapy/orm/prj.py | ProjectFile._invokeWrite | def _invokeWrite(self, fileIO, session, directory, filename, replaceParamFile):
"""
Invoke File Write Method on Other Files
"""
# Default value for instance
instance = None
try:
# Handle case where fileIO interfaces with single file
# Retrieve File using FileIO
instance = session.query(fileIO). \
filter(fileIO.projectFile == self). \
one()
except:
# Handle case where fileIO interfaces with multiple files
# Retrieve File using FileIO and file extension
extension = filename.split('.')[1]
try:
instance = session.query(fileIO). \
filter(fileIO.projectFile == self). \
filter(fileIO.fileExtension == extension). \
one()
except NoResultFound:
# Handle case when there is no file in database but the
# card is listed in the project file
log.warning('{0} listed as card in project file, but '
'the file is not found in the database.'.format(filename))
except MultipleResultsFound:
self._invokeWriteForMultipleOfType(directory, extension, fileIO,
filename, session,
replaceParamFile=replaceParamFile)
return
# Initiate Write Method on File
if instance is not None:
instance.write(session=session, directory=directory, name=filename,
replaceParamFile=replaceParamFile) | python | def _invokeWrite(self, fileIO, session, directory, filename, replaceParamFile):
"""
Invoke File Write Method on Other Files
"""
# Default value for instance
instance = None
try:
# Handle case where fileIO interfaces with single file
# Retrieve File using FileIO
instance = session.query(fileIO). \
filter(fileIO.projectFile == self). \
one()
except:
# Handle case where fileIO interfaces with multiple files
# Retrieve File using FileIO and file extension
extension = filename.split('.')[1]
try:
instance = session.query(fileIO). \
filter(fileIO.projectFile == self). \
filter(fileIO.fileExtension == extension). \
one()
except NoResultFound:
# Handle case when there is no file in database but the
# card is listed in the project file
log.warning('{0} listed as card in project file, but '
'the file is not found in the database.'.format(filename))
except MultipleResultsFound:
self._invokeWriteForMultipleOfType(directory, extension, fileIO,
filename, session,
replaceParamFile=replaceParamFile)
return
# Initiate Write Method on File
if instance is not None:
instance.write(session=session, directory=directory, name=filename,
replaceParamFile=replaceParamFile) | [
"def",
"_invokeWrite",
"(",
"self",
",",
"fileIO",
",",
"session",
",",
"directory",
",",
"filename",
",",
"replaceParamFile",
")",
":",
"# Default value for instance",
"instance",
"=",
"None",
"try",
":",
"# Handle case where fileIO interfaces with single file",
"# Retrieve File using FileIO",
"instance",
"=",
"session",
".",
"query",
"(",
"fileIO",
")",
".",
"filter",
"(",
"fileIO",
".",
"projectFile",
"==",
"self",
")",
".",
"one",
"(",
")",
"except",
":",
"# Handle case where fileIO interfaces with multiple files",
"# Retrieve File using FileIO and file extension",
"extension",
"=",
"filename",
".",
"split",
"(",
"'.'",
")",
"[",
"1",
"]",
"try",
":",
"instance",
"=",
"session",
".",
"query",
"(",
"fileIO",
")",
".",
"filter",
"(",
"fileIO",
".",
"projectFile",
"==",
"self",
")",
".",
"filter",
"(",
"fileIO",
".",
"fileExtension",
"==",
"extension",
")",
".",
"one",
"(",
")",
"except",
"NoResultFound",
":",
"# Handle case when there is no file in database but the",
"# card is listed in the project file",
"log",
".",
"warning",
"(",
"'{0} listed as card in project file, but '",
"'the file is not found in the database.'",
".",
"format",
"(",
"filename",
")",
")",
"except",
"MultipleResultsFound",
":",
"self",
".",
"_invokeWriteForMultipleOfType",
"(",
"directory",
",",
"extension",
",",
"fileIO",
",",
"filename",
",",
"session",
",",
"replaceParamFile",
"=",
"replaceParamFile",
")",
"return",
"# Initiate Write Method on File",
"if",
"instance",
"is",
"not",
"None",
":",
"instance",
".",
"write",
"(",
"session",
"=",
"session",
",",
"directory",
"=",
"directory",
",",
"name",
"=",
"filename",
",",
"replaceParamFile",
"=",
"replaceParamFile",
")"
] | Invoke File Write Method on Other Files | [
"Invoke",
"File",
"Write",
"Method",
"on",
"Other",
"Files"
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/prj.py#L1802-L1842 | train |
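_invokeWrite hinges on the three outcomes of SQLAlchemy's Query.one(): a single row, NoResultFound, or MultipleResultsFound. A dependency-free sketch of that control flow; the one() helper emulates the SQLAlchemy behavior and the filenames are invented:

class NoResultFound(Exception):
    pass

class MultipleResultsFound(Exception):
    pass

def one(rows):
    # Emulates SQLAlchemy Query.one(): exactly one row, or raise.
    if not rows:
        raise NoResultFound()
    if len(rows) > 1:
        raise MultipleResultsFound()
    return rows[0]

for matches in (['prefix.gag'], [], ['1_prefix.cdp', '2_prefix.cdp']):
    try:
        print('writing single instance: %s' % one(matches))
    except NoResultFound:
        print('card listed in project file, but no file in the database')
    except MultipleResultsFound:
        print('batch output detected: write all %s instances' % len(matches))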
CI-WATER/gsshapy | gsshapy/orm/prj.py | ProjectCard.write | def write(self, originalPrefix, newPrefix=None):
"""
Write project card to string.
Args:
originalPrefix (str): Original name to give to files that follow the project naming convention
(e.g: prefix.gag).
newPrefix (str, optional): If new prefix is desired, pass in this parameter. Defaults to None.
Returns:
str: Card and value as they would be written to the project file.
"""
# Determine number of spaces between card and value for nice alignment
numSpaces = max(2, 25 - len(self.name))
# Handle special case of booleans
if self.value is None:
line = '%s\n' % self.name
else:
if self.name == 'WMS':
line = '%s %s\n' % (self.name, self.value)
elif newPrefix is None:
line = '%s%s%s\n' % (self.name, ' ' * numSpaces, self.value)
elif originalPrefix in self.value:
line = '%s%s%s\n' % (self.name, ' ' * numSpaces, self.value.replace(originalPrefix, newPrefix))
else:
line = '%s%s%s\n' % (self.name, ' ' * numSpaces, self.value)
return line | python | def write(self, originalPrefix, newPrefix=None):
"""
Write project card to string.
Args:
originalPrefix (str): Original name to give to files that follow the project naming convention
(e.g: prefix.gag).
newPrefix (str, optional): If new prefix is desired, pass in this parameter. Defaults to None.
Returns:
str: Card and value as they would be written to the project file.
"""
# Determine number of spaces between card and value for nice alignment
numSpaces = max(2, 25 - len(self.name))
# Handle special case of booleans
if self.value is None:
line = '%s\n' % self.name
else:
if self.name == 'WMS':
line = '%s %s\n' % (self.name, self.value)
elif newPrefix is None:
line = '%s%s%s\n' % (self.name, ' ' * numSpaces, self.value)
elif originalPrefix in self.value:
line = '%s%s%s\n' % (self.name, ' ' * numSpaces, self.value.replace(originalPrefix, newPrefix))
else:
line = '%s%s%s\n' % (self.name, ' ' * numSpaces, self.value)
return line | [
"def",
"write",
"(",
"self",
",",
"originalPrefix",
",",
"newPrefix",
"=",
"None",
")",
":",
"# Determine number of spaces between card and value for nice alignment",
"numSpaces",
"=",
"max",
"(",
"2",
",",
"25",
"-",
"len",
"(",
"self",
".",
"name",
")",
")",
"# Handle special case of booleans",
"if",
"self",
".",
"value",
"is",
"None",
":",
"line",
"=",
"'%s\\n'",
"%",
"self",
".",
"name",
"else",
":",
"if",
"self",
".",
"name",
"==",
"'WMS'",
":",
"line",
"=",
"'%s %s\\n'",
"%",
"(",
"self",
".",
"name",
",",
"self",
".",
"value",
")",
"elif",
"newPrefix",
"is",
"None",
":",
"line",
"=",
"'%s%s%s\\n'",
"%",
"(",
"self",
".",
"name",
",",
"' '",
"*",
"numSpaces",
",",
"self",
".",
"value",
")",
"elif",
"originalPrefix",
"in",
"self",
".",
"value",
":",
"line",
"=",
"'%s%s%s\\n'",
"%",
"(",
"self",
".",
"name",
",",
"' '",
"*",
"numSpaces",
",",
"self",
".",
"value",
".",
"replace",
"(",
"originalPrefix",
",",
"newPrefix",
")",
")",
"else",
":",
"line",
"=",
"'%s%s%s\\n'",
"%",
"(",
"self",
".",
"name",
",",
"' '",
"*",
"numSpaces",
",",
"self",
".",
"value",
")",
"return",
"line"
] | Write project card to string.
Args:
originalPrefix (str): Original name to give to files that follow the project naming convention
(e.g: prefix.gag).
newPrefix (str, optional): If new prefix is desired, pass in this parameter. Defaults to None.
Returns:
str: Card and value as they would be written to the project file. | [
"Write",
"project",
"card",
"to",
"string",
"."
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/prj.py#L2024-L2051 | train |
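The alignment rule in ProjectCard.write, max(2, 25 - len(name)) spaces, pads values toward a fixed column. A standalone restatement with the WMS special case omitted; the card names and prefixes are invented:

def write_card(name, value, originalPrefix, newPrefix=None):
    # Same alignment as ProjectCard.write: pad toward column 25, minimum 2 spaces.
    numSpaces = max(2, 25 - len(name))
    if value is None:  # boolean card: name only
        return '%s\n' % name
    if newPrefix is not None and originalPrefix in value:
        value = value.replace(originalPrefix, newPrefix)
    return '%s%s%s\n' % (name, ' ' * numSpaces, value)

print(write_card('WATERSHED_MASK', '"parkcity.msk"', 'parkcity', 'newrun'), end='')
print(write_card('QUIET', None, 'parkcity'), end='')
# The first card's value begins in column 26; the prefix is swapped to "newrun.msk".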
Robpol86/etaprogress | etaprogress/eta.py | ETA.elapsed | def elapsed(self):
"""Returns the number of seconds it has been since the start until the latest entry."""
if not self.started or self._start_time is None:
return 0.0
return self._timing_data[-1][0] - self._start_time | python | def elapsed(self):
"""Returns the number of seconds it has been since the start until the latest entry."""
if not self.started or self._start_time is None:
return 0.0
return self._timing_data[-1][0] - self._start_time | [
"def",
"elapsed",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"started",
"or",
"self",
".",
"_start_time",
"is",
"None",
":",
"return",
"0.0",
"return",
"self",
".",
"_timing_data",
"[",
"-",
"1",
"]",
"[",
"0",
"]",
"-",
"self",
".",
"_start_time"
] | Returns the number of seconds it has been since the start until the latest entry. | [
"Returns",
"the",
"number",
"of",
"seconds",
"it",
"has",
"been",
"since",
"the",
"start",
"until",
"the",
"latest",
"entry",
"."
] | 224e8a248c2bf820bad218763281914ad3983fff | https://github.com/Robpol86/etaprogress/blob/224e8a248c2bf820bad218763281914ad3983fff/etaprogress/eta.py#L83-L87 | train |
Robpol86/etaprogress | etaprogress/eta.py | ETA.rate_unstable | def rate_unstable(self):
"""Returns an unstable rate based on the last two entries in the timing data. Less intensive to compute."""
if not self.started or self.stalled:
return 0.0
x1, y1 = self._timing_data[-2]
x2, y2 = self._timing_data[-1]
return (y2 - y1) / (x2 - x1) | python | def rate_unstable(self):
"""Returns an unstable rate based on the last two entries in the timing data. Less intensive to compute."""
if not self.started or self.stalled:
return 0.0
x1, y1 = self._timing_data[-2]
x2, y2 = self._timing_data[-1]
return (y2 - y1) / (x2 - x1) | [
"def",
"rate_unstable",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"started",
"or",
"self",
".",
"stalled",
":",
"return",
"0.0",
"x1",
",",
"y1",
"=",
"self",
".",
"_timing_data",
"[",
"-",
"2",
"]",
"x2",
",",
"y2",
"=",
"self",
".",
"_timing_data",
"[",
"-",
"1",
"]",
"return",
"(",
"y2",
"-",
"y1",
")",
"/",
"(",
"x2",
"-",
"x1",
")"
] | Returns an unstable rate based on the last two entries in the timing data. Less intensive to compute. | [
"Returns",
"an",
"unstable",
"rate",
"based",
"on",
"the",
"last",
"two",
"entries",
"in",
"the",
"timing",
"data",
".",
"Less",
"intensive",
"to",
"compute",
"."
] | 224e8a248c2bf820bad218763281914ad3983fff | https://github.com/Robpol86/etaprogress/blob/224e8a248c2bf820bad218763281914ad3983fff/etaprogress/eta.py#L90-L96 | train |
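rate_unstable is just the slope between the last two (timestamp, numerator) samples, for example:

_timing_data = [(10.0, 100), (12.0, 150)]  # (unix time, units done); invented
(x1, y1), (x2, y2) = _timing_data[-2], _timing_data[-1]
print((y2 - y1) / (x2 - x1))  # 25.0 units per second over the last interval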
Robpol86/etaprogress | etaprogress/eta.py | ETA.rate_overall | def rate_overall(self):
"""Returns the overall average rate based on the start time."""
elapsed = self.elapsed
return self.rate if not elapsed else self.numerator / self.elapsed | python | def rate_overall(self):
"""Returns the overall average rate based on the start time."""
elapsed = self.elapsed
return self.rate if not elapsed else self.numerator / self.elapsed | [
"def",
"rate_overall",
"(",
"self",
")",
":",
"elapsed",
"=",
"self",
".",
"elapsed",
"return",
"self",
".",
"rate",
"if",
"not",
"elapsed",
"else",
"self",
".",
"numerator",
"/",
"self",
".",
"elapsed"
] | Returns the overall average rate based on the start time. | [
"Returns",
"the",
"overall",
"average",
"rate",
"based",
"on",
"the",
"start",
"time",
"."
] | 224e8a248c2bf820bad218763281914ad3983fff | https://github.com/Robpol86/etaprogress/blob/224e8a248c2bf820bad218763281914ad3983fff/etaprogress/eta.py#L99-L102 | train |
Robpol86/etaprogress | etaprogress/eta.py | ETA._calculate | def _calculate(self):
"""Perform the ETA and rate calculation.
Two linear lines are used to calculate the ETA: the linear regression (line through a scatter-plot), and the
fitted line (a line that runs through the latest data point in _timing_data but parallel to the linear
regression line).
As the percentage moves closer to 100%, _calculate() gradually uses the ETA based on the fitted line more and
more. This is done to prevent an ETA that's in the past.
http://code.activestate.com/recipes/578914-simple-linear-regression-with-pure-python/
http://en.wikipedia.org/wiki/Pearson_product-moment_correlation_coefficient
"""
# Calculate means and standard deviations.
mean_x = sum(i[0] for i in self._timing_data) / len(self._timing_data)
mean_y = sum(i[1] for i in self._timing_data) / len(self._timing_data)
std_x = sqrt(sum(pow(i[0] - mean_x, 2) for i in self._timing_data) / (len(self._timing_data) - 1))
std_y = sqrt(sum(pow(i[1] - mean_y, 2) for i in self._timing_data) / (len(self._timing_data) - 1))
# Calculate coefficient.
sum_xy, sum_sq_v_x, sum_sq_v_y = 0, 0, 0
for x, y in self._timing_data:
x -= mean_x
y -= mean_y
sum_xy += x * y
sum_sq_v_x += pow(x, 2)
sum_sq_v_y += pow(y, 2)
pearson_r = sum_xy / sqrt(sum_sq_v_x * sum_sq_v_y)
# Calculate regression line. y = mx + b where m is the slope and b is the y-intercept.
m = self.rate = pearson_r * (std_y / std_x)
if self.undefined:
return
y = self.denominator
b = mean_y - m * mean_x
x = (y - b) / m
# Calculate fitted line (transformed/shifted regression line horizontally).
fitted_b = self._timing_data[-1][1] - (m * self._timing_data[-1][0])
fitted_x = (y - fitted_b) / m
adjusted_x = ((fitted_x - x) * (self.numerator / self.denominator)) + x
self.eta_epoch = adjusted_x | python | def _calculate(self):
"""Perform the ETA and rate calculation.
Two linear lines are used to calculate the ETA: the linear regression (line through a scatter-plot), and the
fitted line (a line that runs through the latest data point in _timing_data but parallel to the linear
regression line).
As the percentage moves closer to 100%, _calculate() gradually uses the ETA based on the fitted line more and
more. This is done to prevent an ETA that's in the past.
http://code.activestate.com/recipes/578914-simple-linear-regression-with-pure-python/
http://en.wikipedia.org/wiki/Pearson_product-moment_correlation_coefficient
"""
# Calculate means and standard deviations.
mean_x = sum(i[0] for i in self._timing_data) / len(self._timing_data)
mean_y = sum(i[1] for i in self._timing_data) / len(self._timing_data)
std_x = sqrt(sum(pow(i[0] - mean_x, 2) for i in self._timing_data) / (len(self._timing_data) - 1))
std_y = sqrt(sum(pow(i[1] - mean_y, 2) for i in self._timing_data) / (len(self._timing_data) - 1))
# Calculate coefficient.
sum_xy, sum_sq_v_x, sum_sq_v_y = 0, 0, 0
for x, y in self._timing_data:
x -= mean_x
y -= mean_y
sum_xy += x * y
sum_sq_v_x += pow(x, 2)
sum_sq_v_y += pow(y, 2)
pearson_r = sum_xy / sqrt(sum_sq_v_x * sum_sq_v_y)
# Calculate regression line. y = mx + b where m is the slope and b is the y-intercept.
m = self.rate = pearson_r * (std_y / std_x)
if self.undefined:
return
y = self.denominator
b = mean_y - m * mean_x
x = (y - b) / m
# Calculate fitted line (transformed/shifted regression line horizontally).
fitted_b = self._timing_data[-1][1] - (m * self._timing_data[-1][0])
fitted_x = (y - fitted_b) / m
adjusted_x = ((fitted_x - x) * (self.numerator / self.denominator)) + x
self.eta_epoch = adjusted_x | [
"def",
"_calculate",
"(",
"self",
")",
":",
"# Calculate means and standard deviations.",
"mean_x",
"=",
"sum",
"(",
"i",
"[",
"0",
"]",
"for",
"i",
"in",
"self",
".",
"_timing_data",
")",
"/",
"len",
"(",
"self",
".",
"_timing_data",
")",
"mean_y",
"=",
"sum",
"(",
"i",
"[",
"1",
"]",
"for",
"i",
"in",
"self",
".",
"_timing_data",
")",
"/",
"len",
"(",
"self",
".",
"_timing_data",
")",
"std_x",
"=",
"sqrt",
"(",
"sum",
"(",
"pow",
"(",
"i",
"[",
"0",
"]",
"-",
"mean_x",
",",
"2",
")",
"for",
"i",
"in",
"self",
".",
"_timing_data",
")",
"/",
"(",
"len",
"(",
"self",
".",
"_timing_data",
")",
"-",
"1",
")",
")",
"std_y",
"=",
"sqrt",
"(",
"sum",
"(",
"pow",
"(",
"i",
"[",
"1",
"]",
"-",
"mean_y",
",",
"2",
")",
"for",
"i",
"in",
"self",
".",
"_timing_data",
")",
"/",
"(",
"len",
"(",
"self",
".",
"_timing_data",
")",
"-",
"1",
")",
")",
"# Calculate coefficient.",
"sum_xy",
",",
"sum_sq_v_x",
",",
"sum_sq_v_y",
"=",
"0",
",",
"0",
",",
"0",
"for",
"x",
",",
"y",
"in",
"self",
".",
"_timing_data",
":",
"x",
"-=",
"mean_x",
"y",
"-=",
"mean_y",
"sum_xy",
"+=",
"x",
"*",
"y",
"sum_sq_v_x",
"+=",
"pow",
"(",
"x",
",",
"2",
")",
"sum_sq_v_y",
"+=",
"pow",
"(",
"y",
",",
"2",
")",
"pearson_r",
"=",
"sum_xy",
"/",
"sqrt",
"(",
"sum_sq_v_x",
"*",
"sum_sq_v_y",
")",
"# Calculate regression line. y = mx + b where m is the slope and b is the y-intercept.",
"m",
"=",
"self",
".",
"rate",
"=",
"pearson_r",
"*",
"(",
"std_y",
"/",
"std_x",
")",
"if",
"self",
".",
"undefined",
":",
"return",
"y",
"=",
"self",
".",
"denominator",
"b",
"=",
"mean_y",
"-",
"m",
"*",
"mean_x",
"x",
"=",
"(",
"y",
"-",
"b",
")",
"/",
"m",
"# Calculate fitted line (transformed/shifted regression line horizontally).",
"fitted_b",
"=",
"self",
".",
"_timing_data",
"[",
"-",
"1",
"]",
"[",
"1",
"]",
"-",
"(",
"m",
"*",
"self",
".",
"_timing_data",
"[",
"-",
"1",
"]",
"[",
"0",
"]",
")",
"fitted_x",
"=",
"(",
"y",
"-",
"fitted_b",
")",
"/",
"m",
"adjusted_x",
"=",
"(",
"(",
"fitted_x",
"-",
"x",
")",
"*",
"(",
"self",
".",
"numerator",
"/",
"self",
".",
"denominator",
")",
")",
"+",
"x",
"self",
".",
"eta_epoch",
"=",
"adjusted_x"
] | Perform the ETA and rate calculation.
Two linear lines are used to calculate the ETA: the linear regression (line through a scatter-plot), and the
fitted line (a line that runs through the latest data point in _timing_data but parallel to the linear
regression line).
As the percentage moves closer to 100%, _calculate() gradually uses the ETA based on the fitted line more and
more. This is done to prevent an ETA that's in the past.
http://code.activestate.com/recipes/578914-simple-linear-regression-with-pure-python/
http://en.wikipedia.org/wiki/Pearson_product-moment_correlation_coefficient | [
"Perform",
"the",
"ETA",
"and",
"rate",
"calculation",
"."
] | 224e8a248c2bf820bad218763281914ad3983fff | https://github.com/Robpol86/etaprogress/blob/224e8a248c2bf820bad218763281914ad3983fff/etaprogress/eta.py#L128-L169 | train |
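Restated in the code's variable names, _calculate computes the least-squares slope from the Pearson coefficient, intersects two parallel lines with the target denominator, and blends the intersections by completion fraction:

m = r_{xy}\,\frac{s_y}{s_x}, \qquad b = \bar{y} - m\,\bar{x}, \qquad x_{\mathrm{reg}} = \frac{y_{\mathrm{denom}} - b}{m}

b_{\mathrm{fit}} = y_n - m\,x_n, \qquad x_{\mathrm{fit}} = \frac{y_{\mathrm{denom}} - b_{\mathrm{fit}}}{m}, \qquad x_{\mathrm{eta}} = \bigl(x_{\mathrm{fit}} - x_{\mathrm{reg}}\bigr)\,\frac{\mathrm{numerator}}{\mathrm{denominator}} + x_{\mathrm{reg}}

where r_xy is the Pearson coefficient, s_x and s_y the sample standard deviations, (x_n, y_n) the latest timing sample, and x_eta the value stored in eta_epoch.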
CI-WATER/gsshapy | gsshapy/orm/hmet.py | HmetFile._read | def _read(self, directory, filename, session, path, name, extension, spatial, spatialReferenceID, replaceParamFile):
"""
Read HMET WES from File Method
"""
# Set file extension property
self.fileExtension = extension
# Open file and parse into HmetRecords
with open(path, 'r') as hmetFile:
for line in hmetFile:
sline = line.strip().split()
try:
                    # Extract date and time from record
dateTime = datetime(int(sline[0]), int(sline[1]), int(sline[2]), int(sline[3]))
                    # Initialize GSSHAPY HmetRecord object
hmetRecord = HmetRecord(hmetDateTime=dateTime,
barometricPress=sline[4],
relHumidity=sline[5],
totalSkyCover=sline[6],
windSpeed=sline[7],
dryBulbTemp=sline[8],
directRad=sline[9],
globalRad=sline[10])
# Associate HmetRecord with HmetFile
hmetRecord.hmetFile = self
except:
pass | python | def _read(self, directory, filename, session, path, name, extension, spatial, spatialReferenceID, replaceParamFile):
"""
Read HMET WES from File Method
"""
# Set file extension property
self.fileExtension = extension
# Open file and parse into HmetRecords
with open(path, 'r') as hmetFile:
for line in hmetFile:
sline = line.strip().split()
try:
                    # Extract date and time from record
dateTime = datetime(int(sline[0]), int(sline[1]), int(sline[2]), int(sline[3]))
                    # Initialize GSSHAPY HmetRecord object
hmetRecord = HmetRecord(hmetDateTime=dateTime,
barometricPress=sline[4],
relHumidity=sline[5],
totalSkyCover=sline[6],
windSpeed=sline[7],
dryBulbTemp=sline[8],
directRad=sline[9],
globalRad=sline[10])
# Associate HmetRecord with HmetFile
hmetRecord.hmetFile = self
except:
pass | [
"def",
"_read",
"(",
"self",
",",
"directory",
",",
"filename",
",",
"session",
",",
"path",
",",
"name",
",",
"extension",
",",
"spatial",
",",
"spatialReferenceID",
",",
"replaceParamFile",
")",
":",
"# Set file extension property",
"self",
".",
"fileExtension",
"=",
"extension",
"# Open file and parse into HmetRecords",
"with",
"open",
"(",
"path",
",",
"'r'",
")",
"as",
"hmetFile",
":",
"for",
"line",
"in",
"hmetFile",
":",
"sline",
"=",
"line",
".",
"strip",
"(",
")",
".",
"split",
"(",
")",
"try",
":",
"# Extract data time from record",
"dateTime",
"=",
"datetime",
"(",
"int",
"(",
"sline",
"[",
"0",
"]",
")",
",",
"int",
"(",
"sline",
"[",
"1",
"]",
")",
",",
"int",
"(",
"sline",
"[",
"2",
"]",
")",
",",
"int",
"(",
"sline",
"[",
"3",
"]",
")",
")",
"# Intitialize GSSHAPY HmetRecord object",
"hmetRecord",
"=",
"HmetRecord",
"(",
"hmetDateTime",
"=",
"dateTime",
",",
"barometricPress",
"=",
"sline",
"[",
"4",
"]",
",",
"relHumidity",
"=",
"sline",
"[",
"5",
"]",
",",
"totalSkyCover",
"=",
"sline",
"[",
"6",
"]",
",",
"windSpeed",
"=",
"sline",
"[",
"7",
"]",
",",
"dryBulbTemp",
"=",
"sline",
"[",
"8",
"]",
",",
"directRad",
"=",
"sline",
"[",
"9",
"]",
",",
"globalRad",
"=",
"sline",
"[",
"10",
"]",
")",
"# Associate HmetRecord with HmetFile",
"hmetRecord",
".",
"hmetFile",
"=",
"self",
"except",
":",
"pass"
] | Read HMET WES from File Method | [
"Read",
"HMET",
"WES",
"from",
"File",
"Method"
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/hmet.py#L56-L87 | train |
CI-WATER/gsshapy | gsshapy/orm/hmet.py | HmetFile._write | def _write(self, session, openFile, replaceParamFile):
"""
Write HMET WES to File Method
"""
## TODO: Ensure Other HMET Formats are supported
hmetRecords = self.hmetRecords
for record in hmetRecords:
openFile.write('%s\t%s\t%s\t%s\t%.3f\t%s\t%s\t%s\t%s\t%.2f\t%.2f\n' % (
record.hmetDateTime.year,
record.hmetDateTime.month,
record.hmetDateTime.day,
record.hmetDateTime.hour,
record.barometricPress,
record.relHumidity,
record.totalSkyCover,
record.windSpeed,
record.dryBulbTemp,
record.directRad,
record.globalRad)) | python | def _write(self, session, openFile, replaceParamFile):
"""
Write HMET WES to File Method
"""
## TODO: Ensure Other HMET Formats are supported
hmetRecords = self.hmetRecords
for record in hmetRecords:
openFile.write('%s\t%s\t%s\t%s\t%.3f\t%s\t%s\t%s\t%s\t%.2f\t%.2f\n' % (
record.hmetDateTime.year,
record.hmetDateTime.month,
record.hmetDateTime.day,
record.hmetDateTime.hour,
record.barometricPress,
record.relHumidity,
record.totalSkyCover,
record.windSpeed,
record.dryBulbTemp,
record.directRad,
record.globalRad)) | [
"def",
"_write",
"(",
"self",
",",
"session",
",",
"openFile",
",",
"replaceParamFile",
")",
":",
"## TODO: Ensure Other HMET Formats are supported",
"hmetRecords",
"=",
"self",
".",
"hmetRecords",
"for",
"record",
"in",
"hmetRecords",
":",
"openFile",
".",
"write",
"(",
"'%s\\t%s\\t%s\\t%s\\t%.3f\\t%s\\t%s\\t%s\\t%s\\t%.2f\\t%.2f\\n'",
"%",
"(",
"record",
".",
"hmetDateTime",
".",
"year",
",",
"record",
".",
"hmetDateTime",
".",
"month",
",",
"record",
".",
"hmetDateTime",
".",
"day",
",",
"record",
".",
"hmetDateTime",
".",
"hour",
",",
"record",
".",
"barometricPress",
",",
"record",
".",
"relHumidity",
",",
"record",
".",
"totalSkyCover",
",",
"record",
".",
"windSpeed",
",",
"record",
".",
"dryBulbTemp",
",",
"record",
".",
"directRad",
",",
"record",
".",
"globalRad",
")",
")"
] | Write HMET WES to File Method | [
"Write",
"HMET",
"WES",
"to",
"File",
"Method"
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/hmet.py#L89-L108 | train |
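Each WES record renders to one tab-delimited line: year, month, day, hour, then barometric pressure at three decimals, the four integer-style fields, and the two radiation terms at two decimals. A self-contained render with invented values:

from datetime import datetime

dt = datetime(2002, 1, 1, 0)  # invented record time
line = '%s\t%s\t%s\t%s\t%.3f\t%s\t%s\t%s\t%s\t%.2f\t%.2f' % (
    dt.year, dt.month, dt.day, dt.hour,
    28.652,  # barometricPress
    100,     # relHumidity
    0,       # totalSkyCover
    4,       # windSpeed
    20,      # dryBulbTemp
    0.0,     # directRad
    0.0)     # globalRad
print(line)  # tab-separated: 2002 1 1 0 28.652 100 0 4 20 0.00 0.00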
CI-WATER/gsshapy | gsshapy/orm/evt.py | ProjectFileEventManager._read | def _read(self, directory, filename, session, path, name, extension,
spatial=None, spatialReferenceID=None, replaceParamFile=None):
"""
ProjectFileEvent Read from File Method
"""
yml_events = []
with open(path) as fo:
yml_events = yaml.load(fo)
for yml_event in yml_events:
if os.path.exists(os.path.join(directory, yml_event.subfolder)):
orm_event = yml_event.as_orm()
if not self._similar_event_exists(orm_event.subfolder):
session.add(orm_event)
self.events.append(orm_event)
session.commit() | python | def _read(self, directory, filename, session, path, name, extension,
spatial=None, spatialReferenceID=None, replaceParamFile=None):
"""
ProjectFileEvent Read from File Method
"""
yml_events = []
with open(path) as fo:
yml_events = yaml.load(fo)
for yml_event in yml_events:
if os.path.exists(os.path.join(directory, yml_event.subfolder)):
orm_event = yml_event.as_orm()
if not self._similar_event_exists(orm_event.subfolder):
session.add(orm_event)
self.events.append(orm_event)
session.commit() | [
"def",
"_read",
"(",
"self",
",",
"directory",
",",
"filename",
",",
"session",
",",
"path",
",",
"name",
",",
"extension",
",",
"spatial",
"=",
"None",
",",
"spatialReferenceID",
"=",
"None",
",",
"replaceParamFile",
"=",
"None",
")",
":",
"yml_events",
"=",
"[",
"]",
"with",
"open",
"(",
"path",
")",
"as",
"fo",
":",
"yml_events",
"=",
"yaml",
".",
"load",
"(",
"fo",
")",
"for",
"yml_event",
"in",
"yml_events",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"yml_event",
".",
"subfolder",
")",
")",
":",
"orm_event",
"=",
"yml_event",
".",
"as_orm",
"(",
")",
"if",
"not",
"self",
".",
"_similar_event_exists",
"(",
"orm_event",
".",
"subfolder",
")",
":",
"session",
".",
"add",
"(",
"orm_event",
")",
"self",
".",
"events",
".",
"append",
"(",
"orm_event",
")",
"session",
".",
"commit",
"(",
")"
] | ProjectFileEvent Read from File Method | [
"ProjectFileEvent",
"Read",
"from",
"File",
"Method"
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/evt.py#L30-L46 | train |
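_read expects the events file to be a YAML list of tagged event objects carrying at least name and subfolder. A hedged sketch; the !ProjectFileEvent tag is an assumption inferred from the attributes used above, not taken from the source:

import yaml

class YmlFileEvent(yaml.YAMLObject):
    # Illustrative stand-in; the real class also provides as_orm().
    yaml_tag = u'!ProjectFileEvent'  # tag name assumed, not verified

doc = """
- !ProjectFileEvent {name: event_1, subfolder: run_2002_01}
- !ProjectFileEvent {name: event_2, subfolder: run_2002_02}
"""
events = yaml.load(doc, Loader=yaml.Loader)  # custom tags need the full Loader
print([(e.name, e.subfolder) for e in events])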
CI-WATER/gsshapy | gsshapy/orm/evt.py | ProjectFileEvent.as_yml | def as_yml(self):
"""
Return yml compatible version of self
"""
return YmlFileEvent(name=str(self.name),
subfolder=str(self.subfolder)) | python | def as_yml(self):
"""
Return yml compatible version of self
"""
return YmlFileEvent(name=str(self.name),
subfolder=str(self.subfolder)) | [
"def",
"as_yml",
"(",
"self",
")",
":",
"return",
"YmlFileEvent",
"(",
"name",
"=",
"str",
"(",
"self",
".",
"name",
")",
",",
"subfolder",
"=",
"str",
"(",
"self",
".",
"subfolder",
")",
")"
] | Return yml compatible version of self | [
"Return",
"yml",
"compatible",
"version",
"of",
"self"
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/evt.py#L115-L120 | train |
timofurrer/ramlient | ramlient/request.py | prepare_request | def prepare_request(node):
"""
Prepare request to node's API route
:param Node node: the RAML node object
"""
if node.resource.method not in AVAILABLE_METHODS:
raise UnsupportedHTTPMethodError(node.resource.method)
def request(data=None, json=None, **kwargs):
"""
Make request to node's API route with the given keyword arguments
"""
# validate given query parameters
for key, value in kwargs.items():
param = next((p for p in node.resource.query_params if p.name == key), None)
if not param:
raise UnsupportedQueryParameter(node.resource.path, key)
if not match_type(value, param.type):
raise TypeError(
"Resource Query Parameter has type '{0}' but expected type '{1}'".format(
value.__class__.__name__, param.type))
response = requests.request(node.resource.method, node.resource.absolute_uri, params=kwargs,
data=data, json=json)
return response
return request | python | def prepare_request(node):
"""
Prepare request to node's API route
:param Node node: the RAML node object
"""
if node.resource.method not in AVAILABLE_METHODS:
raise UnsupportedHTTPMethodError(node.resource.method)
def request(data=None, json=None, **kwargs):
"""
Make request to node's API route with the given keyword arguments
"""
# validate given query parameters
for key, value in kwargs.items():
param = next((p for p in node.resource.query_params if p.name == key), None)
if not param:
raise UnsupportedQueryParameter(node.resource.path, key)
if not match_type(value, param.type):
raise TypeError(
"Resource Query Parameter has type '{0}' but expected type '{1}'".format(
value.__class__.__name__, param.type))
response = requests.request(node.resource.method, node.resource.absolute_uri, params=kwargs,
data=data, json=json)
return response
return request | [
"def",
"prepare_request",
"(",
"node",
")",
":",
"if",
"node",
".",
"resource",
".",
"method",
"not",
"in",
"AVAILABLE_METHODS",
":",
"raise",
"UnsupportedHTTPMethodError",
"(",
"node",
".",
"resource",
".",
"method",
")",
"def",
"request",
"(",
"data",
"=",
"None",
",",
"json",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"\"\"\"\n Make request to node's API route with the given keyword arguments\n \"\"\"",
"# validate given query parameters",
"for",
"key",
",",
"value",
"in",
"kwargs",
".",
"items",
"(",
")",
":",
"param",
"=",
"next",
"(",
"(",
"p",
"for",
"p",
"in",
"node",
".",
"resource",
".",
"query_params",
"if",
"p",
".",
"name",
"==",
"key",
")",
",",
"None",
")",
"if",
"not",
"param",
":",
"raise",
"UnsupportedQueryParameter",
"(",
"node",
".",
"resource",
".",
"path",
",",
"key",
")",
"if",
"not",
"match_type",
"(",
"value",
",",
"param",
".",
"type",
")",
":",
"raise",
"TypeError",
"(",
"\"Resource Query Parameter has type '{0}' but expected type '{1}'\"",
".",
"format",
"(",
"value",
".",
"__class__",
".",
"__name__",
",",
"param",
".",
"type",
")",
")",
"response",
"=",
"requests",
".",
"request",
"(",
"node",
".",
"resource",
".",
"method",
",",
"node",
".",
"resource",
".",
"absolute_uri",
",",
"params",
"=",
"kwargs",
",",
"data",
"=",
"data",
",",
"json",
"=",
"json",
")",
"return",
"response",
"return",
"request"
] | Prepare request to node's API route
:param Node node: the RAML node object | [
"Prepare",
"request",
"to",
"node",
"s",
"API",
"route"
] | e93092252635a6b3b0aca2c390b9f820368b791c | https://github.com/timofurrer/ramlient/blob/e93092252635a6b3b0aca2c390b9f820368b791c/ramlient/request.py#L20-L47 | train |
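A hedged usage sketch for the closure built above. In ramlient the node normally comes from attribute access on a Client, and resolving a method attribute such as `.get` is what routes through prepare_request; the RAML file, resource name, and query parameters here are invented:

    from ramlient import Client

    client = Client('api.raml')      # hypothetical RAML spec
    # `.get` resolves through prepare_request() and returns the closure
    get_users = client.users.get
    # unknown query params raise UnsupportedQueryParameter, wrongly
    # typed ones raise TypeError, both before any HTTP round trip
    response = get_users(start=0, size=20)
    print(response.status_code)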
vinci1it2000/schedula | examples/processing_chain/utils/plot.py | define_plot_data | def define_plot_data(data, x_name, *y_names):
"""
Defines the data to be plotted.
:param data:
All data.
:type data: dict
:param x_name:
x-axis name.
:type x_name: str
:param y_names:
y-axis names to be plotted.
:type y_names: str
:return:
Data to be plotted.
:rtype: list
"""
it = []
for k in y_names:
it.append({
'x': data[x_name],
'y': data[k],
'name': k
})
return it | python | def define_plot_data(data, x_name, *y_names):
"""
Defines the data to be plotted.
:param data:
All data.
:type data: dict
:param x_name:
x-axis name.
:type x_name: str
:param y_names:
y-axis names to be plotted.
:type y_names: str
:return:
Data to be plotted.
:rtype: list
"""
it = []
for k in y_names:
it.append({
'x': data[x_name],
'y': data[k],
'name': k
})
return it | [
"def",
"define_plot_data",
"(",
"data",
",",
"x_name",
",",
"*",
"y_names",
")",
":",
"it",
"=",
"[",
"]",
"for",
"k",
"in",
"y_names",
":",
"it",
".",
"append",
"(",
"{",
"'x'",
":",
"data",
"[",
"x_name",
"]",
",",
"'y'",
":",
"data",
"[",
"k",
"]",
",",
"'name'",
":",
"k",
"}",
")",
"return",
"it"
] | Defines the data to be plotted.
:param data:
All data.
:type data: dict
:param x_name:
x-axis name.
:type x_name: str
:param y_names:
y-axis names to be plotted.
:type y_names: str
:return:
Data to be plotted.
:rtype: list | [
"Defines",
"the",
"data",
"to",
"be",
"plotted",
"."
] | addb9fd685be81544b796c51383ac00a31543ce9 | https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/examples/processing_chain/utils/plot.py#L9-L36 | train |
vinci1it2000/schedula | examples/processing_chain/utils/plot.py | plot_lines | def plot_lines(it):
"""
Plotting lines.
:param it:
Data to plot where key value is the name of the series.
:type it: list[dict]
:return:
The plot.
:rtype: plotly.plotly.iplot
"""
data = [go.Scatter(mode='lines', **d) for d in it]
return py.iplot(data, filename='scatter-mode') | python | def plot_lines(it):
"""
Plotting lines.
:param it:
Data to plot where key value is the name of the series.
:type it: list[dict]
:return:
The plot.
:rtype: plotly.plotly.iplot
"""
data = [go.Scatter(mode='lines', **d) for d in it]
return py.iplot(data, filename='scatter-mode') | [
"def",
"plot_lines",
"(",
"it",
")",
":",
"data",
"=",
"[",
"go",
".",
"Scatter",
"(",
"mode",
"=",
"'lines'",
",",
"*",
"*",
"d",
")",
"for",
"d",
"in",
"it",
"]",
"return",
"py",
".",
"iplot",
"(",
"data",
",",
"filename",
"=",
"'scatter-mode'",
")"
] | Plotting lines.
:param it:
Data to plot where key value is the name of the series.
:type it: list[dict]
:return:
The plot.
:rtype: plotly.plotly.iplot | [
"Plotting",
"lines",
"."
] | addb9fd685be81544b796c51383ac00a31543ce9 | https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/examples/processing_chain/utils/plot.py#L39-L52 | train |
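Together the two plotting helpers above turn a dict of equal-length series into plotly line traces. A small round trip with invented series names; note plot_lines targets the legacy online `plotly.plotly` endpoint, so account credentials must already be configured:

    data = {
        'times': [0, 1, 2, 3],
        'velocities': [0.0, 4.9, 9.8, 14.7],
        'accelerations': [9.8, 9.8, 9.8, 9.8],
    }
    traces = define_plot_data(data, 'times', 'velocities', 'accelerations')
    fig = plot_lines(traces)  # one go.Scatter line per y-series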
dsoprea/PySecure | pysecure/adapters/channela.py | _ssh_channel_read | def _ssh_channel_read(ssh_channel_int, count, is_stderr):
"""Do a read on a channel."""
buffer_ = create_string_buffer(count)
while 1:
received_bytes = c_ssh_channel_read(ssh_channel_int,
cast(buffer_, c_void_p),
c_uint32(count),
c_int(int(is_stderr)))
if received_bytes == SSH_ERROR:
ssh_session_int = _ssh_channel_get_session(ssh_channel_int)
error = ssh_get_error(ssh_session_int)
raise SshError("Channel read failed: %s" % (error))
# BUG: We're not using the nonblocking variant, but this can still
# return SSH_AGAIN due to that call's broken dependencies.
# TODO: This call might return SSH_AGAIN, even though we should always be
# blocking. Reported as bug #115.
elif received_bytes == SSH_AGAIN:
continue
else:
break
# TODO: Where is the timeout configured for the read?
return buffer_.raw[0:received_bytes] | python | def _ssh_channel_read(ssh_channel_int, count, is_stderr):
"""Do a read on a channel."""
buffer_ = create_string_buffer(count)
while 1:
received_bytes = c_ssh_channel_read(ssh_channel_int,
cast(buffer_, c_void_p),
c_uint32(count),
c_int(int(is_stderr)))
if received_bytes == SSH_ERROR:
ssh_session_int = _ssh_channel_get_session(ssh_channel_int)
error = ssh_get_error(ssh_session_int)
raise SshError("Channel read failed: %s" % (error))
# BUG: We're not using the nonblocking variant, but this can still
# return SSH_AGAIN due to that call's broken dependencies.
# TODO: This call might return SSH_AGAIN, even though we should always be
# blocking. Reported as bug #115.
elif received_bytes == SSH_AGAIN:
continue
else:
break
# TODO: Where is the timeout configured for the read?
return buffer_.raw[0:received_bytes] | [
"def",
"_ssh_channel_read",
"(",
"ssh_channel_int",
",",
"count",
",",
"is_stderr",
")",
":",
"buffer_",
"=",
"create_string_buffer",
"(",
"count",
")",
"while",
"1",
":",
"received_bytes",
"=",
"c_ssh_channel_read",
"(",
"ssh_channel_int",
",",
"cast",
"(",
"buffer_",
",",
"c_void_p",
")",
",",
"c_uint32",
"(",
"count",
")",
",",
"c_int",
"(",
"int",
"(",
"is_stderr",
")",
")",
")",
"if",
"received_bytes",
"==",
"SSH_ERROR",
":",
"ssh_session_int",
"=",
"_ssh_channel_get_session",
"(",
"ssh_channel_int",
")",
"error",
"=",
"ssh_get_error",
"(",
"ssh_session_int",
")",
"raise",
"SshError",
"(",
"\"Channel read failed: %s\"",
"%",
"(",
"error",
")",
")",
"# BUG: We're not using the nonblocking variant, but this can still ",
"# return SSH_AGAIN due to that call's broken dependencies.",
"# TODO: This call might return SSH_AGAIN, even though we should always be ",
"# blocking. Reported as bug #115.",
"elif",
"received_bytes",
"==",
"SSH_AGAIN",
":",
"continue",
"else",
":",
"break",
"# TODO: Where is the timeout configured for the read?",
"return",
"buffer_",
".",
"raw",
"[",
"0",
":",
"received_bytes",
"]"
] | Do a read on a channel. | [
"Do",
"a",
"read",
"on",
"a",
"channel",
"."
] | ff7e01a0a77e79564cb00b6e38b4e6f9f88674f0 | https://github.com/dsoprea/PySecure/blob/ff7e01a0a77e79564cb00b6e38b4e6f9f88674f0/pysecure/adapters/channela.py#L79-L106 | train |
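A hedged sketch of draining a channel's stdout with the blocking reader above. `channel_int` would come from the surrounding pysecure session plumbing, and treating a zero-length read as end-of-stream is an assumption carried over from libssh's ssh_channel_read return convention:

    chunks = []
    while True:
        data = _ssh_channel_read(channel_int, 4096, is_stderr=False)
        if not data:  # empty read once the remote side reaches EOF
            break
        chunks.append(data)
    stdout = b''.join(chunks)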
CI-WATER/gsshapy | gsshapy/lib/gag_chunk.py | eventChunk | def eventChunk(key, lines):
"""
Parse EVENT chunks
"""
## NOTE: RADAR file format not supported currently.
## TODO: Add Support for RADAR file format type values
# Constants
KEYWORDS = ('EVENT',
'NRPDS',
'NRGAG',
'COORD',
'GAGES',
'ACCUM',
'RATES',
'RADAR')
NUM_CARDS = ('NRPDS',
'NRGAG')
VALUE_CARDS = ('GAGES',
'ACCUM',
'RATES',
'RADAR')
# Define result object
result = {'description': None,
'nrgag': None,
'nrpds': None,
'coords':[],
'valLines':[]}
chunks = pt.chunk(KEYWORDS, lines)
# Parse chunks associated with each key
for card, chunkList in iteritems(chunks):
# Parse each chunk in the chunk list
for chunk in chunkList:
schunk = chunk[0].strip().split()
# Cases
if card == 'EVENT':
# EVENT handler
schunk = pt.splitLine(chunk[0])
result['description'] = schunk[1]
elif card in NUM_CARDS:
# Num cards handler
result[card.lower()] = schunk[1]
elif card == 'COORD':
# COORD handler
schunk = pt.splitLine(chunk[0])
try:
# Extract the event description
desc = schunk[3]
except IndexError:
# Handle case where the event description is blank
desc = ""
coord = {'x': schunk[1],
'y': schunk[2],
'description': desc}
result['coords'].append(coord)
elif card in VALUE_CARDS:
# Value cards handler
# Extract DateTime
dateTime = datetime(year=int(schunk[1]),
month=int(schunk[2]),
day=int(schunk[3]),
hour=int(schunk[4]),
minute=int(schunk[5]))
# Compile values into a list
values = []
for index in range(6, len(schunk)):
values.append(schunk[index])
valueLine = {'type': schunk[0],
'dateTime': dateTime,
'values': values}
result['valLines'].append(valueLine)
return result | python | def eventChunk(key, lines):
"""
Parse EVENT chunks
"""
## NOTE: RADAR file format not supported currently.
## TODO: Add Support for RADAR file format type values
# Constants
KEYWORDS = ('EVENT',
'NRPDS',
'NRGAG',
'COORD',
'GAGES',
'ACCUM',
'RATES',
'RADAR')
NUM_CARDS = ('NRPDS',
'NRGAG')
VALUE_CARDS = ('GAGES',
'ACCUM',
'RATES',
'RADAR')
# Define result object
result = {'description': None,
'nrgag': None,
'nrpds': None,
'coords':[],
'valLines':[]}
chunks = pt.chunk(KEYWORDS, lines)
# Parse chunks associated with each key
for card, chunkList in iteritems(chunks):
# Parse each chunk in the chunk list
for chunk in chunkList:
schunk = chunk[0].strip().split()
# Cases
if card == 'EVENT':
# EVENT handler
schunk = pt.splitLine(chunk[0])
result['description'] = schunk[1]
elif card in NUM_CARDS:
# Num cards handler
result[card.lower()] = schunk[1]
elif card == 'COORD':
# COORD handler
schunk = pt.splitLine(chunk[0])
try:
# Extract the event description
desc = schunk[3]
except IndexError:
# Handle case where the event description is blank
desc = ""
coord = {'x': schunk[1],
'y': schunk[2],
'description': desc}
result['coords'].append(coord)
elif card in VALUE_CARDS:
# Value cards handler
# Extract DateTime
dateTime = datetime(year=int(schunk[1]),
month=int(schunk[2]),
day=int(schunk[3]),
hour=int(schunk[4]),
minute=int(schunk[5]))
# Compile values into a list
values = []
for index in range(6, len(schunk)):
values.append(schunk[index])
valueLine = {'type': schunk[0],
'dateTime': dateTime,
'values': values}
result['valLines'].append(valueLine)
return result | [
"def",
"eventChunk",
"(",
"key",
",",
"lines",
")",
":",
"## NOTE: RADAR file format not supported currently.",
"## TODO: Add Support for RADAR file format type values",
"# Contants",
"KEYWORDS",
"=",
"(",
"'EVENT'",
",",
"'NRPDS'",
",",
"'NRGAG'",
",",
"'COORD'",
",",
"'GAGES'",
",",
"'ACCUM'",
",",
"'RATES'",
",",
"'RADAR'",
")",
"NUM_CARDS",
"=",
"(",
"'NRPDS'",
",",
"'NRGAG'",
")",
"VALUE_CARDS",
"=",
"(",
"'GAGES'",
",",
"'ACCUM'",
",",
"'RATES'",
",",
"'RADAR'",
")",
"# Define result object",
"result",
"=",
"{",
"'description'",
":",
"None",
",",
"'nrgag'",
":",
"None",
",",
"'nrpds'",
":",
"None",
",",
"'coords'",
":",
"[",
"]",
",",
"'valLines'",
":",
"[",
"]",
"}",
"chunks",
"=",
"pt",
".",
"chunk",
"(",
"KEYWORDS",
",",
"lines",
")",
"# Parse chunks associated with each key",
"for",
"card",
",",
"chunkList",
"in",
"iteritems",
"(",
"chunks",
")",
":",
"# Parse each chunk in the chunk list",
"for",
"chunk",
"in",
"chunkList",
":",
"schunk",
"=",
"chunk",
"[",
"0",
"]",
".",
"strip",
"(",
")",
".",
"split",
"(",
")",
"# Cases",
"if",
"card",
"==",
"'EVENT'",
":",
"# EVENT handler",
"schunk",
"=",
"pt",
".",
"splitLine",
"(",
"chunk",
"[",
"0",
"]",
")",
"result",
"[",
"'description'",
"]",
"=",
"schunk",
"[",
"1",
"]",
"elif",
"card",
"in",
"NUM_CARDS",
":",
"# Num cards handler",
"result",
"[",
"card",
".",
"lower",
"(",
")",
"]",
"=",
"schunk",
"[",
"1",
"]",
"elif",
"card",
"==",
"'COORD'",
":",
"# COORD handler",
"schunk",
"=",
"pt",
".",
"splitLine",
"(",
"chunk",
"[",
"0",
"]",
")",
"try",
":",
"# Extract the event description",
"desc",
"=",
"schunk",
"[",
"3",
"]",
"except",
":",
"# Handle case where the event description is blank",
"desc",
"=",
"\"\"",
"coord",
"=",
"{",
"'x'",
":",
"schunk",
"[",
"1",
"]",
",",
"'y'",
":",
"schunk",
"[",
"2",
"]",
",",
"'description'",
":",
"desc",
"}",
"result",
"[",
"'coords'",
"]",
".",
"append",
"(",
"coord",
")",
"elif",
"card",
"in",
"VALUE_CARDS",
":",
"# Value cards handler",
"# Extract DateTime",
"dateTime",
"=",
"datetime",
"(",
"year",
"=",
"int",
"(",
"schunk",
"[",
"1",
"]",
")",
",",
"month",
"=",
"int",
"(",
"schunk",
"[",
"2",
"]",
")",
",",
"day",
"=",
"int",
"(",
"schunk",
"[",
"3",
"]",
")",
",",
"hour",
"=",
"int",
"(",
"schunk",
"[",
"4",
"]",
")",
",",
"minute",
"=",
"int",
"(",
"schunk",
"[",
"5",
"]",
")",
")",
"# Compile values into a list",
"values",
"=",
"[",
"]",
"for",
"index",
"in",
"range",
"(",
"6",
",",
"len",
"(",
"schunk",
")",
")",
":",
"values",
".",
"append",
"(",
"schunk",
"[",
"index",
"]",
")",
"valueLine",
"=",
"{",
"'type'",
":",
"schunk",
"[",
"0",
"]",
",",
"'dateTime'",
":",
"dateTime",
",",
"'values'",
":",
"values",
"}",
"result",
"[",
"'valLines'",
"]",
".",
"append",
"(",
"valueLine",
")",
"return",
"result"
] | Parse EVENT chunks | [
"Parse",
"EVENT",
"chunks"
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/lib/gag_chunk.py#L15-L102 | train |
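To make the card handling above concrete, a hedged sketch of feeding eventChunk one EVENT block in the GSSHA gage format. The actual line splitting is delegated to gsshapy.lib.parsetools, so the exact whitespace and quote handling is defined there, not here:

    lines = [
        'EVENT "Summer storm"\n',
        'NRGAG 2\n',
        'NRPDS 1\n',
        'COORD 100.0 200.0 "Gage 1"\n',
        'COORD 300.0 400.0 "Gage 2"\n',
        'GAGES 2002 07 04 13 30 0.25 0.40\n',
    ]
    result = eventChunk('EVENT', lines)
    print(result['nrgag'], result['nrpds'])   # -> 2 1 (kept as strings)
    print(result['valLines'][0]['dateTime'])  # -> 2002-07-04 13:30:00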
Losant/losant-rest-python | losantrest/client.py | Client.request | def request(self, method, path, params=None, headers=None, body=None):
""" Base method for making a Losant API request """
if not headers:
headers = {}
if not params:
params = {}
headers["Accept"] = "application/json"
headers["Accept-Version"] = "^1.15.0"
if self.auth_token:
headers["Authorization"] = "Bearer {0}".format(self.auth_token)
path = self.url + path
params = self.flatten_params(params)
response = requests.request(method, path, params=params, headers=headers, json=body)
result = response.text
try:
result = response.json()
except Exception:
pass
if response.status_code >= 400:
raise LosantError(response.status_code, result)
return result | python | def request(self, method, path, params=None, headers=None, body=None):
""" Base method for making a Losant API request """
if not headers:
headers = {}
if not params:
params = {}
headers["Accept"] = "application/json"
headers["Accept-Version"] = "^1.15.0"
if self.auth_token:
headers["Authorization"] = "Bearer {0}".format(self.auth_token)
path = self.url + path
params = self.flatten_params(params)
response = requests.request(method, path, params=params, headers=headers, json=body)
result = response.text
try:
result = response.json()
except Exception:
pass
if response.status_code >= 400:
raise LosantError(response.status_code, result)
return result | [
"def",
"request",
"(",
"self",
",",
"method",
",",
"path",
",",
"params",
"=",
"None",
",",
"headers",
"=",
"None",
",",
"body",
"=",
"None",
")",
":",
"if",
"not",
"headers",
":",
"headers",
"=",
"{",
"}",
"if",
"not",
"params",
":",
"params",
"=",
"{",
"}",
"headers",
"[",
"\"Accept\"",
"]",
"=",
"\"application/json\"",
"headers",
"[",
"\"Accept-Version\"",
"]",
"=",
"\"^1.15.0\"",
"if",
"self",
".",
"auth_token",
":",
"headers",
"[",
"\"Authorization\"",
"]",
"=",
"\"Bearer {0}\"",
".",
"format",
"(",
"self",
".",
"auth_token",
")",
"path",
"=",
"self",
".",
"url",
"+",
"path",
"params",
"=",
"self",
".",
"flatten_params",
"(",
"params",
")",
"response",
"=",
"requests",
".",
"request",
"(",
"method",
",",
"path",
",",
"params",
"=",
"params",
",",
"headers",
"=",
"headers",
",",
"json",
"=",
"body",
")",
"result",
"=",
"response",
".",
"text",
"try",
":",
"result",
"=",
"response",
".",
"json",
"(",
")",
"except",
"Exception",
":",
"pass",
"if",
"response",
".",
"status_code",
">=",
"400",
":",
"raise",
"LosantError",
"(",
"response",
".",
"status_code",
",",
"result",
")",
"return",
"result"
] | Base method for making a Losant API request | [
"Base",
"method",
"for",
"making",
"a",
"Losant",
"API",
"request"
] | 75b20decda0e999002f21811c3508f087e7f13b5 | https://github.com/Losant/losant-rest-python/blob/75b20decda0e999002f21811c3508f087e7f13b5/losantrest/client.py#L165-L190 | train |
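Every generated Losant resource method funnels through request() above, and it can also be called directly. A hedged sketch; the token, application id, and path are placeholders, and the Client constructor keyword is assumed since the constructor is outside this excerpt:

    from losantrest import Client

    client = Client(auth_token='MY_API_TOKEN')
    devices = client.request(
        'GET', '/applications/MY_APP_ID/devices',
        params={'sortField': 'name'})
    # any status >= 400 raises LosantError instead of returning
    print(devices)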
Losant/losant-rest-python | losantrest/client.py | Client.flatten_params | def flatten_params(self, data, base_key=None):
""" Flatten out nested arrays and dicts in query params into correct format """
result = {}
if data is None:
return result
map_data = None
if not isinstance(data, collections.Mapping):
map_data = []
for idx, val in enumerate(data):
map_data.append([str(idx), val])
else:
map_data = list(data.items())
for key, value in map_data:
if not base_key is None:
key = base_key + "[" + key + "]"
if isinstance(value, basestring) or not hasattr(value, "__iter__"):
result[key] = value
else:
result.update(self.flatten_params(value, key))
return result | python | def flatten_params(self, data, base_key=None):
""" Flatten out nested arrays and dicts in query params into correct format """
result = {}
if data is None:
return result
map_data = None
if not isinstance(data, collections.Mapping):
map_data = []
for idx, val in enumerate(data):
map_data.append([str(idx), val])
else:
map_data = list(data.items())
for key, value in map_data:
if not base_key is None:
key = base_key + "[" + key + "]"
if isinstance(value, basestring) or not hasattr(value, "__iter__"):
result[key] = value
else:
result.update(self.flatten_params(value, key))
return result | [
"def",
"flatten_params",
"(",
"self",
",",
"data",
",",
"base_key",
"=",
"None",
")",
":",
"result",
"=",
"{",
"}",
"if",
"data",
"is",
"None",
":",
"return",
"result",
"map_data",
"=",
"None",
"if",
"not",
"isinstance",
"(",
"data",
",",
"collections",
".",
"Mapping",
")",
":",
"map_data",
"=",
"[",
"]",
"for",
"idx",
",",
"val",
"in",
"enumerate",
"(",
"data",
")",
":",
"map_data",
".",
"append",
"(",
"[",
"str",
"(",
"idx",
")",
",",
"val",
"]",
")",
"else",
":",
"map_data",
"=",
"list",
"(",
"data",
".",
"items",
"(",
")",
")",
"for",
"key",
",",
"value",
"in",
"map_data",
":",
"if",
"not",
"base_key",
"is",
"None",
":",
"key",
"=",
"base_key",
"+",
"\"[\"",
"+",
"key",
"+",
"\"]\"",
"if",
"isinstance",
"(",
"value",
",",
"basestring",
")",
"or",
"not",
"hasattr",
"(",
"value",
",",
"\"__iter__\"",
")",
":",
"result",
"[",
"key",
"]",
"=",
"value",
"else",
":",
"result",
".",
"update",
"(",
"self",
".",
"flatten_params",
"(",
"value",
",",
"key",
")",
")",
"return",
"result"
] | Flatten out nested arrays and dicts in query params into correct format | [
"Flatten",
"out",
"nested",
"arrays",
"and",
"dicts",
"in",
"query",
"params",
"into",
"correct",
"format"
] | 75b20decda0e999002f21811c3508f087e7f13b5 | https://github.com/Losant/losant-rest-python/blob/75b20decda0e999002f21811c3508f087e7f13b5/losantrest/client.py#L192-L216 | train |
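flatten_params is what lets callers hand request() nested query structures; it rewrites them into the bracketed keys the Losant API expects. A small demonstration of the recursion (Client() without arguments is assumed constructible here; also note collections.Mapping moved to collections.abc in Python 3.3 and was removed in 3.10, so this assumes an interpreter where the import above still resolves):

    nested = {
        'sortField': 'name',
        'tagFilter': [{'key': 'floor', 'value': '2'}],
    }
    flat = Client().flatten_params(nested)
    # {'sortField': 'name',
    #  'tagFilter[0][key]': 'floor',
    #  'tagFilter[0][value]': '2'}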
vinci1it2000/schedula | examples/processing_chain/process.py | read_excel | def read_excel(input_fpath):
"""
Reads the excel file.
:param input_fpath:
Input file path.
:type input_fpath: str
:return:
Raw Data.
:rtype: dict
"""
return {k: v.values for k, v in pd.read_excel(input_fpath).items()} | python | def read_excel(input_fpath):
"""
Reads the excel file.
:param input_fpath:
Input file path.
:type input_fpath: str
:return:
Raw Data.
:rtype: dict
"""
return {k: v.values for k, v in pd.read_excel(input_fpath).items()} | [
"def",
"read_excel",
"(",
"input_fpath",
")",
":",
"return",
"{",
"k",
":",
"v",
".",
"values",
"for",
"k",
",",
"v",
"in",
"pd",
".",
"read_excel",
"(",
"input_fpath",
")",
".",
"items",
"(",
")",
"}"
] | Reads the excel file.
:param input_fpath:
Input file path.
:type input_fpath: str
:return:
Raw Data.
:rtype: dict | [
"Reads",
"the",
"excel",
"file",
"."
] | addb9fd685be81544b796c51383ac00a31543ce9 | https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/examples/processing_chain/process.py#L13-L25 | train |
vinci1it2000/schedula | examples/processing_chain/process.py | save_outputs | def save_outputs(outputs, output_fpath):
"""
Save model outputs in an Excel file.
:param outputs:
Model outputs.
:type outputs: dict
:param output_fpath:
Output file path.
:type output_fpath: str
"""
df = pd.DataFrame(outputs)
with pd.ExcelWriter(output_fpath) as writer:
df.to_excel(writer) | python | def save_outputs(outputs, output_fpath):
"""
Save model outputs in an Excel file.
:param outputs:
Model outputs.
:type outputs: dict
:param output_fpath:
Output file path.
:type output_fpath: str
"""
df = pd.DataFrame(outputs)
with pd.ExcelWriter(output_fpath) as writer:
df.to_excel(writer) | [
"def",
"save_outputs",
"(",
"outputs",
",",
"output_fpath",
")",
":",
"df",
"=",
"pd",
".",
"DataFrame",
"(",
"outputs",
")",
"with",
"pd",
".",
"ExcelWriter",
"(",
"output_fpath",
")",
"as",
"writer",
":",
"df",
".",
"to_excel",
"(",
"writer",
")"
] | Save model outputs in an Excel file.
:param outputs:
Model outputs.
:type outputs: dict
:param output_fpath:
Output file path.
:type output_fpath: str | [
"Save",
"model",
"outputs",
"in",
"an",
"Excel",
"file",
"."
] | addb9fd685be81544b796c51383ac00a31543ce9 | https://github.com/vinci1it2000/schedula/blob/addb9fd685be81544b796c51383ac00a31543ce9/examples/processing_chain/process.py#L51-L65 | train |
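The two helpers above bracket the example processing chain: read_excel yields a dict of column name to numpy array, and save_outputs writes such a dict back to disk. A hedged round trip with invented file and column names:

    raw = read_excel('inputs.xlsx')  # e.g. {'velocities': ndarray, ...}
    outputs = {k: v * 3.6 for k, v in raw.items()}  # say, m/s to km/h
    save_outputs(outputs, 'outputs.xlsx')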
CI-WATER/gsshapy | gsshapy/orm/gag.py | PrecipFile._read | def _read(self, directory, filename, session, path, name, extension, spatial, spatialReferenceID, replaceParamFile):
"""
Precipitation Read from File Method
"""
# Set file extension property
self.fileExtension = extension
# Dictionary of keywords/cards and parse function names
KEYWORDS = ('EVENT',)
# Parse file into chunks associated with keywords/cards
with open(path, 'r') as f:
chunks = pt.chunk(KEYWORDS, f)
# Parse chunks associated with each key
for key, chunkList in iteritems(chunks):
# Parse each chunk in the chunk list
for chunk in chunkList:
result = gak.eventChunk(key, chunk)
self._createGsshaPyObjects(result)
# Add this PrecipFile to the database session
session.add(self) | python | def _read(self, directory, filename, session, path, name, extension, spatial, spatialReferenceID, replaceParamFile):
"""
Precipitation Read from File Method
"""
# Set file extension property
self.fileExtension = extension
# Dictionary of keywords/cards and parse function names
KEYWORDS = ('EVENT',)
# Parse file into chunks associated with keywords/cards
with open(path, 'r') as f:
chunks = pt.chunk(KEYWORDS, f)
# Parse chunks associated with each key
for key, chunkList in iteritems(chunks):
# Parse each chunk in the chunk list
for chunk in chunkList:
result = gak.eventChunk(key, chunk)
self._createGsshaPyObjects(result)
# Add this PrecipFile to the database session
session.add(self) | [
"def",
"_read",
"(",
"self",
",",
"directory",
",",
"filename",
",",
"session",
",",
"path",
",",
"name",
",",
"extension",
",",
"spatial",
",",
"spatialReferenceID",
",",
"replaceParamFile",
")",
":",
"# Set file extension property",
"self",
".",
"fileExtension",
"=",
"extension",
"# Dictionary of keywords/cards and parse function names",
"KEYWORDS",
"=",
"(",
"'EVENT'",
",",
")",
"# Parse file into chunks associated with keywords/cards",
"with",
"open",
"(",
"path",
",",
"'r'",
")",
"as",
"f",
":",
"chunks",
"=",
"pt",
".",
"chunk",
"(",
"KEYWORDS",
",",
"f",
")",
"# Parse chunks associated with each key",
"for",
"key",
",",
"chunkList",
"in",
"iteritems",
"(",
"chunks",
")",
":",
"# Parse each chunk in the chunk list",
"for",
"chunk",
"in",
"chunkList",
":",
"result",
"=",
"gak",
".",
"eventChunk",
"(",
"key",
",",
"chunk",
")",
"self",
".",
"_createGsshaPyObjects",
"(",
"result",
")",
"# Add this PrecipFile to the database session",
"session",
".",
"add",
"(",
"self",
")"
] | Precipitation Read from File Method | [
"Precipitation",
"Read",
"from",
"File",
"Method"
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/gag.py#L62-L84 | train |
CI-WATER/gsshapy | gsshapy/orm/gag.py | PrecipFile._write | def _write(self, session, openFile, replaceParamFile):
"""
Precipitation File Write to File Method
"""
# Retrieve the events associated with this PrecipFile
events = self.precipEvents
# Write each event to file
for event in events:
openFile.write('EVENT "%s"\nNRGAG %s\nNRPDS %s\n' % (event.description, event.nrGag, event.nrPds))
if event.nrGag > 0:
values = event.values
valList = []
# Convert PrecipValue objects into a list of dictionaries, valList,
# so that it is compatible with the pivot function.
for value in values:
valList.append({'ValueType': value.valueType,
'DateTime': value.dateTime,
'Gage': value.gage.id,
'Value': value.value})
# Pivot using the function found at:
# code.activestate.com/recipes/334695
pivotedValues = pivot.pivot(valList, ('DateTime', 'ValueType'), ('Gage',), 'Value')
## TODO: Create custom pivot function that can work with sqlalchemy
## objects explicitly without the costly conversion.
# Create an empty set for obtaining a list of unique gages
gages = session.query(PrecipGage). \
filter(PrecipGage.event == event). \
order_by(PrecipGage.id). \
all()
for gage in gages:
openFile.write('COORD %s %s "%s"\n' % (gage.x, gage.y, gage.description))
# Write the value rows out to file
for row in pivotedValues:
# Extract the PrecipValues
valString = ''
# Retrieve a list of sorted keys. This assumes the values are
# read into the database in order
keys = sorted([key for key in row if key != 'DateTime' and key != 'ValueType'])
# String all of the values together into valString
for key in keys:
if key != 'DateTime' and key != 'ValueType':
valString = '%s %.3f' % (valString, row[key])
# Write value line to file with appropriate formatting
openFile.write('%s %.4d %.2d %.2d %.2d %.2d%s\n' % (
row['ValueType'],
row['DateTime'].year,
row['DateTime'].month,
row['DateTime'].day,
row['DateTime'].hour,
row['DateTime'].minute,
valString)) | python | def _write(self, session, openFile, replaceParamFile):
"""
Precipitation File Write to File Method
"""
# Retrieve the events associated with this PrecipFile
events = self.precipEvents
# Write each event to file
for event in events:
openFile.write('EVENT "%s"\nNRGAG %s\nNRPDS %s\n' % (event.description, event.nrGag, event.nrPds))
if event.nrGag > 0:
values = event.values
valList = []
# Convert PrecipValue objects into a list of dictionaries, valList,
# so that it is compatible with the pivot function.
for value in values:
valList.append({'ValueType': value.valueType,
'DateTime': value.dateTime,
'Gage': value.gage.id,
'Value': value.value})
# Pivot using the function found at:
# code.activestate.com/recipes/334695
pivotedValues = pivot.pivot(valList, ('DateTime', 'ValueType'), ('Gage',), 'Value')
## TODO: Create custom pivot function that can work with sqlalchemy
## objects explicitly without the costly conversion.
# Create an empty set for obtaining a list of unique gages
gages = session.query(PrecipGage). \
filter(PrecipGage.event == event). \
order_by(PrecipGage.id). \
all()
for gage in gages:
openFile.write('COORD %s %s "%s"\n' % (gage.x, gage.y, gage.description))
# Write the value rows out to file
for row in pivotedValues:
# Extract the PrecipValues
valString = ''
# Retrieve a list of sorted keys. This assumes the values are
# read into the database in order
keys = sorted([key for key in row if key != 'DateTime' and key != 'ValueType'])
# String all of the values together into valString
for key in keys:
if key != 'DateTime' and key != 'ValueType':
valString = '%s %.3f' % (valString, row[key])
# Write value line to file with appropriate formatting
openFile.write('%s %.4d %.2d %.2d %.2d %.2d%s\n' % (
row['ValueType'],
row['DateTime'].year,
row['DateTime'].month,
row['DateTime'].day,
row['DateTime'].hour,
row['DateTime'].minute,
valString)) | [
"def",
"_write",
"(",
"self",
",",
"session",
",",
"openFile",
",",
"replaceParamFile",
")",
":",
"# Retrieve the events associated with this PrecipFile",
"events",
"=",
"self",
".",
"precipEvents",
"# Write each event to file",
"for",
"event",
"in",
"events",
":",
"openFile",
".",
"write",
"(",
"'EVENT \"%s\"\\nNRGAG %s\\nNRPDS %s\\n'",
"%",
"(",
"event",
".",
"description",
",",
"event",
".",
"nrGag",
",",
"event",
".",
"nrPds",
")",
")",
"if",
"event",
".",
"nrGag",
">",
"0",
":",
"values",
"=",
"event",
".",
"values",
"valList",
"=",
"[",
"]",
"# Convert PrecipValue objects into a list of dictionaries, valList,",
"# so that it is compatible with the pivot function.",
"for",
"value",
"in",
"values",
":",
"valList",
".",
"append",
"(",
"{",
"'ValueType'",
":",
"value",
".",
"valueType",
",",
"'DateTime'",
":",
"value",
".",
"dateTime",
",",
"'Gage'",
":",
"value",
".",
"gage",
".",
"id",
",",
"'Value'",
":",
"value",
".",
"value",
"}",
")",
"# Pivot using the function found at:",
"# code.activestate.com/recipes/334695",
"pivotedValues",
"=",
"pivot",
".",
"pivot",
"(",
"valList",
",",
"(",
"'DateTime'",
",",
"'ValueType'",
")",
",",
"(",
"'Gage'",
",",
")",
",",
"'Value'",
")",
"## TODO: Create custom pivot function that can work with sqlalchemy",
"## objects explicitly without the costly conversion.",
"# Create an empty set for obtaining a list of unique gages",
"gages",
"=",
"session",
".",
"query",
"(",
"PrecipGage",
")",
".",
"filter",
"(",
"PrecipGage",
".",
"event",
"==",
"event",
")",
".",
"order_by",
"(",
"PrecipGage",
".",
"id",
")",
".",
"all",
"(",
")",
"for",
"gage",
"in",
"gages",
":",
"openFile",
".",
"write",
"(",
"'COORD %s %s \"%s\"\\n'",
"%",
"(",
"gage",
".",
"x",
",",
"gage",
".",
"y",
",",
"gage",
".",
"description",
")",
")",
"# Write the value rows out to file",
"for",
"row",
"in",
"pivotedValues",
":",
"# Extract the PrecipValues",
"valString",
"=",
"''",
"# Retreive a list of sorted keys. This assumes the values are",
"# read into the database in order",
"keys",
"=",
"sorted",
"(",
"[",
"key",
"for",
"key",
"in",
"row",
"if",
"key",
"!=",
"'DateTime'",
"and",
"key",
"!=",
"'ValueType'",
"]",
")",
"# String all of the values together into valString",
"for",
"key",
"in",
"keys",
":",
"if",
"key",
"!=",
"'DateTime'",
"and",
"key",
"!=",
"'ValueType'",
":",
"valString",
"=",
"'%s %.3f'",
"%",
"(",
"valString",
",",
"row",
"[",
"key",
"]",
")",
"# Write value line to file with appropriate formatting",
"openFile",
".",
"write",
"(",
"'%s %.4d %.2d %.2d %.2d %.2d%s\\n'",
"%",
"(",
"row",
"[",
"'ValueType'",
"]",
",",
"row",
"[",
"'DateTime'",
"]",
".",
"year",
",",
"row",
"[",
"'DateTime'",
"]",
".",
"month",
",",
"row",
"[",
"'DateTime'",
"]",
".",
"day",
",",
"row",
"[",
"'DateTime'",
"]",
".",
"hour",
",",
"row",
"[",
"'DateTime'",
"]",
".",
"minute",
",",
"valString",
")",
")"
] | Precipitation File Write to File Method | [
"Precipitation",
"File",
"Write",
"to",
"File",
"Method"
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/gag.py#L86-L148 | train |
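The pivot call is the crux of the writer above: it folds one PrecipValue row per gage into a single row per (DateTime, ValueType) pair with one column per gage. A hedged illustration of the shape the sorted-keys loop assumes; the exact column labels come from the ActiveState recipe, and using raw gage ids as keys is an inference, not confirmed by the source:

    from datetime import datetime

    dt = datetime(2002, 7, 4, 13, 30)
    valList = [
        {'ValueType': 'GAGES', 'DateTime': dt, 'Gage': 1, 'Value': 0.25},
        {'ValueType': 'GAGES', 'DateTime': dt, 'Gage': 2, 'Value': 0.40},
    ]
    rows = pivot.pivot(valList, ('DateTime', 'ValueType'), ('Gage',), 'Value')
    # -> [{'DateTime': dt, 'ValueType': 'GAGES', 1: 0.25, 2: 0.40}]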
CI-WATER/gsshapy | gsshapy/orm/gag.py | PrecipFile._createGsshaPyObjects | def _createGsshaPyObjects(self, eventChunk):
"""
Create GSSHAPY PrecipEvent, PrecipValue, and PrecipGage Objects Method
"""
## TODO: Add Support for RADAR file format type values
# Create GSSHAPY PrecipEvent
event = PrecipEvent(description=eventChunk['description'],
nrGag=eventChunk['nrgag'],
nrPds=eventChunk['nrpds'])
# Associate PrecipEvent with PrecipFile
event.precipFile = self
gages = []
for coord in eventChunk['coords']:
# Create GSSHAPY PrecipGage object
gage = PrecipGage(description=coord['description'],
x=coord['x'],
y=coord['y'])
# Associate PrecipGage with PrecipEvent
gage.event = event
# Append to gages list for association with PrecipValues
gages.append(gage)
for valLine in eventChunk['valLines']:
for index, value in enumerate(valLine['values']):
# Create GSSHAPY PrecipValue object
val = PrecipValue(valueType=valLine['type'],
dateTime=valLine['dateTime'],
value=value)
# Associate PrecipValue with PrecipEvent and PrecipGage
val.event = event
val.gage = gages[index] | python | def _createGsshaPyObjects(self, eventChunk):
"""
Create GSSHAPY PrecipEvent, PrecipValue, and PrecipGage Objects Method
"""
## TODO: Add Support for RADAR file format type values
# Create GSSHAPY PrecipEvent
event = PrecipEvent(description=eventChunk['description'],
nrGag=eventChunk['nrgag'],
nrPds=eventChunk['nrpds'])
# Associate PrecipEvent with PrecipFile
event.precipFile = self
gages = []
for coord in eventChunk['coords']:
# Create GSSHAPY PrecipGage object
gage = PrecipGage(description=coord['description'],
x=coord['x'],
y=coord['y'])
# Associate PrecipGage with PrecipEvent
gage.event = event
# Append to gages list for association with PrecipValues
gages.append(gage)
for valLine in eventChunk['valLines']:
for index, value in enumerate(valLine['values']):
# Create GSSHAPY PrecipValue object
val = PrecipValue(valueType=valLine['type'],
dateTime=valLine['dateTime'],
value=value)
# Associate PrecipValue with PrecipEvent and PrecipGage
val.event = event
val.gage = gages[index] | [
"def",
"_createGsshaPyObjects",
"(",
"self",
",",
"eventChunk",
")",
":",
"## TODO: Add Support for RADAR file format type values",
"# Create GSSHAPY PrecipEvent",
"event",
"=",
"PrecipEvent",
"(",
"description",
"=",
"eventChunk",
"[",
"'description'",
"]",
",",
"nrGag",
"=",
"eventChunk",
"[",
"'nrgag'",
"]",
",",
"nrPds",
"=",
"eventChunk",
"[",
"'nrpds'",
"]",
")",
"# Associate PrecipEvent with PrecipFile",
"event",
".",
"precipFile",
"=",
"self",
"gages",
"=",
"[",
"]",
"for",
"coord",
"in",
"eventChunk",
"[",
"'coords'",
"]",
":",
"# Create GSSHAPY PrecipGage object",
"gage",
"=",
"PrecipGage",
"(",
"description",
"=",
"coord",
"[",
"'description'",
"]",
",",
"x",
"=",
"coord",
"[",
"'x'",
"]",
",",
"y",
"=",
"coord",
"[",
"'y'",
"]",
")",
"# Associate PrecipGage with PrecipEvent",
"gage",
".",
"event",
"=",
"event",
"# Append to gages list for association with PrecipValues",
"gages",
".",
"append",
"(",
"gage",
")",
"for",
"valLine",
"in",
"eventChunk",
"[",
"'valLines'",
"]",
":",
"for",
"index",
",",
"value",
"in",
"enumerate",
"(",
"valLine",
"[",
"'values'",
"]",
")",
":",
"# Create GSSHAPY PrecipValue object",
"val",
"=",
"PrecipValue",
"(",
"valueType",
"=",
"valLine",
"[",
"'type'",
"]",
",",
"dateTime",
"=",
"valLine",
"[",
"'dateTime'",
"]",
",",
"value",
"=",
"value",
")",
"# Associate PrecipValue with PrecipEvent and PrecipGage",
"val",
".",
"event",
"=",
"event",
"val",
".",
"gage",
"=",
"gages",
"[",
"index",
"]"
] | Create GSSHAPY PrecipEvent, PrecipValue, and PrecipGage Objects Method | [
"Create",
"GSSHAPY",
"PrecipEvent",
"PrecipValue",
"and",
"PrecipGage",
"Objects",
"Method"
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/gag.py#L150-L186 | train |
CI-WATER/gsshapy | gsshapy/orm/pro.py | ProjectionFile.lookupSpatialReferenceID | def lookupSpatialReferenceID(cls, directory, filename):
"""
Look up spatial reference system using the projection file.
Args:
directory (str):
filename (str):
Return:
int: Spatial Reference ID
"""
path = os.path.join(directory, filename)
with open(path, 'r') as f:
srid = lookupSpatialReferenceID(f.read())
return srid | python | def lookupSpatialReferenceID(cls, directory, filename):
"""
Look up spatial reference system using the projection file.
Args:
directory (str):
filename (str):
Return:
int: Spatial Reference ID
"""
path = os.path.join(directory, filename)
with open(path, 'r') as f:
srid = lookupSpatialReferenceID(f.read())
return srid | [
"def",
"lookupSpatialReferenceID",
"(",
"cls",
",",
"directory",
",",
"filename",
")",
":",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"filename",
")",
"with",
"open",
"(",
"path",
",",
"'r'",
")",
"as",
"f",
":",
"srid",
"=",
"lookupSpatialReferenceID",
"(",
"f",
".",
"read",
"(",
")",
")",
"return",
"srid"
] | Look up spatial reference system using the projection file.
Args:
directory (str):
filename (str):
Return:
int: Spatial Reference ID | [
"Look",
"up",
"spatial",
"reference",
"system",
"using",
"the",
"projection",
"file",
"."
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/pro.py#L61-L78 | train |
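A hedged usage sketch for the classmethod above; the project path and the printed SRID are illustrative only:

    srid = ProjectionFile.lookupSpatialReferenceID(
        directory='/path/to/gssha_project',
        filename='grid_standard_prj.pro')
    print(srid)  # e.g. 26912 (NAD83 / UTM zone 12N)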
CI-WATER/gsshapy | gsshapy/orm/pro.py | ProjectionFile._read | def _read(self, directory, filename, session, path, name, extension, spatial, spatialReferenceID, replaceParamFile):
"""
Projection File Read from File Method
"""
# Set file extension property
self.fileExtension = extension
# Open file and parse into a data structure
with io_open(path, 'r') as f:
self.projection = f.read() | python | def _read(self, directory, filename, session, path, name, extension, spatial, spatialReferenceID, replaceParamFile):
"""
Projection File Read from File Method
"""
# Set file extension property
self.fileExtension = extension
# Open file and parse into a data structure
with io_open(path, 'r') as f:
self.projection = f.read() | [
"def",
"_read",
"(",
"self",
",",
"directory",
",",
"filename",
",",
"session",
",",
"path",
",",
"name",
",",
"extension",
",",
"spatial",
",",
"spatialReferenceID",
",",
"replaceParamFile",
")",
":",
"# Set file extension property",
"self",
".",
"fileExtension",
"=",
"extension",
"# Open file and parse into a data structure",
"with",
"io_open",
"(",
"path",
",",
"'r'",
")",
"as",
"f",
":",
"self",
".",
"projection",
"=",
"f",
".",
"read",
"(",
")"
] | Projection File Read from File Method | [
"Projection",
"File",
"Read",
"from",
"File",
"Method"
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/pro.py#L81-L90 | train |
CI-WATER/gsshapy | gsshapy/orm/pro.py | ProjectionFile._write | def _write(self, session, openFile, replaceParamFile):
"""
Projection File Write to File Method
"""
# Write lines
openFile.write(text(self.projection)) | python | def _write(self, session, openFile, replaceParamFile):
"""
Projection File Write to File Method
"""
# Write lines
openFile.write(text(self.projection)) | [
"def",
"_write",
"(",
"self",
",",
"session",
",",
"openFile",
",",
"replaceParamFile",
")",
":",
"# Write lines",
"openFile",
".",
"write",
"(",
"text",
"(",
"self",
".",
"projection",
")",
")"
] | Projection File Write to File Method | [
"Projection",
"File",
"Write",
"to",
"File",
"Method"
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/pro.py#L92-L97 | train |
Robpol86/etaprogress | etaprogress/components/base_progress_bar.py | BaseProgressBar.numerator | def numerator(self, value):
"""Sets a new numerator and generates the ETA. Must be greater than or equal to previous numerator."""
# If ETA is every iteration, don't do anything fancy.
if self.eta_every <= 1:
self._eta.numerator = value
self._eta_string = self._generate_eta(self._eta.eta_seconds)
return
# If ETA is not every iteration, unstable rate is used. If this bar is undefined, no point in calculating ever.
if self._eta.undefined:
self._eta.set_numerator(value, calculate=False)
return
# Calculate if this iteration is the right one.
if self._eta_count >= self.eta_every:
self._eta_count = 1
self._eta.numerator = value
self._eta_string = self._generate_eta(self._eta.eta_seconds)
return
self._eta_count += 1
self._eta.set_numerator(value, calculate=False) | python | def numerator(self, value):
"""Sets a new numerator and generates the ETA. Must be greater than or equal to previous numerator."""
# If ETA is every iteration, don't do anything fancy.
if self.eta_every <= 1:
self._eta.numerator = value
self._eta_string = self._generate_eta(self._eta.eta_seconds)
return
# If ETA is not every iteration, unstable rate is used. If this bar is undefined, no point in calculating ever.
if self._eta.undefined:
self._eta.set_numerator(value, calculate=False)
return
# Calculate if this iteration is the right one.
if self._eta_count >= self.eta_every:
self._eta_count = 1
self._eta.numerator = value
self._eta_string = self._generate_eta(self._eta.eta_seconds)
return
self._eta_count += 1
self._eta.set_numerator(value, calculate=False) | [
"def",
"numerator",
"(",
"self",
",",
"value",
")",
":",
"# If ETA is every iteration, don't do anything fancy.",
"if",
"self",
".",
"eta_every",
"<=",
"1",
":",
"self",
".",
"_eta",
".",
"numerator",
"=",
"value",
"self",
".",
"_eta_string",
"=",
"self",
".",
"_generate_eta",
"(",
"self",
".",
"_eta",
".",
"eta_seconds",
")",
"return",
"# If ETA is not every iteration, unstable rate is used. If this bar is undefined, no point in calculating ever.",
"if",
"self",
".",
"_eta",
".",
"undefined",
":",
"self",
".",
"_eta",
".",
"set_numerator",
"(",
"value",
",",
"calculate",
"=",
"False",
")",
"return",
"# Calculate if this iteration is the right one.",
"if",
"self",
".",
"_eta_count",
">=",
"self",
".",
"eta_every",
":",
"self",
".",
"_eta_count",
"=",
"1",
"self",
".",
"_eta",
".",
"numerator",
"=",
"value",
"self",
".",
"_eta_string",
"=",
"self",
".",
"_generate_eta",
"(",
"self",
".",
"_eta",
".",
"eta_seconds",
")",
"return",
"self",
".",
"_eta_count",
"+=",
"1",
"self",
".",
"_eta",
".",
"set_numerator",
"(",
"value",
",",
"calculate",
"=",
"False",
")"
] | Sets a new numerator and generates the ETA. Must be greater than or equal to previous numerator. | [
"Sets",
"a",
"new",
"numerator",
"and",
"generates",
"the",
"ETA",
".",
"Must",
"be",
"greater",
"than",
"or",
"equal",
"to",
"previous",
"numerator",
"."
] | 224e8a248c2bf820bad218763281914ad3983fff | https://github.com/Robpol86/etaprogress/blob/224e8a248c2bf820bad218763281914ad3983fff/etaprogress/components/base_progress_bar.py#L40-L61 | train |
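The eta_every batching above only pays for ETA string generation on every Nth update; the intervening updates just record the numerator cheaply. A hedged sketch against the concrete ProgressBar subclass, with the constructor signature assumed from etaprogress's published examples:

    from etaprogress.progress import ProgressBar

    bar = ProgressBar(1000000)
    bar.eta_every = 25  # regenerate the ETA only every 25th update
    for i in range(1000001):
        bar.numerator = i
        print(bar, end='\r')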
Robpol86/etaprogress | etaprogress/components/base_progress_bar.py | BaseProgressBar.rate | def rate(self):
"""Returns the rate of the progress as a float. Selects the unstable rate if eta_every > 1 for performance."""
return float(self._eta.rate_unstable if self.eta_every > 1 else self._eta.rate) | python | def rate(self):
"""Returns the rate of the progress as a float. Selects the unstable rate if eta_every > 1 for performance."""
return float(self._eta.rate_unstable if self.eta_every > 1 else self._eta.rate) | [
"def",
"rate",
"(",
"self",
")",
":",
"return",
"float",
"(",
"self",
".",
"_eta",
".",
"rate_unstable",
"if",
"self",
".",
"eta_every",
">",
"1",
"else",
"self",
".",
"_eta",
".",
"rate",
")"
] | Returns the rate of the progress as a float. Selects the unstable rate if eta_every > 1 for performance. | [
"Returns",
"the",
"rate",
"of",
"the",
"progress",
"as",
"a",
"float",
".",
"Selects",
"the",
"unstable",
"rate",
"if",
"eta_every",
">",
"1",
"for",
"performance",
"."
] | 224e8a248c2bf820bad218763281914ad3983fff | https://github.com/Robpol86/etaprogress/blob/224e8a248c2bf820bad218763281914ad3983fff/etaprogress/components/base_progress_bar.py#L69-L71 | train |
CI-WATER/gsshapy | gsshapy/orm/ele.py | ElevationGridFile.generateFromRaster | def generateFromRaster(self,
elevation_raster,
shapefile_path=None,
out_elevation_grid=None,
resample_method=gdalconst.GRA_Average,
load_raster_to_db=True):
"""
Generates an elevation grid for the GSSHA simulation
from an elevation raster
Example::
from gsshapy.orm import ProjectFile, ElevationGridFile
from gsshapy.lib import db_tools as dbt
gssha_directory = '/gsshapy/tests/grid_standard/gssha_project'
elevation_raster = 'elevation.tif'
project_manager, db_sessionmaker = \
dbt.get_project_session('grid_standard',
gssha_directory)
db_session = db_sessionmaker()
# read project file
project_manager.readInput(directory=gssha_directory,
projectFileName='grid_standard.prj',
session=db_session)
# generate elevation grid
elevation_grid = ElevationGridFile(session=db_session,
project_file=project_manager)
elevation_grid.generateFromRaster(elevation_raster)
# write out updated parameters
project_manager.writeInput(session=db_session,
directory=gssha_directory,
name='grid_standard')
"""
if not self.projectFile:
raise ValueError("Must be connected to project file ...")
# make sure paths are absolute as the working directory changes
elevation_raster = os.path.abspath(elevation_raster)
if shapefile_path is not None:
shapefile_path = os.path.abspath(shapefile_path)
# must match elevation mask grid
mask_grid = self.projectFile.getGrid()
if out_elevation_grid is None:
out_elevation_grid = '{0}.{1}'.format(self.projectFile.name,
self.fileExtension)
elevation_grid = resample_grid(elevation_raster,
mask_grid,
resample_method=resample_method,
as_gdal_grid=True)
with tmp_chdir(self.projectFile.project_directory):
elevation_grid.to_grass_ascii(out_elevation_grid, print_nodata=False)
# read raster into object
if load_raster_to_db:
self._load_raster_text(out_elevation_grid)
self.filename = out_elevation_grid
self.projectFile.setCard("ELEVATION", out_elevation_grid, add_quotes=True)
# find outlet and add slope
self.projectFile.findOutlet(shapefile_path) | python | def generateFromRaster(self,
elevation_raster,
shapefile_path=None,
out_elevation_grid=None,
resample_method=gdalconst.GRA_Average,
load_raster_to_db=True):
"""
Generates an elevation grid for the GSSHA simulation
from an elevation raster
Example::
from gsshapy.orm import ProjectFile, ElevationGridFile
from gsshapy.lib import db_tools as dbt
gssha_directory = '/gsshapy/tests/grid_standard/gssha_project'
elevation_raster = 'elevation.tif'
project_manager, db_sessionmaker = \
dbt.get_project_session('grid_standard',
gssha_directory)
db_session = db_sessionmaker()
# read project file
project_manager.readInput(directory=gssha_directory,
projectFileName='grid_standard.prj',
session=db_session)
# generate elevation grid
elevation_grid = ElevationGridFile(session=db_session,
project_file=project_manager)
elevation_grid.generateFromRaster(elevation_raster)
# write out updated parameters
project_manager.writeInput(session=db_session,
directory=gssha_directory,
name='grid_standard')
"""
if not self.projectFile:
raise ValueError("Must be connected to project file ...")
# make sure paths are absolute as the working directory changes
elevation_raster = os.path.abspath(elevation_raster)
if shapefile_path is not None:
shapefile_path = os.path.abspath(shapefile_path)
# must match elevation mask grid
mask_grid = self.projectFile.getGrid()
if out_elevation_grid is None:
out_elevation_grid = '{0}.{1}'.format(self.projectFile.name,
self.fileExtension)
elevation_grid = resample_grid(elevation_raster,
mask_grid,
resample_method=resample_method,
as_gdal_grid=True)
with tmp_chdir(self.projectFile.project_directory):
elevation_grid.to_grass_ascii(out_elevation_grid, print_nodata=False)
# read raster into object
if load_raster_to_db:
self._load_raster_text(out_elevation_grid)
self.filename = out_elevation_grid
self.projectFile.setCard("ELEVATION", out_elevation_grid, add_quotes=True)
# find outlet and add slope
self.projectFile.findOutlet(shapefile_path) | [
"def",
"generateFromRaster",
"(",
"self",
",",
"elevation_raster",
",",
"shapefile_path",
"=",
"None",
",",
"out_elevation_grid",
"=",
"None",
",",
"resample_method",
"=",
"gdalconst",
".",
"GRA_Average",
",",
"load_raster_to_db",
"=",
"True",
")",
":",
"if",
"not",
"self",
".",
"projectFile",
":",
"raise",
"ValueError",
"(",
"\"Must be connected to project file ...\"",
")",
"# make sure paths are absolute as the working directory changes",
"elevation_raster",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"elevation_raster",
")",
"shapefile_path",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"shapefile_path",
")",
"# must match elevation mask grid",
"mask_grid",
"=",
"self",
".",
"projectFile",
".",
"getGrid",
"(",
")",
"if",
"out_elevation_grid",
"is",
"None",
":",
"out_elevation_grid",
"=",
"'{0}.{1}'",
".",
"format",
"(",
"self",
".",
"projectFile",
".",
"name",
",",
"self",
".",
"fileExtension",
")",
"elevation_grid",
"=",
"resample_grid",
"(",
"elevation_raster",
",",
"mask_grid",
",",
"resample_method",
"=",
"resample_method",
",",
"as_gdal_grid",
"=",
"True",
")",
"with",
"tmp_chdir",
"(",
"self",
".",
"projectFile",
".",
"project_directory",
")",
":",
"elevation_grid",
".",
"to_grass_ascii",
"(",
"out_elevation_grid",
",",
"print_nodata",
"=",
"False",
")",
"# read raster into object",
"if",
"load_raster_to_db",
":",
"self",
".",
"_load_raster_text",
"(",
"out_elevation_grid",
")",
"self",
".",
"filename",
"=",
"out_elevation_grid",
"self",
".",
"projectFile",
".",
"setCard",
"(",
"\"ELEVATION\"",
",",
"out_elevation_grid",
",",
"add_quotes",
"=",
"True",
")",
"# find outlet and add slope",
"self",
".",
"projectFile",
".",
"findOutlet",
"(",
"shapefile_path",
")"
] | Generates an elevation grid for the GSSHA simulation
from an elevation raster
Example::
from gsshapy.orm import ProjectFile, ElevationGridFile
from gsshapy.lib import db_tools as dbt
gssha_directory = '/gsshapy/tests/grid_standard/gssha_project'
elevation_raster = 'elevation.tif'
project_manager, db_sessionmaker = \
dbt.get_project_session('grid_standard',
gssha_directory)
db_session = db_sessionmaker()
# read project file
project_manager.readInput(directory=gssha_directory,
projectFileName='grid_standard.prj',
session=db_session)
# generate elevation grid
elevation_grid = ElevationGridFile(session=db_session,
project_file=project_manager)
elevation_grid.generateFromRaster(elevation_raster)
# write out updated parameters
project_manager.writeInput(session=db_session,
directory=gssha_directory,
name='grid_standard') | [
"Generates",
"an",
"elevation",
"grid",
"for",
"the",
"GSSHA",
"simulation",
"from",
"an",
"elevation",
"raster"
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/ele.py#L42-L111 | train |
CI-WATER/gsshapy | gsshapy/orm/spn.py | StormPipeNetworkFile._read | def _read(self, directory, filename, session, path, name, extension, spatial, spatialReferenceID, replaceParamFile):
"""
Storm Pipe Network File Read from File Method
"""
# Set file extension property
self.fileExtension = extension
# Dictionary of keywords/cards and parse function names
KEYWORDS = {'CONNECT': spc.connectChunk,
'SJUNC': spc.sjuncChunk,
'SLINK': spc.slinkChunk}
sjuncs = []
slinks = []
connections = []
# Parse file into chunks associated with keywords/cards
with open(path, 'r') as f:
chunks = pt.chunk(KEYWORDS, f)
# Parse chunks associated with each key
for key, chunkList in iteritems(chunks):
# Parse each chunk in the chunk list
for chunk in chunkList:
# Call chunk specific parsers for each chunk
result = KEYWORDS[key](key, chunk)
# Cases
if key == 'CONNECT':
connections.append(result)
elif key == 'SJUNC':
sjuncs.append(result)
elif key == 'SLINK':
slinks.append(result)
# Create GSSHAPY objects
self._createConnection(connections)
self._createSjunc(sjuncs)
self._createSlink(slinks) | python | def _read(self, directory, filename, session, path, name, extension, spatial, spatialReferenceID, replaceParamFile):
"""
Storm Pipe Network File Read from File Method
"""
# Set file extension property
self.fileExtension = extension
# Dictionary of keywords/cards and parse function names
KEYWORDS = {'CONNECT': spc.connectChunk,
'SJUNC': spc.sjuncChunk,
'SLINK': spc.slinkChunk}
sjuncs = []
slinks = []
connections = []
# Parse file into chunks associated with keywords/cards
with open(path, 'r') as f:
chunks = pt.chunk(KEYWORDS, f)
# Parse chunks associated with each key
for key, chunkList in iteritems(chunks):
# Parse each chunk in the chunk list
for chunk in chunkList:
# Call chunk specific parsers for each chunk
result = KEYWORDS[key](key, chunk)
# Cases
if key == 'CONNECT':
connections.append(result)
elif key == 'SJUNC':
sjuncs.append(result)
elif key == 'SLINK':
slinks.append(result)
# Create GSSHAPY objects
self._createConnection(connections)
self._createSjunc(sjuncs)
self._createSlink(slinks) | [
"def",
"_read",
"(",
"self",
",",
"directory",
",",
"filename",
",",
"session",
",",
"path",
",",
"name",
",",
"extension",
",",
"spatial",
",",
"spatialReferenceID",
",",
"replaceParamFile",
")",
":",
"# Set file extension property",
"self",
".",
"fileExtension",
"=",
"extension",
"# Dictionary of keywords/cards and parse function names",
"KEYWORDS",
"=",
"{",
"'CONNECT'",
":",
"spc",
".",
"connectChunk",
",",
"'SJUNC'",
":",
"spc",
".",
"sjuncChunk",
",",
"'SLINK'",
":",
"spc",
".",
"slinkChunk",
"}",
"sjuncs",
"=",
"[",
"]",
"slinks",
"=",
"[",
"]",
"connections",
"=",
"[",
"]",
"# Parse file into chunks associated with keywords/cards",
"with",
"open",
"(",
"path",
",",
"'r'",
")",
"as",
"f",
":",
"chunks",
"=",
"pt",
".",
"chunk",
"(",
"KEYWORDS",
",",
"f",
")",
"# Parse chunks associated with each key",
"for",
"key",
",",
"chunkList",
"in",
"iteritems",
"(",
"chunks",
")",
":",
"# Parse each chunk in the chunk list",
"for",
"chunk",
"in",
"chunkList",
":",
"# Call chunk specific parsers for each chunk",
"result",
"=",
"KEYWORDS",
"[",
"key",
"]",
"(",
"key",
",",
"chunk",
")",
"# Cases",
"if",
"key",
"==",
"'CONNECT'",
":",
"connections",
".",
"append",
"(",
"result",
")",
"elif",
"key",
"==",
"'SJUNC'",
":",
"sjuncs",
".",
"append",
"(",
"result",
")",
"elif",
"key",
"==",
"'SLINK'",
":",
"slinks",
".",
"append",
"(",
"result",
")",
"# Create GSSHAPY objects",
"self",
".",
"_createConnection",
"(",
"connections",
")",
"self",
".",
"_createSjunc",
"(",
"sjuncs",
")",
"self",
".",
"_createSlink",
"(",
"slinks",
")"
] | Storm Pipe Network File Read from File Method | [
"Storm",
"Pipe",
"Network",
"File",
"Read",
"from",
"File",
"Method"
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/spn.py#L65-L103 | train |
CI-WATER/gsshapy | gsshapy/orm/spn.py | StormPipeNetworkFile._write | def _write(self, session, openFile, replaceParamFile):
"""
Storm Pipe Network File Write to File Method
"""
# Retrieve Connection objects and write to file
connections = self.connections
self._writeConnections(connections=connections,
fileObject=openFile)
# Retrieve SuperJunction objects and write to file
sjuncs = self.superJunctions
self._writeSuperJunctions(superJunctions=sjuncs,
fileObject=openFile)
# Retrieve SuperLink objects and write to file
slinks = self.superLinks
self._writeSuperLinks(superLinks=slinks,
fileObject=openFile) | python | def _write(self, session, openFile, replaceParamFile):
"""
Storm Pipe Network File Write to File Method
"""
# Retrieve Connection objects and write to file
connections = self.connections
self._writeConnections(connections=connections,
fileObject=openFile)
# Retrieve SuperJunction objects and write to file
sjuncs = self.superJunctions
self._writeSuperJunctions(superJunctions=sjuncs,
fileObject=openFile)
# Retrieve SuperLink objects and write to file
slinks = self.superLinks
self._writeSuperLinks(superLinks=slinks,
fileObject=openFile) | [
"def",
"_write",
"(",
"self",
",",
"session",
",",
"openFile",
",",
"replaceParamFile",
")",
":",
"# Retrieve Connection objects and write to file",
"connections",
"=",
"self",
".",
"connections",
"self",
".",
"_writeConnections",
"(",
"connections",
"=",
"connections",
",",
"fileObject",
"=",
"openFile",
")",
"# Retrieve SuperJunction objects and write to file",
"sjuncs",
"=",
"self",
".",
"superJunctions",
"self",
".",
"_writeSuperJunctions",
"(",
"superJunctions",
"=",
"sjuncs",
",",
"fileObject",
"=",
"openFile",
")",
"# Retrieve SuperLink objects and write to file",
"slinks",
"=",
"self",
".",
"superLinks",
"self",
".",
"_writeSuperLinks",
"(",
"superLinks",
"=",
"slinks",
",",
"fileObject",
"=",
"openFile",
")"
] | Storm Pipe Network File Write to File Method | [
"Storm",
"Pipe",
"Network",
"File",
"Write",
"to",
"File",
"Method"
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/spn.py#L105-L122 | train |
CI-WATER/gsshapy | gsshapy/orm/spn.py | StormPipeNetworkFile._createConnection | def _createConnection(self, connections):
"""
Create GSSHAPY Connection Objects Method
"""
for c in connections:
# Create GSSHAPY Connection object
connection = Connection(slinkNumber=c['slinkNumber'],
upSjuncNumber=c['upSjunc'],
downSjuncNumber=c['downSjunc'])
# Associate Connection with StormPipeNetworkFile
connection.stormPipeNetworkFile = self | python | def _createConnection(self, connections):
"""
Create GSSHAPY Connection Objects Method
"""
for c in connections:
# Create GSSHAPY Connection object
connection = Connection(slinkNumber=c['slinkNumber'],
upSjuncNumber=c['upSjunc'],
downSjuncNumber=c['downSjunc'])
# Associate Connection with StormPipeNetworkFile
connection.stormPipeNetworkFile = self | [
"def",
"_createConnection",
"(",
"self",
",",
"connections",
")",
":",
"for",
"c",
"in",
"connections",
":",
"# Create GSSHAPY Connection object",
"connection",
"=",
"Connection",
"(",
"slinkNumber",
"=",
"c",
"[",
"'slinkNumber'",
"]",
",",
"upSjuncNumber",
"=",
"c",
"[",
"'upSjunc'",
"]",
",",
"downSjuncNumber",
"=",
"c",
"[",
"'downSjunc'",
"]",
")",
"# Associate Connection with StormPipeNetworkFile",
"connection",
".",
"stormPipeNetworkFile",
"=",
"self"
] | Create GSSHAPY Connection Objects Method | [
"Create",
"GSSHAPY",
"Connection",
"Objects",
"Method"
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/spn.py#L124-L136 | train |
CI-WATER/gsshapy | gsshapy/orm/spn.py | StormPipeNetworkFile._createSlink | def _createSlink(self, slinks):
"""
Create GSSHAPY SuperLink, Pipe, and SuperNode Objects Method
"""
for slink in slinks:
# Create GSSHAPY SuperLink object
superLink = SuperLink(slinkNumber=slink['slinkNumber'],
numPipes=slink['numPipes'])
# Associate SuperLink with StormPipeNetworkFile
superLink.stormPipeNetworkFile = self
for node in slink['nodes']:
# Create GSSHAPY SuperNode objects
superNode = SuperNode(nodeNumber=node['nodeNumber'],
groundSurfaceElev=node['groundSurfaceElev'],
invertElev=node['invertElev'],
manholeSA=node['manholeSA'],
nodeInletCode=node['inletCode'],
cellI=node['cellI'],
cellJ=node['cellJ'],
weirSideLength=node['weirSideLength'],
orificeDiameter=node['orificeDiameter'])
# Associate SuperNode with SuperLink
superNode.superLink = superLink
for p in slink['pipes']:
# Create GSSHAPY Pipe objects
pipe = Pipe(pipeNumber=p['pipeNumber'],
xSecType=p['xSecType'],
diameterOrHeight=p['diameterOrHeight'],
width=p['width'],
slope=p['slope'],
roughness=p['roughness'],
length=p['length'],
conductance=p['conductance'],
drainSpacing=p['drainSpacing'])
# Associate Pipe with SuperLink
pipe.superLink = superLink | python | def _createSlink(self, slinks):
"""
Create GSSHAPY SuperLink, Pipe, and SuperNode Objects Method
"""
for slink in slinks:
# Create GSSHAPY SuperLink object
superLink = SuperLink(slinkNumber=slink['slinkNumber'],
numPipes=slink['numPipes'])
# Associate SuperLink with StormPipeNetworkFile
superLink.stormPipeNetworkFile = self
for node in slink['nodes']:
# Create GSSHAPY SuperNode objects
superNode = SuperNode(nodeNumber=node['nodeNumber'],
groundSurfaceElev=node['groundSurfaceElev'],
invertElev=node['invertElev'],
manholeSA=node['manholeSA'],
nodeInletCode=node['inletCode'],
cellI=node['cellI'],
cellJ=node['cellJ'],
weirSideLength=node['weirSideLength'],
orificeDiameter=node['orificeDiameter'])
# Associate SuperNode with SuperLink
superNode.superLink = superLink
for p in slink['pipes']:
# Create GSSHAPY Pipe objects
pipe = Pipe(pipeNumber=p['pipeNumber'],
xSecType=p['xSecType'],
diameterOrHeight=p['diameterOrHeight'],
width=p['width'],
slope=p['slope'],
roughness=p['roughness'],
length=p['length'],
conductance=p['conductance'],
drainSpacing=p['drainSpacing'])
# Associate Pipe with SuperLink
pipe.superLink = superLink | [
"def",
"_createSlink",
"(",
"self",
",",
"slinks",
")",
":",
"for",
"slink",
"in",
"slinks",
":",
"# Create GSSHAPY SuperLink object",
"superLink",
"=",
"SuperLink",
"(",
"slinkNumber",
"=",
"slink",
"[",
"'slinkNumber'",
"]",
",",
"numPipes",
"=",
"slink",
"[",
"'numPipes'",
"]",
")",
"# Associate SuperLink with StormPipeNetworkFile",
"superLink",
".",
"stormPipeNetworkFile",
"=",
"self",
"for",
"node",
"in",
"slink",
"[",
"'nodes'",
"]",
":",
"# Create GSSHAPY SuperNode objects",
"superNode",
"=",
"SuperNode",
"(",
"nodeNumber",
"=",
"node",
"[",
"'nodeNumber'",
"]",
",",
"groundSurfaceElev",
"=",
"node",
"[",
"'groundSurfaceElev'",
"]",
",",
"invertElev",
"=",
"node",
"[",
"'invertElev'",
"]",
",",
"manholeSA",
"=",
"node",
"[",
"'manholeSA'",
"]",
",",
"nodeInletCode",
"=",
"node",
"[",
"'inletCode'",
"]",
",",
"cellI",
"=",
"node",
"[",
"'cellI'",
"]",
",",
"cellJ",
"=",
"node",
"[",
"'cellJ'",
"]",
",",
"weirSideLength",
"=",
"node",
"[",
"'weirSideLength'",
"]",
",",
"orificeDiameter",
"=",
"node",
"[",
"'orificeDiameter'",
"]",
")",
"# Associate SuperNode with SuperLink",
"superNode",
".",
"superLink",
"=",
"superLink",
"for",
"p",
"in",
"slink",
"[",
"'pipes'",
"]",
":",
"# Create GSSHAPY Pipe objects",
"pipe",
"=",
"Pipe",
"(",
"pipeNumber",
"=",
"p",
"[",
"'pipeNumber'",
"]",
",",
"xSecType",
"=",
"p",
"[",
"'xSecType'",
"]",
",",
"diameterOrHeight",
"=",
"p",
"[",
"'diameterOrHeight'",
"]",
",",
"width",
"=",
"p",
"[",
"'width'",
"]",
",",
"slope",
"=",
"p",
"[",
"'slope'",
"]",
",",
"roughness",
"=",
"p",
"[",
"'roughness'",
"]",
",",
"length",
"=",
"p",
"[",
"'length'",
"]",
",",
"conductance",
"=",
"p",
"[",
"'conductance'",
"]",
",",
"drainSpacing",
"=",
"p",
"[",
"'drainSpacing'",
"]",
")",
"# Associate Pipe with SuperLink",
"pipe",
".",
"superLink",
"=",
"superLink"
] | Create GSSHAPY SuperLink, Pipe, and SuperNode Objects Method | [
"Create",
"GSSHAPY",
"SuperLink",
"Pipe",
"and",
"SuperNode",
"Objects",
"Method"
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/spn.py#L138-L179 | train |
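For reference, the `slinks` argument consumed by `_createSlink` is a list of dicts whose key names can be read directly off the code above; a sketch with illustrative values (the numbers are made up — only the key names come from the source):

slink = {
    'slinkNumber': 1,
    'numPipes': 2,
    'nodes': [
        {'nodeNumber': 1, 'groundSurfaceElev': 100.0, 'invertElev': 97.5,
         'manholeSA': 1.2, 'inletCode': 0, 'cellI': 10, 'cellJ': 12,
         'weirSideLength': 0.5, 'orificeDiameter': 0.3},
    ],
    'pipes': [
        {'pipeNumber': 1, 'xSecType': 1, 'diameterOrHeight': 0.6,
         'width': 0.0, 'slope': 0.01, 'roughness': 0.013, 'length': 50.0,
         'conductance': 0.0, 'drainSpacing': 0.0},
    ],
}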
CI-WATER/gsshapy | gsshapy/orm/spn.py | StormPipeNetworkFile._createSjunc | def _createSjunc(self, sjuncs):
"""
Create GSSHAPY SuperJunction Objects Method
"""
for sjunc in sjuncs:
# Create GSSHAPY SuperJunction object
superJunction = SuperJunction(sjuncNumber=sjunc['sjuncNumber'],
groundSurfaceElev=sjunc['groundSurfaceElev'],
invertElev=sjunc['invertElev'],
manholeSA=sjunc['manholeSA'],
inletCode=sjunc['inletCode'],
linkOrCellI=sjunc['linkOrCellI'],
nodeOrCellJ=sjunc['nodeOrCellJ'],
weirSideLength=sjunc['weirSideLength'],
orificeDiameter=sjunc['orificeDiameter'])
# Associate SuperJunction with StormPipeNetworkFile
superJunction.stormPipeNetworkFile = self | python | def _createSjunc(self, sjuncs):
"""
Create GSSHAPY SuperJunction Objects Method
"""
for sjunc in sjuncs:
# Create GSSHAPY SuperJunction object
superJunction = SuperJunction(sjuncNumber=sjunc['sjuncNumber'],
groundSurfaceElev=sjunc['groundSurfaceElev'],
invertElev=sjunc['invertElev'],
manholeSA=sjunc['manholeSA'],
inletCode=sjunc['inletCode'],
linkOrCellI=sjunc['linkOrCellI'],
nodeOrCellJ=sjunc['nodeOrCellJ'],
weirSideLength=sjunc['weirSideLength'],
orificeDiameter=sjunc['orificeDiameter'])
# Associate SuperJunction with StormPipeNetworkFile
superJunction.stormPipeNetworkFile = self | [
"def",
"_createSjunc",
"(",
"self",
",",
"sjuncs",
")",
":",
"for",
"sjunc",
"in",
"sjuncs",
":",
"# Create GSSHAPY SuperJunction object",
"superJunction",
"=",
"SuperJunction",
"(",
"sjuncNumber",
"=",
"sjunc",
"[",
"'sjuncNumber'",
"]",
",",
"groundSurfaceElev",
"=",
"sjunc",
"[",
"'groundSurfaceElev'",
"]",
",",
"invertElev",
"=",
"sjunc",
"[",
"'invertElev'",
"]",
",",
"manholeSA",
"=",
"sjunc",
"[",
"'manholeSA'",
"]",
",",
"inletCode",
"=",
"sjunc",
"[",
"'inletCode'",
"]",
",",
"linkOrCellI",
"=",
"sjunc",
"[",
"'linkOrCellI'",
"]",
",",
"nodeOrCellJ",
"=",
"sjunc",
"[",
"'nodeOrCellJ'",
"]",
",",
"weirSideLength",
"=",
"sjunc",
"[",
"'weirSideLength'",
"]",
",",
"orificeDiameter",
"=",
"sjunc",
"[",
"'orificeDiameter'",
"]",
")",
"# Associate SuperJunction with StormPipeNetworkFile",
"superJunction",
".",
"stormPipeNetworkFile",
"=",
"self"
] | Create GSSHAPY SuperJunction Objects Method | [
"Create",
"GSSHAPY",
"SuperJunction",
"Objects",
"Method"
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/spn.py#L181-L199 | train |
CI-WATER/gsshapy | gsshapy/orm/spn.py | StormPipeNetworkFile._writeConnections | def _writeConnections(self, connections, fileObject):
"""
Write Connections to File Method
"""
for connection in connections:
fileObject.write('CONNECT %s %s %s\n' % (
connection.slinkNumber,
connection.upSjuncNumber,
connection.downSjuncNumber)) | python | def _writeConnections(self, connections, fileObject):
"""
Write Connections to File Method
"""
for connection in connections:
fileObject.write('CONNECT %s %s %s\n' % (
connection.slinkNumber,
connection.upSjuncNumber,
connection.downSjuncNumber)) | [
"def",
"_writeConnections",
"(",
"self",
",",
"connections",
",",
"fileObject",
")",
":",
"for",
"connection",
"in",
"connections",
":",
"fileObject",
".",
"write",
"(",
"'CONNECT %s %s %s\\n'",
"%",
"(",
"connection",
".",
"slinkNumber",
",",
"connection",
".",
"upSjuncNumber",
",",
"connection",
".",
"downSjuncNumber",
")",
")"
] | Write Connections to File Method | [
"Write",
"Connections",
"to",
"File",
"Method"
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/spn.py#L201-L209 | train |
CI-WATER/gsshapy | gsshapy/orm/spn.py | StormPipeNetworkFile._writeSuperJunctions | def _writeSuperJunctions(self, superJunctions, fileObject):
"""
Write SuperJunctions to File Method
"""
for sjunc in superJunctions:
fileObject.write('SJUNC %s %.2f %.2f %.6f %s %s %s %.6f %.6f\n' % (
sjunc.sjuncNumber,
sjunc.groundSurfaceElev,
sjunc.invertElev,
sjunc.manholeSA,
sjunc.inletCode,
sjunc.linkOrCellI,
sjunc.nodeOrCellJ,
sjunc.weirSideLength,
sjunc.orificeDiameter)) | python | def _writeSuperJunctions(self, superJunctions, fileObject):
"""
Write SuperJunctions to File Method
"""
for sjunc in superJunctions:
fileObject.write('SJUNC %s %.2f %.2f %.6f %s %s %s %.6f %.6f\n' % (
sjunc.sjuncNumber,
sjunc.groundSurfaceElev,
sjunc.invertElev,
sjunc.manholeSA,
sjunc.inletCode,
sjunc.linkOrCellI,
sjunc.nodeOrCellJ,
sjunc.weirSideLength,
sjunc.orificeDiameter)) | [
"def",
"_writeSuperJunctions",
"(",
"self",
",",
"superJunctions",
",",
"fileObject",
")",
":",
"for",
"sjunc",
"in",
"superJunctions",
":",
"fileObject",
".",
"write",
"(",
"'SJUNC %s %.2f %.2f %.6f %s %s %s %.6f %.6f\\n'",
"%",
"(",
"sjunc",
".",
"sjuncNumber",
",",
"sjunc",
".",
"groundSurfaceElev",
",",
"sjunc",
".",
"invertElev",
",",
"sjunc",
".",
"manholeSA",
",",
"sjunc",
".",
"inletCode",
",",
"sjunc",
".",
"linkOrCellI",
",",
"sjunc",
".",
"nodeOrCellJ",
",",
"sjunc",
".",
"weirSideLength",
",",
"sjunc",
".",
"orificeDiameter",
")",
")"
] | Write SuperJunctions to File Method | [
"Write",
"SuperJunctions",
"to",
"File",
"Method"
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/spn.py#L211-L225 | train |
CI-WATER/gsshapy | gsshapy/orm/spn.py | StormPipeNetworkFile._writeSuperLinks | def _writeSuperLinks(self, superLinks, fileObject):
"""
Write SuperLinks to File Method
"""
for slink in superLinks:
fileObject.write('SLINK %s %s\n' % (
slink.slinkNumber,
slink.numPipes))
for node in slink.superNodes:
fileObject.write('NODE %s %.2f %.2f %.6f %s %s %s %.6f %.6f\n' % (
node.nodeNumber,
node.groundSurfaceElev,
node.invertElev,
node.manholeSA,
node.nodeInletCode,
node.cellI,
node.cellJ,
node.weirSideLength,
node.orificeDiameter))
for pipe in slink.pipes:
fileObject.write('PIPE %s %s %.6f %.6f %.6f %.6f %.2f %.6f %.6f\n' % (
pipe.pipeNumber,
pipe.xSecType,
pipe.diameterOrHeight,
pipe.width,
pipe.slope,
pipe.roughness,
pipe.length,
pipe.conductance,
pipe.drainSpacing)) | python | def _writeSuperLinks(self, superLinks, fileObject):
"""
Write SuperLinks to File Method
"""
for slink in superLinks:
fileObject.write('SLINK %s %s\n' % (
slink.slinkNumber,
slink.numPipes))
for node in slink.superNodes:
fileObject.write('NODE %s %.2f %.2f %.6f %s %s %s %.6f %.6f\n' % (
node.nodeNumber,
node.groundSurfaceElev,
node.invertElev,
node.manholeSA,
node.nodeInletCode,
node.cellI,
node.cellJ,
node.weirSideLength,
node.orificeDiameter))
for pipe in slink.pipes:
fileObject.write('PIPE %s %s %.6f %.6f %.6f %.6f %.2f %.6f %.6f\n' % (
pipe.pipeNumber,
pipe.xSecType,
pipe.diameterOrHeight,
pipe.width,
pipe.slope,
pipe.roughness,
pipe.length,
pipe.conductance,
pipe.drainSpacing)) | [
"def",
"_writeSuperLinks",
"(",
"self",
",",
"superLinks",
",",
"fileObject",
")",
":",
"for",
"slink",
"in",
"superLinks",
":",
"fileObject",
".",
"write",
"(",
"'SLINK %s %s\\n'",
"%",
"(",
"slink",
".",
"slinkNumber",
",",
"slink",
".",
"numPipes",
")",
")",
"for",
"node",
"in",
"slink",
".",
"superNodes",
":",
"fileObject",
".",
"write",
"(",
"'NODE %s %.2f %.2f %.6f %s %s %s %.6f %.6f\\n'",
"%",
"(",
"node",
".",
"nodeNumber",
",",
"node",
".",
"groundSurfaceElev",
",",
"node",
".",
"invertElev",
",",
"node",
".",
"manholeSA",
",",
"node",
".",
"nodeInletCode",
",",
"node",
".",
"cellI",
",",
"node",
".",
"cellJ",
",",
"node",
".",
"weirSideLength",
",",
"node",
".",
"orificeDiameter",
")",
")",
"for",
"pipe",
"in",
"slink",
".",
"pipes",
":",
"fileObject",
".",
"write",
"(",
"'PIPE %s %s %.6f %.6f %.6f %.6f %.2f %.6f %.6f\\n'",
"%",
"(",
"pipe",
".",
"pipeNumber",
",",
"pipe",
".",
"xSecType",
",",
"pipe",
".",
"diameterOrHeight",
",",
"pipe",
".",
"width",
",",
"pipe",
".",
"slope",
",",
"pipe",
".",
"roughness",
",",
"pipe",
".",
"length",
",",
"pipe",
".",
"conductance",
",",
"pipe",
".",
"drainSpacing",
")",
")"
] | Write SuperLinks to File Method | [
"Write",
"SuperLinks",
"to",
"File",
"Method"
] | 00fd4af0fd65f1614d75a52fe950a04fb0867f4c | https://github.com/CI-WATER/gsshapy/blob/00fd4af0fd65f1614d75a52fe950a04fb0867f4c/gsshapy/orm/spn.py#L227-L257 | train |
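Taken together, the writer methods above emit one card per line using fixed printf layouts (CONNECT %s %s %s; SJUNC and NODE %s %.2f %.2f %.6f %s %s %s %.6f %.6f; SLINK %s %s; PIPE %s %s %.6f %.6f %.6f %.6f %.2f %.6f %.6f). An illustrative fragment of output — the values are invented, but the formatting comes straight from the format strings in the source:

CONNECT 1 1 2
SJUNC 1 100.00 97.50 1.200000 0 10 12 0.500000 0.300000
SLINK 1 2
NODE 1 100.00 97.50 1.200000 0 10 12 0.500000 0.300000
PIPE 1 1 0.600000 0.000000 0.010000 0.013000 50.00 0.000000 0.000000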
theodoregoetz/wernher | wernher/pid_control.py | Controller.ziegler_nichols | def ziegler_nichols(self,ku,tu,control_type='pid'):
'''
ku = ultimate gain
tu = period of oscillation at ultimate gain
'''
converter = dict(
p = lambda ku,tu: (.5*ku, 0, 0),
pi = lambda ku,tu: (.45*ku, 1.2*(.45*ku)/tu, 0),
pd = lambda ku,tu: (.8*ku, 0, (.8*ku)*tu/8),
pid = lambda ku,tu: (.6*ku, 2*(.6*ku)/tu, (.6*ku)*tu/8),
pessen = lambda ku,tu: (.7*ku, 2.5*(.7*ku)/tu, 3*(.7*ku)*tu/20),
some_overshoot = lambda ku,tu: (.33*ku, 2*(.33*ku)/tu, (.33*ku)*tu/3),
no_overshoot = lambda ku,tu: (.2*ku, 2*(.2*ku)/tu, (.2*ku)*tu/3)
)
self.kp,self.ki,self.kd = converter[control_type.lower()](ku,tu) | python | def ziegler_nichols(self,ku,tu,control_type='pid'):
'''
ku = ultimate gain
tu = period of oscillation at ultimate gain
'''
converter = dict(
p = lambda ku,tu: (.5*ku, 0, 0),
pi = lambda ku,tu: (.45*ku, 1.2*(.45*ku)/tu, 0),
pd = lambda ku,tu: (.8*ku, 0, (.8*ku)*tu/8),
pid = lambda ku,tu: (.6*ku, 2*(.6*ku)/tu, (.6*ku)*tu/8),
pessen = lambda ku,tu: (.7*ku, 2.5*(.7*ku)/tu, 3*(.7*ku)*tu/20),
some_overshoot = lambda ku,tu: (.33*ku, 2*(.33*ku)/tu, (.33*ku)*tu/3),
no_overshoot = lambda ku,tu: (.2*ku, 2*(.2*ku)/tu, (.2*ku)*tu/3)
)
self.kp,self.ki,self.kd = converter[control_type.lower()](ku,tu) | [
"def",
"ziegler_nichols",
"(",
"self",
",",
"ku",
",",
"tu",
",",
"control_type",
"=",
"'pid'",
")",
":",
"converter",
"=",
"dict",
"(",
"p",
"=",
"lambda",
"ku",
",",
"tu",
":",
"(",
".5",
"*",
"ku",
",",
"0",
",",
"0",
")",
",",
"pi",
"=",
"lambda",
"ku",
",",
"tu",
":",
"(",
".45",
"*",
"ku",
",",
"1.2",
"*",
"(",
".45",
"*",
"ku",
")",
"/",
"tu",
",",
"0",
")",
",",
"pd",
"=",
"lambda",
"ku",
",",
"tu",
":",
"(",
".8",
"*",
"ku",
",",
"0",
",",
"(",
".8",
"*",
"ku",
")",
"*",
"tu",
"/",
"8",
")",
",",
"pid",
"=",
"lambda",
"ku",
",",
"tu",
":",
"(",
".6",
"*",
"ku",
",",
"2",
"*",
"(",
".6",
"*",
"ku",
")",
"/",
"tu",
",",
"(",
".6",
"*",
"ku",
")",
"*",
"tu",
"/",
"8",
")",
",",
"pessen",
"=",
"lambda",
"ku",
",",
"tu",
":",
"(",
".7",
"*",
"ku",
",",
"2.5",
"*",
"(",
".7",
"*",
"ku",
")",
"/",
"tu",
",",
"3",
"*",
"(",
".7",
"*",
"ku",
")",
"*",
"tu",
"/",
"20",
")",
",",
"some_overshoot",
"=",
"lambda",
"ku",
",",
"tu",
":",
"(",
".33",
"*",
"ku",
",",
"2",
"*",
"(",
".33",
"*",
"ku",
")",
"/",
"tu",
",",
"(",
".33",
"*",
"ku",
")",
"*",
"tu",
"/",
"3",
")",
",",
"no_overshoot",
"=",
"lambda",
"ku",
",",
"tu",
":",
"(",
".2",
"*",
"ku",
",",
"2",
"*",
"(",
".2",
"*",
"ku",
")",
"/",
"tu",
",",
"(",
".2",
"*",
"ku",
")",
"*",
"tu",
"/",
"3",
")",
")",
"self",
".",
"kp",
",",
"self",
".",
"ki",
",",
"self",
".",
"kd",
"=",
"converter",
"[",
"control_type",
".",
"lower",
"(",
")",
"]",
"(",
"ku",
",",
"tu",
")"
] | ku = ultimate gain
tu = period of oscillation at ultimate gain | [
"ku",
"=",
"ultimate",
"gain",
"tu",
"=",
"period",
"of",
"oscillation",
"at",
"ultimate",
"gain"
] | ef5d3aabe24e532b5eab33cd0212b2dbc2c9022e | https://github.com/theodoregoetz/wernher/blob/ef5d3aabe24e532b5eab33cd0212b2dbc2c9022e/wernher/pid_control.py#L110-L124 | train |
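The classic closed-loop Ziegler-Nichols 'pid' rule encoded in the converter table above is kp = 0.6*ku, ki = 2*kp/tu, kd = kp*tu/8. A standalone sketch mirroring that one entry (the ku/tu values in the usage line are illustrative, not taken from any real control loop):

def zn_pid_gains(ku, tu):
    # Ziegler-Nichols PID rule, matching the 'pid' entry of the converter dict.
    kp = 0.6 * ku
    ki = 2.0 * kp / tu   # integral gain
    kd = kp * tu / 8.0   # derivative gain
    return kp, ki, kd

kp, ki, kd = zn_pid_gains(ku=2.0, tu=0.5)
print(kp, ki, kd)   # 1.2 4.8 0.075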