language | original_string
---|---
Python | def loss_gradient(self, H, g_os, g, h, hp, w):
"""Computes the gradient of the hint loss location w.
Args:
H: d x n np array - prediction from self.n hinters
g_os: d x 1 np.array - ground truth cumulative loss
g (np.array) - d x 1 vector of gradient at time t
h (np.array) - d x 1 vector of hint at time t
hp (np.array) - d x 1 vector of hint at time t-1
w: n x 1 np.array - omega weight play of hinter
"""
d = H.shape[0] # Number of gradient values
n = H.shape[1] # Number of experts
# Add default values
err = H @ w - g_os
err_norm = np.linalg.norm(err, ord=2)
g_norm = np.linalg.norm(g + hp - h, ord=2)
if np.isclose(err_norm, 0.0):
return np.zeros((n,))
return ((g_norm / err_norm) * (H.T @ err)).reshape(-1,) |
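As a quick sanity check of the formula above, here is a minimal standalone sketch with toy inputs (the shapes and values are illustrative and not taken from the original code):

import numpy as np

d, n = 3, 2                                         # toy sizes: d gradient values, n hinters
H = np.array([[1.0, 0.0], [0.0, 1.0], [1.0, 1.0]])  # d x n hinter predictions
g_os = np.array([1.0, 2.0, 3.0])                    # ground-truth cumulative loss
g, h, hp = np.ones(d), np.zeros(d), np.zeros(d)     # gradient and hints at t and t-1
w = np.array([0.5, 0.5])                            # hinter weight play

err = H @ w - g_os
err_norm = np.linalg.norm(err, ord=2)
g_norm = np.linalg.norm(g + hp - h, ord=2)
grad = np.zeros(n) if np.isclose(err_norm, 0.0) else (g_norm / err_norm) * (H.T @ err)
print(grad.shape)  # (2,) -- one entry per hinter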
Python | def save_forecasts(preds,
model="spatiotemporal_mean",
submodel="spatiotemporal_mean-1981_2010",
gt_id="contest_tmp2m",
horizon="34w",
target_date_str="20191029"):
"""Saves predictions produced by a given model and submodel
for a given target date and task
Args:
preds: pandas DataFrame with columns ['lat','lon','start_date','pred']
containing predictions for the given target date
model: string model name
submodel: string submodel name or None; if None, the selected
submodel of the model is used
gt_id: contest_tmp2m or contest_precip
horizon: "12w", "34w", or "56w"
target_date_str: first date of target two week period; string in
YYYYMMDD format
"""
outfile = get_forecast_filename(model=model, submodel=submodel,
gt_id=gt_id, horizon=horizon,
target_date_str=target_date_str)
pandas2hdf(preds, outfile) |
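A hedged usage sketch: the helpers get_forecast_filename and pandas2hdf are not shown here, so the call is illustrated with a hypothetical predictions frame built to match the documented columns:

import pandas as pd

preds = pd.DataFrame({
    "lat": [37.0, 37.0],
    "lon": [-95.0, -94.0],
    "start_date": pd.to_datetime(["2019-10-29", "2019-10-29"]),
    "pred": [1.23, 0.98],
})
# save_forecasts(preds, model="spatiotemporal_mean",
#                submodel="spatiotemporal_mean-1981_2010",
#                gt_id="contest_tmp2m", horizon="34w",
#                target_date_str="20191029")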
Python | def start_logger(model="spatiotemporal_mean",
submodel=None,
gt_id="contest_tmp2m",
horizon="34w",
target_dates="std_val"):
"""Initializes and returns Logger object pointing to log file for the given
model, submodel, task, and target_dates
Args:
model: string model name
submodel: string submodel name or None; if None, selected submodel of
model is used
gt_id: contest_tmp2m or contest_precip
horizon: "12w", "34w", or "56w"
target_dates: string representing set of target dates
"""
log_file = get_log_filename(model=model, submodel=submodel, gt_id=gt_id,
                            horizon=horizon, target_dates=target_dates)
make_parent_directories(log_file)
# 'w' overwrites previous log
return Logger(log_file, 'w') |
Python | def log_params(params_names, params_values):
"""Log arguments using names in params_names and values in params_values.
Args
----------
params_names : list
List with names of parameters to be logged
params_values : list
List with values of parameters to be logged
"""
printf("Parameter values:")
assert len(params_names)==len(params_values)
for name, value in zip(params_names, params_values):
printf(f" {name}: {value}") | def log_params(params_names, params_values):
"""Log arguments using names in params_names and values in params_values.
Args
----------
params_names : list
List with names of parameters to be logged
params_values : list
List with values of parameters to be logged
"""
printf("Parameter values:")
assert len(params_names)==len(params_values)
for name, value in zip(params_names, params_values):
printf(f" {name}: {value}") |
Python | def calc_slope(self) -> float:
"""
Calculate the slope of the best-fit line
using ordinary least squares
"""
# result = (nΣxi*yi – Σxi*Σyi) / (nΣxi^2 – (Σxi)^2)
sum_xy = 0
sum_x_2 = 0
for point in self.points:
sum_xy += point.coor_x * point.coor_y # Σxi*yi
sum_x_2 += point.coor_x ** 2 # Σxi^2
# nΣxi*yi – Σxi*Σyi
dividend = self.points_count * sum_xy
dividend -= self.sum_x * self.sum_y
# nΣxi^2 – (Σxi)^2
divisor = self.points_count * sum_x_2
divisor -= self.sum_x**2
return dividend / divisor |
Python | def calc_y_interception(self) -> float:
"""
Calculate the y-intercept of the best-fit line
using ordinary least squares
"""
# result = (Σyi – aΣxi) / n
dividend = self.sum_y
dividend -= self.get_slope() * self.sum_x
return dividend / self.points_count |
Python | def calc(self, x_value: int) -> float:
"""
Evaluate f(x) on the best-fit line obtained
by ordinary least squares
"""
return self.slope * x_value + self.y_interception |
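Taken together, calc_slope, calc_y_interception, and calc implement the closed-form OLS line; a small self-contained check of the same formulas on made-up points (not from the original class):

xs = [0.0, 1.0, 2.0, 3.0]          # points roughly on y = 2x + 1
ys = [1.0, 3.1, 4.9, 7.0]
n = len(xs)
sum_x, sum_y = sum(xs), sum(ys)
sum_xy = sum(x * y for x, y in zip(xs, ys))
sum_x2 = sum(x * x for x in xs)

slope = (n * sum_xy - sum_x * sum_y) / (n * sum_x2 - sum_x ** 2)
intercept = (sum_y - slope * sum_x) / n
print(slope, intercept)            # ~1.98 and ~1.03
print(slope * 4 + intercept)       # f(4), the extrapolated prediction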
Python | def graph(self):
"""
Plot the points and the best-fit line obtained
using ordinary least squares
"""
# Set coordinates to draw line
linear_x = [self.get_domain()[0], self.get_domain()[-1]]
linear_y = []
for value in linear_x:
linear_y.append(self.calc(value))
# Set coordinates to draw points
for point in self.points:
plt.scatter(point.coor_x, point.coor_y)
plt.plot(linear_x, linear_y, '-r', label=self.get_function())
plt.grid()
plt.show() |
Python | def calc_bolsano_steps(self, interval, error, splits):
"""Calculate number of steps to approximate the interval to an root
using the Bolsano theorem"""
result = math.log( abs(interval[-1] - interval[0]) / error, splits)
result = int(math.ceil(result))
return result |
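A quick worked example of the step-count formula with illustrative values: halving an interval of length 1 until it is shorter than 1e-3 takes ceil(log2(1000)) = 10 steps.

import math

interval, error, splits = [0.0, 1.0], 1e-3, 2
steps = int(math.ceil(math.log(abs(interval[-1] - interval[0]) / error, splits)))
print(steps)  # 10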
Python | def nsection(self, interval, error, splits=2, max_steps=0):
"""Get root of function using Bolsano theorem,
this function is very flexible, SPLITS could be an integer
or an array, you can specify number of MAX_STEPS to terminate
the loop"""
# Check interval
assert len(interval) >= 2, 'interval must contain at least 2 values.'
assert self.bolsano_signs_condition(interval), f'no roots in {interval} according to Bolsano theorem.'
a_value, b_value = interval[0], interval[-1]
# Check error
assert error > 0, 'error must be higher than 0 (zero)'
# Check splits
assert (isinstance(splits, list) or (isinstance(splits, int) and splits > 0)),'wrong splits'
max_splits = max(splits) if isinstance(splits, list) else splits
# Check max steps
assert isinstance(max_steps, int), 'max_steps must be an int.'
assert max_steps >= 0, 'max number of steps must be higher or equal to 0 (zero)'
root = None
step = segments = 0
verbose_text = ''
current_error = 0.0
while True:
# Get number of splits
n_splits = get_correlative(splits, step) if isinstance(splits, list) else splits
# Calculate segment
segment_len = (b_value - a_value) / n_splits
a_temp = a_value + segment_len * segments
b_temp = a_value + segment_len * (segments + 1)
# Verbose
## TODO: remove inaccurate decimals.
current_error = round(err.absolute_error([a_temp, b_temp]), len(str(error)))
if segments == 0:
verbose_text = f'step={step}\t{self._function_string(a_temp, current_error)}'
verbose_text += f'\t{self._function_string(b_temp, current_error)}'
# Root conditions
if self.calc(b_temp) == 0:
root = b_temp
if self._verbose:
print('root found.')
break
if self.bolsano_signs_condition([a_temp, b_temp]):
a_value = a_temp
b_value = b_temp
if self._verbose:
## TODO: remove inaccurate decimals.
verbose_text += ('\t' + '-' * 3).ljust(25, " ") * (max_splits - segments - 1)
verbose_text += f'\troot={str(err.remove_inaccuracy((a_value + b_value)/2, current_error)).ljust(25, " ")}'
verbose_text += f'\terror={round(current_error, len(str(error)))}'
print(verbose_text)
step += 1
segments = 0
else:
segments += 1
# Stop conditions
if b_value - a_value < error:
if self._verbose:
print('reached acceptable error.')
break
if step > self.calc_bolsano_steps(interval, error, max_splits):
if self._verbose:
print('reached expected number of steps.')
break
if 0 < max_steps <= step:
if self._verbose:
print('reached specified number of steps.')
break
# If the exact root is not found, take the middle value
root = root if root else (a_value + b_value) / 2
# Remove inaccurate decimals
root = err.remove_inaccuracy(root, current_error)
return root, current_error |
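For the common splits=2 case the method reduces to plain bisection; a minimal standalone sketch of that special case (simplified, without the verbose output and error-rounding helpers above):

def bisect(f, a, b, error=1e-6):
    assert f(a) * f(b) < 0, "Bolzano condition: f must change sign on [a, b]"
    while b - a >= error:
        mid = (a + b) / 2
        if f(mid) == 0:
            return mid
        if f(a) * f(mid) < 0:
            b = mid       # root lies in the left half
        else:
            a = mid       # root lies in the right half
    return (a + b) / 2

print(bisect(lambda x: x * x - 2, 1.0, 2.0))  # ~1.4142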
Python | def remove_files(self, dry_run=True):
"""
Loops through the Windows and Linux lists and deletes all folders except the latest for each product
Args:
dry_run: (bool) if True just prints output without actual deletion
Returns: None
"""
for dist in ["linx64", "winx64"]:
items = super().get_list_items(dist)
items_to_keep = {}
for item in items:
title = item.properties["Title"]
if title not in items_to_keep:
# ensure at least one item to be kept
items_to_keep[title] = item
continue
if item.properties["build_date"] > items_to_keep[title].properties["build_date"]:
item_to_delete = items_to_keep.pop(title)
items_to_keep[title] = item
else:
item_to_delete = item
if dry_run:
print(f"Will delete {title}: {item_to_delete.properties['build_date']} ({dist})")
else:
folder_url = item_to_delete.properties["relative_url"].rsplit("/", maxsplit=1)[0]
remote_folder = self.ctx.web.get_folder_by_server_relative_url(
f"/sites/BetaDownloader/{folder_url}"
)
self.ctx.execute_query()
remote_folder.delete_object()
item_to_delete.delete_object()
if dry_run:
print("#" * 30)
for key, val in items_to_keep.items():
print(f"Will keep {key}: {val.properties['build_date']} ({dist})")
print("#" * 30, "\n\n\n")
self.ctx.execute_query() |
Python | def retry(exceptions, tries=4, delay=3, backoff=1, logger=None, proc_lock=False):
"""
Retry calling the decorated function using an exponential backoff.
Args:
exceptions (Exception or tuple): the exception to check; may be a tuple of exceptions
tries (int): number of times to try (not retry) before giving up
delay (int): initial delay between retries in seconds
backoff (int): backoff multiplier e.g. value of 2 will double the delay each retry
logger (logging): logger to use. If None, print
proc_lock (bool): if retry is applied to proc lock function
Returns: decorator
"""
def deco_retry(func):
@wraps(func)
def f_retry(self, *args, **kwargs):
mtries, mdelay = tries, delay
while mtries > 0:
try:
return func(self, *args, **kwargs)
except exceptions as e:
msg = f"{e}. Error occurred, attempt: {tries - mtries + 1}/{tries}"
if proc_lock:
# only applied for process lock
err = "Stop all processes running from installation folder. "
err += f"Attempt: {tries - mtries + 1}/{tries}"
if mtries > 1:
err += " Autoretry in 60sec."
Downloader.toaster_notification("Failed", err)
else:
raise DownloaderError(msg)
if logger:
logger.warning(msg)
else:
print(msg)
time.sleep(mdelay)
mtries -= 1
mdelay *= backoff
else:
error = (
"Please verify that your connection is stable, avoid switching state of VPN during download. "
"For artifactory you have to be on VPN. "
f"Number of attempts: {tries}/{tries}"
)
if logger:
raise DownloaderError(error)
else:
print(error)
return f_retry # true decorator
return deco_retry |
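A simplified standalone variant of the same retry-with-backoff pattern (no notifications, process-lock handling, or custom exceptions), shown only to illustrate how such a decorator is applied:

import time
from functools import wraps

def simple_retry(exceptions, tries=3, delay=1, backoff=2):
    def deco(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            mtries, mdelay = tries, delay
            while mtries > 1:
                try:
                    return func(*args, **kwargs)
                except exceptions as exc:
                    print(f"{exc}; retrying in {mdelay}s")
                    time.sleep(mdelay)
                    mtries -= 1
                    mdelay *= backoff
            return func(*args, **kwargs)  # final attempt, any exception propagates
        return wrapper
    return deco

@simple_retry(ConnectionError, tries=3, delay=1)
def flaky_download():
    raise ConnectionError("server busy")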
Python | def authorize_sharepoint(self):
"""
Uses PnP PowerShell to authorize the user in SharePoint with the Windows account and to retrieve the current
client_id and client_secret
Returns: ctx: authorization context for Office365 library
"""
self.update_installation_history(status="In-Progress", details="Authorizing in SharePoint")
command = "powershell.exe "
command += "Connect-PnPOnline -Url https://ansys.sharepoint.com/sites/BetaDownloader -UseWebLogin;"
command += '(Get-PnPListItem -List secret_list -Fields "Title","client_id","client_secret").FieldValues'
out_str = self.subprocess_call(command, shell=True, popen=True)
secret_list = []
try:
for line in out_str.splitlines():
if "Title" in line:
secret_dict = {"Title": line.split()[1]}
elif "client_id" in line:
secret_dict["client_id"] = line.split()[1]
elif "client_secret" in line:
secret_dict["client_secret"] = line.split()[1]
secret_list.append(secret_dict)
except NameError:
raise DownloaderError("Cannot retrieve authentication tokens for SharePoint")
secret_list.sort(key=lambda elem: elem["Title"], reverse=True)
context_auth = AuthenticationContext(url=SHAREPOINT_SITE_URL)
context_auth.acquire_token_for_app(
client_id=secret_list[0]["client_id"], client_secret=secret_list[0]["client_secret"]
)
ctx = ClientContext(SHAREPOINT_SITE_URL, context_auth)
return ctx |
Python | def run(self):
"""
Function that executes the download-installation process
:return: None
"""
try:
set_logger(self.logging_file)
logging.info(f"Settings path is set to {self.settings_path}")
if self.settings.artifactory == "SharePoint":
self.ctx = self.authorize_sharepoint()
self.update_installation_history(status="In-Progress", details="Verifying configuration")
self.check_and_make_directories(self.settings.install_path, self.settings.download_path)
if "ElectronicsDesktop" in self.settings.version or "Workbench" in self.settings.version:
space_required = 15
# License Manager can be updated even if running
self.check_process_lock()
else:
if not self.settings.license_file:
raise DownloaderError("No license file defined. Please select it in Advanced Settings")
if not os.path.isfile(self.settings.license_file):
raise DownloaderError(f"No license file was detected under {self.settings.license_file}")
space_required = 1
self.check_free_space(self.settings.download_path, space_required)
self.check_free_space(self.settings.install_path, space_required)
self.get_build_link()
if self.settings.force_install or self.newer_version_exists:
self.download_file()
if "ElectronicsDesktop" in self.settings.version or "Workbench" in self.settings.version:
self.check_process_lock() # download can take time, better to recheck again
self.install()
try:
self.send_statistics()
except Exception:
self.warnings_list.append("Connection to product improvement server failed")
self.update_installation_history(status="Success", details="Normal completion")
else:
raise DownloaderError("Versions are up to date. If issue occurred please use force install flag")
return
except DownloaderError as e:
# all caught errors are here
logging.error(e)
self.update_installation_history(status="Failed", details=str(e))
except Exception:
logging.error(traceback.format_exc())
self.update_installation_history(status="Failed", details="Unexpected error, see logs")
self.send_statistics(error=traceback.format_exc())
self.clean_temp() |
Python | def check_and_make_directories(*paths):
"""
Verify that the installation and download paths exist.
If not, try to create the requested path
:parameter: paths: list of paths to check and create
"""
for path in paths:
if not os.path.isdir(path):
try:
os.makedirs(path)
except PermissionError:
raise DownloaderError(f"{path} could not be created due to insufficient permissions")
except OSError as err:
if "BitLocker" in str(err):
raise DownloaderError("Your drive is locked by BitLocker. Please unlock!")
else:
raise DownloaderError(err) |
Python | def check_free_space(path, required):
"""
Verifies that enough disk space is available. Raises error if not enough space
:param path: path where to check
:param required: value in GB that should be available on drive to pass the check
:return:
"""
free_space = shutil.disk_usage(path).free // (2**30)
if free_space < required:
err = f"Disk space in {path} is less than {required}GB. This would not be enough to proceed"
raise DownloaderError(err) |
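The check relies on shutil.disk_usage; a minimal illustration with placeholder values:

import shutil

path, required = ".", 1  # placeholder path and a 1 GB requirement
free_space = shutil.disk_usage(path).free // (2 ** 30)
print(f"{free_space} GB free at {path}; at least {required} GB required")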
Python | def check_process_lock(self):
"""
Verify whether any executable is running from the installation folder.
Abort the installation if such a process is found
:return: None
"""
process_list = []
for process in psutil.process_iter():
try:
if self.product_root_path in process.exe():
process_list.append(process.name())
except psutil.AccessDenied:
pass
if process_list:
process_list.sort(key=len) # to fit into UI
raise DownloaderError(
"Following processes are running from installation directory: "
+ f"{', '.join(set(process_list))}. Please stop all processes."
) |
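A standalone sketch of the same psutil scan (the installation folder below is a hypothetical example, not taken from the settings):

import psutil

root = r"C:\Program Files\AnsysEM"  # hypothetical installation folder
locked = []
for proc in psutil.process_iter():
    try:
        if root in proc.exe():
            locked.append(proc.name())
    except (psutil.AccessDenied, psutil.NoSuchProcess, psutil.ZombieProcess):
        pass
print(sorted(set(locked), key=len))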
Python | def newer_version_exists(self):
"""
Verify whether the version on the server is newer than the installed one
Returns:
(bool) True if remote is newer or no version is installed, False if remote is the same or older
"""
if "Workbench" in self.settings.version:
product_installed = os.path.join(self.product_root_path, "package.id")
elif "LicenseManager" in self.settings.version:
# always update LM
return True
else:
product_installed = self.installed_product_info
if os.path.isfile(product_installed):
if "Workbench" in self.settings.version:
with open(product_installed) as file:
installed_product_version = next(file).rstrip().split()[-1] # get first line
try:
installed_product_version = int(installed_product_version.split("P")[0])
except ValueError:
installed_product_version = 0
else:
installed_product_version = self.get_edt_build_date(product_installed)
logging.info(f"Installed version of {self.settings.version} is {installed_product_version}")
new_product_version = self.get_new_build_date()
if not all([new_product_version, installed_product_version]):
# some of the versions could not be parsed, need installation
return True
if new_product_version <= installed_product_version:
return False
return True |
Python | def download_file(self):
"""
Downloads the file in chunks and saves it to a temporary archive file.
Uses the URL of the zip archive, or the special JFrog API to download the Workbench folder
:modify: (str) zip_file: path to the downloaded archive
"""
if self.settings.artifactory == "SharePoint" or "win" in self.build_artifactory_path.name:
archive_type = "zip"
else:
archive_type = "tgz"
self.zip_file = os.path.join(self.settings.download_path, f"{self.settings.version}.{archive_type}")
chunk_size = 50 * 1024 * 1024
if self.settings.artifactory == "SharePoint":
self.download_from_sharepoint(chunk_size=chunk_size)
else:
self.download_from_artifactory(archive_type, chunk_size=chunk_size)
logging.info(f"File is downloaded to {self.zip_file}") | def download_file(self):
"""
Downloads file in chunks and saves to the temp.zip file
Uses url to the zip archive or special JFrog API to download Workbench folder
:modify: (str) zip_file: link to the zip file
"""
if self.settings.artifactory == "SharePoint" or "win" in self.build_artifactory_path.name:
archive_type = "zip"
else:
archive_type = "tgz"
self.zip_file = os.path.join(self.settings.download_path, f"{self.settings.version}.{archive_type}")
chunk_size = 50 * 1024 * 1024
if self.settings.artifactory == "SharePoint":
self.download_from_sharepoint(chunk_size=chunk_size)
else:
self.download_from_artifactory(archive_type, chunk_size=chunk_size)
logging.info(f"File is downloaded to {self.zip_file}") |
Python | def install(self, local_lang=False):
"""
Unpack the downloaded archive and proceed to installation. Execution differs for Electronics Desktop and Workbench
:param local_lang: if not specified then use English as default installation language
:return: None
"""
self.unpack_archive()
if "ElectronicsDesktop" in self.settings.version:
self.install_edt()
elif "Workbench" in self.settings.version:
self.install_wb(local_lang)
else:
self.install_license_manager()
self.update_installation_history(status="In-Progress", details="Clean temp directory")
self.clean_temp() |
Python | def install_edt(self):
"""
Install Electronics Desktop. Verifies that the same version is not already installed and performs a
silent installation.
Gets the Workbench installation path from an environment variable and enables the integration if it exists.
:return: None
"""
setup_exe, product_id, installshield_version = self.parse_iss_template(self.target_unpack_dir)
self.uninstall_edt(setup_exe, product_id, installshield_version)
install_iss_file, install_log_file = self.create_install_iss_file(installshield_version, product_id)
command = [f'"{setup_exe}"', "-s", rf'-f1"{install_iss_file}"', rf'-f2"{install_log_file}"']
command = " ".join(command)
self.update_installation_history(status="In-Progress", details="Start installation")
logging.info("Execute installation")
self.subprocess_call(command)
self.check_result_code(install_log_file)
self.update_edt_registry()
self.remove_aedt_shortcuts() |
Python | def uninstall_edt(self, setup_exe, product_id, installshield_version):
"""
Silently uninstall build of the same version
:return: None
"""
if os.path.isfile(self.installed_product_info):
uninstall_iss_file = os.path.join(self.target_unpack_dir, "uninstall.iss")
uninstall_log_file = os.path.join(self.target_unpack_dir, "uninstall.log")
with open(uninstall_iss_file, "w") as file:
file.write(iss_templates.uninstall_iss.format(product_id, installshield_version))
command = [
f'"{setup_exe}"',
"-uninst",
"-s",
rf'-f1"{uninstall_iss_file}"',
rf'-f2"{uninstall_log_file}"',
]
command = " ".join(command)
logging.info("Execute uninstallation")
self.update_installation_history(status="In-Progress", details="Uninstall previous build")
self.subprocess_call(command)
self.check_result_code(uninstall_log_file, False)
em_main_dir = os.path.dirname(self.product_root_path)
self.remove_path(em_main_dir)
if os.path.isdir(em_main_dir):
raise DownloaderError(f"Failed to remove {em_main_dir}. Probably directory is locked.")
else:
logging.info("Version is not installed, skip uninstallation") | def uninstall_edt(self, setup_exe, product_id, installshield_version):
"""
Silently uninstall build of the same version
:return: None
"""
if os.path.isfile(self.installed_product_info):
uninstall_iss_file = os.path.join(self.target_unpack_dir, "uninstall.iss")
uninstall_log_file = os.path.join(self.target_unpack_dir, "uninstall.log")
with open(uninstall_iss_file, "w") as file:
file.write(iss_templates.uninstall_iss.format(product_id, installshield_version))
command = [
f'"{setup_exe}"',
"-uninst",
"-s",
rf'-f1"{uninstall_iss_file}"',
rf'-f2"{uninstall_log_file}"',
]
command = " ".join(command)
logging.info("Execute uninstallation")
self.update_installation_history(status="In-Progress", details="Uninstall previous build")
self.subprocess_call(command)
self.check_result_code(uninstall_log_file, False)
em_main_dir = os.path.dirname(self.product_root_path)
self.remove_path(em_main_dir)
if os.path.isdir(em_main_dir):
raise DownloaderError(f"Failed to remove {em_main_dir}. Probably directory is locked.")
else:
logging.info("Version is not installed, skip uninstallation") |
Python | def check_result_code(self, log_file, installation=True):
"""
Verify result code of the InstallShield log file
:param log_file: installation log file
:param installation: True to verify the log after installation, False after uninstallation
:return: None
"""
success = "New build was successfully installed" if installation else "Previous build was uninstalled"
fail = "Installation went wrong" if installation else "Uninstallation went wrong"
if not os.path.isfile(log_file):
raise DownloaderError(f"{fail}. Check that UAC disabled or confirm UAC question manually")
msg = fail
regex = "ResultCode=(.*)"
with open(log_file) as file:
for line in file:
code = re.findall(regex, line)
if code and code[0] == "0":
logging.info(success)
break
else:
if not installation:
msg = "Official uninstaller failed, make hard remove"
logging.error(msg)
self.warnings_list.append(msg)
else:
raise DownloaderError(msg) |
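The result-code parsing boils down to a small regex scan with a for/else; an isolated illustration on made-up log content:

import re

log_lines = ["[ResponseResult]", "ResultCode=0"]  # illustrative InstallShield log lines
for line in log_lines:
    code = re.findall("ResultCode=(.*)", line)
    if code and code[0] == "0":
        print("success")
        break
else:
    print("failure")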
Python | def parse_iss_template(unpacked_dir):
"""
Open the directory with the unpacked Electronics Desktop build and search for SilentInstallationTemplate.iss to
extract the product ID, which is a GUID
Args:
unpacked_dir: directory where AEDT package was unpacked
Returns:
setup_exe: path to setup.exe, if it exists
product_id: product GUID extracted from the iss template
installshield_version: InstallShield version read from the template
"""
default_iss_file = ""
setup_exe = ""
product_id_match = []
for dir_path, dir_names, file_names in os.walk(unpacked_dir):
for filename in file_names:
if "AnsysEM" in dir_path and filename.endswith(".iss"):
default_iss_file = os.path.join(dir_path, filename)
setup_exe = os.path.join(dir_path, "setup.exe")
break
if not default_iss_file:
raise DownloaderError("SilentInstallationTemplate.iss does not exist")
if not os.path.isfile(setup_exe):
raise DownloaderError("setup.exe does not exist")
with open(default_iss_file, "r") as iss_file:
for line in iss_file:
if "DlgOrder" in line:
guid_regex = "[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}"
product_id_match = re.findall(guid_regex, line)
if "InstallShield Silent" in line:
installshield_version = next(iss_file).split("=")[1]
if product_id_match:
product_id = product_id_match[0]
logging.info(f"Product ID is {product_id}")
else:
raise DownloaderError("Unable to extract product ID")
return setup_exe, product_id, installshield_version |
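The product ID extraction is a plain GUID regex match; a small isolated example on a made-up DlgOrder line:

import re

guid_regex = "[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}"
line = "DlgOrder=Dlg0={12345678-90ab-cdef-1234-567890abcdef}-SdWelcome-0"  # made-up sample
match = re.findall(guid_regex, line)
print(match[0] if match else "no product ID found")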
Python | def install_license_manager(self):
"""
Install License Manager and feed it the license file
"""
self.setup_exe = os.path.join(self.target_unpack_dir, "setup.exe")
if os.path.isfile(self.setup_exe):
install_path = os.path.join(self.settings.install_path, "ANSYS Inc")
if not os.path.isfile(self.settings.license_file):
raise DownloaderError(f"No license file was detected under {self.settings.license_file}")
command = [
self.setup_exe,
"-silent",
"-LM",
"-install_dir",
install_path,
"-lang",
"en",
"-licfilepath",
self.settings.license_file,
]
self.update_installation_history(status="In-Progress", details="Start installation")
logging.info("Execute installation")
self.subprocess_call(command)
package_build = self.parse_lm_installer_builddate()
installed_build = self.get_license_manager_build_date()
if all([package_build, installed_build]) and package_build == installed_build:
self.update_installation_history(status="Success", details="Normal completion")
else:
raise DownloaderError("License Manager was not installed")
else:
raise DownloaderError("No LicenseManager setup.exe file detected") | def install_license_manager(self):
"""
Install license manager and feed it with license file
"""
self.setup_exe = os.path.join(self.target_unpack_dir, "setup.exe")
if os.path.isfile(self.setup_exe):
install_path = os.path.join(self.settings.install_path, "ANSYS Inc")
if not os.path.isfile(self.settings.license_file):
raise DownloaderError(f"No license file was detected under {self.settings.license_file}")
command = [
self.setup_exe,
"-silent",
"-LM",
"-install_dir",
install_path,
"-lang",
"en",
"-licfilepath",
self.settings.license_file,
]
self.update_installation_history(status="In-Progress", details="Start installation")
logging.info("Execute installation")
self.subprocess_call(command)
package_build = self.parse_lm_installer_builddate()
installed_build = self.get_license_manager_build_date()
if all([package_build, installed_build]) and package_build == installed_build:
self.update_installation_history(status="Success", details="Normal completion")
else:
raise DownloaderError("License Manager was not installed")
else:
raise DownloaderError("No LicenseManager setup.exe file detected") |
Python | def parse_lm_installer_builddate(self):
"""
Check build date of installation package of License Manager
"""
build_file = os.path.join(self.target_unpack_dir, "builddate.txt")
lm_center_archive = os.path.join(self.target_unpack_dir, "lmcenter", "WINX64.7z")
if not os.path.isfile(build_file) and os.path.isfile(lm_center_archive):
with py7zr.SevenZipFile(lm_center_archive, "r") as archive:
archive.extractall(path=os.path.join(self.target_unpack_dir, "lmcenter"))
build_file = os.path.join(
self.target_unpack_dir,
"lmcenter",
"Shared Files",
"licensing",
"tools",
"lmcenter",
"lmcenter_blddate.txt",
)
if not os.path.isfile(build_file):
# check again if file was unpacked
logging.warning("builddate.txt was not found in installation package")
return
with open(build_file) as file:
for line in file:
if "license management center" in line.lower():
lm_build_date = line.split()[-1]
try:
logging.info(f"Build date of License Manager in installation package {lm_build_date}")
lm_build_date = int(lm_build_date)
return lm_build_date
except (TypeError, ValueError):
raise DownloaderError("Cannot extract build date of installation package") | def parse_lm_installer_builddate(self):
"""
Check build date of installation package of License Manager
"""
build_file = os.path.join(self.target_unpack_dir, "builddate.txt")
lm_center_archive = os.path.join(self.target_unpack_dir, "lmcenter", "WINX64.7z")
if not os.path.isfile(build_file) and os.path.isfile(lm_center_archive):
with py7zr.SevenZipFile(lm_center_archive, "r") as archive:
archive.extractall(path=os.path.join(self.target_unpack_dir, "lmcenter"))
build_file = os.path.join(
self.target_unpack_dir,
"lmcenter",
"Shared Files",
"licensing",
"tools",
"lmcenter",
"lmcenter_blddate.txt",
)
if not os.path.isfile(build_file):
# check again if file was unpacked
logging.warning("builddate.txt was not found in installation package")
return
with open(build_file) as file:
for line in file:
if "license management center" in line.lower():
lm_build_date = line.split()[-1]
try:
logging.info(f"Build date of License Manager in installation package {lm_build_date}")
lm_build_date = int(lm_build_date)
return lm_build_date
except TypeError:
raise DownloaderError("Cannot extract build date of installation package") |
Python | def install_wb(self, local_lang=False):
"""
Install Workbench to the target installation directory
:param local_lang: if not specified then use English as default installation language
"""
self.setup_exe = os.path.join(self.target_unpack_dir, "setup.exe")
if os.path.isfile(self.setup_exe):
uninstall_exe = self.uninstall_wb()
install_path = os.path.join(self.settings.install_path, "ANSYS Inc")
command = [self.setup_exe, "-silent", "-install_dir", install_path]
if not local_lang:
command += ["-lang", "en"]
command += self.settings.wb_flags.split()
# the "shared files" is created at the same level as the "ANSYS Inc" so if installing to unique folders,
# the Shared Files folder will be unique as well. Thus we can check install folder for license
if (
os.path.isfile(os.path.join(install_path, "Shared Files", "Licensing", "ansyslmd.ini"))
or "ANSYSLMD_LICENSE_FILE" in os.environ
):
logging.info("Install using existing license configuration")
else:
command += ["-licserverinfo", "2325:1055:127.0.0.1,OTTLICENSE5,PITRH6LICSRV1"]
logging.info("Install using 127.0.0.1, Otterfing and HQ license servers")
# convert command to string to easily append custom flags
command = subprocess.list2cmdline(command)
command += " " + self.settings.custom_flags
self.update_installation_history(status="In-Progress", details="Start installation")
logging.info("Execute installation")
self.subprocess_call(command)
if os.path.isfile(uninstall_exe):
logging.info("New build was installed")
else:
raise DownloaderError(
"Workbench installation failed. "
+ f"If you see this error message by mistake please report to {__email__}"
)
if self.settings.wb_assoc:
wb_assoc_exe = os.path.join(self.settings.wb_assoc, "commonfiles", "tools", "winx64", "fileassoc.exe")
if not os.path.isfile(wb_assoc_exe):
self.warnings_list.append(f"Cannot find {wb_assoc_exe}")
else:
logging.info("Run WB file association")
self.subprocess_call(wb_assoc_exe)
else:
raise DownloaderError("No Workbench setup.exe file detected") | def install_wb(self, local_lang=False):
"""
Install Workbench to the target installation directory
:param local_lang: if not specified then use English as default installation language
"""
self.setup_exe = os.path.join(self.target_unpack_dir, "setup.exe")
if os.path.isfile(self.setup_exe):
uninstall_exe = self.uninstall_wb()
install_path = os.path.join(self.settings.install_path, "ANSYS Inc")
command = [self.setup_exe, "-silent", "-install_dir", install_path]
if not local_lang:
command += ["-lang", "en"]
command += self.settings.wb_flags.split()
# the "shared files" is created at the same level as the "ANSYS Inc" so if installing to unique folders,
# the Shared Files folder will be unique as well. Thus we can check install folder for license
if (
os.path.isfile(os.path.join(install_path, "Shared Files", "Licensing", "ansyslmd.ini"))
or "ANSYSLMD_LICENSE_FILE" in os.environ
):
logging.info("Install using existing license configuration")
else:
command += ["-licserverinfo", "2325:1055:127.0.0.1,OTTLICENSE5,PITRH6LICSRV1"]
logging.info("Install using 127.0.0.1, Otterfing and HQ license servers")
# convert command to string to easy append custom flags
command = subprocess.list2cmdline(command)
command += " " + self.settings.custom_flags
self.update_installation_history(status="In-Progress", details="Start installation")
logging.info("Execute installation")
self.subprocess_call(command)
if os.path.isfile(uninstall_exe):
logging.info("New build was installed")
else:
raise DownloaderError(
"Workbench installation failed. "
+ f"If you see this error message by mistake please report to {__email__}"
)
if self.settings.wb_assoc:
wb_assoc_exe = os.path.join(self.settings.wb_assoc, "commonfiles", "tools", "winx64", "fileassoc.exe")
if not os.path.isfile(wb_assoc_exe):
self.warnings_list.append(f"Cannot find {wb_assoc_exe}")
else:
logging.info("Run WB file association")
self.subprocess_call(wb_assoc_exe)
else:
raise DownloaderError("No Workbench setup.exe file detected") |
Python | def uninstall_wb(self):
"""
Uninstall Workbench if one exists in the target installation directory
:return: uninstall_exe: path to the uninstaller executable"""
uninstall_exe = os.path.join(self.product_root_path, "Uninstall.exe")
if os.path.isfile(uninstall_exe):
command = [uninstall_exe, "-silent"]
self.update_installation_history(status="In-Progress", details="Uninstall previous build")
logging.info("Execute uninstallation")
self.subprocess_call(command)
logging.info("Previous build was uninstalled using uninstaller")
else:
logging.info("No Workbench Uninstall.exe file detected")
self.remove_path(self.product_root_path)
return uninstall_exe |
Python | def remove_path(self, path):
"""
Safely remove a path, falling back to a forced removal if rmtree fails
:param path:
:return:
"""
def hard_remove():
try:
# try this dirty method to force remove all files in directory
all_files = os.path.join(path, "*.*")
command = ["DEL", "/F", "/Q", "/S", all_files, ">", "NUL"]
self.subprocess_call(command, shell=True)
command = ["rmdir", "/Q", "/S", path]
self.subprocess_call(command, shell=True)
except Exception as err:
logging.error(str(err))
logging.error("Failed to remove directory via hard_remove")
self.warnings_list.append("Failed to remove directory")
logging.info(f"Removing {path}")
if os.path.isdir(path):
try:
shutil.rmtree(path)
except PermissionError:
logging.warning("Permission error. Switch to CMD force mode")
hard_remove()
self.warnings_list.append("Clean remove failed due to Permissions Error")
except (FileNotFoundError, OSError, Exception):
logging.warning("FileNotFoundError or other error. Switch to CMD force mode")
hard_remove()
self.warnings_list.append("Clean remove failed due to Not Found or OS Error")
elif os.path.isfile(path):
os.remove(path) |
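An alternative, purely Python fallback for stubborn files (instead of the CMD commands above) is shutil.rmtree with an onerror handler that clears the read-only bit; a hedged sketch with a hypothetical path:

import os
import shutil
import stat

def _clear_readonly(func, path, _exc_info):
    # Clear the read-only attribute and retry the failed removal.
    os.chmod(path, stat.S_IWRITE)
    func(path)

# shutil.rmtree(r"C:\temp\old_build", onerror=_clear_readonly)  # hypothetical path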
Python | def clean_temp(self):
"""
Cleans up the downloaded zip and the unpacked folder with its content
:return: None
"""
try:
if os.path.isfile(self.zip_file) and self.settings.delete_zip:
self.remove_path(self.zip_file)
logging.info("ZIP deleted")
if os.path.isdir(self.target_unpack_dir):
self.remove_path(self.target_unpack_dir)
logging.info("Unpacked directory removed")
except PermissionError:
logging.error("Temp files could not be removed due to permission error") | def clean_temp(self):
"""
Cleans downloaded zip and unpacked folder with content
:return: None
"""
try:
if os.path.isfile(self.zip_file) and self.settings.delete_zip:
self.remove_path(self.zip_file)
logging.info("ZIP deleted")
if os.path.isdir(self.target_unpack_dir):
self.remove_path(self.target_unpack_dir)
logging.info("Unpacked directory removed")
except PermissionError:
logging.error("Temp files could not be removed due to permission error") |
Python | def remove_aedt_shortcuts(self):
"""
Remove newly created AEDT shortcuts and rename the Electronics Desktop shortcut to a versioned one
"""
if not self.settings.replace_shortcut:
return
# include Public, user folder and user folder when OneDrive sync is enabled
for user in ["Public", os.getenv("username"), os.path.join(os.getenv("username"), "OneDrive - ANSYS, Inc")]:
desktop = os.path.join("C:\\", "Users", user, "Desktop")
for shortcut in [
"ANSYS Savant",
"ANSYS EMIT",
"ANSYS SIwave",
"ANSYS Twin Builder",
"Ansys Nuhertz FilterSolutions",
]:
self.remove_path(os.path.join(desktop, shortcut + ".lnk"))
new_name = os.path.join(
desktop, f"20{self.product_version[:2]}R{self.product_version[2:]} Electronics Desktop.lnk"
)
aedt_shortcut = os.path.join(desktop, "ANSYS Electronics Desktop.lnk")
if not os.path.isfile(new_name):
try:
os.rename(aedt_shortcut, new_name)
except FileNotFoundError:
pass
else:
self.remove_path(aedt_shortcut) |
Python | def update_edt_registry(self):
"""
Update Electronics Desktop registry based on the files in the HPC_Options folder that are added from UI
:return: None
"""
hpc_folder = os.path.join(self.settings_folder, "HPC_Options")
update_registry_exe = os.path.join(self.product_root_path, "UpdateRegistry.exe")
productlist_file = os.path.join(self.product_root_path, "config", "ProductList.txt")
if not os.path.isfile(productlist_file):
raise DownloaderError("Cannot update registry. Probably Electronics Desktop installation failed")
with open(productlist_file) as file:
product_version = next(file).rstrip() # get first line
self.update_installation_history(status="In-Progress", details="Update registry")
if os.path.isdir(hpc_folder):
for file in os.listdir(hpc_folder):
if ".acf" in file:
options_file = os.path.join(hpc_folder, file)
command = [update_registry_exe, "-ProductName", product_version, "-FromFile", options_file]
logging.info("Update registry")
                self.subprocess_call(command)
Python | def update_installation_history(self, status, details):
"""
Update ordered dictionary with new data and write it to the file
:param status: Failed | Success | In-Progress (important, used in JS)
:param details: Message for details field
:return:
"""
if status == "Failed" or status == "Success":
try:
self.toaster_notification(status, details)
except Exception:
msg = "Toaster notification did not work"
logging.error(msg)
self.warnings_list.append(msg)
self.get_installation_history() # in case if file was deleted during run of installation
time_now = datetime.datetime.now().strftime("%d-%m-%Y %H:%M")
shorten_path = self.settings_path.replace(os.getenv("APPDATA", "@@@"), "%APPDATA%")
if status == "Failed" or status == "Success":
if self.warnings_list:
details += "\nSome warnings occurred during process:\n" + "\n".join(self.warnings_list)
self.history[self.hash] = [status, self.settings.version, time_now, shorten_path, details, self.pid]
with open(self.history_file, "w") as file:
        json.dump(self.history, file, indent=4)
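# --- Hypothetical example of the resulting installation_history.json ---------
# Each hash key maps to [status, version, time, settings path, details, pid],
# matching the list written by update_installation_history above. All concrete
# values below are made up for illustration.
import json

history_example = {
    "9f3c07b1A": [
        "Success",
        "232_ElectronicsDesktop",        # assumed version string format
        "01-06-2023 14:30",
        r"%APPDATA%\build_downloader\default.json",
        "Installation finished",
        12345,
    ]
}
print(json.dumps(history_example, indent=4))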
Python | def send_statistics(self, error=None):
"""
Send usage statistics to the database.
    Collect username, time, version and software installed;
    in case of a crash, also send the crash data
    :param error: error message describing what went wrong
:return: None
"""
version, tool = self.settings.version.split("_")
time_now = datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")
self.settings.username = os.getenv("username", self.settings.username)
if self.settings.artifactory == "SharePoint":
self.send_statistics_to_sharepoint(tool, version, time_now, error)
else:
        self.send_statistics_to_influx(tool, version, time_now, error)
Python | def subprocess_call(command, shell=False, popen=False):
"""
Wrapper for subprocess call to handle non admin run or UAC issue
Args:
command: (str/list) command to run
shell: call with shell mode or not
        popen: set True when command output is needed (uses subprocess.Popen). PyInstaller builds with
            -noconsole, so stdout, stdin and stderr must be defined explicitly
Returns:
output (str), output of the command run
"""
output = ""
try:
if isinstance(command, list):
command_str = subprocess.list2cmdline(command)
else:
command_str = command
logging.info(command_str)
if popen:
p = subprocess.Popen(
command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=shell
)
byte_output = p.stdout.read()
output = byte_output.decode("utf-8").rstrip()
p.communicate()
else:
subprocess.call(command, shell=shell)
return output
except OSError:
        raise DownloaderError("Please run as administrator and disable Windows UAC")
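# --- Usage sketch for subprocess_call above (assumed context) ----------------
# popen=True routes the call through Popen and returns decoded stdout; the
# default branch simply fires the command without capturing output.
import sys

py_version = subprocess_call([sys.executable, "--version"], popen=True)
print(py_version)                                           # e.g. "Python 3.10.11"
subprocess_call([sys.executable, "-c", "print('done')"])    # output not captured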
Python | def parse_args(version):
"""
Function to parse arguments provided to the script. Search for -p key to get settings path
:return: settings_path: path to the configuration file
"""
parser = argparse.ArgumentParser()
# Add long and short argument
parser.add_argument("--path", "-p", help="set path to the settings file generated by UI")
parser.add_argument("--version", "-V", action="version", version=f"%(prog)s version: {version}")
args = parser.parse_args()
if args.path:
settings_path = args.path
if not os.path.isfile(settings_path):
raise DownloaderError("Settings file does not exist")
return settings_path
else:
        raise DownloaderError("Please provide --path argument")
Python | def generate_hash_str():
"""
    Generate a random hash. The trailing letter A is important to preserve ordering in JS
:return: hash code (str)
"""
    return f"{random.getrandbits(32):x}A".strip()
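# --- Quick check of generate_hash_str's output format (illustrative) ---------
# 32 random bits rendered as hex plus a trailing "A", e.g. "9f3c07b1A"; keeping
# the key non-numeric is what preserves insertion order on the JS side.
import random

h = f"{random.getrandbits(32):x}A"
print(h, h.endswith("A"), len(h) <= 9)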
Python | def main():
"""
    Main function that runs only if the process is not locked (cannot run in parallel).
    Parses the folder with all settings files and starts an upload for each of them.
Returns:
"""
settings_folder = r"/settings"
downloader_backend.set_logger(os.path.join(settings_folder, "uploader.log"))
with PidFile():
for file in os.listdir(settings_folder):
settings_file = os.path.join(settings_folder, file)
_, file_extension = os.path.splitext(settings_file)
if file_extension == ".json" and "installation_history.json" not in settings_file:
for dist in ["winx64", "linx64"]:
try:
upload_to_sharepoint(settings_file, distribution=dist)
except Exception:
                        continue
Python | def upload_to_sharepoint(settings_file, distribution):
"""
Check that latest build is not yet on Sharepoint. Download from Artifactory and upload to Sharepoint.
    Add the new build date to the product list on SP
Args:
settings_file: path to file with download settings
distribution: linux or windows distribution selection
Returns:
"""
sp = SharepointUpload(settings_file)
sp.get_build_link(distribution=distribution)
# validate that we do not have such build already
if "LicenseManager" in sp.settings.version:
        # todo: once we have builddate.txt on Artifactory we need to change this
build_date = int(datetime.datetime.now().strftime("%Y%m%d"))
else:
build_date = sp.get_new_build_date(distribution=distribution)
all_items = sp.get_list_items(distribution=distribution)
for item in all_items:
if item.properties["Title"] == sp.settings.version and item.properties["build_date"] == build_date:
logging.info(f"Build is up to date {item.properties['build_date']} == {build_date}")
return
sp.download_file()
archive_file = Path(sp.zip_file)
version, product = sp.settings.version.split("_")
time_now = datetime.datetime.now().strftime("%Y%m%d_%H%M")
folder_url = sp.prepare_upload(archive_file, distribution, product, version, time_now)
if not folder_url:
raise UploaderError("folder_url is None")
sp.add_list_item(f"{folder_url}/{archive_file.name}", int(build_date), folder_url, distribution=distribution)
    archive_file.unlink()
Python | def prepare_upload(self, file_path, *remote_path):
"""
    Create the remote folder and upload a file into it
Args:
file_path: local file path
*remote_path: list with subfolders for remote path
Returns: (str) URL with remote path to the folder
"""
folder_url = "/".join(["Shared Documents"] + list(remote_path))
target_folder = self.ctx.web.ensure_folder_path(folder_url)
    self.ctx.execute_query()  # execute now, otherwise the upload gets stuck
size_chunk_mb = 100
size_chunk = size_chunk_mb * 1024 * 1024
logging.info(f"Start uploading {file_path} to {folder_url}")
try:
self.upload_file(file_path, size_chunk, target_folder)
except UploaderError:
target_folder.recycle()
    return folder_url
Python | def upload_file(self, file_path, size_chunk, target_folder):
"""
Upload a file to Sharepoint and validate its size
Args:
file_path: local file path
size_chunk: size of chunks to upload, in bytes
target_folder: office365 folder object
Returns: None
"""
file_size = file_path.stat().st_size
result_file = target_folder.files.create_upload_session(
str(file_path), size_chunk, self.print_upload_progress, file_size
)
self.ctx.execute_query_retry(
max_retry=100,
exceptions=(Exception,),
failure_callback=lambda attempt, err: self.log_fail(attempt, err, total=100),
)
remote_size = result_file.length
if remote_size is None or abs(file_size - remote_size) > 0.03 * file_size:
logging.warning(f"Remote size is {remote_size}. Local is {file_size}. File isn't fully uploaded, recycle")
result_file.recycle()
self.ctx.execute_query()
raise UploaderError("File size difference is more than 3%")
    logging.info("File {0} has been uploaded successfully".format(result_file.serverRelativeUrl))
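# --- Minimal sketch of the size validation used in upload_file above ---------
# The upload is accepted only when the remote size is within 3% of the local
# size; a missing remote size counts as a failure. The helper name is hypothetical.
def upload_looks_complete(local_size, remote_size, tolerance=0.03):
    if remote_size is None:
        return False
    return abs(local_size - remote_size) <= tolerance * local_size

assert upload_looks_complete(1000, 995)
assert not upload_looks_complete(1000, 900)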
Python | def add_list_item(self, file_url, build_date, folder_url, distribution):
"""
Add information about uploaded file to Sharepoint list
Args:
file_url: direct download URL of the build file
build_date: parsed build date of the file
        folder_url: URL of the folder where the archive is located
distribution: lin or win
Returns: None
"""
title = "product_list" if distribution == "winx64" else "linux_product_list"
product_list = self.ctx.web.lists.get_by_title(title)
product_list.add_item(
{
"Title": self.settings.version,
"build_date": build_date,
"relative_url": file_url,
"shareable_folder": f"{SHAREPOINT_SITE_URL}/{folder_url}",
}
)
    self.ctx.execute_query()
Python | def transfer_items(self, items):
"""
    Transfers items from the SharePoint list to InfluxDB
Args:
items: SharePoint list of items
Returns:
None
"""
for item in items:
self.settings.artifactory = "SharePoint"
self.settings.username = item.properties["Title"]
error = item.properties.get("error", None)
if not item.properties["in_influx"]:
item.set_property("in_influx", True)
item.update()
self.ctx.execute_query()
self.send_statistics_to_influx(
tool=item.properties["tool"],
version=item.properties["version"],
time_now=item.properties["Date"],
error=error,
downloader_ver=item.properties.get("downloader_ver", "2.0.0"),
            )
Python | def _dataFiles_sourceFile(self): #set the datafile path when the combobox changes
#get value on combo box
parName = self.comboBox_DF_par.currentText()
candName = self.comboBox_DF_candName.currentText()
for v in [parName, candName]:
if v=='':
return
#retrieve filepath
pars_d = self.tableWidget_P.get_values(candName, axis=0)
assert parName in pars_d, 'failed to get \'%s\' for \'%s\''%(parName, candName)
data_fp = pars_d[parName]
#empty check
if pd.isnull(data_fp):
        self.logger.push('got null filepath for \'%s\''%parName)
return
assert isinstance(data_fp, str), 'got bad filepath for %s'%parName
assert os.path.exists(data_fp), 'requested file path for \'%s\' does not exist'%parName
#set on the source lineedit
    self.lineEdit_DF_fp.setText(data_fp)
    """setting the sink filepath once the user clicks Load"""
Python | def results_joinGeo(self,
fp_attn = 'r_passet',
style_fn =None, #for loading specific styles
):
log = self.logger.getChild('results_joinGeo')
#=======================================================================
# collect inputs
#=======================================================================
self._set_setup() #only need the common
geo_vlay = self.comboBox_JGfinv.currentLayer()
self.feedback.setProgress(5)
#=======================================================================
# check inputs
#=======================================================================
assert isinstance(geo_vlay, QgsVectorLayer), \
'need to specify a geometry layer on the \'Setup\' tab to join results to'
#=======================================================================
# execute
#=======================================================================
#setup
kwargs = {attn:getattr(self, attn) for attn in ['logger', 'tag', 'cf_fp', 'out_dir', 'feedback', 'init_q_d']}
wrkr = results.djoin.Djoiner(**kwargs).setup()
self.feedback.setProgress(25)
#=======================================================================
# #execute
#=======================================================================
"""running with all defaults
    more customization is done on the Results dialog"""
res_vlay = wrkr.run(geo_vlay, keep_fnl='all')
self.feedback.setProgress(80)
#=======================================================================
# load
#=======================================================================
self._load_toCanvas(res_vlay, logger=log, style_fn=style_fn)
self.feedback.setProgress(99)
#=======================================================================
# wrap
#=======================================================================
log.push('run_joinGeo finished')
    self.feedback.upd_prog(None)
Python | def run(self, #run a set of WorkFlows
wFlow_l, #set of workflows to run
**kwargs
):
"""
lets the user define their own methods for batching together workflows
"""
log=self.logger.getChild('r')
log.info('running %i flows: \n %s'%(len(wFlow_l), wFlow_l))
rlib = dict()
for fWrkr in wFlow_l:
runr = self._run_wflow(fWrkr, **kwargs)
rlib[runr.name] = runr.res_d.copy()
runr.__exit__()
log.info('finished on %i: \n %s'%(len(rlib), list(rlib.keys())))
    return rlib
Python | def prep_cf(self, pars_d, #Preparor.copy_cf_template and set_cf_pars
logger=None):
"""
    this one's a bit weird because the main mechanism is a file write...
"""
if logger is None: logger=self.logger
log = logger.getChild('prep_cf')
wrkr = self._get_wrkr(Preparor)
#copy the template
wrkr.tag = '%s_%s'%(self.name, self.tag)
cf_fp = wrkr.copy_cf_template() #just copy the default template
#=======================================================================
# #set some basics
#=======================================================================
#fix filepaths
#loop and pull
new_pars_d =dict()
for sect, keys in {
'parameters':['impact_units', 'rtail', 'event_rels', 'felv', 'prec', 'ltail'],
'dmg_fps':['curves'],
'plotting':['impactfmt_str', 'color'],
#'risk_fps':['evals'],
}.items():
d = {k:str(pars_d[k]) for k in keys if k in pars_d} #get these keys where present
if sect == 'parameters':
d['name']=self.name
if len(d)>0:
new_pars_d[sect] = tuple([d, '#set by testAll.py on %s'%wrkr.today_str])
wrkr.set_cf_pars(new_pars_d)
#=======================================================================
# wrap
#=======================================================================
#update the session
"""subsequent workers will inherit the control file for this workflow"""
self.cf_fp = cf_fp
self.com_hndls.append('cf_fp')
log.info('control file created: %s'%cf_fp)
    return cf_fp
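# --- Sketch of the structure handed to set_cf_pars in prep_cf above ----------
# Assumed semantics: each control-file section maps to a tuple of
# ({option: value}, "#comment written with the section"). Section and option
# names follow the keys used in prep_cf; the values are invented examples.
new_pars_d_example = {
    'parameters': ({'name': 'tut1', 'prec': '2'}, '#set by testAll.py'),
    'dmg_fps':    ({'curves': r'C:\data\curves.xls'}, '#set by testAll.py'),
}
for sect, (d, comment) in new_pars_d_example.items():
    print(sect, d, comment)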
Python | def rsamp_dtm(self, pars_d, #DTM raster sampler
logger=None,
rkwargs=None,
):
"""
kwargs not setup to be different from the rsamp
"""
if logger is None: logger=self.logger
log = logger.getChild('rsamp_dtm')
wrkr = self._get_wrkr(Rsamp)
#=======================================================================
# load the data
#=======================================================================
fp = os.path.join(self.base_dir, pars_d['dtm_fp'])
dtm_rlay = self._retrieve('dtm_rlay',
f = lambda logger=None: wrkr.load_rlay(fp, logger=logger))
#pull previously loaded
finv_vlay = self.data_d['finv_vlay']
#=======================================================================
# execute
#=======================================================================
if rkwargs is None: rkwargs = self._get_kwargs(wrkr.__class__.__name__)
res_vlay = wrkr.run([dtm_rlay], finv_vlay, fname='gels',
**rkwargs)
#=======================================================================
# #post
#=======================================================================
wrkr.dtm_check(res_vlay)
df = wrkr.write_res(res_vlay, write=self.write)
if not self.write: wrkr.out_fp = 'none' #placeholder
wrkr.upd_cf_dtm(cf_fp = self.cf_fp)
    return df
Python | def validate(self, pars_d, #validation
logger=None,
):
"""because we're not using the control file for testing...
no point in running the validator"""
#=======================================================================
# defaults
#=======================================================================
if logger is None: logger=self.logger
log = logger.getChild('validate')
wrkr = self._get_wrkr(Vali)
#=======================================================================
# precheck
#=======================================================================
assert 'validate' in pars_d
vtag = pars_d.pop('validate')
#=======================================================================
# setup
#=======================================================================
wrkr.config_cf()
#=======================================================================
# validate by vtag
#=======================================================================
for k, modObj in {
'risk1':Risk1,
'dmg2':Dmg2,
}.items():
if not k == vtag: continue
#do the check
errors = wrkr.cf_check(modObj)
if not len(errors)==0:
raise Error('\'%s\' got some errors \n %s'%(vtag, errors))
    wrkr.cf_mark() #update the control file
    log.debug('finished')
Python | def plot_risk_ttl(self, #single risk plot of total results
pars_d=None,
logger=None,
ylabs = ['AEP', 'impacts'], #types of plots to generate
rkwargs=None,
):
#=======================================================================
# defaults
#=======================================================================
if logger is None: logger=self.logger
log = logger.getChild('plot_risk_ttl')
if pars_d is None: pars_d = self.pars_d
"""just let it pass... if the function is called the user wants to plot"""
#assert self.plot
#=======================================================================
# setup worker
#=======================================================================
wrkr = self._get_wrkr(RiskPlotr)
wrkr.setup_fromData(self.data_d) #setup w/ the pre-loaded data
#=======================================================================
# get plots
#=======================================================================
if rkwargs is None: rkwargs = self._get_kwargs(wrkr.__class__.__name__)
for ylab in ylabs:
fig = wrkr.plot_riskCurve(y1lab=ylab, **rkwargs)
        self.output_fig(fig)
Python | def run_expo(self): #execute dike exposure routines
log = self.logger.getChild('run_expo')
log.debug('start')
self.set_setup() #attach all the commons
self.inherit_fieldNames.add('init_q_d')
self.feedback.setProgress(5)
from misc.dikes.expo import Dexpo
#=======================================================================
# collect inputs
#=======================================================================
rlays_d = self.listView_expo_rlays.get_selected_layers()
#tside
if self.radioButton_v_tside_left.isChecked():
tside = 'Left'
else:
tside = 'Right'
#=======================================================================
# init
#=======================================================================
kwargs = {attn:getattr(self, attn) for attn in self.inherit_fieldNames}
wrkr = Dexpo(**kwargs)
self.feedback.setProgress(10)
#=======================================================================
# execute------
#=======================================================================
dike_vlay = wrkr.prep_dike(self.dike_vlay)
kwargs = {attn:getattr(self, attn) for attn in []}
dxcol, vlay_d = wrkr.get_dike_expo(rlays_d,
dtm_rlay = self.mMapLayerComboBox_dtm.currentLayer(),
dike_vlay = dike_vlay,
#transect pars
tside=tside,
write_tr=False, #loaded below
dist_dike=float(self.doubleSpinBox_dist_dike.value()),
dist_trans=float(self.doubleSpinBox_dist_trans.value()),
**kwargs)
self.feedback.setProgress(60)
expo_df = wrkr.get_fb_smry()
self.feedback.setProgress(80)
#=======================================================================
    # write/load to canvas----------
#=======================================================================
"""dont write any layers.. just load them and let the user write"""
#wrkr.output_expo_dxcol()
dexpo_fp = wrkr.output_expo_df()
#=======================================================================
# dike layer
#=======================================================================
if self.checkBox_expo_wDikes.isChecked():
        dike_vlay = wrkr.get_dikes_vlay() #get the dikes layer for writing (fixed index)
self._load_toCanvas(dike_vlay, logger=log.getChild('dike_vlay'))
#=======================================================================
# breach points
#=======================================================================
if self.checkBox_expo_breach_pts.isChecked():
breach_vlay_d = wrkr.get_breach_vlays()
self._load_toCanvas(list(breach_vlay_d.values()), logger=log.getChild('breachPts'))
#=======================================================================
# transects
#=======================================================================
if self.checkBox_expo_write_tr.isChecked():
self._load_toCanvas(wrkr.tr_vlay, logger=log.getChild('tr_vlay'))
log.info('loaded transect layer \'%s\' to canvas'%wrkr.tr_vlay.name())
#=======================================================================
# exposure crest points
#=======================================================================
if self.checkBox_expo_crestPts.isChecked():
self._load_toCanvas(list(wrkr.expo_vlay_d.values()), logger=log.getChild('expo_crestPts'))
#=======================================================================
# plots
#=======================================================================
if self.checkBox_expo_plot.isChecked():
wrkr._init_plt()
        #check for excessive plot windows
        if len(wrkr.sid_vals)>100:
            raise Error('too many plots!')
        elif len(wrkr.sid_vals)>5:
            plt_window = False
            log.warning('got %i plots... setting plt_window=False'%len(wrkr.sid_vals))
        else:
            plt_window = self.plt_window
for sidVal in wrkr.sid_vals:
fig = wrkr.plot_seg_prof(sidVal)
self.output_fig(fig, plt_window=plt_window)
self.feedback.setProgress(95)
#=======================================================================
# update gui
#=======================================================================
    self.lineEdit_v_dexpo_fp.setText(dexpo_fp) #fill the Join Areas filepath
#populate the Join Areas widget
self.scrollAreaWidgetContents_ja.clear_all() #clear for repeat runs
self.scrollAreaWidgetContents_ja.set_selections('event', list(rlays_d.values()))
log.info('finished Dike Expo w/ %s'%str(expo_df.shape))
    self.feedback.upd_prog(None)
Python | def run_rjoin(self): #join failure probabilities onto influence polygons
#=======================================================================
# setup
#=======================================================================
log = self.logger.getChild('run_rjoin')
log.debug('start')
self.set_setup() #attach all the commons
self.inherit_fieldNames.add('init_q_d')
self.feedback.setProgress(5)
from misc.dikes.rjoin import DikeJoiner
"""no reason to build the layers if they won't be loaded"""
assert self.loadRes, 'ensure \'Load Results to Canvas..\' is selected'
#=======================================================================
# init
#=======================================================================
kwargs = {attn:getattr(self, attn) for attn in self.inherit_fieldNames}
wrkr = DikeJoiner(**kwargs)
self.feedback.setProgress(10)
#==========================================================================
# load the data
#==========================================================================
wrkr.load_pfail_df(fp=self.lineEdit_ja_pfail_fp.text())
#get influence polygons {rasterLayerName:polygonLayer}
eifz_d = self.scrollAreaWidgetContents_ja.get_linked_layers(keyByFirst=True)
self.feedback.setProgress(40)
#==========================================================================
# execute
#==========================================================================
vlay_d = wrkr.join_pfails(eifz_d=eifz_d)
self.feedback.setProgress(80)
#=======================================================================
# outputs
#=======================================================================
self._load_toCanvas(list(vlay_d.values()), logger=log, style_fn = 'failPoly_graduated_reds.qml')
self.feedback.setProgress(95)
#=======================================================================
    # wrap
    #=======================================================================
    log.info('finished Join Areas w/ %i'%len(vlay_d))
    self.feedback.upd_prog(None)
Python | def _init_plt(self, #initialize matplotlib
#**kwargs
):
"""
calling this here so we get clean parameters each time the class is instanced
"""
self.logger.debug('_init_plt')
#=======================================================================
# imports
#=======================================================================
import matplotlib
matplotlib.use('Qt5Agg') #sets the backend (case sensitive)
matplotlib.set_loglevel("info") #reduce logging level
import matplotlib.pyplot as plt
    #set the styles
plt.style.use('default')
#default QGIS windows font
matplotlib.rc('font', **{
'family' : 'sans-serif',
#'sans-serif':'Tahoma',
'weight' : 'normal',
'size' : 8})
matplotlib.rcParams['font.family'] = 'sans-serif'
matplotlib.rcParams['font.sans-serif'] = ['Tahoma']
matplotlib.rcParams['axes.titlesize'] = 10 #set the figure title size
#spacing parameters
    matplotlib.rcParams['figure.autolayout'] = False #tight layout off (figures use constrained_layout instead)
#legends
matplotlib.rcParams['legend.title_fontsize'] = 'large'
self.plt, self.matplotlib = plt, matplotlib
    return {'plt':plt, 'matplotlib':matplotlib}
Python | def _init_fmtFunc(self, #setup impact formatting from two options
impactFmtFunc=None, #raw function
impactfmt_str=None, #ALTERNATIVE: python formatting string for building function
):
"""
called during init with a callable
generally overwritten by init_model() (with impactfmt_str)
"""
#=======================================================================
# defaults
#=======================================================================
"""whatever was passed during construction.. usually None"""
#get from string
if impactFmtFunc is None:
if impactfmt_str is None: impactfmt_str=self.impactfmt_str
assert isinstance(impactfmt_str, str)
impactFmtFunc = lambda x, fmt=impactfmt_str:'{:>{fmt}}'.format(x, fmt=fmt)
self.impactFmtFunc=impactFmtFunc
#check it
assert callable(self.impactFmtFunc)
try:
impactFmtFunc(1.2)
except Exception as e:
self.logger.warning('bad formatter: %s w/ \n %s'%(impactfmt_str, e))
    return
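# --- Example of the formatter built by _init_fmtFunc above -------------------
# impactfmt_str is a standard Python format spec applied through '{:>{fmt}}';
# the ",.0f" value shown here is an assumed control-file setting.
impactfmt_str = ",.0f"
impactFmtFunc = lambda x, fmt=impactfmt_str: '{:>{fmt}}'.format(x, fmt=fmt)
print(impactFmtFunc(1234567.891))   # -> '1,234,568'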
Python | def upd_impStyle(self): #update the plotting pars based on your attributes
"""
    taking instance variables (rather than the parser's section) because these are already typeset
usually called twice
1) before loading the control file, to build a default
Plotr.__init__()
2) after, to update values
Model.init_model()
default class values are used, unless matching parameters are passed in teh control file
    there are no checks on these plotting parameters
TODO: find a better way to not have to run this so many times
"""
#assert not self.cfPars_d is None, 'load the control file first!'
d = dict()
#loop through the default values
for k, v in self.impStyle_d.items():
if hasattr(self, k):
d[k] = getattr(self, k)
else: #just use default
d[k] = v
#re-insert hashtags
if 'color' in d:
d['color'] = d['color'].replace('?','#')
    self.impStyle_d = d
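# --- Illustration of the colour handling in upd_impStyle above ---------------
# Stored defaults keep colours with '?' in place of '#', and the hash is
# re-inserted before plotting. The dict contents are assumed example values.
impStyle_d = {'color': '?e6194b', 'linestyle': 'dashdot'}
impStyle_d['color'] = impStyle_d['color'].replace('?', '#')
print(impStyle_d)   # {'color': '#e6194b', 'linestyle': 'dashdot'}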
Python | def plot_impact_boxes(self, #box plots for each event
df, #frame with columns to turn into box plots
#labelling
title = None,
xlab=None, ylab=None, val_str=None,
impactFmtFunc=None, #tick label format function for impact values
            smry_method = 'sum', #series method for the summary provided in labels
            #figure parameters
figsize=None,
grid=False,
ylims_t = None, #tuple of yaxis limits
logger=None,
pkwargs = {},
):
"""
todo: migrate these generic plotters to a more logical module
"""
#======================================================================
# defaults
#======================================================================
if logger is None: logger=self.logger
log = logger.getChild('plot_impact_boxes')
plt, matplotlib = self.plt, self.matplotlib
if figsize is None: figsize = self.figsize
if impactFmtFunc is None: impactFmtFunc=self.impactFmtFunc
if title is None:
title = 'Boxplots on %i events'%len(df.columns)
log.debug('on %s'%str(df.shape))
#=======================================================================
# check
#=======================================================================
assert callable(impactFmtFunc)
#=======================================================================
# manipulate data
#=======================================================================
    #get a collection of arrays from the dataframe's columns
data = [ser.dropna().values for _, ser in df.items()]
log.debug('on %i'%len(data))
#======================================================================
# figure setup
#======================================================================
plt.close()
fig = plt.figure(figsize=figsize, constrained_layout = True)
#check for max
if len(data) > self.set_cnt_max:
log.warning('data set count exceeds max: %i... skipping'%len(data))
return fig
#axis setup
ax1 = fig.add_subplot(111)
#aep units
if not ylims_t is None:
ax1.set_ylim(ylims_t[0], ylims_t[1])
# axis label setup
fig.suptitle(title)
ax1.set_xlabel(xlab)
ax1.set_ylabel(ylab)
#=======================================================================
    # plot the boxplots
#=======================================================================
boxRes_d = ax1.boxplot(data, whis=1.5, **pkwargs)
#=======================================================================
# format axis labels
    #=======================================================================
    #build new labels
f = lambda idx: getattr(df.iloc[:, idx-1], smry_method)()
xfmtFunc = lambda idx:'%s (%i): %s=%s'%(
df.columns[idx-1], len(df.iloc[:, idx-1].dropna()), smry_method, impactFmtFunc(f(idx)))
l = [xfmtFunc(value) for value in ax1.get_xticks()]
#adjust locations
og_locs = ax1.get_xticks()
ax1.set_xticks(og_locs-0.3)
    #apply new labels
Text_l = ax1.set_xticklabels(l, rotation=90, va='center', y=.5, color='red',)
self._tickSet(ax1, yfmtFunc=impactFmtFunc)
#=======================================================================
# post
#=======================================================================
self._postFmt(ax1, grid=grid, val_str=val_str,
xLocScale=.2, yLocScale=.8,
legendLoc=None, #boxplots do not have legends
)
return fig
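The tick-label scheme built above ('<event> (<valid count>): <smry_method>=<formatted impact>') is easier to see in isolation; a small self-contained sketch (0-based indexing here, whereas boxplot ticks are 1-based, hence the idx-1 in the code above):

# standalone illustration of the boxplot tick-label scheme
import pandas as pd
df = pd.DataFrame({'e_050yr': [0.0, 1200.0, 340.0],
                   'e_100yr': [150.0, 4800.0, None]})
impactFmtFunc = lambda x: '{:,.0f}'.format(x)  # same default style as the plotter
smry_method = 'sum'
labels = ['%s (%i): %s=%s' % (
              df.columns[i], len(df.iloc[:, i].dropna()), smry_method,
              impactFmtFunc(getattr(df.iloc[:, i], smry_method)()))
          for i in range(len(df.columns))]
print(labels)  # ['e_050yr (3): sum=1,540', 'e_100yr (2): sum=4,950']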
Python | def plot_impact_hist(self, #stacked histograms for each column series
df,
#labelling
title = None,
xlab=None, ylab='asset count', val_str=None,
impactFmtFunc=None, #tick label format function for impact values
#figure parameters
figsize=None,
grid=True,
xlims_t = None, #tuple of x axis limits
logger=None,
pkwargs = {},
):
"""
todo: migrate these generic plotters to a more logical module
"""
#======================================================================
# defaults
#======================================================================
if logger is None: logger=self.logger
log = logger.getChild('plot_impact_hist')
plt, matplotlib = self.plt, self.matplotlib
if figsize is None: figsize = self.figsize
if impactFmtFunc is None: impactFmtFunc=self.impactFmtFunc
if title is None:
title = 'Histogram on %i Events'%len(df.columns)
log.debug('on %s'%str(df.shape))
#=======================================================================
# manipulate data
#=======================================================================
#get a collection of arrays from the dataframe's columns
data = [ser.dropna().values for _, ser in df.items()]
#======================================================================
# figure setup
#======================================================================
plt.close()
fig = plt.figure(figsize=figsize, constrained_layout = True)
#check for max
if len(data) > self.set_cnt_max:
log.warning('data set count exceeds max: %i... skipping'%len(data))
return fig
#axis setup
ax1 = fig.add_subplot(111)
#aep units
if not xlims_t is None:
ax1.set_xlim(xlims_t[0], xlims_t[1])
# axis label setup
fig.suptitle(title)
ax1.set_xlabel(xlab)
ax1.set_ylabel(ylab)
#=======================================================================
# plot the histogram
#=======================================================================
histVals_ar, bins_ar, patches = ax1.hist(
data, bins='auto', stacked=False, label=df.columns.to_list(),
alpha=0.9,
**pkwargs)
#=======================================================================
# post
#=======================================================================
self._tickSet(ax1, xfmtFunc=impactFmtFunc)
self._postFmt(ax1, grid=grid, val_str=val_str,
xLocScale=.3, yLocScale=.8,
)
return fig
Python | def _get_val_str(self, #helper to get value string for writing text on the plot
val_str, #can't be a kwarg... allowing None
impactFmtFunc=None,
):
"""
generally just returns the val_str
but also provides some special handles
self.matplotlib.__version__
"""
#=======================================================================
# defaults
#=======================================================================
if impactFmtFunc is None: impactFmtFunc=self.impactFmtFunc
if val_str is None:
val_str = self.val_str
#=======================================================================
# special keys
#=======================================================================
if isinstance(val_str, str):
if val_str=='*default':
assert isinstance(self.ead_tot, float)
val_str='total annualized impacts = ' + impactFmtFunc(self.ead_tot)
elif val_str=='*no':
val_str=None
elif val_str.startswith('*'):
raise Error('unrecognized val_str: %s'%val_str)
return val_str
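A short standalone mock of how the special '*' keys resolve; ead_tot and the format function stand in for the instance attributes:

# standalone mock of the '*' handling in _get_val_str()
def _get_val_str(val_str, ead_tot=12345.6, impactFmtFunc=lambda x: '{:,.0f}'.format(x)):
    if isinstance(val_str, str):
        if val_str == '*default':
            return 'total annualized impacts = ' + impactFmtFunc(ead_tot)
        elif val_str == '*no':
            return None
        elif val_str.startswith('*'):
            raise ValueError('unrecognized val_str: %s' % val_str)
    return val_str

assert _get_val_str('*default') == 'total annualized impacts = 12,346'
assert _get_val_str('*no') is None
assert _get_val_str('free text') == 'free text'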
Python | def write(self,#store this scenario to file
#filepaths
out_dir = None,
logger=None,
):
#=======================================================================
# defaults
#=======================================================================
if logger is None: logger=self.logger
log = logger.getChild('write')
if out_dir is None:
out_dir = os.path.join(self.parent.out_dir, self.name)
if not os.path.exists(out_dir):os.makedirs(out_dir)
self.out_dir=out_dir #set this
self.resname = self.name #normally set by risk models
log.info('set out_dir to: %s'%out_dir)
#=======================================================================
#set the new control file
#=======================================================================
#=======================================================================
# #duplicate
# cf_fp = os.path.join(out_dir, 'cf_controlFile_%s.txt'%self.name)
# shutil.copy2(self.cf_fp,cf_fp)
#=======================================================================
#open the copy
cpars_raw = configparser.ConfigParser(inline_comment_prefixes='#')
log.info('reading parameters from \n %s'%cpars_raw.read(self.cf_fp))
#clear filepath sections
for sectName in cpars_raw.sections():
if sectName.endswith('_fps'):
log.info('clearing section \"%s\"'%sectName)
assert cpars_raw.remove_section(sectName) #remove it
cpars_raw.add_section(sectName) #add it back empty
#write the config file
cf_fp = os.path.join(out_dir, 'cf_controlFile_%s.txt'%self.name)
with open(cf_fp, 'w') as configfile:
cpars_raw.write(configfile)
self.cf_fp = cf_fp
#=======================================================================
# #add new data
#=======================================================================
"""each of these makes a new call to set_cf_pars"""
self.set_cf_pars({
'parameters':({'name':self.name}, '#copy')
})
#update
"""each of these should write using intelligent name/path/index and update the control file"""
meta_d = dict()
for dtag, data in self.data_d.items():
assert dtag in self.out_funcs_d, 'missing function key on %s'%dtag
#retrieve the outputter function
assert hasattr(self, self.out_funcs_d[dtag]), self.out_funcs_d[dtag]
f = getattr(self, self.out_funcs_d[dtag])
#output the data using the function
meta_d[dtag] = f(df=data, upd_cf=True, logger=log)
log.info('finished on %i \n %s'%(len(meta_d), meta_d))
return meta_d
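The non-obvious step in write() is blanking the '*_fps' sections while keeping everything else; a self-contained configparser sketch of the same pattern (file contents here are illustrative):

# standalone sketch: blank out any '*_fps' section while keeping the rest
import configparser, io

cpars = configparser.ConfigParser(inline_comment_prefixes='#')
cpars.read_string("[parameters]\nname = base\n[risk_fps]\ndmgs = C:/old/path.csv\n")

for sectName in cpars.sections():
    if sectName.endswith('_fps'):
        assert cpars.remove_section(sectName)  # drop the stale filepaths
        cpars.add_section(sectName)            # re-add the section, now empty

buf = io.StringIO()
cpars.write(buf)
print(buf.getvalue())  # [risk_fps] survives but holds no entries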
Python | def merge_expectations(self, #merge expectation containers
modLevel
):
"""
because we're merging Risk and Impact model classes, need to combine the expectation pars
"""
assert modLevel in self.modelTypes_d
#loop through each of your siblings
for ModObj in self.modelTypes_d[modLevel]:
#print('collecting from %s'%ModObj.__name__)
#===================================================================
# loop each expectation container
#===================================================================
for attn in self.collect_attns:
#get from the sibling
sib_att_d = copy.deepcopy(getattr(ModObj, attn))
if not hasattr(self, attn):
new_att_d = sib_att_d
#=======================================================================
# merge
#=======================================================================
else:
#start with a copy of the old
new_att_d = copy.deepcopy(getattr(self, attn))
for k, sub_d in sib_att_d.items():
if k in new_att_d:
new_att_d[k] = {**new_att_d[k], **sub_d}
else:
new_att_d[k] = sub_d
#set it
setattr(self, attn, new_att_d)
#===================================================================
# special fixes
#===================================================================
"""need to remove the dmgs from the mandatory expectations"""
if 'dmgs' in self.exp_pars_md['risk_fps']:
del self.exp_pars_md['risk_fps']['dmgs']
else:
assert modLevel=='L1'
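The two-level merge above reduces to a small dictionary pattern; a standalone sketch with toy expectation containers:

# standalone sketch of the nested-dict merge in merge_expectations()
import copy

own = {'parameters': {'name': str}, 'risk_fps': {'dmgs': str}}
sib = {'parameters': {'prec': int}, 'results_fps': {'r_ttl': str}}

new_att_d = copy.deepcopy(own)
for k, sub_d in sib.items():
    if k in new_att_d:
        new_att_d[k] = {**new_att_d[k], **sub_d}  # sibling keys win on collision
    else:
        new_att_d[k] = sub_d

assert new_att_d['parameters'] == {'name': str, 'prec': int}
assert 'results_fps' in new_att_d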
Python | def rpDia(): #helper to connect slots and show the prep dialog
"""only executing setup once called to simplify initial loading"""
_ = self.RPrepDialog._setup()
self.RPrepDialog.pushButton_HS_prep.clicked.connect(self.run_rPrep)
self.RPrepDialog.show()
Python | def build_scenario(self): # Generate a CanFlood project from scratch
"""
This tab facilitates the creation of a Control File from user specified parameters and inventory,
as well as providing general file control variables for the other tools in the toolset.
"""
log = self.logger.getChild('build_scenario')
log.info('build_scenario started')
#tag = self.linEdit_ScenTag.text() #set the scenario tag from the user provided name
#wdir = self.lineEdit_wdir.text() #pull the wd filepath from the user provided in 'Browse'
#=======================================================================
# collect inputs
#=======================================================================
self.set_setup(set_cf_fp=False, set_finv=False)
prec = str(int(self.spinBox_s_prec.value())) #need a string for setting
#=======================================================================
# prechecks
#=======================================================================
if self.radioButton_SS_fpRel.isChecked():
raise Error('Relative filepaths not implemented')
self.feedback.upd_prog(10)
#=======================================================================
# run the control file builder----
#=======================================================================
#initialize
kwargs = {attn:getattr(self, attn) for attn in self.inherit_fieldNames}
wrkr = Preparor(**kwargs)
self.feedback.upd_prog(20)
#=======================================================================
# #copy the template
#=======================================================================
cf_path = wrkr.copy_cf_template()
self.feedback.upd_prog(75)
#=======================================================================
# #set some basics
#=======================================================================
wrkr.upd_cf_first(scenarioName=self.linEdit_ScenTag.text(), **{'prec':prec})
log.info("default CanFlood model config file created :\n %s"%cf_path)
"""NO. should only populate this automatically from ModelControlFile.Browse
self.lineEdit_curve.setText(os.path.normpath(os.path.join(wdir, 'CanFlood - curve set 01.xls')))"""
"""TODO:
write aoi filepath to scratch file
"""
self.feedback.upd_prog(95)
#=======================================================================
# ui updates
#=======================================================================
#display the control file in the dialog
self.lineEdit_cf_fp.setText(cf_path)
#======================================================================
# wrap
#======================================================================
log.push('control file created for \'%s\''%self.tag)
self.feedback.upd_prog(None)
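The `kwargs = {attn: getattr(self, attn) ...}` line is the dialog-to-worker hand-off reused by the other run_ methods below; a minimal standalone sketch (class and attribute names are placeholders):

# standalone sketch of passing a dialog's session attributes into a worker
class _Dialog:
    inherit_fieldNames = ['logger', 'out_dir', 'tag']
    def __init__(self):
        self.logger, self.out_dir, self.tag = 'log-stub', './working', 'demo'

class _Worker:  # stand-in for Preparor/Rsamp/Vali
    def __init__(self, logger=None, out_dir=None, tag=None):
        self.logger, self.out_dir, self.tag = logger, out_dir, tag

dia = _Dialog()
kwargs = {attn: getattr(dia, attn) for attn in dia.inherit_fieldNames}
wrkr = _Worker(**kwargs)  # worker now shares the dialog's session settings
assert wrkr.tag == 'demo'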
Python | def run_rPrep(self):
log = self.logger.getChild('run_rPrep')
start = datetime.datetime.now()
log.info('start \'run_rPrep\' at %s'%start)
"""the buttons have been moved onto the sub-dialog
but we're keeping all the functions here for better integration
treats the popout as more of an extension"""
subDia = self.RPrepDialog
#=======================================================================
# assemble/prepare inputs
#=======================================================================
self.set_setup(set_finv=False)
rlayRaw_l = list(self.listView_expo_rlays.get_selected_layers().values())
aoi_vlay = self.aoi_vlay
#raster prep parameters
clip_rlays = subDia.checkBox_HS_clip.isChecked()
allow_download = subDia.checkBox_HS_dpConv.isChecked()
allow_rproj = subDia.checkBox_HS_rproj.isChecked()
scaleFactor = subDia.doubleSpinBox_HS_sf.value()
#=======================================================================
# precheck
#=======================================================================
#check rasters
for rlay in rlayRaw_l:
if not isinstance(rlay, QgsRasterLayer):
raise Error('unexpected type on raster layer')
#raster prep checks
assert isinstance(clip_rlays, bool)
assert isinstance(allow_download, bool)
assert isinstance(allow_rproj, bool)
assert isinstance(scaleFactor, float)
if clip_rlays:
assert isinstance(aoi_vlay, QgsVectorLayer), 'for clip_rlays=True, must provide AOI'
self.feedback.setProgress(10)
#=======================================================================
# execute
#=======================================================================
kwargs = {attn:getattr(self, attn) for attn in self.inherit_fieldNames}
wrkr = Rsamp(**kwargs)
#execute the tool
rlay_l = wrkr.runPrep(rlayRaw_l,
aoi_vlay = aoi_vlay,
clip_rlays = clip_rlays,
allow_download = allow_download,
allow_rproj = allow_rproj,
scaleFactor = scaleFactor,
)
#=======================================================================
# load results onto list widget
#=======================================================================
self.listView_expo_rlays.clear_checks()
if self.checkBox_loadres.isChecked():
log.debug('loading %i result layers onto widget'%len(rlay_l))
for rlay in rlay_l:
self._load_toCanvas(rlay, logger=log)
#update the widget
self.listView_expo_rlays.populate_layers() #refresh all
self.listView_expo_rlays.check_byName([l.name() for l in rlay_l])
else:
log.warning('prepped rasters not loaded to canvas!')
#=======================================================================
# wrap
#=======================================================================
self.feedback.upd_prog(None) #set the progress bar back down to zero
log.push('run_rPrep finished in %s'%(datetime.datetime.now() - start))
return
Python | def run_rsamp(self): #execute raster sampler
log = self.logger.getChild('run_rsamp')
start = datetime.datetime.now()
log.info('start \'run_rsamp\' at %s'%start)
self.feedback.setProgress(1)
self.set_setup(set_finv=True) #common setup routines and attachments
#=======================================================================
# assemble/prepare inputs-----
#=======================================================================
rlay_l = list(self.listView_expo_rlays.get_selected_layers().values())
finv = self.finv_vlay #set by set_setup()
#=======================================================================
# #exposure configuration
#=======================================================================
#pull parameters from dialog
psmp_stat, psmp_fieldName, as_inun, dtm_rlay, dthresh = self._get_rsamp_pars()
self.feedback.setProgress(5)
#=======================================================================
# checks
#=======================================================================
for rlay in rlay_l:
if not isinstance(rlay, QgsRasterLayer):
raise Error('unexpected type on raster layer')
assert rlay.crs()==self.qproj.crs(), 'raster layer CRS does not match project'
self.feedback.setProgress(10)
#======================================================================
# execute----
#======================================================================
#build the sample
kwargs = {attn:getattr(self, attn) for attn in self.inherit_fieldNames}
wrkr = Rsamp(**kwargs)
#execute the tool
res_vlay = wrkr.run(rlay_l, finv,
psmp_stat=psmp_stat, psmp_fieldName=psmp_fieldName,
as_inun=as_inun, dtm_rlay=dtm_rlay, dthresh=dthresh,
)
self.feedback.setProgress(90)
#check it
wrkr.check()
#save csv results to file
wrkr.write_res(res_vlay, )
#update control file
wrkr.update_cf(self.cf_fp)
#=======================================================================
# plots
#=======================================================================
if self.checkBox_ras_pBox.isChecked():
fig = wrkr.plot_boxes()
self.output_fig(fig)
if self.checkBox_ras_pHist.isChecked():
fig = wrkr.plot_hist()
self.output_fig(fig)
#======================================================================
# post---------
#======================================================================
"""
the hazard sampler sets up a lot of the other tools
"""
#======================================================================
# add to map
#======================================================================
if self.checkBox_loadres.isChecked():
self._load_toCanvas(res_vlay, logger=log)
#======================================================================
# update event names
#======================================================================
self.event_name_set = [lay.name() for lay in rlay_l]
log.info('set %i event names: \n %s'%(len(self.event_name_set),
self.event_name_set))
#======================================================================
# populate Event Likelihoods table
#======================================================================
l = self.event_name_set
for tbl in [self.fieldsTable_EL]:
tbl.setRowCount(len(l)) #add this many rows
for rindx, ename in enumerate(l):
tbl.setItem(rindx, 0, QTableWidgetItem(ename))
log.info('populated tables with event names')
self.feedback.setProgress(95)
#======================================================================
# populate Conditional P
#======================================================================
"""todo: some more intelligent populating"""
#get the mlcb
try:
self._CP_clear() #clear everything
#loop through each of the raster layers and collect those with 'fail' in the name
rFail_l = []
for rlay in rlay_l:
if 'fail' in rlay.name():
rFail_l.append(rlay)
#loop through and re-key
rFail_d = dict()
for indxr, rlay in enumerate(rFail_l):
rFail_d[list(self.ls_cb_d.keys())[indxr]] = rlay
#loop through each combobox pair and assign a raster to it
res_d = dict()
for lsKey, (mlcb_h, mlcb_v) in self.ls_cb_d.items():
if lsKey in rFail_d:
mlcb_h.setLayer(rFail_d[lsKey])
res_d[lsKey] = rFail_d[lsKey].name()
#wrap
log.info('populated %i Conditional P dialogs'%len(res_d))
except Exception as e:
log.error('failed to populate lisamp fields w/\n %s'%e)
#======================================================================
# wrap
#======================================================================
self.feedback.upd_prog(None) #set the progress bar back down to zero
log.push('run_rsamp finished in %s'%(datetime.datetime.now() - start))
return
Python | def run_validate(self):
"""only validating the text in the control file for now (not the data objects)
"""
log = self.logger.getChild('run_validate')
log.info('user pressed \'pushButton_Validate\'')
#======================================================================
# collect form ui
#======================================================================
self._set_setup()
#===================================================================
# setup validation worker
#===================================================================
kwargs = {attn:getattr(self, attn) for attn in self.inherit_fieldNames}
wrkr = Vali(**kwargs)
#======================================================================
# assemble the validation parameters
#======================================================================
#import the class objects
from model.dmg2 import Dmg2
from model.risk2 import Risk2
from model.risk1 import Risk1
#populate all possible test parameters
vpars_all_d = {
'risk1':(self.checkBox_Vr1, Risk1),
'dmg2':(self.checkBox_Vi2, Dmg2),
'risk2':(self.checkBox_Vr2, Risk2),
#'risk3':(self.checkBox_Vr3, None),
}
#loop and collect based on check boxes
vpars_d = dict()
for vtag, (checkBox, modObj) in vpars_all_d.items():
if not checkBox.isChecked(): continue #skip this one
vpars_d[vtag] = modObj
self.feedback.upd_prog(10)
#======================================================================
# loop through each possibility and validate
#======================================================================
res_d = dict()
for vtag, modObj in vpars_d.items():
log.debug('checking \"%s\"'%vtag)
#===================================================================
# parameter value/type check
#===================================================================
errors = wrkr.cf_check(modObj)
# #report on all the errors
for indxr, msg in enumerate(errors):
log.warning('%s error %i: \n%s'%(vtag, indxr+1, msg))
#===================================================================
# update control file
#===================================================================
wrkr.cf_mark()
self.feedback.upd_prog(80/len(vpars_d), method='append')
#store
if len(errors) == 0:
res_d[vtag] = True
else:
res_d[vtag] = False
#=======================================================================
# wrap
#=======================================================================
self.feedback.upd_prog(100)
log.push('passed %i (of %i) validations. see log for errors'%(
np.array(list(res_d.values())).sum(), len(vpars_d)
))
self.feedback.upd_prog(None)
return
Python | def run_pStack(self): #single risk plot of total results
"""
similar to plotRisk for now... may choose to expand later
"""
log = self.logger.getChild('run_pStack')
log.info('user pushed \'run_pStack\'')
#=======================================================================
# collect inputs
#=======================================================================
self._set_setup(set_cf_fp=True)
self.feedback.setProgress(5)
#=======================================================================
# setup and load
#=======================================================================
kwargs = {attn:getattr(self, attn) for attn in self.inherit_fieldNames}
wrkr = results.attribution.Attr(**kwargs).setup()
self.feedback.setProgress(10)
stack_dxind, sEAD_ser = wrkr.get_stack()
self.feedback.setProgress(20)
#=======================================================================
# #execute
#=======================================================================
if self.checkBox_RP_aep.isChecked():
fig = wrkr.plot_stackdRCurves(stack_dxind, sEAD_ser, y1lab='AEP')
self.output_fig(fig)
self.feedback.upd_prog(30, method='append')
if self.checkBox_RP_ari.isChecked():
fig = wrkr.plot_stackdRCurves(stack_dxind, sEAD_ser, y1lab='impacts')
self.output_fig(fig)
self.feedback.upd_prog(30, method='append')
#=======================================================================
# wrap
#=======================================================================
self.feedback.upd_prog(None) #set the progress bar back down to zero
log.push('pStack finished')
Python | def init_model(self, #load and attach control file parameters
check_pars=True,
):
"""
should be called by the model's own 'setup()' func
during standalones and Dialog runs
"""
log = self.logger.getChild('init_model')
#=======================================================================
# #parameter setup-----
#=======================================================================
#=======================================================================
# load the control file
#=======================================================================
cf_fp = self.cf_fp
if cf_fp == '':
raise Error('passed an empty cf_fp!')
assert os.path.exists(cf_fp), 'provided parameter file path does not exist \n %s'%cf_fp
self.pars = configparser.ConfigParser(inline_comment_prefixes='#')
log.info('reading parameters from \n %s'%self.pars.read(cf_fp))
#=======================================================================
# filepaths
#=======================================================================
if not self.absolute_fp:
log.info('converting relative filepaths')
self.pars = self._cf_relative(self.pars)
#=======================================================================
# check against expectations
#=======================================================================
if check_pars:
errors = self._get_cf_miss(self.pars)
#report on all the errors
for indxr, msg in enumerate(errors):
log.error('error %i: \n%s'%(indxr+1, msg))
#final trip
"""lets us loop through all the checks before failing"""
if not len(errors)==0:
raise Error('failed to validate ControlFile w/ %i error(s)... see log'%len(errors))
#=======================================================================
# attach control file parameter values
#=======================================================================
self.cfPars_d = self.cf_attach_pars(self.pars)
#=======================================================================
# #check our validity tag
#=======================================================================
if check_pars:
if not self.valid_par is None:
if not getattr(self, self.valid_par):
raise Error('control file not validated for \'%s\'. please run InputValidator'%self.valid_par)
#=======================================================================
# update plotting handles
#=======================================================================
if hasattr(self, 'upd_impStyle'):
self.upd_impStyle()
self._init_fmtFunc()
self.resname = '%s_%s_%s'%(self.valid_par, self.name, self.tag)
"""TODO: consolidate this with ComWrkr.resname"""
#=======================================================================
# #wrap
#=======================================================================
self.logger.debug('finished init_model on Model')
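Two configparser details matter in init_model(): read() returns the list of files it actually parsed (which is why its result is logged), and inline_comment_prefixes='#' strips trailing comments from values. A small standalone sketch (paths are temporary/illustrative):

# standalone sketch of the control-file read performed by init_model()
import configparser, os, tempfile

cf_txt = "[parameters]\nname = demo  #inline comments are stripped\nprec = 2\n"
cf_fp = os.path.join(tempfile.mkdtemp(), 'CanFlood_demo.txt')
with open(cf_fp, 'w') as f:
    f.write(cf_txt)

pars = configparser.ConfigParser(inline_comment_prefixes='#')
print('reading parameters from \n %s' % pars.read(cf_fp))  # -> [cf_fp]
assert pars['parameters']['name'] == 'demo'   # comment removed, whitespace trimmed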
Python | def cf_attach_pars(self, #load parameters from file
cpars,
setAttr=True, #whether to save each attribute
):
"""
cf_chk_pars() should be run first to make sure parameter membership and type matches expectations
here we just update every parameter value found:
on the class AND in the ControlFile
"""
#=======================================================================
# defaults
#=======================================================================
log = self.logger.getChild('cf_attach_pars')
#=======================================================================
# precheck
#=======================================================================
assert isinstance(cpars, configparser.ConfigParser)
#=======================================================================
# loop and retrieve
#=======================================================================
cpars_d = dict() #set values
no_d = dict() #not set values
noCnt = 0
for sectName in cpars.sections():
cpars_d[sectName], no_d[sectName] = dict(), dict() #add the page
log.debug('loading %i parameters from section \'%s\''%(len(cpars[sectName]), sectName))
#loop through each variable name/value in this section
for varName, valRaw in cpars[sectName].items():
#check we care about this
if not hasattr(self, varName):
log.debug('passed variable \'%s\' not found on class... skipping'%varName)
no_d[sectName][varName] = valRaw
noCnt+=1
continue
#retrieve the typeset value
pval = self._get_from_cpar(cpars, sectName, varName, logger=log) #get the typeset variable
#store it
cpars_d[sectName][varName] = pval
#======================================================================
# attach all the parameters
#======================================================================
cnt = 0
if setAttr:
for sectName, spars_d in cpars_d.items():
for varnm, val in spars_d.items():
setattr(self, varnm, val)
log.debug('set %s=%s'%(varnm, val))
cnt +=1
#check types
self._chk_attributes()
#=======================================================================
# wrap
#=======================================================================
log.info('attached %i parameters to self (skipped %i)'%(cnt, noCnt))
return cpars_d
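The attach-only-known-parameters rule in cf_attach_pars() boils down to a hasattr gate plus typesetting; a toy sketch (the one-line typing below is a crude stand-in for _get_from_cpar):

# standalone sketch: only control-file entries that match an existing class
# attribute are typeset and attached; everything else is skipped and counted
class _Model:
    name, prec = 'default', 2   # class defaults double as the expected parameter set

m, skipped = _Model(), 0
cf_section = {'name': 'scenario01', 'prec': '4', 'unknown_par': 'x'}
for varName, valRaw in cf_section.items():
    if not hasattr(m, varName):
        skipped += 1            # not an expected parameter... leave it alone
        continue
    pval = type(getattr(m, varName))(valRaw)  # crude stand-in for _get_from_cpar typing
    setattr(m, varName, pval)

assert (m.name, m.prec, skipped) == ('scenario01', 4, 1)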
Python | def _cfFile_relative(self, #adding basedir to the control file
cf_fp=None, #control file
sections=['dmg_fps', 'risk_fps'], #parameter sections to manipulate
logger=None,
**kwargs):
"""wraper to work with the control file (rather than the configparser"""
#=======================================================================
# defaults
#=======================================================================
if logger is None: logger=self.logger
log=logger.getChild('_cfFile_relative')
if cf_fp is None: cf_fp=self.cf_fp
assert not self.absolute_fp
#=======================================================================
# load the control file
#=======================================================================
pars = configparser.ConfigParser(inline_comment_prefixes='#')
_ = pars.read(cf_fp)
pars = self._cf_relative(pars, sections=sections, warn=False, logger=log, **kwargs)
#=======================================================================
# write the config file
#=======================================================================
with open(cf_fp, 'w') as configfile:
pars.write(configfile)
#=======================================================================
# wrap
#=======================================================================
log.info('set ControlFile to relative: %s'%(cf_fp))
return cf_fp
Python | def _get_finv_cnest(self, #resolve column group relations
df, #finv data
):
""" this would have been easier with a dxcol"""
#======================================================================
# get prefix values (using elv columns)
#======================================================================
#pull all the elv columns
tag_coln_l = df.columns[df.columns.str.endswith('_elv')].tolist()
assert len(tag_coln_l) > 0, 'no \'elv\' columns found in inventory'
assert tag_coln_l[0] == 'f0_elv', 'expected first tag column to be \'f0_elv\''
#get nested prefix values
prefix_l = [coln[:2] for coln in tag_coln_l]
#check
for e in prefix_l:
assert e.startswith('f'), 'bad prefix: \'%s\'.. check field names'%e
#=======================================================================
# add each nest column name
#=======================================================================
cdf = pd.DataFrame(columns=df.columns, index=['ctype', 'nestID', 'bname']) #metadata frame describing each finv column
"""
view(cdf)
"""
for pfx in prefix_l:
l = [e for e in df.columns if e.startswith('%s_'%pfx)]
for e in l:
cdf.loc['nestID', e] = pfx
cdf.loc['ctype', e] = 'nest'
cdf.loc['bname', e] = e.replace('%s_'%pfx, '')
#set flag for mitigation columns
cdf.loc['ctype', cdf.columns.str.startswith('mi_')] = 'miti'
return cdf, prefix_l
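# --- Illustrative sketch (not part of the original CanFlood source): how the column
# --- metadata frame built above is organised; the column names are hypothetical.
import pandas as pd

finv_cols = ['f0_elv', 'f0_scale', 'f1_elv', 'f1_scale', 'mi_Lthresh']
cdf = pd.DataFrame(columns=finv_cols, index=['ctype', 'nestID', 'bname'])
for pfx in ['f0', 'f1']:
    for coln in [c for c in finv_cols if c.startswith('%s_'%pfx)]:
        cdf.loc['nestID', coln] = pfx              #which nest group the column belongs to
        cdf.loc['ctype', coln] = 'nest'
        cdf.loc['bname', coln] = coln.replace('%s_'%pfx, '')  #base name, e.g. 'elv'
cdf.loc['ctype', cdf.columns.str.startswith('mi_')] = 'miti'   #flag mitigation columns
print(cdf)  #f0_*/f1_* columns get nestID f0/f1; mi_Lthresh is flagged 'miti'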
Python | def build_exp_finv(self, #assemble the expanded finv
group_cnt = None, #number of groups to expect per prefix
):
"""
initial loading of the finv is done in load_finv()
here we pivot out to 1nest on bids
"""
#======================================================================
# defaults
#======================================================================
log = self.logger.getChild('build_exp_finv')
fdf = self.data_d['finv']
finv_cdf = self.finv_cdf.copy() #metadata for finv columns. see load_finv()
cid, bid = self.cid, self.bid
if group_cnt is None: group_cnt = self.group_cnt
bcolns = ['gels'] #columns to map back onto/copy over to each row of the expanded finv
#======================================================================
# group_cnt defaults
#======================================================================
assert isinstance(group_cnt, int)
exp_fcolns = [cid, 'fscale', 'felv']
if group_cnt == 2: #Risk1
pass
elif group_cnt == 4: #Dmg2 and Risk2
"""fcap is optional"""
exp_fcolns = exp_fcolns + ['ftag']
else:
raise Error('bad group_cnt %i'%group_cnt)
#======================================================================
# precheck
#======================================================================
assert fdf.index.name == cid, 'bad index on fdf'
#======================================================================
# expand: nested entries---------------
#======================================================================
bdf = None
for prefix, fcolsi_df in finv_cdf.drop('ctype', axis=0).dropna(axis=1).T.groupby('nestID', axis=0):
#get slice and clean
df = fdf.loc[:, fcolsi_df.index].dropna(axis=0, how='all').sort_index(axis=1)
#get clean column names
df.columns = df.columns.str.replace('%s_'%prefix, 'f')
df = df.reset_index()
df['nestID'] = prefix
#add to main
if bdf is None:
bdf = df
else:
bdf = bdf.append(df, ignore_index=True, sort=False)
log.info('for \'%s\' got %s'%(prefix, str(df.shape)))
#==================================================================
# #add back in other needed columns
#==================================================================
boolcol = fdf.columns.isin(bcolns) #additional columns to pivot out
if boolcol.any(): #if we are only linking in gels, these may not exist
bdf = bdf.merge(fdf.loc[:, boolcol], on=cid, how='left',validate='m:1')
log.debug('joined back in %i columns: %s'%(
boolcol.sum(), fdf.loc[:, boolcol].columns.tolist()))
#wrap
log.info('expanded inventory from %i nest sets %s to finv %s'%(
len(finv_cdf.loc['nestID', :].dropna(axis=0).unique()), str(fdf.shape), str(bdf.shape)))
#set indexers
bdf[bid] = bdf.index
bdf.index.name=bid
#======================================================================
# check
#======================================================================
miss_l = set(exp_fcolns).difference(bdf.columns)
assert len(miss_l) == 0, miss_l
#======================================================================
# adjust fscale--------------
#======================================================================
"""
view(bdf)
"""
boolidx = bdf['fscale'].isna()
if boolidx.any():
log.info('setting %i null fscale values to 1'%boolidx.sum())
bdf.loc[:, 'fscale'] = bdf['fscale'].fillna(1.0)
#======================================================================
# convert heights ----------
#======================================================================
s = bdf.loc[:, 'felv']
log.info('\'%s\' felv: \n min=%.2f, mean=%.2f, max=%.2f'%(
self.felv, s.min(), s.mean(), s.max()))
if self.felv == 'ground':
assert not self.as_inun
assert 'gels' in bdf.columns, 'missing gels column'
assert bdf['gels'].notna().all()
bdf.loc[:, 'felv'] = bdf['felv'] + bdf['gels']
#log.info('converted asset ground heights to datum elevations')
s = bdf.loc[:, 'felv']
log.info('converted felv from \'ground\' to \'datum\' \n min=%.2f, mean=%.2f, max=%.2f'%(
s.min(), s.mean(), s.max()))
elif self.felv=='datum':
log.debug('felv = \'%s\' no conversion'%self.felv)
else:
raise Error('unrecognized felv=%s'%self.felv)
#=======================================================================
# add mitigation data---
#=======================================================================
if self.apply_miti:
#get data
bdf = bdf.join(fdf.loc[:, finv_cdf.columns[finv_cdf.loc['ctype', :]=='miti']],
on=cid)
#=======================================================================
# checks
#=======================================================================
"""check_finv does this now
for coln in ['ftag']:
bx = bdf[coln].isna()
if bx.any():
log.debug('\n%s'%bdf.loc[bx, :])
raise Error('got %i \'%s\' nulls...see logger'%(bx.sum(), coln))"""
#======================================================================
# wrap
#======================================================================
log.info('finished with %s'%str(bdf.shape))
self.bdf = bdf
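# --- Illustrative sketch (not part of the original CanFlood source): the wide-to-long
# --- pivot performed above, on a tiny hypothetical inventory. pd.concat is used here;
# --- the method above relies on the older DataFrame.append API.
import pandas as pd

fdf = pd.DataFrame({'f0_elv': [10.0, 12.0], 'f0_scale': [1.0, 2.0],
                    'f1_elv': [11.0, None], 'f1_scale': [0.5, None]},
                   index=pd.Index([1, 2], name='cid'))
frames = []
for pfx in ['f0', 'f1']:
    df = fdf.filter(like='%s_'%pfx).dropna(how='all')  #drop assets with no data in this nest
    df.columns = df.columns.str.replace('%s_'%pfx, 'f', regex=False)
    frames.append(df.reset_index().assign(nestID=pfx))
bdf = pd.concat(frames, ignore_index=True)
bdf.index.name = 'bid'
print(bdf)  #one row per (cid, nest) pair with columns cid, felv, fscale, nestID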
Python | def build_depths(self): #build the expanded depths data from the wsl data
#======================================================================
# defaults
#======================================================================
log = self.logger.getChild('build_depths')
bdf = self.bdf.copy() #expanded finv
cid, bid = self.cid, self.bid
wdf = self.data_d['expos'] #wsl
#======================================================================
# expand
#======================================================================
#add indexer columns and expand w/ wsl data
"""would have been easier with a multiindex"""
ddf = self.bdf.loc[:, [bid, cid]].join(wdf.round(self.prec), on=cid
).set_index(bid, drop=False)
#=======================================================================
# precheck
#=======================================================================
if self.as_inun:
boolidx = bdf['felv'] !=0
if not boolidx.sum().sum() == 0:
raise Error('with as_inun=True got %i (of %i) elv values with non-zero depths'%(
boolidx.sum().sum(), boolidx.size))
#======================================================================
# calc depths
#======================================================================
#loop and subtract to get depths
boolcol = ~ddf.columns.isin([cid, bid]) #columns w/ depth values
for coln in ddf.columns[boolcol]:
ddf.loc[:, coln] = (ddf[coln] - bdf['felv']).round(self.prec)
"""
maintains nulls
view(ddf)
"""
log.debug('converted wsl to depth %s'%str(ddf.shape))
#======================================================================
# fill nulls
#======================================================================
"""no! dont want to mix these up w/ negatives.
filtering nulls in risk1.run() and dmg2.bdmg()
booldf = ddf.drop([bid, cid], axis=1).isna()
if booldf.any().any():
log.warning('setting %i (of %i) null depth values to zero'%(
booldf.sum().sum(), booldf.size))
ddf = ddf.fillna(0.0)"""
#======================================================================
# negative depths
#======================================================================
booldf = ddf.loc[:,boolcol] < 0 #True=wsl below ground
if booldf.any().any():
assert not self.as_inun
"""
note these are expanded (un-nested) assets, so counts will be larger than expected
"""
#user wants to ignore ground_water, set all negatives to zero
if not self.ground_water:
log.warning('setting %i (of %i) negative depths to zero'%(
booldf.sum().sum(), booldf.size))
"""NO! filtering negatives during dmg2.bdmg()
ddf.loc[:, boolcol] = ddf.loc[:,boolcol].where(~booldf, other=0)"""
#user wants to keep negative depths.. leave as is
else:
log.info('ground_water=True. preserving %i (of %i) negative depths'%(
booldf.sum().sum(), booldf.size))
#======================================================================
# post checks
#======================================================================
assert np.array_equal(ddf.index, bdf.index)
assert bid in ddf.columns
assert ddf.index.name == bid
assert np.array_equal(ddf.index.values, ddf[bid].values)
#max depth
boolidx = ddf.loc[:,boolcol].max(axis=1)>self.max_depth
if boolidx.any():
log.debug('\n%s'%ddf[boolidx])
raise Error('%i (of %i) nested assets exceed max depth: %.2f. see logger'%(
boolidx.sum(), len(boolidx), self.max_depth))
#======================================================================
# wrap
#======================================================================
log.info('assembled depth_df w/ \nmax:\n%s\nmean: \n%s'%(
ddf.loc[:,boolcol].max(),
ddf.loc[:,boolcol].mean()
))
self.ddf = ddf
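# --- Illustrative sketch (not part of the original CanFlood source): the core depth
# --- calculation above (depth = water surface elevation - asset elevation), using
# --- hypothetical event columns.
import pandas as pd

felv = pd.Series([10.0, 12.5], index=pd.Index([0, 1], name='bid'))
wdf = pd.DataFrame({'e1_wsl': [11.0, 12.0], 'e2_wsl': [13.0, 14.0]}, index=felv.index)
ddf = wdf.sub(felv, axis=0).round(2)  #negative values mean the wsl sits below the asset
print(ddf)  #e1_wsl: 1.0, -0.5   e2_wsl: 3.0, 1.5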
Python | def check_attrimat(self, #check the logic of the attrimat
atr_dxcol=None,
logger=None,
):
"""
attrimat rows should always sum to 1.0 on lvl0
"""
#=======================================================================
# defaults
#=======================================================================
if atr_dxcol is None: atr_dxcol=self.att_df
#mdex = atr_dxcol.columns
#=======================================================================
# #determine what level to perform sum check on
# sumLvl = atr_dxcol.columns.nlevels -2 #should always be the last rank/level
#=======================================================================
#sum each of the grpColns nested under the rEventName
bool_df = atr_dxcol.sum(axis=1, level=0, skipna=False).round(self.prec)==1.0
#=======================================================================
# #drop all but the top level. identify null locations
# nbool_df = atr_dxcol.droplevel(
# level=list(range(1, mdex.nlevels)), axis=1
# ).notna()
#
# #check the failures line up with the nulls
# bool2_df = psumBool_df==nbool_df.loc[:, ~nbool_df.columns.duplicated(keep='first')]
#=======================================================================
"""
view(atr_dxcol.sum(axis=1, level=0, skipna=False))
view(bool_df)
view(atr_dxcol)
"""
if not bool_df.all().all():
raise Error('%i (of %i) attribute matrix entries failed sum=1 test'%(
np.invert(bool_df).sum().sum(), bool_df.size))
return True
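# --- Illustrative sketch (not part of the original CanFlood source): the level-0
# --- 'sums to 1.0' test performed above, on a hypothetical two-level attribution
# --- matrix. The groupby form used here avoids the deprecated sum(level=...) API.
import pandas as pd

cols = pd.MultiIndex.from_tuples(
    [('e1', 'f0'), ('e1', 'f1'), ('e2', 'f0'), ('e2', 'f1')],
    names=['rEventName', 'nestID'])
atr = pd.DataFrame([[0.25, 0.75, 0.4, 0.6]], columns=cols)
lvl0_sums = atr.T.groupby(level=0).sum().T  #per-event attribution totals
assert lvl0_sums.round(6).eq(1.0).all().all()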
Python | def _get_from_cpar(self, #special parameter extraction recognizing object's type
cpars,
sectName,
varName,
logger = None):
"""each parameter should exist on the class instance.
we use this to set the type"""
if logger is None: logger=self.logger
log = logger.getChild('_get_from_cpar')
#=======================================================================
# get native type on class
#=======================================================================
assert hasattr(self, varName), '\'%s\' does not exist on %s'%(varName, self)
#get class instance's native type
ntype = type(getattr(self, varName))
#==============================================================
# retrieve and typeset (using native type)
#==============================================================
assert isinstance(cpars, configparser.ConfigParser)
csect = cpars[sectName]
pval_raw = csect[varName] #raw value (always a string)
#boolean
if ntype == bool:
pval = csect.getboolean(varName)
#no check or type conversion
elif getattr(self, varName) is None:
pval = pval_raw
#other types
else:
try:
pval = ntype(pval_raw)
except Exception as e:
raise Error('failed to set %s.%s with input \'%s\' (%s) to %s \n %s'%(
sectName, varName, pval_raw, type(pval_raw), ntype, e))
#=======================================================================
# blank set
#=======================================================================
"""seems like we're setup for ''.... not sure the value in switching everything over
if pval == '':
pval = np.nan"""
log.debug('retrieved \'%s.%s\'=\'%s\' w/ type: \'%s\''%(sectName, varName, pval, type(pval)))
return pval
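# --- Illustrative sketch (not part of the original CanFlood source): typesetting a raw
# --- ConfigParser string using the attribute's existing type, as done above. The class
# --- and parameter names are hypothetical.
import configparser

class Holder:
    ground_water = False  #native bool
    prec = 2              #native int

cpars = configparser.ConfigParser()
cpars.read_string("[parameters]\nground_water = true\nprec = 4\n")
obj, sect = Holder(), cpars['parameters']
for varName in ('ground_water', 'prec'):
    ntype = type(getattr(obj, varName))
    pval = sect.getboolean(varName) if ntype is bool else ntype(sect[varName])
    setattr(obj, varName, pval)
print(obj.ground_water, obj.prec)  #True 4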
Python | def is_null(obj): #check if the object is none
if obj is None:
return True
"""might not work for non string multi element objects"""
if np.any(pd.isnull(obj)):
return True
#excel checks
if obj in ('', 0, '0', '0.0'):
return True
return False
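# --- Illustrative sketch (not part of the original CanFlood source): expected behaviour
# --- of the helper above (assumes numpy/pandas are imported as np/pd, as elsewhere here).
assert is_null(None) and is_null(np.nan) and is_null('') and is_null('0.0')
assert not is_null('foo') and not is_null(1.5)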
Python | def load_ifz_fps(self, #load filepaths in an ifz map and replace with layer references
eifz_d, #ifz map w/ filepaths
aoi_vlay=None,
base_dir=None,
):
"""
plugin runs dont need this.
just pass eifz_d with vlays (no fps)
what about copies??
#=======================================================================
# data structure:
#=======================================================================
each event (eTag) should have its own ifz polygon layer
(here we allow for the re-use of layers... but a single layer is still expected)
each ifz poly feature corresponds to a dike (sid) segment (for that event) (1 ifzID: many sid)
these ifz polys can be overlapping
often users may duplicate layers/maps between events
but our structure allows for unique vals
#=======================================================================
# TODO:
#=======================================================================
setup to assign map based on name matching (between ifz and raster)
"""
log = self.logger.getChild('load_ifz')
#=======================================================================
# prechecks
#=======================================================================
miss_l = set(eifz_d.keys()).difference(self.etag_l)
assert len(miss_l)==0, 'eTag mismatch on eifz_d and pfail_df: %s'%miss_l
fp_vlay_d = dict() #container for repeat layers
#=======================================================================
# loop and load
#=======================================================================
log.info('loading on %i events'%len(eifz_d))
for eTag, fp_raw in eifz_d.copy().items():
#===================================================================
# relative filepaths
#===================================================================
if not base_dir is None:
fp = os.path.join(base_dir, fp_raw)
else:
fp = fp_raw
#===================================================================
# get the layer
#===================================================================
#check if its already loaded
if fp in fp_vlay_d:
vlay = self.fixgeometries(fp_vlay_d[fp], logger=log, layname=fp_vlay_d[fp].name())
else:
vlay = self.load_vlay(fp, aoi_vlay=aoi_vlay, logger=log)
fp_vlay_d[fp] = vlay #add it for next time
#===================================================================
# check it
#===================================================================
assert self._check_ifz(vlay)
#===================================================================
# #check sid_ifz_d
#===================================================================
"""
could force a symmetric_difference...
but not a great reason to break when the user passes a map thats too big
because some pfails may get filtered out...
we may not want to throw this error
"""
#===================================================================
# miss_l = set(self.sid_l).difference(e_d['sid_ifz_d'])
# assert len(miss_l)==0, '%s sid_ifz_d missing some %s keys: %s'%(eTag, self.sid, miss_l)
#===================================================================
#===================================================================
# update the container
#===================================================================
dp = vlay.dataProvider()
log.info('%s got vlay \'%s\' w/ %i features'%(eTag, vlay.name(), dp.featureCount()))
eifz_d[eTag] = vlay
#=======================================================================
# wrap
#=======================================================================
log.info('finished loading %i layers for %i events'%(len(fp_vlay_d), len(eifz_d)))
self.eifz_d = eifz_d
return self.eifz_d
Python | def load_lpols(self, #helper for loading vector polygons in standalone runs
lpol_files_d, #{event name:polygon filepath}
basedir = None, #optional directory to append to lpol_files_d
providerLib='ogr',
logger=None,
**kwargs
):
"""
can't load from a directory dump because we need to link to events
"""
#=======================================================================
# defaults
#=======================================================================
if logger is None: logger=self.logger
log=logger.getChild('load_lpols')
if not basedir is None:
assert os.path.exists(basedir), 'bad fpoly basedir: %s'%basedir
log.info('on %i layers'%len(lpol_files_d))
#=======================================================================
# loop and load
#=======================================================================
lpol_d = dict()
for ename, file_str in lpol_files_d.items():
#get filepath
if isinstance(basedir, str):
fp = os.path.join(basedir, file_str)
else:
fp = file_str
#load it
lpol_d[ename] = self.load_vlay(fp, logger=log, providerLib=providerLib,
**kwargs)
log.info('finished w/ %i'%len(lpol_d))
return lpol_d
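# --- Illustrative sketch (not part of the original CanFlood source): the
# --- {event name: filepath} mapping expected by the loader above. The paths and the
# --- 'model' handle are hypothetical.
lpol_files_d = {
    'fail_100yr': 'fpolys/fail_100yr.gpkg',
    'fail_200yr': 'fpolys/fail_200yr.gpkg',
    }
#lpol_d = model.load_lpols(lpol_files_d, basedir=r'C:\projects\run1')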
Python | def union_probabilities(self,
probs,
logger = None,
):
"""
calculating the union probability of multiple independent events using the inclusion-exclusion principle
probability that ANY of the passed independent events will occur
https://en.wikipedia.org/wiki/Inclusion%E2%80%93exclusion_principle#In_probability
from Walter
Parameters
----------
probs_list : Python 1d list
A list of probabilities between 0 and 1 with len() less than 23
After 23, the memory consumption gets huge. 23 items uses ~1.2gb of ram.
Returns
-------
total_prob : float64
Total probability
"""
if logger is None: logger=self.logger
#log = self.logger.getChild('union_probabilities')
#======================================================================
# prechecks
#======================================================================
assert isinstance(probs, list), 'unexpected type: %s'%type(probs)
assert len(probs) >0, 'got empty container'
#======================================================================
# prefilter
#======================================================================
#guaranteed
if max(probs) == 1.0:
#log.debug('passed a probability with 1.0... returning this')
return 1.0
#clean out zeros
if 0.0 in probs:
probs = [x for x in probs if not x==0]
#===========================================================================
# do some checks
#===========================================================================
assert (len(probs) < 20), "list too long"
assert (all(map(lambda x: x < 1 and x > 0, probs))), 'probabilities out of range'
#===========================================================================
# loop and add (or subtract) joint probabilities
#===========================================================================
#log.debug('calc total_prob for %i probs: %s'%(len(probs), probs))
total_prob = 0
for r in range(1, len(probs) + 1): #enumerate through each entry in the probs list
combs = itertools.combinations(probs, r) #assemble all the possible combinations
"""
list(combs)
"""
#multiply all the probability combinations together and add for this layer
total_prob += ((-1) ** (r + 1)) * sum([np.prod(items) for items in combs])
assert total_prob <1 and total_prob > 0, 'bad result'
return total_prob
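# --- Illustrative sketch (not part of the original CanFlood source): inclusion-exclusion
# --- on two hypothetical, independent failure probabilities, mirroring the loop above.
import itertools
import numpy as np

probs = [0.5, 0.2]
total = 0.0
for r in range(1, len(probs) + 1):
    total += ((-1) ** (r + 1)) * sum(np.prod(c) for c in itertools.combinations(probs, r))
print(round(total, 3))  #0.6 = 0.5 + 0.2 - 0.5*0.2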
Python | def _get_data(self, keys):
"""
keys passed that are NOT found in the CALC set will be ignored
"""
#=======================================================================
# get the data
#=======================================================================
calc_d = {k:v for k,v in self.runr.res_d.items() if k in keys} #requested keys from calc set
test_d = self.runr.pick_d #test library
"""
self.runr.res_d['si_ttl']
"""
#=======================================================================
# key check
#=======================================================================
#check we have everything found in the calc set in the test set
miss_l = set(calc_d.keys()).difference(test_d.keys())
assert len(miss_l)==0, 'missing keys: %s'%miss_l
return {k:(v, test_d[k]) for k,v in calc_d.items()}
Python | def build_pickels(self, #build the input and output pickles
wFLow_l,
**kwargs):
"""
should have a pickle for inputs
and one for the outputs (that we'll test against)
"""
log = self.logger.getChild('build_pickels')
#=======================================================================
# loop and load, execute, and save each test
#=======================================================================
d = dict()
for fWrkr in wFLow_l:
runr = self._run_wflow(fWrkr, **kwargs)
d[fWrkr.name] = runr.write_pick()
runr.__exit__()
log.info('finished on %i \n %s'%(len(d), list(d.keys())))
return d
Python | def promote_attrim(self, dtag=None): #add new index level
if dtag is None: dtag = self.attrdtag_in
"""
risk1 doesn't use dmg1... so the attrim will be different
"""
aep_ser = self.data_d['evals'].copy()
atr_dxcol = self.data_d[dtag].copy()
"""
view(atr_dxcol)
"""
#get the new mindex we want to join in
mindex2 = pd.MultiIndex.from_frame(
aep_ser.to_frame().reset_index().rename(columns={'index':'rEventName'}))
#join this in and move it up some levels
atr_dxcol.columns = atr_dxcol.columns.join(mindex2).swaplevel(i=2, j=1).swaplevel(i=1, j=0)
#check the values all match
"""nulls are not matching for some reason"""
booldf = atr_dxcol.droplevel(level=0, axis=1).fillna(999) == self.data_d[dtag].fillna(999)
assert booldf.all().all(), 'bad conversion'
del self.data_d[dtag]
self.att_df = atr_dxcol.sort_index(axis=1, level=0, sort_remaining=True,
inplace=False, ascending=True)
assert self.attriMode
return
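# --- Illustrative sketch (not part of the original CanFlood source): building the extra
# --- column level from the event/AEP series, as done above; event names are hypothetical.
import pandas as pd

aep_ser = pd.Series([0.01, 0.002], index=['e1', 'e2'], name='aep')
mindex2 = pd.MultiIndex.from_frame(
    aep_ser.to_frame().reset_index().rename(columns={'index': 'rEventName'}))
print(mindex2)  #[('e1', 0.01), ('e2', 0.002)] with names ['rEventName', 'aep']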
Python | def launch(self): #placeholder for launching the dialog
"""allows children to customize what happens when called"""
log = self.logger.getChild('launch')
#=======================================================================
# launch setup
#=======================================================================
if self.first_launch:
self.connect_slots()
#=======================================================================
# #customs
#=======================================================================
"""
lets each dialog attach custom functions when they are launched
useful for automatically setting some dialog boxes
prioritizing inheritance over customs
"""
for fName, f in self.launch_actions.items():
log.debug('%s: %s'%(fName, f))
try:
f()
except Exception as e:
log.warning('failed to execute \'%s\' w/ \n %s'%(fName, e))
#=======================================================================
# inherit from other tools
#=======================================================================
"""for dialogs with control files"""
#my control file path
if hasattr(self, 'lineEdit_cf_fp'):
#try and set the control file path from the session if there
if os.path.exists(self.session.cf_fp):
#set the control file path
self.lineEdit_cf_fp.setText(self.session.cf_fp)
#working directory
if hasattr(self, 'lineEdit_wdir'):
#from session control file
if os.path.exists(self.session.cf_fp):
newdir = os.path.join(os.path.dirname(self.session.cf_fp))
assert os.path.exists(newdir), 'this should exist...%s'%newdir
self.lineEdit_wdir.setText(newdir)
#default catch for working directory
if self.lineEdit_wdir.text() == '':
newdir = os.path.join(os.getcwd(), 'CanFlood')
if not os.path.exists(newdir): os.makedirs(newdir)
self.lineEdit_wdir.setText(newdir)
#inventory vector layer
if isinstance(self.session.finv_vlay, QgsVectorLayer):
if hasattr(self, 'comboBox_JGfinv'): #should just skip the Build
self.comboBox_JGfinv.setLayer(self.session.finv_vlay)
self.first_launch=False
self.show()
Python | def newFileSelect_button(self,
lineEdit, #text bar where selected file should be displayed
caption = 'Specify new file name', #title of box
path = None,
filters = "All Files (*)",
qfd = QFileDialog.getSaveFileName, #dialog to launch
):
#=======================================================================
# defaults
#=======================================================================
if path is None:
path = os.getcwd()
if not os.path.exists(path):
path = os.getcwd()
#ask the user for the path
"""
using the Dialog instance as the QWidget parent
"""
fp = qfd(self, caption, path, filters)
#just take the first
if len(fp) == 2:
fp = fp[0]
#see if they picked something
if fp == '':
self.logger.warning('user failed to make a selection. skipping')
return
#update the bar
lineEdit.setText(fp)
self.logger.info('user selected: \n %s'%fp)
Python | def _loghlp(self, #helper function for generalized logging
msg_raw, qlevel,
push=False, #treat as a push message on Qgis' bar
status=False, #whether to send to the status widget
):
"""
QgsMessageLog writes to the QGIS message panel;
QgsLogger.debug writes to the (optional) file log when file logging is enabled
"""
#=======================================================================
# send message based on qlevel
#=======================================================================
msgDebug = '%s %s: %s'%(datetime.datetime.now().strftime('%d-%H.%M.%S'), self.log_nm, msg_raw)
if qlevel < 0: #file logger only
QgsLogger.debug('D_%s'%msgDebug)
push, status = False, False #should never trip
else:#console logger
msg = '%s: %s'%(self.log_nm, msg_raw)
QgsMessageLog.logMessage(msg, self.log_tabnm, level=qlevel)
QgsLogger.debug('%i_%s'%(qlevel, msgDebug)) #also send to file
#Qgis bar
if push:
try:
self.iface.messageBar().pushMessage(self.log_tabnm, msg_raw, level=qlevel)
except:
QgsLogger.debug('failed to push to interface') #used for standalone tests
#Optional widget
if status or push:
if not self.statusQlab is None:
self.statusQlab.setText(msg_raw)
Python | def basic_logger(root_lvl = logging.DEBUG,
new_wdir = None,
): #attaches to a log file in the users directory per the parameter file
"""
the control file generates a 'DEBUG' and a 'WARNING' filehandler
"""
#===========================================================================
# get filepaths
#===========================================================================
base_dir = os.path.dirname(os.path.dirname(__file__)) #canflood
logcfg_file = os.path.join(base_dir, '_pars', 'logger.conf')
if not os.path.exists(logcfg_file):
raise Error('logger config file does not exist:\n %s'%logcfg_file)
#===========================================================================
# #change path to users directory
#===========================================================================
if new_wdir is None:
new_wdir = os.path.join(os.path.expanduser('~'), 'CanFlood')
if not os.path.exists(new_wdir):
os.makedirs(new_wdir)
print('default working directory did not exist. made it:\n %s'%new_wdir)
os.chdir(new_wdir)
print('changed current directory to: \n %s'%os.getcwd())
#===========================================================================
# build logger from file
#===========================================================================
logger = logging.getLogger() #get the root logger
logging.config.fileConfig(logcfg_file) #load the configuration file
logger.info('root logger initiated and configured from file: %s'%(logcfg_file))
#override default level in the config file
logger.setLevel(root_lvl)
return logger
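#===============================================================================
# usage sketch for basic_logger (assumes the packaged '_pars/logger.conf' is
# present and that a standalone, non-QGIS python session is running)
#===============================================================================
import logging

root_logger = basic_logger(root_lvl=logging.INFO)  #builds handlers, chdirs to ~/CanFlood

log = root_logger.getChild('dmg2')   #modules use named children so records show their origin
log.info('starting damage model')
log.debug('suppressed because root_lvl=INFO')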
def _get_inher_atts(self, #return a container with the attribute values from your inher_d
inher_d=None,
logger=None,
):
"""used by parents to retrieve kwargs to pass to children"""
#=======================================================================
# defaults
#=======================================================================
if logger is None: logger=self.logger
log=logger.getChild('get_inher_atts')
if inher_d is None:
inher_d = self.inher_d
#=======================================================================
# retrieve
#=======================================================================
att_d = dict()
for className, attn_l in inher_d.items():
d = dict()
for attn in attn_l:
attv = getattr(self, attn)
#assert not attv is None, attn #allowing Nones to pass
att_d[attn] = attv
d[attn] = attv
log.debug('got %i atts from \'%s\'\n %s'%(
len(d), className, d))
return att_d
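#===============================================================================
# illustrative sketch of how the att_d returned above is meant to be used:
# a parent splats its inheritable attributes into a child's constructor.
# Parent/Child and the attribute names here are hypothetical, not CanFlood classes.
#===============================================================================
class Child(object):
    def __init__(self, cid=None, prec=None, tag=None):
        self.cid, self.prec, self.tag = cid, prec, tag

class Parent(object):
    inher_d = {'Parent': ['cid', 'prec', 'tag']}   #className: attribute names to pass down

    def __init__(self, cid='xid', prec=2, tag='r0'):
        self.cid, self.prec, self.tag = cid, prec, tag

    def spawn_child(self):
        #collect the inheritable attribute values (same loop as _get_inher_atts)
        att_d = {attn: getattr(self, attn)
                 for attn_l in self.inher_d.values() for attn in attn_l}
        return Child(**att_d)   #pass them down as keyword arguments

child = Parent(tag='r1').spawn_child()
assert child.tag == 'r1'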
def bdmg_raw(self, #get damages on expanded finv
bdf = None, #expanded finv. see modcom.build_exp_finv(). each row has 1 ftag
ddf = None, #expanded exposure set. depth at each bid. see build_depths() or get_mitid()
):
#======================================================================
# defaults
#======================================================================
log = self.logger.getChild('bdmg')
#defaults
if ddf is None: ddf = self.ddf
if bdf is None: bdf = self.bdf
"""ddf is appending _1 to column names"""
cid, bid = self.cid, self.bid
#=======================================================================
# prechecks
#=======================================================================
assert len(self.dfuncs_d)>0
assert bid in ddf.columns
assert ddf.index.name == bid
assert np.array_equal(ddf.index.values, ddf[bid].values)
#identifier for depth columns
dboolcol = ~ddf.columns.isin([cid, bid])
log.debug('running on %i assets and %i events'%(len(bdf), len(ddf.columns)-2))
#======================================================================
# adjust depths by exposure grade
#======================================================================
"""
see get_mitid()
"""
#======================================================================
# setup-----
#======================================================================
edf = ddf.loc[:, dboolcol] #just the exposure values
#=======================================================================
# build the events matrix
#=======================================================================
"""makes it easier to keep track of all the results by event
view(events_df)
"""
#get events name set
events_df = pd.DataFrame(index = ddf.columns[dboolcol])
for sufix in ['raw', 'scaled', 'capped', 'dmg']:
events_df[sufix] = events_df.index + '_%s'%sufix
self.events_df = events_df #set for later
#=======================================================================
# id valid bids
#=======================================================================
if self.ground_water:
mdval = min(self.df_minD_d.values())
else:
mdval = 0
"""this marks nulls as False"""
dep_booldf = edf >= mdval #True= depth is valid
#report those failing the check
if not dep_booldf.all().all():
log.debug('marked %i (of %i) entries w/ excluded depths (<= %.2f or NULL)'%(
np.invert(dep_booldf).sum().sum(), dep_booldf.size, mdval))
#check if EVERYTHING failed
if not dep_booldf.any().any():
log.warning('ZERO (of %i) exposures exceed the minimum threshold (%.2f)! returning all zeros'%(
dep_booldf.size, mdval))
self.res_df = pd.DataFrame(0, index=edf.index, columns= ['%s_raw'%e for e in edf.columns])
return self.res_df
#======================================================================
# RAW: loop and calc raw damage by ftag-------------
#======================================================================
res_df = None
for indxr, (ftag, dfunc) in enumerate(self.dfuncs_d.items()):
log = self.logger.getChild('bdmg.%s'%ftag)
#entries matching this tag
tag_booldf = pd.DataFrame(np.tile(bdf['ftag']==ftag, (len(dep_booldf.columns),1)).T,
index=dep_booldf.index, columns=dep_booldf.columns)
booldf = np.logical_and(
dep_booldf, #entries w/ valid depths
tag_booldf #entries matching this tag
)
log.info('(%i/%i) calculating \'%s\' w/ %i un-nested assets (of %i)'%(
indxr+1, len(self.dfuncs_d), ftag,
booldf.any(axis=1).sum(), len(booldf)))
if not booldf.any().any():
log.debug(' no valid entries!')
continue
#==================================================================
# calc damage by tag.depth
#==================================================================
"""
to improve performance,
we only calculate each depth once, then join back to the results
todo: add check for max depth to improve performance
"""
#get just the unique depths that need calculating
deps_ar = pd.Series(np.unique(np.ravel(edf[booldf].values))
).dropna().values
log.debug('calc for %i (of %i) unique depths'%(
len(deps_ar), edf.size))
"""multi-threading would nice for this loop"""
#loop each depth through the damage function to get the result
e_impacts_d = {dep:dfunc.get_dmg(dep) for dep in deps_ar}
#===================================================================
# link
#===================================================================
ri_df = edf[booldf].replace(e_impacts_d)
# update master=
if res_df is None:
res_df = ri_df
else:
res_df.update(ri_df, overwrite=False, errors='raise')
#=======================================================================
# wrap-------
#=======================================================================
log = self.logger.getChild('bdmg')
assert not res_df is None, 'failed to get any valid entries'
res_df.columns = ['%s_raw'%e for e in res_df.columns] #add the suffix
#attach
self.res_df = res_df
log.info('got raw impacts for %i dfuncs and %i events: \n %s'%(
len(self.dfuncs_d),dboolcol.sum(), self._rdf_smry('_raw')))
return res_df
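#===============================================================================
# stand-alone illustration of the performance trick in the loop above:
# evaluate the damage function once per unique depth, then map the results
# back onto the exposure frame with .replace(). get_dmg here is a made-up
# linear curve, not a CanFlood dfunc.
#===============================================================================
import numpy as np
import pandas as pd

def get_dmg(dep):                        #stand-in for dfunc.get_dmg(dep)
    return round(100.0*dep, 2)

edf = pd.DataFrame({'e1':[0.5, 1.2, 0.5, np.nan],
                    'e2':[1.2, np.nan, 2.0, 0.5]})    #depths per asset/event

#unique, non-null depths only
deps_ar = pd.Series(np.unique(np.ravel(edf.values))).dropna().values

#each depth is pushed through the (potentially expensive) curve exactly once
e_impacts_d = {dep:get_dmg(dep) for dep in deps_ar}

#map the pre-computed impacts back onto the full frame; nulls pass through
ri_df = edf.replace(e_impacts_d)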
def bdmg_capped(self, #apply the optional 'fcap' values to the scaled damages
res_df = None,
):
"""
bdf can be passed w/o fcap
shouldn't be passed w/ all nulls... but this would still work I think
"""
log = self.logger.getChild('bdmg_capped')
#=======================================================================
# get data
#=======================================================================
if res_df is None: res_df = self.res_df
events_df = self.events_df
bdf = self.bdf
cid, bid = self.cid, self.bid
#=======================================================================
# start meta
#=======================================================================
meta_d = dict()
cmeta_df =bdf.loc[:,bdf.columns.isin([cid, bid, 'ftag', 'fcap', 'fscale', 'nestID'])]
#=======================================================================
# #loop and add scaled damages
#=======================================================================
for event, e_ser in events_df.iterrows():
#join scaled values and cap values for easy comparison
if 'fcap' in bdf.columns:
sc_df = res_df[e_ser['scaled']].to_frame().join(bdf['fcap'])
else:
sc_df = res_df[e_ser['scaled']].to_frame()
#identify nulls
boolidx = res_df[e_ser['scaled']].notna()
#calc and set the capped values
"""this will ignore any null fcap values when determining the minimum"""
res_df.loc[boolidx, e_ser['capped']] = sc_df[boolidx].min(axis=1, skipna=True)
#===================================================================
# #meta
#===================================================================
#where the scaled values were capped
if 'fcap' in bdf.columns:
mser = res_df.loc[boolidx, e_ser['scaled']] >bdf.loc[boolidx, 'fcap']
else:
#all FALSE
mser = pd.Series(index=cmeta_df.index, dtype=bool)
cmeta_df= cmeta_df.join(mser.rename(event), how='left')
#totals
meta_d[event] = mser.sum()
#=======================================================================
# wrap
#=======================================================================
"""written by bdmg_smry"""
"""
view(cmeta_df)
"""
self.cmeta_df = cmeta_df
self.res_colg = 'capped' #needed by mitigation funcs
self.res_df = res_df
log.info('capped %i events w/ bid cnts maxing out (of %i) \n %s\n %s'%(
len(meta_d), len(res_df), meta_d, self._rdf_smry('_capped')))
return res_df
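#===============================================================================
# stand-alone illustration of the capping pattern above: a row-wise minimum
# between the scaled damage and the (possibly null) fcap ceiling. the model
# additionally masks rows where the scaled value itself is null; that detail
# is omitted here.
#===============================================================================
import numpy as np
import pandas as pd

sc_df = pd.DataFrame({
    'e1_scaled': [50.0, 200.0, 80.0],     #scaled damages for one event
    'fcap':      [100.0, 100.0, np.nan],  #per-asset ceiling (may be null)
    })

capped = sc_df.min(axis=1, skipna=True)          #null fcap values are ignored
was_capped = sc_df['e1_scaled'] > sc_df['fcap']  #meta: which rows hit the ceiling

#capped.tolist()     -> [50.0, 100.0, 80.0]
#was_capped.tolist() -> [False, True, False]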
def prep_model(self):
log = self.logger.getChild('prep_model')
self.set_ttl() #load and prep the total results
self.set_passet()
self.set_etypes()
self.set_attrimat()
#=======================================================================
# attrim----
#=======================================================================
"""ROUNDING
forcing the project precision on all the aep values...
not the greatest.. but the only decent way to ensure they are treated as members
"""
#reformat aep values
atr_dxcol = self.data_d.pop(self.attrdtag_in)
mdex = atr_dxcol.columns
atr_dxcol.columns = mdex.set_levels(
np.around(mdex.levels[0].astype(float), decimals=self.prec),
level=0)
#sort them
"""this flips the usual atr_dxcol order.. but matches the EAD calc expectation"""
atr_dxcol = atr_dxcol.sort_index(axis=1, level=0, ascending=False)
#=======================================================================
# check
#=======================================================================
mdex = atr_dxcol.columns
#check aep values
miss_l = set(mdex.levels[0]).symmetric_difference(
self.aep_df.loc[~self.aep_df['extrap'], 'aep'])
assert len(miss_l)==0, 'aep mismatch: %s'%miss_l
#check rEventNames
miss_l = set(mdex.levels[1]).symmetric_difference(
self.data_d['eventypes']['rEventName'])
assert len(miss_l)==0, 'rEventName mismatch: %s'%miss_l
#store
self.data_d[self.attrdtag_in] = atr_dxcol
#=======================================================================
# get TOTAL multiplied values---
#=======================================================================
self.mula_dxcol = self.get_mult(atr_dxcol.copy(), logger=log)
#=======================================================================
# wrap
#=======================================================================
log.debug('finished')
return
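#===============================================================================
# the aep handling above in isolation: round level-0 of a column MultiIndex
# to the project precision, then sort columns descending so the largest aep
# comes first. toy values only.
#===============================================================================
import numpy as np
import pandas as pd

cols = pd.MultiIndex.from_product([[0.010001, 0.5000003], ['fail_A', 'noFail']],
                                  names=['aep', 'rEventName'])
atr_dxcol = pd.DataFrame(1.0, index=range(3), columns=cols)

prec = 3
mdex = atr_dxcol.columns
atr_dxcol.columns = mdex.set_levels(
    np.around(mdex.levels[0].astype(float), decimals=prec), level=0)

atr_dxcol = atr_dxcol.sort_index(axis=1, level=0, ascending=False)
#atr_dxcol.columns.get_level_values(0).tolist() -> [0.5, 0.5, 0.01, 0.01]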
def run(self,
rlayRaw_l, #set of rasters to sample
finv_raw, #inventory layer
cid = None, #index field name on finv
#exposure value controls
psmp_stat=None, #for complex geo finvs, statistic to sample
psmp_fieldName = None, #for complex geo finvs, field name with sampling statistic
#inundation sampling controls
as_inun=False, #whether to sample for inundation (rather than wsl values)
dtm_rlay=None, #dtm raster (for as_inun=True)
dthresh = 0, #depth threshold (for as_inun=True)
clip_dtm=False,
fname = None, #prefix for layer name
):
"""
Generate the exposure dataset ('expos') from a set of hazard event rasters
"""
#======================================================================
# defaults
#======================================================================
log = self.logger.getChild('run')
if cid is None: cid = self.cid
if fname is None: fname=self.fname
self.as_inun = as_inun
self.finv_name = finv_raw.name() #for plotters
self.gtype = QgsWkbTypes().displayString(finv_raw.wkbType())
log.info('executing on %i rasters'%(len(rlayRaw_l)))
#======================================================================
# precheck
#======================================================================
assert len(rlayRaw_l)>0, 'no rasters passed!'
#check the finv_raw
assert isinstance(finv_raw, QgsVectorLayer), 'bad type on finv_raw'
assert finv_raw.crs() == self.qproj.crs(), 'finv_raw crs %s doesnt match projects \'%s\'' \
%(finv_raw.crs().authid(), self.qproj.crs().authid())
assert cid in [field.name() for field in finv_raw.fields()], \
'requested cid field \'%s\' not found on the finv_raw'%cid
#check the rasters
rname_l = []
for rlay in rlayRaw_l:
assert isinstance(rlay, QgsRasterLayer)
assert rlay.crs() == self.qproj.crs(), 'rlay %s crs doesnt match project'%(rlay.name())
rname_l.append(rlay.name())
self.rname_l = rname_l
#check sampling parameter logic
"""see samp_vals()"""
#======================================================================
# prep the finv for sampling
#======================================================================
self.finv_name = finv_raw.name()
#drop all data fields
if not psmp_fieldName is None:
keep_fnl = [cid, psmp_fieldName]
else:
keep_fnl = [cid]
finv = self.deletecolumn(finv_raw, keep_fnl, invert=True)
#fix the geometry
finv = self.fixgeometries(finv, logger=log)
#check field lengths
self.finv_fcnt = len(finv.fields())
assert self.finv_fcnt==len(keep_fnl), 'failed to drop all the fields'
if self.gtype.endswith('Z'):
log.warning('passed finv has Z values... these are not supported')
self.names_d = dict() #setting an empty as some paths dont fill this anymore
self.feedback.setProgress(20)
#get the results name
res_name = '%s_%s_%i_%i'%(fname, self.tag, len(rlayRaw_l),finv.dataProvider().featureCount())
#=======================================================================
# simple geometries (Points)-----
#=======================================================================
if 'Point' in self.gtype:
res_vlay = self.samp_vals_pts(finv, rlayRaw_l)
assert not as_inun
#=======================================================================
# complex geos--------
#=======================================================================
else:
#=======================================================================
#threshold area (inundation)--------
#=======================================================================
if as_inun:
#===================================================================
# #prep DTM
#===================================================================
if clip_dtm:
"""makes the raster clipping a bitcleaner
2020-05-06
ran 2 tests, and this INCREASED run times by ~20%
set default to clip_dtm=False
"""
log.info('trimming dtm \'%s\' by finv extents'%(dtm_rlay.name()))
finv_buf = self.polygonfromlayerextent(finv,
round_to=dtm_rlay.rasterUnitsPerPixelX()*3,#buffer by 3x the pixel size
logger=log )
#clip to just the polygons
dtm_rlay1 = self.cliprasterwithpolygon(dtm_rlay,finv_buf, logger=log)
else:
dtm_rlay1 = dtm_rlay
#===================================================================
# sample by geometry type
#===================================================================
if 'Polygon' in self.gtype:
res_vlay = self.samp_inun(finv,rlayRaw_l, dtm_rlay1, dthresh)
elif 'Line' in self.gtype:
res_vlay = self.samp_inun_line(finv, rlayRaw_l, dtm_rlay1, dthresh)
else:
raise Error('\'%s\' got unexpected gtype: %s'%(finv.name(), self.gtype))
res_name = res_name + 'd%.2f'%(dthresh)
#=======================================================================
# value sampler------
#=======================================================================
else:
#===============================================================
# Global statistic
#===============================================================
if not psmp_stat is None:
assert psmp_fieldName is None
res_vlay = self.samp_vals_cplx(finv,rlayRaw_l, psmp_stat=psmp_stat)
res_name = res_name + '_%s'%psmp_stat.lower()
#===============================================================
# per-asset stat
#===============================================================
else:
res_vlay = self.samp_passet(finv,rlayRaw_l, psmp_fieldName=psmp_fieldName)
res_name = res_name + '_passet'
res_vlay.setName(res_name)
#=======================================================================
# check field names
#=======================================================================
if not as_inun:
"""still handling renaming at the end for inundation runs"""
miss_l = set(self.rname_l).difference([f.name() for f in res_vlay.fields()])
assert len(miss_l)==0, 'field name mismatch: %s'%miss_l
#=======================================================================
# wrap
#=======================================================================
"""TODO: harmonize output types for build modules"""
#get dataframe like results
try:
"""renaming with the algo on each loop now"""
df = vlay_get_fdf(res_vlay, logger=log).set_index(cid, drop=True)
#get sorted index by values
sum_ser = pd.Series({k:cser.dropna().sum() for k, cser in df.items()}).sort_values()
#set this new index
self.res_df = df.loc[:, sum_ser.index].sort_index()
except Exception as e:
log.warning('failed to convert vlay to dataframe w/ \n %s'%e)
#max out the progress bar
self.feedback.setProgress(90)
log.info('sampling finished')
self.psmp_stat=psmp_stat #set for val_str
return res_vlay
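#===============================================================================
# the wrap step of run() orders the event columns by their total sampled
# value before storing res_df. the same re-ordering in isolation:
#===============================================================================
import pandas as pd

df = pd.DataFrame({'haz_100yr':[1.2, 0.0, 2.3],
                   'haz_50yr': [0.4, 0.0, 1.1]}, index=[3, 1, 2])
df.index.name = 'xid'

#column totals (ignoring nulls), smallest event first
sum_ser = pd.Series({k:cser.dropna().sum() for k, cser in df.items()}).sort_values()

#re-order columns by total, then sort rows on the index field
res_df = df.loc[:, sum_ser.index].sort_index()
#res_df.columns.tolist() -> ['haz_50yr', 'haz_100yr']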
def samp_vals_pts(self, #sample a set of rasters with a points vectorlayer
finv, raster_l,
):
""""
2021-10-18:split out function from polygons/line sammpler
"""
#=======================================================================
# defaults
#=======================================================================
log = self.logger.getChild('samp_vals_pts')
algo_nm = 'qgis:rastersampling'
#=======================================================================
# sample loop
#=======================================================================
self.names_d = dict()
log.info('sampling %i raster layers w/ algo \'%s\' and gtype: %s'%(
len(raster_l), algo_nm, self.gtype))
for indxr, rlay in enumerate(raster_l):
log.info('%i/%i sampling \'%s\' on \'%s\''%(
indxr+1, len(raster_l), finv.name(), rlay.name()))
ofnl = [field.name() for field in finv.fields()]
self.mstore.addMapLayer(finv) #not sure when/where we clear this
finv = processing.run(algo_nm,
{ 'COLUMN_PREFIX' : rlay.name(),'INPUT' : finv,
'OUTPUT' : 'TEMPORARY_OUTPUT','RASTERCOPY' : rlay},
feedback=self.feedback)['OUTPUT']
#report and handle names
finv = self._smp_loop_wrap(finv, ofnl, rlay, indxr, log)
log.info('finished %i w/ %s'%(indxr, [f.name() for f in finv.fields()]))
#=======================================================================
# check
#=======================================================================
log.debug('finished w/ \n%s'%self.names_d)
return finv
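#===============================================================================
# illustrative sketch of the names_d bookkeeping referenced above: the sampling
# algorithm assigns prefixed field names (e.g. 'haz_50yr1'), and names_d maps
# them back to the raster names before results are reported. field names here
# are hypothetical.
#===============================================================================
import pandas as pd

names_d = {'haz_50yr1':'haz_50yr', 'haz_100yr1':'haz_100yr'}  #algo name -> rlay.name()

df = pd.DataFrame({'xid':[1, 2], 'haz_50yr1':[0.4, 1.1], 'haz_100yr1':[1.2, 2.3]})
df = df.rename(columns=names_d).set_index('xid')
#df.columns.tolist() -> ['haz_50yr', 'haz_100yr']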
def samp_vals_cplx(self, #sample a set of rasters with a complex vectorlayer (global stat)
finv,
raster_l,
psmp_stat='Max', #global statistic to use for sampling algo
selected=False, #perform sample on selected features only
logger=None,
):
"""
sampling raster values (not inundation)
COLUMN NAMES: the script has some approximate prefixing,
but all the fields are re-named in the end
2021-10-18: split out points
"""
#=======================================================================
# defaults
#=======================================================================
if logger is None: logger=self.logger
log = logger.getChild('samp_vals_cplx')
gtype=self.gtype
#=======================================================================
# check parameter logic
#=======================================================================
assert psmp_stat in self.psmp_codes, 'unrecognized psmp_stat'
#=======================================================================
# sample loop
#=======================================================================
#self.names_d = dict()
log.info('sampling %i raster layers w/ gtype: %s'%(len(raster_l), gtype))
first= True
for indxr, rlay in enumerate(raster_l):
#setup
log.info('%i/%i sampling \'%s\' on \'%s\''%(
indxr+1, len(raster_l), finv.name(), rlay.name()))
ofnl = [field.name() for field in finv.fields()]
self.mstore.addMapLayer(finv)
#add the sample values as a new field
finv = self.get_raster_sample(finv, rlay, psmp_stat, indxr=indxr,
selected=(first and selected), #only need to clip the first time
log=log)
#report and handle names
finv = self._smp_loop_wrap(finv, ofnl, rlay, indxr, log)
#wrap
first=False
log.debug('finished w/ \n%s'%self.names_d)
return finv
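#===============================================================================
# the new-field detection pattern used by the sampling loops: snapshot the
# field names before the algorithm runs, then diff against the names after.
# shown here with plain lists; the real code builds them from vlay.fields().
#===============================================================================
ofnl = ['xid', 'haz_50yr1']                    #field names before the run
nfnl = ['xid', 'haz_50yr1', 'haz_100yr1']      #field names after the run

new_fn = set(nfnl).difference(ofnl)            #whatever the algorithm added
assert len(new_fn) == 1, 'unexpected algo behavior... bad new field count: %s'%new_fn
new_fn = list(new_fn)[0]                       #'haz_100yr1'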
def samp_passet(self, #sample complex asset values using per-asset stats
finv_raw,
raster_l,
psmp_fieldName='sample_stat', #field name containing sampling statistic
):
"""
basically a wrapper around samp_vals_cplx()
iterating with selections by sample_stat
"""
#=======================================================================
# defaults
#=======================================================================
log = self.logger.getChild('samp_passet')
gtype=self.gtype
#=======================================================================
# checks----
#=======================================================================
assert ('Polygon' in gtype) or ('Line' in gtype)
#=======================================================================
# #data checks on sampling field name
#=======================================================================
assert psmp_fieldName in [f.name() for f in finv_raw.fields()], \
'missing psmp_fieldName \'%s\' on finv_raw'%psmp_fieldName
pser = vlay_get_fdata(finv_raw, fieldn=psmp_fieldName, logger=log, fmt='ser')
assert not pser.isna().any(), 'got %i nulls in sampling field \'%s\''%(
pser.isna().sum(), psmp_fieldName)
smp_stats_l = list(pser.unique()) #set of statistics we'll sample on
miss_l = set(smp_stats_l).difference(self.psmp_codes.keys())
assert len(miss_l)==0, 'got %i unrecognized sampling statistic keys on \'%s\': \n %s'%(
len(miss_l), psmp_fieldName, miss_l)
#=======================================================================
# helpers
#=======================================================================
def get_meta(vlay, #build meta entry
):
return {'sfcnt':vlay.selectedFeatureCount(),
'fcnt':vlay.dataProvider().featureCount(),
'name':vlay.name(),
'fn_l':[f.name() for f in vlay.fields()]}
#=======================================================================
# sample loop-----
#=======================================================================
self.names_d = dict()
log.info('sampling %i raster layers w/ \'%s\' and %i stats: %s'%(
len(raster_l), gtype, len(smp_stats_l), smp_stats_l))
meta_d = {'raw':get_meta(finv_raw)}
#===================================================================
# #loop through each statistic
#===================================================================
"""
looping through each stat first (rather than rlay)
so we can collect features rather than attributes
and re-use the other function
"""
lays_d = dict() #container for results per psmp
for psmp_stat in smp_stats_l:
log = self.logger.getChild('samp_passet.%s'%psmp_stat)
finv_raw.removeSelection()
#===================================================================
# #select those matching this stat
#===================================================================
processing.run('qgis:selectbyattribute',
{'FIELD' : psmp_fieldName, 'INPUT' : finv_raw,
'METHOD' : 0, #select new
'OPERATOR' : 0, #equals
'VALUE' : psmp_stat},feedback=self.feedback)
assert finv_raw.selectedFeatureCount() >0, 'failed to get any \'%s\''%psmp_stat
log.debug('with \'%s\' got %i/%i'%(
psmp_stat, finv_raw.selectedFeatureCount(), finv_raw.dataProvider().featureCount()))
#===================================================================
# #sample these
#===================================================================
finv_stat = self.samp_vals_cplx(finv_raw, raster_l, psmp_stat=psmp_stat,
selected=True, logger=log)
assert finv_raw.selectedFeatureCount() == finv_stat.dataProvider().featureCount()
finv_stat.setName('finv_%s_%i'%(psmp_stat, finv_stat.dataProvider().featureCount()))
#===================================================================
# wrap
#===================================================================
lays_d[psmp_stat] = finv_stat
meta_d[psmp_stat] = get_meta(finv_stat)
log.debug(meta_d[psmp_stat])
#=======================================================================
# merge each section
#=======================================================================
log = self.logger.getChild('samp_passet')
log.debug('merging %i: %s'%(len(lays_d), list(lays_d.keys())))
finv_res = processing.run('native:mergevectorlayers',
{ 'CRS' : self.qproj.crs(),
'LAYERS' :list(lays_d.values()),
'OUTPUT' : 'TEMPORARY_OUTPUT'},feedback=self.feedback)['OUTPUT']
#drop the meta fields
finv_res = processing.run('qgis:deletecolumn',
{ 'COLUMN' : ['layer', 'path', psmp_fieldName],
'INPUT' : finv_res,'OUTPUT' : 'TEMPORARY_OUTPUT' },
feedback=self.feedback)['OUTPUT']
assert finv_res.dataProvider().featureCount()==finv_raw.dataProvider().featureCount()
#check field alignment
miss_l = set([f.name() for f in finv_res.fields()]).symmetric_difference(
[f.name() for f in finv_stat.fields()])
"""only the psmp_fieldName should be missing"""
assert len(miss_l)==1,'fieldName mismatch on merge \n %s'%miss_l
#=======================================================================
# wrap
#=======================================================================
meta_d['result'] = get_meta(finv_res)
log.info('finished on %i'%len(meta_d))
self.names_d
return finv_res
"""
view(pd.DataFrame.from_dict(meta_d, orient='index'))
view(finv_res)
""" | def samp_passet(self, #sample complex asset values using per-asset stats
finv_raw,
raster_l,
psmp_fieldName='sample_stat', #field name containing sampling statistic
):
"""
basically a wrapper around samp_vals_cplx()
iterating with selections by sample_stat
"""
#=======================================================================
# defaults
#=======================================================================
log = self.logger.getChild('samp_passet')
gtype=self.gtype
#=======================================================================
# checks----
#=======================================================================
assert ('Polygon' in gtype) or ('Line' in gtype)
#=======================================================================
# #data checks on sampling field name
#=======================================================================
assert psmp_fieldName in [f.name() for f in finv_raw.fields()], \
'missing psmp_fieldName \'%s\' on finv_raw'%psmp_fieldName
pser = vlay_get_fdata(finv_raw, fieldn=psmp_fieldName, logger=log, fmt='ser')
assert not pser.isna().any(), 'got %i nulls in sampling field \'%s\''%(
pser.isna().sum(), psmp_fieldName)
smp_stats_l = list(pser.unique()) #set of statistics we'll sample on
miss_l = set(smp_stats_l).difference(self.psmp_codes.keys())
assert len(miss_l)==0, 'got %i unrecognized sampling statistc keys on \'%s\': \n %s'%(
len(miss_l), psmp_fieldName, miss_l)
#=======================================================================
# helpers
#=======================================================================
def get_meta(vlay, #build meta entry
):
return {'sfcnt':vlay.selectedFeatureCount(),
'fcnt':vlay.dataProvider().featureCount(),
'name':vlay.name(),
'fn_l':[f.name() for f in vlay.fields()]}
#=======================================================================
# sample loop-----
#=======================================================================
self.names_d = dict()
log.info('sampling %i raster layers w/ \'%s\' and %i stats: %s'%(
len(raster_l), gtype, len(smp_stats_l), smp_stats_l))
meta_d = {'raw':get_meta(finv_raw)}
#===================================================================
# #loop through each statistic
#===================================================================
"""
looping through each stat first (rather than rlay)
so we can collect features rather than attributes
and re-use the other function
"""
lays_d = dict() #container for results per psmp
for psmp_stat in smp_stats_l:
log = self.logger.getChild('samp_passet.%s'%psmp_stat)
finv_raw.removeSelection()
#===================================================================
# #select those matching this stat
#===================================================================
processing.run('qgis:selectbyattribute',
{'FIELD' : psmp_fieldName, 'INPUT' : finv_raw,
'METHOD' : 0, #select new
'OPERATOR' : 0, #equals
'VALUE' : psmp_stat},feedback=self.feedback)
assert finv_raw.selectedFeatureCount() >0, 'failed to get any \'%s\''%psmp_stat
log.debug('with \'%s\' got %i/%i'%(
psmp_stat, finv_raw.selectedFeatureCount(), finv_raw.dataProvider().featureCount()))
#===================================================================
# #sample these
#===================================================================
finv_stat = self.samp_vals_cplx(finv_raw, raster_l, psmp_stat=psmp_stat,
selected=True, logger=log)
assert finv_raw.selectedFeatureCount() == finv_stat.dataProvider().featureCount()
finv_stat.setName('finv_%s_%i'%(psmp_stat, finv_stat.dataProvider().featureCount()))
#===================================================================
# wrap
#===================================================================
lays_d[psmp_stat] = finv_stat
meta_d[psmp_stat] = get_meta(finv_stat)
log.debug(meta_d[psmp_stat])
#=======================================================================
# merge each section
#=======================================================================
log = self.logger.getChild('samp_passet')
log.debug('merging %i: %s'%(len(lays_d), list(lays_d.keys())))
finv_res = processing.run('native:mergevectorlayers',
{ 'CRS' : self.qproj.crs(),
'LAYERS' :list(lays_d.values()),
'OUTPUT' : 'TEMPORARY_OUTPUT'},feedback=self.feedback)['OUTPUT']
#drop the meta fields
finv_res = processing.run('qgis:deletecolumn',
{ 'COLUMN' : ['layer', 'path', psmp_fieldName],
'INPUT' : finv_res,'OUTPUT' : 'TEMPORARY_OUTPUT' },
feedback=self.feedback)['OUTPUT']
assert finv_res.dataProvider().featureCount()==finv_raw.dataProvider().featureCount()
#check field alignment
miss_l = set([f.name() for f in finv_res.fields()]).symmetric_difference(
[f.name() for f in finv_stat.fields()])
"""only the psmp_fieldName should be missing"""
assert len(miss_l)==1,'fieldName mismatch on merge \n %s'%miss_l
#=======================================================================
# warp
#=======================================================================
meta_d['result'] = get_meta(finv_res)
log.info('finished on %i'%len(meta_d))
self.names_d
return finv_res
"""
view(pd.DataFrame.from_dict(meta_d, orient='index'))
view(finv_res)
""" |
def samp_inun_line(self, #inundation percent for Line
finv, raster_l, dtm_rlay, dthresh,
):
""""
couldn't find a great pre-made algo
option 1:
SAGA profile from lines (does not retain line attributes)
join attributes by nearest (to retrieve XID)
option 2:
Generate points (pixel centroids) along line
(does not retain line attributes)
generates points on null pixel values
sample points
join by nearest
option 3:
add geometry attributes
Points along geometry (retains attribute)
sample raster
count those above threshold
divide by total for each line
get % above threshold for each line
get km inundated for each line
"""
#=======================================================================
# defaults
#=======================================================================
log = self.logger.getChild('samp_inun_line')
gtype=self.gtype
#setup temp dir
import tempfile #todo: move this up top
temp_dir = tempfile.mkdtemp()
#=======================================================================
# precheck
#=======================================================================
dp = finv.dataProvider()
assert isinstance(dtm_rlay, QgsRasterLayer)
assert isinstance(dthresh, float), 'expected float for dthresh. got %s'%type(dthresh)
assert 'Memory' in dp.storageType() #zonal stats makes direct edits
assert 'Line' in gtype
#=======================================================================
# sample loop---------
#=======================================================================
"""
too memory intensive to handle writing of all these.
an advanced user could retrieve them from the working folder if desired
"""
names_d = dict()
for indxr, rlay in enumerate(raster_l):
log = self.logger.getChild('samp_inunL.%s'%rlay.name())
ofnl = [field.name() for field in finv.fields()]
#===================================================================
# #get depth raster
#===================================================================
dep_rlay = self._get_depr(dtm_rlay, log, temp_dir, rlay)
#===============================================================
# #convert to points
#===============================================================
params_d = { 'DISTANCE' : dep_rlay.rasterUnitsPerPixelX(),
'END_OFFSET' : 0,
'INPUT' : finv,
'OUTPUT' : 'TEMPORARY_OUTPUT',
'START_OFFSET' : 0 }
res_d = processing.run('native:pointsalonglines', params_d, feedback=self.feedback)
fpts_vlay = res_d['OUTPUT']
#===============================================================
# #sample the raster
#===============================================================
ofnl2 = [field.name() for field in fpts_vlay.fields()]
params_d = { 'COLUMN_PREFIX' : rlay.name(),
'INPUT' : fpts_vlay,
'OUTPUT' : 'TEMPORARY_OUTPUT',
'RASTERCOPY' : dep_rlay}
res_d = processing.run('qgis:rastersampling', params_d, feedback=self.feedback)
fpts_vlay = res_d['OUTPUT']
#get new field name
new_fn = set([field.name() for field in fpts_vlay.fields()]).difference(ofnl2) #new field names not in the old
assert len(new_fn)==1
new_fn = list(new_fn)[0]
#===================================================================
# clean/pull data
#===================================================================
#drop all the other fields
fpts_vlay = self.deletecolumn(fpts_vlay,[new_fn, self.cid], invert=True, logger=log )
#pull data
"""
the builtin statistics algo doesn't do a good job handling nulls
"""
pts_df = vlay_get_fdf(fpts_vlay, logger=log)
#===================================================================
# calc stats
#===================================================================
#set those below threshold to null
boolidx = pts_df[new_fn]<=dthresh
pts_df.loc[boolidx, new_fn] = np.nan
log.debug('set %i (of %i) \'%s\' vals <= %.2f to null'%(
boolidx.sum(), len(boolidx), new_fn, dthresh))
"""
view(pts_df)
(pts_df[self.cid]==4).sum()
"""
#get count of REAL values in each xid group
pts_df['all']=1 #add dummy column for the denominator
sdf = pts_df.groupby(self.cid).count().reset_index(drop=False).rename(
columns={new_fn:'real'})
#get ratio (non-NAN count / all count)
new_fn = rlay.name()
sdf[new_fn] = sdf['real'].divide(sdf['all']).round(self.prec)
assert sdf[new_fn].max() <=1
#===================================================================
# link in result
#===================================================================
#convert df back to a mlay
pstat_vlay = self.vlay_new_df2(sdf.drop(['all', 'real'], axis=1),
layname='%s_stats'%(finv.name()), logger=log)
#join w/ algo
params_d = { 'DISCARD_NONMATCHING' : False,
'FIELD' : self.cid,
'FIELDS_TO_COPY' : [new_fn],
'FIELD_2' : self.cid,
'INPUT' : finv,
'INPUT_2' : pstat_vlay,
'METHOD' : 1, #Take attributes of the first matching feature only (one-to-one)
'OUTPUT' : 'TEMPORARY_OUTPUT',
'PREFIX' : ''}
res_d = processing.run('native:joinattributestable', params_d, feedback=self.feedback)
finv = res_d['OUTPUT']
#===================================================================
# check/correct field names
#===================================================================
"""
algos don't assign good field names.
collecting a conversion dictionary then adjusting below
"""
#get/updarte the field names
nfnl = [field.name() for field in finv.fields()]
new_fn = set(nfnl).difference(ofnl) #new field names not in the old
if len(new_fn) > 1:
raise Error('unexpected algo behavior... bad new field count: %s'%new_fn)
elif len(new_fn) == 1:
names_d[list(new_fn)[0]] = rlay.name()
log.debug('updated names_d w/ %s'%rlay.name())
else:
raise Error('bad fn match')
#=======================================================================
# wrap-------------
#=======================================================================
self.names_d = dict() #names should be fine
log.debug('finished')
"""
view(finv)
"""
return finv
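
# --- Illustrative sketch (not part of the original source) ---
# A minimal, standalone pandas example of the per-feature calculation that
# samp_inun_line() performs after sampling: points whose sampled depth is at
# or below dthresh are nulled, then the ratio of remaining (real) values to
# all points in each cid group gives the inundation percentage for that line.
# The column names 'xid' and 'depth' are hypothetical.
import numpy as np
import pandas as pd

pts_df = pd.DataFrame({
    'xid':   [1, 1, 1, 1, 2, 2],                 # feature id of each sampled point
    'depth': [0.0, 0.2, 0.9, 1.4, 0.0, 0.05],    # sampled depth at each point
    })
dthresh = 0.1                                     # depths <= threshold treated as dry

pts_df.loc[pts_df['depth'] <= dthresh, 'depth'] = np.nan   # null the dry points
pts_df['all'] = 1                                           # denominator column

sdf = pts_df.groupby('xid').count().rename(columns={'depth': 'real'})
sdf['inun_pct'] = (sdf['real'] / sdf['all']).round(2)       # wet fraction per line
print(sdf)   # xid 1 -> 0.75 (3 of 4 points wet), xid 2 -> 0.0
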
Python
def line_sample_stats(self, #get raster stats using a line
input_obj, #line vector layer with geometry to sample from
rlay, #raster to sample
sample_stats, #list of stats to sample
indxr=0, #used as a prefix for the new field names
logger=None,
):
"""
sampling a raster layer with a line and a statistic
TODO: check if using the following is faster:
Densify by Interval
Drape
Extract Z
"""
if logger is None: logger=self.logger
log=logger.getChild('line_sample_stats')
log.debug('on %s'%(input_obj))
#drop everything to lower case
sample_stats = [e.lower() for e in sample_stats]
#===============================================================
# #convert to points
#===============================================================
params_d = { 'DISTANCE' : rlay.rasterUnitsPerPixelX(),
'END_OFFSET' : 0,
'INPUT' : input_obj,
'OUTPUT' : 'TEMPORARY_OUTPUT',
'START_OFFSET' : 0 }
res_d = processing.run('native:pointsalonglines', params_d, feedback=self.feedback)
fpts_vlay = res_d['OUTPUT']
#===============================================================
# #sample the raster
#===============================================================
ofnl2 = [field.name() for field in fpts_vlay.fields()]
params_d = { 'COLUMN_PREFIX' : rlay.name(),
'INPUT' : fpts_vlay,
'OUTPUT' : 'TEMPORARY_OUTPUT',
'RASTERCOPY' : rlay}
res_d = processing.run('qgis:rastersampling', params_d, feedback=self.feedback)
fpts_vlay = res_d['OUTPUT']
"""
view(fpts_vlay)
"""
#get new field name
new_fn = set([field.name() for field in fpts_vlay.fields()]
).difference(ofnl2) #new field names not in the old
assert len(new_fn)==1
new_fn = list(new_fn)[0]
#===============================================================
# get stats
#===============================================================
"""note this does not return xid values where everything sampled as null"""
params_d = { 'CATEGORIES_FIELD_NAME' : [self.cid],
'INPUT' : fpts_vlay,
'OUTPUT' : 'TEMPORARY_OUTPUT',
'VALUES_FIELD_NAME' :new_fn}
res_d = processing.run('qgis:statisticsbycategories', params_d, feedback=self.feedback)
stat_tbl = res_d['OUTPUT']
#===============================================================
# join stats back to line_vlay
#===============================================================
#check that the sample stat is in there
s = set(sample_stats).difference([field.name() for field in stat_tbl.fields()])
assert len(s)==0, 'requested sample statistics \'%s\' failed to generate'%s
#run algo
params_d = { 'DISCARD_NONMATCHING' : False,
'FIELD' : self.cid,
'FIELDS_TO_COPY' : sample_stats,
'FIELD_2' : self.cid,
'INPUT' : input_obj,
'INPUT_2' : stat_tbl,
'METHOD' : 1, #Take attributes of the first matching feature only (one-to-one)
'OUTPUT' : 'TEMPORARY_OUTPUT',
'PREFIX' : '%i_'%indxr, #prefix string for new field
}
res_d = processing.run('native:joinattributestable', params_d, feedback=self.feedback)
line_vlay = res_d['OUTPUT']
"""
view(line_vlay)
"""
log.debug('finished on %s w/ %i'%(line_vlay.name(), len(line_vlay)))
return line_vlay
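
# --- Illustrative sketch (not part of the original source) ---
# line_sample_stats() delegates the per-feature aggregation to
# 'qgis:statisticsbycategories' and joins the requested statistics back onto
# the line layer with an index prefix. The pandas equivalent of that
# aggregation step, for sample_stats=['mean', 'max'] and indxr=0, looks
# roughly like this (the 'xid' and 'depth' names are hypothetical):
import pandas as pd

samples = pd.DataFrame({
    'xid':   [1, 1, 1, 2, 2],
    'depth': [0.2, 0.4, 0.6, 1.0, 1.2],
    })
stat_tbl = samples.groupby('xid')['depth'].agg(['mean', 'max']).reset_index()
# the joined fields then carry the '%i_'%indxr prefix, e.g. '0_mean', '0_max'
stat_tbl = stat_tbl.rename(columns={s: '0_%s' % s for s in ['mean', 'max']})
print(stat_tbl)
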
Python
def update_cf(self, cf_fp): #configured control file updater
"""make sure you write the file first"""
return self.set_cf_pars(
{
'dmg_fps':(
{'expos':self.out_fp},
'#\'expos\' file path set from rsamp.py at %s'%(datetime.datetime.now().strftime('%Y-%m-%d %H.%M.%S')),
),
'parameters':(
{'as_inun':str(self.as_inun)},
)
},
cf_fp = cf_fp
)
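
# --- Illustrative sketch (not part of the original source) ---
# update_cf() records the sampler output path and the as_inun flag into the
# model control file through self.set_cf_pars(). Assuming an INI-style control
# file (an assumption for illustration only; the file path and the 'expos'
# value below are hypothetical), the resulting update is roughly:
import configparser

cf = configparser.ConfigParser()
cf.read('CanFlood_control.txt')                       # hypothetical control file
for section in ('dmg_fps', 'parameters'):
    if not cf.has_section(section):
        cf.add_section(section)
cf.set('dmg_fps', 'expos', r'C:\temp\expos_sample.csv')   # hypothetical output path
cf.set('parameters', 'as_inun', 'False')
with open('CanFlood_control.txt', 'w') as f:
    cf.write(f)
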
Python
def calc_ead(self, #get EAD from a set of impacts per event
df_raw, #xid: aep
ltail = None,
rtail = None,
drop_tails = None, #whether to remove the dummy tail values from results
dx = None, #damage step for integration (default:None)
logger = None
):
"""
#======================================================================
# inputs
#======================================================================
ltail: left tail treatment code (low prob high damage)
flat: extend the max damage to the zero probability event
extrapolate: extend the fucntion to the zero aep value (interp1d)
float: extend the function to this damage value (must be greater than max)
none: don't extend the tail (not recommended)
rtail: right tail treatment (high prob low damage)
extrapolate: extend the function to the zero damage value
float: extend the function to this aep
none: don't extend (not recommended)
"""
#======================================================================
# setups and defaults
#======================================================================
if logger is None: logger = self.logger
log = logger.getChild('calc_ead')
if ltail is None: ltail = self.ltail
if rtail is None: rtail = self.rtail
if drop_tails is None: drop_tails=self.drop_tails
assert isinstance(drop_tails, bool)
#format tail values
assert not ltail is None
assert not rtail is None
if not ltail in ['flat', 'extrapolate', 'none']:
try:
ltail = float(ltail)
except Exception as e:
raise Error('failed to convert \'ltail\'=\'%s\' to numeric \n %s'%(ltail, e))
if rtail == 'flat':
raise Error('rtail=flat. not implemented')
if not rtail in ['extrapolate', 'none']:
rtail = float(rtail)
log.info('getting ead on %s w/ ltail=\'%s\' and rtail=\'%s\''%(
str(df_raw.shape), ltail, rtail))
#=======================================================================
# data prep-----
#=======================================================================
"""
view(df_raw)
"""
df = df_raw.copy().sort_index(axis=1, ascending=False)
#=======================================================================
# no value----
#=======================================================================
"""
this can happen for small inventories w/ no failure probs
"""
#identify columns to calc ead for
bx = (df > 0).any(axis=1) #only want those with some real damages
if not bx.any():
log.warning('%s got no positive damages %s'%(self.tag, str(df.shape)))
#apply dummy tails as 'flat'
if not ltail is None:
df.loc[:,0] = df.iloc[:,0]
if not rtail is None:
aep_val = max(df.columns.tolist())*(1+10**-(self.prec+2))
df[aep_val] = 0
#re-arrange columns so x is ascending
df = df.sort_index(ascending=False, axis=1)
#apply dummy ead
df['ead'] = 0
#=======================================================================
# some values---------
#=======================================================================
else:
#=======================================================================
# get tail values-----
#=======================================================================
self.check_eDmg(df, dropna=True, logger=log)
#======================================================================
# left tail
#======================================================================
#flat projection
if ltail == 'flat':
"""
view(df)
"""
df.loc[:,0] = df.iloc[:,-1]
if len(df)==1:
self.extrap_vals_d[0] = df.loc[:,0].mean().round(self.prec) #store for later
elif ltail == 'extrapolate': #DEFAULT
df.loc[bx,0] = df.loc[bx, :].apply(self._extrap_rCurve, axis=1, left=True)
#extrap value will be different for each entry
if len(df)==1:
self.extrap_vals_d[0] = df.loc[:,0].mean().round(self.prec) #store for later
elif isinstance(ltail, float):
"""this cant be a good idea...."""
df.loc[bx,0] = ltail
self.extrap_vals_d[0] = ltail #store for later
elif ltail == 'none':
pass
else:
raise Error('unexpected ltail key: %s'%ltail)
#======================================================================
# right tail
#======================================================================
if rtail == 'extrapolate':
"""just using the average for now...
could extrapolate for each asset but need an alternate method"""
aep_ser = df.loc[bx, :].apply(
self._extrap_rCurve, axis=1, left=False)
aep_val = round(aep_ser.mean(), 5)
assert aep_val > df.columns.max()
df.loc[bx, aep_val] = 0
log.info('using right intersection of aep= %.2e from average extrapolation'%(
aep_val))
self.extrap_vals_d[aep_val] = 0 #store for later
elif isinstance(rtail, float): #DEFAULT
aep_val = round(rtail, 5)
assert aep_val > df.columns.max(), 'passed rtail value (%.2f) not > max aep (%.2f)'%(
aep_val, df.columns.max())
df.loc[bx, aep_val] = 0
log.debug('setting ZeroDamage event from user passed \'rtail\' aep=%.7f'%(
aep_val))
self.extrap_vals_d[aep_val] = 0 #store for later
elif rtail == 'flat':
#set the zero damage year as the lowest year in the model (with a small buffer)
aep_val = max(df.columns.tolist())*(1+10**-(self.prec+2))
df.loc[bx, aep_val] = 0
log.info('rtail=\'flat\' setting ZeroDamage event as aep=%.7f'%aep_val)
elif rtail == 'none':
log.warning('no rtail extrapolation specified! leads to invalid integration bounds!')
else:
raise Error('unexpected rtail %s'%rtail)
#re-arrange columns so x is ascending
df = df.sort_index(ascending=False, axis=1)
#======================================================================
# check again
#======================================================================
self.check_eDmg(df, dropna=True, logger=log)
#======================================================================
# calc EAD-----------
#======================================================================
#get reasonable dx (integration step along damage axis)
"""todo: allow the user to set t his"""
if dx is None:
dx = df.max().max()/100
assert isinstance(dx, float)
#apply the ead func
df.loc[bx, 'ead'] = df.loc[bx, :].apply(
self._get_ev, axis=1, dx=dx)
df.loc[:, 'ead'] = df['ead'].fillna(0) #fill remainder w/ zeros
#======================================================================
# check it
#======================================================================
boolidx = df['ead'] < 0
if boolidx.any():
log.warning('got %i (of %i) negative eads'%( boolidx.sum(), len(boolidx)))
"""
df.columns.dtype
"""
#======================================================================
# clean results
#======================================================================
if drop_tails:
#just add the results values onto the raw
res_df = df_raw.sort_index(axis=1, ascending=False).join(df['ead']).round(self.prec)
else:
#take everything
res_df = df.round(self.prec)
#final check
"""nasty conversion because we use aep as a column name..."""
cdf = res_df.drop('ead', axis=1)
cdf.columns = cdf.columns.astype(float)
self.check_eDmg(cdf, dropna=True, logger=log)
return res_df
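
# --- Illustrative sketch (not part of the original source) ---
# The tail handling above, reduced to a standalone frame: columns are event
# AEPs and rows are assets. ltail='flat' copies the rarest event's damage into
# a dummy aep=0 column, a user rtail (here 0.5) anchors a zero-damage event,
# and each row is then integrated to an 'ead' column. The values are made up,
# and scipy's maintained 'trapezoid' name stands in for the older 'trapz'
# alias called by _get_ev().
import pandas as pd
from scipy.integrate import trapezoid

df = pd.DataFrame([[120., 80., 10.],
                   [ 60., 30.,  5.]], columns=[0.01, 0.05, 0.10])   # aep: damage

df[0.0] = df[0.01]           # ltail='flat': extend the rarest damage to aep=0
df[0.5] = 0.0                # rtail=0.5: zero damage at the frequent end
df = df.sort_index(axis=1, ascending=False)    # impacts now ascend left to right

# integrate each row as _get_ev() does: impacts on the x axis, AEP on the y axis
df['ead'] = df.apply(lambda ser: trapezoid(ser.index.values, x=ser.values),
                     axis=1).round(2)
print(df)    # adds an 'ead' column (~9.45 and ~4.28 here)
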
Python
def _get_indeEV(self,
inde_df #prob, consq, mutual exclusivity flag for each exposure event
):
"""
get the expected value at an asset with
n>1 independent failure events (w/ probabilities)
and 1 noFail event
"""
#=======================================================================
# prechecks
#=======================================================================
#check the columns
miss_l = set(['prob', 'consq', 'mutEx']).symmetric_difference(inde_df.columns)
assert len(miss_l)==0
#=======================================================================
# failures---------
#=======================================================================
bxf = ~inde_df['mutEx']
#=======================================================================
# assemble complete scenario matrix
#=======================================================================
n = len(inde_df[bxf])
#build it
if not n in self.scen_ar_d:
scenFail_ar = np.array([i for i in itertools.product(['yes','no'], repeat=n)])
self.scen_ar_d[n] = copy.copy(scenFail_ar)
#retrieve pre-built
else:
scenFail_ar = copy.copy(self.scen_ar_d[n])
#=======================================================================
# probs
#=======================================================================
sFailP_df = pd.DataFrame(scenFail_ar, columns=inde_df[bxf].index)
#expand probabilities to match size
prob_ar = np.tile(inde_df.loc[bxf, 'prob'].to_frame().T.values, (len(sFailP_df), 1))
#swap in positives
sFailP_df = sFailP_df.where(
np.invert(sFailP_df=='yes'),
prob_ar, inplace=False)
#swap in negatives
sFailP_df = sFailP_df.where(
np.invert(sFailP_df=='no'),
1-prob_ar, inplace=False).astype(float)
#combine
sFailP_df['pTotal'] = sFailP_df.prod(axis=1)
assert round(sFailP_df['pTotal'].sum(), self.prec)==1, inde_df
#=======================================================================
# consequences
#=======================================================================
sFailC_df = pd.DataFrame(scenFail_ar, columns=inde_df[bxf].index).replace(
{'yes':1.0, 'no':0.0}).astype(float)
#add in consequences
sFailC_df = sFailC_df.multiply(inde_df.loc[bxf, 'consq'])
#get maximums
sFailC_df['cTotal'] = sFailC_df.max(axis=1)
#=======================================================================
# expected values
#=======================================================================
evFail_ser = sFailP_df['pTotal']*sFailC_df['cTotal']
#=======================================================================
# total-------
#=======================================================================
noFail_ar = inde_df.loc[~bxf, ['prob', 'consq']].iloc[0, :].values
return evFail_ser.sum() + noFail_ar[0]*noFail_ar[1]
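
# --- Illustrative worked example (not part of the original source) ---
# _get_indeEV() enumerates every yes/no combination of the independent failure
# events, multiplies the per-event probabilities for each combination, takes
# the worst (max) consequence realized in that combination, and finally adds
# the probability-weighted no-fail contribution. A standalone version for two
# independent events (all numbers are made up):
import itertools
import numpy as np
import pandas as pd

fail_df = pd.DataFrame({'prob': [0.10, 0.05], 'consq': [100., 250.]},
                       index=['lev_A', 'lev_B'])
noFail_prob, noFail_consq = 0.85, 10.    # the remaining (mutually exclusive) event

ev = 0.0
for combo in itertools.product([True, False], repeat=len(fail_df)):
    mask = np.array(combo)
    p = np.prod(np.where(mask, fail_df['prob'], 1 - fail_df['prob']))
    c = fail_df.loc[mask, 'consq'].max() if mask.any() else 0.0
    ev += p * c
ev += noFail_prob * noFail_consq
print(round(ev, 2))   # ~30.5 expected consequence for this asset
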
Python
def _get_ev(self, #integration caller
ser, #row from damage results
dx = 0.1,
):
"""
should integrate along the damage axis (0 - infinity)
"""
#print('%i.%s %s'%(self.cnt, ser.name, ser.to_dict()))
x = ser.tolist() #impacts
y = ser.index.values.round(self.prec+2).tolist() #AEPs
"""
from matplotlib import pyplot as plt
#build plot
lines = plt.plot(x, y)
#lines = plt.semilogx(x, y)
#format
ax = plt.gca()
ax.grid()
ax.set_xlim(1, max(x)) #aep limits
ax.set_ylabel('AEP')
ax.set_xlabel('impacts')
plt.show()
self.rtail
"""
#======================================================================
# ser_inv = ser.sort_index(ascending=False)
#
# x = ser_inv.tolist()
# y = ser_inv.index.tolist()
#
#======================================================================
if self.integrate == 'trapz':
ead_tot = integrate.trapz(
y, #yaxis - aeps
x=x, #xaxis = damages
dx = dx)
elif self.integrate == 'simps':
self.logger.warning('integration method not tested')
ead_tot = integrate.simps(
y, #yaxis - aeps
x=x, #xaxis = damages
dx = dx)
else:
raise Error('integration method \'%s\' not recognized'%self.integrate)
return round(ead_tot, self.prec)
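
# --- Illustrative sketch (not part of the original source) ---
# _get_ev() reads the row's index as AEP values and its values as impacts,
# then integrates AEP along the impact axis. A direct numeric check for one
# row, using scipy's maintained 'trapezoid' name in place of the 'trapz'
# alias above (the curve values are made up):
import pandas as pd
from scipy.integrate import trapezoid

row = pd.Series({0.50: 0., 0.10: 10., 0.05: 40., 0.02: 80., 0.01: 100., 0.00: 120.})
x = row.tolist()                  # impacts
y = row.index.values.tolist()     # AEPs
print(round(trapezoid(y, x=x), 4))    # ~7.05 for this curve
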
Python
def plot_riskCurve(self, #risk plot
res_ttl=None, #dataframe(columns=['aep','ari','impacts']
y1lab='AEP', #yaxis label and plot type control
#'impacts': impacts vs. ARI (use self.impact_name)
#'AEP': AEP vs. impacts
impactFmtFunc=None, #tick label format function for impact values
#lambda x:'{:,.0f}'.format(x) #thousands comma
val_str=None, #text to write on plot. see _get_val_str()
figsize=None, logger=None, plotTag=None,
):
"""
summary risk results plotter
see self._lineToAx() for formatting
"""
#======================================================================
# defaults
#======================================================================
if logger is None: logger=self.logger
log = logger.getChild('plot_riskCurve')
plt, matplotlib = self.plt, self.matplotlib
if figsize is None: figsize = self.figsize
if y1lab =='impacts':
y1lab = self.impact_name
if impactFmtFunc is None: impactFmtFunc=self.impactFmtFunc
if res_ttl is None: res_ttl = self.data_d['r_ttl']
if plotTag is None: plotTag=self.tag
log.debug('on %s'%res_ttl)
#=======================================================================
# prechecks
#=======================================================================
assert isinstance(res_ttl, pd.DataFrame)
miss_l = set(['aep', 'ari', 'impacts']).difference(res_ttl.columns)
assert len(miss_l)==0, miss_l
#======================================================================
# labels
#======================================================================
val_str = self._get_val_str(val_str, impactFmtFunc)
if y1lab == 'AEP':
title = '%s %s AEP-Impacts plot for %i events'%(self.name, plotTag, len(res_ttl))
xlab=self.impact_name
elif y1lab == self.impact_name:
title = '%s %s Impacts-ARI plot for %i events'%(self.name, plotTag, len(res_ttl))
xlab='ARI'
else:
raise Error('bad y1lab: %s'%y1lab)
#=======================================================================
# figure setup
#=======================================================================
"""
plt.show()
"""
plt.close()
fig = plt.figure(figsize=figsize, constrained_layout = True)
#axis setup
ax1 = fig.add_subplot(111)
#ax2 = ax1.twinx()
# axis label setup
fig.suptitle(title)
ax1.set_ylabel(y1lab)
ax1.set_xlabel(xlab)
#=======================================================================
# add the line
#=======================================================================
self._lineToAx(res_ttl, y1lab, ax1, lineLabel=self.name)
#set limits
if y1lab == 'AEP':
ax1.set_xlim(0, max(res_ttl['impacts'])) #impact limits
ax1.set_ylim(0, max(res_ttl['aep'])*1.1)
xLocScale, yLocScale = 0.3,0.6
elif y1lab == self.impact_name:
ax1.set_xlim(max(res_ttl['ari']), 1) #ari limits (reversed)
xLocScale, yLocScale = 0.2,0.1
else:
log.warning('unrecognized y1lab: %s'%y1lab)
xLocScale, yLocScale = 0.1,0.1
#=======================================================================
# post format
#=======================================================================
self._postFmt(ax1, val_str=val_str, xLocScale=xLocScale, yLocScale=yLocScale)
#assign tick formatter functions
if y1lab == 'AEP':
xfmtFunc = impactFmtFunc
yfmtFunc=lambda x:'%.4f'%x
elif y1lab==self.impact_name:
xfmtFunc = lambda x:'{:,.0f}'.format(x) #thousands separator
yfmtFunc=impactFmtFunc
self._tickSet(ax1, xfmtFunc=xfmtFunc, yfmtFunc=yfmtFunc)
return fig
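
# --- Illustrative sketch (not part of the original source) ---
# A standalone matplotlib approximation of the y1lab='AEP' figure built above:
# impacts on the x axis, AEP on the y axis, with the thousands-comma impact
# formatter used for impactFmtFunc. The data values are made up.
import matplotlib
matplotlib.use('Agg')                     # headless backend for this sketch
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter

impacts = [0, 10000, 45000, 90000, 140000]
aeps = [0.50, 0.10, 0.05, 0.02, 0.01]

fig, ax = plt.subplots(figsize=(6, 4), constrained_layout=True)
ax.plot(impacts, aeps)
ax.set_xlabel('impacts')
ax.set_ylabel('AEP')
ax.set_xlim(0, max(impacts))
ax.set_ylim(0, max(aeps)*1.1)
ax.xaxis.set_major_formatter(FuncFormatter(lambda x, pos: '{:,.0f}'.format(x)))
ax.yaxis.set_major_formatter(FuncFormatter(lambda y, pos: '%.4f' % y))
ax.grid()
fig.savefig('risk_curve_sketch.png')      # hypothetical output name
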