content (stringlengths 35–762k) | sha1 (stringlengths 40) | id (int64 0–3.66M) |
---|---|---|
def load_ref_system():
""" Returns alpha-d-rhamnopyranose as found in the IQMol fragment library.
All credit to https://github.com/nutjunkie/IQmol
"""
return psr.make_system("""
C -0.8728 1.4263 -0.3270
O -1.5909 0.3677 0.2833
C -1.1433 -0.9887 0.0086
C 0.3390 -1.0821 0.4414
O 0.8751 -2.3755 0.1209
C 1.1868 -0.1193 -0.4147
C 0.6705 1.3308 -0.3098
O 1.0480 1.9270 0.9344
O 2.5224 -0.0436 0.1069
C -2.0793 -1.8494 0.8365
O -1.2047 1.4329 -1.7148
H -1.2520 -1.1765 -1.0826
H 0.4676 -0.8772 1.5266
H 1.2377 -0.4682 -1.4721
H 1.1005 1.9572 -1.1305
H -1.2644 2.3269 0.1977
H -2.1732 1.3552 -1.8431
H 1.9510 1.6015 1.1977
H 2.8904 -0.9551 0.1994
H 0.5074 -3.0553 0.7225
H -2.0383 -1.5878 1.9031
H -3.1236 -1.6900 0.5276
H -1.8591 -2.9160 0.7258
""") | ba8d952cb777fac9e9d445c0865027c2fe25fbe4 | 3,500 |
def load_aggregates(affiliations, a_agg, d_agg, table, dry_run=False):
"""
    Description: Takes a dict of per-affiliation statuses, an all-affiliation aggregate, and a
    no-affiliation (default) aggregate, and loads them into the VP database.
    Args:
        affiliations (dict): Keyed on carId. Expects a format similar to
            '<carId>': {
                '<AFFIL>': {
'i': 0 # The number of interpretations associated with this carId, affiliation combo
# which are in progress.
'a': 1 # Number of combos that have a snapshot in Approved status.
'p': 1 # Number of combos that have a Provisioned status.
}
}
a_agg (dict): Keyed on carId, sum of all Affiliation dicts for this carId
'<carId>': {
'i': 3,
'a': 5,
'p': 6
}
d_agg (dict): Keyed on carId, sum of all interpretations without an affiliation which has a snapshot in the recorded status.
'<carId>': {
'i': 2,
'a': 3,
'p': 4,
            }
        table: handle of the VPT table used for PK lookups and status writes.
        dry_run (bool): If True, only log what would be loaded; do not write to the table.
Returns:
success (bool): True on success
"""
unique_keys = set( list(affiliations) + list(a_agg) + list(d_agg) )
total_statuses_loaded = 0
for carId in unique_keys:
# Get the PK
pk = get_pk_by_carId(carId, table)
if pk is None:
logger.info("Did not find PK in VPT Table for %s. Skipping.", carId)
continue
logger.debug("Found PK %s for carId %s", pk, carId)
# Construct the VCI Status Object
vciStatus = {}
if carId in affiliations:
vciStatus = affiliations[carId]
if carId in a_agg:
vciStatus['a'] = a_agg[carId]
if carId in d_agg:
vciStatus['d'] = d_agg[carId]
logger.debug("vciStatus: %s", vciStatus)
if dry_run:
logger.info("Dry Run: Not loading:")
logger.info("[%s] %s: %s", carId, pk, vciStatus)
else:
total_statuses_loaded += 1
load_vci_status(vciStatus, pk, table)
logger.info("Loaded vciStatus for %d VP variants", total_statuses_loaded)
return True | bf02747b35ddc807460e7e47c5e475a2af34ffb5 | 3,501 |
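A dry-run call sketch for load_aggregates (the carId, counts, and vpt_table handle are hypothetical), showing the expected shape of the three aggregate dicts:

affiliations = {'CA123': {'10001': {'i': 0, 'a': 1, 'p': 1}}}
a_agg = {'CA123': {'i': 3, 'a': 5, 'p': 6}}
d_agg = {'CA123': {'i': 2, 'a': 3, 'p': 4}}

# vpt_table is an assumed handle to the VPT table; dry_run=True only logs what would be written.
load_aggregates(affiliations, a_agg, d_agg, vpt_table, dry_run=True)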
def find_python_root_dir(possibles):
"""Find a python root in a list of dirs
If all dirs have the same name, and one of them has setup.py
    then it is probably a common Python project tree, like
/path/to/projects/cd
/path/to/projects/cd/cd
Or, if all dirs are the same,
except that one has an egg suffix, like
/path/to/dotsite/dotsite
/path/to/dotsite/dotsite.egg-info
then ignore the egg
"""
names = {_.basename() for _ in possibles}
if len(names) == 1:
for possible in possibles:
setup = possible / "setup.py"
if setup.isfile():
return possible
eggless = {paths.path(p.replace(".egg-info", "")) for p in possibles}
if len(eggless) == 1:
return eggless.pop()
return None | 849d9f967899f1cdba7f9579891afbb7fb0580dc | 3,502 |
import collections
import shutil
import os
def _unzip_and_handle_result(zip_content, run, output_handler, benchmark):
"""
Call handle_result with appropriate parameters to fit into the BenchExec expectations.
"""
result_values = collections.OrderedDict()
def _open_output_log(output_path):
log_file = open(run.log_file, 'wb')
log_header = " ".join(
run.cmdline()
) + "\n\n\n--------------------------------------------------------------------------------\n"
log_file.write(log_header.encode('utf-8'))
return log_file
def _handle_run_info(values):
def parseTimeValue(s):
if s[-1] != 's':
raise ValueError(
'Cannot parse "{0}" as a time value.'.format(s))
return float(s[:-1])
for key, value in values.items():
if key == "memory":
result_values["memory"] = int(value.strip('B'))
elif key in ["walltime", "cputime"]:
result_values[key] = parseTimeValue(value)
elif key == "exitcode":
result_values["exitcode"] = int(value)
elif (key == "terminationreason" or key.startswith("blkio-") or
key.startswith("cpuenergy") or key.startswith("energy-") or
key.startswith("cputime-cpu")):
result_values[key] = value
elif key not in IGNORED_VALUES:
result_values['vcloud-' + key] = value
return None
def _handle_host_info(values):
host = values.pop("name", "-")
output_handler.store_system_info(
values.get("os", "-"),
values.get("cpuModel", "-"),
values.get("cores", "-"),
values.get("frequency", "-"),
values.get("memory", "-"),
host,
runSet=run.runSet)
for key, value in values.items():
result_values['vcloud-' + key] = value
result_values["host"] = host
def _handle_stderr_file(result_zip_file, files, output_path):
if RESULT_FILE_STDERR in files:
result_zip_file.extract(RESULT_FILE_STDERR, output_path)
shutil.move(
os.path.join(output_path, RESULT_FILE_STDERR),
run.log_file + ".stdError")
os.rmdir(output_path)
handle_result(
zip_content,
run.log_file + ".output",
run.identifier,
result_files_patterns=benchmark.result_files_patterns,
open_output_log=_open_output_log,
handle_run_info=_handle_run_info,
handle_host_info=_handle_host_info,
handle_special_files=_handle_stderr_file)
if result_values:
with _print_lock:
output_handler.output_before_run(run)
run.set_result(result_values, ["host"])
output_handler.output_after_run(run) | 42aff71f890e6c3340f2a194d0d852a6451bfe05 | 3,503 |
import time
def findEndpoint():
"""
    Scroll the school list to the bottom and read the last item's aria-posinset to get the school count.
"""
print("Fetching school count")
clickToOpen()
# get scroller
scrollbar = driver.find_elements_by_class_name("scrollbar-inner")[1]
driver.execute_script("arguments[0].scrollBy(0,2);", scrollbar)
inner = driver.find_elements_by_class_name("scroll-bar")
time.sleep(2)
top = float(inner[1].get_attribute("style").split("top: ")[-1].replace("px;", ""))
    # scroll until the scrollbar thumb reaches the bottom (top offset >= 159px)
while top < 159:
driver.execute_script("arguments[0].scrollBy(0,200);", scrollbar)
time.sleep(0.3)
top = float(inner[1].get_attribute("style").split("top: ")[-1].replace("px;", ""))
time.sleep(2)
    # get the aria-posinset of the last visible item
vis = driver.find_element_by_class_name("visibleGroup")
children = vis.find_elements_by_xpath(".//div[@class='slicerItemContainer']")
last = children[-1].get_attribute("aria-posinset")
print(f"School count: {last}")
time.sleep(1)
return int(last) | 7f979df78ff7dbebc4819ee4f09c86d9d8c243c0 | 3,504 |
def _shake_shake_block(x, output_filters, stride, is_training):
"""Builds a full shake-shake sub layer."""
batch_size = tf.shape(x)[0]
# Generate random numbers for scaling the branches
rand_forward = [
tf.random_uniform(
[batch_size, 1, 1, 1], minval=0, maxval=1, dtype=tf.float32)
for _ in range(2)
]
rand_backward = [
tf.random_uniform(
[batch_size, 1, 1, 1], minval=0, maxval=1, dtype=tf.float32)
for _ in range(2)
]
# Normalize so that all sum to 1
total_forward = tf.add_n(rand_forward)
total_backward = tf.add_n(rand_backward)
rand_forward = [samp / total_forward for samp in rand_forward]
rand_backward = [samp / total_backward for samp in rand_backward]
zipped_rand = zip(rand_forward, rand_backward)
branches = []
for branch, (r_forward, r_backward) in enumerate(zipped_rand):
with tf.variable_scope('branch_{}'.format(branch)):
b = _shake_shake_branch(x, output_filters, stride, r_forward, r_backward,
is_training)
branches.append(b)
res = _shake_shake_skip_connection(x, output_filters, stride)
return res + tf.add_n(branches) | 29170e867ab8a01277adbbd49f60960408ceb28b | 3,505 |
def get_all_paged_events(decision, conn, domain, task_list, identity, maximum_page_size):
"""
Given a poll_for_decision_task response, check if there is a nextPageToken
and if so, recursively poll for all workflow events, and assemble a final
decision response to return
"""
# First check if there is no nextPageToken, if there is none
# return the decision, nothing to page
next_page_token = None
try:
next_page_token = decision["nextPageToken"]
except KeyError:
next_page_token = None
if next_page_token is None:
return decision
# Continue, we have a nextPageToken. Assemble a full array of events by continually polling
all_events = decision["events"]
while next_page_token is not None:
try:
next_page_token = decision["nextPageToken"]
if next_page_token is not None:
decision = conn.poll_for_decision_task(domain, task_list,
identity, maximum_page_size,
next_page_token)
for event in decision["events"]:
all_events.append(event)
except KeyError:
next_page_token = None
# Finally, reset the original decision response with the full set of events
decision["events"] = all_events
return decision | 5125c5bbff547a02496443477aee88165ece9d80 | 3,506 |
def get_pools():
""" gets json of pools. schema follows
#{
# "kind": "tm:ltm:pool:poolcollectionstate",
# "selfLink": "https://localhost/mgmt/tm/ltm/pool?ver=11.5.3",
# "items": [
# {
# "kind": "tm:ltm:pool:poolstate",
# "name": "mypoolname",
# "partition": "mypartition",
# "fullPath": "/mypartition/mypoolname",
# "generation": 1,
# "selfLink": "https://localhost/mgmt/tm/ltm/pool/~mypartition~mypoolname?ver=11.5.3",
# "allowNat": "yes",
# "allowSnat": "yes",
# "ignorePersistedWeight": "disabled",
# "ipTosToClient": "pass-through",
# "ipTosToServer": "pass-through",
# "linkQosToClient": "pass-through",
# "linkQosToServer": "pass-through",
# "loadBalancingMode": "round-robin",
# "minActiveMembers": 0,
# "minUpMembers": 0,
# "minUpMembersAction": "failover",
# "minUpMembersChecking": "disabled",
# "monitor": "/Common/gateway_icmp ",
# "queueDepthLimit": 0,
# "queueOnConnectionLimit": "disabled",
# "queueTimeLimit": 0,
# "reselectTries": 0,
# "slowRampTime": 10,
# "membersReference": {
# "link": "url-for-rest-request-for-pool-members",
# "isSubcollection": true
# }
# }
## ,(repeated as needed for additional pools)
# ]
#}
"""
global f5rest_url
return (get_f5json(f5rest_url + 'ltm/pool')) | 36baa4d556a69a9498357b1a5534433fc8732683 | 3,507 |
def set_runner_properties(timestep, infguard=False, profile_nodenet=False, profile_world=False, log_levels={}, log_file=None):
"""Sets the speed of the nodenet calculation in ms.
Argument:
timestep: sets the calculation speed.
"""
if log_file:
if not tools.is_file_writeable(log_file):
return False, "Can not write to specified log file."
logger.set_logfile(log_file)
runner_config['log_file'] = log_file
if log_levels:
set_logging_levels(log_levels)
runner_config['runner_timestep'] = timestep
runner_config['runner_infguard'] = bool(infguard)
runner_config['profile_nodenet'] = bool(profile_nodenet)
runner_config['profile_world'] = bool(profile_world)
runner['timestep'] = timestep
return True, "" | a2ddcd6b33304cef4c03a55a78a691fa9b5d1112 | 3,508 |
import numpy as np
def chunk_sum(vec, chunksize):
    """Computes the sums of consecutive chunks of points in a vector.
    Any trailing points that do not fill a complete chunk are ignored.
    """
Nchunks = len(vec)//chunksize
end = Nchunks*chunksize
arr = np.reshape(vec[:end], [Nchunks, chunksize])
sums = np.sum(arr, 1)
return sums | 036bfe2d2277d90339c2bcacc1c0943a96a52ece | 3,509 |
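A quick sketch of chunk_sum on made-up data: with a chunk size of 3, a 10-element vector yields three sums and the trailing element is dropped.

import numpy as np

vec = np.arange(10)        # [0, 1, ..., 9]
chunk_sum(vec, 3)          # array([ 3, 12, 21]) -> sums of (0,1,2), (3,4,5), (6,7,8); 9 is ignored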
import os
import shutil
import time
import zipfile
from git import Repo
from termcolor import colored
def install_custom_app(app, app_url, app_trigger = "False"):
"""this function is used to install custom apps"""
if app.endswith(".zip"):
app = app.split(".")[0]
if app.endswith(".git"):
app = app.split(".")[0]
    if not os.path.exists(wapps_dir_path):
        os.mkdir(wapps_dir_path)  # create the wapps directory if it does not exist yet
app_url = app_url.split(" ")
directory = app
if app_url[0].endswith(".git"):
if len(app_url) == 3:
repo_url = app_url[0]
user_branch = app_url[2]
else:
repo_url = app_url[0]
user_branch = 'master'
tempdir = os.path.join(BASE_DIR, directory)
if os.path.exists(tempdir):
shutil.rmtree(tempdir)
os.mkdir(tempdir)
try:
Repo.clone_from(repo_url, tempdir, branch=user_branch)
except:
try:
print(colored("\nFailed to fetch "+app+" app data from git due to poor internet connection, retrying in a moment...", "red"))
time.sleep(4)
Repo.clone_from(repo_url, tempdir, branch=user_branch)
except:
raise
#check if app already exists then compare version and ask for user input
existing_app_path = os.path.join(BASE_DIR, "wapps", directory)
new_app_path = os.path.join(tempdir)
if os.path.exists(existing_app_path):
usr_choice, message = pre_install_checks(app, existing_app_path, new_app_path, app_trigger)
if usr_choice not in ["y", "Y", "yes", "YES", "n", "N", "NO", "no"]:
if message != "Reinstall":
print(colored("Invalid choice!, continuing with the default choice: (Y)", "yellow"))
usr_choice = "Y"
else:
print(colored("Invalid choice!, continuing with the default choice: (N)", "yellow"))
usr_choice = "N"
if usr_choice in ["Y", "y", "yes", "YES"]:
if message == "Reinstall":
print("Reinstalling the " + app + " app")
elif message == "Upgrade":
print("Upgrading the " + app + " app")
elif message == "Downgrade":
print("Downgrading the " + app + " app")
remove_appurl_from_urls_custom(app, "wapps")
remove_app_from_settings_custom(app, "wapps")
remove_cust_app_source(app, "wapps")
source = os.path.join(tempdir)
destination = os.path.join(wapps_dir_path, directory)
shutil.move(source, destination)
configure_urls_file_custom(app, "wapps")
configure_settings_file_custom_app(app)
return "True"
else:
print(colored(message +": Skipped", "yellow"))
return "False"
else:
print("Installing: " + app)
create_log("Installing: " + app)
source = os.path.join(tempdir)
destination = os.path.join(wapps_dir_path, directory)
if os.path.exists(destination):
shutil.rmtree(destination)
shutil.move(source, destination)
configure_urls_file_custom(app, "wapps")
configure_settings_file_custom_app(app)
return "True"
if os.path.exists(tempdir):
shutil.rmtree(tempdir)
else:
#check if app already exists then compare version and ask for user input
existing_app_path = os.path.join(wapps_dir_path, directory)
new_app_path = app_url[0]
destination = existing_app_path
app_path = app_url[0]
tempdir = os.path.join(BASE_DIR, directory)
if os.path.exists(tempdir):
shutil.rmtree(tempdir)
os.mkdir(tempdir)
if app_path.endswith(".zip"):
if os.path.exists(app_path):
zip_ref = zipfile.ZipFile(app_path, 'r')
zip_ref.extractall(tempdir)
zip_ref.close()
app_path = new_app_path = os.path.join(tempdir, app)
if os.path.exists(existing_app_path):
usr_choice, message = pre_install_checks(app, existing_app_path, new_app_path, app_trigger)
if usr_choice not in ["y", "Y", "yes", "YES", "n", "N", "NO", "no"]:
print(colored("Invalid choice!, it must be (y/n)", "yellow"))
usr_choice, message = pre_install_checks(app, existing_app_path, new_app_path, app_trigger)
if usr_choice not in ["y", "Y", "yes", "YES", "n", "N", "NO", "no"]:
print(colored("Invalid choice!, continuing with the default choice: (y)", "yellow"))
usr_choice = "Y"
if usr_choice in ["Y", "y", "yes", "YES"]:
if message == "Reinstall":
print("Reinstalling the " + app + " app")
elif message == "Upgrade":
print("Upgrading the " + app + " app")
elif message == "Downgrade":
print("Downgrading the " + app + " app")
remove_appurl_from_urls_custom(app, "wapps")
remove_app_from_settings_custom(app, "wapps")
remove_cust_app_source(app, "wapps")
if os.path.isdir(app_path):
shutil.copytree(app_path, destination)
configure_urls_file_custom(app, "wapps")
configure_settings_file_custom_app(app)
if os.path.exists(tempdir):
shutil.rmtree(tempdir)
return "True"
else:
print(colored(message +": Skipped", "yellow"))
if os.path.exists(tempdir):
shutil.rmtree(tempdir)
return "False"
else:
print("Installing: " + app)
create_log("Installing: " + app)
if os.path.isdir(app_path):
shutil.copytree(app_path, destination)
configure_urls_file_custom(app, "wapps")
configure_settings_file_custom_app(app)
return "True" | fd6729ef9abdf1c85a7b09d3cd368ffd68a117e9 | 3,510 |
def get_feature_definitions(df, feature_group):
"""
Get datatypes from pandas DataFrame and map them
to Feature Store datatypes.
:param df: pandas.DataFrame
:param feature_group: FeatureGroup
:return: list
"""
# Dtype int_, int8, int16, int32, int64, uint8, uint16, uint32
# and uint64 are mapped to Integral feature type.
# Dtype float_, float16, float32 and float64
# are mapped to Fractional feature type.
# string dtype is mapped to String feature type.
# Our schema of our data that we expect
# _after_ SageMaker Processing
feature_definitions = []
for column in df.columns:
feature_type = feature_group._DTYPE_TO_FEATURE_DEFINITION_CLS_MAP.get(
str(df[column].dtype), None
)
feature_definitions.append(
FeatureDefinition(column, feature_type)
) # you can alternatively define your own schema
return feature_definitions | 75573b3e8ed1f666b68dde2067b5b14655c341de | 3,511 |
import matplotlib.pyplot as plt
def draw_2d_wp_basis(shape, keys, fmt='k', plot_kwargs={}, ax=None,
label_levels=0):
"""Plot a 2D representation of a WaveletPacket2D basis."""
coords, centers = _2d_wp_basis_coords(shape, keys)
if ax is None:
fig, ax = plt.subplots(1, 1)
else:
fig = ax.get_figure()
for coord in coords:
ax.plot(coord[0], coord[1], fmt)
ax.set_axis_off()
ax.axis('square')
if label_levels > 0:
for key, c in centers.items():
if len(key) <= label_levels:
ax.text(c[0], c[1], key,
horizontalalignment='center',
verticalalignment='center')
return fig, ax | 5e731fa02c833f36c0cde833bfdb5d4b30706519 | 3,512 |
import numpy
def build_normalizer(signal, sample_weight=None):
"""Prepares normalization function for some set of values
transforms it to uniform distribution from [0, 1]. Example of usage:
>>>normalizer = build_normalizer(signal)
>>>pylab.hist(normalizer(background))
>>># this one should be uniform in [0,1]
>>>pylab.hist(normalizer(signal))
:param numpy.array signal: shape = [n_samples] with floats
:param numpy.array sample_weight: shape = [n_samples], non-negative weights associated to events.
"""
sample_weight = check_sample_weight(signal, sample_weight)
assert numpy.all(sample_weight >= 0.), 'sample weight must be non-negative'
sorter = numpy.argsort(signal)
signal, sample_weight = signal[sorter], sample_weight[sorter]
predictions = numpy.cumsum(sample_weight) / numpy.sum(sample_weight)
def normalizing_function(data):
return numpy.interp(data, signal, predictions)
return normalizing_function | b4227364899468ca2017d9bedd87aff4bf35d9c6 | 3,513 |
def get_angler_tag_recoveries(project_slug, tagstat="A"):
"""This is a helper function used by tags_applied_project(). It uses
raw sql to retrieve all of the non-MNR recoveries of tags applied
in a particular project. Only recap's with both a lat and lon and
of the same species as the original tagging event are returned.
Arguments:
    - `project_slug`: unique identifier for the project in which tags were applied
    - `tagstat`: the tag status of the tags in the project identified by
    project slug. 'A' returns angler recaps of tags applied in the
    project, 'C' will return angler recaps of tags also recaptured
    by the OMNR
Returns dictionary with the following elements:
queryset - a raw sql queryset.
Nobs - the number of records in the queryset
TODO - TEST tagstat argument
"""
sql = """
SELECT species.spc_nmco as common_name,
angler.first_name || ' ' || angler.last_name as reported_by,
angler.id as reported_by_id,
recovery.*
FROM tfat_recovery recovery
join tfat_report report on report.id=recovery.report_id
join tfat_joepublic angler on angler.id=report.reported_by_id
JOIN tfat_encounter encounter
ON encounter.tagid=recovery.tagid
AND encounter.species_id=recovery.species_id
JOIN tfat_project proj ON proj.id=encounter.project_id
join common_species species on species.id=recovery.species_id
WHERE encounter.tagstat='{tagstat}'
AND proj.slug=%s
ORDER BY recovery.recovery_date
"""
# sql = '''
# select recovery.* from tfat_recovery recovery where tagid in (
# select tagid from tfat_encounter encounter join tfat_project project on project.id=encounter.project_id where slug=%s and tagstat='{tagstat}'
# ) order by recovery_date
# '''
sql = sql.format(**{"tagstat": tagstat})
queryset = Recovery.objects.raw(sql, [project_slug])
prefetch_related_objects(queryset, "species", "report", "report__reported_by")
nobs = len([x.id for x in queryset])
return {"queryset": queryset, "nobs": nobs} | 709b12dff26ce797916a0617c97c6df2afa9efc6 | 3,514 |
from typing import Callable
from typing import Tuple

import numpy as np
def get_phased_trajectory(init_state: np.ndarray,
update_fn: Callable) -> Tuple[np.ndarray, HashableNdArray]:
"""
evolve an initial state until it reaches a limit cycle
Parameters
----------
init_state
update_fn
Returns
-------
trajectory, phase-point pair
"""
state = init_state
trajectory = list()
trajectory_set = set() # set lookup should be faster
# compute state by state until we have a repeat
hashable_state = HashableNdArray(state)
while hashable_state not in trajectory_set:
trajectory.append(hashable_state)
trajectory_set.add(hashable_state)
state = update_fn(state)
hashable_state = HashableNdArray(state)
# separate trajectory into in-bound and limit-cycle parts
repeated_state = HashableNdArray(state)
repeated_state_index = trajectory.index(repeated_state)
limit_cycle = trajectory[repeated_state_index:]
# find state in limit cycle with smallest hash (i.e. smallest lexicographic
# ordering if there is no integer overflow)
# this is our phase fixing point
cycle_min_index: int = 0
cycle_min: int = hash(limit_cycle[0])
for idx in range(1, len(limit_cycle)):
nxt_hash: int = hash(limit_cycle[idx])
if nxt_hash < cycle_min:
cycle_min_index = idx
cycle_min = nxt_hash
# get trajectory with phase
phase_idx: int = len(trajectory) - len(limit_cycle) + cycle_min_index
phased_trajectory = np.array(
[hashable.array for hashable in trajectory[:phase_idx]], dtype=np.int64
)
return phased_trajectory, trajectory[phase_idx] | 603aa9f3e626d51132b70b87321453cfacba579a | 3,515 |
import logging
import time
from functools import wraps

from botocore.exceptions import ClientError
from sceptre.exceptions import RetryLimitExceededError
def exponential_backoff(func):
"""
Retries a Boto3 call up to 5 times if request rate limits are hit.
The time waited between retries increases exponentially. If rate limits are
hit 5 times, exponential_backoff raises a
    :py:class:`sceptre.exceptions.RetryLimitExceededError`.
:param func: a function that uses boto calls
:type func: func
:returns: The decorated function.
:rtype: func
    :raises: sceptre.exceptions.RetryLimitExceededError
"""
logger = logging.getLogger(__name__)
@wraps(func)
def decorated(*args, **kwargs):
max_retries = 5
attempts = 0
while attempts < max_retries:
try:
return func(*args, **kwargs)
except ClientError as e:
if e.response["Error"]["Code"] == "Throttling":
logger.error("Request limit exceeded, pausing...")
time.sleep(2 ** attempts)
attempts += 1
else:
raise e
raise RetryLimitExceededError(
"Exceeded request limit {0} times. Aborting.".format(max_retries)
)
return decorated | 63db7514b88eb64ca0ee5d626be5a59f7d40d7d6 | 3,516 |
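A usage sketch for exponential_backoff (the client and function names are hypothetical): any function wrapping a boto3 call can be decorated so that "Throttling" errors are retried with exponentially increasing waits.

@exponential_backoff
def describe_stack(cloudformation_client, stack_name):
    # a boto3 call that may raise a ClientError with error code "Throttling"
    return cloudformation_client.describe_stacks(StackName=stack_name)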
def post_config_adobe_granite_saml_authentication_handler(key_store_password=None, key_store_password_type_hint=None, service_ranking=None, service_ranking_type_hint=None, idp_http_redirect=None, idp_http_redirect_type_hint=None, create_user=None, create_user_type_hint=None, default_redirect_url=None, default_redirect_url_type_hint=None, user_id_attribute=None, user_id_attribute_type_hint=None, default_groups=None, default_groups_type_hint=None, idp_cert_alias=None, idp_cert_alias_type_hint=None, add_group_memberships=None, add_group_memberships_type_hint=None, path=None, path_type_hint=None, synchronize_attributes=None, synchronize_attributes_type_hint=None, clock_tolerance=None, clock_tolerance_type_hint=None, group_membership_attribute=None, group_membership_attribute_type_hint=None, idp_url=None, idp_url_type_hint=None, logout_url=None, logout_url_type_hint=None, service_provider_entity_id=None, service_provider_entity_id_type_hint=None, assertion_consumer_service_url=None, assertion_consumer_service_url_type_hint=None, handle_logout=None, handle_logout_type_hint=None, sp_private_key_alias=None, sp_private_key_alias_type_hint=None, use_encryption=None, use_encryption_type_hint=None, name_id_format=None, name_id_format_type_hint=None, digest_method=None, digest_method_type_hint=None, signature_method=None, signature_method_type_hint=None, user_intermediate_path=None, user_intermediate_path_type_hint=None): # noqa: E501
"""post_config_adobe_granite_saml_authentication_handler
# noqa: E501
:param key_store_password:
:type key_store_password: str
:param key_store_password_type_hint:
:type key_store_password_type_hint: str
:param service_ranking:
:type service_ranking: int
:param service_ranking_type_hint:
:type service_ranking_type_hint: str
:param idp_http_redirect:
:type idp_http_redirect: bool
:param idp_http_redirect_type_hint:
:type idp_http_redirect_type_hint: str
:param create_user:
:type create_user: bool
:param create_user_type_hint:
:type create_user_type_hint: str
:param default_redirect_url:
:type default_redirect_url: str
:param default_redirect_url_type_hint:
:type default_redirect_url_type_hint: str
:param user_id_attribute:
:type user_id_attribute: str
:param user_id_attribute_type_hint:
:type user_id_attribute_type_hint: str
:param default_groups:
:type default_groups: List[str]
:param default_groups_type_hint:
:type default_groups_type_hint: str
:param idp_cert_alias:
:type idp_cert_alias: str
:param idp_cert_alias_type_hint:
:type idp_cert_alias_type_hint: str
:param add_group_memberships:
:type add_group_memberships: bool
:param add_group_memberships_type_hint:
:type add_group_memberships_type_hint: str
:param path:
:type path: List[str]
:param path_type_hint:
:type path_type_hint: str
:param synchronize_attributes:
:type synchronize_attributes: List[str]
:param synchronize_attributes_type_hint:
:type synchronize_attributes_type_hint: str
:param clock_tolerance:
:type clock_tolerance: int
:param clock_tolerance_type_hint:
:type clock_tolerance_type_hint: str
:param group_membership_attribute:
:type group_membership_attribute: str
:param group_membership_attribute_type_hint:
:type group_membership_attribute_type_hint: str
:param idp_url:
:type idp_url: str
:param idp_url_type_hint:
:type idp_url_type_hint: str
:param logout_url:
:type logout_url: str
:param logout_url_type_hint:
:type logout_url_type_hint: str
:param service_provider_entity_id:
:type service_provider_entity_id: str
:param service_provider_entity_id_type_hint:
:type service_provider_entity_id_type_hint: str
:param assertion_consumer_service_url:
:type assertion_consumer_service_url: str
:param assertion_consumer_service_url_type_hint:
:type assertion_consumer_service_url_type_hint: str
:param handle_logout:
:type handle_logout: bool
:param handle_logout_type_hint:
:type handle_logout_type_hint: str
:param sp_private_key_alias:
:type sp_private_key_alias: str
:param sp_private_key_alias_type_hint:
:type sp_private_key_alias_type_hint: str
:param use_encryption:
:type use_encryption: bool
:param use_encryption_type_hint:
:type use_encryption_type_hint: str
:param name_id_format:
:type name_id_format: str
:param name_id_format_type_hint:
:type name_id_format_type_hint: str
:param digest_method:
:type digest_method: str
:param digest_method_type_hint:
:type digest_method_type_hint: str
:param signature_method:
:type signature_method: str
:param signature_method_type_hint:
:type signature_method_type_hint: str
:param user_intermediate_path:
:type user_intermediate_path: str
:param user_intermediate_path_type_hint:
:type user_intermediate_path_type_hint: str
:rtype: None
"""
return 'do some magic!' | b6b082929904123f96c044753995ff1d19cb9cbf | 3,517 |
import os
def get_req_env(var_name: str) -> str:
"""
    Try to get an environment variable; print a message and exit if it is not set
"""
try:
return os.environ[var_name]
except KeyError:
print(f"Missing required environment variable '{var_name}'.")
exit(1) | c1dadd65bf1da91f7304246d236afc922e65fb54 | 3,518 |
import json
def _pcheck(block):
""" Helper for multiprocesses: check a block of logs
Args:
block List[List[str], int]: lines, block_id
Returns:
[type]: [description]
"""
results = []
lines, block_id = block
for li, line in enumerate(lines):
json_line = json.loads(line)
result = [
"%s: %s" % (e.error_type, e.message)
for e in [
validate_normalized(json_line),
check_timestamp_digits(json_line["timestamp"])
if "timestamp" in json_line
else None,
check_time(json_line),
]
if e
]
global_line_number = block_id * BLOCK_SIZE + li
results.append((global_line_number, result))
return results | 3a581e3440079d5f31b00c86d84abee3eca0a396 | 3,519 |
import sqlite3
def fetch_exon_locations(database):
""" Queries the database to create a dictionary mapping exon IDs to
the chromosome, start, end, and strand of the exon """
conn = sqlite3.connect(database)
cursor = conn.cursor()
query = """
SELECT
e.edge_ID,
loc1.chromosome,
MIN(loc1.position,loc2.position),
MAX(loc1.position,loc2.position),
e.strand
FROM edge e
LEFT JOIN location loc1 ON e.v1 = loc1.location_ID
LEFT JOIN location loc2 ON e.v2 = loc2.location_ID
WHERE e.edge_type = 'exon';"""
cursor.execute(query)
exon_location_tuples = cursor.fetchall()
# Create dictionary
exon_locations = {}
for loc_tuple in exon_location_tuples:
exon_ID = loc_tuple[0]
exon_locations[exon_ID] = loc_tuple[1:]
conn.close()
return exon_locations | 54cdd3ffa2ccd10bd777f9130caaae992ac3d451 | 3,520 |
from django import http
def add(request):
"""Displays/processes a form to create a collection."""
data = {}
if request.method == 'POST':
form = forms.CollectionForm(
request.POST, request.FILES,
initial=initial_data_from_request(request))
aform = forms.AddonsForm(request.POST)
if form.is_valid():
collection = form.save(default_locale=request.LANG)
collection.save()
if aform.is_valid():
aform.save(collection)
collection_message(request, collection, 'add')
statsd.incr('collections.created')
log.info('Created collection %s' % collection.id)
return http.HttpResponseRedirect(collection.get_url_path())
else:
data['addons'] = Addon.objects.filter(pk__in=aform.clean_addon())
data['comments'] = aform.clean_addon_comment()
else:
form = forms.CollectionForm()
data.update(form=form, filter=get_filter(request))
return render_cat(request, 'bandwagon/add.html', data) | 04491d0465e09057a2e6c3cc7b80f41ff84314ec | 3,521 |
def get_camera_pose_cpp():
"""
Returns camera pose
"""
rospy.wait_for_service('/asr_robot_model_services/GetCameraPose', timeout=5)
pose = rospy.ServiceProxy('/asr_robot_model_services/GetCameraPose',GetPose)
return pose().pose | b52be4a613d1de482387f6643ea12181f84f6cf4 | 3,522 |
import copy
def _convert_model_from_bytearray_to_object(model_bytearray):
"""Converts a tflite model from a bytearray into a parsable object."""
model_object = schema_fb.Model.GetRootAsModel(model_bytearray, 0)
model_object = schema_fb.ModelT.InitFromObj(model_object)
model_object = copy.deepcopy(model_object)
return model_object | 9579b58438571b41e8451a13d5e292e142cd6b57 | 3,523 |
import requests
def get_service_mapping():
""" Get mapping dict of service types
Returns:
A mapping dict which maps service types names to their ids
"""
# Get all Service types:
all_service_type = requests.get(base_url + 'services/v2/service_types', headers=headers3).json()
# Make Dict of service names and ids
service_name_to_id = {service_type['attributes']['name']:service_type['id'] for service_type in all_service_type['data']}
return service_name_to_id | 8a8adb8fff086b323dd95012c1a901918afa2bc2 | 3,524 |
def get_recently_viewed(request):
""" get settings.PRODUCTS_PER_ROW most recently viewed products for current customer """
t_id = tracking_id(request)
views = ProductView.objects.filter(tracking_id=t_id).values(
'product_id').order_by('-date')[0:PRODUCTS_PER_ROW]
product_ids = [v['product_id'] for v in views]
return Product.active.filter(id__in=product_ids) | 048187566446294285c7c8fe48b3dafba5efdcc1 | 3,525 |
import re
def chapter_by_url(url):
"""Helper function that iterates through the chapter scrapers defined in
cu2.scrapers.__init__ and returns an initialized chapter object when it
matches the URL regex.
"""
for Chapter in chapter_scrapers:
if re.match(Chapter.url_re, url):
return Chapter.from_url(url) | 83112ab2b68faf7d48a7a8fab3cd359ff442550d | 3,526 |
def get_model_name(part_num):
"""
根据型号获取设备名称
:param part_num:
:return:
"""
models = current_config.MODELS
for model_name, model_part_num in models.items():
if model_part_num == part_num:
return model_name | 843489174c844e05752f499f250a30b157ae386e | 3,527 |
def run_get_pk(process, *args, **inputs):
"""Run the process with the supplied inputs in a local runner that will block until the process is completed.
:param process: the process class or process function to run
:param inputs: the inputs to be passed to the process
:return: tuple of the outputs of the process and process node pk
"""
if isinstance(process, Process):
runner = process.runner
else:
runner = manager.get_manager().get_runner()
return runner.run_get_pk(process, *args, **inputs) | 782c3b49b90347a48495f2b7b48e34c2418d31a1 | 3,528 |
def _random_mask(target_tokens, noise_probability=None, target_length=None):
""" target_length其实是mask_length"""
unk = 3
target_masks = get_base_mask(target_tokens)
if target_length is None:
target_length = target_masks.sum(1).float()
if noise_probability is None:
# sample from [0,1]
        target_length = target_length * target_length.clone().uniform_()  # the length to be masked
else:
target_length = target_length * noise_probability
target_length = target_length + 1 # make sure to mask at least one token.
target_score = target_tokens.clone().float().uniform_()
target_score.masked_fill_(~target_masks, 2.0)
_, target_rank = target_score.sort(1)
target_cutoff = new_arange(target_rank) < target_length[:, None].long()
prev_target_tokens = target_tokens.masked_fill(
target_cutoff.scatter(1, target_rank, target_cutoff), unk)
return prev_target_tokens | 89e603c7a5bfd4fd6fa1d6a36cea3e429e7bd6b7 | 3,529 |
import os
import urllib
import shutil
import tarfile
def download_and_extract(data_dir, force=False):
"""Download fname from the datasets_url, and save it to target_dir,
unless the file already exists, and force is False.
Parameters
----------
data_dir : str
Directory of where to download cifar10 data
force : bool
Force downloading the file, if it already exists
Returns
-------
fname : str
Full path of the downloaded file
"""
target_fname = os.path.join(data_dir, 'cifar-10-batches-py')
if force or not os.path.isdir(target_fname):
try:
os.makedirs(data_dir)
except IOError:
pass
download_fname = os.path.join(data_dir, 'cifar-10-python.tar.gz')
logger.info("Downloading CIFAR10 dataset from:" + str(DATASET_URL))
with urllib.request.urlopen(DATASET_URL) as response, open(download_fname, 'wb') as out_file:
logger.info(str(DATASET_URL) + ' --> ' + download_fname)
shutil.copyfileobj(response, out_file)
tf = tarfile.open(download_fname)
tf.extractall(data_dir)
# verify files are there, otherwise throw error
for f in TRAIN_FLIST:
if not os.path.isfile(os.path.join(target_fname, f)):
msg = "Training file " + str(f) + " missing! Please try manually downloading the data from: "\
+ str(DATASET_URL)
logger.error(msg)
raise IOError(msg)
for f in TEST_FLIST:
if not os.path.isfile(os.path.join(target_fname, f)):
msg = "Test file " + str(f) + " missing! Please try manually downloading the data from: " \
+ str(DATASET_URL)
logger.error(msg)
raise IOError(msg)
return target_fname | 39d9b3dcd0497a2b8742e2fc68e3af87284094db | 3,530 |
def calculate(cart):
"""Return the total shipping cost for the cart. """
total = 0
for line in cart.get_lines():
total += line.item.shipping_cost * line.quantity
return total | 4b6d9bd94ce3a5748f0d94ab4b23dab993b430e4 | 3,531 |
import numpy as np
import matplotlib as mpl
def arr_to_rgb(arr, rgb=(0, 0, 0), alpha=1, invert=False, ax=None):
    """
    Turn an array into an RGBA image whose alpha channel is the (rescaled) array.
    arr: array to be made a mask; it is rescaled to [0, 1]
    rgb: colour, assumed to be floats (0..1, 0..1, 0..1) or a matplotlib colour string
    alpha: maximum opacity of the mask
    invert: if True, invert the mask before applying it
    ax: axes to draw on (currently unused; see the commented-out imshow call)
    """
# arr should be scaled to 1
img = np.asarray(arr, dtype=np.float64)
img = img - np.nanmin(img)
img = img / np.nanmax(img)
im2 = np.zeros(img.shape + (4,))
if isinstance(rgb, str):
rgb = mpl.colors.to_rgb(rgb)
if invert:
img = 1 - img
im2[:, :, 3] = img * alpha
r, g, b = rgb
im2[:, :, 0] = r
im2[:, :, 1] = g
im2[:, :, 2] = b
# if ax is None:
# ax = plt.gca()
# plt.sca(ax)
# plt.imshow(im2)
return im2 | 73a19cfed712d93cbbaa69454ca2df780bc523fa | 3,532 |
def parameters(number, size, v=3):
"""
    generates random parameters (position, velocity, colour, size) for a number of items and puts them in a list
:param number: number of items
:param size: characteristic size of the items
:param v: velocity
:return: list with items
"""
param = []
for i in range(number):
angle = randint(0, int(2 * pi * 100))
param.append({
'x': randint(100, screen_width - 100),
'y': randint(100, screen_height - 100),
'vx': v * cos(angle / 100),
'vy': v * sin(angle / 100),
'r': randint(40, 255),
'g': randint(40, 255),
'b': randint(40, 255),
's': size
})
return param | 1b2ad225359c53ce4dd3feb9d356b1637c76c79c | 3,533 |
def _validate_args_for_toeplitz_ops(c_or_cr, b, check_finite, keep_b_shape,
enforce_square=True):
"""Validate arguments and format inputs for toeplitz functions
Parameters
----------
c_or_cr : array_like or tuple of (array_like, array_like)
The vector ``c``, or a tuple of arrays (``c``, ``r``). Whatever the
actual shape of ``c``, it will be converted to a 1-D array. If not
supplied, ``r = conjugate(c)`` is assumed; in this case, if c[0] is
real, the Toeplitz matrix is Hermitian. r[0] is ignored; the first row
of the Toeplitz matrix is ``[c[0], r[1:]]``. Whatever the actual shape
of ``r``, it will be converted to a 1-D array.
b : (M,) or (M, K) array_like
Right-hand side in ``T x = b``.
check_finite : bool
Whether to check that the input matrices contain only finite numbers.
Disabling may give a performance gain, but may result in problems
(result entirely NaNs) if the inputs do contain infinities or NaNs.
keep_b_shape : bool
Whether to convert a (M,) dimensional b into a (M, 1) dimensional
matrix.
enforce_square : bool, optional
If True (default), this verifies that the Toeplitz matrix is square.
Returns
-------
r : array
1d array corresponding to the first row of the Toeplitz matrix.
c: array
1d array corresponding to the first column of the Toeplitz matrix.
b: array
(M,), (M, 1) or (M, K) dimensional array, post validation,
corresponding to ``b``.
dtype: numpy datatype
``dtype`` stores the datatype of ``r``, ``c`` and ``b``. If any of
``r``, ``c`` or ``b`` are complex, ``dtype`` is ``np.complex128``,
        otherwise, it is ``np.double``.
b_shape: tuple
Shape of ``b`` after passing it through ``_asarray_validated``.
"""
if isinstance(c_or_cr, tuple):
c, r = c_or_cr
c = _asarray_validated(c, check_finite=check_finite).ravel()
r = _asarray_validated(r, check_finite=check_finite).ravel()
else:
c = _asarray_validated(c_or_cr, check_finite=check_finite).ravel()
r = c.conjugate()
if b is None:
raise ValueError('`b` must be an array, not None.')
b = _asarray_validated(b, check_finite=check_finite)
b_shape = b.shape
is_not_square = r.shape[0] != c.shape[0]
if (enforce_square and is_not_square) or b.shape[0] != r.shape[0]:
raise ValueError('Incompatible dimensions.')
is_cmplx = np.iscomplexobj(r) or np.iscomplexobj(c) or np.iscomplexobj(b)
dtype = np.complex128 if is_cmplx else np.double
r, c, b = (np.asarray(i, dtype=dtype) for i in (r, c, b))
if b.ndim == 1 and not keep_b_shape:
b = b.reshape(-1, 1)
elif b.ndim != 1:
b = b.reshape(b.shape[0], -1)
return r, c, b, dtype, b_shape | 977a40e279d3a0000cc03b181e65182e2fe325cc | 3,534 |
from subprocess import call
import os
def run_dft_en_par(dft_input:str, structure,
dft_loc:str, ncpus:int, dft_out:str ="dft.out",
npool:int =None, mpi:str ="mpi", **dft_kwargs):
"""run DFT calculation with given input template
and atomic configurations. This function is not used atm.
:param dft_input: input template file name
:param structure: atomic configuration
:param dft_loc: relative/absolute executable of the DFT code
:param ncpus: # of CPU for mpi
:param dft_out: output file name
:param npool: not used
:param mpi: not used
    :param **dft_kwargs: not used
:return: forces, energy
"""
newfilename = edit_dft_input_positions(dft_input, structure)
dft_command = \
f'{dft_loc} -i {newfilename} > {dft_out}'
if (ncpus > 1):
dft_command = f'mpirun -np {ncpus} {dft_command}'
# output.write_to_output(dft_command+'\n')
call(dft_command, shell=True)
os.remove(newfilename)
forces, energy = parse_dft_forces_and_energy(dft_out)
return forces, energy | 49084edbace0bde5e451359c11f4140e0dd6a6ed | 3,535 |
def get_application_id():
"""Returns the app id from the app_identity service."""
return app_identity.get_application_id() | f23f6ac7473f81af6c2b40807dd20f8f602bbd96 | 3,536 |
def prior(X, ls, kernel_func=rbf,
ridge_factor=1e-3, name=None):
"""Defines Gaussian Process prior with kernel_func.
Args:
X: (np.ndarray of float32) input training features.
with dimension (N, D).
kernel_func: (function) kernel function for the gaussian process.
Default to rbf.
ls: (float32) length scale parameter.
ridge_factor: (float32) ridge factor to stabilize Cholesky decomposition.
name: (str) name of the random variable
Returns:
(ed.RandomVariable) A random variable representing the Gaussian Process,
dimension (N,)
"""
X = tf.convert_to_tensor(X, dtype=tf.float32)
N, _ = X.shape.as_list()
K_mat = kernel_func(X, ls=ls, ridge_factor=ridge_factor)
return ed.MultivariateNormalTriL(loc=tf.zeros(N, dtype=tf.float32),
scale_tril=tf.cholesky(K_mat),
name=name) | 6ca9d150924473a0b78baa8da4e0734b7719e615 | 3,537 |
import os
def ast_for_statement(statement: Ast, ctx: ReferenceDict):
"""
statement
::= (label | let | expr | into | importExpr) [';'];
"""
# assert statement.name == 'statement'
sexpr = statement[0]
s_name: str = sexpr.name
try:
if s_name is UNameEnum.expr: # expr
# RuikoEBNF:
# expr ::= testExpr (thenTrailer | applicationTrailer)* [where];
if len(statement) is 2:
# end with ';' then return None
ast_for_expr(sexpr, ctx)
else:
return ast_for_expr(sexpr, ctx)
elif s_name is UNameEnum.label:
[symbol] = sexpr
assert symbol.name is UNameEnum.symbol
ctx.set_local('@label', symbol.string)
elif s_name is UNameEnum.let:
# RuikoEBNF:
# let Throw ['=' '!']
# ::= ['`let`'] symbol ['!' trailer+] '=' expr;
to_new_ctx = False
if sexpr[0].string is UNameEnum.keyword_let:
# bind a new var in current environment(closure).
to_new_ctx = True
_, symbol, *trailers, expr = sexpr
else:
# For the readability of source codes,
# pattern matching using list destruction is better.
symbol, *trailers, expr = sexpr
res = ast_for_expr(expr, ctx)
if not trailers:
# let symbol = ...
ctx.set_local(symbol.string, res) if to_new_ctx else ctx.set_nonlocal(symbol.string, res)
return
# let symbol 'attr = ... | let symbol ![item] = ...
ref = ctx.get_nonlocal(symbol.string)
*fst_n, [last] = trailers
# `trailers` is a list of trailer.
# RuikoEBNF:
# trailer Throw ['[' ']' '.']
# ::= '[' exprCons ']' | '\'' symbol;
for each, in fst_n:
if each.name is UNameEnum.symbol: # symbol
ref = getattr(ref, each.string)
else: # [exprCons]
item = tuple(ast_for_expr_cons(each, ctx))
if len(item) is 1:
item = item[0]
ref = ref[item]
if last.name == UNameEnum.symbol: # symbol
# trailer = . symbol
setattr(ref, last.string, res)
else:
# trailer = [exprCons]
item = tuple(ast_for_expr_cons(last, ctx))
if len(item) is 1:
item = item[0]
ref[item] = res
# let expr return Nothing
elif s_name is UNameEnum.into:
# RuikoEBNF:
# into Throw ['`into`']
# ::= '`into`' symbol;
[symbol] = sexpr
# TODO with result
raise BreakUntil(symbol.string)
elif s_name is UNameEnum.importStmt:
# RuikoEBNF:
# importExpr
# ::= singleImportExpr | fromImportExpr | remImport;
[branch] = sexpr
if branch.name is not UNameEnum.remImport:
exec(' '
.join
(map(lambda _: _.string,
flatten(
branch)))
.strip(),
ctx.local)
return
if len(branch) is 2:
string, symbol = branch
path = eval(string.string)
name = symbol.string
else:
[string] = branch
path = eval(string.string)
name = os.path.split(
os.path.splitext(path)[0])[1]
src_code, md5_v = md5(path)
manager = ctx.module_manager
managed_modules = manager['@modules']
if md5_v == managed_modules.get(path):
# imported and file not changed.
# so do not import again
return
managed_modules[path] = md5_v
env = make_new_module(name, manager, ctx['__compiler__'])
add_exec_func(to=env)
ast_for_file(env['__compiler__'].from_source_code(path, src_code, MetaInfo(fileName=path)),
env)
ctx.set_local(name, ModuleAgent(env.local))
else:
raise TypeError('unknown statement.')
except BreakUntil as e:
raise e
except Exception as e:
raise Trace(e, statement) | 4718adfc4b5a1beb7bb4f72a1464fdd878e2d38f | 3,538 |
def multiclass_eval(y_hat: FloatTensor, y: IntTensor) -> int:
"""
    Returns the number correct: how often the argmax of the predictions matches the gold label.
Arguments:
y_hat: 2d (N x C): guesses for each class
y: 2d (N x C): onehot representation of class labels
Returns:
        number correct
"""
# max(dim) returns both values and indices. compare best indices from
# predictions and gold (which are just onehot)
_, pred_idxes = y_hat.max(1)
_, gold_idxes = y.max(1)
return (pred_idxes == gold_idxes).sum() | 6385d1113a0c29191a8951a30d8219278511451d | 3,539 |
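A small illustration of multiclass_eval with made-up tensors: two of the three predictions below share their argmax with the one-hot gold labels, so the function returns 2.

import torch

y_hat = torch.tensor([[0.1, 0.8, 0.1],   # argmax 1
                      [0.7, 0.2, 0.1],   # argmax 0
                      [0.2, 0.3, 0.5]])  # argmax 2
y = torch.tensor([[0, 1, 0],             # gold 1 (match)
                  [0, 0, 1],             # gold 2 (mismatch)
                  [0, 0, 1]])            # gold 2 (match)
multiclass_eval(y_hat, y)                # tensor(2)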
from typing import Dict
import sys
import unicodedata
def punctuation_for_spaces_dict() -> Dict[int, str]:
"""Provide a dictionary for removing punctuation, keeping spaces. Essential for scansion
to keep stress patterns in alignment with original vowel positions in the verse.
:return dict with punctuation from the unicode table
>>> print("I'm ok! Oh #%&*()[]{}!? Fine!".translate(
... punctuation_for_spaces_dict()).strip())
I m ok Oh Fine
"""
return dict(
(i, " ")
for i in range(sys.maxunicode)
if unicodedata.category(chr(i)).startswith("P")
) | 07551f80ab6736788b555ebfb1a443a318d0cc21 | 3,540 |
def extract止めないでお姉さま(item):
"""
Parser for '止めないで、お姉さま…'
"""
badwords = [
'subs',
]
if any([bad in item['tags'] for bad in badwords]):
return None
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or 'preview' in item['title'].lower():
return None
if 'WATTT' in item['tags']:
return buildReleaseMessageWithType(item, 'WATTT', vol, chp, frag=frag, postfix=postfix)
return False | 1ee52b649fbf24452285c44538b1bfc0fe3cb0c8 | 3,541 |
import urllib
import json
def GetExpID(startRow=0,numRows=2000,totalRows = -1):
"""
Queries the Allen Mouse Brain Institute website for all gene expression data available for download.
Returns:
--------
    info : list(int)
        SectionDataSetIDs of all experiments whose download did not fail
        (SectionDataSet: see "http://help.brain-map.org/display/api/Data+Model").
        These IDs are needed to specify download targets.
"""
rows = []
GeneNames = []
SectionDataSetID = []
info = list()
done = False
while not done:
r = "&start_row={0}&num_rows={1}".format(startRow,numRows)
pagedUrl = API_DATA_PATH + "query.json?criteria=model::SectionDataSet,rma::criteria,products%5Bid$eq5%5D,rma::include,specimen(stereotaxic_injections(primary_injection_structure,structures))" + r
source = urllib.request.urlopen(pagedUrl).read()
response = json.loads(source)
rows += response['msg']
for x in response['msg']:
if x['failed'] == False :
print(x['id'])
info.append(x['id'])
if totalRows < 0:
totalRows = int(response['total_rows'])
startRow += len(response['msg'])
if startRow >= totalRows:
done = True
return info | f361e94b5d2c61bc80b182c2ee63a734d7e8dc4e | 3,542 |
from typing import Counter
def _add_biotype_attribute(gene_content):
"""
Add `biotype` attribute to all intervals in gene_content.
Parameters
----------
    gene_content : dict
Intervals in gene separated by transcript id.
Returns
-------
dict
Same gene_content_ object with added `biotype` attributes.
"""
gene_content = gene_content.copy()
# Determine gene biotype:
gbiotype = _get_biotype(gene_content['gene'])
# List to keep track of all possible biotypes in gene:
gene_biotypes = [gbiotype] if gbiotype else []
for transcript_id, transcript_intervals in gene_content.items():
if transcript_id == 'gene':
continue
first_exon = [i for i in transcript_intervals if i[2] in ['CDS', 'ncRNA']][0]
biotype = _get_biotype(first_exon)
gene_biotypes.append(biotype)
new_intervals = []
for interval in transcript_intervals:
new_intervals.append(_add_biotype_value(interval, biotype))
gene_content[transcript_id] = new_intervals
# Finally, make also gene biotype: a list of all biotypes in gene,
# sorted by frequency. Additionally, another sorting is added to sort
# by alphabet if counts are equal.
biotype = ', '.join([i[0] for i in sorted(
sorted(Counter(gene_biotypes).items()), key=lambda x: x[1], reverse=True)])
gene_content['gene'] = _add_biotype_value(gene_content['gene'], biotype)
return gene_content | 21e09ebd4048acf4223855c23b819352aac013db | 3,543 |
def random_solution(W, D, n):
"""
We generate a solution of size n
"""
sol = np.random.permutation(n) + 1
fitness_sol = fitness(W=W, D=D, sol=sol)
return [fitness_sol, sol] | be95c1f08591e90e0e808ea7938763bb7fecd1da | 3,544 |
def format_perm(model, action):
"""
Format a permission string "app.verb_model" for the model and the
requested action (add, change, delete).
"""
return '{meta.app_label}.{action}_{meta.model_name}'.format(
meta=model._meta, action=action) | 12f532e28f685c2a38a638de63928f07039d44c8 | 3,545 |
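For illustration (the Article model, its "news" app label, and the request object are assumptions), the formatted string plugs directly into Django's permission checks.

perm = format_perm(Article, "change")   # -> e.g. "news.change_article"
if request.user.has_perm(perm):
    ...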
def verify_password(plain_password: str, hashed_password: str) -> bool:
"""Verify plain password and hashed password.
Args:
plain_password (str): Plain text password.
hashed_password (str): Hashed password.
Returns:
bool: Returns true if secret is verified against given hash.
"""
return pwd_context.verify(plain_password, hashed_password) | adb7ac87516a02298468858216481d9ddad1ce13 | 3,546 |
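A sketch of how the module-level pwd_context is typically constructed with passlib (the bcrypt scheme is an assumption; the snippet does not show the real configuration):

from passlib.context import CryptContext

pwd_context = CryptContext(schemes=["bcrypt"], deprecated="auto")  # assumed configuration

hashed = pwd_context.hash("s3cret")
verify_password("s3cret", hashed)   # True
verify_password("wrong", hashed)    # False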
from typing import List
from typing import Dict
def render_siecle2(tpl: str, parts: List[str], data: Dict[str, str]) -> str:
"""
>>> render_siecle2("siècle2", ["1"], defaultdict(str))
'I<sup>er</sup>'
>>> render_siecle2("siècle2", ["I"], defaultdict(str))
'I<sup>er</sup>'
>>> render_siecle2("siècle2", ["i"], defaultdict(str))
'I<sup>er</sup>'
>>> render_siecle2("siècle2", ["18"], defaultdict(str))
'XVIII<sup>e</sup>'
>>> render_siecle2("siècle2", ["XVIII"], defaultdict(str))
'XVIII<sup>e</sup>'
>>> render_siecle2("siècle2", ["xviii"], defaultdict(str))
'XVIII<sup>e</sup>'
"""
number = parts[0]
number = int_to_roman(int(number)) if number.isnumeric() else number.upper()
suffix = "er" if number == "I" else "e"
return f"{number}{superscript(suffix)}" | d0abd2703aebf3a05c0cc893c6678ed6683bb31a | 3,547 |
def get_interactions(request):
"""Function to get the interactions for a molecule"""
dist_dict = {"SingleAtomAcceptor_SingleAtomDonor": {"dist": 4.0}, # H-bonding
"SingleAtomAcceptor_WeakDonor": {"dist": 3.0}, # Weak H-bond
"Halogen_SingleAtomAcceptor": {"dist": 4.0}, # Halogen bonding
"AcidicGroup_BasicGroup": {"dist": 4.0}, # Acid-base
"Arom5_Arom6": {"dist": 5.5},"Arom6_Arom6": {"dist": 5.5},"Arom5_Arom5": {"dist": 5.5},# Aromatic-aromatic interactions
"Arom6_Carbonyl": {"dist": 4.5}, "Arom5_Carbonyl": {"dist": 4.5},# Carbonyl-aromatic interactions - CARBONLY from PROTEIN ONLY!!!!
"Hydrophobe_Hydrophobe": {"dist": 4.5}}#Hydrophobic interactions
mol_pk = request.GET['obs_id']
my_dist = request.GET['dist']
host = request.get_host()
mol = Molecule.objects.get(pk=mol_pk)
out_l = []
prot = mol.prot_id
# Get the interactions
interactions = ProbeBit.objects.filter(prot_id=prot, mol_id=mol, dist__lte=my_dist)
i = -1
for my_int in interactions:
if my_int.type not in dist_dict:
continue
if my_int.dist > dist_dict[my_int.type]["dist"]:
continue
print "HERE"
i += 1
out_l.append({})
f = my_int.probe_source_id
out_l[i]["url_1"] = "http://"+host+'/WONKA/show_point/?x_com='+str(f.x_com)+'&y_com='+str(f.y_com)+'&z_com='+str(f.z_com)
f = my_int.probe_dest_id
out_l[i]["url_2"] = "http://"+host+'/WONKA/show_point/?x_com='+str(f.x_com)+'&y_com='+str(f.y_com)+'&z_com='+str(f.z_com)
out_l[i]["dist"] = my_int.dist
out_l[i]["type"] = my_int.type
out_l[i]["angle_1"] = my_int.angle_1
out_l[i]["angle_2"] = my_int.angle_2
return HttpResponse(json.dumps(out_l)) | 86d4444b1c4a9251e8be91f453e1489234edf087 | 3,548 |
import json
def sendmail_action():
"""Send an email to the address entered in the sendmail form."""
if not MSGRAPHAPI.loggedin:
redirect("/sendmail")
email_body = json.dumps(
{
"Message": {
"Subject": request.query.subject,
"Body": {"ContentType": "HTML", "Content": request.query.body},
"ToRecipients": [{"EmailAddress": {"Address": request.query.to}}],
},
"SaveToSentItems": "true",
}
)
# send the email
response = MSGRAPHAPI.post(endpoint="me/microsoft.graph.sendMail", data=email_body)
# refresh the sendmail page, showing result (status_code) for this action
return template(
"sendmail.tpl",
dict(
fullname=MSGRAPHAPI.loggedin_name,
email=MSGRAPHAPI.loggedin_email,
status_code=response.status_code,
),
) | ed4f31ee22005e9ea1b3e24ca92b33be3980ebe2 | 3,549 |
import argparse
def parseArg():
"""
CMD argument parsing
:return: the parser
"""
parser = argparse.ArgumentParser(description='SAT solver')
parser.add_argument('infile', nargs=1, type=argparse.FileType('r'))
parser.add_argument('level', nargs='?', default=0, type=int)
return parser | 9bb757294d699208307c9a7188396f84743dcc28 | 3,550 |
import math
def normal_to_angle(x, y):
"""
    Take the x and y components of a normal vector and return the angle they define.
:type x: float
:param x: x normal
:type y: float
:param y: y normal
:rtype: float
    :return: angle in degrees defined by the two normal components
"""
return math.atan2(y, x) * 180 / math.pi | c6f5b5e2952858cd3592b4e0849806b0ccd5de78 | 3,551 |
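Two quick sanity checks for normal_to_angle on made-up inputs:

normal_to_angle(1, 0)   # 0.0  -- normal along +x
normal_to_angle(0, 1)   # 90.0 -- normal along +y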
def glDeleteFramebuffersEXT( baseOperation, n, framebuffers=None ):
"""glDeleteFramebuffersEXT( framebuffers ) -> None
"""
if framebuffers is None:
framebuffers = arrays.GLuintArray.asArray( n )
n = arrays.GLuintArray.arraySize( framebuffers )
return baseOperation( n, framebuffers ) | 8c14fa4ce55c6fe995c01822776b17728e132987 | 3,552 |
import yaml
import json
def enforce(data, service=False, tenant=None):
"""Enforce zone app or service."""
tstr = " -tenant=%s " % (tenant) if tenant else ""
ostr = " -enforce-service " if service else " -enforce-zone-app "
ret_val = {}
if not data:
ret_val['empty'] = {"success": "Empty enforcement request"}
else:
if g_debug:
print(yaml.dump(data))
rc = run_command("%s api %s %s" % (
g_araalictl_path, ostr, tstr), in_text=yaml.dump(data), debug=False, result=True, strip=False)
assert rc[0] == 0, rc[1]
ret_val = json.loads(rc[1])
return ret_val | b6b9ce5e86e21cf540844232dbddd11168b7ba81 | 3,553 |
import numpy as np
def outline(image, mask, color):
"""
Give a color to the outline of the mask
Args:
image: an image
mask: a label
color: a RGB color for outline
Return:
image: the image which is drawn outline
"""
mask = np.round(mask)
yy, xx = np.nonzero(mask)
for y, x in zip(yy, xx):
if 0.0 < np.mean(mask[max(0, y - 1) : y + 2, max(0, x - 1) : x + 2]) < 1.0:
image[max(0, y) : y + 1, max(0, x) : x + 1] = color
return image | dc66410053e2326965c591a9d3b566e477133295 | 3,554 |
def bogen_ab(dl, dr):
"""Bogen abwärts, für weites D (durch usw.).
Ende nur spitz wenn allein steht, sonst letzten Stützpunkt etwas früher,
letzten Kontrollpunkt für Fortsetzung in gleicher Richtung setzen, damit
glatte Verbindung zu Folgekonsonant.
"""
y0 = 0.5 # Höhe Anfang- und Endpunkt
h = 0.65
l = 0.3
b = [(0, y0), (0, y0)] # Start immer spitz [P2], (P3/Q0) # TODO: ändern für jedoch
m = [(l, y0 - h), (1 - l, y0 - h)] # [Q1], [Q2]
e = [(1, y0), (1, y0)] if not dr else [(1 - l/2, y0 - h/2), (1 - l/3, y0 - h/3)] # Ende (Q3/R0)
return b + m + e | 64ea1ca1fad647b021b5088c9df96bcce50c646c | 3,555 |
import socket
def is_open_port(port):
"""
    Check whether a local port is free (i.e. can be bound)
    :param port: port number to be checked
    :type port: int
    :return: True if the port could be bound, False if it is already in use
"""
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.bind(("127.0.0.1", port))
    except socket.error:
        return False
    finally:
        s.close()  # always release the socket so the port is not left bound
return True | df81cc942f39d00bbdb8cb11628666a117c9788f | 3,556 |
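A small helper sketch built on is_open_port (the helper name is hypothetical): scan a range and return the first port that can still be bound.

def find_free_port(start=8000, end=8100):
    """Return the first free local port in [start, end), or None if all are taken."""
    for port in range(start, end):
        if is_open_port(port):
            return port
    return None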
def stopping_fn_from_metric(metric_name: str):
"""
Returns a stopping function for ignite.handlers.EarlyStopping using the given metric name.
"""
def stopping_fn(engine: Engine):
return engine.state.metrics[metric_name]
return stopping_fn | c6b47fd417134eb72c017a4cd55b8dfc995ea0c5 | 3,557 |
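A wiring sketch for stopping_fn_from_metric (the trainer/evaluator engines and the "accuracy" metric are assumptions): the returned score function plugs directly into ignite's EarlyStopping handler.

from ignite.engine import Events
from ignite.handlers import EarlyStopping

# `trainer` and `evaluator` are assumed to be existing ignite Engines with an
# "accuracy" metric attached to the evaluator.
handler = EarlyStopping(patience=5,
                        score_function=stopping_fn_from_metric("accuracy"),
                        trainer=trainer)
evaluator.add_event_handler(Events.COMPLETED, handler)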
def deconstruct_proto(model_proto, compression_pipeline):
"""Deconstruct the protobuf.
Args:
model_proto: The protobuf of the model
compression_pipeline: The compression pipeline object
Returns:
        tensor_dict: A dictionary of tensors reconstructed (decompressed) from the protobuf
"""
# extract the tensor_dict and metadata
bytes_dict, metadata_dict = model_proto_to_bytes_and_metadata(model_proto)
# decompress the tensors
# TODO: Handle tensors meant to be held-out from the compression pipeline
# (currently none are held out).
tensor_dict = {}
for key in bytes_dict:
tensor_dict[key] = compression_pipeline.backward(data=bytes_dict[key],
transformer_metadata=metadata_dict[key])
return tensor_dict | 5a8878c84fef7094ba334d976f80b32be4561d7d | 3,558 |
def build_attribute_set(items, attr_name):
"""Build a set off of a particular attribute of a list of
objects. Adds 'None' to the set if one or more of the
objects in items is missing the attribute specified by
attr_name.
"""
attribute_set = set()
for item in items:
attribute_set.add(getattr(item, attr_name, None))
return attribute_set | 2cee5922463188a4a8d7db79d6be003e197b577f | 3,559 |
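A short usage sketch with ad-hoc objects (illustrative data only):
from types import SimpleNamespace

items = [SimpleNamespace(kind="a"), SimpleNamespace(kind="b"), SimpleNamespace()]
# the last object lacks the attribute, so None appears in the result
print(build_attribute_set(items, "kind"))  # {'a', 'b', None} (order may vary)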
def potential_fn(q):
"""
    Negative log density (up to an additive constant) of a normal distribution
    with mean ``true_mean`` and standard deviation ``true_std``.
"""
return 0.5 * np.sum(((q['z'] - true_mean) / true_std) ** 2) | f2b4ff07b7188494c9901c85f3b920e9d273a3f4 | 3,560 |
from typing import Union
import torch
from typing import Tuple
def estimate_pointcloud_local_coord_frames(
pointclouds: Union[torch.Tensor, "Pointclouds"],
neighborhood_size: int = 50,
disambiguate_directions: bool = True,
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Estimates the principal directions of curvature (which includes normals)
of a batch of `pointclouds`.
The algorithm first finds `neighborhood_size` nearest neighbors for each
point of the point clouds, followed by obtaining principal vectors of
covariance matrices of each of the point neighborhoods.
The main principal vector corresponds to the normals, while the
other 2 are the direction of the highest curvature and the 2nd highest
curvature.
Note that each principal direction is given up to a sign. Hence,
    the function implements a `disambiguate_directions` switch that enforces
    sign consistency between neighboring normals. The implementation
    follows the sign disambiguation from SHOT descriptors [1].
The algorithm also returns the curvature values themselves.
These are the eigenvalues of the estimated covariance matrices
of each point neighborhood.
Args:
**pointclouds**: Batch of 3-dimensional points of shape
`(minibatch, num_point, 3)` or a `Pointclouds` object.
**neighborhood_size**: The size of the neighborhood used to estimate the
geometry around each point.
**disambiguate_directions**: If `True`, uses the algorithm from [1] to
ensure sign consistency of the normals of neighboring points.
Returns:
**curvatures**: The three principal curvatures of each point
of shape `(minibatch, num_point, 3)`.
If `pointclouds` are of `Pointclouds` class, returns a padded tensor.
**local_coord_frames**: The three principal directions of the curvature
around each point of shape `(minibatch, num_point, 3, 3)`.
The principal directions are stored in columns of the output.
E.g. `local_coord_frames[i, j, :, 0]` is the normal of
`j`-th point in the `i`-th pointcloud.
If `pointclouds` are of `Pointclouds` class, returns a padded tensor.
References:
[1] Tombari, Salti, Di Stefano: Unique Signatures of Histograms for
Local Surface Description, ECCV 2010.
"""
points_padded, num_points = convert_pointclouds_to_tensor(pointclouds)
ba, N, dim = points_padded.shape
if dim != 3:
raise ValueError(
"The pointclouds argument has to be of shape (minibatch, N, 3)"
)
if (num_points <= neighborhood_size).any():
raise ValueError(
"The neighborhood_size argument has to be"
+ " >= size of each of the point clouds."
)
# undo global mean for stability
# TODO: replace with tutil.wmean once landed
pcl_mean = points_padded.sum(1) / num_points[:, None]
points_centered = points_padded - pcl_mean[:, None, :]
# get the per-point covariance and nearest neighbors used to compute it
cov, knns = get_point_covariances(points_centered, num_points, neighborhood_size)
# get the local coord frames as principal directions of
# the per-point covariance
# this is done with torch.symeig, which returns the
# eigenvectors (=principal directions) in an ascending order of their
# corresponding eigenvalues, while the smallest eigenvalue's eigenvector
# corresponds to the normal direction
curvatures, local_coord_frames = torch.symeig(cov, eigenvectors=True)
# disambiguate the directions of individual principal vectors
if disambiguate_directions:
# disambiguate normal
n = _disambiguate_vector_directions(
points_centered, knns, local_coord_frames[:, :, :, 0]
)
# disambiguate the main curvature
z = _disambiguate_vector_directions(
points_centered, knns, local_coord_frames[:, :, :, 2]
)
# the secondary curvature is just a cross between n and z
y = torch.cross(n, z, dim=2)
# cat to form the set of principal directions
local_coord_frames = torch.stack((n, y, z), dim=3)
return curvatures, local_coord_frames | c2ffe5482e959d2b15dbd0df0ac71df5d043af01 | 3,561 |
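A usage sketch on random data (point values are illustrative; the helper functions referenced inside the snippet are assumed to be importable from the same module):
import torch

points = torch.randn(2, 500, 3)          # two point clouds, 500 points each
curvatures, frames = estimate_pointcloud_local_coord_frames(
    points, neighborhood_size=30, disambiguate_directions=True
)
normals = frames[:, :, :, 0]              # per-point normals, shape (2, 500, 3)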
import yaml
def patch_rbac(rbac_v1: RbacAuthorizationV1Api, yaml_manifest) -> RBACAuthorization:
"""
Patch a clusterrole and a binding.
:param rbac_v1: RbacAuthorizationV1Api
:param yaml_manifest: an absolute path to yaml manifest
:return: RBACAuthorization
"""
with open(yaml_manifest) as f:
docs = yaml.safe_load_all(f)
role_name = ""
binding_name = ""
for dep in docs:
if dep["kind"] == "ClusterRole":
print("Patch the cluster role")
role_name = dep["metadata"]["name"]
rbac_v1.patch_cluster_role(role_name, dep)
print(f"Patched the role '{role_name}'")
elif dep["kind"] == "ClusterRoleBinding":
print("Patch the binding")
binding_name = dep["metadata"]["name"]
rbac_v1.patch_cluster_role_binding(binding_name, dep)
print(f"Patched the binding '{binding_name}'")
return RBACAuthorization(role_name, binding_name) | 1830cc26dc79674bfb628556e34572ddc6d6e89e | 3,562 |
def _shp_to_boundary_gdf(shp_file_path):
""".shpかshpが格納された.zipを指定してgdfを作成する
Args:
shp_file_path (Path): 変換対象のshpファイルを格納するディレクトリのパス文字列
Returns:
gpd.GeoDataFrame: shpを変換したGeoDataFrame
"""
s2g = ShapeToGeoPandas(str(shp_file_path.resolve()))
gdf = s2g.gdf
necessary_columns = [
"KEY_CODE",
"PREF",
"CITY",
"PREF_NAME",
"CITY_NAME",
"geometry"]
geo_d = GdfDissolve(gdf, necessary_columns)
geo_d.join_columns("AREA_CODE", "PREF", "CITY")
geo_d.dissolve_poly("AREA_CODE")
boundary_gdf = geo_d.new_gdf
geojson_obj = geojson_str_to_obj(df_to_geojson(boundary_gdf))
write_geojson(geojson_obj, "./created/", "boundary.geojson")
output_csv_from_df(boundary_gdf, "./created/", "boundary.csv")
return boundary_gdf | 4e1aef42f6bb16f184f0669b37dc7720851bee14 | 3,563 |
import random
def insert_site(site, seq, offset=None):
"""Inserts a sequence (represeting a site) into a larger sequence (which
is a sequence object rather than a series of letters."""
# inputs:
# site The site to be inserted
# offsets the offset where the site is to be inserted
# seq The sequence into which the specified site is to
# be implanted
# get sequence info
name = seq.getName()
seq_data = seq.getSeq()
assert ((offset == None) or ((offset >= 0) and \
(offset <= (len(seq_data) - len(site)))))
# select a random offset if none given:
if (offset == None):
# insert signal in a random position, from 0 up to m (= l - w)
offset = random.randint(0,(len(seq_data) - len(site)))
# insert the signal
signal_seq = seq_data[:offset]+str(site)+seq_data[(offset + len(site)):]
# create a modified sequence object to return
new_seq = sequence.Seq(name, signal_seq)
return new_seq | 88a4df8e2ab094337a27d0e2d84c2078dfed5cc4 | 3,564 |
def sex2bpzmags(f, ef, zp=0., sn_min=1., m_lim=None):
"""
This function converts a pair of flux, error flux measurements from SExtractor
into a pair of magnitude, magnitude error which conform to BPZ input standards:
- Nondetections are characterized as mag=99, errormag=m_1sigma
- Objects with absurd flux/flux error combinations or very large errors are
characterized as mag=-99 errormag=0.
"""
nondetected = less_equal(f, 0.) * greater(
ef, 0) #Flux <=0, meaningful phot. error
nonobserved = less_equal(ef, 0.) #Negative errors
#Clip the flux values to avoid overflows
f = clip(f, 1e-100, 1e10)
ef = clip(ef, 1e-100, 1e10)
nonobserved += equal(ef, 1e10)
nondetected += less_equal(
old_div(f, ef),
sn_min) #Less than sn_min sigma detections: consider non-detections
detected = logical_not(nondetected + nonobserved)
m = zeros(len(f)) * 1.
em = zeros(len(ef)) * 1.
m = where(detected, -2.5 * log10(f) + zp, m)
m = where(nondetected, 99., m)
m = where(nonobserved, -99., m)
em = where(detected, 2.5 * log10(1. + old_div(ef, f)), em)
if not m_lim:
em = where(nondetected, -2.5 * log10(ef) + zp, em)
else:
em = where(nondetected, m_lim, em)
em = where(nonobserved, 0., em)
return m, em | ec95566f680326cd550626495a6983d0221ae29b | 3,565 |
def get_elk_command(line):
"""Return the 2 character command in the message."""
if len(line) < 4:
return ""
return line[2:4] | 550eda4e04f57ae740bfd294f9ec3b243e17d279 | 3,566 |
def safe_div(a, b):
"""
Safe division operation. When b is equal to zero, this function returns 0.
Otherwise it returns result of a divided by non-zero b.
:param a: number a
:param b: number b
:return: a divided by b or zero
"""
if b == 0:
return 0
return a / b | 68e5bccbe812315b9a1d27a1fa06d26d5339d6fd | 3,567 |
def decrypt_story():
"""
Using the methods you created in this problem set,
decrypt the story given by the function getStoryString().
Use the functions getStoryString and loadWords to get the
raw data you need.
returns: string - story in plain text
"""
story = CiphertextMessage(get_story_string())
return story.decrypt_message() | af636efedf5e95847c4a7dd52031cfc3dfa094a0 | 3,568 |
def shouldAvoidDirectory(root, dirsToAvoid):
"""
Given a directory (root, of type string) and a set of directory
paths to avoid (dirsToAvoid, of type set of strings), return a boolean value
    describing whether the given directory lies inside any of the directories to avoid.
"""
subPaths = root.split('/')
for i, subPath in enumerate(subPaths):
dir = '/'.join(subPaths[:i+1])
if dir in dirsToAvoid:
return True
return False | afc92111f57031eb1e2ba797d80ea4abc2a7ccd0 | 3,569 |
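For example:
dirs_to_avoid = {"project/build", "project/.git"}
print(shouldAvoidDirectory("project/build/temp", dirs_to_avoid))  # True
print(shouldAvoidDirectory("project/src", dirs_to_avoid))         # False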
def atom_explicit_hydrogen_valences(xgr):
""" explicit hydrogen valences, by atom
"""
return dict_.transform_values(atom_explicit_hydrogen_keys(xgr), len) | e409b82606f113373086ca52689baeb117d3a8ea | 3,570 |
def change_account_type(user_id):
"""Change a user's account type."""
if current_user.id == user_id:
flash('You cannot change the type of your own account. Please ask '
'another administrator to do this.', 'error')
return redirect(url_for('admin.user_info', user_id=user_id))
user = User.query.get(user_id)
if user is None:
abort(404)
form = ChangeAccountTypeForm()
if form.validate_on_submit():
user.role = form.role.data
db.session.add(user)
db.session.commit()
flash('Role for user {} successfully changed to {}.'.format(
user.full_name(), user.role.name), 'form-success')
return render_template('admin/manage_user.html', user=user, form=form) | 3c13b4ea1d3b080a4c925d576eee29c61346363a | 3,571 |
import re
def test_runner(filtered_tests, args):
"""
Driver function for the unit tests.
Prints information about the tests being run, executes the setup and
teardown commands and the command under test itself. Also determines
success/failure based on the information in the test case and generates
TAP output accordingly.
"""
testlist = filtered_tests
tcount = len(testlist)
index = 1
tap = str(index) + ".." + str(tcount) + "\n"
for tidx in testlist:
result = True
tresult = ""
if "flower" in tidx["category"] and args.device == None:
continue
print("Test " + tidx["id"] + ": " + tidx["name"])
prepare_env(tidx["setup"])
(p, procout) = exec_cmd(tidx["cmdUnderTest"])
exit_code = p.returncode
if (exit_code != int(tidx["expExitCode"])):
result = False
print("exit:", exit_code, int(tidx["expExitCode"]))
print(procout)
else:
match_pattern = re.compile(str(tidx["matchPattern"]), re.DOTALL)
(p, procout) = exec_cmd(tidx["verifyCmd"])
match_index = re.findall(match_pattern, procout)
if len(match_index) != int(tidx["matchCount"]):
result = False
if result == True:
tresult += "ok "
else:
tresult += "not ok "
tap += tresult + str(index) + " " + tidx["id"] + " " + tidx["name"] + "\n"
if result == False:
tap += procout
prepare_env(tidx["teardown"])
index += 1
return tap | 39e1af33a563dd9915ec6f107c8dd1f199528cab | 3,572 |
def get_all_infos_about_argument(db_argument: Argument, main_page, db_user, lang) -> dict:
"""
Returns bunch of information about the given argument
:param db_argument: The argument
:param main_page: url of the application
:param db_user: User
:param lang: Language
:rtype: dict
:return: dictionary with many information or an error
"""
_t = Translator(lang.ui_locales)
return_dict = dict()
db_votes = DBDiscussionSession.query(ClickedArgument).filter(ClickedArgument.argument_uid == db_argument.uid,
ClickedArgument.is_valid == True,
ClickedStatement.is_up_vote == True).all()
db_author = DBDiscussionSession.query(User).get(db_argument.author_uid)
return_dict['vote_count'] = str(len(db_votes))
return_dict['author'] = db_author.global_nickname
return_dict['author_url'] = main_page + '/user/' + str(db_author.uid)
return_dict['gravatar'] = get_profile_picture(db_author)
return_dict['timestamp'] = sql_timestamp_pretty_print(db_argument.timestamp, db_argument.lang)
text = get_text_for_argument_uid(db_argument.uid)
return_dict['text'] = start_with_capital(text)
supporters = []
gravatars = dict()
public_page = dict()
for vote in db_votes:
db_author = DBDiscussionSession.query(User).get(vote.author_uid)
name = db_author.global_nickname
if db_user.nickname == db_author.nickname:
name += ' (' + _t.get(_.itsYou) + ')'
supporters.append(name)
gravatars[name] = get_profile_picture(db_author)
public_page[name] = main_page + '/user/' + str(db_author.uid)
return_dict['supporter'] = supporters
return_dict['gravatars'] = gravatars
return_dict['public_page'] = public_page
return return_dict | 56c03e8d11d23c3726b54b2d2be954ecefba786b | 3,573 |
def average_proxy(ray, method, proxy_type):
""" method to average proxy over the raypath.
Simple method is direct average of the proxy: $\sum proxy(r) / \sum dr$.
Other methods could be: $1/(\sum 1 / proxy)$ (better for computing \delta t)
"""
total_proxy = 0.
try:
        method.evaluation
except (NameError, AttributeError):
method.evaluation = None # in case the variable was not defined.
if method.evaluation == "inverse":
for _, point in enumerate(ray):
_proxy = method.proxy_singlepoint(point, proxy_type)[proxy_type]
total_proxy += 1. / _proxy
number = len(ray)
proxy = 1. / total_proxy / float(number)
else:
for j, point in enumerate(ray):
_proxy = method.proxy_singlepoint(point, proxy_type)[proxy_type]
total_proxy += _proxy
number = len(ray)
proxy = total_proxy / float(number)
return proxy | 912925339f6a2087020781e898de102c3ee4f0d6 | 3,574 |
import _datetime
import datetime
def after(base=_datetime, diff=None):
"""
count datetime after diff args
:param base: str/datetime/date
:param diff: str
:return: datetime
"""
_base = parse(base)
if isinstance(_base, datetime.date):
_base = midnight(_base)
result_dict = dp(diff)
for unit in result_dict:
_val = result_dict[unit]
if not _val:
continue
if unit == 'years':
_base = _base.replace(year=(_base.year + _val))
elif unit == 'months':
if _base.month + _val <= 12:
_base = _base.replace(month=_base.month + _val)
else:
_month_diff = (_base.month + _val) - 12
_base = _base.replace(year=_base.year + 1).replace(month=_month_diff)
elif unit in ['days', 'hours', 'minutes', 'seconds']:
_base = _base + datetime.timedelta(**{unit: _val})
return _base | 80f3ccb2d45f247a93a0d69bf0a55bc9264ee7de | 3,575 |
import string
import numpy as np
from itertools import compress
def open_lib(ifile):
    """Opens lib with name ifile and returns stationmeta, arraydim, rlengths, heights, sectors, data."""
    with open(ifile, 'r') as f:
        lines = f.readlines()
    lines = [x.strip() for x in lines]
    data = {}
    # drop empty lines so the indexing below is stable
    lines = [x for x in lines if x.strip()]
    data["meta"] = lines[0]
    printable = set(string.printable)
    data["meta"] = "".join(filter(lambda x: x in printable, data["meta"]))
    data["dim"] = np.array(lines[1].split()).astype(int)
    data["R"] = np.array(lines[2].split()).astype(float)  # [m]
    data["H"] = np.array(lines[3].split()).astype(float)  # [m]
    data["sect"] = int(data["dim"][2])
    data_block = lines[4:]
    # frequencies
    data["f"] = convert_to_np(data_block[::len(data["H"])*2+1], data["sect"])
    # create masks for A, k value
    mask = np.ones(len(data_block), dtype=bool)
    mask[::len(data["H"])*2+1] = False
    AK = convert_to_np(list(compress(data_block, mask)), data["sect"])
    data["A"] = AK[::2]
    data["k"] = AK[1::2]
return data | 22fb842112f1558ab086058d997e08cb416e9a19 | 3,576 |
def convert_examples_to_features(examples, intent_label_list, slot_label_list, max_seq_length,
tokenizer):
"""Convert a set of `InputExample`s to a list of `InputFeatures`."""
features = []
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
tf.logging.info("Writing example %d of %d" % (ex_index, len(examples)))
feature = convert_single_example(ex_index, example, intent_label_list, slot_label_list,
max_seq_length, tokenizer)
features.append(feature)
return features | 078f6e23e3a2f42a12851763a3424d938569d25b | 3,577 |
from bs4 import BeautifulSoup
def get_blb_links(driver):
"""takes (driver) and returns list of links to scrape"""
homepage = "https://www.bloomberg.com/europe"
rootpage = "https://www.bloomberg.com"
driver.get(homepage)
ssm = driver.find_elements_by_class_name("single-story-module")[0].get_attribute(
"outerHTML"
)
spm_1 = driver.find_elements_by_class_name("story-package-module")[0].get_attribute(
"outerHTML"
)
spm_2 = driver.find_elements_by_class_name("story-package-module")[1].get_attribute(
"outerHTML"
)
oped = driver.find_elements_by_class_name("story-package-module")[2].get_attribute(
"outerHTML"
)
soup = BeautifulSoup(ssm + spm_1 + spm_2 + oped, "lxml")
links = [
rootpage + link.get("href")
for link in soup.findAll("a")
if "/news/" in link.get("href")
]
links = list(dict.fromkeys(links))
return links | f2ecf967aa6e755b51e43450239b5606013cb9bf | 3,578 |
def randomProfile(freq,psd):
"""
Generate a random profile from an input PSD.
freq should be in standard fft.fftfreq format
psd should be symmetric as with a real signal
sqrt(sum(psd)) will equal RMS of profile
"""
amp = np.sqrt(psd)*len(freq)
ph = randomizePh(amp)
f = amp*ph
sig = np.fft.ifft(f)
return np.real(sig) | 54477fc37bb81c24c0bde3637112a18613201565 | 3,579 |
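A usage sketch (assumes the module's `randomizePh` helper is available; the flat PSD below is illustrative):
import numpy as np

N = 1024
freq = np.fft.fftfreq(N)
psd = np.ones(N) / N                      # white PSD with total power 1
profile = randomProfile(freq, psd)
print(np.sqrt(np.mean(profile**2)))       # RMS ~ sqrt(sum(psd)) = 1.0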
def getP(W, diagRegularize = False):
"""
    Turn a similarity matrix into a probability matrix,
with each row sum normalized to 1
:param W: (MxM) Similarity matrix
:param diagRegularize: Whether or not to regularize
the diagonal of this matrix
:returns P: (MxM) Probability matrix
"""
if diagRegularize:
P = 0.5*np.eye(W.shape[0])
WNoDiag = np.array(W)
np.fill_diagonal(WNoDiag, 0)
RowSum = np.sum(WNoDiag, 1)
RowSum[RowSum == 0] = 1
P = P + 0.5*WNoDiag/RowSum[:, None]
return P
else:
RowSum = np.sum(W, 1)
RowSum[RowSum == 0] = 1
P = W/RowSum[:, None]
return P | 1c5c7da2e86b5c800660acaf825507914cd630ce | 3,580 |
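A small usage sketch with an illustrative symmetric similarity matrix:
import numpy as np

W = np.array([[1.0, 0.5, 0.0],
              [0.5, 1.0, 0.5],
              [0.0, 0.5, 1.0]])
P = getP(W)
print(P.sum(axis=1))   # every row of the probability matrix sums to 1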
def get_structure_index(structure_pattern,stream_index):
"""
Translates the stream index into a sequence of structure indices identifying an item in a hierarchy whose structure is specified by the provided structure pattern.
>>> get_structure_index('...',1)
[1]
>>> get_structure_index('.[.].',1)
[1, 0]
>>> get_structure_index('.[[...],..].',1)
[1, 0, 0]
>>> get_structure_index('.[[...]...].',2)
[1, 0, 1]
>>> get_structure_index('.[[...]...].',3)
[1, 0, 2]
>>> get_structure_index('.[[...]...].',4)
[1, 1]
>>> get_structure_index('.[[...]...].',5)
[1, 2]
>>> get_structure_index('.[[...]...].',6)
[1, 3]
>>> get_structure_index('.[[...]...].',7)
[2]
"""
structure_index = [0]
current_stream_index = 0
for p in structure_pattern:
if p == '[':
structure_index.append(0)
elif p == '.':
if current_stream_index == stream_index:
return structure_index
structure_index[-1] += 1
current_stream_index += 1
elif p == ']':
structure_index.pop(-1)
structure_index[-1] += 1
else:
raise Exception('Invalid character in structure pattern: %s' % repr(p))
raise Exception('Provided stream index does not exist in the provided structure pattern') | 8f1def101aa2ec63d1ea69382db9641bf0f51380 | 3,581 |
import numpy as np
from numpy.random import random
import tensorflow as tf
import torch
# `minimize` below is expected from a scipy-style wrapper that supports
# TF/PyTorch autograd backends (provided elsewhere in the original module).
def n_knapsack(n_knapsacks=5,
n_items=100, # Should be divisible by n_knapsack
n_weights_per_items=500,
use_constraints=False,
method='trust-constr',
backend='tf'
):
"""
Here we solve a continuous relaxation of the multiknapsack problem.
"""
# Let's emulate the multiknapsack problem with random weights
weights_ = random((n_weights_per_items, n_items))
# We create knapsacks with attribution of the items to knapsacks [0,1,2,3,4] as:
# [0 1 2 3 4 0 1 2 3 4 0 1 2 3 4 0 1 2 3 4]
capacity_knapsacks = weights_.reshape(
(n_weights_per_items, -1, n_knapsacks)).sum(-2)
if backend == 'tf':
weights_ = tf.constant(weights_, tf.float32)
capacity_knapsacks_ = tf.constant(capacity_knapsacks, tf.float32)
def func(W):
# We use softmax to impose the constraint that the attribution of items to knapsacks should sum to one
if use_constraints:
W = tf.nn.softmax(W, 1)
# We add a penalty only when the weights attribution sums higher than the knapsacks capacity.
res = tf.nn.relu(weights_@W-capacity_knapsacks_)
res = tf.reduce_mean(res**2)
return res
dev = None
else:
dev = torch.device("cuda" if torch.cuda.is_available() else "cpu")
weights_ = torch.tensor(weights_, dtype=torch.float32, device=dev)
capacity_knapsacks_ = torch.tensor(
capacity_knapsacks, dtype=torch.float32, device=dev)
def func(W):
# We use softmax to impose the constraint that the attribution of items to knapsacks should sum to one
if use_constraints:
W = torch.nn.functional.softmax(W, 1)
# We add a penalty only when the weights attribution sums higher than the knapsacks capacity.
res = torch.nn.functional.relu(weights_@W-capacity_knapsacks_)
res = (res**2).mean()
return res
if use_constraints:
if backend == 'tf':
def eq_fun(W):
return tf.reduce_sum(W, 1)-1
else:
def eq_fun(W):
return W.sum(1)-1
constraints = {
'type': 'eq',
'fun': eq_fun,
'lb': 0,
'ub': 0,
'use_autograd': False
}
else:
constraints = None
Winit = np.zeros((n_items, n_knapsacks))
res = minimize(func, Winit, tol=1e-8,
constraints=constraints,
bounds=(0, None),
method=method,
torch_device=dev,
backend=backend)
return res | ed4068bad29aff385a86286eab224da3c1beefa5 | 3,582 |
def has_url(account: Accounts) -> bool:
"""Return True if the account's note or fields seem to contain a URL."""
if account.note and "http" in account.note.lower():
return True
if "http" in str(account.fields).lower():
return True
return False | d858ef1bedcac064bbc9a5c20de02b1438c5cdad | 3,583 |
from torch import nn
def conv2x2(in_planes, out_planes, stride=1, groups=1, dilation=1, padding=0):
    """2x2 convolution with optional padding"""
return nn.Conv2d(in_planes,
out_planes,
kernel_size=2,
stride=stride,
padding=padding,
groups=groups,
bias=False,
dilation=dilation) | a96112cd56817940292f92b5417584f18b7d2fb7 | 3,584 |
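A quick shape check (illustrative tensor sizes):
import torch

layer = conv2x2(in_planes=3, out_planes=8)
x = torch.randn(1, 3, 32, 32)
print(layer(x).shape)   # torch.Size([1, 8, 31, 31]) -- 2x2 kernel, no padding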
def face_normals(P,T,normalize=True):
"""Computes normal vectors to triangles (faces).
Args:
P: n*3 float array
T: m*3 int array
normalize: Whether or not to normalize to unit vectors. If False, then the magnitude of each vector is twice the area of the corresponding triangle. Default is True
Returns:
A Numpy array of size (num_tri,3) containing the face normal vectors.
"""
P1 = P[T[:,0],:]
P2 = P[T[:,1],:]
P3 = P[T[:,2],:]
N = np.cross(P2-P1,P3-P1)
if normalize:
N = (N.T/np.linalg.norm(N,axis =1)).T
return N | 379c82624aeb1a772befb91acee1b46a7ff7f937 | 3,585 |
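A usage sketch with a single right triangle in the z=0 plane (numpy is assumed to be imported as np in the snippet's module):
import numpy as np

P = np.array([[0.0, 0.0, 0.0],
              [1.0, 0.0, 0.0],
              [0.0, 1.0, 0.0]])
T = np.array([[0, 1, 2]])
print(face_normals(P, T))                    # [[0. 0. 1.]]
print(face_normals(P, T, normalize=False))   # magnitude = 2 * triangle area = 1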
import itertools
def transform_pairwise(X, y):
"""Transforms data into pairs with balanced labels for ranking
Transforms a n-class ranking problem into a two-class classification
problem. Subclasses implementing particular strategies for choosing
pairs should override this method.
    In this method, all pairs are chosen, except for those that have the
same target value. The output is an array of balanced classes, i.e.
there are the same number of -1 as +1
Reference: "Large Margin Rank Boundaries for Ordinal Regression",
R. Herbrich, T. Graepel, K. Obermayer.
Authors: Fabian Pedregosa <[email protected]>
Alexandre Gramfort <[email protected]>
Args:
X: (np.array), shape (n_samples, n_features)
The data
y: (np.array), shape (n_samples,) or (n_samples, 2)
Target labels. If it's a 2D array, the second column represents
the grouping of samples, i.e., samples with different groups will
not be considered.
Returns:
        X_trans: (np.array), shape (k, n_features)
            Data as pairs, where k = n_samples * (n_samples-1) / 2 if grouping
values were not passed. If grouping variables exist, then returns
values computed for each group.
y_trans: (np.array), shape (k,)
Output class labels, where classes have values {-1, +1}
If y was shape (n_samples, 2), then returns (k, 2) with groups on
the second dimension.
"""
X_new, y_new, y_group = [], [], []
y_ndim = y.ndim
if y_ndim == 1:
y = np.c_[y, np.ones(y.shape[0])]
comb = itertools.combinations(range(X.shape[0]), 2)
for k, (i, j) in enumerate(comb):
if y[i, 0] == y[j, 0] or y[i, 1] != y[j, 1]:
# skip if same target or different group
continue
X_new.append(X[i] - X[j])
y_new.append(np.sign(y[i, 0] - y[j, 0]))
y_group.append(y[i, 1])
# output balanced classes
if y_new[-1] != (-1) ** k:
y_new[-1] = -y_new[-1]
X_new[-1] = -X_new[-1]
if y_ndim == 1:
return np.asarray(X_new), np.asarray(y_new).ravel()
elif y_ndim == 2:
return np.asarray(X_new), np.vstack((np.asarray(y_new), np.asarray(y_group))).T | d4c376fe1a594f6baecb78aeaa11e90bffcde8a5 | 3,586 |
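A usage sketch for learning a linear ranking function from the transformed pairs (random data; the scikit-learn classifier is chosen purely for illustration):
import numpy as np
from sklearn.svm import LinearSVC

rng = np.random.RandomState(0)
X = rng.randn(20, 5)
y = np.arange(20) % 4                      # four relevance levels, no grouping
X_pairs, y_pairs = transform_pairwise(X, y)
clf = LinearSVC().fit(X_pairs, y_pairs)    # clf.coef_ defines the ranking direction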
def format_project_title(rank: int, project_id: str, status: str) -> str:
"""Formats a project title for display in Slack.
Args:
rank: The rank of in the list. Will be prepended to the title.
project_id: The project ID.
status: The status of the project. This is used to determine which
emoji is used to prefix the title string.
Returns:
A formatted title string.
"""
project_link = generate_gcp_project_link(project_id)
if status == SETTINGS.STATUS_WARNING:
return f':warning: *{rank}. {project_link}*'
return f':white_check_mark: *{rank}. {project_link}*' | 35a8b7dd2c3e8afd3975ddf97056a81d631e3576 | 3,587 |
def fake_3dimage_vis():
"""
:return: a Nifti1Image (3D) in RAS+ space
Following characteristics:
- shape[LR] = 7
- shape[PA] = 8
- shape[IS] = 9
Visual thing using voxel art...
"""
shape = (7,8,9)
data = np.zeros(shape, dtype=np.float32, order="F")
# "L"
indices =np.array([
(0,1,6),
(0,1,5),
(0,1,4),
(0,1,3),
(0,1,2),
(0,1,1),
(0,2,1),
(0,3,1),
(0,4,1),
(0,5,1),
]).T
data[indices[0], indices[1], indices[2]] = 7
# "P"
indices =np.array([
(1,0,6),
(1,0,5),
(1,0,4),
(1,0,3),
(1,0,2),
(1,0,1),
(2,0,6),
(3,0,5),
(3,0,4),
(2,0,3),
(1,0,2),
]).T
data[indices[0], indices[1], indices[2]] = 9
# "I"
indices =np.array([
(3,1,0),
(2,1,0),
(1,1,0),
(4,1,0),
(5,1,0),
(3,1,0),
(3,2,0),
(3,3,0),
(3,4,0),
(3,5,0),
(3,6,0),
(3,7,0),
(2,7,0),
(1,7,0),
(4,7,0),
(5,7,0),
]).T
data[indices[0], indices[1], indices[2]] = 9
affine = np.eye(4)
return nibabel.nifti1.Nifti1Image(data, affine) | 65d766d04a6a85e5cafcdc0d7c62e2f3974caa5b | 3,588 |
import atexit
from flask import current_app
from prometheus_client import CollectorRegistry, Gauge
from apscheduler.schedulers.background import BackgroundScheduler
def watch_dependencies(dependency, func, time_execution=15000, registry=None, app=current_app):
    """
    Register a gauge metric that periodically records whether a dependency is up.
    """
"""
if not registry:
registry = app.extensions.get("registry", CollectorRegistry())
app.extensions["registry"] = registry
# pylint: disable=invalid-name
DEPENDENCY_UP = Gauge(
'dependency_up',
'records if a dependency is up or down. 1 for up, 0 for down',
["name"],
registry=registry
)
    def register_dependency():
        DEPENDENCY_UP.labels(dependency).set(func())
    scheduler = BackgroundScheduler()
    scheduler.add_job(
        func=register_dependency,
trigger="interval",
seconds=time_execution/1000,
max_instances=1,
name='dependency',
misfire_grace_time=2,
replace_existing=True
)
scheduler.start()
# Shut down the scheduler when exiting the app
atexit.register(scheduler.shutdown)
return scheduler | f502f3e1e6beba5169160459bc0887462ac6662c | 3,589 |
def view_cache_key(func, args, kwargs, extra=None):
"""
Calculate cache key for view func.
Use url instead of not properly serializable request argument.
"""
if hasattr(args[0], 'build_absolute_uri'):
uri = args[0].build_absolute_uri()
else:
uri = args[0]
return 'v:' + func_cache_key(func, args[1:], kwargs, extra=(uri, extra)) | 85139dc6a77d3758b6e691b565361b390c643e91 | 3,590 |
def get_filter(sampling_freq, f_pass, f_stop, taps):
"""Get FIR filter coefficients using the Remez exchange algorithm.
    Args:
        sampling_freq (float): Sampling frequency used to normalize the band edges.
        f_pass (float): Passband edge.
f_stop (float): Stopband edge.
taps (int): Number of taps or coefficients in the resulting filter.
Returns:
(numpy.ndarray): Computed filter coefficients.
"""
return ffd.remez(taps, [0, f_pass/sampling_freq, f_stop/sampling_freq, 0.5], [0, 1]) | a0fb303ad74ee6e60dd0521af5e00b4b1d1d42e0 | 3,591 |
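A usage sketch, assuming the snippet's `ffd` alias refers to `scipy.signal` (frequency values are illustrative):
import scipy.signal as ffd   # assumed binding for the alias used above

coeffs = get_filter(sampling_freq=1000.0, f_pass=100.0, f_stop=150.0, taps=65)
print(len(coeffs))           # 65 filter coefficients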
def errorcode_from_error(e):
"""
Get the error code from a particular error/exception caused by PostgreSQL.
"""
return e.orig.pgcode | 981b447d540e949834c10f4a05fb21769091b104 | 3,592 |
def _comp_point_coordinate(self):
"""Compute the point coordinates needed to plot the Slot.
Parameters
----------
self : SlotW28
A SlotW28 object
Returns
-------
point_dict: dict
A dict of the slot point coordinates
"""
Rbo = self.get_Rbo()
# alpha is the angle to rotate Z0 so ||Z1,Z8|| = W0
alpha = float(arcsin(self.W0 / (2 * Rbo)))
slot_pitch = 2 * pi / self.Zs
# comp point coordinate (in complex)
Z0 = Rbo * exp(1j * 0)
Z8 = Z0 * exp(-1j * alpha)
if self.is_outwards():
Z7 = Z8 + self.H0
# Rotation to get the tooth on X axis
Z7 = Z7 * exp(1j * slot_pitch / 2)
Z8 = Z8 * exp(1j * slot_pitch / 2)
# Z7 = x7 + 1j*y7
# Z6 = x + 1j * W3/2
# C2,Z6 _|_ Z6,Z5 => Re(C2) = Re(Z6)
# ||Z6,zc2|| = R1 => Zc2 = x + 1j*(W3/2+R1)
# ||Z7,zc2||² = R1² => (x7-x)²+ (y7-(W3/2+R1))² = R1²
# x² - 2*x7 x + (x7²+(y7-(W3/2+R1))²-R1²) = 0
# D = 4*x7² - 4*(x7²+(y7-(W3/2+R1))²-R1²) = -4((y7-(W3/2+R1))²-R1²)
# x = x7 + sqrt(-4((y7-(W3/2+R1))²-R1²))/2
Z6 = (
Z7.real
+ sqrt(-4 * ((Z7.imag - (self.W3 / 2.0 + self.R1)) ** 2 - self.R1 ** 2)) / 2
+ 1j * self.W3 / 2.0
)
Z5 = Z6 + self.H3
rot_sign = 1
else: # inward slot
Z7 = Z8 - self.H0
# Rotation to get the tooth on X axis
Z7 = Z7 * exp(1j * slot_pitch / 2)
Z8 = Z8 * exp(1j * slot_pitch / 2)
Z6 = (
Z7.real
- sqrt(-4 * ((Z7.imag - (self.W3 / 2.0 + self.R1)) ** 2 - self.R1 ** 2)) / 2
+ 1j * self.W3 / 2.0
)
Z5 = Z6 - self.H3
rot_sign = -1
# Tooth ref to slot
Z1, Z2, Z3, Z4 = (
Z8 * exp(-1j * slot_pitch / 2),
Z7 * exp(-1j * slot_pitch / 2),
Z6 * exp(-1j * slot_pitch / 2),
Z5 * exp(-1j * slot_pitch / 2),
)
point_dict = dict()
point_dict["Z1"] = Z1
point_dict["Z2"] = Z2
point_dict["Z3"] = Z3
point_dict["Z4"] = Z4
    # symmetry
point_dict["Z5"] = Z4.conjugate()
point_dict["Z6"] = Z3.conjugate()
point_dict["Z7"] = Z2.conjugate()
point_dict["Z8"] = Z1.conjugate()
# Center
A = Arc1(Z2, Z3, rot_sign * self.R1, self.is_outwards())
point_dict["Zc1"] = A.get_center()
point_dict["Zc2"] = (point_dict["Z4"] + point_dict["Z5"]) / 2
point_dict["Zc3"] = point_dict["Zc1"].conjugate()
return point_dict | 0938c9ecba5e9fc1ed76e804e8eab6f9bd97f253 | 3,593 |
from typing import IO
def save_expected_plot(series: pd.Series, colour="C0") -> IO:
"""Return an image of the plot with the given `series` and `colour`."""
fig, ax = plt.subplots()
ax.add_line(mpl_lines.Line2D(series.index, series.values, color=colour))
return _save_fig(fig, ax) | 6c898a622f983cebd46fb368a40e2a492bddb37f | 3,594 |
from tensorflow.keras.layers import Input, GRU, Dropout, Dense
from tensorflow.keras.models import Model
def GRU_architecture(
GRU_layers,
GRU_neurons,
Dense_layers,
Dense_neurons,
add_Dropout,
Dropout_rate,
data_shape,
):
"""
Parameters
----------
GRU_layers : int
Number of GRU layers.
GRU_neurons : list
List with the numbers of GRU cells in each GRU layer.
Dense_layers : int
Number of Dense layers after GRU layers.
Dense_neurons : list
        List with the numbers of neurons in each fully-connected layer.
add_Dropout : bool
Specifies whether dropout regularization should be applied.
Dropout_rate : float
Dropout rate - the number between 0 and 1.
data_shape : tuple
Shape of the training data.
Returns
-------
model : keras.engine.training.Model
Model with the specified architecture.
"""
# data_shape[1] - lag, data_shape[2] - number of signals
input_layer = Input((data_shape[1], data_shape[2]))
# If there is only one GRU layer, than return_sequences should be false
if GRU_layers == 1:
layers_gru = GRU(
GRU_neurons[0],
activation="tanh",
recurrent_activation="tanh",
use_bias=True,
return_sequences=False,
)(input_layer)
    # For many GRU layers return_sequences should be True, to connect layers with each other
else:
layers_gru = input_layer
# Adding GRU layers
for grul in range(0, GRU_layers - 1):
layers_gru = GRU(
GRU_neurons[grul],
activation="tanh",
recurrent_activation="tanh",
use_bias=True,
return_sequences=True,
)(layers_gru)
# Adding Dropout
if add_Dropout:
layers_gru = Dropout(Dropout_rate)(layers_gru)
# Adding last GRU layer
layers_gru = GRU(
GRU_neurons[-1],
activation="tanh",
recurrent_activation="tanh",
use_bias=True,
return_sequences=False,
)(layers_gru)
# Adding Dropout
if add_Dropout:
layers_gru = Dropout(Dropout_rate)(layers_gru)
# Adding Dense layers if asked
for densel in range(Dense_layers):
layers_gru = Dense(Dense_neurons[densel], activation="relu")(layers_gru)
# Adding Dropout
if add_Dropout:
layers_gru = Dropout(Dropout_rate)(layers_gru)
# Adding output layer
output = Dense(1, activation="linear")(layers_gru)
model = Model(inputs=input_layer, outputs=output)
return model | 77c2aaed6a82f2f4aedade73649d843ff8afd2e2 | 3,595 |
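A usage sketch with toy dimensions (32 samples, lag of 10 time steps, 3 signals; data values are illustrative):
import numpy as np

X_train = np.zeros((32, 10, 3), dtype="float32")
model = GRU_architecture(
    GRU_layers=2, GRU_neurons=[64, 32],
    Dense_layers=1, Dense_neurons=[16],
    add_Dropout=True, Dropout_rate=0.2,
    data_shape=X_train.shape,
)
model.compile(optimizer="adam", loss="mse")
model.summary()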
def create_lock(name):
"""Creates a file in the /locks folder by the given name"""
lock_path = get_lock_path(name)
if not check_lock(lock_path):
return touch_file(lock_path)
else:
return False | 1f5e4d43a85a01aaba19295a72cdee20fd7adb1b | 3,596 |
def gen_rigid_tform_rot(image, spacing, angle):
"""
generate a SimpleElastix transformation parameter Map to rotate image by angle
Parameters
----------
image : sitk.Image
SimpleITK image that will be rotated
spacing : float
Physical spacing of the SimpleITK image
angle : float
angle of rotation in degrees, rotates counter-clockwise if positive
Returns
-------
SimpleITK.ParameterMap of rotation transformation (EulerTransform)
"""
tform = BASE_RIG_TFORM.copy()
image.SetSpacing((spacing, spacing))
bound_w, bound_h = compute_rot_bound(image, angle=angle)
rot_cent_pt = image.TransformContinuousIndexToPhysicalPoint(
((bound_w - 1) / 2, (bound_h - 1) / 2)
)
c_x, c_y = (image.GetSize()[0] - 1) / 2, (image.GetSize()[1] - 1) / 2
c_x_phy, c_y_phy = image.TransformContinuousIndexToPhysicalPoint(
(c_x, c_y)
)
t_x = rot_cent_pt[0] - c_x_phy
t_y = rot_cent_pt[1] - c_y_phy
tform["Spacing"] = [str(spacing), str(spacing)]
tform["Size"] = [str(int(np.ceil(bound_w))), str(int(np.ceil(bound_h)))]
tform["CenterOfRotationPoint"] = [str(rot_cent_pt[0]), str(rot_cent_pt[1])]
tform["TransformParameters"] = [
str(np.radians(angle)),
str(-1 * t_x),
str(-1 * t_y),
]
return tform | 5cadbd59340328d1af5e4ec2c5c0c47e69e19013 | 3,597 |
def get_param_response(param_name, dict_data, num=0, default=None):
"""
    :param param_name: the parameter to extract from the API response
    :param dict_data: the API response
    :param num: when the value is a list, the index of the element to take
    :param default: value returned when extraction fails
    :return: the extracted parameter value
"""
if isinstance(dict_data, dict):
for k, v in dict_data.items():
if k == param_name:
return v
else:
if isinstance(v, dict):
ret = get_param_response(param_name, v)
if ret is not default:
return ret
if isinstance(v, list):
if num:
try:
if isinstance(v[num], dict):
ret = get_param_response(param_name, v[num])
if ret is not default:
return ret
except IndexError:
return {'error': ErrorCode.index_error}
else:
for i in v:
if isinstance(i, dict):
ret = get_param_response(param_name, i)
if ret is not default:
return ret
if isinstance(v, str):
try:
value = eval(v)
ret = get_param_response(param_name, value)
if ret is not default:
return ret
except Exception:
pass
elif isinstance(dict_data, list):
for content in dict_data:
ret = get_param_response(param_name, content)
if ret is not default:
return ret
return default | b9ffa5dfe6e9707771a812dad1d8d9284acbc2e7 | 3,598 |
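For example, extracting values from a nested response (illustrative payload):
response = {
    "code": 0,
    "data": {"user": {"user_id": 42, "roles": [{"role_id": 7}]}},
}
print(get_param_response("user_id", response))   # 42
print(get_param_response("role_id", response))   # 7
print(get_param_response("missing", response))   # None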
def _strict_conv1d(x, h):
"""Return x * h for rank 1 tensors x and h."""
with ops.name_scope('strict_conv1d', values=[x, h]):
x = array_ops.reshape(x, (1, -1, 1, 1))
h = array_ops.reshape(h, (-1, 1, 1, 1))
result = nn_ops.conv2d(x, h, [1, 1, 1, 1], 'SAME')
return array_ops.reshape(result, [-1]) | dabe247f97c285ed5d250d7d8d99dae5795a8fec | 3,599 |