content (string, 35 to 762k chars) | sha1 (string, 40 chars) | id (int64, 0 to 3.66M)
---|---|---
def _print_available_filters(supported_filters):
"""Prints information on available filters and their thresholds."""
widths = (20, 40, 20)
data = [("Filter", "Description", "Threshold Values"),
("------", "-----------", "----------------")]
# build table rows: one (filter, description, thresholds) tuple per filter
for f, (d, t, c) in supported_filters.items():
data.append((f, d, t))
print()
for row in data:
i = 1
nextline = "\n"
for col, width in zip(row, widths):
print(col[:width] + " " * max(0, width - len(col)), end=" ")
if not i == 2:
i += 1
continue
mycol = col[width:]
mybgn = width + 1
while len(mycol) > 1:
nextline += " " * 21
nextline += mycol[:width]
nextline += " " * (width - len(mycol))
nextline += "\n"
mycol = mycol[width:]
mybgn += width
i += 1
print(nextline, end="")
print()
return 0 | 49a769a27d2a4a0beaba1021821c8d1e551f53eb | 13,600 |
def _get_dates(i, *args, **kwargs):
"""
Get dates from arguments
"""
try:
start_date = kwargs['start_date']
except KeyError:
try:
start_date = args[i]
except IndexError:
start_date = None
try:
end_date = kwargs['end_date']
except KeyError:
try:
end_date = args[i+1]
except IndexError:
end_date = None
start_date, end_date = _sanitize_dates(start_date, end_date)
return(start_date, end_date) | 708bc0fcc5be80ef3b3008b9569bb14a01c4bace | 13,601 |
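A minimal sketch of the lookup order above (keyword argument first, then positional slot, then None), using a hypothetical stand-in named _get_dates_sketch and an identity stand-in for _sanitize_dates, which is not shown in the original:
def _sanitize_dates(start, end):
    # identity pass-through, only for this sketch
    return start, end

def _get_dates_sketch(i, *args, **kwargs):
    start = kwargs.get('start_date', args[i] if len(args) > i else None)
    end = kwargs.get('end_date', args[i + 1] if len(args) > i + 1 else None)
    return _sanitize_dates(start, end)

print(_get_dates_sketch(0, '2020-01-01', '2020-12-31'))  # ('2020-01-01', '2020-12-31')
print(_get_dates_sketch(0, end_date='2020-12-31'))       # (None, '2020-12-31')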
def home_page():
"""Shows home page"""
html = """
<html>
<body>
<h1>Home Page</h1>
<p>Welcome to my simple app!</p>
<a href='/hello'>Go to hello page</a>
</body>
</html>
"""
return html | 444833ab61803d1fe52676834e211ac79e770b4e | 13,602 |
def GetPDFHexString(s, i, iend):
"""Convert and return pdf hex string starting at s[i],
ending at s[iend-1]."""
j = i + 1
v = []
c = ''
jend = iend - 1
while j < jend:
p = _re_pswhitespaceandcomments.match(s, j)
if p:
j = p.end()
d = chr(ordat(s, j))
if c != '':
v.append(FromHexPair(c, d))
c = ''
else:
c = d
j += 1
if c != '':
v.append(FromHexPair(c, '0'))
return ((OSTRING, ''.join(v)), iend) | 516d7c33bcd1b2237eb482e9722de4552ac79ce2 | 13,603 |
def get_edge_syslog_info(edge_id):
"""Get syslog information for specific edge id"""
nsxv = get_nsxv_client()
syslog_info = nsxv.get_edge_syslog(edge_id)[1]
if not syslog_info['enabled']:
return 'Disabled'
output = ""
if 'protocol' in syslog_info:
output += syslog_info['protocol']
if 'serverAddresses' in syslog_info:
for server_address in syslog_info['serverAddresses']['ipAddress']:
output += "\n" + server_address
return output | 5c5ea79109b9a9053f95945a7902d9e6322a6ba6 | 13,604 |
from typing import List
def _get_rec_suffix(operations:List[str]) -> str:
""" finished, checked,
Parameters
----------
operations: list of str,
names of operations to perform (or has performed),
Returns
-------
suffix: str,
suffix of the filename of the preprocessed ecg signal
"""
suffix = "-".join(sorted([item.lower() for item in operations]))
return suffix | 270a1b3749342d05819eafef3fa5175da393b1ad | 13,605 |
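The suffix is just the lower-cased operation names, sorted and joined with '-'; a stand-alone check with made-up operation names:
# Same construction as _get_rec_suffix, spelled out on example inputs.
operations = ["Bandpass", "Baseline", "Notch"]
suffix = "-".join(sorted(item.lower() for item in operations))
assert suffix == "bandpass-baseline-notch"
print(suffix)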
def get_A_text(params, func_type=None):
"""
Get text associated with the fit of A(s)
"""
line1 = r'$A(s|r)$ is assumed to take the form:'
line2 = (r'$A(s|r) = s^{-1}\bigg{(}\frac{s}{\Sigma(r)}\bigg{)}^a '
r'exp\bigg{(}{-\bigg{(}\frac{s}{\Sigma(r)}\bigg{)}^b}\bigg{)}$')
a, b = params['a'], params['b']
line3 = r'where a = {:.4f} and b = {:.4f}'.format(a, b)
text = '\n'.join([line1, line2, line3])
return text | ec68c49a7912dc5630e3c96a09d667ce52f89914 | 13,606 |
def transform_to_dict(closest_list: list) -> dict:
"""
Returns dict {(latitude, longitude): {film1, film2, ...}, ...} from
closest_list [[film1, (latitude, longitude)], ...], where film1,
film2 are titles of films, (latitude, longitude) is a coordinates of
a place where those films were shot.
>>> transform_to_dict([["film1", (49, 24)]])
{(49, 24): {'film1'}}
"""
closest_dict = {}
for film, coord in closest_list:
if coord in closest_dict:
closest_dict[coord].add(film)
else:
closest_dict[coord] = {film}
return closest_dict | e7c6fae73792a828d85db03e794bfb69c7b1fe87 | 13,607 |
def ECGDataQuality(datastream: DataStream,
windowsize: float = 5.0,
bufferLength: int = 3,
acceptableOutlierPercent: int = 50,
outlierThresholdHigh: int = 4500,
outlierThresholdLow: int = 20,
badSegmentThreshod: int = 2,
ecgBandLooseThreshold: int = 47) -> AnnotationStream:
"""
:param datastream:
:param windowsize:
:param bufferLength:
:param acceptableOutlierPercent:
:param outlierThresholdHigh:
:param outlierThresholdLow:
:param badSegmentThreshod:
:param ecgBandLooseThreshold:
:return:
"""
# windows = window(datastream.datapoints, window_size=windowsize)
# TODO: Do something with windows here
result = DataStream.from_datastream(input_streams=[datastream])
# Do something here for data quality
# ecgQuality = []
# for i in range(1, 10):
# ecgQuality.append(Span(result.getID(),
# starttime=datetime.now(),
# endtime=datetime.now(),
# label=DataQuality.GOOD))
#
# result.set_spans(ecgQuality)
return result | c9c462ec688c102097ccad051cbcc1868d71565b | 13,608 |
def get_numpy_val_from_form_input(input_name):
"""Get a NumPy-compatible numerical value from the request object"""
return get_numpy_val(input_name, request.form[input_name]) | fadfbf106c82088103674e5da5f526e08e2a05ac | 13,609 |
import numpy
def load_model(model, path):
"""Load a the model parameters from a file and set them.
Parameters
----------
model : a :class:`Layer` instance
The model with unset parameters.
path : string
The file with the model parameters.
Returns
-------
a :class:`Layer` instance
The given model with set parameters.
"""
with numpy.load(path) as fobj:
values = [fobj['arr_%d' % i] for i in range(len(fobj.files))]
set_all_param_values(model, values)
return model | dae27ffc78be7aa7476c645c4f021d4acaef5b44 | 13,610 |
def node_id_at_cells(shape):
"""Node ID at each cell.
Parameters
----------
shape : tuple of int
Shape of grid of nodes.
Returns
-------
ndarray :
ID of node associated with each cell.
Examples
--------
>>> from landlab.grid.structured_quad.cells import node_id_at_cells
>>> node_id_at_cells((3, 4))
array([[5, 6]])
"""
node_ids = nodes.node_ids(shape)
return node_ids[1:-1, 1:-1].copy().reshape(shape_of_cells(shape)) | f089f598cacc4d5ec6885477098dcca741358820 | 13,611 |
def search_media(search_queries, media, ignore_likes=True):
"""Return a list of media matching a queary that searches for a match in the comments, likes, and tags in a list of media"""
# Initialize update message
update_message = print_update_message(len(media))
update_message.send(None)
# Initialize result data
if type(search_queries) is not list: search_queries = [search_queries]
matches = [ [] for _ in range(len(search_queries))]
# Iterate through media looking for matches to search_queries
for idx0, medium in enumerate(media):
results = search_medium(search_queries, medium, ignore_likes=ignore_likes)
for idx1, result in enumerate(results):
if result:
matches[idx1].append(medium)
# Send update message
message = Template(
'Found {} matches in {} media out of {}. {} api calls remaining'.format(
repr([len(x) for x in matches]), idx0+1, len(media),
_api.last_used_api.x_ratelimit_remaining) )
update_message.send( (idx0, message) )
return matches | 23ed38496310cc86c4d3d7f8aff4e1d5c61f9d69 | 13,612 |
import os
def validation_plot_thesis(show_plot=True, results_2010=None, results_2011=None, model_run="cosumnes_michigan_bar"):
"""
Hardcoded items because they're for my thesis, not meant for more general use.
:return:
"""
if results_2010 is None:
results_2010 = validate_flow_methods("{}_2010".format(model_run), show_plot=False)
if results_2011 is None:
results_2011 = validate_flow_methods("{}_2011".format(model_run), show_plot=False)
# Creates two subplots and unpacks the output array immediately
fig = plt.figure()
plt.margins(0)
full_plot = fig.add_subplot(1, 1, 1) # The big subplot
full_plot.set_xlabel("Percent of Available Flow")
full_plot.set_ylabel("Environmental Benefit", labelpad=20) # move it off the tick values
# Turn off axis lines and ticks of the big subplot
full_plot.spines['top'].set_color('none')
full_plot.spines['bottom'].set_color('none')
full_plot.spines['left'].set_color('none')
full_plot.spines['right'].set_color('none')
full_plot.tick_params(labelcolor='w', top=False, bottom=False, left=False, right=False)
left_plot = fig.add_subplot(1, 2, 1) # The big subplot
left_plot.plot(results_2010["x"], results_2010["y"])
left_plot.set_title('2010')
right_plot = fig.add_subplot(1, 2, 2, sharey=left_plot) # The big subplot
right_plot.plot(results_2011["x"], results_2011["y"])
right_plot.set_title('2011')
# remove the axis values on the left to make space
right_plot.tick_params(left=True, labelleft=False, )
plt.savefig(os.path.join(settings.BASE_DIR, "data", "results", "validation_plot_thesis.png"), dpi=300)
if show_plot:
plt.show()
plt.close()
return results_2010, results_2011 | bfe2ee71e008d0beff4c6bee203b04d448e656e1 | 13,613 |
def get_produced_messages(func):
"""Returns a list of message fqn and channel pairs.
Args:
func (Function): function object
Returns:
list
"""
result = []
for msg, channel in func.produces:
result.append((_build_msg_fqn(msg), channel))
return result | b63d9305f3af3e474beb1fb328881123d8f4ece6 | 13,614 |
from pathlib import Path
from typing import Sequence
def find_coverage_files(src_path: Path) -> Sequence:
"""
Find the coverage files within the specified src_path.
Parameters:
src_path (Path): The path in which to look for the .coverage files.
Returns:
(Sequence) The set of .coverage files within the specified folder.
"""
return Path(src_path).glob("**/*.coverage") | 53fd9b2d2405ed6fe895718e22cc6b1ddb86f4df | 13,615 |
def get_exploration_summary_from_model(exp_summary_model):
"""Returns an ExplorationSummary domain object.
Args:
exp_summary_model: ExplorationSummary. An ExplorationSummary model
instance.
Returns:
ExplorationSummary. The summary domain object corresponding to the
given exploration summary model.
"""
return exp_domain.ExplorationSummary(
exp_summary_model.id, exp_summary_model.title,
exp_summary_model.category, exp_summary_model.objective,
exp_summary_model.language_code, exp_summary_model.tags,
exp_summary_model.ratings, exp_summary_model.scaled_average_rating,
exp_summary_model.status, exp_summary_model.community_owned,
exp_summary_model.owner_ids, exp_summary_model.editor_ids,
exp_summary_model.voice_artist_ids, exp_summary_model.viewer_ids,
exp_summary_model.contributor_ids,
exp_summary_model.contributors_summary, exp_summary_model.version,
exp_summary_model.exploration_model_created_on,
exp_summary_model.exploration_model_last_updated,
exp_summary_model.first_published_msec
) | c6561670f976e28a3869eb89c4be3ba884808da0 | 13,616 |
def get_service(api_name, api_version, scope, key_file_location,
service_account_email):
"""Get a service that communicates to a Google API.
Args:
api_name: The name of the api to connect to.
api_version: The api version to connect to.
scope: A list of auth scopes to authorize for the application.
key_file_location: The path to a valid service account p12 key file.
service_account_email: The service account email address.
Returns:
A service that is connected to the specified API.
"""
f = open(key_file_location, 'rb')
key = f.read()
f.close()
credentials = ServiceAccountCredentials.from_p12_keyfile(service_account_email,key_file_location, scopes=scope)
http = credentials.authorize(httplib2.Http())
# Build the service object.
service = build(api_name, api_version, http=http)
return service
# Insert user email here | 6c333f43c5feb5b44128b8f592586804eba68e1e | 13,617 |
from IsabelaFunctions.langlais_coeff import glm as g
from IsabelaFunctions.langlais_coeff import hlm as h
import numpy as np
from tqdm import tqdm
def model_map(lon, lat, alt, comp, binsize = 0.1, nmax = 134, a = 3393.5):
"""
Calculates a map of one component of the crustal magnetic field field model, for a given altitude.
Parameters:
lon: array
The longitude range, in degrees. Ex.: [20., 50.].
lat: array
The latitude range, in degrees.
alt: float
The altitude in which the map will be computed, in km.
comp: string
The desired magnetic field component, in spherical coordinates. Options are 'Br', 'Btheta', 'Bphi', and 'Bt'.
binsize: float, list, optional
The resolution of the grid. If a float, apply the same binsize for longitude and latitude.
If a list, the first value represents the longitude binsize and the second, the latitude binsize.
nmax: integer, optional
The maximum degree and order of the functions.
a: float, optional
The radius of the planet. Default is the Mars' radius.
Returns:
A lon X lat array containing the magnetic field component.
"""
# Raise an AssertionError if arguments are invalid
assert comp == 'Br' or comp == 'Btheta' or comp == 'Bphi' or comp == 'Bt', "Check argument for comp"
assert type(binsize) is float or type(binsize) is list, "Argument for binsize should be a float or a list"
# Import the coefficient files
# Calculate r, theta, phi, and the Legendre functions
r = a + alt
if type(binsize) is float:
binsize = [binsize, binsize]
lat_len = int(round((lat[1] - lat[0]) / binsize[1] + 1.0))
lon_len = int(round((lon[1] - lon[0]) / binsize[0] + 1.0))
longitude = np.deg2rad(np.linspace(lon[0], lon[1], lon_len))
latitude = np.linspace(lat[0], lat[1], lat_len)
P = np.empty((nmax+1, nmax+1, lat_len)) * np.nan
dP = np.empty_like(P) * np.nan
for theta in range(lat_len):
P[:, :, theta], dP[:, :, theta] = legendre_schmidt_Pyshtools(latitude[theta])
cos = np.empty((nmax+1, lon_len)) * np.nan
sen = np.empty_like(cos) * np.nan
for phi in range(lon_len):
for m in range(nmax+1):
cos[m, phi] = np.cos(m * longitude[phi])
sen[m, phi] = np.sin(m * longitude[phi])
a_over_r = np.empty((nmax+1)) * np.nan
for n in range(nmax+1):
a_over_r[n] = (a/r)**(n+2)
if comp == 'Bt':
Br = np.zeros((lon_len, lat_len))
Btheta = np.zeros((lon_len, lat_len))
Bphi = np.zeros((lon_len, lat_len))
sen_theta = np.sin(np.deg2rad(90.0 - latitude))
for n in range(1, nmax+1):
for m in range(n+1):
tmp1 = g[n, m] * cos[m, :] + h[n, m] * sen[m, :]
tmp2 = np.outer(tmp1, P[n, m, :])
tmp3 = tmp2 * (n+1) * a_over_r[n]
Br += tmp3
tmp2 = np.outer(tmp1, dP[n, m, :] * sen_theta)
tmp3 = tmp2 * a_over_r[n]
Btheta += tmp3
tmp1 = g[n, m] * sen[m, :] + h[n, m] * cos[m, :]
tmp2 = np.outer(tmp1, P[n, m, :])
tmp3 = tmp2 * m * a_over_r[n]
Bphi += tmp3
for theta in range(lat_len):
Bphi[:, theta] /= sen_theta[theta]
B = np.sqrt(Br**2 + Btheta**2 + Bphi**2)
else:
B = np.zeros((lon_len, lat_len))
if comp == 'Br':
for n in tqdm(range(1, nmax+1)):
for m in range(n+1):
tmp1 = g[n, m] * cos[m, :] + h[n, m] * sen[m, :]
tmp2 = np.outer(tmp1, P[n, m, :])
tmp3 = tmp2 * (n+1) * a_over_r[n]
B += tmp3
elif comp == 'Btheta':
sen_theta = np.sin(np.deg2rad(90.0 - latitude))
for n in tqdm(range(1, nmax+1)):
for m in range(n+1):
tmp1 = g[n, m] * cos[m, :] + h[n, m] * sen[m, :]
tmp2 = np.outer(tmp1, dP[n, m, :] * sen_theta)
tmp3 = tmp2 * a_over_r[n]
B += tmp3
else:
sen_theta = np.sin(np.deg2rad(90.0 - latitude))
for n in tqdm(range(1, nmax+1)):
for m in range(n+1):
tmp1 = g[n, m] * sen[m, :] + h[n, m] * cos[m, :]
tmp2 = np.outer(tmp1, P[n, m, :])
tmp3 = tmp2 * m * a_over_r[n]
B += tmp3
for theta in range(lat_len):
B[:, theta] /= sen_theta[theta]
return B.T | 9a49e4a1f31180cd7a26f2028c5e45d077103346 | 13,618 |
def parse_command(incoming_text):
"""
incoming_text: A text string to parse for docker commands
returns: a fully validated docker command
"""
docker_action = ''
parse1 = re.compile(r"(?<=\bdocker\s)(\w+)")
match_obj = parse1.search(incoming_text)
if match_obj:
docker_action = match_obj.group()
print("Got docker action %s" % (docker_action,))
if docker_action and docker_action in DOCKER_SUPPORTED:
# Use this type of code if we want to limit the docker commands
#parse2 = re.compile(r"(?<=\b%s\s)(\w+)" % docker_action)
#match_obj = parse2.search(incoming_text)
#if match_obj:
# docker_subcommand = match_obj.group()
# if docker_subcommand in SUBCOMMAND_SUPPORTED:
# return "docker %s %s" % (docker_action, docker_subcommand)
# Otherwise let it fly and return help if it pumps mud.
print "returning docker %s%s" % (docker_action, incoming_text[match_obj.end():])
return "docker %s%s" % (docker_action, incoming_text[match_obj.end():])
return docker_usage_message() | abe82ae2fe29014b3441889c973a412a536b78f1 | 13,619 |
def get_entities_from_tags(query, tags):
"""From a set of joint IOB tags, parse the app and system entities.
This performs the reverse operation of get_tags_from_entities.
Args:
query (Query): Any query instance.
tags (list of str): Joint app and system tags, like those
created by get_tags_from_entities.
Returns:
(list of QueryEntity) The tuple containing the list of entities.
"""
normalized_tokens = query.normalized_tokens
entities = []
def _is_system_entity(entity_type):
if entity_type.split('_')[0] == 'sys':
return True
return False
def _append_entity(token_start, entity_type, tokens):
prefix = ' '.join(normalized_tokens[:token_start])
# If there is a prefix, we have to add one for the whitespace
start = len(prefix) + 1 if len(prefix) else 0
end = start - 1 + len(' '.join(tokens))
norm_span = Span(start, end)
entity = QueryEntity.from_query(query, normalized_span=norm_span, entity_type=entity_type)
entities.append(entity)
logger.debug("Appended %s.", entity)
def _append_system_entity(token_start, token_end, entity_type):
msg = "Looking for '%s' between %s and %s."
logger.debug(msg, entity_type, token_start, token_end)
prefix = ' '.join(normalized_tokens[:token_start])
# If there is a prefix, we have to add one for the whitespace
start = len(prefix) + 1 if len(prefix) else 0
end = start - 1 + len(' '.join(normalized_tokens[token_start:token_end]))
norm_span = Span(start, end)
span = query.transform_span(norm_span, TEXT_FORM_NORMALIZED, TEXT_FORM_RAW)
try:
entity = resolve_system_entity(query, entity_type, span)
entities.append(entity)
logger.debug("Appended system entity %s.", entity)
except SystemEntityResolutionError:
msg = "Found no matching system entity {}-{}, {!r}".format(
token_start, token_end, entity_type)
logger.debug(msg)
entity_tokens = []
entity_start = None
prev_ent_type = ''
for tag_idx, tag in enumerate(tags):
iob, ent_type = tag.split('|')
# Close entity and reset if the tag indicates a new entity
if (entity_start is not None and
(iob in (O_TAG, B_TAG, S_TAG) or ent_type != prev_ent_type)):
logger.debug("Entity closed at prev")
if _is_system_entity(prev_ent_type):
_append_system_entity(entity_start, tag_idx, prev_ent_type)
else:
_append_entity(entity_start, prev_ent_type, entity_tokens)
entity_start = None
prev_ent_type = ''
entity_tokens = []
# Check if an entity has started
if iob in (B_TAG, S_TAG) or ent_type not in ('', prev_ent_type):
entity_start = tag_idx
if _is_system_entity(ent_type):
# During predict time, we construct sys_candidates for the input query.
# These candidates are "global" sys_candidates, in that the entire query
# is sent to Duckling to extract sys_candidates and not just a span range
# within the query. When we append system entities for a given token,
# we pick among candidates with start_span equivalent to the token's tag_idx.
picked_by_existing_system_entity_candidates = False
sys_entities = query.get_system_entity_candidates(ent_type)
if ent_type == 'sys_time':
sys_entities = _sort_by_lowest_time_grain(sys_entities)
for sys_candidate in sys_entities:
start_span = sys_candidate.normalized_token_span.start
end_span = sys_candidate.normalized_token_span.end
if start_span == tag_idx and tag_idx <= end_span:
# We currently don't prioritize any sys_candidate if there are
# multiple candidates that meet this conditional.
entity_start = sys_candidate.normalized_token_span.start
picked_by_existing_system_entity_candidates = True
if not picked_by_existing_system_entity_candidates:
entity_start = tag_idx
# Append the current token to the current entity, if applicable.
if iob != O_TAG and entity_start is not None and not _is_system_entity(ent_type):
entity_tokens.append(normalized_tokens[tag_idx])
# Close the entity if the tag indicates it closed
if entity_start is not None and iob in (E_TAG, S_TAG):
logger.debug("Entity closed here")
if _is_system_entity(ent_type):
_append_system_entity(entity_start, tag_idx+1, ent_type)
else:
_append_entity(entity_start, ent_type, entity_tokens)
entity_start = None
ent_type = ''
entity_tokens = []
prev_ent_type = ent_type
# Handle entities that end with the end of the query
if entity_start is not None:
logger.debug("Entity closed at end")
if _is_system_entity(prev_ent_type):
_append_system_entity(entity_start, len(tags), prev_ent_type)
else:
_append_entity(entity_start, prev_ent_type, entity_tokens)
else:
logger.debug("Entity did not end: %s.", entity_start)
return tuple(entities) | 2bc2aa1d12834d9414d66710473aaf8df8ef3fab | 13,620 |
from itertools import combinations
import networkx as nx
import numpy as np
def angle_connectivity(ibonds):
"""Given the bonds, get the indices of the atoms defining all the bond
angles
A 'bond angle' is defined as any set of 3 atoms, `i`, `j`, `k` such that
atom `i` is bonded to `j` and `j` is bonded to `k`
Parameters
----------
ibonds : np.ndarray, shape=[n_bonds, 2], dtype=int
Each row in `ibonds` is a pair of indicies `i`, `j`, indicating that
atoms `i` and `j` are bonded
Returns
-------
iangles : np.ndarray, shape=[n_angles, 3], dtype=int
n_angles x 3 array of indices, where each row is the index of three
atoms m,n,o such that n is bonded to both m and o.
"""
graph = nx.from_edgelist(ibonds)
iangles = []
for i in graph.nodes():
for (m, n) in combinations(graph.neighbors(i), 2):
# so now the there is a bond angle m-i-n
iangles.append((m, i, n))
return np.array(iangles) | 86c992a1a8ac2d3c6b1fbc5a137ef0734a3079ed | 13,621 |
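A small usage sketch, assuming networkx and numpy are installed and angle_connectivity above is in scope; the bond list is made up (bonds 0-1, 0-2 and 1-3):
# Hypothetical bonds: expect the angles 1-0-2 and 0-1-3 (row order may vary).
import numpy as np
ibonds = np.array([[0, 1], [0, 2], [1, 3]])
print(angle_connectivity(ibonds))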
def BOPDS_PassKeyMapHasher_IsEqual(*args):
"""
:param aPKey1:
:type aPKey1: BOPDS_PassKey &
:param aPKey2:
:type aPKey2: BOPDS_PassKey &
:rtype: bool
"""
return _BOPDS.BOPDS_PassKeyMapHasher_IsEqual(*args) | 8da04f1755e3d2f7d10ad3ecf5ec6b0d00ca5fcb | 13,622 |
import re
def replaceToSantizeURL(url_str):
"""
Take an arbitrary string, find URLs that embed a user and password, and
replace them with sanitized URLs.
"""
def _repUrl(matchObj):
return matchObj.group(1) + matchObj.group(4)
# TODO: won't catch every case (But is it good enough (trade off to performance)?)
urlRegExpr = r'(?i)\b((http|https|ftp|mysql|oracle|sqlite)+://)([^:]+:[^@]+@)(\S+)\b'
return re.sub(urlRegExpr, _repUrl, url_str) | c72dc459fae1918641926f2933ea3ff8c71acc38 | 13,623 |
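A quick usage sketch with a made-up connection string; only the user:password@ part is stripped, assuming replaceToSantizeURL above is in scope:
# Credentials are dropped, the rest of the URL is kept.
print(replaceToSantizeURL("connect via mysql://alice:s3cret@db.example.com:3306/prod"))
# -> connect via mysql://db.example.com:3306/prod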
def dms2dd(s):
"""convert lat and long to decimal degrees"""
direction = s[-1]
degrees = s[0:4]
dd = float(degrees)
if direction in ('S','W'):
dd*= -1
return dd | cb76efbf8c3b6a75bcc26593fab81a8ef3e16bbf | 13,624 |
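Illustrative calls, assuming inputs whose first four characters hold the numeric value (as the slice s[0:4] requires); the coordinates are made up:
print(dms2dd("45.5N"))  # 45.5
print(dms2dd("45.5S"))  # -45.5
print(dms2dd("22.1W"))  # -22.1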
def setna(self, value, na=np.nan, inplace=False):
""" set a value as missing
Parameters
----------
value : the values to set to na
na : the replacement value (default np.nan)
Examples
--------
>>> from dimarray import DimArray
>>> a = DimArray([1,2,-99])
>>> a.setna(-99)
dimarray: 2 non-null elements (1 null)
0 / x0 (3): 0 to 2
array([ 1., 2., nan])
>>> a.setna([-99, 2]) # sequence
dimarray: 1 non-null elements (2 null)
0 / x0 (3): 0 to 2
array([ 1., nan, nan])
>>> a.setna(a > 1) # boolean
dimarray: 2 non-null elements (1 null)
0 / x0 (3): 0 to 2
array([ 1., nan, -99.])
>>> a = DimArray([[1,2,-99]]) # multi-dim
>>> a.setna([-99, a>1]) # boolean
dimarray: 1 non-null elements (2 null)
0 / x0 (1): 0 to 0
1 / x1 (3): 0 to 2
array([[ 1., nan, nan]])
"""
return self.put(_matches(self.values, value), na, cast=True, inplace=inplace) | 6ada601dee346d5440a64ffdbf8d2642873bdb08 | 13,625 |
def hbox(*items, **config):
""" Create a DeferredConstraints object composed of horizontal
abutments for a given sequence of items.
"""
return LinearBoxHelper('horizontal', *items, **config) | cdfe16a35c73a2f8406207a0262b4210ce86146f | 13,626 |
def find_columns(clause):
"""locate Column objects within the given expression."""
cols = util.column_set()
visitors.traverse(clause, {}, {'column':cols.add})
return cols | 86b4c866a8fbe20ab1d4b0a34e4940155df00744 | 13,627 |
def _preprocess_data(smiles, labels, batchsize = 100):
"""
prepares all input batches to train/test the GDNN fingerprints implementation
"""
N = len(smiles)
batches = []
num_bond_features = 6
for i in range(int(np.ceil(N*1./batchsize))):
array_rep = utils.array_rep_from_smiles(smiles[i*batchsize:min(N,(i+1)*batchsize)])
labels_b = labels[i*batchsize:min(N,(i+1)*batchsize)]
atom_features = array_rep['atom_features']
summed_bond_features_by_degree = extract_bondfeatures_of_neighbors_by_degree(array_rep)
batch_dict = {'input_atom_features':atom_features} # (num_atoms, num_atom_features)
missing_degrees = []
for degree in degrees:
atom_neighbors_list = array_rep[('atom_neighbors', degree)]
if len(atom_neighbors_list)==0:
missing_degrees.append(degree)
continue
# this matrix is used by every layer to match and sum all neighboring updated atom features to the atoms
atom_neighbor_matching_matrix = connectivity_to_Matrix(atom_neighbors_list, atom_features.shape[0])
atom_batch_matching_matrix = connectivity_to_Matrix(array_rep['atom_list'], atom_features.shape[0]).T
assert np.all(atom_batch_matching_matrix.sum(1).mean()==1)
assert np.all(atom_batch_matching_matrix.sum(0).mean()>1),'Error: looks like a single-atom molecule?'
batch_dict['bond_features_degree_'+str(degree)] = summed_bond_features_by_degree[degree]
batch_dict['atom_neighbors_indices_degree_'+str(degree)] = atom_neighbors_list
batch_dict['atom_features_selector_matrix_degree_'+str(degree)] = atom_neighbor_matching_matrix
batch_dict['atom_batch_matching_matrix_degree_'+str(degree)] = atom_batch_matching_matrix.T # (batchsize, num_atoms)
if degree==0:
print('degree 0 bond?')
print(smiles[i*batchsize:min(N,(i+1)*batchsize)])
return
# input_atom_features (292L, 62L)
# bond_features_degree_ 1 (70L, 6L)
# atom_neighbors_indices_degree_ 1 (70L, 1L)
# bond_features_degree_ 2 (134L, 6L)
# atom_neighbors_indices_degree_ 2 (134L, 2L)
# bond_features_degree_ 3 (78L, 6L)
# atom_neighbors_indices_degree_ 3 (78L, 3L)
# bond_features_degree_ 4 (10L, 6L)
# atom_neighbors_indices_degree_ 4 (10L, 4L)
num_bond_features = batch_dict['bond_features_degree_'+str(degree)].shape[1]
num_atoms = atom_neighbor_matching_matrix.shape[1]
for missing_degree in missing_degrees:
batch_dict['atom_neighbors_indices_degree_'+str(missing_degree)] = np.zeros((0, missing_degree),'int32')
batch_dict['bond_features_degree_'+str(missing_degree)] = np.zeros((0, num_bond_features),'float32')
batch_dict['atom_features_selector_matrix_degree_'+str(missing_degree)] = np.zeros((0, num_atoms),'float32')
batch_dict['atom_batch_matching_matrix_degree_'+str(missing_degree)] = atom_batch_matching_matrix.T
batches.append((batch_dict,labels_b))
return batches | 3456fe2059e386088d359ec0c2d54dff2d7fac25 | 13,628 |
def linear_activation_forward(A_prev, W, b, activation, keep_prob=1):
"""
Implement the forward propagation for the LINEAR->ACTIVATION layer
Arguments:
A_prev -- activations from previous layer (or input data): (size of previous layer, number of examples)
W -- weights matrix: numpy array of shape (size of current layer, size of previous layer)
b -- bias vector, numpy array of shape (size of the current layer, 1)
activation -- the activation to be used in this layer, stored as a text string: "sigmoid" or "relu"
Returns:
A -- the output of the activation function, also called the post-activation value
cache -- a python dictionary containing "linear_cache" and "activation_cache";
stored for computing the backward pass efficiently
"""
if activation == "sigmoid":
# Inputs: "A_prev, W, b". Outputs: "A, activation_cache".
# START CODE HERE ### (≈ 2 lines of code)
Z, linear_cache = linear_forward(A_prev, W, b)
A, activation_cache = sigmoid(Z)
### END CODE HERE ###
Dt = np.random.rand(A.shape[0], A.shape[1])
elif activation == "relu":
# Inputs: "A_prev, W, b". Outputs: "A, activation_cache".
# START CODE HERE ### (≈ 2 lines of code)
Z, linear_cache = linear_forward(A_prev, W, b)
A, activation_cache = relu(Z)
# Dropout
# Step 1: initialize the dropout mask Dt = np.random.rand(..., ...)
Dt = np.random.rand(A.shape[0], A.shape[1])
# Step 2: convert entries of Dt to 0 or 1 (using keep_prob as the
# threshold)
Dt = Dt < keep_prob
# Step 3: shut down some neurons of A
A = A * Dt
A = A / keep_prob
### END CODE HERE ###
assert (A.shape == (W.shape[0], A_prev.shape[1]))
cache = (linear_cache, activation_cache, Dt)
return A, cache | 0e4d12142224bfb46af0afb547abe3dde0aa6811 | 13,629 |
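The three dropout steps above follow the usual inverted-dropout pattern; a stand-alone numpy sketch of just that part, with arbitrary shapes and keep_prob:
# Mask, zero out, then rescale by keep_prob so the expected magnitude is unchanged.
import numpy as np
np.random.seed(0)
A = np.random.rand(4, 5)                   # stand-in for the layer activations
keep_prob = 0.8
Dt = np.random.rand(*A.shape) < keep_prob  # Steps 1-2: boolean mask
A = (A * Dt) / keep_prob                   # Step 3: shut neurons off and rescale
print(A)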
import os
import io
def get_version(file, name="__version__"):
"""Get the version of the package from the given file by
executing it and extracting the given `name`.
"""
path = os.path.realpath(file)
version_ns = {}
with io.open(path, encoding="utf8") as f:
exec(f.read(), {}, version_ns)
return version_ns[name] | 80cc05e945eabe84c6be38461bd0e446cae93ac4 | 13,630 |
def load_image(image_path, size):
"""
Load an image as a Numpy array.
:param image_path: Path of the image
:param size: Target size
:return Image array, normalized between 0 and 1
"""
image = img_to_array(load_img(image_path, target_size=size)) / 255.
return image | 3d9a790b762f800a222c26578dc0572587b091fb | 13,631 |
import signal
def _signal_exit_code(signum: signal.Signals) -> int:
"""
Return the exit code corresponding to a received signal.
Conventionally, when a program exits due to a signal its exit code is 128
plus the signal number.
"""
return 128 + int(signum) | 050eee98632216fddcbd71e4eb6b0c973f6d4144 | 13,632 |
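For instance, with the usual Linux numbering (SIGINT is 2, SIGTERM is 15) the convention gives 130 and 143:
import signal
print(128 + int(signal.SIGINT), 128 + int(signal.SIGTERM))  # 130 143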
import csv
import pandas as pd
def make_template_matrix(msigdb_file, blacklist, checkblacklist=True):
"""
Retrieve all genes and pathways from given msigdb .gmt file
Output:
sorted gene by pathways pandas dataframe. Entries indicate membership
"""
all_db_pathways = []
all_db_genes = []
# Get a set of all genes and all pathways in MSigDB (not blacklisted)
with open(msigdb_file, 'r') as msigdb_fh:
msigdb_reader = csv.reader(msigdb_fh, delimiter='\t')
for row in msigdb_reader:
signature_name = row[0]
signature_genes = row[2:]
if checkblacklist:
if signature_name.startswith(blacklist):
continue
all_db_pathways.append(signature_name)
all_db_genes += signature_genes
big_msigdb_df = pd.DataFrame(0, index=set(all_db_genes), columns=all_db_pathways)
big_msigdb_df = big_msigdb_df.sort_index()
big_msigdb_df = big_msigdb_df.T.sort_index().T
# Loop through file again to populate dataframe. This is a fast implementation
with open(msigdb_file, 'r') as msigdb_fh:
msigdb_reader = csv.reader(msigdb_fh, delimiter='\t')
for row in msigdb_reader:
signature_name = row[0]
signature_genes = row[2:]
if checkblacklist:
if signature_name.startswith(blacklist):
continue
for gene in signature_genes:
big_msigdb_df.at[gene, signature_name] = 1
return big_msigdb_df | b8068089279dfbe3b3cfc8b16dee016cc0994746 | 13,633 |
def unwrap_key(
security_control: SecurityControlField, wrapping_key: bytes, wrapped_key: bytes
):
"""
Simple function to unwrap a key received.
"""
validate_key(security_control.security_suite, wrapping_key)
validate_key(security_control.security_suite, wrapped_key)
unwrapped_key = aes_key_unwrap(wrapping_key, wrapped_key)
return unwrapped_key | 7720ad8905f6818b1a3fa4132b040560a9ae0dfa | 13,634 |
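A round-trip sketch, assuming the aes_key_unwrap used above is the RFC 3394 unwrap from the cryptography package (its counterpart aes_key_wrap produces the wrapped key); validate_key and SecurityControlField are skipped here:
# Hypothetical round trip with the cryptography package's AES key wrap.
import os
from cryptography.hazmat.primitives.keywrap import aes_key_wrap, aes_key_unwrap
kek = os.urandom(16)          # key-encryption (wrapping) key
session_key = os.urandom(16)  # key being protected
wrapped = aes_key_wrap(kek, session_key)
assert aes_key_unwrap(kek, wrapped) == session_key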
def checkOwnership(obj, login_session):
"""
Check whether the currently logged-in user is the creator of the given
category or item. Returns True if the current user owns the object,
otherwise False.
"""
# the user has logged in at this moment
userID = getUserID(login_session["email"])
# comparing user_id is a better approach
# Because different user still can have same usernames
if obj.user_id == userID:
return True
else:
return False | 851d2dafae633ed92698af525b1c717091edb2b7 | 13,635 |
from os import path
import os
import shutil
def move() -> str:
"""Move a file."""
if not g.ledger.options["documents"]:
raise FavaAPIException("You need to set a documents folder.")
account = request.args.get("account")
new_name = request.args.get("newName")
filename = request.args.get("filename")
if not account:
raise FavaAPIException("No account specified.")
if not filename:
raise FavaAPIException("No filename specified.")
if not new_name:
raise FavaAPIException("No new filename given.")
new_path = filepath_in_document_folder(
g.ledger.options["documents"][0], account, new_name, g.ledger
)
if not path.isfile(filename):
raise FavaAPIException(f"Not a file: '{filename}'")
if path.exists(new_path):
raise FavaAPIException(f"Target file exists: '{new_path}'")
if not path.exists(path.dirname(new_path)):
os.makedirs(path.dirname(new_path), exist_ok=True)
shutil.move(filename, new_path)
return f"Moved {filename} to {new_path}." | 580ad9d81ce92183ce3066a5326b4c2ce338bd55 | 13,636 |
import logging
def transform_file_name(original_file_name):
"""
Now, this is just whatever I felt like. Whee.
So in this function I could have just used 0 and 1 as my indices directly when I look at the different parts of
the file name, but it's generally better to name these sorts of things, so people know *why* they're 0 and 1.
Another benefit is that you now know exactly why these particular things are 0 and 1 without having to guess,
and you know that these usages of 0 or 1 are different for other usages. For example, I have 2 usages of the
value 1 in this function, but they serve different purposes.
"""
# So script constants are in all caps. But when we're using constants inside a specific function or class or
# something along those lines, then we do something a little different. These values are meant to be used
# inside the function, but they're not meant to be used outside of it, returned, or anything like that. The leading
# underscore is a signal to anyone else who uses this script to indicate that.
_file_name_location = 0
_file_type_ending_location = 1
logging.info("Original file name: {}".format(original_file_name))
# Split the original filename into parts once, based on the specified separator, exactly one time.
# Also, do this by searching for the separator starting from the right-hand side of the string.
file_name_parts = original_file_name.rsplit(
# I don't want this line to be too long, so I've added line breaks here to keep things from getting too wide.
ScriptConstants.FILE_EXTENSION_SEPARATOR,
ScriptConstants.NUM_FILE_EXTENSIONS_IN_FILE_NAME
)
file_ending = file_name_parts[_file_type_ending_location]
file_name = file_name_parts[_file_name_location]
# I forget whether I mentioned this before, but when you add strings together, Python interprets it as
# an instruction to concatenate the strings together (with no separator).
new_file_name = file_name + '_derp_i_moved_this_thing' + ScriptConstants.FILE_EXTENSION_SEPARATOR + file_ending
logging.info('New file name: {}'.format(new_file_name))
return new_file_name | daa5b3be0ae7a40c9d20ac4a8aa37c51dec89c89 | 13,637 |
import networkx as nx
import pandas
def remove_overlapping_cells(graph):
"""
Takes in a graph in which each node is a cell and edges connect cells that
overlap eachother in space. Removes overlapping cells, preferentially
eliminating the cell that overlaps the most cells (i.e. if cell A overlaps
cells B, C, and D, whereas cell B only overlaps cell A, cell C only overlaps
cell A, and cell D only overlaps cell A, then cell A will be removed,
leaving cells B, C, and D remaining because there is no more overlap
within this group of cells).
Args:
graph: An undirected graph, in which each node is a cell and each
edge connects overlapping cells. nodes are expected to have
the following attributes: originalFOV, assignedFOV
Returns:
A pandas dataframe containing the feature ID of all cells after removing
all instances of overlap. There are columns for cell_id, originalFOV,
and assignedFOV
"""
connectedComponents = list(nx.connected_components(graph))
cleanedCells = []
connectedComponents = [list(x) for x in connectedComponents]
for component in connectedComponents:
if len(component) == 1:
originalFOV = graph.nodes[component[0]]['originalFOV']
assignedFOV = graph.nodes[component[0]]['assignedFOV']
cleanedCells.append([component[0], originalFOV, assignedFOV])
if len(component) > 1:
sg = nx.subgraph(graph, component)
verts = list(nx.articulation_points(sg))
if len(verts) > 0:
sg = nx.subgraph(graph,
[x for x in component if x not in verts])
allEdges = [[k, v] for k, v in nx.degree(sg)]
sortedEdges = sorted(allEdges, key=lambda x: x[1], reverse=True)
maxEdges = sortedEdges[0][1]
while maxEdges > 0:
sg = nx.subgraph(graph, [x[0] for x in sortedEdges[1:]])
allEdges = [[k, v] for k, v in nx.degree(sg)]
sortedEdges = sorted(allEdges, key=lambda x: x[1],
reverse=True)
maxEdges = sortedEdges[0][1]
keptComponents = list(sg.nodes())
cellIDs = []
originalFOVs = []
assignedFOVs = []
for c in keptComponents:
cellIDs.append(c)
originalFOVs.append(graph.nodes[c]['originalFOV'])
assignedFOVs.append(graph.nodes[c]['assignedFOV'])
listOfLists = list(zip(cellIDs, originalFOVs, assignedFOVs))
listOfLists = [list(x) for x in listOfLists]
cleanedCells = cleanedCells + listOfLists
cleanedCellsDF = pandas.DataFrame(cleanedCells,
columns=['cell_id', 'originalFOV',
'assignedFOV'])
return cleanedCellsDF | bd133c5ddd59f950d34ba16fb7bc3ff0215f0cf2 | 13,638 |
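A small usage sketch, assuming networkx is installed and remove_overlapping_cells above is in scope; the graph is a made-up star in which cell 'A' overlaps 'B', 'C' and 'D', so only 'A' is dropped:
# Hypothetical overlap graph: A-B, A-C, A-D; the hub A is removed.
import networkx as nx
g = nx.Graph()
for cell in "ABCD":
    g.add_node(cell, originalFOV=0, assignedFOV=0)
g.add_edges_from([("A", "B"), ("A", "C"), ("A", "D")])
print(remove_overlapping_cells(g))  # rows for B, C and D only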
def main_page(request) :
"""Renders main page and gets the n (matrix demension number)"""
if request.method != 'POST' :
form = InputForm()
else :
form = InputForm(data=request.POST)
if form.is_valid() :
return redirect('calculator:set_demensions')
context = {'form' : form}
return render(request, 'calculator/main_page.html', context) | a6131ea837c8d9b986e8579a40ada1f7a0a3bb64 | 13,639 |
import itertools as it
def int2fin_reference(n):
"""Calculates a checksum for a Finnish national reference number"""
checksum = 10 - (sum([int(c) * i for c, i in zip(str(n)[::-1], it.cycle((7, 3, 1)))]) % 10)
return "%s%s" % (n, checksum) | f21e66cb917631797d62ecc8ba2728b18d36ae1c | 13,640 |
def COLSTR(str, tag):
"""
Utility function to create a colored line
@param str: The string
@param tag: Color tag constant. One of SCOLOR_XXXX
"""
return SCOLOR_ON + tag + str + SCOLOR_OFF + tag | abe3d9111a30ebb678d1f1a2011d3b8a3ad39a75 | 13,641 |
import pandas as pd
def get_instance_pricing(instance_types):
"""
Get the spot and on demand price of an instance type
in all the regions at current instant
:param instance_types: EC2 instance type
:return: a pandas DataFrame with columns as
region, spot price and on demand price
"""
all_regions = get_all_regions()
price_df = pd.DataFrame({DF_COL_INSTANCE_TYPE: [],
DF_COL_REGION: [],
DF_COL_SPOT_PRICE: [],
DF_COL_ON_DEMAND_PRICE: []})
for region_name in all_regions:
spot_prices = get_spot_price(instance_types, region_name)
on_demand_prices = get_on_demand_price(instance_types, region_name)
both_prices = pd.merge(spot_prices, on_demand_prices,
on=DF_COL_INSTANCE_TYPE)
n_rows = both_prices.shape[0]
region_list = n_rows * [region_name]
both_prices[DF_COL_REGION] = region_list
both_prices = both_prices[[DF_COL_INSTANCE_TYPE, DF_COL_REGION,
DF_COL_SPOT_PRICE,
DF_COL_ON_DEMAND_PRICE]]
price_df = pd.concat([price_df, both_prices])
return price_df | 62dba0e3c3f46ac460178da0bc4d615869819f83 | 13,642 |
import itertools
async def get_user_groups(request):
"""Returns the groups that the user in this request has access to.
This function gets the user id from the auth.get_auth function, and passes
it to the ACL callback function to get the groups.
Args:
request: aiohttp Request object
Returns:
If the ACL callback function returns None, this function returns None.
Otherwise this function returns the sequence of group permissions
provided by the callback, plus the Everyone group. If user_id is not
None, the AuthenticatedUser group and the user_id are added to the
groups returned by the function
Raises:
RuntimeError: If the ACL middleware is not installed
"""
acl_callback = request.get(GROUPS_KEY)
if acl_callback is None:
raise RuntimeError('acl_middleware not installed')
user_id = await get_auth(request)
groups = await acl_callback(user_id)
if groups is None:
return None
user_groups = (Group.AuthenticatedUser, user_id) if user_id is not None else ()
return set(itertools.chain(groups, (Group.Everyone,), user_groups)) | 9fd62d6f971c871ce290700f3abb7eb467692533 | 13,643 |
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.axes_grid1 import make_axes_locatable
def plot_bivariate_correlations(df, path=None, dpi=150):
"""
Plots heatmaps of 2-variable correlations to the Target function
The bivariate correlations are assmebled using both the arithmatic and geometric means for
two subplots in the figure.
Parameters
----------
df: dataframe
path: optional string path for saving
dpi: integer dots per inch
Returns
-------
fig: figure with 2 subplots of bivariate correlations (using arithmetic and geometric mean)
"""
# Plot function for subplots
def makeit(ax):
bound = np.max(np.abs(correlations))
img = ax.matshow(correlations, cmap=cm.coolwarm, vmin=-bound, vmax=bound)
ax.set(xticks=np.arange(df.shape[1]),
yticks=np.arange(df.shape[1]),
xticklabels=df.columns,
yticklabels=df.columns
)
for label in ax.xaxis.get_ticklabels():
label.set_rotation(75)
label.set_fontsize(16)
for label in ax.yaxis.get_ticklabels():
label.set_fontsize(16)
if matplotlib.__version__ == '3.1.1':
ax.set_ylim(len(df.columns) - 0.5, -0.5)
# create an axes on the right side of ax. The width of cax will be 5%
# of ax and the padding between cax and ax will be fixed at 0.05 inch.
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="8%", pad=0.1)
cb = plt.colorbar(img, cax=cax)
cb.set_ticks([])
try:
target = df.Target
except AttributeError:
print('Not set up for working without Target series in DataFrame')
df = df.drop(columns=["Target"])
features = list(df.columns)
arr = np.array(df)
correlations = np.zeros((len(features), len(features)))
# First the arithmetic mean
for i in range(len(features)):
dic = {}
for j in range(len(features)):
dic["{}+{}".format(features[i], features[j])] = (arr[:, i] + arr[:, j]) / 2
_df = pd.DataFrame(dic)
correlations[i, :] = _df.corrwith(target)
fig, axes = plt.subplots(2, 1, figsize=(10, 20))
ax = axes[0]
makeit(ax)
ax.set_title('Arithmetic Mean Bivariate Correlation', y=1.3, fontweight="bold", fontsize=18)
correlations = np.zeros((len(features), len(features)))
# Second the geometric mean
for i in range(len(features)):
dic = {}
for j in range(len(features)):
dic["{}*{}".format(features[i], features[j])] = np.sqrt((arr[:, i] * arr[:, j]))
_df = pd.DataFrame(dic)
correlations[i, :] = _df.corrwith(target)
ax = axes[1]
makeit(ax)
ax.set_title('Geometric Mean Bivariate Correlation', y=1.3, fontweight="bold", fontsize=18)
plt.tight_layout()
if path: plt.savefig(path, dpi=dpi)
return fig | d5dc7da98228aa7b7865510bd4dcd6531e7049bc | 13,644 |
import os.path as osp
from torch.utils.data import DataLoader
def create_datastream(dataset_path, **kwargs):
""" create data_loader to stream images 1 by 1 """
if osp.isfile(osp.join(dataset_path, 'calibration.txt')):
db = ETH3DStream(dataset_path, **kwargs)
elif osp.isdir(osp.join(dataset_path, 'image_left')):
db = TartanAirStream(dataset_path, **kwargs)
elif osp.isfile(osp.join(dataset_path, 'rgb.txt')):
db = TUMStream(dataset_path, **kwargs)
elif osp.isdir(osp.join(dataset_path, 'mav0')):
db = EurocStream(dataset_path, **kwargs)
elif osp.isfile(osp.join(dataset_path, 'calib.txt')):
db = KITTIStream(dataset_path, **kwargs)
else:
# db = TartanAirStream(dataset_path, **kwargs)
db = TartanAirTestStream(dataset_path, **kwargs)
stream = DataLoader(db, shuffle=False, batch_size=1, num_workers=4)
return stream | 145f8c44e8e718fea9a9bdabf5e1f9497a00241a | 13,645 |
def is_contained(target, keys):
"""Check is the target json object contained specified keys
:param target: target json object
:param keys: keys
:return: True if all keys are contained, False if any key is missing
Invalid parameters always return False.
"""
if not target or not keys:
return False
# if keys is just a string convert it to a list
if type(keys) == str:
keys = [keys]
# traverse the list to check json object
# if key does not exist or value is None then return False
try:
for key in keys:
if target[key] is None:
return False
except KeyError:
return False
# All seems to be going well
return True | 948196d4b470788199506bd7768e03554fa67b40 | 13,646 |
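Illustrative calls with made-up dicts, assuming is_contained above is in scope:
print(is_contained({"a": 1, "b": 2}, ["a", "b"]))     # True
print(is_contained({"a": 1, "b": None}, ["a", "b"]))  # False (None value)
print(is_contained({"a": 1}, "b"))                    # False (missing key)
print(is_contained({}, ["a"]))                        # False (empty target)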
def map(x, in_min, in_max, out_min, out_max):
"""
Map a value from one range to another
:param x: the value to map
:param in_min: minimum of input range
:param in_max: maximum of input range
:param out_min: minimum of output range
:param out_max: maximum of output range
:return: The value scaled to the new range
:rtype: int
"""
return int((x-in_min) * (out_max-out_min) / (in_max-in_min) + out_min) | 4117af35b0061df1fd271306accf198692442dac | 13,647 |
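For instance, rescaling a 10-bit reading onto 0..255 with the map defined above (note it shadows the built-in map): (512 - 0) * (255 - 0) / (1023 - 0) is about 127.6, truncated to 127:
print(map(512, 0, 1023, 0, 255))  # 127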
import requests
def get_points(sess: requests.Session, console: Console, status: Status, projectID: int):
"""
Get all exisiting points in a project
"""
base_url = f"https://mapitfast.agterra.com/api/Points"
resp = sess.get(base_url, params={"projectId": projectID})
points_obj_list = list()
for raw_resp in resp.json():
points_obj_list.append(Points(raw_data=raw_resp))
return points_obj_list | c5f1fce542b06d1680637750f51c3bd7a6e6ebc4 | 13,648 |
import numpy as np
def calculate_discounted_returns(rewards):
"""
Calculate discounted reward and then normalize it
(see Sutton book for definition)
Params:
rewards: list of rewards for every episode
"""
returns = np.zeros(len(rewards))
next_return = 0 # 0 because we start at the last timestep
for t in reversed(range(0, len(rewards))):
next_return = rewards[t] + args.gamma * next_return
returns[t] = next_return
# normalize for better statistical properties
returns = (returns - returns.mean()) / (returns.std() + np.finfo(np.float32).eps)
return returns | 538c3d5636bc6105ddf603f0928e4e891fea774c | 13,649 |
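A stand-alone sketch of the backward recursion G_t = r_t + gamma * G_{t+1}, with an explicit discount factor instead of the global args.gamma used above; the rewards are made up and the normalization step is omitted to keep the numbers readable:
import numpy as np
rewards, gamma = [1.0, 0.0, 2.0], 0.9
returns = np.zeros(len(rewards))
next_return = 0.0
for t in reversed(range(len(rewards))):
    next_return = rewards[t] + gamma * next_return
    returns[t] = next_return
print(returns)  # [2.62 1.8  2.  ]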
import time
import sys
def mock_tensorboard(logdir, host, port, print_nonsense, print_nothing,
address_in_use, sleep_time):
"""Run fake TensorBoard."""
if logdir is None:
print('A logdir must be specified. Run `tensorboard --help` for '
'details and examples.')
return -1
elif print_nothing:
time.sleep(sleep_time)
elif print_nonsense:
for i in range(0, 150):
print('Lorem ipsum %d' % i, file=sys.stderr)
time.sleep(0.1)
elif address_in_use:
print('TensorBoard attempted to bind to port %d, but it was already in use' % 1234, file=sys.stderr)
else:
time.sleep(1)
print('TensorBoard 1.8.0 at http://ntbthinkpad:%d' % 6006, file=sys.stderr) | 26a793264fa9561fabc9fa9d2fcb1377a6b60783 | 13,650 |
def parse_binskim_old(bin_an_dic, output):
"""Parse old version of binskim."""
current_run = output['runs'][0]
if 'results' in current_run:
rules = output['runs'][0]['rules']
for res in current_run['results']:
if res['level'] != 'pass':
if len(res['formattedRuleMessage']['arguments']) > 2:
info = ('{}, {}').format(
res['formattedRuleMessage']['arguments'][1],
res['formattedRuleMessage']['arguments'][2])
else:
info = ''
result = {
'rule_id': res['ruleId'],
'status': 'Insecure',
'info': info,
'desc': rules[res['ruleId']]['shortDescription'],
}
else:
result = {
'rule_id': res['ruleId'],
'status': 'Secure',
'info': '',
'desc': rules[res['ruleId']]['shortDescription'],
}
bin_an_dic['results'].append(result)
else:
logger.warning('binskim has no results.')
# Create a warning for the GUI
warning = {
'rule_id': 'No Binskim-Results',
'status': 'Info',
'info': '',
'desc': 'No results from Binskim.',
}
bin_an_dic['warnings'].append(warning)
if 'configurationNotifications' in current_run:
for warn in current_run['configurationNotifications']:
warning = {
'rule_id': warn['ruleId'],
'status': 'Info',
'info': '',
'desc': warn['message'],
}
bin_an_dic['warnings'].append(warning)
# Return updated dict
return bin_an_dic | bd927aa972148b1171dcf2d5c60aa219cf4527b6 | 13,651 |
import operator
def binary_elementwise_compute(
ifm: te.Tensor,
ifm2: te.Tensor,
lut: te.Tensor,
operator_type: str,
ifm_scale: float,
ifm_zero_point: int,
ifm2_scale: float,
ifm2_zero_point: int,
ofm_scale: float,
ofm_zero_point: int,
ifm_channels: int,
ifm2_channels: int,
reversed_operands: bool,
activation: str,
clip_min: int,
clip_max: int,
rounding_mode: str,
ifm_layout: str,
ifm2_layout: str,
ofm_layout: str,
ofm_dtype: str,
) -> te.Tensor:
"""A compute operator representing the capabilities of binary_elementwise for the NPU.
Parameters
----------
ifm : te.Tensor
The Input Feature Map tensor (IFM).
ifm2 : te.Tensor
The Input Feature Map tensor 2 (IFM2).
lut : te.Tensor
The look-up table values to use if activation = "LUT".
operator_type: str
The type of the binary elementwise operator.
"ADD"
"SUB"
"MUL"
"MIN"
"MAX"
"SHR"
"SHL"
ifm_scale : float
The quantization scale for the Input Feature Map tensor.
ifm_zero_point : int
The quantization zero point for the Input Feature Map tensor.
ifm2_scale : float
The quantization scale for the Input Feature Map tensor 2.
ifm2_zero_point : int
The quantization zero point for the Input Feature Map tensor 1.
ofm_scale : float
The quantization scale for the Output Feature Map tensor.
ofm_zero_point : int
The quantization zero point for the Output Feature Map tensor.
ifm_channels : int
The number of the Input Feature Map channels.
ifm2_channels : int
The number of the Input Feature Map 2 channels.
reversed_operands : bool
True if IFM2 is the first operand and IFM is the second operand.
activation : str
The activation function to use.
"NONE" - no activation function.
"CLIP" - clip the output between clip_min and clip_max.
"TANH" - tanh activation function.
"SIGMOID" - sigmoid activation function.
"LUT" - use a look-up table to perform the activation function.
Available activations for activation type:
{int8, uint8}: "NONE", "CLIP", "TANH", "SIGMOID", "LUT"
{int32}: "NONE"
clip_min : int
The minimum clipping value if activation = "CLIP".
clip_max : int
The maximum clipping value if activation = "CLIP".
rounding_mode : str
The rounding mode to apply to the Output Feature Map tensor.
"TFL" - Tensorflow Lite rounding scheme.
"TRUNCATE" - Truncate towards zero.
"NATURAL" - Round to nearest value, with x.5 rounded up towards +infinity.
ifm_layout : str, optional
The layout of the Input Feature Map tensor. Can be "NHWC" or "NHCWB16".
ifm2_layout : str, optional
The layout of the Input Feature Map tensor 2. Can be "NHWC" or "NHCWB16".
ofm_layout : str, optional
The layout of the Output Feature Map tensor. Can be "NHWC" or "NHCWB16".
ofm_dtype: str
The Output Feature Map tensor type.
MUL, ADD, SUB {IFM}->{OFM}:
{uint8, int8 int32} -> {uint8, int8, int32}, any pairing
MAX, MIN:
IFM and OFM must be of the same type, one of:
{int8, uint8}
SHR {IFM}->{OFM}:
{int32}->{int8, uint8, int32}, any pairing
SHL:
{int32}->{int32} only
Returns
-------
te.Tensor
The Output Feature Map tensor.
"""
assert ifm.shape[0] == 1
assert ifm2.shape[0] == 1
assert ifm_layout in {"NHWC", "NHCWB16"}
assert ifm2_layout in {"NHWC", "NHCWB16"}
assert ofm_layout in {"NHWC", "NHCWB16"}
# Compute operation for the IFM DMA pipeline
dmaed_ifm = dma_ifm_compute(
ifm, ifm_layout, ifm_zero_point, ifm_scale, ifm_channels, (0, 0, 0, 0)
)
dmaed_ifm2 = dma_ifm_compute(
ifm2, ifm2_layout, ifm2_zero_point, ifm2_scale, ifm2_channels, (0, 0, 0, 0)
)
# Binary elementwise compute operation
ofm_height = dmaed_ifm.shape[1]
ofm_width = dmaed_ifm.shape[2]
binary_elementwise_attrs = {
"op": "ethosu_binary_elementwise",
"operator_type": operator_type,
"reversed_operands": reversed_operands,
"activation": activation,
"clip_min": clip_min,
"clip_max": clip_max,
"rounding_mode": rounding_mode,
}
operators = {
"ADD": operator.add,
"SUB": operator.sub,
"MUL": operator.mul,
"MIN": te.min,
"MAX": te.max,
"SHR": operator.add,
"SHL": operator.add,
}
broadcast = [value == 1 for value in dmaed_ifm2.shape]
if reversed_operands:
binary_elementwise = te.compute(
(1, ofm_height, ofm_width, ifm_channels),
lambda nn, hh, ww, cc: operators[operator_type](
dmaed_ifm2(
0 if broadcast[0] else nn,
0 if broadcast[1] else hh,
0 if broadcast[2] else ww,
0 if broadcast[3] else cc,
).astype(ifm.dtype),
dmaed_ifm(nn, hh, ww, cc).astype(ifm.dtype),
).astype(ofm_dtype),
name="ethosu_binary_elementwise",
attrs=binary_elementwise_attrs,
)
else:
binary_elementwise = te.compute(
(1, ofm_height, ofm_width, ifm_channels),
lambda nn, hh, ww, cc: operators[operator_type](
dmaed_ifm(nn, hh, ww, cc).astype(ifm.dtype),
dmaed_ifm2(
0 if broadcast[0] else nn,
0 if broadcast[1] else hh,
0 if broadcast[2] else ww,
0 if broadcast[3] else cc,
).astype(ifm.dtype),
).astype(ofm_dtype),
name="ethosu_binary_elementwise",
attrs=binary_elementwise_attrs,
)
nhwc_to_nhcwb16 = [
[1, 0, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 0, 0, 1 / 16, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 0, 16],
[0, 0, 0, 0, 1],
]
nhcwb16_to_nhwc = [
[1, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0],
[0, 0, 16, 0, 1, -16],
[0, 0, 0, 0, 0, 1],
]
ifm_matrix = [
[1, 0, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 1, 0],
[0, 0, 0, 0, 1],
]
ifm2_matrix = [
[1, 0, 0, 0, 0],
[0, (1 - int(broadcast[1])), 0, 0, int(broadcast[1])],
[0, 0, (1 - int(broadcast[2])), 0, int(broadcast[2])],
[0, 0, 0, (1 - int(broadcast[3])), int(broadcast[3])],
[0, 0, 0, 0, 1],
]
if ofm_layout == "NHCWB16":
ifm_matrix = np.matmul(ifm_matrix, nhcwb16_to_nhwc).tolist()
ifm2_matrix = np.matmul(ifm2_matrix, nhcwb16_to_nhwc).tolist()
if ifm_layout == "NHCWB16":
ifm_matrix = np.matmul(nhwc_to_nhcwb16, ifm_matrix).tolist()
if ifm2_layout == "NHCWB16":
ifm2_matrix = np.matmul(nhwc_to_nhcwb16, ifm2_matrix).tolist()
ifm_propagator = Propagator(
ifm_matrix,
[0, 0, 0, 0] if ifm_layout == "NHWC" else [0, 0, 0, 0, 0],
)
ifm2_propagator = Propagator(
ifm2_matrix,
[0, 0, 0, 0] if ifm2_layout == "NHWC" else [0, 0, 0, 0, 0],
)
propagator_attrs = {
"ifm_propagator": ifm_propagator,
"ifm2_propagator": ifm2_propagator,
}
# Compute operation for the OFM DMA pipeline
return dma_ofm_compute(
binary_elementwise,
ofm_layout,
ofm_zero_point,
ofm_scale,
ifm_channels,
attrs=propagator_attrs,
) | 2bbac91e8606512180b6a652538eeac23e369c7c | 13,652 |
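A rough numpy illustration of what the ifm2_matrix above encodes for a broadcast second input (NHWC, no layout conversion): each row maps an output extent [n, h, w, c, 1] to the extent required from IFM2, with the trailing column acting as a constant term, so broadcast axes collapse to 1. The shapes are made up and this does not touch the TVM Propagator API:
import numpy as np
broadcast = [True, True, True, False]  # e.g. ifm2 has shape (1, 1, 1, C)
ifm2_matrix = np.array([
    [1, 0, 0, 0, 0],
    [0, 1 - broadcast[1], 0, 0, broadcast[1]],
    [0, 0, 1 - broadcast[2], 0, broadcast[2]],
    [0, 0, 0, 1 - broadcast[3], broadcast[3]],
    [0, 0, 0, 0, 1],
])
out_extent = np.array([1, 16, 16, 8, 1])  # N, H, W, C plus the constant 1
print(ifm2_matrix @ out_extent)           # [1 1 1 8 1]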
def x_power_dependence(n, dep_keys, ctfs=list(), force_zero=None, **kwargs):
"""Returns a fit function that allows x^n depdendence on the constants
associated with each of the dep_keys
y(x) = (a0 * b0 + a1 * b1 + ...) * x^n
where each of the a's are fit parameters and each of the b's are either
a constant associated with the keys in dep_keys or a constant constructed
by a ctf (constant transform function) in ctfs
"""
return _dependence(
f=lambda p, x: p[0] * x ** n, n_params=1,
dep_keys=dep_keys, ctfs=ctfs, force_zero=force_zero,
name='x^{} dependence'.format(n), code='x{}'.format(n)+':{}', **kwargs
) | 49b1a605001003b52f38f7f469a7c7bfafd43d6b | 13,653 |
from typing import Iterable
def get_subseqs(s, ops):
"""Returns a list of sequences given when applying the list of (ops)
on them, until a constant one is found, thus:
new[0] = next seq of s with ops[0]
new[i] = next seq of new[i-1] with op[i]
If 'ops' is not a list, then the same operation will be repeated.
The length of 'ops' should be equal to the length of 's' minus 1"""
if len(s) < 2:
# We can't get the next sequence based on two terms if there's only one
return []
if not isinstance(ops, Iterable):
ops = [ops for _ in range(len(s)-1)]
# Start with the initial subsequence
subseqs = [get_subseq(s, ops[0])]
# And base the next subsequences on the previous one until they're constant
i = 1
while not is_constant(subseqs[-1]) and len(subseqs[-1]) > 1:
subseqs.append(get_subseq(subseqs[-1], ops[i]))
i += 1
return subseqs | 3ad7a955c7b55596f327ae52d34368451ef79737 | 13,654 |
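A sketch of the helpers it relies on plus one run, assuming get_subseq applies the operation to consecutive pairs and is_constant checks that all terms are equal (neither helper appears in the original):
# Assumed helpers, then the difference sequences of the squares.
import operator

def get_subseq(s, op):
    return [op(s[i + 1], s[i]) for i in range(len(s) - 1)]

def is_constant(s):
    return len(set(s)) <= 1

print(get_subseqs([1, 4, 9, 16, 25], operator.sub))  # [[3, 5, 7, 9], [2, 2, 2]]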
from pysiaf import aperture # runtime import to avoid circular import on startup
import os
from collections import OrderedDict
import numpy as np
def read_hst_siaf(file=None):#, AperNames=None):
"""Read apertures from HST SIAF file and return a collection.
This was partially ported from Lallo's plotap.f.
Parameters
----------
file : str
AperNames : str list
Returns
-------
apertures: dict
Dictionary of apertures
"""
if file is None:
file = os.path.join(HST_PRD_DATA_ROOT, 'siaf.dat')
# read all lines
siaf_stream = open(file)
data = siaf_stream.readlines()
siaf_stream.close()
# initialize dict of apertures
apertures = OrderedDict()
# inspect SIAF and populate Apertures
CAJ_index = 0
CAK_index = 0
for l, text in enumerate(data):
skip_aperture = False
if (text.rstrip()[-3::] == 'CAJ') & (CAJ_index == 0):
a = aperture.HstAperture()
# Process the first 'CAJ' record.
a.ap_name = text[0:10].strip() # Aperture Identifier.
a.v2_cent = np.float(text[10:25]) # SICS V2 Center. (same as a_v2_ref)
a.v3_cent = np.float(text[25:40]) # SICS V3 Center. (same as a_v3_ref)
a.a_shape = text[40:44] # Aperture Shape.
try:
a.maj = np.float(text[44:59]) # Major Axis Dimension.
except ValueError: # when field is empty
a.maj = None
a.Mac_Flag = text[59] # !SI Macro Aperture Flag.
a.BR_OBJ_Flag = text[60] # !Bright Object Alert Flag.
a.brt_obj_thres = text[61:66] # !Bright Object Alert Threshold.
a.Macro_ID = text[66:70] # !SI Macro Aperture Identifier.
rec_type = text[70:73] # !Record type.
CAJ_index = 1
aperture_name = a.ap_name
elif (text.rstrip()[-3::] == 'CAJ') & (CAJ_index == 1):
# Process the second 'CAJ' record.
try:
a.min = np.float(text[0:15]) # !Minor Axis Dimension.
except ValueError: # when field is empty
a.min = None
a.plate_scale = np.float(text[15:30]) # !Arcsecond per Pixel plate scale.
a.a_area = np.float(text[30:45]) # !Area of SI Aperture.
a.theta = np.float(text[45:60]) # !Aperture Rotation Angle.
a.SIAS_Flag = text[60] # !SIAS coordinate system flag. (If set then AK rec.)
rec_type = text[70:73] # !Record type.
CAJ_index = 2
elif (text.rstrip()[-3::] == 'CAJ') & (CAJ_index == 2):
# Process the third 'CAJ' record.
a.im_par = np.int(text[0:2]) # Image Parity.
a.ideg = np.int(text[2]) # !Polynomial Degree.
a.xa0 = np.float(text[3:18]) # !SIAS X Center. -> like JWST SCIENCE frame
a.ya0 = np.float(text[18:33]) # !SIAS Y Center.
a.xs0 = np.float(text[33:48]) # !SICS X Center. -> like JWST IDEAL frame
a.ys0 = np.float(text[48:63]) # !SICS Y Center.
rec_type = text[70:73] # !Record type.
CAJ_index = 0
elif text.rstrip()[-2::] == 'AJ':
a.SI_mne = text[0:4].strip() # !Science Instrument Mnemonic
a.Tlm_mne = text[4] # !SI Telemetry Mnemonic.
a.Det_mne = text[5] # !SI Detector Mnemonic.
a.A_mne = text[6:10] # !SI Aperture Mnemonic.
a.APOS_mne = text[10] # !SI Aperture Position Mnemonic.
rec_type = text[70:73] # !Record type.
elif text.rstrip()[-3::] == 'CAQ':
a.v1x = np.float(text[0:15]) # !SICS Vertex 1_X -> like JWST IDEAL frame
a.v1y = np.float(text[15:30]) # !SICS Vertex 1_Y
a.v2x = np.float(text[30:45]) # !SICS Vertex 2_X
a.v2y = np.float(text[45:60]) # !SICS Vertex 2_Y
rec_type = text[70:73] # !Record type.
elif text.rstrip()[-2::] == 'AQ':
a.v3x = np.float(text[0:15]) # !SICS Vertex 3_X
a.v3y = np.float(text[15:30]) # !SICS Vertex 3_Y
a.v4x = np.float(text[30:45]) # !SICS Vertex 4_X
a.v4y = np.float(text[45:60]) # !SICS Vertex 4_Y
rec_type = text[70:73] # !Record type.
elif text.rstrip()[-2::] == 'AP':
# FGS pickles
a.pi_angle = np.float(text[0:15]) # !Inner Radius Orientation Angle.
a.pi_ext = np.float(text[15:30]) # !Angular Extent of the Inner Radius.
a.po_angle = np.float(text[30:45]) # !Outer Radius Orientation Angle.
a.po_ext = np.float(text[45:60]) # !Angular Extent of the Outer Radius.
rec_type = text[70:73] # !Record type.
elif text.rstrip()[-2::] == 'AM':
a.a_v2_ref = np.float(text[0:15]) # !V2 Coordinate of Aperture Reference Point. (same as v2_cent)
a.a_v3_ref = np.float(text[15:30]) # !V3 Coordinate of Aperture Reference Point. (same as v3_cent)
a.a_x_incr = np.float(text[30:45]) # !First Coordinate Axis increment.
a.a_y_incr = np.float(text[45:60]) # !Second Coordinate Axis increment.
elif text.rstrip()[-2::] == 'AN':
if (a.a_shape == 'PICK') and ('FGS' in a.ap_name):
# HST FGS are special in the sense that the idl_to_tel transformation is implemented via the TVS matrix
# and not the standard way
# a.set_fgs_tel_reference_point(a.a_v2_ref, a.a_v2_ref)
a.set_idl_reference_point(a.a_v2_ref, a.a_v3_ref, verbose=False)
# pass
if (a.a_shape == 'PICK') | (a.a_shape == 'CIRC'):
# TO BE IMPLEMENTED
# FGS pickle record ends here
# apertures.append(a)
# read(10,1250)Beta1, !Angle of increasing first coordinate axis.
# * Beta2, !Angle of increasing second coordinate axis.
# * a_x_ref, !X reference.
# * a_y_ref, !Y reference.
# * X_TOT_PIX, !Total X-axis pixels.
# * Y_TOT_PIX, !Total Y-axis pixels.
# * rec_type !Record type.
# 1250 format(4(G15.8),2(I5),a3)
# apertures.append(a)
apertures[a.AperName] = a
elif (text.rstrip()[-3::] == 'CAK') & (CAK_index == 0):
# Process the first 'CAK' record.
n_polynomial_coefficients = np.int(((a.ideg + 1) * (a.ideg + 2)) / 2)
# the order is
# SIAS to SICS X Transformation.
# SIAS to SICS Y Transformation.
# SICS to SIAS X Transformation.
# SICS to SIAS X Transformation.
polynomial_coefficients = np.ones((n_polynomial_coefficients, 4)) * -99
for jj in np.arange(4):
polynomial_coefficients[CAK_index, jj] = np.float(text[15 * jj:15 * (jj + 1)])
CAK_index += 1
elif (text.rstrip()[-3::] == 'CAK') & (CAK_index != 0):
# Process the remaining 'CAK' records
for jj in np.arange(4):
polynomial_coefficients[CAK_index, jj] = np.float(text[15 * jj:15 * (jj + 1)])
CAK_index += 1
elif text.rstrip()[-2::] == 'AK':
# Process the last polynomial coefficient record.
for jj in np.arange(4):
polynomial_coefficients[CAK_index, jj] = np.float(text[15 * jj:15 * (jj + 1)])
a.polynomial_coefficients = polynomial_coefficients
CAK_index = 0
apertures[a.AperName] = a
# apertures.append(a)
return apertures | f7281b1411deefbec86dd50feb40739b59cad3d1 | 13,655 |
import numpy as np
def update_s(C,k):
"""
Args: C: 2d array
k: 1d array
Return: 1d array
"""
if np.shape(C)[0]==0:
s = np.array([1])
else:
temp = np.dot(C,k)
s = np.append(temp,1)
return s | ce4604d71b05d328d6b8b60bea9f611d8d12f6eb | 13,656 |
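# Illustrative usage (added; not part of the original entry): an empty C yields [1],
# otherwise the result is C @ k with a trailing 1 appended.
C = np.array([[1.0, 2.0]])
k = np.array([3.0, 4.0])
print(update_s(np.empty((0, 2)), k))  # -> [1]
print(update_s(C, k))                 # -> [11.  1.]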
import pytest
def test_handler_callback_failure():
"""Test failure mode for inappropriate handlers."""
class BadHandler(object):
def handler(self, one):
return 'too many'
ob = EventTest()
handler = BadHandler()
with pytest.raises(TypeError):
ob.PublicEvent += handler.handler
ob.OnPublicEvent(EventArgsTest(10))
ob.PublicEvent -= handler.handler
class BadHandler(object):
def handler(self, one, two, three, four, five):
return 'not enough'
ob = EventTest()
handler = BadHandler()
with pytest.raises(TypeError):
ob.PublicEvent += handler.handler
ob.OnPublicEvent(EventArgsTest(10))
ob.PublicEvent -= handler.handler | c5d8daf4cca81ef8dee8ba5a10b9e572899bd23e | 13,657 |
def get_chord_type(chord):
"""'Parses' input for a chord and returns the type of chord from it"""
cleaned_chord = chord[1:]
cleaned_chord = cleaned_chord.replace('b', '')
cleaned_chord = cleaned_chord.replace('#', '')
mapping = {
'7': 'seven',
'9': 'nine',
'm7': 'minor7',
'm9': 'minor9',
'm': 'minor',
'M7': 'major7',
'M9': 'major9',
'': 'major',
}
return mapping[cleaned_chord] | 4a753eb31f1e33340a7aa4df6942c4752b208fdd | 13,658 |
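# Illustrative usage (added; not part of the original entry): the first character
# (the root note) is dropped and accidentals are stripped before the lookup.
print(get_chord_type('C#m7'))  # -> 'minor7'
print(get_chord_type('Ab'))    # -> 'major'
print(get_chord_type('GM9'))   # -> 'major9'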
import os
import pickle
import numpy as np
import pandas as pd
def compile_stats(path):
""" combines all items from the given folder of stats arrays """
df = pd.DataFrame()
for item in os.listdir(path):
print(item)
with open(path + '/' + item, 'rb') as file:
df1 = pickle.load(file)
# df1 = df1.loc[df1.pred_var < 1.0]
# df1 = df1.loc[df1.pred_var > 0.0]
df1 = df1.loc[df1.next_hold != np.inf]
df1 = df1.loc[df1.next_hold != -np.inf]
df = df.append(df1)
return df | 58a2a9f4962b2882c7da589af27ba55e9ca601ed | 13,659 |
from typing import Union
def transpile(model: Union[SympyOpt, Model]) -> SympyOpt:
"""Transpile optimization problem into SympyOpt model
Only accepts SympyOpt or Docplex model.
:param model: model to be transpiled
:raises ValueError: if the argument is of inappropriate type
:return: transpiled model
"""
if isinstance(model, SympyOpt):
return deepcopy(model)
elif isinstance(model, Model):
return DocplexToSympyopt().transpile(model)
elif isinstance(model, LpProblem):
return PulpToSympyopt().transpile(model)
elif isinstance(model, (QuadraticProgram, PauliSumOp)):
return QiskitToSympyopt().transpile(model)
elif isinstance(model, (BinaryQuadraticModel, ConstrainedQuadraticModel)):
return DimodToSympyopt().transpile(model)
else:
raise ValueError(f"Unknown model type: {type(model)}") | f2b4895cb980e535166d9749eb93925722981828 | 13,660 |
def definition():
"""View of the finances with subtotals generated."""
return sql.format(source=source) | c0b9add49b9c7403328449b8989e29739be267a9 | 13,661 |
import math
import numpy as np
def random_mini_batches(X, Y, mini_batch_size = 32, seed = 0):
"""
Creates a list of random minibatches from (X, Y)
Arguments:
X -- input data, of shape (input size, number of examples) (m, Hi, Wi, Ci)
Y -- true "label" vector (containing 0 if control, 1 if case), of shape (1, number of examples) (m, n_y)
mini_batch_size - size of the mini-batches, integer
    seed -- this is only for the purpose of grading, so that your "random" minibatches are the same as ours.
Returns:
mini_batches -- list of synchronous (mini_batch_X, mini_batch_Y)
"""
m = X.shape[0] # number of training examples
mini_batches = []
np.random.seed(seed)
# Step 1: Shuffle (X, Y)
permutation = list(np.random.permutation(m))
shuffled_X = X[permutation,:,:,:]
shuffled_Y = Y[permutation,:]
# Step 2: Partition (shuffled_X, shuffled_Y). Minus the end case.
    num_complete_minibatches = math.floor(m/mini_batch_size) # number of mini batches of size mini_batch_size in your partitioning
num_complete_minibatches = int(num_complete_minibatches)
for k in range(0, int(num_complete_minibatches)):
mini_batch_X = shuffled_X[k * mini_batch_size : k * mini_batch_size + mini_batch_size,:,:,:]
mini_batch_Y = shuffled_Y[k * mini_batch_size : k * mini_batch_size + mini_batch_size,:]
mini_batch = (mini_batch_X, mini_batch_Y)
mini_batches.append(mini_batch)
# Handling the end case (last mini-batch < mini_batch_size)
if m % mini_batch_size != 0:
mini_batch_X = shuffled_X[num_complete_minibatches * mini_batch_size : m,:,:,:]
mini_batch_Y = shuffled_Y[num_complete_minibatches * mini_batch_size : m,:]
mini_batch = (mini_batch_X, mini_batch_Y)
mini_batches.append(mini_batch)
return mini_batches | 8baa63be638a1706c49176a51013524594a59452 | 13,662 |
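# Illustrative usage (added; not part of the original entry): 50 RGB images of size
# 28x28 with binary labels, split into mini-batches of 16.
X = np.random.randn(50, 28, 28, 3)
Y = np.random.randint(0, 2, size=(50, 1))
batches = random_mini_batches(X, Y, mini_batch_size=16, seed=1)
print(len(batches))         # -> 4 (three full batches of 16 plus one of 2)
print(batches[0][0].shape)  # -> (16, 28, 28, 3)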
def file_base_features(path, record_type):
"""Return values for BASE_SCHEMA features."""
base_feature_dict = {
"record_id": path,
"record_type": record_type,
# "utc_last_access": os.stat(path).st_atime,
"utc_last_access": 1600000000.0,
}
return base_feature_dict | 12f16684002892d7af59a1e26e8a40501098ca4f | 13,663 |
def split_ref(ref):
"""
    Split a cell reference into its column letters and its 1-based row number string.
    Params:
        ref(str):
    Returns:
        Tuple[str, str]: column, row
"""
m = re_cellref.match(ref)
if m:
return m.group(1), m.group(2)
return None, None | 1ae8e058a47ad0410b7131d4b89061dea822ed68 | 13,664 |
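# Illustrative usage (added; not part of the original entry). re_cellref is an elided
# module-level regex assumed to capture the column letters and row digits of an
# A1-style reference.
print(split_ref("AB12"))  # expected: ("AB", "12")
print(split_ref("???"))   # expected: (None, None) when the pattern does not match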
def table_definition(dataset):
"""print an azure synapse table definition for a kartothek dataset"""
index_col = list(dataset.dataset_metadata.index_columns)[
0
] ##works only with one index column
cols = synapse_columns(
dataset.dataset_metadata.table_meta[dataset.table], index_col
)
template = """
with {dataset.dataset_uuid} as (
SELECT
result.filepath(1) as [{index_col}],
*
FROM
OPENROWSET(
BULK '{dataset.url}/{index_col}=*/*.parquet',
FORMAT='PARQUET'
) with(
{cols}
) as [result]
)
select top 100 * from {dataset.dataset_uuid};
"""
return template.format(dataset=dataset, cols=cols, index_col=index_col) | 75a2f55fa31025899e9adb05e20dbc89ae8dabd4 | 13,665 |
import os
def identify_image_set(imagedir, image_names_pattern):
"""
Find all the images within the *imagedir*.
:param imagedir:
:param image_names_pattern:
:return: a list of image names that are part of the image set
"""
image_names_from_os = sorted(os.listdir(imagedir))
image_names = [re_identify_image_set(fn, image_names_pattern) for fn in image_names_from_os]
image_names = [name for name in image_names if name is not None]
return image_names | 57826e43d7d3e241ec8f61664b007d5fea2ef43f | 13,666 |
import itertools
def node_extractor(dataframe, *columns):
"""
Extracts the set of nodes from a given dataframe.
:param dataframe: dataframe from which to extract the node list
:param columns: list of column names that contain nodes
:return: list of all unique nodes that appear in the provided dataset
"""
data_list = [dataframe[column].unique().tolist() for column in columns]
return list(set(itertools.chain.from_iterable(data_list))) | 7a4ab889257a0f2c5ddfe18e65d0a7f5f35d8d98 | 13,667 |
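# Illustrative usage (added; not part of the original entry): collect the unique
# nodes appearing in the 'source' and 'target' columns of an edge list.
import pandas as pd
edges = pd.DataFrame({"source": ["a", "b", "a"], "target": ["b", "c", "c"]})
print(sorted(node_extractor(edges, "source", "target")))  # -> ['a', 'b', 'c']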
def _get_bag(environ, bag_name):
"""
Get the named bag out of the store.
"""
store = environ['tiddlyweb.store']
bag = Bag(bag_name)
try:
bag = store.get(bag)
except NoBagError as exc:
raise HTTP404('%s not found, %s' % (bag.name, exc))
return bag | db4e2425f6c4d839fa091c08b524ea8ecd3c7c27 | 13,668 |
def missing_values_operation(files):
"""Will take iterable file objects and eliminate features or samples with missing values or inputing missing values if necessary"""
for i in files:
with open(i,'rw') as f:
if missing_values(f)==True:
file_data=load_data(i)
#Dropping rows with missing values
file_data.dropna(axis=0)
#Dropping columns with missing values
file_data.dropna(axis=1)
return "dropped rows and columns"
else:
return "no values to be dropped" | df5a6f6809605107db9b008b877fa913a3dc686d | 13,669 |
def _object_id(value):
"""Return the object_id of the device value.
The object_id contains node_id and value instance id
to not collide with other entity_ids.
"""
object_id = "{}_{}".format(slugify(_value_name(value)),
value.node.node_id)
# Add the instance id if there is more than one instance for the value
if value.instance > 1:
return "{}_{}".format(object_id, value.instance)
return object_id | 34c21de533a99ffdabfdabf21540492f7ce33b7f | 13,670 |
def _apply_attention_constraint(
e, last_attended_idx, backward_window=1, forward_window=3
):
"""Apply monotonic attention constraint.
**Note** This function is copied from espnet.nets.pytorch_backend.rnn.attention.py
"""
if e.size(0) != 1:
raise NotImplementedError(
"Batch attention constraining is not yet supported.")
backward_idx = last_attended_idx - backward_window
forward_idx = last_attended_idx + forward_window
if backward_idx > 0:
e[:, :backward_idx] = -float("inf")
if forward_idx < e.size(1):
e[:, forward_idx:] = -float("inf")
return e | 213ef514a9cff31134185e38c57d46921eba763a | 13,671 |
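# Illustrative usage (added; not part of the original entry): mask attention energies
# outside the allowed window around the last attended index (in place).
import torch
e = torch.zeros(1, 10)
masked = _apply_attention_constraint(e, last_attended_idx=5)
# Columns 0-3 and 8-9 are now -inf; columns 4-7 are untouched.
print(masked)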
import time
import sys
def RetryInvocation(return_handler, exc_handler, max_retry, functor, *args,
**kwds):
"""Generic retry loop w/ optional break out depending on exceptions.
Generally speaking you likely want RetryException or RetryReturned
rather than this; they're wrappers around this and are friendlier for
end usage.
Arguments:
return_handler: A functor invoked with the returned results from
functor(*args, **kwds). If it returns True, then a retry
is attempted. If False, the result is returned.
If this value is None, then no retries are attempted for
non-excepting invocations of functor(*args, **kwds) .
exc_handler: A functor invoked w/ the exception instance that
functor(*args, **kwds) threw. If it returns True, then a
retry is attempted. If False, the exception is re-raised.
If this value is None, then no exception based retries will
occur.
max_retry: A positive integer representing how many times to retry
the command before giving up. Worst case, the command is invoked
(max_retry + 1) times before failing.
    functor: A callable to pass args and kwds to.
args: Positional args passed to functor.
kwds: Optional args passed to functor.
sleep: Optional keyword. Multiplier for how long to sleep between
retries; will delay (1*sleep) the first time, then (2*sleep),
continuing via attempt * sleep.
Returns:
Whatever functor(*args, **kwds) returns.
Raises:
Exception: Whatever exceptions functor(*args, **kwds) throws and
isn't suppressed is raised. Note that the first exception encountered
is what's thrown; in the absense of an exception (meaning ran out
of retries based on testing the result), a generic RetriesExhausted
exception is thrown.
"""
if max_retry < 0:
raise ValueError("max_retry needs to be zero or more: %s" % max_retry)
sleep = kwds.pop('sleep', 0)
stopper = lambda x: False
return_handler = stopper if return_handler is None else return_handler
exc_handler = stopper if exc_handler is None else exc_handler
exc_info = None
for attempt in xrange(max_retry + 1):
if attempt and sleep:
time.sleep(sleep * attempt)
try:
ret = functor(*args, **kwds)
if not return_handler(ret):
return ret
except Exception as e:
# Note we're not snagging BaseException, so MemoryError/KeyboardInterrupt
# and friends don't enter this except block.
if not exc_handler(e):
raise
# We intentionally ignore any failures in later attempts since we'll
# throw the original failure if all retries fail.
if exc_info is None:
exc_info = sys.exc_info()
#pylint: disable=E0702
if exc_info is None:
raise RetriesExhausted(max_retry, functor, args, kwds)
raise Exception(exc_info[0], exc_info[1], exc_info[2]) | 3cb6e4f6793fbf2f6633efec74b7ed8495233e6c | 13,672 |
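# Illustrative usage (added; not part of the original entry): retry a flaky call up
# to 3 times, retrying only on IOError and sleeping between attempts; the path is a
# placeholder.
def flaky_read():
    return open('/tmp/maybe-there.txt').read()

result = RetryInvocation(None, lambda exc: isinstance(exc, IOError),
                         3, flaky_read, sleep=1)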
from bs4 import BeautifulSoup
import re
def parse_reolink(email):
"""Parse Reolink tracking numbers."""
tracking_numbers = []
soup = BeautifulSoup(email[EMAIL_ATTR_BODY], 'html.parser')
links = [link.get('href') for link in soup.find_all('a')]
for link in links:
if not link:
continue
match = re.search('qtc_tLabels1=(.*?)$', link)
if match and match.group(1) not in tracking_numbers:
tracking_numbers.append(match.group(1))
return tracking_numbers | cc96d35edb2ace40d83464f4cc3bed1c91480f0f | 13,673 |
import numpy as np
def HMF(state, Delta, N):
"""Computes the result of the MF hamiltonian acting on a given state."""
#kinetic term: sum_i(eps(i)*(n_i,up + n_i,down))
kinetic_state = dict_list_sum(
[dict_prod(eps(i, N), dict_sum(number_op(state, i, 0, N), number_op(state, i, 1, N))) for i in range(N)])
#interaction term: sum_i( Delta c_iUP^dag c_iDOWN^dag + conj.(Delta) c_iDOWN c_iUP )
interaction_state = dict_list_sum(
[dict_sum(dict_prod(Delta, cr(cr(state, i, 1, N), i, 0, N)), dict_prod(np.conj(Delta), an(an(state, i, 0, N), i, 1, N))) for i in range(N)])
return dict_sum(kinetic_state, interaction_state) | 3c608d42a328e05fd59c55cbaeded3b6d0b4970b | 13,674 |
def calculate_probability_of_multicoincidence(ambient_size: int = 0,
set_sizes: tuple = (),
intersection_size: int = 0):
"""
Calculates the probability that subsets of a set of a given size, themselves of
prescribed sizes, have mutual intersection of a given cardinality.
Parameters
----------
ambient_size : int
The size of the ambient set.
set_sizes : tuple
The integer sizes of some subsets.
intersection_size : int
The size of the intersection of the subsets.
Returns
-------
probability : float
The probability. Calculated as the number of configurations with the given
intersection size, divided by the number of all configurations.
"""
reduced_sizes = [size - intersection_size for size in set_sizes]
if any(size < 0 for size in reduced_sizes):
return 0
initial_choices = binom(
ambient_size=ambient_size,
subset_size=intersection_size,
)
reduced_ambient_size = ambient_size - intersection_size
covers_of_remaining = compute_number_of_covers(
set_sizes=tuple(reduced_ambient_size - size for size in reduced_sizes),
ambient_size=reduced_ambient_size,
)
all_configurations = count_all_configurations(
set_sizes=set_sizes,
ambient_size=ambient_size,
)
return initial_choices * covers_of_remaining / all_configurations | 1d9deb083f0a0397b067f6efa989a94d68d11b69 | 13,675 |
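# Illustrative call (added; not part of the original entry); the elided helpers
# binom, compute_number_of_covers and count_all_configurations must be importable.
p = calculate_probability_of_multicoincidence(ambient_size=10,
                                              set_sizes=(4, 5),
                                              intersection_size=2)
print(p)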
def check_date(option, opt, value):
"""check a file value
return the filepath
"""
try:
return DateTime.strptime(value, "%Y/%m/%d")
except DateTime.Error :
raise OptionValueError(
"expected format of %s is yyyy/mm/dd" % opt) | 3f817bf2286b459b11ded67abba33b654b090caf | 13,676 |
import numpy as np
def no_cloud_fixture():
"""Multi-realization cloud data cube with no cloud present."""
cloud_area_fraction = np.zeros((3, 10, 10), dtype=np.float32)
thresholds = [0.265, 0.415, 0.8125]
return cloud_probability_cube(cloud_area_fraction, thresholds) | 5128c40485fdbc9c8646bec25d1949aac4cddb58 | 13,677 |
from typing import Iterable
def make_slicer_query(
database: Database,
base_table: Table,
joins: Iterable[Join] = (),
dimensions: Iterable[Field] = (),
metrics: Iterable[Field] = (),
filters: Iterable[Filter] = (),
orders: Iterable = (),
):
"""
Creates a pypika/SQL query from a list of slicer elements.
This is the base implementation shared by two implementations: the query to fetch data for a slicer request and
the query to fetch choices for dimensions.
This function only handles dimensions (select+group by) and filtering (where/having), which is everything needed
for the query to fetch choices for dimensions.
The slicer query extends this with metrics, references, and totals.
:param database:
:param base_table:
pypika.Table - The base table of the query, the one in the FROM clause
:param joins:
A collection of joins available in the slicer. This should include all slicer joins. Only joins required for
the query will be used.
:param dimensions:
A collection of dimensions to use in the query.
:param metrics:
A collection of metrics to use in the query.
:param filters:
A collection of filters to apply to the query.
:param orders:
A collection of orders as tuples of the metric/dimension to order by and the direction to order in.
:return:
"""
query = database.query_cls.from_(base_table, immutable=False)
elements = flatten([metrics, dimensions, filters])
# Add joins
join_tables_needed_for_query = find_required_tables_to_join(elements, base_table)
for join in find_joins_for_tables(joins, base_table, join_tables_needed_for_query):
query = query.join(join.table, how=join.join_type).on(join.criterion)
# Add dimensions
for dimension in dimensions:
dimension_term = make_term_for_field(dimension, database.trunc_date)
query = query.select(dimension_term)
query = query.groupby(dimension_term)
# Add filters
for fltr in filters:
query = (
query.having(fltr.definition)
if fltr.is_aggregate
else query.where(fltr.definition)
)
# Add metrics
metric_terms = [make_term_for_field(metric) for metric in metrics]
if metric_terms:
query = query.select(*metric_terms)
# In the case that the orders are determined by a field that is not selected as a metric or dimension, then it needs
# to be added to the query.
select_aliases = {el.alias for el in query._selects}
for (orderby_field, orientation) in orders:
orderby_term = make_term_for_field(orderby_field)
query = query.orderby(orderby_term, order=orientation)
if orderby_term.alias not in select_aliases:
query = query.select(orderby_term)
return query | 31821bdbb0ab94c8971a70d35c1165f5245d90fb | 13,678 |
def build_grid_generator(cfg, input_shape):
"""
Built an grid generator from `cfg.MODEL.GRID_GENERATOR.NAME`.
"""
grid_generator = cfg.MODEL.GRID_GENERATOR.NAME
return GRID_GENERATOR_REGISTRY.get(grid_generator)(cfg, input_shape) | 5f6edbaeece026fc56068aec0fc75549a71ce4a8 | 13,679 |
def main_page(request):
"""
This function is used to display the main page of programme_curriculum
@param:
request - contains metadata about the requested page
"""
return render(request, 'programme_curriculum/mainpage.html') | fdee3342d369112abb2560c4ecfda17a8dfe01e4 | 13,680 |
import networkx as nx
def _write_detailed_dot(graph, dotfilename):
"""Create a dot file with connection info
digraph structs {
node [shape=record];
struct1 [label="<f0> left|<f1> mid\ dle|<f2> right"];
struct2 [label="<f0> one|<f1> two"];
struct3 [label="hello\nworld |{ b |{c|<here> d|e}| f}| g | h"];
struct1:f1 -> struct2:f0;
struct1:f0 -> struct2:f1;
struct1:f2 -> struct3:here;
}
"""
text = ['digraph structs {', 'node [shape=record];']
# write nodes
edges = []
replacefunk = lambda x: x.replace('_', '').replace('.', ''). \
replace('@', '').replace('-', '')
for n in nx.topological_sort(graph):
nodename = str(n)
inports = []
for u, v, d in graph.in_edges_iter(nbunch=n, data=True):
for cd in d['connect']:
if isinstance(cd[0], str):
outport = cd[0]
else:
outport = cd[0][0]
inport = cd[1]
ipstrip = 'in' + replacefunk(inport)
opstrip = 'out' + replacefunk(outport)
edges.append('%s:%s:e -> %s:%s:w;' % (str(u).replace('.', ''),
opstrip,
str(v).replace('.', ''),
ipstrip))
if inport not in inports:
inports.append(inport)
inputstr = '{IN'
for ip in sorted(inports):
inputstr += '|<in%s> %s' % (replacefunk(ip), ip)
inputstr += '}'
outports = []
for u, v, d in graph.out_edges_iter(nbunch=n, data=True):
for cd in d['connect']:
if isinstance(cd[0], str):
outport = cd[0]
else:
outport = cd[0][0]
if outport not in outports:
outports.append(outport)
outputstr = '{OUT'
for op in sorted(outports):
outputstr += '|<out%s> %s' % (replacefunk(op), op)
outputstr += '}'
srcpackage = ''
if hasattr(n, '_interface'):
pkglist = n._interface.__class__.__module__.split('.')
interface = n._interface.__class__.__name__
if len(pkglist) > 2:
srcpackage = pkglist[2]
srchierarchy = '.'.join(nodename.split('.')[1:-1])
nodenamestr = '{ %s | %s | %s }' % (nodename.split('.')[-1],
srcpackage,
srchierarchy)
text += ['%s [label="%s|%s|%s"];' % (nodename.replace('.', ''),
inputstr,
nodenamestr,
outputstr)]
# write edges
for edge in sorted(edges):
text.append(edge)
text.append('}')
filep = open(dotfilename, 'wt')
filep.write('\n'.join(text))
filep.close()
return text | 793983b56b8fff32fde4e9dc5379a93e4edcb16e | 13,681 |
import functools
import tensorflow as tf
def ResidualBlock(name, input_dim, output_dim, filter_size, inputs, resample=None, he_init=True, bn=False):
"""
resample: None, 'down', or 'up'
"""
if resample=='down':
conv_shortcut = MeanPoolConv
conv_1 = functools.partial(lib.ops.conv2d.Conv2D, input_dim=input_dim, output_dim=input_dim)
conv_2 = functools.partial(ConvMeanPool, input_dim=input_dim, output_dim=output_dim)
elif resample=='up':
conv_shortcut = UpsampleConv
conv_1 = functools.partial(UpsampleConv, input_dim=input_dim, output_dim=output_dim)
conv_2 = functools.partial(lib.ops.conv2d.Conv2D, input_dim=output_dim, output_dim=output_dim)
elif resample==None:
conv_shortcut = lib.ops.conv2d.Conv2D
conv_1 = functools.partial(lib.ops.conv2d.Conv2D, input_dim=input_dim, output_dim=input_dim)
conv_2 = functools.partial(lib.ops.conv2d.Conv2D, input_dim=input_dim, output_dim=output_dim)
else:
raise Exception('invalid resample value')
if output_dim==input_dim and resample==None:
shortcut = inputs # Identity skip-connection
else:
shortcut = conv_shortcut(name+'.Shortcut', input_dim=input_dim, output_dim=output_dim, filter_size=1,
he_init=False, biases=True, inputs=inputs)
output = inputs
if bn:
output = Normalize(name+'.BN1', [0,2,3], output)
output = tf.nn.relu(output)
output = conv_1(name+'.Conv1', filter_size=filter_size, inputs=output, he_init=he_init, biases=False)
if bn:
output = Normalize(name+'.BN2', [0,2,3], output)
output = tf.nn.relu(output)
output = conv_2(name+'.Conv2', filter_size=filter_size, inputs=output, he_init=he_init)
return shortcut + output | 8871553f11975edef2a1b0bbf96aff8c54417adf | 13,682 |
import pylleo
import yamlord
import os
def callback_save_poly():
"""Perform polyfit once regions selected
Globals: cal_fname, data (read-only, so no declaration)
"""
def _check_param_regions(param, regions, cal_dict):
msg = """
<b>{}</b> was not found in the calibration dictionary.
Process that parameter and then try saving the polyfit again.
""".format(
param
)
params_present = True
if param not in cal_dict["parameters"]:
params_present = False
msg.format(param)
else:
for region in regions:
if region not in cal_dict["parameters"][param]:
params_present = False
msg.format("{}/{}".format(param, region))
output_window.text = output_template.format(msg)
return params_present
def _check_index_order(param, regions, cal_dict):
"""Check that index positions exist for each calibration region"""
indices_present = True
for region in regions:
start = cal_dict["parameters"][param][region]["start"]
end = cal_dict["parameters"][param][region]["end"]
# Check if start comes after end
if int(start) > int(end):
indices_present = False
msg = """
The start index ({}) comes after the end index ({}).
Please set new start/end indexes for <b>{}/{}</b>
""".format(
start, end, param, region
)
msg.format(start, end, param, region)
output_window.text = output_template.format(msg)
return indices_present
if datadirs_select.value != "None":
path_dir = os.path.join(parent_input.value, datadirs_select.value)
cal_yaml_path = os.path.join(path_dir, cal_fname)
cal_dict = pylleo.lleocal.read_cal(cal_yaml_path)
# Get currently selected parameter
param = param_select.value
regions = region_select.options
        # Check that the parameter and its regions have been recorded in `cal.yml`
        if not _check_param_regions(param, regions, cal_dict):
            return None
# Check that index positions are in sequence
if not _check_index_order(param, regions, cal_dict):
return None
param = (param_select.value).lower().replace("-", "_")
try:
msg = """
Saved polyfit for <b>{}</b> to <b>{}</b>.
""".format(
param, cal_fname
)
output_window.text = output_template.format(msg)
lower, upper = pylleo.lleocal.get_cal_data(data, cal_dict, param)
poly = list(pylleo.lleocal.fit1d(lower, upper))
poly = [float(str(i)) for i in poly]
cal_dict["parameters"][param]["poly"] = poly
yamlord.write_yaml(cal_dict, cal_yaml_path)
except Exception as e:
msg = "Problem saving polyfit: {}".format(e)
output_window.text = output_template.format(msg)
else:
msg = """
You must first load data and select indices for calibration
regions before you can save to polyfit to `cal.yml`
"""
output_window.text = output_template.format(msg)
return None | 2daa24b0bec9428ba6d45657e08a1ecbe5edd39f | 13,683 |
from functools import wraps
def timer(func):
"""Logging elapsed time of funciton (decorator)."""
@wraps(func)
def wrapper(*args, **kwargs):
with timing(func.__name__):
return func(*args, **kwargs)
return wrapper | eb38d9856f59328188ac24e66f3bb4f9356ebe89 | 13,684 |
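# Illustrative usage (added; not part of the original entry). `timing` is an elided
# context manager assumed to log the elapsed time under the given name.
@timer
def slow_sum(n):
    return sum(range(n))

slow_sum(1000000)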
import numpy as np
from scipy.stats import skew
from scipy.interpolate import interp1d
def peak_ana(x, y, nb=3, plotpoints_axis=None):
""" nb = number of point (on each side) to use as background"""
## get background
xb = np.hstack((x[0:nb], x[-(nb):]))
yb = np.hstack((y[0:nb], y[-(nb):]))
a = np.polyfit(xb, yb, 1)
b = np.polyval(a, x)
yf = y - b
yd = np.diff(yf)
## determine whether peak or step
ispeak = np.abs(skew(yf)) > np.abs(skew(yd))
if ispeak:
yw = yf
xw = x
else:
yw = yd
xw = (x[1:] + x[0:-1]) / 2
## get background
xwb = np.hstack((xw[0:nb], xw[-(nb):]))
ywb = np.hstack((yw[0:nb], yw[-(nb):]))
aw = np.polyfit(xwb, ywb, 1)
bw = np.polyval(aw, xw)
yw = yw - bw
Iw = (xw[1:] - xw[0:-1]) * (yw[1:] + yw[0:-1]) / 2
if sum(Iw) < 0:
yw = -yw
## get parameters
mm = yw.argmax(0)
PEAK = xw[mm]
ywmax = yw[mm]
gg = (yw[:mm][::-1] < (ywmax / 2)).argmax()
ip = interp1d(
yw.take([mm - gg - 1, mm - gg]), xw.take([mm - gg - 1, mm - gg]), kind="linear"
)
xhm1 = ip(ywmax / 2)
gg = (yw[mm:] < (ywmax / 2)).argmax()
ip = interp1d(
yw.take([mm + gg, mm + gg - 1]), xw.take([mm + gg, mm + gg - 1]), kind="linear"
)
xhm2 = ip(ywmax / 2)
FWHM = np.abs(xhm2 - xhm1)
CEN = (xhm2 + xhm1) / 2
if plotpoints_axis and ispeak:
# plot the found points for center and FWHM edges
plotpoints_axis.plot(x, b, "g--")
plotpoints_axis.plot(x, b + ywmax, "g--")
plotpoints_axis.plot([xhm1, xhm1], np.polyval(a, xhm1) + [0, ywmax], "g--")
plotpoints_axis.plot([xhm2, xhm2], np.polyval(a, xhm2) + [0, ywmax], "g--")
plotpoints_axis.plot([CEN, CEN], np.polyval(a, CEN) + [0, ywmax], "g--")
plotpoints_axis.plot([xhm1, xhm2], [np.polyval(a, xhm1), np.polyval(a, xhm2)] + ywmax / 2, "gx")
if not ispeak:
try:
            # find the start of the step coming from the left.
            std0 = np.std(y[0:nb])
            nt = nb
            while (np.std(y[0:nt]) < (2 * std0)) and (nt < len(y)):
                nt = nt + 1
            lev0 = np.mean(y[0:nt])
            # find the start of the step coming from the right.
            std0 = np.std(y[-nb:])
            nt = nb
            while (np.std(y[-nt:]) < (2 * std0)) and (nt < len(y)):
                nt = nt + 1
            lev1 = np.mean(y[-nt:])
gg = np.abs(y - ((lev0 + lev1) / 2)).argmin()
ftx = y[gg - 2 : gg + 2]
fty = x[gg - 2 : gg + 2]
if ftx[-1] < ftx[0]:
ftx = ftx[::-1]
fty = fty[::-1]
ip = interp1d(ftx, fty, kind="linear")
CEN = ip((lev0 + lev1) / 2)
gg = np.abs(y - (lev1 + (lev0 - lev1) * 0.1195)).argmin()
ftx = y[gg - 2 : gg + 2]
fty = x[gg - 2 : gg + 2]
if ftx[-1] < ftx[0]:
ftx = ftx[::-1]
fty = fty[::-1]
# print " %f %f %f %f %f" % (ftx[0],ftx[1],fty[0],fty[1],lev1+(lev0-lev1)*0.1195)
ip = interp1d(ftx, fty, kind="linear")
H1 = ip((lev1 + (lev0 - lev1) * 0.1195))
# print "H1=%f" % H1
gg = np.abs(y - (lev0 + (lev1 - lev0) * 0.1195)).argmin()
ftx = y[gg - 2 : gg + 2]
fty = x[gg - 2 : gg + 2]
if ftx[-1] < ftx[0]:
ftx = ftx[::-1]
fty = fty[::-1]
# print " %f %f %f %f %f" % (ftx[0],ftx[1],fty[0],fty[1],lev0+(lev1-lev0)*0.1195)
ip = interp1d(ftx, fty, kind="linear")
H2 = ip((lev0 + (lev1 - lev0) * 0.1195))
# print "H2=%f" % abs(H2-H1)
FWHM = abs(H2 - H1)
            if plotpoints_axis:
# plot the found points for center and FWHM edges
plotpoints_axis.plot([x.min(), x.max()], [lev0, lev0], "g--")
plotpoints_axis.plot([x.min(), x.max()], [lev1, lev1], "g--")
plotpoints_axis.plot([H2, H2], [lev0, lev1], "g--")
plotpoints_axis.plot([H1, H1], [lev0, lev1], "g--")
plotpoints_axis.plot([CEN, CEN], [lev0, lev1], "g--")
plotpoints_axis.plot(
[H2, CEN, H1],
[
lev0 + (lev1 - lev0) * 0.1195,
(lev1 + lev0) / 2,
lev1 + (lev0 - lev1) * 0.1195,
],
"gx",
)
except:
CEN = np.nan
FWHM = np.nan
PEAK = np.nan
return (CEN, FWHM, PEAK) | 1f9ea444b09684ac7764ced8ba5ca3fdbd3e8593 | 13,685 |
import os
import time
import pandas
import tables
def run_benchmarks(benchmark_params, test_root, force=False):
"""Run the benchmarks
For every row in benchmark params, run a trace on the input video
using the params specified.
benchmark_params: DataFrame with columns corresponding to keywords
to pass to pipeline_trace. Should have columns 'name',
'input_video', 'chunk_sz_frames', 'epoch_sz_frames',
'frame_start', 'frame_stop', 'n_trace_processes', etc
Returns:
test_results, durations
test_results : Dict from test['name'] to results read from hdf5 file
durations : list of durations taken
"""
WhiskiWrap.utils.probe_needed_commands()
test_results = {}
durations = []
for idx, test in benchmark_params.iterrows():
print(test['name'])
test_dir = os.path.expanduser(os.path.join(test_root, test['name']))
fn = setup_session_directory(test_dir, test['input_video'], force=force)
# Run
start_time = time.time()
WhiskiWrap.pipeline_trace(
fn.video('mp4'),
fn.hdf5,
chunk_sz_frames=test['chunk_sz_frames'],
epoch_sz_frames=test['epoch_sz_frames'],
frame_start=test['frame_start'],
frame_stop=test['frame_stop'],
n_trace_processes=test['n_trace_processes'])
stop_time = time.time()
durations.append(stop_time - start_time)
# Get the summary
with tables.open_file(fn.hdf5) as fi:
test_results[test['name']] = pandas.DataFrame.from_records(
fi.root.summary.read())
return test_results, durations | 03fe6a3ae58ec73e8b85b670827ee4e7bf7f2d1d | 13,686 |
from jams.distributions import sep_fs_mean, sep_fs_std
def sample_sep01(nn, xi=1., beta=0.):
"""
Samples from the skew exponential power distribution with location zero and scale one.
Definition
----------
def sample_sep01(nn, xi=1., beta=0.):
Input
-----
nn number of samples
Optional Input
--------------
xi parameter which controls the skewness
beta parameter which controls the kurtosis
Output
------
Samples from the standardized skew exponential power distribution
Examples
--------
None
Literature
--------
Schoups G & Vrugt JA (2010) A formal likelihood function for parameter and predictive
inference of hydrologic models with correlated, heteroscedastic, and non-Gaussian errors.
Water Resources Research 46, W10531.
--> Steps (6) described on page 5
History
-------
Written, JM, May 2016
"""
SEP_fs = sample_sep01_fs(nn, xi=xi, beta=beta)
# (6) Standardize SEP_fs
mean_sep_fs = sep_fs_mean(xi=xi, beta=beta)
std_sep_fs = sep_fs_std(xi=xi, beta=beta)
sSEP = (SEP_fs - mean_sep_fs) / std_sep_fs # standardized SEP (=Schoups and Vrugt's a_t)
return sSEP | dbeda8efa38db5d55b688c4bfc30350262c39f32 | 13,687 |
import pandas as pd
from pyarrow import feather
def pandas_from_feather(file: str = None) -> pd.DataFrame:
    """ Load a feather file to a pandas DataFrame.
    Uses pyarrow to load a feather file into a [pyarrow.Table](https://arrow.apache.org/docs/python/generated/pyarrow.Table.html) and convert to pandas format.
    Args:
        file (str): the feather file path.
    """
    return feather.read_table(file).to_pandas() | 2bd7679581690095865d9f9d2cae85cf9d736f8d | 13,688
import pwd
import os
def _get_user_name():
"""
Get the current user.
"""
return pwd.getpwuid(os.getuid())[0] | d25549cf12b3dd258446e4d1d7bae386f68139d0 | 13,689 |
def email_coas():
"""
Email certificates of analysis to their recipients.
"""
# Get the certificate data.
# Email links (optional attachments) to the contacts.
    raise NotImplementedError | b09c6650c498618b77a5e0beab0caf63a2cbf99d | 13,690
import jax.numpy as np
from jax import random
"""Implement a dropout layer.
Arguments:
x: np array to be dropped out
key: random.PRNGKey for random bits
keep_rate: dropout rate
Returns:
np array of dropped out x
"""
# The shenanigans with np.where are to avoid having to re-jit if
# keep rate changes.
do_keep = random.bernoulli(key, keep_rate, x.shape)
kept_rates = np.where(do_keep, x / keep_rate, 0.0)
return np.where(keep_rate < 1.0, kept_rates, x) | f9686e64a11e17ca35eefacaa8f0b356cc0f065e | 13,691 |
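# Illustrative usage (added; not part of the original entry): keep roughly 80% of the
# activations, rescaling survivors by 1/keep_rate; keep_rate=1.0 returns x unchanged.
key = random.PRNGKey(0)
x = np.ones((2, 5))
print(dropout(x, key, keep_rate=0.8))
print(dropout(x, key, keep_rate=1.0))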
import matplotlib.pyplot as plt
def band_spd_spin_polarized(
folder,
output='band_spd_sp.png',
scale_factor=2,
order=['s', 'p', 'd'],
color_dict=None,
legend=True,
linewidth=0.75,
band_color='black',
unprojected_band_color='gray',
unprojected_linewidth=0.6,
fontsize=7,
annotations=['$\\uparrow$ ', '$\\downarrow$ '],
annotation_xy=(0.02, 0.98),
figsize=(4, 3),
erange=[-6, 6],
stack='vertical',
hse=False,
kpath=None,
n=None,
save=True,
):
"""
This function generates a spin polarized s, p, d projected band structure. This will plot two plots
    stacked on top of each other or next to each other. The top or left plot will project onto the
spin up bands and the bottom or right plot will project onto the spin down bands.
Parameters:
folder (str): This is the folder that contains the VASP files
output (str): File name of the resulting plot.
scale_factor (float): Factor to scale weights. This changes the size of the
points in the scatter plot
order (list): This determines the order in which the points are plotted on the
graph. This is an option because sometimes certain orbitals can be hidden
under others because they have a larger weight. For example, if the
weights of the d orbitals are greater than that of the s orbitals, it
might be smart to choose ['d', 'p', 's'] as the order so the s orbitals are
plotted over the d orbitals.
color_dict (dict[str][str]): This option allow the colors of the s, p, and d
orbitals to be specified. Should be in the form of:
{'s': <s color>, 'p': <p color>, 'd': <d color>}
legend (bool): Determines if the legend should be included or not.
linewidth (float): Line width of the plain band structure plotted in the background
band_color (str): Color of the plain band structure
unprojected_band_color (str): Color of the unprojected band
unprojected_linewidth (float): Line width of the unprojected bands
annotations (list): Annotations to put on the top and bottom (left and right) figures.
By default it will show the spin up and spin down arrows.
annotation_xy (list / tuple): Fractional (x, y) coordinated of the annotation location
figsize (list / tuple): Desired size of the image in inches (width, height)
erange (list / tuple): Range of energy to show in the plot [low, high]
stack (str): Determines how the plots are stacked (vertical or horizontal)
kpath (str): High symmetry k-point path of band structure calculation
Due to the nature of the KPOINTS file for HSE calculations this
information is a required input for proper labeling of the figure
for HSE calculations. This information is extracted from the KPOINTS
            files for non-HSE calculations. (G is automatically converted to \\Gamma)
n (int): Number of points between each high symmetry points.
This is also only required for HSE calculations. This number should be
known by the user, as it was used to generate the KPOINTS file.
fontsize (float): Font size of the text in the figure.
save (bool): Determines whether to automatically save the figure or not. If not
the figure and axis are return for further manipulation.
Returns:
If save == True, this function will return nothing and directly save the image as
the output name. If save == False, the function will return the matplotlib figure
and axis for further editing. (fig, ax1, ax2)
"""
band_up = Band(
folder=folder,
spin='up',
projected=True,
hse=hse,
kpath=kpath,
n=n,
)
band_down = Band(
folder=folder,
spin='down',
projected=True,
hse=hse,
kpath=kpath,
n=n,
)
if stack == 'vertical':
fig = plt.figure(figsize=(figsize[0], 2 * figsize[1]), dpi=400)
ax1 = fig.add_subplot(211)
ax2 = fig.add_subplot(212)
_figure_setup(ax=ax1, fontsize=fontsize, ylim=[erange[0], erange[1]])
_figure_setup(ax=ax2, fontsize=fontsize, ylim=[erange[0], erange[1]])
elif stack == 'horizontal':
fig = plt.figure(figsize=(2 * figsize[0], figsize[1]), dpi=400)
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122)
_figure_setup(ax=ax1, fontsize=fontsize, ylim=[erange[0], erange[1]])
_figure_setup(ax=ax2, fontsize=fontsize, ylim=[erange[0], erange[1]])
bbox = dict(boxstyle='round', fc='white',
edgecolor='gray', alpha=0.95, pad=0.3)
ax1.annotate(
annotations[0],
xy=annotation_xy,
xycoords='axes fraction',
va='top',
ha='left',
bbox=bbox,
fontsize=fontsize,
)
ax2.annotate(
annotations[1],
xy=annotation_xy,
xycoords='axes fraction',
va='top',
ha='left',
bbox=bbox,
fontsize=fontsize,
)
band_up.plot_spd(
ax=ax1,
scale_factor=scale_factor,
order=order,
color_dict=color_dict,
legend=legend,
linewidth=linewidth,
band_color=band_color,
)
band_down.plot_plain(
ax=ax1,
color=unprojected_band_color,
linewidth=unprojected_linewidth,
)
band_down.plot_spd(
ax=ax2,
scale_factor=scale_factor,
order=order,
color_dict=color_dict,
legend=legend,
linewidth=linewidth,
band_color=band_color,
)
band_up.plot_plain(
ax=ax2,
color=unprojected_band_color,
linewidth=unprojected_linewidth,
)
plt.tight_layout(pad=0.2)
if save:
plt.savefig(output)
else:
return fig, ax1, ax2 | 4cd0ef74a2ad4ce46d28aad296a9156ec91dc301 | 13,692 |
def initial_queries(bo):
"""
script which explores the initial query points of a BayesianOptimization
instance, reports errors to Slack
Input: instance of a BayesianOptimization
"""
# loop to try a second time in case of error
errcount = 0
for i in range(2):
try:
bo.maximize(init_points=3, n_iter=1, kappa=5) # would be just this line without errorhandling
        except KeyboardInterrupt:
raise
except:
if errcount == 1:
text = "Exception occured twice in initialization, aborting!"
print(text)
sc.api_call("chat.postMessage",channel="CA26521FW",
text=text,username="Botty",
unfurl_links="true")
raise
            errcount += 1
return bo | 3419cd89724a23296688f321469a68c8209d2a25 | 13,693 |
import logging
import codecs
def _parse_start_test_log(start_test_log):
"""Parse start_test logfile and return results in python data structure.
:type start_test_log: str
:arg start_test_log: start_test log filename
:rtype: list of dicts
:returns: list of dicts; each dict contains info about a single test case
"""
logging.debug('Parsing start_test log: {0}'.format(start_test_log))
with codecs.open(start_test_log, 'r', encoding='utf-8', errors='ignore') as fp:
start_test_lines = fp.readlines()
logging.debug('Read {0} lines from "{1}".'.format(
len(start_test_lines), start_test_log))
test_cases = []
while len(start_test_lines) > 0:
subtest_start, subtest_end = _get_block(
start_test_lines, '[Starting subtest - ', '[Finished subtest ')
# No more sub_tests; delete the remaining lines and finish up.
if subtest_start == -1:
del start_test_lines[:]
continue
# Copy subtest lines into new list for further processing and delete
# them from start_test_lines.
sub_test_lines = start_test_lines[subtest_start:subtest_end+1:1]
del start_test_lines[:subtest_end+1]
while len(sub_test_lines) > 0:
test_start, test_end = _get_block(
sub_test_lines,
'[test: ',
'[Elapsed time to compile and execute all versions of "')
test_start_skip, test_end_skip = _get_block(
sub_test_lines,
'[test: ',
'[Skipping')
test_skipped = False
if test_end_skip != -1 and (test_end == -1 or test_end_skip < test_end):
test_start, test_end = test_start_skip, test_end_skip
test_skipped = True
noperf_start, noperf_end = _get_block(
sub_test_lines,
'[test: ',
'[Skipping noperf test:')
# If the test was skipped because it did not have performance
# configuration files, drop the lines and continue. We don't
# care about these for performance tests (as opposed to real
# perf tests that are skipped due to environment/etc).
if noperf_end != -1:
del sub_test_lines[noperf_start:noperf_end+1]
continue
# If test_end is still -1 (i.e. not found), look for end of subtest
# call (usually means subtest failed and did not tests).
if test_start != -1 and test_end == -1:
test_start, test_end = _get_block(
sub_test_lines,
'[test: ',
'[Finished subtest "')
if test_end == -1:
raise ValueError('Failed to parse test case from: {0}'.format(
sub_test_lines))
# No more test cases; delete remaining lines and finish up.
if test_start == -1:
del sub_test_lines[:]
continue
# Copy test lines into new list for further processing and delete
# from sub_test_lines.
test_case_lines = sub_test_lines[test_start:test_end+1:1]
del sub_test_lines[:test_end+1]
# Extract test name from "[test: <path to .chpl file>]" line.
classname, test_name = _get_test_name(test_case_lines)
if test_skipped:
test_time = 0.0
error = None
else:
test_time = _get_test_time(test_case_lines)
error = _get_test_error(test_case_lines)
test_content = ''.join(test_case_lines)
test_case = {
'name': test_name,
'classname': classname,
'time': test_time,
'error': error,
'skipped': test_skipped,
'system-out': test_content,
}
test_cases.append(test_case)
logging.info('Parsed {0} test cases from "{1}".'.format(
len(test_cases), start_test_log))
return test_cases | 850f3d3da51208483955f14b29546df3ea9f746d | 13,694 |
import numpy as np
import numpy.linalg as nl
def cell2AB(cell):
"""Computes orthogonalization matrix from unit cell constants
:param tuple cell: a,b,c, alpha, beta, gamma (degrees)
:returns: tuple of two 3x3 numpy arrays (A,B)
A for crystal(x) to Cartesian(X) transformations A*x = np.inner(A,x) =X
B (= inverse of A) for Cartesian to crystal transformation
B*X = np.inner(B,X) = x
in reciprocal space
X* = B.T @ x* or x @ B
A = |ax bx cx| B = |a*x a*y a*z|
|ay by cy| |b*x b*y b*z|
|az bz cz| |c*x c*y c*z|
"""
G, g = cell2Gmat(cell)
cellstar = Gmat2cell(G)
A = np.zeros(shape=(3, 3))
# from Giacovazzo (Fundamentals 2nd Ed.) p.75
A[0, 0] = cell[0] # a
A[0, 1] = cell[1] * cosd(cell[5]) # b cos(gamma)
A[0, 2] = cell[2] * cosd(cell[4]) # c cos(beta)
A[1, 1] = cell[1] * sind(cell[5]) # b sin(gamma)
# - c cos(alpha*) sin(beta)
A[1, 2] = -cell[2] * cosd(cellstar[3]) * sind(cell[4])
A[2, 2] = 1. / cellstar[2] # 1/c*
B = nl.inv(A)
return A, B | 970acf484a701efcdb024e7cad5981ded314209e | 13,695 |
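# Illustrative check (added; not part of the original entry), assuming the elided
# helpers cell2Gmat, Gmat2cell, cosd and sind are available: for a cubic 5 Angstrom
# cell the orthogonalization matrix is 5 times the identity.
A, B = cell2AB((5.0, 5.0, 5.0, 90.0, 90.0, 90.0))
print(np.allclose(A, 5.0 * np.eye(3)))       # -> True
print(np.allclose(np.dot(A, B), np.eye(3)))  # -> True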
from typing import Tuple
def _parse_header(line: bytes) -> Tuple[HeaderLine, bytes]:
"""
Parse the header line of the received input.
:param line:
:return: a tuple of the parsed header and the remaining input that is not
part of the header.
"""
end_index = line.find(b"\r\n")
header, remaining = line[:end_index], line[end_index + 2 :]
del line
if len(header) < 2:
raise exceptions.HeaderParseError("header is too short")
# Determine the status category.
try:
category_value = int(chr(header[0]))
except ValueError:
raise exceptions.HeaderParseError(
f"status category '{chr(header[0])}' is not an integer"
)
try:
category = constants.Category(category_value)
except ValueError:
category = constants.Category.UNKNOWN
# Determine the status detail.
try:
detail_value = int(chr(header[1]))
except ValueError:
raise exceptions.HeaderParseError(
f"status detail '{chr(header[1])}' is not an integer"
)
detail = constants.CATEGORY_TO_DETAILS_MAP[category].get(
detail_value, constants.Detail.UNKNOWN
)
# Determine the meta line, which is the rest of the line.
meta = header[3:].decode()
# TODO: further parsing of the meta line.
return HeaderLine(category, category_value, detail, detail_value, meta), remaining | e68cbcdd5e25e0d4690671cc3be5b4985c8f5c2c | 13,696 |
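# Illustrative usage (added; not part of the original entry), assuming the elided
# constants module maps the two status digits and HeaderLine exposes a `meta` field.
header, rest = _parse_header(b"20 text/gemini\r\nHello")
print(header.meta)  # -> 'text/gemini'
print(rest)         # -> b'Hello'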
def sendMessage(qry):
"""
    Handle message sending: refresh the cached friends list if the query requests an update, otherwise send the message.
:param qry: current query
:return: Status of Message sending.
"""
try: getUserName()
except: return _skypeError()
if(qry == "skype update"):
_writeFriends()
_getAvatars()
return len(_readFriends()).__str__()+" friends found and cached!"
else:
m = qry.partition(": ")
ret = skype("MESSAGE " + m[0]+" "+m[2])
if("SENDING" in ret):
return "Message sent to "+m[0]
else:
return "ERROR sending message to: "+m[0] | c13e187170015d3e9a786ceb7cb9a364928fa8c0 | 13,697 |
import lxml.html
def scrape_detail_page(response):
"""
get detail page info as dict type
"""
root = lxml.html.fromstring(response.content)
ebook = {
'url': response.url,
'title': root.cssselect('#bookTitle')[0].text_content(),
'price': root.cssselect('.buy')[0].text,
'content': [h3.text_content() for h3 in root.cssselect('#content > h3')],
}
return ebook | 5c3b7e743cd109fe2d05e0cc261e46884c673421 | 13,698 |
from tqdm import tqdm
from pathlib import Path
import torch
def reload_from_numpy(device, metadata, reload_dir):
"""Reload the output of voice conversion model."""
conv_mels = []
for pair in tqdm(metadata["pairs"]):
file_path = Path(reload_dir) / pair["mel_path"]
conv_mel = torch.load(file_path)
conv_mels.append(conv_mel.to(device))
return metadata, conv_mels | 7cf5b2c1f12886f8fcded9072a86c53384b93760 | 13,699 |