content (string, lengths 35 to 762k) | sha1 (string, length 40) | id (int64, 0 to 3.66M) |
---|---|---|
def collect_subclasses(mod, cls, exclude=None):
"""Collecting all subclasses of `cls` in the module `mod`
@param mod: `ModuleType` The module to collect from.
@param cls: `type` or (`list` of `type`) The parent class(es).
@keyword exclude: (`list` of `type`) Classes to not include.
"""
out = []
for name in dir(mod):
attr = getattr(mod, name)
        if (
                isinstance(attr, type) and
                (attr not in cls if isinstance(cls, (list, tuple)) else attr != cls) and
                issubclass(attr, tuple(cls) if isinstance(cls, (list, tuple)) else cls) and
                (attr not in exclude if exclude else True)):
            out.append(attr)
return out
|
30e64b93fca4d68c3621cae54bb256350875eb77
| 19,000 |
import typing
from typing import Counter
def check_collections_equivalent(a: typing.Collection, b: typing.Collection,
allow_duplicates: bool = False,
element_converter: typing.Callable = identity) -> typing.Tuple[str, list]:
"""
:param a: one collection to compare
:param b: other collection to compare
:param allow_duplicates: allow collections to contain multiple elements
:param element_converter: optional function to convert elements of collections to a different value
for comparison
:return: (message, differences)
"""
a = Counter(map(element_converter, a))
b = Counter(map(element_converter, b))
if not allow_duplicates:
duplicates = []
for name, counts in [['a', a], ['b', b]]:
for key, count in counts.items():
if count > 1:
duplicates.append([name, key, count])
if duplicates:
return 'Duplicate elements ', ['|'.join(map(str, dup)) for dup in duplicates]
diffs = []
for el in a | b:
ac = a.get(el, 0)
bc = b.get(el, 0)
if ac != bc:
            diffs.append(f'{el} a={ac} b={bc}')
if diffs:
return "Inconsistent element frequencies: ", diffs
return 'Collections equivalent', []
|
61d78f522a6e87927db6b32b46637f0bb6a10513
| 19,001 |
def voting_classifier(*args, **kwargs):
"""
same as in gradient_boosting_from_scratch()
"""
return VotingClassifier(*args, **kwargs)
|
ed92138c23b699672197d1b436773a5250685250
| 19,002 |
import re
def replace_subject_with_object(sent, sub, obj):
"""Replace the subject with object and remove the original subject"""
    sent = re.sub(r'{}'.format(obj), r'', sent, flags=re.IGNORECASE)
    sent = re.sub(r'{}'.format(sub), r'{} '.format(obj), sent, flags=re.IGNORECASE)
    return re.sub(r'\s{2,}', r' ', sent, flags=re.IGNORECASE)
|
1c7f8115968c4e4ef10dcc3b83f0f259433f5082
| 19,003 |
def estimate_using_user_recent(list_type: str, username: str) -> int:
"""
    Estimate the page number of a missing entry (one which was just approved)
    and choose the max page number.
    This requests the user's recently updated list and checks whether there
    are any ids in that list which aren't in the approved cache.
"""
assert list_type in {"anime", "manga"}
logger.info(f"Estimating {list_type}list using {username}")
appr = approved_ids()
recently_updated_ids = user_recently_updated(
list_type=list_type, username=username, offset=0
)
ids = appr.anime if list_type == "anime" else appr.manga
sorted_approved = list(sorted(ids, reverse=True))
missing_approved = []
for aid in recently_updated_ids:
if aid not in ids:
missing_approved.append(aid)
estimate_pages = [_estimate_page(aid, sorted_approved) for aid in missing_approved]
max_page: int
if len(estimate_pages) == 0:
max_page = 0
else:
max_page = max(estimate_pages) + 1
logger.info(f"Estimated {max_page} {list_type} pages for {username}")
return max_page
|
b6a7a5bf6c0fa6e13021f10bf2fe613c4186f430
| 19,004 |
def codegen_reload_data():
"""Parameters to codegen used to generate the fn_html2pdf package"""
reload_params = {"package": u"fn_html2pdf",
"incident_fields": [],
"action_fields": [],
"function_params": [u"html2pdf_data", u"html2pdf_data_type", u"html2pdf_stylesheet"],
"datatables": [],
"message_destinations": [u"fn_html2pdf"],
"functions": [u"fn_html2pdf"],
"phases": [],
"automatic_tasks": [],
"scripts": [],
"workflows": [u"example_html2pdf"],
"actions": [u"Example: HTML2PDF"]
}
return reload_params
|
45a5e974f3e02953a6d121e37c05022c448adae6
| 19,005 |
def instrument_keywords(instrument, caom=False):
"""Get the keywords for a given instrument service
Parameters
----------
instrument: str
The instrument name, i.e. one of ['niriss','nircam','nirspec',
'miri','fgs']
caom: bool
Query CAOM service
Returns
-------
pd.DataFrame
A DataFrame of the keywords
"""
# Retrieve one dataset to get header keywords
sample = instrument_inventory(instrument, return_data=True, caom=caom,
add_requests={'pagesize': 1, 'page': 1})
data = [[i['name'], i['type']] for i in sample['fields']]
keywords = pd.DataFrame(data, columns=('keyword', 'dtype'))
return keywords
|
271f58615dbdbcde4fda9a5248d8ae3b40b90f6d
| 19,006 |
from struct import unpack
from time import mktime, strftime, gmtime
def header_info(data_type, payload):
"""Report additional non-payload in network binary data.
These can be status, time, grapic or control structures"""
# Structures are defined in db_access.h.
    if payload is None:
return ""
data_type = type_name(data_type)
if data_type.startswith("STS_"):
status, severity = unpack(">HH", payload[0:4])
# Expecting status = 0 (normal), severity = 1 (success)
return "{status:%d,severity:%d}" % (status, severity)
elif data_type.startswith("TIME_"):
status, severity = unpack(">HH", payload[0:4])
# The time stamp is represented as two uint32 values. The first is the
# number of seconds passed since 1 Jan 1990 00:00 GMT. The second is the
# number of nanoseconds within the second.
seconds, nanoseconds = unpack(">II", payload[4:12])
offset = mktime((1990, 1, 1, 0, 0, 0, 0, 0, 0)) - mktime(
(1970, 1, 1, 0, 0, 0, 0, 0, 0)
)
t = seconds + nanoseconds * 1e-9 + offset
timestamp = strftime("%Y-%m-%d %H:%M:%S GMT", gmtime(t))
return "{status:%d,severity:%d, timestamp:%s}" % (status, severity, timestamp)
elif data_type.startswith("GR_"):
status, severity = unpack(">HH", payload[0:4])
info = "status:%d,severity:%d, " % (status, severity)
if data_type.endswith("STRING"):
pass
elif data_type.endswith("SHORT"):
unit = payload[8:16].rstrip(b"\0")
limits = unpack("6h", payload[16 : 16 + 6 * 2])
info += "unit=%r,limits=%r" % (unit, limits)
elif data_type.endswith("FLOAT"):
precision, = unpack(">h", payload[4:6])
unit = payload[8:16].rstrip(b"\0")
limits = unpack(">6f", payload[16 : 16 + 6 * 4])
info += "precision=%r,unit=%r,limits=%r" % (precision, unit, limits)
elif data_type.endswith("ENUM"):
nstrings, = unpack(">h", payload[4:6])
strings = payload[6 : 6 + 16 * 26]
info += "nstrings=%r" % nstrings
elif data_type.endswith("CHAR"):
unit = payload[8:16].rstrip(b"\0")
limits = unpack("6b", payload[16 : 16 + 6 * 1])
info += "unit=%r,limits=%r" % (unit, limits)
elif data_type.endswith("LONG"):
unit = payload[8:16].rstrip(b"\0")
limits = unpack("6i", payload[16 : 16 + 6 * 4])
info += "unit=%r,limits=%r" % (unit, limits)
elif data_type.endswith("DOUBLE"):
precision, = unpack(">h", payload[4:6])
unit = payload[8:16].rstrip(b"\0")
limits = unpack(">6d", payload[16 : 16 + 6 * 8])
info += "precision=%r,unit=%r,limits=%r" % (precision, unit, limits)
else:
info += "?"
        info = info.rstrip(", ")
return "{" + info + "}"
elif data_type.startswith("CTRL_"):
status, severity = unpack(">HH", payload[0:4])
info = "status:%d,severity:%d, " % (status, severity)
if data_type.endswith("STRING"):
pass
elif data_type.endswith("SHORT"):
unit = payload[8:16].rstrip(b"\0")
limits = unpack("8h", payload[16 : 16 + 8 * 2])
info += "unit=%r,limits=%r" % (unit, limits)
elif data_type.endswith("FLOAT"):
precision, = unpack(">h", payload[4:6])
unit = payload[8:16].rstrip(b"\0")
limits = unpack(">8f", payload[16 : 16 + 8 * 4])
info += "precision=%r,unit=%r,limits=%r" % (precision, unit, limits)
elif data_type.endswith("ENUM"):
nstrings, = unpack(">h", payload[4:6])
strings = payload[6 : 6 + 16 * 26]
info += "nstrings=%r" % nstrings
elif data_type.endswith("CHAR"):
unit = payload[8:16].rstrip(b"\0")
limits = unpack("8b", payload[16 : 16 + 8 * 1])
info += "unit=%r,limits=%r" % (unit, limits)
elif data_type.endswith("LONG"):
unit = payload[8:16].rstrip(b"\0")
limits = unpack("8i", payload[16 : 16 + 8 * 4])
info += "unit=%r,limits=%r" % (unit, limits)
elif data_type.endswith("DOUBLE"):
precision, = unpack(">h", payload[4:6])
unit = payload[8:16].rstrip(b"\0")
limits = unpack(">8d", payload[16 : 16 + 8 * 8])
info += "precision=%r,unit=%r,limits=%r" % (precision, unit, limits)
else:
info += "?"
info = info.rstrip(", ")
return "{" + info + "}"
return ""
|
6e83be0dff2d7f81a99419baf505e82957518c64
| 19,007 |
def dsa_verify(message, public, signature, constants=None):
"""Checks if the signature (r, s) is correct"""
r, s = signature
p, q, g = get_dsa_constants(constants)
if r <= 0 or r >= q or s <= 0 or s >= q:
return False
w = inverse_mod(s, q)
u1 = (bytes_to_num(sha1_hash(message)) * w) % q
u2 = (r * w) % q
v = ((pow(g, u1, p) * pow(public, u2, p)) % p) % q
return v == r
|
9d6eeb9b5b2d84edd054cba01bdd47cb9bab120e
| 19,008 |
def set_up_cgi():
"""
Return a configured instance of the CGI simulator on RST.
Sets up the Lyot stop and filter from the configfile, turns off science instrument (SI) internal WFE, and reads
the FPM setting from the configfile.
:return: CGI instrument instance
"""
webbpsf.setup_logging('ERROR')
    # Set actuator numbers
mode_in = CONFIG_PASTIS.get('RST', 'mode')
nbactuator = int(CONFIG_PASTIS.get('RST', 'nb_subapertures'))
nbactuator_in = int(np.sqrt(nbactuator))
if nbactuator_in**2 != nbactuator:
error_msg = f"The number of subapertures from config_pastis.ini is {nbactuator}, which is not the square of the actuators per row (={nbactuator_in})!"
log.error(error_msg)
raise ValueError(error_msg)
cgi = webbpsf.roman.CGI(mode=mode_in, nbactuator=int(nbactuator_in))
cgi.include_si_wfe = False
cgi.apodizer = CONFIG_PASTIS.get('RST', 'apodizer')
cgi.fpm = CONFIG_PASTIS.get('RST', 'fpm')
cgi.lyotstop = CONFIG_PASTIS.get('RST', 'lyotstop')
cgi.camera = CONFIG_PASTIS.get('RST', 'camera')
cgi.filter = CONFIG_PASTIS.get('RST', 'filter_name')
return cgi
|
68559e88b9cebb5e2edb049f63d80c9545112804
| 19,009 |
def plot_line(
timstof_data, # alphatims.bruker.TimsTOF object
selected_indices: np.ndarray,
x_axis_label: str,
colorscale_qualitative: str,
title: str = "",
y_axis_label: str = "intensity",
remove_zeros: bool = False,
trim: bool = True,
height: int = 400
) -> go.Figure:
"""Plot an XIC, mobilogram or spectrum as a lineplot.
Parameters
----------
timstof_data : alphatims.bruker.TimsTOF object
An alphatims.bruker.TimsTOF data object.
selected_indices : np.ndarray
The raw indices that are selected for this plot. These are typically obtained by slicing the TimsTOF data object with e.g. data[..., "raw"].
x_axis_label : str
The label of the x-axis. Options are:
- mz
- rt
- mobility
y_axis_label : str
Should not be set for a 1D line plot. Default is "intensity".
title : str
The title of the plot. Default is "".
remove_zeros : bool
If True, zeros are removed. Note that a line plot connects consecutive points, which can lead to misleading plots if non-zeros are removed. If False, use the full range of the appropriate dimension of the timstof_data. Default is False.
trim : bool
If True, zeros on the left and right are trimmed. Default is True.
height : int
Plot height. Default is 400.
Returns
-------
plotly.graph_objects.Figure object
        A line plot showing an XIC, mobilogram or spectrum.
"""
axis_dict = {
"mz": "m/z, Th",
"rt": "RT, min",
"mobility": "Inversed IM, V·s·cm\u207B\u00B2",
"intensity": "Intensity",
}
x_axis_label = axis_dict[x_axis_label]
y_axis_label = axis_dict[y_axis_label]
labels = {
'm/z, Th': "mz_values",
'RT, min': "rt_values",
'Inversed IM, V·s·cm\u207B\u00B2': "mobility_values",
}
x_dimension = labels[x_axis_label]
intensities = timstof_data.bin_intensities(selected_indices, [x_dimension])
if x_dimension == "mz_values":
x_ticks = timstof_data.mz_values
plot_title = "Spectrum"
elif x_dimension == "mobility_values":
x_ticks = timstof_data.mobility_values
plot_title = "Mobilogram"
elif x_dimension == "rt_values":
x_ticks = timstof_data.rt_values / 60
plot_title = "XIC"
non_zeros = np.flatnonzero(intensities)
if len(non_zeros) == 0:
x_ticks = np.empty(0, dtype=x_ticks.dtype)
intensities = np.empty(0, dtype=intensities.dtype)
else:
if remove_zeros:
x_ticks = x_ticks[non_zeros]
intensities = intensities[non_zeros]
elif trim:
start = max(0, non_zeros[0] - 1)
end = non_zeros[-1] + 2
x_ticks = x_ticks[start: end]
intensities = intensities[start: end]
fig = go.Figure()
fig.add_trace(
go.Scatter(
x=x_ticks,
y=intensities,
mode='lines',
            text=[x_axis_label] * len(x_ticks),
hovertemplate='<b>%{text}:</b> %{x};<br><b>Intensity:</b> %{y}.',
name=" ",
marker=dict(color=getattr(px.colors.qualitative, colorscale_qualitative)[0])
)
)
fig.update_layout(
title=dict(
text=plot_title,
font=dict(
size=16,
),
x=0.5,
xanchor='center',
yanchor='top'
),
xaxis=dict(
title=x_axis_label,
titlefont_size=14,
tickmode='auto',
tickfont_size=14,
),
yaxis=dict(
title=y_axis_label,
),
template="plotly_white",
height=height,
hovermode="x"
)
return fig
|
5db0468710e49158c4b13fc56446a23949544e57
| 19,010 |
def all_gather(data):
"""
Run all_gather on arbitrary picklable data (not necessarily tensors)
Args:
data: any picklable object
Returns:
list[data]: list of data gathered from each rank
"""
world_size = get_world_size()
if world_size == 1:
return [data]
data_list = [None] * world_size
dist.all_gather_object(data_list, data)
return data_list
|
46f34975d89766c842b6c20e312ad3dea4f3d7ff
| 19,011 |
import sys
def get_title_count(titles, is_folder):
""" Gets the final title count """
final_title_count = 0
if len(titles.all) == 0:
        if not is_folder:
            sys.exit()
        else:
            return 0
    else:
        for disc_titles in titles.all.values():
            final_title_count += len(disc_titles)
return final_title_count
|
bdd239698f98c845cbecb27924725c38257547b6
| 19,012 |
from ..utils import check_adata
def draw_graph(
adata,
layout=None,
color=None,
alpha=None,
groups=None,
components=None,
legend_loc='right margin',
legend_fontsize=None,
legend_fontweight=None,
color_map=None,
palette=None,
right_margin=None,
size=None,
title=None,
show=None,
save=None,
ax=None):
"""Scatter plot in graph-drawing basis.
Parameters
----------
adata : AnnData
Annotated data matrix.
layout : {'fr', 'drl', ...}, optional (default: last computed)
One of the `draw_graph` layouts, see sc.tl.draw_graph. By default,
the last computed layout is taken.
color : string or list of strings, optional (default: None)
Keys for sample/cell annotation either as list `["ann1", "ann2"]` or
string `"ann1,ann2,..."`.
groups : str, optional (default: all groups)
Restrict to a few categories in categorical sample annotation.
components : str or list of str, optional (default: '1,2')
String of the form '1,2' or ['1,2', '2,3'].
legend_loc : str, optional (default: 'right margin')
Location of legend, either 'on data', 'right margin' or valid keywords
for matplotlib.legend.
legend_fontsize : int (default: None)
Legend font size.
color_map : str (default: `matplotlib.rcParams['image.cmap']`)
String denoting matplotlib color map.
palette : list of str (default: None)
Colors to use for plotting groups (categorical annotation).
right_margin : float or list of floats (default: None)
Adjust the width of the space right of each plotting panel.
size : float (default: None)
Point size.
title : str, optional (default: None)
Provide title for panels either as `["title1", "title2", ...]` or
`"title1,title2,..."`.
show : bool, optional (default: None)
Show the plot.
save : bool or str, optional (default: None)
If True or a str, save the figure. A string is appended to the
default filename.
ax : matplotlib.Axes
A matplotlib axes object.
Returns
-------
matplotlib.Axes object
"""
adata = check_adata(adata)
if layout is None: layout = adata.add['draw_graph_layout'][-1]
if 'X_draw_graph_' + layout not in adata.smp_keys():
raise ValueError('Did not find {} in adata.smp. Did you compute layout {}?'
.format('draw_graph_' + layout, layout))
axs = scatter(
adata,
basis='draw_graph_' + layout,
color=color,
alpha=alpha,
groups=groups,
components=components,
projection='2d',
legend_loc=legend_loc,
legend_fontsize=legend_fontsize,
legend_fontweight=legend_fontweight,
color_map=color_map,
palette=palette,
right_margin=right_margin,
size=size,
title=title,
show=show,
save=save,
ax=ax)
return axs
|
006121b0162afcf6b91703dac8c1ea3e6d1351bc
| 19,013 |
def post_token():
"""
Receives authentication credentials in order to generate an access
token to be used to access protected models. Tokens generated
by this endpoint are JWT Tokens.
"""
# First we verify the request is an actual json request. If not, then we
# responded with a HTTP 400 Bad Request result code.
if not request.is_json:
app.logger.warning('Request without JSON payload received on token endpoint')
return jsonify({"msg": "Only JSON request is supported"}), 400
# Read credentials from json request
params = request.get_json()
# Try to ready username and password properties. If one of them is not found,
# then we generate an error and stop execution.
username = params.get('username', None)
password = params.get('password', None)
if not username:
app.logger.warning('Request without username parameter received on token endpoint')
return jsonify({"msg": "A username parameter must be provided"}), 400
if not password:
app.logger.warning('Request without password parameter received on token endpoint')
return jsonify({"msg": "A password parameter must be provided"}), 400
# If we get here, is because a username and password credentials were
# provided, so now we must verify them.
user = get_user_by_username(username)
if user is not None:
if user.authenticate(password):
# ACCESS TOKEN
access_token_expires = app.config['JWT_ACCESS_TOKEN_VALIDITY_HOURS']
access_token = create_access_token(identity=user.user_id, expires_delta=access_token_expires)
# REFRESH TOKEN
refresh_token_expires = app.config['JWT_REFRESH_TOKEN_VALIDITY_DAYS']
refresh_token = create_refresh_token(identity=user.user_id, expires_delta=refresh_token_expires)
app.logger.info('A new token has been generated for user [' + user.user_id + "]")
return jsonify({
'access_token': access_token,
'expiration': access_token_expires.total_seconds(),
'refresh_token': refresh_token
            }), 200
        else:
            app.logger.warning('Request with invalid password was received')
            return jsonify({"msg": "Invalid username or password"}), 401
    else:
        app.logger.warning('Request with invalid username was received')
        return jsonify({"msg": "Unable to find user with [" + username + "] username"}), 404
|
d51de9aa201fdb0d879190c5f08352b43f425be4
| 19,014 |
def judgement(seed_a, seed_b):
"""Return amount of times last 16 binary digits of generators match."""
sample = 0
count = 0
    while sample < 40000000:
        new_a = seed_a * 16807 % 2147483647
        new_b = seed_b * 48271 % 2147483647
        # Compare only the lowest 16 bits of each generated value.
        if (new_a & 0xFFFF) == (new_b & 0xFFFF):
            count += 1
seed_a = new_a
seed_b = new_b
sample += 1
return count
|
9d778909ba6b04e4ca3adbb542fce9ef89d7b2b7
| 19,015 |
def GJK(shape1, shape2):
""" Implementation of the GJK algorithm
PARAMETERS
----------
shape{1, 2}: Shape
RETURN
------
: bool
Signifies if the given shapes intersect or not.
"""
# Initialize algorithm parameters
direction = Vec(shape1.center, shape2.center).direction
A = support(shape1, shape2, direction)
simplex = [A]
direction = Vec(simplex[0], Point()).direction
while True: # while new valid support found. `direction` is updated each iteration.
B = support(shape1, shape2, direction)
AB = Vec(simplex[0], B)
if dot_vec_dir(AB, direction) <= 0: # No support past the origin
return False
else:
simplex.append(B)
if handle_simplex(simplex, direction):
return True
|
4e3b24ec9fab1d2625c3d99ae3ffc2325c1dcaf8
| 19,016 |
def _set_int_config_parameter(value: OZWValue, new_value: int) -> int:
"""Set a ValueType.INT config parameter."""
try:
new_value = int(new_value)
except ValueError as err:
raise WrongTypeError(
(
f"Configuration parameter type {value.type} does not match "
f"the value type {type(new_value)}"
)
) from err
if (value.max is not None and new_value > value.max) or (
value.min is not None and new_value < value.min
):
raise InvalidValueError(
f"Value {new_value} out of range of parameter (Range: {value.min}-{value.max})"
)
value.send_value(new_value) # type: ignore
return new_value
|
e9e168aa1959dfab141622a0d0f3751a1e042dfd
| 19,017 |
def split_dataset(dataset_file, trainpct):
"""
Split a file containing the full path to individual annotation files into
train and test datasets, with a split defined by trainpct.
Inputs:
- dataset_file - a .txt or .csv file containing file paths pointing to annotation files.
(Expects that these have no header)
- trainpct = 0.8 produces an 80:20 train:test split
"""
if type(dataset_file) is list:
full_dataset = pd.DataFrame(dataset_file, columns=["Filename"])
else:
full_dataset = pd.read_csv(dataset_file, names=["Filename"])
print(
"You've chosen a training percentage of: {} (this variable has type: {})".format(
trainpct, type(trainpct)
)
)
testsize = 1.0 - trainpct
train, test = train_test_split(
full_dataset, test_size=testsize, shuffle=True, random_state=42
) # set the random seed so we get reproducible results!
return train, test
|
0f24d29efdf3645a743bbb6d9e2e27b9087552be
| 19,018 |
def accession(data):
"""
Get the accession for the given data.
"""
return data["mgi_marker_accession_id"]
|
132dcbdd0712ae30ce7929e58c4bc8cdf73aacb2
| 19,019 |
def get_phase_dir(self):
"""Get the phase rotating direction of stator flux stored in LUT
Parameters
----------
self : LUT
a LUT object
Returns
----------
phase_dir : int
rotating direction of phases +/-1
"""
if self.phase_dir not in [-1, 1]:
# recalculate phase_dir from Phi_wind
self.phase_dir = get_phase_dir_DataTime(self.Phi_wind[0])
return self.phase_dir
|
e335f78d6219f0db5a390cf47aaa7aa093f7c329
| 19,020 |
def atomic_number(request):
"""
An atomic number.
"""
return request.param
|
6f1a868c94d0a1ee4c84a76f04b4cabc3e0356e0
| 19,021 |
def plot_metric(title = 'Plot of registration metric vs iterations'):
"""Plots the mutual information over registration iterations
Parameters
----------
title : str
Returns
-------
fig : matplotlib figure
"""
global metric_values, multires_iterations
fig, ax = plt.subplots()
ax.set_title(title)
ax.set_xlabel('Iteration Number', fontsize=12)
ax.set_ylabel('Mutual Information Cost', fontsize=12)
ax.plot(metric_values, 'r')
ax.plot(multires_iterations, [metric_values[index] for index in multires_iterations], 'b*', label = 'change in resolution')
ax.legend()
return fig
|
488d96876a469522263f6c7118b94b35a25e36de
| 19,022 |
import theano.tensor as T
def cross_entropy(model, _input, _target):
""" Compute Cross Entropy between target and output diversity.
Parameters
----------
model : Model
Model for generating output for compare with target sample.
_input : theano.tensor.matrix
Input sample.
_target : theano.tensor.matrix
Target sample.
Returns
-------
theano.tensor.matrix
Return Cross Entropy.
"""
return T.nnet.categorical_crossentropy(model.output(_input), _target).mean()
|
c65efe3185269d8f7132e23abbd517ca9273d481
| 19,023 |
def paste():
"""Paste and redirect."""
text = request.form['text']
# TODO: make this better
assert 0 <= len(text) <= ONE_MB, len(text)
with UploadedFile.from_text(text) as uf:
get_backend().store_object(uf)
lang = request.form['language']
if lang != 'rendered-markdown':
with HtmlToStore.from_html(render_template(
'paste.html',
text=text,
highlighter=get_highlighter(text, lang),
raw_url=app.config['FILE_URL'].format(name=uf.name),
)) as paste_obj:
get_backend().store_html(paste_obj)
else:
with HtmlToStore.from_html(render_template(
'markdown.html',
text=text,
raw_url=app.config['FILE_URL'].format(name=uf.name),
)) as paste_obj:
get_backend().store_html(paste_obj)
url = app.config['HTML_URL'].format(name=paste_obj.name)
return redirect(url)
|
079b9ccda1cd652034ea0f0c2f83e115ecd5f8a4
| 19,024 |
def get_groups_links(groups, tenant_id, rel='self', limit=None, marker=None):
"""
Get the links to groups along with 'next' link
"""
url = get_autoscale_links(tenant_id, format=None)
return get_collection_links(groups, url, rel, limit, marker)
|
35188c3c6d01026153a6e18365ae0b4b596a8883
| 19,025 |
def over(expr: ir.ValueExpr, window: win.Window) -> ir.ValueExpr:
"""Construct a window expression.
Parameters
----------
expr
A value expression
window
Window specification
Returns
-------
ValueExpr
A window function expression
See Also
--------
ibis.window
"""
prior_op = expr.op()
if isinstance(prior_op, ops.WindowOp):
op = prior_op.over(window)
else:
op = ops.WindowOp(expr, window)
result = op.to_expr()
try:
name = expr.get_name()
except com.ExpressionError:
pass
else:
result = result.name(name)
return result
|
e9c8f656403520d5f3287de38c139b8fd8446d13
| 19,026 |
def node_value(node: Node) -> int:
"""
Computes the value of node
"""
if not node.children:
return sum(node.entries)
else:
value = 0
for entry in node.entries:
try:
# Entries start at 1 so subtract all entries by 1
value += node_value(node.children[entry - 1])
except IndexError:
pass
return value
|
c22ac3f73995e138f7eb329499caba3fc67175a5
| 19,027 |
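A worked example may make the recursion clearer. The `Node` stand-in below is a hypothetical namedtuple assumed only for illustration (the original `Node` type is defined elsewhere); `node_value` refers to the snippet above.

from collections import namedtuple

# Hypothetical stand-in for the real Node type; only `children` and `entries` are assumed.
Node = namedtuple("Node", ["children", "entries"])

leaf_a = Node(children=[], entries=[10, 11, 12])        # leaf value = 10 + 11 + 12 = 33
leaf_b = Node(children=[], entries=[99])                # leaf value = 99
root = Node(children=[leaf_a, leaf_b], entries=[1, 1, 2])
# root entries reference children 1, 1 and 2 -> 33 + 33 + 99 = 165
print(node_value(root))  # -> 165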
import re
def load_mac_vendors():
    """ parses wireshark mac address db and returns dict of mac : vendor """
    entries = {}
    # match on first column being the first three octets, e.g. "00:1A:2B"
    r = re.compile(r'^([0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2})$')
    with open('mac_vendors.db', 'r') as f:
        for line in f:
            entry = line.split()
            if len(entry) > 0 and r.match(entry[0]):
                # lowercase as convention
                entries[entry[0].lower()] = entry[1]
    return entries
|
361e9c79de8b473c8757ae63384926d266b68bbf
| 19,028 |
def parse_time(s):
"""
Parse time spec with optional s/m/h/d/w suffix
"""
if s[-1].lower() in secs:
return int(s[:-1]) * secs[s[-1].lower()]
else:
return int(s)
|
213c601143e57b5fe6cd123631c6cd562f2947e9
| 19,029 |
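The `secs` lookup table is not included in the snippet above; the sketch below assumes plausible values for it (they are not taken from the original module) and shows a few calls to `parse_time`.

# Assumed suffix table; the original module defines its own `secs` mapping.
secs = {"s": 1, "m": 60, "h": 3600, "d": 86400, "w": 604800}

print(parse_time("90"))   # -> 90      (no suffix: plain seconds)
print(parse_time("2h"))   # -> 7200
print(parse_time("1w"))   # -> 604800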
def resize_labels(labels, size):
"""Helper function to resize labels.
Args:
labels: A long tensor of shape `[batch_size, height, width]`.
Returns:
A long tensor of shape `[batch_size, new_height, new_width]`.
"""
n, h, w = labels.shape
labels = F.interpolate(labels.view(n, 1, h, w).float(),
size=size,
mode='nearest')
labels = labels.squeeze_(1).long()
return labels
|
87c7127643e9e46878bc526ebed4068d40f25ece
| 19,030 |
import re
def _extract_urls(html):
"""
Try to find all embedded links, whether external or internal
"""
# substitute real html symbols
html = _replace_ampersands(html)
urls = set()
hrefrx = re.compile("""href\s*\=\s*['"](.*?)['"]""")
for url in re.findall(hrefrx, html):
urls.add(str(url))
srcrx = re.compile("""src\s*\=\s*['"](.*?)['"]""")
for url in re.findall(srcrx, html):
urls.add(str(url))
html = re.sub('%20', ' ', html, flags=re.DOTALL)
# extract URLs that are not surrounded by quotes
urlrx = re.compile("""[^'"](http[s]?://[\.a-zA-Z0-9/]+?)\s""")
for url in re.findall(urlrx, html):
urls.add(str(url))
# extract URLs that are surrounded by quotes
# remove whitespace
html = re.sub('\s+', '', html)
urlrx = re.compile("'(http[s]?://[\.a-zA-Z0-9/]+?)'", flags=re.DOTALL)
urlrx = re.compile('"(http[s]?://[\.a-zA-Z0-9/]+?)"', flags=re.DOTALL)
for url in re.findall(urlrx, html):
urls.add(url)
# remove empty string if exists
try:
urls.remove('')
except KeyError:
pass
return sorted(urls)
|
5303cf7b750926aa5919bbfa839bd227319aa9f7
| 19,031 |
def reorganize_data(texts):
"""
    Reorganize data to contain tuples of all signs combined and all trans combined
:param texts: sentences in format of tuples of (sign, tran)
:return: data reorganized
"""
data = []
for sentence in texts:
signs = []
trans = []
for sign, tran in sentence:
signs.append(sign)
trans.append(tran)
data.append((signs, trans))
return data
|
27b4efd99bbf470a9f8f46ab3e34c93c606d0234
| 19,032 |
def client_new():
"""Create new client."""
form = ClientForm(request.form)
if form.validate_on_submit():
c = Client(user_id=current_user.get_id())
c.gen_salt()
form.populate_obj(c)
db.session.add(c)
db.session.commit()
return redirect(url_for('.client_view', client_id=c.client_id))
return render_template(
'invenio_oauth2server/settings/client_new.html',
form=form,
)
|
b355f43cd80e0f7fef3027f5f1d1832c4e4ece5a
| 19,033 |
def query_schema_existence(conn, schema_name):
"""Function to verify whether the current database schema ownership is correct."""
with conn.cursor() as cur:
cur.execute('SELECT EXISTS(SELECT 1 FROM information_schema.schemata WHERE SCHEMA_NAME = %s)',
[schema_name])
return cur.fetchone().exists
|
9c556283d255f580fc69a9e41a4d452d15e1eb17
| 19,034 |
def get_number_of_params(model, trainable_only=False):
"""
Get the number of parameters in a PyTorch Model
:param model(torch.nn.Model):
:param trainable_only(bool): If True, only count the trainable parameters
:return(int): The number of parameters in the model
"""
return int(np.sum([np.prod(param.size()) for param in model.parameters()
if param.requires_grad or (not trainable_only)]))
|
4e02e977e9fc2949a62ce433c9ff6d732d74a746
| 19,035 |
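A quick usage sketch, assuming PyTorch is available and the `get_number_of_params` helper above is in scope; `nn.Linear(3, 2)` carries 3*2 weights plus 2 biases.

import torch.nn as nn

model = nn.Linear(3, 2)                                   # 6 weights + 2 biases = 8 parameters
print(get_number_of_params(model))                        # -> 8
print(get_number_of_params(model, trainable_only=True))   # -> 8 (all parameters require grad here)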
import time
import requests
def chart1(request):
"""
This view tests the server speed for transferring JSON and XML objects.
:param request: The AJAX request
:return: JsonResponse of the dataset.
"""
full_url = HttpRequest.build_absolute_uri(request)
relative = HttpRequest.get_full_path(request)
base_url = full_url[:-len(relative)]
request_amount = ['10', '100', '200', '500', '1000']
json_urls = list()
xml_urls = list()
for x in request_amount:
json_urls.append(reverse('objects:leads_json', args=[x]))
xml_urls.append(reverse('objects:leads_xml', args=[x]))
json_data = list()
xml_data = list()
for x in json_urls:
start = time.perf_counter()
requests.get(base_url + x)
end = time.perf_counter()
json_data.append((end - start))
for x in xml_urls:
start = time.perf_counter()
requests.get(base_url + x)
end = time.perf_counter()
xml_data.append((end - start))
final_data = {
'labels': request_amount,
'datasets': [
{
'label': 'JSON',
'backgroundColor': 'rgba(255, 99, 132, 0.2)',
'borderColor': 'rgba(255,99,132,1)',
'data': json_data,
'borderWidth': 2,
'yAxisID': 'first-y-axis'
},
{
'label': 'XML',
'backgroundColor': 'rgba(54, 162, 235, 0.2)',
'borderColor': 'rgba(54, 162, 235, 1)',
'data': xml_data,
'borderWidth': 2,
'yAxisID': 'first-y-axis'
}
]
}
return JsonResponse(final_data)
|
6eb88d3ef1aed85799832d5751ec4e30c54aaa07
| 19,036 |
def conv3x3(in_planes, out_planes, stride=1, dilation=1):
"""3x3 convolution with padding"""
    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                     padding=dilation, dilation=dilation, bias=False)
|
115dd9e8afdaa850293fa08c103ab2966eceedbf
| 19,037 |
import mimetypes
def img_mime_type(img):
"""Returns image MIME type or ``None``.
Parameters
----------
img: `PIL.Image`
PIL Image object.
Returns
-------
mime_type : `str`
MIME string like "image/jpg" or ``None``.
"""
if img.format:
ext = "." + img.format
return mimetypes.types_map.get(ext.lower())
return None
|
fe46af6e5c03a1ae80cb809c81ab358ac5c085fa
| 19,038 |
import logging
def set_log_level(verbose, match=None, return_old=False):
"""Convenience function for setting the logging level
Parameters
----------
verbose : bool, str, int, or None
The verbosity of messages to print. If a str, it can be either DEBUG,
INFO, WARNING, ERROR, or CRITICAL. Note that these are for
convenience and are equivalent to passing in logging.DEBUG, etc.
For bool, True is the same as 'INFO', False is the same as 'WARNING'.
match : str | None
String to match. Only those messages that both contain a substring
that regexp matches ``'match'`` (and the ``verbose`` level) will be
displayed.
return_old : bool
If True, return the old verbosity level and old match.
Notes
-----
If ``verbose=='debug'``, then the ``vispy`` method emitting the log
message will be prepended to each log message, which is useful for
debugging. If ``verbose=='debug'`` or ``match is not None``, then a
small performance overhead is added. Thus it is suggested to only use
these options when performance is not crucial.
See also
--------
vispy.util.use_log_level
"""
# This method is responsible for setting properties of the handler and
# formatter such that proper messages (possibly with the vispy caller
# prepended) are displayed. Storing log messages is only available
# via the context handler (use_log_level), so that configuration is
# done by the context handler itself.
if isinstance(verbose, bool):
verbose = 'info' if verbose else 'warning'
if isinstance(verbose, str):
verbose = verbose.lower()
if verbose not in logging_types:
raise ValueError('Invalid argument "%s"' % verbose)
verbose = logging_types[verbose]
else:
raise TypeError('verbose must be a bool or string')
logger = logging.getLogger('vispy')
old_verbose = logger.level
old_match = _lh._vispy_set_match(match)
logger.setLevel(verbose)
if verbose <= logging.DEBUG:
_lf._vispy_set_prepend(True)
else:
_lf._vispy_set_prepend(False)
out = None
if return_old:
out = (old_verbose, old_match)
return out
|
62fbf9ad0639625073e6aff23f93389b1b0be24e
| 19,039 |
def check_satisfy_dataset(w, D, involved_predicates=[]):
"""
    This function checks, for every ruler interval of the given window ``w`` and every involved
    predicate, whether each fact that holds in ``D`` over that interval has been installed in ``w``.
Args:
w (a Window instance):
D (dictionary of dictionary object): contain all facts
involved_predicates (a list of str): contain all predicates that are needed to be checked.
Returns:
boolean
"""
for ruler_interval in w.ruler_intervals:
for predicate in involved_predicates:
if type(D[predicate]) == list:
interval_list = D[predicate]
if interval_intesection_intervallist(ruler_interval, interval_list) and Atom(predicate) not \
in w.get_ruler_intervals_literals()[ruler_interval]:
return False
else:
for entity, interval_list in D[predicate].items():
if interval_intesection_intervallist(ruler_interval, interval_list) and Atom(predicate) not \
in w.get_ruler_intervals_literals()[ruler_interval]:
return False
return True
|
b2715ca1eba03bbcf0581fdfeb97177cde8d12d7
| 19,040 |
def interp_at(d, g, varargs=None, dim=None, dask="parallelized"):
"""
Interpolates a variable to another.
Example : varargs = [THETA, mld] : THETA(t, z, y, x) is interpolated with Z=mld(t, y, x)
"""
var, coordvar = varargs
dim = (
dim if dim is not None else set(d[var].dims).difference(d[coordvar].dims).pop()
)
X = d[dim].values
data = xr.apply_ufunc(
_interp1DAt,
d[var],
d[coordvar],
input_core_dims=[[dim], []],
dask=dask,
output_dtypes=[float],
kwargs={"X": X},
keep_attrs=True,
)
data.attrs.update(
long_name=d[var].attrs.get("long_name", var)
+ " interpolated to {} along {}".format(coordvar, dim),
name="{}_{}_{}".format(var, dim, coordvar),
)
return data
|
a31cccee447deb2fd2612460471598d74e347c53
| 19,041 |
def get_history():
    """Get command usage history from History.sublime-project"""
    history_path = '%s/%s/%s' % (sublime.packages_path(),
                                 "TextTransmute",
                                 "History.sublime-project")
    with open(history_path, 'r') as f:
        content = f.readlines()
    return [x.strip() for x in content]
|
fab11f52c2b90d1fb29ace944c8f80f67fc9170e
| 19,042 |
from typing import Dict
from typing import Callable
from typing import Any
from functools import wraps
import asyncio
def inprogress(metric: Gauge, labels: Dict[str, str] = None) -> Callable[..., Any]:
"""
This decorator provides a convenient way to track in-progress requests
(or other things) in a callable.
This decorator function wraps a function with code to track how many
of the measured items are in progress.
The metric is incremented before calling the wrapped function and
decremented when the wrapped function is complete.
:param metric: a metric to increment and decrement. The metric object
being updated is expected to be a Gauge metric object.
:param labels: a dict of extra labels to associate with the metric.
    :return: a coroutine function that wraps the decorated function
"""
if not isinstance(metric, Gauge):
raise Exception(
"inprogess decorator expects a Gauge metric but got: {}".format(metric)
)
def track(func):
"""
This function wraps a decorated callable with metric incremeting
and decrementing logic.
:param func: the callable to be tracked.
:returns: the return value from the decorated callable.
"""
@wraps(func)
async def func_wrapper(*args, **kwds):
metric.inc(labels)
rv = func(*args, **kwds)
if isinstance(rv, asyncio.Future) or asyncio.iscoroutine(rv):
rv = await rv
metric.dec(labels)
return rv
return func_wrapper
return track
|
c10adbb07796c26fd59d1038a786b25b6346fd97
| 19,043 |
from typing import Optional
import base64
def b58_wrapper_to_b64_public_address(b58_string: str) -> Optional[str]:
"""Convert a b58-encoded PrintableWrapper address into a b64-encoded PublicAddress protobuf"""
wrapper = b58_wrapper_to_protobuf(b58_string)
if wrapper:
public_address = wrapper.public_address
public_address_bytes = public_address.SerializeToString()
return base64.b64encode(public_address_bytes).decode("utf-8")
return None
|
a4c46800c0d22ef96d3fa622b6a37bf55e1960da
| 19,044 |
def render_to_AJAX(status, messages):
"""return an HTTP response for an AJAX request"""
xmlc = Context({'status': status,
'messages': messages})
xmlt = loader.get_template("AJAXresponse.xml")
response = xmlt.render(xmlc)
return HttpResponse(response)
|
cb5ad3ead4d5bee9a710767d1f49c81b2b137441
| 19,045 |
def parse_params(environ, *include):
"""Parse out the filter, sort, etc., parameters from a request"""
if environ.get('QUERY_STRING'):
params = parse_qs(environ['QUERY_STRING'])
else:
params = {}
param_handlers = (
('embedded', params_serializer.unserialize_string, None),
('filter', params_serializer.unserialize_string, None),
('sort', params_serializer.unserialize_string, None),
('offset', int, 0),
('limit', int, 0),
('show_hidden', bool_field, False)
)
results = {}
if len(include) > 0:
include = set(include)
else:
include = None
for name, fn, default in param_handlers:
if include and name not in include:
continue
results[name] = parse_param(params, name, fn, default=default)
if not include or 'context' in include:
results['context'] = get_context(environ)
return results
|
ff8d263e4495804e1bb30d0c4da352b2437fc8f8
| 19,046 |
def create_dict_facade_for_object_vars_and_mapping_with_filters(cls, # type: Type[Mapping]
include, # type: Union[str, Tuple[str]]
exclude, # type: Union[str, Tuple[str]]
private_name_prefix=None # type: str
):
# type: (...) -> DictMethods
"""
:param cls:
:param include:
:param exclude:
:param private_name_prefix: if provided, only the fields not starting with this prefix will be exposed. Otherwise
all will be exposed
:return:
"""
public_fields_only = private_name_prefix is not None
def __iter__(self):
"""
Generated by @autodict.
Implements the __iter__ method from collections.Iterable by relying on a filtered vars(self)
:param self:
:return:
"""
myattrs = tuple(att_name for att_name in iterate_on_vars(self))
for att_name in chain(myattrs, (o for o in super(cls, self).__iter__() if o not in myattrs)):
# filter based on the name (include/exclude + private/public)
if is_attr_selected(att_name, include=include, exclude=exclude) and \
(not public_fields_only or not att_name.startswith(private_name_prefix)):
# use that name
yield att_name
def __getitem__(self, key):
"""
Generated by @autodict.
Implements the __getitem__ method from collections.Mapping by relying on a filtered getattr(self, key)
"""
if hasattr(self, key):
key = possibly_replace_with_property_name(self.__class__, key)
if is_attr_selected(key, include=include, exclude=exclude) and \
(not public_fields_only or not key.startswith(private_name_prefix)):
return getattr(self, key)
else:
try:
# noinspection PyUnresolvedReferences
return super(cls, self).__getitem__(key)
except Exception as e:
raise KeyError('@autodict generated dict view - {key} is a '
'hidden field and super[{key}] raises an exception: {etyp} {err}'
''.format(key=key, etyp=type(e).__name__, err=e))
else:
try:
# noinspection PyUnresolvedReferences
return super(cls, self).__getitem__(key)
except Exception as e:
raise KeyError('@autodict generated dict view - {key} is an '
'invalid field name (was the constructor called?). Delegating to '
'super[{key}] raises an exception: {etyp} {err}'
''.format(key=key, etyp=type(e).__name__, err=e))
return DictMethods(iter=__iter__, getitem=__getitem__)
|
cccdc19b43ca269cfedb4f7d6ad1d7b8abba78e1
| 19,047 |
import time
def now():
"""
    Return the current Unix timestamp (in seconds).
:return:
"""
return int(time.time())
|
39c05a695bfe4239ebb3fab6f3a5d0967bea6820
| 19,048 |
def get_yourContactINFO(rows2):
"""
Function that returns your personal contact info details
"""
yourcontactINFO = rows2[0]
return yourcontactINFO
|
beea815755a2e6817fb57a37ccc5aa479455bb81
| 19,049 |
def hafnian(
A, loop=False, recursive=True, rtol=1e-05, atol=1e-08, quad=True, approx=False, num_samples=1000
): # pylint: disable=too-many-arguments
"""Returns the hafnian of a matrix.
For more direct control, you may wish to call :func:`haf_real`,
:func:`haf_complex`, or :func:`haf_int` directly.
Args:
A (array): a square, symmetric array of even dimensions.
loop (bool): If ``True``, the loop hafnian is returned. Default is ``False``.
recursive (bool): If ``True``, the recursive algorithm is used. Note:
the recursive algorithm does not currently support the loop hafnian.
If ``loop=True``, then this keyword argument is ignored.
rtol (float): the relative tolerance parameter used in ``np.allclose``.
atol (float): the absolute tolerance parameter used in ``np.allclose``.
quad (bool): If ``True``, the hafnian algorithm is performed with quadruple precision.
approx (bool): If ``True``, an approximation algorithm is used to estimate the hafnian. Note that
the approximation algorithm can only be applied to matrices ``A`` that only have non-negative entries.
num_samples (int): If ``approx=True``, the approximation algorithm performs ``num_samples`` iterations
for estimation of the hafnian of the non-negative matrix ``A``.
Returns:
np.int64 or np.float64 or np.complex128: the hafnian of matrix A.
"""
# pylint: disable=too-many-return-statements,too-many-branches
input_validation(A, rtol=rtol, atol=atol)
matshape = A.shape
if matshape == (0, 0):
return 1
if matshape[0] % 2 != 0 and not loop:
return 0.0
if np.allclose(np.diag(np.diag(A)), A, rtol=rtol, atol=atol):
if loop:
return np.prod(np.diag(A))
return 0
if matshape[0] % 2 != 0 and loop:
A = np.pad(A, pad_width=((0, 1), (0, 1)), mode="constant")
A[-1, -1] = 1.0
matshape = A.shape
if matshape[0] == 2:
if loop:
return A[0, 1] + A[0, 0] * A[1, 1]
return A[0][1]
if matshape[0] == 4:
if loop:
result = (
A[0, 1] * A[2, 3]
+ A[0, 2] * A[1, 3]
+ A[0, 3] * A[1, 2]
+ A[0, 0] * A[1, 1] * A[2, 3]
+ A[0, 1] * A[2, 2] * A[3, 3]
+ A[0, 2] * A[1, 1] * A[3, 3]
+ A[0, 0] * A[2, 2] * A[1, 3]
+ A[0, 0] * A[3, 3] * A[1, 2]
+ A[0, 3] * A[1, 1] * A[2, 2]
+ A[0, 0] * A[1, 1] * A[2, 2] * A[3, 3]
)
return result
return A[0, 1] * A[2, 3] + A[0, 2] * A[1, 3] + A[0, 3] * A[1, 2]
if approx:
if np.any(np.iscomplex(A)):
raise ValueError("Input matrix must be real")
if np.any(A < 0):
raise ValueError("Input matrix must not have negative entries")
    if np.issubdtype(A.dtype, np.complexfloating):
# array data is complex type
if np.any(np.iscomplex(A)):
# array values contain non-zero imaginary parts
return haf_complex(A, loop=loop, recursive=recursive, quad=quad)
# all array values have zero imaginary parts
return haf_real(np.float64(A.real), loop=loop, recursive=recursive, quad=quad)
if np.issubdtype(A.dtype, np.integer) and not loop:
# array data is an integer type, and the user is not
# requesting the loop hafnian
return haf_int(np.int64(A))
if np.issubdtype(A.dtype, np.integer) and loop:
# array data is an integer type, and the user is
# requesting the loop hafnian. Currently no
# integer function for loop hafnians, have to instead
# convert to float and use haf_real
A = np.float64(A)
return haf_real(
A, loop=loop, recursive=recursive, quad=quad, approx=approx, nsamples=num_samples
)
|
0e90f1d372bf7be636ae8eb333d2c59a387b58f1
| 19,050 |
def filter_out_nones(data):
"""
Filter out any falsey values from data.
"""
return (l for l in data if l)
|
39eb0fb7aafe799246d231c5a7ad8a150ed4341e
| 19,051 |
import IPython
import IPython.display
def start(args_string):
"""Launch and display a TensorBoard instance as if at the command line.
Args:
args_string: Command-line arguments to TensorBoard, to be
interpreted by `shlex.split`: e.g., "--logdir ./logs --port 0".
Shell metacharacters are not supported: e.g., "--logdir 2>&1" will
point the logdir at the literal directory named "2>&1".
"""
context = _get_context()
    try:
        import IPython
        import IPython.display
    except ImportError:
        IPython = None
if context == _CONTEXT_NONE:
handle = None
print("Launching TensorBoard...")
else:
handle = IPython.display.display(
IPython.display.Pretty("Launching TensorBoard..."),
display_id=True,
)
def print_or_update(message):
if handle is None:
print(message)
else:
handle.update(IPython.display.Pretty(message))
parsed_args = shlex.split(args_string, comments=True, posix=True)
start_result = manager.start(parsed_args)
if isinstance(start_result, manager.StartLaunched):
_display(
port=start_result.info.port,
print_message=False,
display_handle=handle,
)
elif isinstance(start_result, manager.StartReused):
template = (
"Reusing TensorBoard on port {port} (pid {pid}), started {delta} ago. "
"(Use '!kill {pid}' to kill it.)"
)
message = template.format(
port=start_result.info.port,
pid=start_result.info.pid,
delta=_time_delta_from_info(start_result.info),
)
print_or_update(message)
_display(
port=start_result.info.port,
print_message=False,
display_handle=None,
)
elif isinstance(start_result, manager.StartFailed):
def format_stream(name, value):
if value == "":
return ""
elif value is None:
return "\n<could not read %s>" % name
else:
return "\nContents of %s:\n%s" % (name, value.strip())
message = (
"ERROR: Failed to launch TensorBoard (exited with %d).%s%s"
% (
start_result.exit_code,
format_stream("stderr", start_result.stderr),
format_stream("stdout", start_result.stdout),
)
)
print_or_update(message)
elif isinstance(start_result, manager.StartExecFailed):
the_tensorboard_binary = (
"%r (set by the `TENSORBOARD_BINARY` environment variable)"
% (start_result.explicit_binary,)
if start_result.explicit_binary is not None
else "`tensorboard`"
)
if start_result.os_error.errno == errno.ENOENT:
message = (
"ERROR: Could not find %s. Please ensure that your PATH contains "
"an executable `tensorboard` program, or explicitly specify the path "
"to a TensorBoard binary by setting the `TENSORBOARD_BINARY` "
"environment variable." % (the_tensorboard_binary,)
)
else:
message = "ERROR: Failed to start %s: %s" % (
the_tensorboard_binary,
start_result.os_error,
)
print_or_update(textwrap.fill(message))
elif isinstance(start_result, manager.StartTimedOut):
message = (
"ERROR: Timed out waiting for TensorBoard to start. "
"It may still be running as pid %d." % start_result.pid
)
print_or_update(message)
else:
raise TypeError(
"Unexpected result from `manager.start`: %r.\n"
"This is a TensorBoard bug; please report it." % start_result
)
|
dae8ed95b7989af185b5681dfef79fcbd5d354ab
| 19,052 |
def BytesToGb(size):
"""Converts a disk size in bytes to GB."""
if not size:
return None
if size % constants.BYTES_IN_ONE_GB != 0:
raise calliope_exceptions.ToolException(
'Disk size must be a multiple of 1 GB. Did you mean [{0}GB]?'
.format(size // constants.BYTES_IN_ONE_GB + 1))
return size // constants.BYTES_IN_ONE_GB
|
49bc846ce6887fd47ac7be70631bddd8353c72ed
| 19,053 |
def build_report(drivers: dict, desc=False) -> [[str, str, str], ...]:
"""
Creates a race report: [[Driver.name, Driver.team, Driver.time], ...]
Default order of drivers from best time to worst.
"""
sorted_drivers = sort_drivers_dict(drivers, desc)
return [driver.get_stats for driver in sorted_drivers.values()]
|
c8c84319c0a14867b21d09c8b66ea434d13786aa
| 19,054 |
def add_sites_sheet(ws, cols, lnth):
"""
"""
for col in cols:
cell = "{}1".format(col)
ws[cell] = "='Capacity_km2_MNO'!{}".format(cell)
for col in cols[:2]:
for i in range(2, lnth):
cell = "{}{}".format(col, i)
ws[cell] = "='Capacity_km2_MNO'!{}".format(cell)
for col in cols[2:]:
for i in range(2, lnth):
cell = "{}{}".format(col, i)
part1 = "=MIN(IF('Lookups'!$H$3:$H$250>'Data_km2'!{}".format(cell)
part2 = ",'Lookups'!$E$3:$E$250))*Area!{}".format(cell)
ws[cell] = part1 + part2
ws.formula_attributes[cell] = {'t': 'array', 'ref': "{}:{}".format(cell, cell)}
columns = ['C','D','E','F','G','H','I','J','K','L']
ws = format_numbers(ws, columns, (1, 200), 'Comma [0]', 0)
set_border(ws, 'A1:L{}'.format(lnth-1), "thin", "000000")
return ws
|
c1db2b585021e8eef445963f8c300d726600e12a
| 19,055 |
import itertools
def testBinaryFile(filePath):
"""
Test if a file is in binary format
    :param filePath(str): File Path
:return:
"""
file = open(filePath, "rb")
#Read only a couple of lines in the file
binaryText = None
for line in itertools.islice(file, 20):
if b"\x00" in line:
#Return to the beginning of the binary file
file.seek(0)
#Read the file in one step
binaryText = file.read()
break
file.close()
#Return the result
return binaryText
|
809a962881335ce0a3a05e341a13b413c381fedf
| 19,056 |
import psutil
import sys
import os
import subprocess
def launch_experiment(
script,
run_slot,
affinity_code,
log_dir,
variant,
run_ID,
args,
python_executable=None,
set_egl_device=False,
):
"""Launches one learning run using ``subprocess.Popen()`` to call the
python script. Calls the script as:
``python {script} {slot_affinity_code} {log_dir} {run_ID} {*args}``
If ``affinity_code["all_cpus"]`` is provided, then the call is prepended
    with ``taskset -c ..`` and the listed cpus (this is the surest way to
keep the run limited to these CPU cores). Also saves the `variant` file.
Returns the process handle, which can be monitored.
Use ``set_egl_device=True`` to set an environment variable
``EGL_DEVICE_ID`` equal to the same value as the cuda index for the
algorithm. For example, can use with DMControl environment modified
to look for this environment variable when selecting a GPU for headless
rendering.
"""
slot_affinity_code = prepend_run_slot(run_slot, affinity_code)
affinity = affinity_from_code(slot_affinity_code)
pp = psutil.Process()
    available_cpus = pp.cpu_affinity()
    all_cpus = tuple([available_cpus[this_cpu % len(available_cpus)] for this_cpu in affinity['all_cpus']])
    affinity['all_cpus'] = affinity['master_cpus'] = all_cpus
    workers_cpus = tuple([tuple([available_cpus[this_cpu % len(available_cpus)] for this_cpu in this_worker_cpus]) for this_worker_cpus in affinity['workers_cpus']])
    affinity['workers_cpus'] = workers_cpus
call_list = list()
if isinstance(affinity, dict) and affinity.get("all_cpus", False):
cpus = ",".join(str(c) for c in affinity["all_cpus"])
elif isinstance(affinity, list) and affinity[0].get("all_cpus", False):
cpus = ",".join(str(c) for aff in affinity for c in aff["all_cpus"])
else:
cpus = ()
if cpus:
call_list += ["taskset", "-c", cpus] # PyTorch obeys better than just psutil.
py = python_executable if python_executable else sys.executable or "python"
call_list += [py, script, "-a",slot_affinity_code,"-d", log_dir,"-i", str(run_ID)]
call_list += [str(a) for a in args]
save_variant(variant, log_dir)
print("\ncall string:\n", " ".join(call_list))
if set_egl_device and affinity.get("cuda_idx", None) is not None:
egl_device_id = str(affinity["cuda_idx"])
egl_env = os.environ.copy()
egl_env["EGL_DEVICE_ID"] = egl_device_id
print(f"Assigning EGL_DEVICE_ID={egl_device_id}")
p = subprocess.Popen(call_list, env=egl_env)
else:
p = subprocess.Popen(call_list)
return p
|
29acb675619943d88424231fbfb9d574e5fc03b2
| 19,057 |
def dup_max_norm(f, K):
"""
Returns maximum norm of a polynomial in ``K[x]``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> R.dup_max_norm(-x**2 + 2*x - 3)
3
"""
if not f:
return K.zero
else:
return max(dup_abs(f, K))
|
1ba4744781a3f5cb8e71b59bc6588579f88a6d43
| 19,058 |
def greedy_algorithm(pieces, material_size):
"""Implementation of the First-Fit Greedy Algorithm
Inputs:
pieces - list[] of items to place optimally
material_size - length of Boards to cut from, assumes unlimited supply
Output:
Optimally laid out BoardCollection.contents, which is a list[] of Boards"""
bc = BoardCollection()
bc.append(Board(material_size))
    pieces.sort(reverse=True)  # sort in descending order (largest pieces first)
# we must copy pieces, else our actual list will get modified
for piece in pieces.copy():
piece_added = False # for recording state: did we add this piece to BoardCollection yet?
# if piece fits, add it on that Board, remove it from the list, mark it as such and break out of for loop
for board in bc.contents:
if board.space_remaining >= piece:
board.insert(piece)
pieces.remove(piece)
piece_added = True
break
# if it hasn't been added yet, make a new Board and put it there
if piece_added is False:
bc.append(Board(material_size))
bc.last.insert(piece)
pieces.remove(piece)
return bc.contents
|
f42b2372b50385c693765d65614d73a8b21f496b
| 19,059 |
def start_tv_session(hypes):
"""
Run one evaluation against the full epoch of data.
Parameters
----------
hypes : dict
Hyperparameters
Returns
-------
tuple
(sess, saver, summary_op, summary_writer, threads)
"""
# Build the summary operation based on the TF collection of Summaries.
summary_op = tf.merge_all_summaries()
# Create a saver for writing training checkpoints.
if 'keep_checkpoint_every_n_hours' in hypes['solver']:
kc = hypes['solver']['keep_checkpoint_every_n_hours']
else:
kc = 10000.0
saver = tf.train.Saver(max_to_keep=utils.cfg.max_to_keep,
keep_checkpoint_every_n_hours=kc)
# Create a session for running Ops on the Graph.
sess = tf.Session()
# Run the Op to initialize the variables.
init = tf.initialize_all_variables()
sess.run(init)
# Start the queue runners.
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
# Instantiate a SummaryWriter to output summaries and the Graph.
summary_writer = tf.train.SummaryWriter(hypes['dirs']['output_dir'],
graph=sess.graph)
return sess, saver, summary_op, summary_writer, coord, threads
|
f54cfa17871badf2cbe92200144a758d0fc2dc41
| 19,060 |
def factor_size(value, factor):
"""
Factors the given thumbnail size. Understands both absolute dimensions
and percentages.
"""
if type(value) is int:
size = value * factor
return str(size) if size else ''
if value[-1] == '%':
value = int(value[:-1])
return '{0}%'.format(value * factor)
size = int(value) * factor
return str(size) if size else ''
|
41b061fb368d56ba18b52cd7a6a3322292671d83
| 19,061 |
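A few illustrative calls (assuming the `factor_size` above is in scope), covering both the absolute and percentage branches:

print(factor_size(100, 2))    # -> '200'   (absolute int dimension)
print(factor_size('50%', 2))  # -> '100%'  (percentages are scaled and stay percentages)
print(factor_size('75', 2))   # -> '150'   (numeric string without '%')
print(factor_size(0, 3))      # -> ''      (zero sizes collapse to an empty string)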
def categoryProfile(request, pk):
"""
Displays the profile of a :class:`gestion.models.Category`.
pk
The primary key of the :class:`gestion.models.Category` to display profile.
"""
category = get_object_or_404(Category, pk=pk)
return render(request, "gestion/category_profile.html", {"category": category})
|
87318332c7e317a49843f5becda8196951743ce5
| 19,062 |
def eoms(_x, t, _params):
"""Rigidy body equations of motion.
_x is an array/list in the following order:
q1: Yaw q2: Lean |-(Euler 3-1-2 angles used to orient A
q3: Pitch /
q4: N[1] displacement of mass center.
q5: N[2] displacement of mass center.
q6: N[3] displacement of mass center.
u1: A[1] measure number of angular velocity
u2: A[2] measure number of angular velocity
u3: A[3] measure number of angular velocity
u4: N[1] velocity of mass center.
u5: N[2] velocity of mass center.
u6: N[3] velocity of mass center.
_params is an array/list in the following order:
m: Mass of first pendulum point mass.
g: Gravitational constant.
I11: Principal moment of inertia about A[1]
I22: Principal moment of inertia about A[2]
I33: Principal moment of inertia about A[3]
"""
# Unpack function arguments
q1, q2, q3, q4, q5, q6, u1, u2, u3, u4, u5, u6 = _x
# Unpack function parameters
m, g, I11, I22, I33 = _params
# Trigonometric functions
c2 = cos(q2)
c3 = cos(q3)
s3 = sin(q3)
t2 = tan(q2)
# Calculate return values
q1d = c3*u3/c2 - s3*u1/c2
q2d = c3*u1 + s3*u3
q3d = s3*t2*u1 - c3*t2*u3 + u2
q4d = u4
q5d = u5
q6d = u6
u1d = (I22 - I33)*u2*u3/I11
u2d = (I33 - I11)*u1*u3/I22
u3d = -(I22 - I11)*u1*u2/I33
u4d = 0
u5d = 0
u6d = g
# Return calculated values
return [q1d, q2d, q3d, q4d, q5d, q6d, u1d, u2d, u3d, u4d, u5d, u6d]
|
3868411e2c082617311f59f3b39deec9d3a370fa
| 19,063 |
def memoize_with_hashable_args(func):
"""Decorator for fast caching of functions which have hashable args.
Note that it will convert np.NaN to None for caching to avoid this common
case causing a cache miss.
"""
_cached_results_ = {}
hash_override = getattr(func, "__hash_override__", None)
if hash_override is None:
hash_override = get_hash(func)
@wraps(func)
def memoized(*args):
try:
lookup_args = tuple(x if pd.notnull(x) else None for x in args)
res = _cached_results_[lookup_args]
except KeyError:
res = func(*args)
_cached_results_[lookup_args] = res
return res
memoized._cached_results_ = _cached_results_ # pylint: disable=protected-access
memoized.__hash_override__ = hash_override
return memoized
|
b5e55b35042688d9131e05e36a56bf0f6515f336
| 19,064 |
def orthogonalize(vec1, vec2):
"""Given two vectors vec1 and vec2, project out the component of vec1
that is along the vec2-direction.
@param[in] vec1 The projectee (i.e. output is some modified version of vec1)
@param[in] vec2 The projector (component subtracted out from vec1 is parallel to this)
@return answer A copy of vec1 but with the vec2-component projected out.
"""
v2u = vec2/np.linalg.norm(vec2)
return vec1 - v2u*np.dot(vec1, v2u)
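
# Usage sketch (not part of the original record): the returned vector has no
# component along vec2, so its dot product with vec2 is (numerically) zero.
v1 = np.array([1.0, 2.0, 3.0])
v2 = np.array([0.0, 0.0, 1.0])
v1_perp = orthogonalize(v1, v2)          # -> array([1., 2., 0.])
assert abs(np.dot(v1_perp, v2)) < 1e-12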
|
aceca85edfc6ed4a6c3b21cb169f993b0b30c889
| 19,065 |
import re
def tokenize(text):
"""
    Function to process text data, taking the following steps:
    1) normalization and punctuation removal: convert to lower case and remove punctuation
    2) tokenization: splitting each sentence into a sequence of words
    3) stop word removal: removal of words which do not add meaning to the sentence
    4) lemmatization: reducing words to their root form
    Args:
    text (str): string with message
    Returns:
    clean_tokens: list of cleaned tokens from the message
"""
# normalize case and remove punctuation
text = re.sub(r"[^a-zA-Z0-9]", " ", text.lower())
    # tokenize text and initiate the lemmatizer
tokens = word_tokenize(text)
lemmatizer = WordNetLemmatizer()
# remove stopwords
tokens = [w for w in tokens if w not in stopwords.words('english')]
# iterate through each token
clean_tokens = []
for tok in tokens:
# lemmatize and remove leading/ trailing white space
clean_tok = lemmatizer.lemmatize(tok).strip()
clean_tokens.append(clean_tok)
return clean_tokens
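
# Usage sketch (not part of the original record); assumes the NLTK 'punkt',
# 'stopwords' and 'wordnet' corpora have been downloaded.
tokens = tokenize("Water is needed urgently in the flooded district!")
# e.g. ['water', 'needed', 'urgently', 'flooded', 'district']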
|
769949bc2d4a23d4064b8addcaa7dbfc29346549
| 19,066 |
def extract_text(bucketname, filepath):
"""Return OCR data associated with filepaths"""
textract = boto3.client('textract')
response = textract.detect_document_text(
Document={
'S3Object': {
'Bucket': bucketname,
'Name': filepath
}
})
return response
|
fde7c1bc99003bf8f094538d402a66b6d0c8bcb4
| 19,067 |
def box1_input(input):
"""uses above to return input to player 1"""
return get_input(1, input)
|
c619be2f73fc124eb3198c27542af014eeffd3f8
| 19,068 |
def _get_xy_from_geometry(df):
"""
Return a numpy array with two columns, where the
first holds the `x` geometry coordinate and the second
column holds the `y` geometry coordinate
"""
# NEW: use the centroid.x and centroid.y to support Polygon() and Point() geometries
x = df.geometry.centroid.x
y = df.geometry.centroid.y
return np.column_stack((x, y))
|
6a1345607d3c75190dd9fd22ea45aad82901282a
| 19,069 |
def create_styled_figure(
title,
name=None,
tooltips=None,
plot_width=PLOT_WIDTH,
):
"""Return a styled, empty figure of predetermined height and width.
Args:
title (str): Title of the figure.
name (str): Name of the plot for later retrieval by bokeh. If not given the
title is set as name
tooltips (list, optional): List of bokeh tooltips to add to the figure.
Returns:
fig (bokeh Figure)
"""
assert plot_width is not None
name = name if name is not None else title
fig = figure(
plot_height=PLOT_HEIGHT,
plot_width=plot_width,
title=title.title(),
tooltips=tooltips,
name=name,
y_axis_type="linear",
sizing_mode="scale_width",
)
fig.title.text_font_size = "15pt"
# set minimum borders
fig.min_border_left = MIN_BORDER_LEFT
fig.min_border_right = MIN_BORDER_RIGHT
fig.min_border_top = MIN_BORDER_TOP
fig.min_border_bottom = MIN_BORDER_BOTTOM
# remove toolbar
fig.toolbar_location = TOOLBAR_LOCATION
# remove grid
fig.grid.visible = GRID_VISIBLE
# remove minor ticks
fig.axis.minor_tick_line_color = MINOR_TICK_LINE_COLOR
# remove tick lines
fig.axis.major_tick_out = MAJOR_TICK_OUT
fig.axis.major_tick_in = MAJOR_TICK_IN
# remove outline
fig.outline_line_width = OUTLINE_LINE_WIDTH
return fig
|
caeb7eb887d84c5e1ebaf83b01a71ce15917a27f
| 19,070 |
def render_text(string, padding=5, width=None, height=None,
size=12, font="Arial", fgcolor=(0, 0, 0), bgcolor=None):
"""
Render text to an image and return it
Not specifying bgcolor will give a transparent image, but that will take a *lot* more work to build.
Specifying a bgcolor, width, and height will heavily optimize things.
"""
actor = text.text_actor(string, fgcolor, size, font)
if bgcolor is None:
mask = True
# Set it to the opposite of fgcolor so we can mask using it
        bgcolor = (1 - fgcolor[0], 1 - fgcolor[1], 1 - fgcolor[2])
else:
mask = False
lines = string.split("\n")
if width is None:
# EM is defined as the square of the line height, and is the guide for making fonts
# We can use that as an upper bound (assuming font size is ~ line
# height)
width = size * max([len(s) for s in lines])
if height is None:
height = size * len(lines)
image = actor_to_image(actor, bgcolor, width, height)
if mask:
image = mask_color(image, bgcolor)
image = crop_blank_space(image)
width, height, _ = image.GetDimensions()
return pad_image(
image, pad_width=width + padding * 2, pad_height=height + padding * 2)
else:
return image
|
9402aa9cbcbb920b73723d74b944f5940db9e0e0
| 19,071 |
def pretty_spectrogram(d,log = True, thresh= 5, fft_size = 512, step_size = 64):
"""
creates a spectrogram
    log: take the log of the spectrogram
thresh: threshold minimum power for log spectrogram
"""
specgram = np.abs(stft(d, fftsize=fft_size, step=step_size, real=False,
compute_onesided=True))
if log == True:
specgram /= specgram.max() # volume normalize to max 1
specgram = np.log10(specgram) # take log
specgram[specgram < -thresh] = -thresh # set anything less than the threshold as the threshold
else:
specgram[specgram < thresh] = thresh # set anything less than the threshold as the threshold
return specgram
|
eaae2893944df28dcefb607bac3e8648db265acf
| 19,072 |
def check_for_win(position, board, player):
"""
check for wins on 3x3 board on rows,cols,diag,anti-diag
args: position (int 1-9, user input)
board (np.array 2d)
player ("X" or "O")
"""
#initialize win to False
win = False
#check win on rows
for row in board:
if np.all(row==player):
win = True
#check win on cols (index 0,1,2)
for i in range(3):
if(np.all(board[:,i]==player)):
win = True
#check win on diagonals
if np.all(board.diagonal()==player):
win = True
#check win on anti-diagonals
if np.all(np.fliplr(board).diagonal()==player):
win = True
return win
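
# Usage sketch (not part of the original record): a 3x3 board with a winning
# top row for "X"; the position argument is not used by the win checks.
board = np.array([["X", "X", "X"],
                  ["O", " ", "O"],
                  [" ", " ", " "]])
assert check_for_win(1, board, "X")
assert not check_for_win(1, board, "O")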
|
fad580912f3a281ce605fd743732481915c352ea
| 19,073 |
def wrap_http_exception(app: FastAPI):
"""
https://doc.acrobits.net/api/client/intro.html#web-service-responses
"""
@app.exception_handler(StarletteHTTPException)
async def http_exception_handler(request, exc):
return JSONResponse({'message': exc.detail}, exc.status_code)
|
b43eea9b59eb50eaefd3d52a5569d6952518f61b
| 19,074 |
import random
def perm_2sample(group1, group2, nrand=10000, tail=0, paired=True):
    # Taken from JW's functions
"""
non-parametric permutation test (Efron & Tibshirani, 1998)
tail = 0 (test A~=B), 1 (test A>B), -1 (test A<B)
"""
a = group1
b = group2
ntra = len(a)
ntrb = len(b)
meana = np.mean(a)
meanb = np.mean(b)
triala = np.zeros(nrand)
trialb = np.zeros(nrand)
if paired:
for i in range(nrand):
alldat = np.vstack((a,b)).T
for j in range(ntra):
alldat[j,:] = alldat[j,np.argsort(np.random.rand(2))]
triala[i] = alldat[:,0].mean()
trialb[i] = alldat[:,1].mean()
else:
alldat = np.concatenate((a,b))
indices = np.arange(alldat.shape[0])
for i in range(nrand):
random.shuffle(indices)
triala[i] = np.mean(alldat[indices[:ntra]])
trialb[i] = np.mean(alldat[indices[ntra:]])
if tail == 0:
p_value = sum(abs(triala-trialb)>=abs(meana-meanb)) / float(nrand)
else:
p_value = sum((tail*(triala-trialb))>=(tail*(meana-meanb))) / float(nrand)
return(meana-meanb, p_value)
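
# Usage sketch (not part of the original record): two-sided paired permutation
# test on small illustrative samples.
a = np.array([1.2, 0.9, 1.5, 1.1, 1.3])
b = np.array([1.0, 0.8, 1.4, 1.0, 1.2])
mean_diff, p_value = perm_2sample(a, b, nrand=1000, tail=0, paired=True)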
|
afb3c56d277c583eeb34089bcd808e7e6e662ec7
| 19,075 |
def softmax_op(node):
""" This function computes its softmax along an axis.
Parameters:
----
node : Node
Input variable.
Returns:
----
A new Node instance created by Op.
"""
return SoftmaxOp()(node)
|
42004658214b7b7c083d40fe43393f1cf450175b
| 19,076 |
def full_chain():
"""
:return: Returns entire blockchain in memory (current_chain.blockchain)
"""
response = {
'chain': current_chain.blockchain,
'length': len(current_chain.blockchain),
}
return response, 200
|
0161195e3dc28b9157ee824f8d3103029f058498
| 19,077 |
def normalizer(x, mi, ma, eps=1e-20, dtype=np.float32):
"""
    Normalize an image patch between the given minimum and maximum percentile values
Parameters
----------
x : np array of Image patch
mi : minimum input percentile value
ma : maximum input percentile value
eps: avoid dividing by zero
    dtype: type of numpy array, float32 by default
"""
if dtype is not None:
x = x.astype(dtype, copy=False)
mi = dtype(mi) if np.isscalar(mi) else mi.astype(dtype, copy=False)
ma = dtype(ma) if np.isscalar(ma) else ma.astype(dtype, copy=False)
eps = dtype(eps)
x = (x - mi) / (ma - mi + eps)
x = normalizeZeroOne(x)
return x
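
# Usage sketch (not part of the original record): percentile-based normalization of
# a random patch; normalizeZeroOne is assumed to be defined in the original module.
patch = np.random.rand(64, 64).astype(np.float32) * 255
mi, ma = np.percentile(patch, 3), np.percentile(patch, 99.8)
patch_norm = normalizer(patch, mi, ma)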
|
0f39a4d02bc3f3897d0b2070567a45eb68ade2d4
| 19,078 |
def do(args):
""" Main Entry Point. """
build_worktree = qibuild.parsers.get_build_worktree(args)
sourceme = build_worktree.generate_sourceme()
print(sourceme)
return sourceme
|
d706fe7f8a277f58dd8f184212851d087b7890d1
| 19,079 |
import json
import traceback
def policy_action(module,
state=None,
policy_name=None,
policy_arn=None,
policy_document=None,
path=None,
description=None):
"""
Execute the actions needed to bring the policy into the specified state.
Args:
module (obj): Ansible module
state (str): Ansible state - 'present' | 'absent'
policy_name (str): Policy name. One and only one of policy name or policy ARN must be given.
policy_arn (str): Policy ARN. One and only one of policy name or policy ARN must be given.
policy_document(dict): JSON policy document
path (str): Policy path
description (str): Policy description. Defaults to 'policy_name'
Returns:
Success:
(bool) changed, (dict) policy object (see boto3.get_policy docs)
Failure:
Invokes module.fail_json with suitable text at point of error
"""
changed = False
policy = None
error = {}
if state == 'present':
try:
if isinstance(policy_document, dict):
policy_document = json.dumps(policy_document)
response = policy_m.create_policy(
policy_name=policy_name,
path=path,
policy_document=policy_document,
description=description)
if 'error' in response:
error = response['error']
else:
if response['state'] == 'New':
changed = True
policy = response['policy']
except Exception as e:
module.fail_json(msg='policy action {0} failed: {1} {2}'.format('present', e,traceback.format_exc()))
elif state == 'absent':
try:
response = policy_m.delete_policy(
policy_name=policy_name,
path=path)
if 'error' in response:
error = response['error']
else:
changed = True
policy = response['policy']
except Exception as e:
module.fail_json(msg='policy action {0} failed: {1} {2}'.format('absent', e,traceback.format_exc()))
else:
error = {"error": "state must be either 'present' or 'absent'"}
if error:
module.fail_json(msg='policy action failed: {0}'.format(error))
return changed, policy
|
5da4c4649170e81569cc3e77fa102fd0043cebd9
| 19,080 |
def GET_v1_metrics_location(days=1):
"""Return some data about the locations users have reported from.
"""
if days > 7:
days = 7
from_time = f'-{days}d'
locations = fetch_graphite_sum('*.geoip.*', from_time=from_time)
return jsonify(locations=locations)
|
e9765f338c1adbbb46e203024b7637af45e2f217
| 19,081 |
def add_default_to_usage_help(
usage_help: str, default: str or int or float or bool
) -> str:
"""Adds default value to usage help string.
Args:
usage_help (str):
usage help for click option.
        default (str or int or float or bool):
            default value for the click option.
Returns:
str:
New usage_help value.
"""
if default is not None:
return f"{usage_help} default={default}"
return usage_help
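
# Usage sketch (not part of the original record): appending defaults when
# building click option help strings.
assert add_default_to_usage_help("Number of worker threads.", 4) == \
    "Number of worker threads. default=4"
assert add_default_to_usage_help("Optional output path.", None) == \
    "Optional output path."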
|
a40cf9a68f18beeafcb965c51e0329b4e8216fb4
| 19,082 |
def describe_deformation(el_disps, bfg):
"""
Describe deformation of a thin incompressible 2D membrane in 3D
space, composed of flat finite element faces.
The coordinate system of each element (face), i.e. the membrane
mid-surface, should coincide with the `x`, `y` axes of the `x-y`
plane.
Parameters
----------
el_disps : array
The displacements of element nodes, shape `(n_el, n_ep, dim)`.
bfg : array
The in-plane base function gradients, shape `(n_el, n_qp, dim-1,
n_ep)`.
Returns
-------
    mtx_c : array
The in-plane right Cauchy-Green deformation tensor
:math:`C_{ij}`, :math:`i, j = 1, 2`.
c33 : array
The component :math:`C_{33}` computed from the incompressibility
condition.
mtx_b : array
The discrete Green strain variation operator.
"""
sh = bfg.shape
n_ep = sh[3]
dim = el_disps.shape[2]
sym2 = dim2sym(dim-1)
# Repeat el_disps by number of quadrature points.
el_disps_qp = insert_strided_axis(el_disps, 1, bfg.shape[1])
# Transformed (in-plane) displacement gradient with
# shape (n_el, n_qp, 2 (-> a), 3 (-> i)), du_i/dX_a.
du = dot_sequences(bfg, el_disps_qp)
# Deformation gradient F w.r.t. in plane coordinates.
# F_{ia} = dx_i / dX_a,
# a \in {1, 2} (rows), i \in {1, 2, 3} (columns).
mtx_f = du + nm.eye(dim - 1, dim, dtype=du.dtype)
# Right Cauchy-Green deformation tensor C.
# C_{ab} = F_{ka} F_{kb}, a, b \in {1, 2}.
mtx_c = dot_sequences(mtx_f, mtx_f, 'ABT')
# C_33 from incompressibility.
c33 = 1.0 / (mtx_c[..., 0, 0] * mtx_c[..., 1, 1]
- mtx_c[..., 0, 1]**2)
# Discrete Green strain variation operator.
mtx_b = nm.empty((sh[0], sh[1], sym2, dim * n_ep), dtype=nm.float64)
mtx_b[..., 0, 0*n_ep:1*n_ep] = bfg[..., 0, :] * mtx_f[..., 0, 0:1]
mtx_b[..., 0, 1*n_ep:2*n_ep] = bfg[..., 0, :] * mtx_f[..., 0, 1:2]
mtx_b[..., 0, 2*n_ep:3*n_ep] = bfg[..., 0, :] * mtx_f[..., 0, 2:3]
mtx_b[..., 1, 0*n_ep:1*n_ep] = bfg[..., 1, :] * mtx_f[..., 1, 0:1]
mtx_b[..., 1, 1*n_ep:2*n_ep] = bfg[..., 1, :] * mtx_f[..., 1, 1:2]
mtx_b[..., 1, 2*n_ep:3*n_ep] = bfg[..., 1, :] * mtx_f[..., 1, 2:3]
mtx_b[..., 2, 0*n_ep:1*n_ep] = bfg[..., 1, :] * mtx_f[..., 0, 0:1] \
+ bfg[..., 0, :] * mtx_f[..., 1, 0:1]
mtx_b[..., 2, 1*n_ep:2*n_ep] = bfg[..., 0, :] * mtx_f[..., 1, 1:2] \
+ bfg[..., 1, :] * mtx_f[..., 0, 1:2]
mtx_b[..., 2, 2*n_ep:3*n_ep] = bfg[..., 0, :] * mtx_f[..., 1, 2:3] \
+ bfg[..., 1, :] * mtx_f[..., 0, 2:3]
return mtx_c, c33, mtx_b
|
e748a8ecf3cc369fb03ba835b6f0c762eeafbf07
| 19,083 |
import re
def remove_emails(text):
"""Returns A String with the emails removed """
result = re.sub(EMAIL_REGEX, "", text)
return result
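
# Usage sketch (not part of the original record); EMAIL_REGEX is defined elsewhere
# in the original module, e.g. a pattern along the lines of r"\S+@\S+\.\S+".
text = "Contact me at [email protected] for details."
cleaned = remove_emails(text)   # the address is stripped from the sentence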
|
08d30119f5e32a92c3df3a7b5612ba99390e7df9
| 19,084 |
def _fit_model_residual_with_radial(lmparams, star, self, interpfunc):
"""Residual function for fitting individual profile parameters
:param lmparams: lmfit Parameters object
:param star: A Star instance.
:param self: PSF instance
:param interpfunc: The interpolation function
:returns chi: Chi of observed pixels to model pixels
"""
    all_params = list(lmparams.valuesdict().values())  # materialize so it can be sliced below
flux, du, dv = all_params[:3]
params = all_params[3:]
prof = self.getProfile(params)
image, weight, image_pos = star.data.getImage()
# use for getting drawprofile
star.fit.flux = flux
star.fit.center = (du, dv)
star_drawn = drawProfile(self, star, prof, params, use_fit=True, interpfunc=interpfunc)
image_model = star_drawn.image
chi = (np.sqrt(weight.array) * (image_model.array - image.array)).flatten()
return chi
|
77f4031dbf7522236c36a8e096c965766df16ccb
| 19,085 |
def _whoami():
# type: () -> Tuple[str,str]
"""
Return the current operating system account as (username, fullname)
"""
username = getuser()
fullname = username
if GET_PW_NAM:
pwnam = getpwnam(username)
if pwnam:
fullname = pwnam.pw_gecos.split(",", 1)[0]
return (username, fullname)
|
508189e4798e83425b754ad6cde617a3b0d1ec9f
| 19,086 |
from typing import Sequence
def text_set_class(
set_class: Sequence,
) -> str:
"""Converts a set class into a string representing its interval vector.
"""
id_dict = {0: "one",
1: "two",
2: "three",
3: "four",
4: "five",
5: "six"}
result = ""
for i, el in enumerate(interval_vector(set_class)):
for _ in range(el):
result += id_dict[i] + " "
return result.rstrip()
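
# Usage sketch (not part of the original record); interval_vector is assumed to be
# defined in the original module. A major triad has interval vector [0, 0, 1, 1, 1, 0],
# so the expected output would be "three four five".
print(text_set_class([0, 4, 7]))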
|
f430ddf4b32f64f37df5fb9ec984ce5a809b09a1
| 19,087 |
def Span_read(stream):
"""Read a span from an 88.1 protocol stream."""
start = Address_read(stream)
width = Offset_read(stream)
return Span(start, width)
|
0bbd5d62a1111dd056a939ee272ea200e1f7abd9
| 19,088 |
def temp_database(tmpdir_factory):
""" Initalize the Database """
tmpdb = str(tmpdir_factory.mktemp('temp'))+"/testdb.sqlite"
return tmpdb
|
5cfcb27e6ac76766e21a1612691dbe79d1713abd
| 19,089 |
import ast
def get_classes(pyfile_path):
"""
    Get the classes defined inside a Python file.
    :param str pyfile_path: name of the file to inspect
    :return: a list with all the classes defined inside the Python file
    :rtype: list
.. code-block:: python
>> get_classes('./data.py')
['Module', 'PythonFile']
"""
with open(pyfile_path, 'r') as f:
inspection = ast.parse(f.read())
return [class_.name for class_ in inspection.body if isinstance(class_, ast.ClassDef)]
|
72f376d10fd02574085a0236e10ea8901033ebd0
| 19,090 |
from typing import List
def transpose_outer_dimensions(outer_dimensions: ST_Type, diff_dimensions: ST_Type, ports_to_transpose: List) -> Kind:
"""
Transpose the outer dimensions of a set of ports, move them inside the diff dimensions. The outer dimensions
that are sseqs are the same for all elements, so treat as inner dimensions.
:param outer_dimensions: The outer dimensions that need to be moved inside
:param diff_dimensions: The dimensions that need to be moved outside
:param ports_to_transpose: The ports
:return:
"""
# always remove tseqs as they don't affect the magma types
num_outer_dimensions = num_nested_layers(remove_tseqs(outer_dimensions))
num_diff_dimensions = num_nested_layers(remove_tseqs(diff_dimensions))
# these are the indexes of the dimensions on the untransposed type
outer_dimensions_indexes_untransposed = list(range(num_outer_dimensions))
diff_dimensions_indexes_untransposed = list(range(num_outer_dimensions, num_outer_dimensions + num_diff_dimensions))
sseq_dims_transposed = diff_dimensions_indexes_untransposed + outer_dimensions_indexes_untransposed
# performing the transpose with blockers added so right dimensions not converted
ports_to_transpose_with_block = add_blocker(ports_to_transpose, len(sseq_dims_transposed))
orig_arr = np.asarray(ports_to_transpose_with_block)
transposed_arr = orig_arr.transpose(sseq_dims_transposed)
transposed_list_with_blocks = transposed_arr.tolist()
return remove_blocker(transposed_list_with_blocks)
|
ca51943223bbca58f871a9cb4c6b296ae941e87d
| 19,091 |
def pad_or_clip_nd(tensor, output_shape):
"""Pad or Clip given tensor to the output shape.
Args:
tensor: Input tensor to pad or clip.
output_shape: A list of integers / scalar tensors (or None for dynamic dim)
representing the size to pad or clip each dimension of the input tensor.
Returns:
Input tensor padded and clipped to the output shape.
"""
tensor_shape = tf.shape(tensor)
clip_size = [
tf.where(tensor_shape[i] - shape > 0, shape, -1)
if shape is not None else -1 for i, shape in enumerate(output_shape)
]
clipped_tensor = tf.slice(
tensor,
begin=tf.zeros(len(clip_size), dtype=tf.int32),
size=clip_size)
# Pad tensor if the shape of clipped tensor is smaller than the expected
# shape.
clipped_tensor_shape = tf.shape(clipped_tensor)
trailing_paddings = [
shape - clipped_tensor_shape[i] if shape is not None else 0
for i, shape in enumerate(output_shape)
]
paddings = tf.stack(
[
tf.zeros(len(trailing_paddings), dtype=tf.int32),
trailing_paddings
],
axis=1)
padded_tensor = tf.pad(clipped_tensor, paddings=paddings)
output_static_shape = [
dim if not isinstance(dim, tf.Tensor) else None for dim in output_shape
]
padded_tensor.set_shape(output_static_shape)
return padded_tensor
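
# Usage sketch (not part of the original record): padding a 2x3 tensor out to a
# static 4x5 shape; extra entries are zero-filled.
t = tf.constant([[1, 2, 3],
                 [4, 5, 6]])
padded = pad_or_clip_nd(t, output_shape=[4, 5])   # static shape (4, 5)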
|
a22b6872b4af4424411d26232af0c21fda7c55df
| 19,092 |
def dynamic_lstm(x, n_neuron, act_fn=tanh, seq_len=None):
""" assert x is batch_major, aka [batch, time, ...] """
cell_class = lstm
with tf.variable_scope("fw"):
cell_fw = cell_class(n_neuron, activation=act_fn, cell_clip=15.0)
o, s = tf.nn.dynamic_rnn(
cell_fw, x, seq_len, dtype=tf.float32)
return o, s
|
4f2df8155281e3664d5d8521918af5c84a211a34
| 19,093 |
def ho2ax_single(ho):
"""Conversion from a single set of homochoric coordinates to an
un-normalized axis-angle pair :cite:`rowenhorst2015consistent`.
Parameters
----------
ho : numpy.ndarray
1D array of (x, y, z) as 64-bit floats.
Returns
-------
ax : numpy.ndarray
1D array of (x, y, z, angle) as 64-bit floats.
Notes
-----
This function is optimized with Numba, so care must be taken with
array shapes and data types.
"""
# Constants stolen directly from EMsoft
# fmt: off
fit_parameters = np.array([
0.9999999999999968, -0.49999999999986866, -0.025000000000632055,
-0.003928571496460683, -0.0008164666077062752, -0.00019411896443261646,
-0.00004985822229871769, -0.000014164962366386031, -1.9000248160936107e-6,
-5.72184549898506e-6, 7.772149920658778e-6, -0.00001053483452909705,
9.528014229335313e-6, -5.660288876265125e-6, 1.2844901692764126e-6,
1.1255185726258763e-6, -1.3834391419956455e-6, 7.513691751164847e-7,
-2.401996891720091e-7, 4.386887017466388e-8, -3.5917775353564864e-9
])
# fmt: on
ho_magnitude = np.sum(ho**2)
if (ho_magnitude > -1e-8) and (ho_magnitude < 1e-8):
ax = np.array([0, 0, 1, 0], dtype=np.float64)
else:
# Convert the magnitude to the rotation angle
hom = ho_magnitude
s = fit_parameters[0] + fit_parameters[1] * hom
for i in nb.prange(2, 21):
hom = hom * ho_magnitude
s = s + fit_parameters[i] * hom
hon = ho / np.sqrt(ho_magnitude)
s = 2 * np.arccos(s)
if np.abs(s - np.pi) < 1e-8: # pragma: no cover
ax = np.append(hon, np.pi)
else:
ax = np.append(hon, s)
return ax
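
# Usage sketch (not part of the original record): the origin of homochoric space
# maps to the identity rotation, returned as the axis-angle pair (0, 0, 1, 0).
ax = ho2ax_single(np.zeros(3, dtype=np.float64))
# -> array([0., 0., 1., 0.])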
|
50ec25eb488ea894f6a5c6a00feab27b93954200
| 19,094 |
import json
def task_export_commit(request):
"""提交导出任务"""
try:
datas = json.loads(request.body.decode())
taskSetting = PlTaskSetting.objects.get(id=datas["params"]["id"])
try:
exportJob = PlExportJob.objects.get(task_setting_id=taskSetting.id)
except ObjectDoesNotExist:
exportJob = PlExportJob(task_setting_id=taskSetting.id, run_time=timezone.now() - timedelta(weeks=100))
        exportJob.save()  # Save first so the row is guaranteed to exist, because the conditional update below targets it and guards against concurrency issues
        if 0 != exportJob.status:
            return response(-3, message="An export task has already been submitted; please stop it first")
        # Perform the update; the conditional update is used to prevent concurrent submissions
updateRows = PlExportJob.objects.filter(task_setting_id=taskSetting.id, status=0).update(
status = 1,
req_stop = 0,
process = 0,
worker_name = "",
download_addr = "",
task_setting_info = json.dumps(model_to_dict(taskSetting)),
export_setting_info = json.dumps(datas["params"]["setting"])
)
if updateRows <= 0:
            return response(-4, message="Update failed")
result = response()
except ObjectDoesNotExist:
        result = response(-1, message="The monitoring task does not exist; it may have been deleted.")
except DatabaseError:
        result = response(-2, message="Database query error")
return result
|
4ec543af62e9abab9194b22cf54e22de2551ce24
| 19,095 |
def getExceptionMessage(exceptionDetails: dict) -> str:
"""Get exception message from `exceptionDetails` object."""
exception = exceptionDetails.get('exception')
if exception:
return exception.get('description')
message = exceptionDetails.get('text', '')
stackTrace = exceptionDetails.get('stackTrace', dict())
if stackTrace:
for callframe in stackTrace.get('callFrames'):
location = (
str(callframe.get('url', '')) + ':' +
str(callframe.get('lineNumber', '')) + ':' +
str(callframe.get('columnNumber'))
)
functionName = callframe.get('functionName', '<anonymous>')
message = message + f'\n at {functionName} ({location})'
return message
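
# Usage sketch (not part of the original record) with a hypothetical CDP-style
# exceptionDetails payload that has no 'exception' object, only a stack trace.
details = {
    'text': 'Uncaught',
    'stackTrace': {
        'callFrames': [{
            'url': 'http://example.com/app.js',
            'lineNumber': 12,
            'columnNumber': 3,
            'functionName': 'doWork',
        }],
    },
}
print(getExceptionMessage(details))
# -> "Uncaught" followed by "at doWork (http://example.com/app.js:12:3)"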
|
ba3d15aa383de9f55600a72ba113c37fd042d3a4
| 19,096 |
def evaluate_by_net(net, input_fn, **kwargs):
"""encapsulate evaluate
"""
ret = evaluate(
graph=net.graph, sess=net.session,
fea_ph=net.features_ph, label_ph=net.labels_ph, outputs=net.outputs,
input_fn=input_fn, **kwargs
)
return ret
|
a58b80b5e93dbb4251eb71393f8a9f70ddff813b
| 19,097 |
import array
def ordinate(values,maxrange,levels):
"""Ordinate values given a maximum data range and number of levels
Parameters:
1. values: an array of continuous values to ordinate
2. maxrange: the maximum data range. Values larger than this will be saturated.
3. levels: the number of levels at which values are ordinated
"""
quantizer=lambda dist,maxrange,levels: int(1.0*max(1,dist-1)*levels/maxrange)+1
    if type(values)==list or type(values)==tuple or type(values)==array.array:
ordinated=[]
for v in values:
if v==0:
ordinated.append(v)
else:
ordinated.append(quantizer(v,maxrange,levels))
return ordinated
else:
if values==0:
return values
else:
return quantizer(values,maxrange,levels)
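
# Usage sketch (not part of the original record): quantizing distances into
# five levels over a maximum range of 100; zeros are passed through unchanged.
assert ordinate([0, 10, 55, 100], maxrange=100, levels=5) == [0, 1, 3, 5]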
|
4db4a26579d9208cd90ec630cf82e54a4a7ec3fe
| 19,098 |
def is_start_state(state):
"""
Checks if the given state is a start state.
"""
return (state.g_pos.value == 0) and (state.theta.value == 'N')
|
0f58e7a193533ba3d5db15c4e79ed98e190fa1be
| 19,099 |