content (string, 35–762k chars) | sha1 (string, 40 chars) | id (int64, 0–3.66M) |
---|---|---|
def extract_keys(keys, dic, drop=True):
"""
Extract the given keys from a dictionary and return a dictionary with the
extracted values.
Keys missing from the input dictionary are also absent from the output.
If ``drop`` is true, the extracted keys are removed from the input dictionary.
"""
out = {}
for k in keys:
try:
if drop:
out[k] = dic.pop(k)
else:
out[k] = dic[k]
except KeyError:
pass
return out | 15a66fff5207df18d8ece4959e485068f1bd3c9c | 21,466 |
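A quick usage sketch (with made-up values) showing how missing keys are skipped and how drop controls whether the source dict is modified:
d = {"a": 1, "b": 2, "c": 3}
print(extract_keys(["a", "z"], d))         # {'a': 1}; the missing key "z" is skipped
print(d)                                   # {'b': 2, 'c': 3}; "a" was popped because drop=True
print(extract_keys(["b"], d, drop=False))  # {'b': 2}; d is left untouched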
from flask import current_app
from invenio_app_ils.proxies import current_app_ils
from werkzeug.routing import Rule
def jsonresolver_loader(url_map):
"""Resolve the referred EItems for a Document record."""
def eitems_resolver(document_pid):
"""Search and return the EItems that reference this Document."""
eitems = []
eitem_search = current_app_ils.eitem_search_cls()
for hit in eitem_search.search_by_document_pid(document_pid).scan():
eitem = hit.to_dict()
eitems.append({
"pid": eitem.get("pid"),
"description": eitem.get("description"),
"internal_notes": eitem.get("internal_notes"),
"open_access": eitem.get("open_access"),
"bucket_id": eitem.get("bucket_id", None),
"files": eitem.get("files", []),
})
return {
"total": len(eitems),
"hits": eitems
}
url_map.add(
Rule(
"/api/resolver/documents/<document_pid>/eitems",
endpoint=eitems_resolver,
host=current_app.config.get("JSONSCHEMAS_HOST"),
)
) | 9da05e92850cbdbedb8d49cdf2cdf3763d0b1ab6 | 21,467 |
import sqlite3
def getStations(options, type):
"""Query stations by specific type ('GHCND', 'ASOS', 'COOP', 'USAF-WBAN')
"""
conn = sqlite3.connect(options.database)
c = conn.cursor()
if type == "ALL":
c.execute("select rowid, id, name, lat, lon from stations")
else:
c.execute("select rowid, id, name, lat, lon from stations where type = ?",(type,))
stations = []
for r in c:
stations.append(r)
conn.close()
return stations | 59d45a79542e68cd691cf848f3d4fe250389732c | 21,468 |
def test_name(request):
"""Returns (module_name, function_name[args]) for a given test"""
return (
request.module.__name__,
request._parent_request._pyfuncitem.name, # pylint: disable=protected-access
) | 4ef40de8a2c917c0b12cb83db9fd39f6b59777a0 | 21,469 |
import textwrap
def inputwrap(x, ARG_indented: bool=False, ARG_end_with: str=" "):
"""Textwrapping for regular 'input' commands.
Parameters
----------
x
The text to be wrapped.
ARG_indented : bool (default is 'False')
Whether or not the textwrapped string should be indented.
ARG_end_with : str (default is ' ')
The string that the textwrapped string will end with.
Returns
-------
str
User input.
"""
if ARG_indented is True:
_input = input(textwrap.fill(x, width=70, subsequent_indent=" ") + ARG_end_with)
return _input
else:
_input = input(textwrap.fill(x, width=70) + ARG_end_with)
return _input | af0ab3b69205965b40d3e03bdcfe3148889f7080 | 21,470 |
import numpy as np
import matplotlib.pyplot as plt
from .utils import phys_size
def SBP_single(ell_fix, redshift, pixel_scale, zeropoint, ax=None, offset=0.0,
x_min=1.0, x_max=4.0, alpha=1, physical_unit=False, show_dots=False, show_grid=False,
show_banner=True, vertical_line=None, linecolor='firebrick', linestyle='-',
linewidth=3, labelsize=25, ticksize=30, label='SBP', labelloc='lower left'):
"""Display the 1-D profiles, without showing PA and ellipticity.
Parameters:
ell_fix: astropy Table or numpy table, should be the output of IRAF ELLIPSE.
redshift (float): redshift of the object.
pixel_scale (float): pixel scale in arcsec/pixel.
zeropoint (float): zeropoint of the photometry system.
ax (``matplotlib.pyplot.axes`` object): The user could provide axes on which the figure will be drawn.
offset (float): offset of single surface brightness profile, in the unit of ``count``.
x_min (float): Minimum value of x-axis, in ``$x^{1/4}$`` scale.
x_max (float): Maximum value of x-axis, in ``$x^{1/4}$`` scale.
alpha (float): transparency.
physical_unit (bool): If true, the figure will be shown in physical scale.
show_dots (bool): If true, it will show all the data points.
show_grid (bool): If true, it will show a grid.
vertical_line (list of floats): positions of vertical lines. Maximum length is three.
linecolor (str): Color of surface brightness profile.
linestyle (str): Style of surface brightness profile. Could be "--", "-.", etc.
label (string): Label of surface brightness profile.
Returns:
ax: If the input ``ax`` is not ``None``.
"""
if ax is None:
fig = plt.figure(figsize=(10, 10))
fig.subplots_adjust(left=0.0, right=1.0,
bottom=0.0, top=1.0,
wspace=0.00, hspace=0.00)
ax1 = fig.add_axes([0.08, 0.07, 0.85, 0.88])
ax1.tick_params(direction='in')
else:
ax1 = ax
ax1.tick_params(direction='in')
# Calculate physical size at this redshift
phys_scale = phys_size(redshift, verbose=False)
# 1-D profile
if physical_unit is True:
x = ell_fix['sma'] * pixel_scale * phys_scale
y = -2.5 * np.log10((ell_fix['intens'] + offset) / (pixel_scale)**2) + zeropoint
y_upper = -2.5 * np.log10((ell_fix['intens'] + offset + ell_fix['int_err']) / (pixel_scale)**2) + zeropoint
y_lower = -2.5 * np.log10((ell_fix['intens'] + offset - ell_fix['int_err']) / (pixel_scale)**2) + zeropoint
upper_yerr = y_lower - y
lower_yerr = y - y_upper
asymmetric_error = [lower_yerr, upper_yerr]
xlabel = r'$(R/\mathrm{kpc})^{1/4}$'
ylabel = r'$\mu\,[\mathrm{mag/arcsec^2}]$'
else:
x = ell_fix['sma'] * pixel_scale
y = -2.5 * np.log10((ell_fix['intens'] + offset) / (pixel_scale)**2) + zeropoint
y_upper = -2.5 * np.log10((ell_fix['intens'] + offset + ell_fix['int_err']) / (pixel_scale) ** 2) + zeropoint
y_lower = -2.5 * np.log10((ell_fix['intens'] + offset - ell_fix['int_err']) / (pixel_scale) ** 2) + zeropoint
upper_yerr = y_lower - y
lower_yerr = y - y_upper
asymmetric_error = [lower_yerr, upper_yerr]
xlabel = r'$(R/\mathrm{arcsec})^{1/4}$'
ylabel = r'$\mu\,[\mathrm{mag/arcsec^2}]$'
if show_grid:
ax1.grid(linestyle='--', alpha=0.4, linewidth=2)
if show_dots:
ax1.errorbar((x ** 0.25), y,
yerr=asymmetric_error,
color='k', alpha=0.2, fmt='o',
capsize=4, capthick=1, elinewidth=1)
if label is not None:
ax1.plot(x**0.25, y, color=linecolor, linewidth=linewidth, linestyle=linestyle,
label=r'$\mathrm{' + label + '}$', alpha=alpha)
leg = ax1.legend(fontsize=labelsize, frameon=False, loc=labelloc)
for l in leg.legendHandles:
l.set_alpha(1)
else:
ax1.plot(x**0.25, y, color=linecolor, linewidth=linewidth, linestyle=linestyle, alpha=alpha)
ax1.fill_between(x**0.25, y_upper, y_lower, color=linecolor, alpha=0.3*alpha)
for tick in ax1.xaxis.get_major_ticks():
tick.label.set_fontsize(ticksize)
for tick in ax1.yaxis.get_major_ticks():
tick.label.set_fontsize(ticksize)
ax1.set_xlim(x_min, x_max)
ax1.set_xlabel(xlabel, fontsize=ticksize)
ax1.set_ylabel(ylabel, fontsize=ticksize)
ax1.invert_yaxis()
# Twin axis with linear scale
if physical_unit and show_banner is True:
ax4 = ax1.twiny()
ax4.tick_params(direction='in')
lin_label = [1, 2, 5, 10, 50, 100, 150, 300]
lin_pos = [i**0.25 for i in lin_label]
ax4.set_xticks(lin_pos)
ax4.set_xlim(ax1.get_xlim())
ax4.set_xlabel(r'$\mathrm{kpc}$', fontsize=ticksize)
ax4.xaxis.set_label_coords(1, 1.025)
ax4.set_xticklabels([r'$\mathrm{'+str(i)+'}$' for i in lin_label], fontsize=ticksize)
for tick in ax4.xaxis.get_major_ticks():
tick.label.set_fontsize(ticksize)
# Vertical line
if vertical_line is not None:
if len(vertical_line) > 3:
raise ValueError('Maximum length of vertical_line is 3.')
ylim = ax1.get_ylim()
style_list = ['-', '--', '-.']
for k, pos in enumerate(vertical_line):
ax1.axvline(x=pos**0.25, ymin=0, ymax=1,
color='gray', linestyle=style_list[k], linewidth=3, alpha=0.75)
plt.ylim(ylim)
# Return
if ax is None:
return fig
return ax1 | 5aab69adfc38afad84b6f45972ffb7ce05d547ac | 21,471 |
def min_conflicts_value(csp, var, current):
"""Return the value that will give var the least number of conflicts.
If there is a tie, choose at random."""
return argmin_random_tie(csp.domains[var],
key=lambda val: csp.nconflicts(var, val, current)) | ab338ce8b0abd7a77078193fcac3041155ed3e78 | 21,472 |
from app.model import TokenRepository
def init(jwt):
"""Initialize the JWTManager.
Parameters:
jwt (JWTManager): an instance of the jwt manager.
"""
@jwt.token_in_blacklist_loader
def check_if_token_in_blacklist(decoded_token):
"""Callback to check if a token is in the blacklist.
Parameters:
decoded_token (dict): a jwt token decoded into a dictionary.
"""
return TokenRepository().is_token_revoked(decoded_token) | 406ff6e8ce6169dff6559b141f5c4453cce68f1e | 21,473 |
from typing import Callable
import inspect
from types import ModuleType
def get_component_rst_string(module: ModuleType, component: Callable, level: int) -> str:
"""Get a rst string, to autogenerate documentation for a component (class or function)
:param module: the module containing the component
:param component: the component (class or function)
:param level: the level in nested directory structure
"""
object_name = f"{module.__name__}.{component.__name__}"
rst_documentation = ""
level_underline = RST_LEVEL_SYMBOLS[level] * 6
if inspect.isclass(component):
rst_documentation = SPHINX_CLASS_STRING.format(
object_name=object_name, var=component.__name__, level=level_underline
)
elif inspect.isfunction(component):
rst_documentation = SPHINX_FUNC_STRING.format(
object_name=object_name, var=component.__name__, level=level_underline
)
elif type(component).__name__ == "Dispatcher":
rst_documentation = get_multidispatch_string(component, module, level_underline)
return rst_documentation | 511c718610456b4c5df5df2bc9e0ae5e7ac6823c | 21,474 |
import tensorflow as tf
def log_mse_loss(source, separated, max_snr=1e6, bias_ref_signal=None):
"""Negative log MSE loss, the negated log of SNR denominator."""
err_pow = tf.math.reduce_sum(tf.math.square(source - separated), axis=-1)
snrfactor = 10.**(-max_snr / 10.)
if bias_ref_signal is None:
ref_pow = tf.math.reduce_sum(tf.square(source), axis=-1)
else:
ref_pow = tf.math.reduce_sum(tf.math.square(bias_ref_signal), axis=-1)
bias = snrfactor * ref_pow
return 10. * _stabilized_log_base(bias + err_pow) | 46203582f0d0ec2a98248ec000805c9e43f54091 | 21,475 |
from typing import Union
def crack(password: str) -> Union[str, None]:
"""
Crack the given password
"""
# found 96% by caesar
return caesar(password) | 058779267c400501eecac2d3e43d691e2152ef8d | 21,476 |
def fetchOne(query):
"""
Returns a dict result from the fetch of one query row
"""
return sqliteRowToDict(query.fetchone()) | 5be4753ea541a6e27ece16fb375c8a0664487a71 | 21,477 |
import matplotlib.pyplot
def _get_class_rgb(num_classes, predicted_class):
"""Map from class to RGB value for a specific colormap.
Args:
num_classes: Integer, the total number of classes.
predicted_class: Integer, the predicted class, in [0, num_classes).
Returns:
Tuple of 3 floats in [0.0, 1.0] representing an RGB color.
Raises:
ValueError: If predicted class is not in [0, num_classes).
"""
if not 0 <= predicted_class < num_classes:
raise ValueError('Predicted class %d must be in [0, %d).' %
(predicted_class, num_classes))
# Map [0, num_classes) to [0, 255)
colormap_index = int(predicted_class * 255.0 / num_classes)
# Return just the RGB values of the colormap.
return matplotlib.pyplot.cm.get_cmap(CLASS_ANNOTATION_COLORMAP)(colormap_index)[0:3] | 914824eef57a829a7d67e74f45f56088d73ea34e | 21,478 |
from datetime import datetime
def PUT(request):
"""Update a project's name."""
request.check_required_parameters(body={'project': {'name': 'name'}}, path={'projectId': 'string'})
project = Project.from_id(request.params_path['projectId'])
project.check_exists()
project.check_user_access(request.google_id, True)
project.set_property('name', request.params_body['project']['name'])
project.set_property('datetime_last_edited', Database.datetime_to_string(datetime.now()))
project.update()
return Response(200, 'Successfully updated project.', project.obj) | ab5cc9bea5f5a933293761a852532f2c7c6004ec | 21,479 |
import pandas as pd
def load_book_details(file_path):
""" Read book details from a csv file into a pandas DataFrame. """
books_df = pd.read_csv(file_path, index_col='book_id')
return books_df | 9240efd9778198e34464fe6f95d312a82fd3894e | 21,480 |
import random
import string
def random_string_fx() -> str:
"""
Creates a 16-character string of unique ASCII letters. For use
with logging tests.
Returns:
16-character string of ASCII letters.
"""
result = "".join(random.sample(string.ascii_letters, 16))
return result | 835c2dc2716c6ef0ad37f5ae03cfc9dbe2e16725 | 21,481 |
from typing import Dict
from typing import List
import logging
def parse_scheduler_nodes(
pbscmd: PBSCMD, resource_definitions: Dict[str, PBSProResourceDefinition]
) -> List[Node]:
"""
Gets the current state of the nodes as the scheduler sees them, including resources,
assigned resources, jobs currently running etc.
"""
ret: List[Node] = []
for ndict in pbscmd.pbsnodes_parsed("-a"):
node = parse_scheduler_node(ndict, resource_definitions)
if not node.available.get("ccnodeid"):
node.metadata["override_resources"] = False
logging.fine(
"'ccnodeid' is not defined so %s has not been joined to the cluster by the autoscaler"
+ " yet or this is not a CycleCloud managed node",
node,
)
ret.append(node)
return ret | bceec54b302b0b70e77181d3940fd6e41b8922c4 | 21,482 |
def GaugeSet(prefix, *, name, index, **kwargs):
"""
Factory function for Gauge Set.
Parameters
----------
prefix : str
Gauge base PV (up to 'GCC'/'GPI').
name : str
Name to refer to the gauge set.
index : str or int
Index for gauge (e.g. '02' or 3).
prefix_controller : str, optional
Base PV for the controller.
onlyGCC : optional
If defined and not :keyword:`False`, set has no Pirani.
"""
onlyGCC = kwargs.pop('onlyGCC', None)
if onlyGCC:
if 'prefix_controller' in kwargs:
return GaugeSetMks(
prefix, name=name, index=index,
prefix_controller=kwargs.pop('prefix_controller'),
**kwargs)
else:
return GaugeSetBase(prefix, name=name, index=index, **kwargs)
else:
if 'prefix_controller' in kwargs:
return GaugeSetPiraniMks(
prefix, name=name, index=index,
prefix_controller=kwargs.pop('prefix_controller'),
**kwargs)
else:
return GaugeSetPirani(prefix, name=name, index=index, **kwargs) | ef856c77e414d8bc4532483e5b65aa3ebb0cc132 | 21,483 |
def user_rating(user, object, category=""):
"""
Usage:
{% user_rating user obj [category] as var %}
"""
return user_rating_value(user, object, category) | 09ac3ea8d1efcc3dc70d82bf5266f3e768a35c3b | 21,484 |
import numpy
def weighted_mean(
x: NumericOrIter,
w: NumericOrIter = None,
na_rm: bool = False,
) -> NumericType:
"""Calculate weighted mean"""
if is_scalar(x):
x = [x] # type: ignore
if w is not None and is_scalar(w):
w = [w] # type: ignore
x = Array(x)
if w is not None:
w = Array(w)
if len(x) != len(w):
raise ValueError("'x' and 'w' must have the same length")
if na_rm:
notna = ~numpy.isnan(x)
x = x[notna]
if w is not None:
w = w[notna]
if w is not None and sum(w) == 0:
return NA
return numpy.average(x, weights=w) | 4034d642629696f1be73c62384bf6633ccb6efe1 | 21,485 |
import socket
def internet(host="8.8.8.8", port=53, timeout=10):
"""
Check Internet Connections.
:param host: the host to check the connection against
:param port: the port to connect to
:param timeout: connection timeout in seconds
:type host:str
:type port:int
:type timeout:int
:return bool: True if Connection is Stable
>>> internet() # if there is stable internet connection
True
>>> internet() # if there is no stable internet connection
False
"""
try:
socket.setdefaulttimeout(timeout)
socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect((host, port))
return True
except Exception as ex:
return False | c0ee11cf7aa9699a077e238d993136aeb4efcead | 21,486 |
def parse_date(td):
"""helper function to parse time"""
resYear = float(td.days)/364.0 # number of years, including the fractional part
resMonth = int((resYear - int(resYear))*364/30) # number of months: multiply the fractional part by 364, divide by 30
resYear = int(resYear)
return str(resYear) + "y" + str(resMonth) + "m" | bda78b0968b59c13f763e51f5f15340a377eeb35 | 21,487 |
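A small illustration of the approximation used above; the 400-day timedelta is a made-up input:
from datetime import timedelta
print(parse_date(timedelta(days=400)))  # '1y1m': 400/364 years, with the fractional part converted to months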
import math
import numpy as np
def hue_angle(a, b):
"""
Returns the *hue* angle :math:`h` in degrees.
Parameters
----------
a : numeric
Opponent colour dimension :math:`a`.
b : numeric
Opponent colour dimension :math:`b`.
Returns
-------
numeric
*Hue* angle :math:`h` in degrees.
Examples
--------
>>> a = -0.0006241120682426434
>>> b = -0.0005062701067729668
>>> hue_angle(a, b) # doctest: +ELLIPSIS
219.0484326...
"""
h = math.degrees(np.arctan2(b, a)) % 360
return h | 2f508be9ed0cdcb0e8b193eefb441a6281a464c7 | 21,488 |
def perform_similarity_checks(post, name):
"""
Performs 4 tests to determine similarity between links in the post and the user name
:param post: Text of the post
:param name: Username to compare against
:return: Tuple of the maximum similarity ratio (float) and the list of similar link domains
"""
max_similarity, similar_links = 0.0, []
# Keep checking links until one is deemed "similar"
for link in post_links(post):
domain = get_domain(link)
# Straight comparison
s1 = similar_ratio(domain, name)
# Strip all spaces
s2 = similar_ratio(domain, name.replace(" ", ""))
# Strip all hyphens
s3 = similar_ratio(domain.replace("-", ""), name.replace("-", ""))
# Strip all hyphens and all spaces
s4 = similar_ratio(domain.replace("-", "").replace(" ", ""), name.replace("-", "").replace(" ", ""))
similarity = max(s1, s2, s3, s4)
max_similarity = max(max_similarity, similarity)
if similarity >= SIMILAR_THRESHOLD:
similar_links.append(domain)
return max_similarity, similar_links | 78813c3b2223072a4a5b15a5a71837424a648470 | 21,489 |
from operator import itemgetter
def create_getters(tuples):
"""Create a series of itemgetters that return tuples
:param tuples: a list of tuples
:type tuples: collections.Iterable
:returns: a generator of item getters
:rtype: generator
::
>>> gs = list(create_getters([(0, 2), (), (1,)]))
>>> d = ['a', 'b', 'c', 'd']
>>> gs[0](d)
('a', 'c')
>>> gs[1](d)
()
>>> gs[2](d)
('b',)
"""
def tgetter0():
return lambda x: ()
def tgetter1(key):
it = itemgetter(key)
return lambda x: (it(x),)
for t in tuples:
if not t:
yield tgetter0()
elif len(t) == 1:
yield tgetter1(*t)
else:
yield itemgetter(*t) | 43d6fed8233ee56b91a52c024c533ae72c8e6890 | 21,490 |
def report_cots_cv2x_bsm(bsm: dict) -> str:
"""A function to report the BSM information contained in an SPDU from a COTS C-V2X device
:param bsm: a dictionary containing BSM fields from a C-V2X SPDU
:type bsm: dict
:return: a string representation of the BSM fields
:rtype: str
"""
report = ""
for key in bsm.keys():
report += key + "\t\t\t" + str(bsm[key]) + "\n"
report += "\n"
return report | df0aa5ae4b50980088fe69cb0b776abbf0b0998d | 21,491 |
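A brief sketch with hypothetical BSM fields; each key/value pair becomes one tab-separated line in the report:
sample_bsm = {"msg_count": 42, "lat": 42.3314, "lon": -83.0458}
print(report_cots_cv2x_bsm(sample_bsm))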
import logging
import numpy as np
def get_level_matrix(matrix, level):
"""Returns a binary matrix with positions exceeding a threshold.
matrix = numpy array object
level = floating number
The matrix it returns has 1 in the positions where matrix
has values above level and 0 elsewhere."""
logging.info("Selecting the amino acids contacts.")
(n1, n2) = matrix.shape
out_matrix = np.empty([n1, n2], dtype=float, order='F')
for i in range(n1):
for j in range(n2):
if i == j:
out_matrix[i, j] = 0
elif matrix[i, j] >= level:
out_matrix[i, j] = 1
else:
out_matrix[i, j] = 0
return out_matrix | 1516f14970471c4f9402fcbf2cfb2a0d017e754e | 21,492 |
def bookmark(repo, subset, x):
"""``bookmark([name])``
The named bookmark or all bookmarks.
If `name` starts with `re:`, the remainder of the name is treated as
a regular expression. To match a bookmark that actually starts with `re:`,
use the prefix `literal:`.
"""
# i18n: "bookmark" is a keyword
args = getargs(x, 0, 1, _('bookmark takes one or no arguments'))
if args:
bm = getstring(args[0],
# i18n: "bookmark" is a keyword
_('the argument to bookmark must be a string'))
kind, pattern, matcher = _stringmatcher(bm)
bms = set()
if kind == 'literal':
bmrev = repo._bookmarks.get(pattern, None)
if not bmrev:
raise error.RepoLookupError(_("bookmark '%s' does not exist")
% bm)
bms.add(repo[bmrev].rev())
else:
matchrevs = set()
for name, bmrev in repo._bookmarks.iteritems():
if matcher(name):
matchrevs.add(bmrev)
if not matchrevs:
raise error.RepoLookupError(_("no bookmarks exist"
" that match '%s'") % pattern)
for bmrev in matchrevs:
bms.add(repo[bmrev].rev())
else:
bms = set([repo[r].rev()
for r in repo._bookmarks.values()])
bms -= set([node.nullrev])
return subset & bms | 71fd382ad0710e2e54a80b0b739d04c6d5410719 | 21,493 |
from functools import partial
def indel_protein_processor(df, refgene, proteincdd=None):
"""Calculate protein features
Features not used in the final model are commented out
Args:
df (pandas.DataFrame)
refgene (str): path to refCodingExon.bed.gz
proteincdd (str): optional, path to proteinConservedDomains.txt
Returns:
df (pandas.DataFrame)
"""
# cds length & indel location
acc_len = acc_len_dict(refgene)
df["cds_length"], df["indel_location"] = zip(
*df.apply(partial(len_loc, d=acc_len), axis=1)
)
# check if the indel is in conserved domain (CDD)
# acc_dom = acc_domain_dict(proteincdd)
# df['is_in_cdd'] = df.apply(partial(is_in_conserved_domain, d=acc_dom), axis=1)
return df | 721b4b19838ac2d6cd21f471d647c34c3586ebb2 | 21,494 |
def perdict_raw(model, *args, **kwargs):
"""
Tries to call model.predict(*args, **kwargs, prediction_type="RawFormulaVal"). If that fail,
calls model.predict(*args, **kwargs)
"""
try:
return model.predict(*args, **kwargs, prediction_type="RawFormulaVal")
except TypeError:
return model.predict(*args, **kwargs) | 2ab7790c0cd48cc9b26f6e7888dd61436cb728b4 | 21,495 |
from functools import wraps
from flask import request, jsonify
def login_required(arg):
""" Decorator to check if a user is logged in"""
@wraps(arg)
def wrap(*args, **kwargs):
"""Checking if token exists in the request header"""
if request.headers.get('Authorization'):
auth_token = request.headers.get('Authorization')
token = auth_token.split(" ")[1]
resp = User.decode_token(token)
user = User.query.filter_by(id=resp).first()
if user:
return arg(*args, **kwargs)
response = jsonify({
'status': 'error',
'message': "Unauthorized"
})
response.status_code = 401
return response
return wrap | 2d41e2cd41621a0ce6015182badd7a4117c1daf6 | 21,496 |
def calc_manual_numbers(n):
"""
>>> calc_manual_numbers(1)
20151125
>>> calc_manual_numbers(2)
31916031
>>> calc_manual_numbers(3)
18749137
>>> calc_manual_numbers(21)
33511524
"""
return (BASE * pow(FACTOR, n - 1, MOD)) % MOD | d0a276da3eb931afcf6b60b1b6172b468e59b95c | 21,497 |
import numpy as np
def accuracy(y0, y1):
"""
compute accuracy for y1 and y2 does not meter if either of them is in vector or integer form
:param y0: list of - labels or vector of probabilities
:param y1: list of - labels or vector of probabilities
:return: accuracy
"""
if not isinstance(y0[0], (int, float, np.int, np.int32, np.int64, np.float, np.float32, np.float64)):
y0 = np.argmax(y0, axis=1)
elif isinstance(y0, list):
y0 = np.array(y0)
if not isinstance(y1[0], (int, float, np.int, np.int32, np.int64, np.float, np.float32, np.float64)):
y1 = np.argmax(y1, axis=1)
elif isinstance(y1, list):
y1 = np.array(y1)
out = np.sum(y0==y1)/len(y0)
return out | b0e1077a8443e3d325b3238355c1a578af8823e3 | 21,498 |
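A minimal check of the two accepted input forms, using made-up labels and probability vectors:
y_true = [0, 1, 1, 0]
y_prob = [[0.9, 0.1], [0.2, 0.8], [0.6, 0.4], [0.7, 0.3]]
print(accuracy(y_true, y_prob))  # 0.75; the probability rows are collapsed with argmax first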
import random
def random_function(*args):
"""Picks one of its arguments uniformly at random, calls it, and returns the result.
Example usage:
>>> random_function(lambda: random.uniform(-2, -1), lambda: random.uniform(1, 2))
"""
choice = random.randint(0, len(args) - 1)
return args[choice]() | 3f8d11becc52fde5752671e3045a9c64ddfeec97 | 21,499 |
def get_transceiver_sensor_sub_id(ifindex, sensor):
"""
Returns sub OID for transceiver sensor. Sub OID is calculated as follows:
sub OID = transceiver_oid + XCVR_SENSOR_PART_ID_MAP[sensor]
:param ifindex: interface index
:param sensor: sensor key
:return: sub OID = {{index}} * 1000 + {{lane}} * 10 + sensor id
"""
transceiver_oid, = get_transceiver_sub_id(ifindex)
return (transceiver_oid + XCVR_SENSOR_PART_ID_MAP[sensor],) | 4c718feb45384ab6bef11e1f3c42ab4cd8d0ae2c | 21,500 |
def patch_twitter_get_following_users(value):
"""Return a function decorator which patches the TwitterClient.get_following_user_ids method."""
return patch_twitter_client_method("get_following_user_ids", value) | 296d20a3dbce3684cb2af2568c64fac25ab345c9 | 21,501 |
from torch import nn
def conv1x1_1d(inplanes: int,
outplanes: int,
stride: int = 1) -> nn.Conv1d:
"""1x1一维卷积,用于短接时降采样"""
return nn.Conv1d(
inplanes,
outplanes,
kernel_size=(1,),
stride=(stride,),
padding=0,
bias=False
) | 481dc7b71b31ae6199bcafd1112bf1541d7a5d25 | 21,502 |
import struct
def unpack_mmap_block(mm, n):
"""Decode the nth 4-byte long byte string from mapped memory."""
return struct.unpack("<L", mm[n*DATA_BLOCK_SIZE:(n+1)*DATA_BLOCK_SIZE])[0] | a75ac48e188e03e1ec7d3c289ffdd8e12173bc6f | 21,504 |
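A quick sanity check under the assumption that DATA_BLOCK_SIZE is 4 (matching the 4-byte "<L" format, though its value is not shown in the snippet); a plain bytes object supports the same slicing as an mmap:
buf = struct.pack("<LL", 10, 20)
print(unpack_mmap_block(buf, 1))  # 20, assuming DATA_BLOCK_SIZE == 4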
def tobs():
"""Return a list of temperatures for prior year"""
# * Query for the dates and temperature observations from the last year.
# * Convert the query results to a Dictionary using `date` as the key and `tobs` as the value.
# * Return the json representation of your dictionary.
last_date = session.query(Measurements.date).order_by(Measurements.date.desc()).first()
last_year = dt.date(2017, 8, 23) - dt.timedelta(days=365)
temperature = session.query(Measurements.date, Measurements.tobs).\
filter(Measurements.date > last_year).\
order_by(Measurements.date).all()
# Create a list of dicts with `date` and `tobs` as the keys and values
temperature_totals = []
for result in temperature:
row = {}
row["date"] = temperature[0]
row["tobs"] = temperature[1]
temperature_totals.append(row)
return jsonify(temperature_totals) | c7411130bb5c8d956d10a2ba3ce535f41ca04474 | 21,505 |
def reactToAMQPMessage(message, send_back):
"""
React to given (AMQP) message. `message` is expected to be
:py:func:`collections.namedtuple` structure from :mod:`.structures` filled
with all necessary data.
Args:
message (object): One of the request objects defined in
:mod:`.structures`.
send_back (fn reference): Reference to function for responding. This is
useful for progress monitoring for example. Function takes
one parameter, which may be response structure/namedtuple, or
string or whatever would be normally returned.
Returns:
object: Response class from :mod:`structures`.
Raises:
ValueError: if bad type of `message` structure is given.
"""
if _instanceof(message, ConversionRequest):
return ConversionResponse(
marcxml2mods(
marc_xml=message.marc_xml,
uuid=message.uuid,
url=message.url
)
)
raise ValueError("'%s' is unknown type of request!" % str(type(message))) | fd34510d58e6b164f37f93c0b17f2b2a1c8d32d2 | 21,506 |
def recE(siEnergy, layer):
""" Reconstructed energy from sim energy """
return ( (siEnergy/mipSiEnergy) * layerWeights[layer-1] + siEnergy)*\
secondOrderEnergyCorrection | 9efde4432f3a81ff06505c6fbb529be7404027d4 | 21,507 |
import boto3
def get_account_id():
"""
Retrieve the AWS account ID
"""
client = boto3.client("sts")
account_id = client.get_caller_identity()["Account"]
return account_id | 579bdc686a0ceb5d71e180bf8ce7a17243cff849 | 21,508 |
def process_tag(item, profile, level=0):
"""
Processes element with <code>tag</code> type
@type item: ZenNode
@type profile: dict
@type level: int
"""
if not item.name:
# looks like it's root element
return item
attrs = make_attributes_string(item, profile)
cursor = profile['place_cursor'] and zen_coding.get_caret_placeholder() or ''
self_closing = ''
is_unary = item.is_unary() and not item.children
if profile['self_closing_tag'] and is_unary:
self_closing = '/'
# define tag name
tag_name = '%' + (profile['tag_case'] == 'upper' and item.name.upper() or item.name.lower())
if tag_name.lower() == '%div' and '{' not in attrs:
# omit div tag
tag_name = ''
item.end = ''
item.start = _replace(item.start, tag_name + attrs + self_closing)
if not item.children and not is_unary:
item.start += cursor
return item | 7d06160cadb0d828799713d888327a41e7ab0b80 | 21,509 |
from typing import Optional
import google
from google.cloud import secretmanager
def read_secret(project_id: str, secret_name: str) -> Optional[str]:
"""Reads the latest version of a GCP Secret Manager secret.
Returns None if the secret doesn't exist."""
secret_manager = secretmanager.SecretManagerServiceClient()
secret_path = secret_manager.secret_path(project_id, secret_name)
try:
response = secret_manager.access_secret_version(
request={'name': f'{secret_path}/versions/latest'}
)
return response.payload.data.decode('UTF-8')
except google.api_core.exceptions.ClientError:
# Fail gracefully if there's no secret version yet.
return None
except AttributeError:
# Sometimes the google API fails when no version is present, with:
# File "{site-packages}/google/api_core/exceptions.py",
# line 532, in from_grpc_error if isinstance(rpc_exc, grpc.Call) or _is_informative_grpc_error(rpc_exc):
# AttributeError: 'NoneType' object has no attribute 'Call'
return None | 388fa51983452f0852646d1ed1ab183da706c0ab | 21,510 |
import xml.etree.ElementTree
def find_in_xml(data, search_params):
"""Try to find an element in an xml
Take an xml from string or as xml.etree.ElementTree
and an iterable of strings (and/or tuples in case of findall) to search.
The tuple should contain the string to search for and a true value.
"""
if isinstance(data, str):
data = xml.etree.ElementTree.fromstring(data)
param = search_params[0]
if isinstance(data, list):
result = iterate_search_data(data, param)
else:
result = xml_search_helper(data, param)
if len(search_params) == 1:
return result
return find_in_xml(result, search_params[1:]) | 1cb4685a042349231cd946116a4894ca5b9d68d5 | 21,511 |
import numpy as np
from scipy import integrate
from scipy.special import roots_hermitenorm
def conditional_expect(
X,
func,
reg,
method=None,
quantile_method=None,
n_integration_samples=10,
quad_dict=None,
random_state=None,
include_x=False,
include_idx=False,
vector_func=False,
):
"""Calculates the conditional expectation, i.e. E[func(Y)|X=x_eval], where
Y | X ~ reg.predict_target_distribution, for x_eval in `X_eval`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The samples where the expectation should be evaluated.
func : callable
The function that transforms the random variable.
reg: ProbabilisticRegressor
Predicts the target distribution over which the expectation is calculated.
method: string, optional, optional (default=None)
The method by which the expectation is computed.
-'assume_linear' assumes E[func(Y)|X=x_eval] ~= func(E[Y|X=x_eval]) and
thereby only takes the function value at the expected y value.
-'monte_carlo' Basic monte carlo integration. Taking the average
of randomly drawn samples. `n_integration_samples` specifies the
number of monte carlo samples.
-'quantile' Uses the quantile function to transform the integration
space into the interval from 0 to 1 and than uses the method from
'quantile_method' to calculate the integral. The number of integration
points is specified by `n_integration_samples`.
-'gauss_hermite' Uses Gauss-Hermite quadrature. This assumes Y | X
to be gaussian distributed. The number of evaluation points is given
by `n_integration_samples`.
-'dynamic_quad' uses `scipy's` function `expect` on the `rv_continuous`
random variable of `reg`, which in turn uses a dynamic gaussian
quadrature routine for calculating the integral. Performance is worse
using a vector function.
If `method is None` 'gauss_hermite' is used.
quantile_method: string, optional (default=None)
Specifies the integration methods used after the quantile
transformation.
-'trapezoid' Trapezoidal method for integration using evenly spaced
samples.
-'simpson' Simpson method for integration using evenly spaced samples.
-'average' Taking the average value for integration using evenly spaced
samples.
-'romberg' Romberg method for integration. If `n_integration_samples` is
not equal to `2**k + 1` for a natural number k, the number of
samples used for integration is put to the smallest such number greater
than `n_integration_samples`.
-'quadrature' Gaussian quadrature method for integration.
If `quantile_method is None` quadrature is used.
n_integration_samples: int, optional (default=10)
The number of integration samples used in 'quantile', 'monte_carlo' and
'gauss-hermite'.
quad_dict: dict, optional (default=None)
Further arguments for using `scipy's` `expect`
random_state: numeric | np.random.RandomState, optional (default=None)
Random state for fixing the number generation.
include_x: bool, optional (default=False)
If `include_x` is `True`, `func` also takes the x value.
include_idx: bool, optional (default=False)
If `include_idx` is `True`, `func` also takes the index of the x value.
vector_func: bool or str, optional (default=False)
If `vector_func` is `True`, the integration values are passed as a whole
to the function `func`. If `vector_func` is 'both', the integration
values might or might not be passed as a whole. The integration values
if passed as a whole are of the form (n_samples, n_integration), where
n_integration denotes the number of integration values.
Returns
-------
expectation : numpy.ndarray of shape (n_samples)
The conditional expectation for each value applied.
"""
X = check_array(X, allow_nd=True)
check_type(reg, "reg", ProbabilisticRegressor)
check_type(
method,
"method",
target_vals=[
"monte_carlo",
"assume_linear",
"dynamic_quad",
"gauss_hermite",
"quantile",
None,
],
)
check_type(
quantile_method,
"quantile_method",
target_vals=[
"trapezoid",
"simpson",
"average",
"romberg",
"quadrature",
None,
],
)
check_scalar(n_integration_samples, "n_monte_carlo", int, min_val=1)
check_type(quad_dict, "scipy_args", dict, target_vals=[None])
check_type(include_idx, "include_idx", bool)
check_type(include_x, "include_x", bool)
check_type(vector_func, "vector_func", bool, target_vals=["both"])
check_callable(func, "func", n_free_parameters=1 + include_idx + include_x)
if method is None:
method = "gauss_hermite"
if quantile_method is None:
quantile_method = "quadrature"
if quad_dict is None:
quad_dict = {}
if method == "quantile" and quantile_method == "romberg":
# n_integration_samples need to be of the form 2**k + 1
n_integration_samples = (
2 ** int(np.log2(n_integration_samples) + 1) + 1
)
is_optional = vector_func == "both"
if is_optional:
vector_func = True
random_state = check_random_state(random_state)
def arg_filter(idx_y, x_y, y):
ret = tuple()
if include_idx:
ret += (idx_y,)
if include_x:
ret += (x_y,)
ret += (y,)
return ret
def evaluate_func(inner_potential_y):
if vector_func:
inner_output = func(
*arg_filter(np.arange(len(X)), X, inner_potential_y)
)
else:
inner_output = np.zeros_like(inner_potential_y)
for idx_x, inner_x in enumerate(X):
for idx_y, y_val in enumerate(inner_potential_y[idx_x]):
inner_output[idx_x, idx_y] = func(
*arg_filter(idx_x, inner_x, y_val)
)
return inner_output
expectation = np.zeros(len(X))
if method in ["assume_linear", "monte_carlo"]:
if method == "assume_linear":
potential_y = reg.predict(X).reshape(-1, 1)
else: # method equals "monte_carlo"
potential_y = reg.sample_y(
X=X,
n_samples=n_integration_samples,
random_state=random_state,
)
expectation = np.average(evaluate_func(potential_y), axis=1)
elif method == "quantile":
if quantile_method in ["trapezoid", "simpson", "average", "romberg"]:
eval_points = np.arange(1, n_integration_samples + 1) / (
n_integration_samples + 1
)
cond_dist = _reshape_scipy_dist(
reg.predict_target_distribution(X), shape=(-1, 1)
)
potential_y = cond_dist.ppf(eval_points.reshape(1, -1))
output = evaluate_func(potential_y)
if quantile_method == "trapezoid":
expectation = integrate.trapezoid(
output, dx=1 / n_integration_samples, axis=1
)
elif quantile_method == "simpson":
expectation = integrate.simpson(
output, dx=1 / n_integration_samples, axis=1
)
elif quantile_method == "average":
expectation = np.average(output, axis=-1)
else: # quantile_method equals "romberg"
expectation = integrate.romb(
output, dx=1 / n_integration_samples, axis=1
)
else: # quantile_method equals "quadrature"
def fixed_quad_function_wrapper(inner_eval_points):
inner_cond_dist = _reshape_scipy_dist(
reg.predict_target_distribution(X), shape=(-1, 1)
)
inner_potential_y = inner_cond_dist.ppf(
inner_eval_points.reshape(1, -1)
)
return evaluate_func(inner_potential_y)
expectation, _ = integrate.fixed_quad(
fixed_quad_function_wrapper, 0, 1, n=n_integration_samples
)
elif method == "gauss_hermite":
unscaled_potential_y, weights = roots_hermitenorm(
n_integration_samples
)
cond_mean, cond_std = reg.predict(X, return_std=True)
potential_y = (
cond_std[:, np.newaxis] * unscaled_potential_y[np.newaxis, :]
+ cond_mean[:, np.newaxis]
)
output = evaluate_func(potential_y)
expectation = (
1
/ (2 * np.pi) ** (1 / 2)
* np.sum(weights[np.newaxis, :] * output, axis=1)
)
else: # method equals "dynamic_quad"
for idx, x in enumerate(X):
cond_dist = reg.predict_target_distribution([x])
def quad_function_wrapper(y):
if is_optional or not vector_func:
return func(*arg_filter(idx, x, y))
else:
return func(
*arg_filter(
np.arange(len(X)), X, np.full((len(X), 1), y)
)
)[idx]
expectation[idx] = cond_dist.expect(
quad_function_wrapper,
**quad_dict,
)
return expectation | fb89adedeada5e100f2483d927d945ffe2d99034 | 21,512 |
def getSumOfSquaresPixel16_Image16(Image):
"""getSumOfSquaresPixel16_Image16(Image) -> unsigned __int16"""
return _ImageFunctions.getSumOfSquaresPixel16_Image16(Image) | 9ba21171e26fc32d938a3f377684830df5f03b8f | 21,513 |
from sqlparse.sql import Identifier
def parse_statement(parsed, output):
"""Parses a tokenized sqlparse statement and returns an encoded table."""
# Get the name of the table being created
table_name = next(token.value for token in parsed.tokens if isinstance(token, Identifier))
# Add the table metadata to the cached tables to access later.
if len(table_name.split('.')) == 2\
and not found_table(table_name.split('.')[0], table_name.split('.')[1]):
this_table = Table(
table_name.split('.')[0], table_name.split('.')[1], cursor
)
print(f'Appending this table ({this_table.alias}):')
print(this_table)
this_table.query_data()
tables.append(this_table)
elif len(table_name.split('.')) == 3 \
and not found_table(table_name.split('.')[1], table_name.split('.')[2]):
this_table = Table(
table_name.split('.')[1], table_name.split('.')[2], cursor
)
print('Appending this table')
print(this_table)
this_table.query_data()
tables.append(this_table)
# print(this_table)
# Get all the FROM statements's metadata
froms = {k: v for d in extract_from_part(parsed, cursor) for k, v in d.items()}
print('Tables:')
print([table for table in tables])
# Get all the JOIN statements's metadata
joins = list(extract_join_part(parsed, cursor))
# Get all of the comparisons to compare the number of comparisons to the number of JOIN
# statements
comparisons = list(extract_comparisons(parsed))
# Get all the columns selected by this query. The table aliases are used to identify where
# the columns originate from.
selects = list(
extract_selects(parsed, {**froms, **{k: v for d in joins for k, v in d.items()}})
)
# When the number of comparisons does not match the number of joins, the parsing was
# incorrect, raise and exception.
if len(comparisons) != len(joins):
raise Exception('Parsing messed up!')
return encode_table(joins, froms, table_name, selects, comparisons, output) | a81791deac59e496145993bba0547294d5d5a7fa | 21,514 |
def create_hue_success_response(entity_number, attr, value):
"""Create a success response for an attribute set on a light."""
success_key = f"/lights/{entity_number}/state/{attr}"
return {"success": {success_key: value}} | c8570ca95ada89bd26d93f659261c91032c915c7 | 21,515 |
import warnings
def get_deaths():
"""***DEPRECATED - Use get_data_jhu instead.***
Get most recent fatality counts from JHU."""
# Deprecated warning
url = "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/"
warnings.warn("This function is deprecated. Use get_data_jhu instead; see tutorials at <https://github.com/PayneLab/covid19pandas/tree/master/docs/>.", DeprecatedWarning, stacklevel=2)
print("These data were obtained from Johns Hopkins University (https://github.com/CSSEGISandData/COVID-19).")
return _get_table(url, "time_series_covid19_deaths_global.csv", source="jhu", update=True) | c747d27b10d0845520f4dbbfbb5efcf23c655b7e | 21,516 |
import numpy as np
def transform_sentence(text, model):
"""
Mean embedding vector
"""
def preprocess_text(raw_text, model=model):
"""
Excluding unknown words and get corresponding token
"""
raw_text = raw_text.split()
return list(filter(lambda x: x in model.vocab, raw_text))
tokens = preprocess_text(text)
if not tokens:
return np.zeros(model.vector_size)
text_vector = np.mean(model[tokens], axis=0)
return np.array(text_vector) | 57f9002e4f7fccefa824f1691324b4593b11fbe0 | 21,517 |
from typing import Sequence
from typing import List
from sqlalchemy import Column, inspect
from sqlalchemy.orm import DeclarativeMeta
def model_primary_key_columns_and_names(Model: DeclarativeMeta) -> (Sequence[Column], List[str]):
""" Get the list of primary columns and their names as two separate tuples
Example:
pk_columns, pk_names = model_primary_key_columns_and_names(models.User)
pk_columns # -> (models.User.id, )
pk_names # -> ('id', )
"""
pk_columns: Sequence[Column] = inspect(Model).primary_key
pk_names: List[str] = [col.key for col in pk_columns]
return pk_columns, pk_names | 9466524452a77459042081e7becf968302b3dd3b | 21,518 |
def biweekly_test_data():
""" Provides test data for the full system test when using "biweekly" time_scale."""
time_scale = "biweekly"
time_per_task = {
"Free" : 480 * 9 * 2,
"Work" : 480 * 5 * 2,
"Sleep" : 480 * 7 * 2
}
min_task_time = 60
preferences = {
"Free" : {
"avoid" : [ "Monday1,09:00AM-Monday1,05:00PM",
"Tuesday1,09:00AM-Tuesday1,05:00PM",
"Wednesday1,09:00AM-Wednesday1,05:00PM",
"Thursday1,09:00AM-Thursday1,05:00PM",
"Friday1,09:00AM-Friday1,05:00PM",
"Monday2,09:00AM-Monday2,05:00PM",
"Tuesday2,09:00AM-Tuesday2,05:00PM",
"Wednesday2,09:00AM-Wednesday2,05:00PM",
"Thursday2,09:00AM-Thursday2,05:00PM",
"Friday2,09:00AM-Friday2,05:00PM"],
"inconvenient" : [],
"neutral" : [],
"convenient" :[ "Monday1,06:00PM-Monday1,08:00PM",
"Tuesday1,06:00PM-Tuesday1,08:00PM",
"Wednesday1,06:00PM-Wednesday1,08:00PM",
"Thursday1,06:00PM-Thursday1,08:00PM",
"Friday1,06:00PM-Friday1,08:00PM",
"Monday2,06:00PM-Monday2,08:00PM",
"Tuesday2,06:00PM-Tuesday2,08:00PM",
"Wednesday2,06:00PM-Wednesday2,08:00PM",
"Thursday2,06:00PM-Thursday2,08:00PM",
"Friday2,06:00PM-Friday2,08:00PM"],
"preferred" : [],
"required" : []
},
"Work" : {
"avoid" : [],
"inconvenient" : [],
"neutral" : [],
"convenient" : [],
"preferred" : [],
"required" : [ "Monday1,09:00AM-Monday1,05:00PM",
"Tuesday1,09:00AM-Tuesday1,05:00PM",
"Wednesday1,09:00AM-Wednesday1,05:00PM",
"Thursday1,09:00AM-Thursday1,05:00PM",
"Friday1,09:00AM-Friday1,05:00PM",
"Monday2,09:00AM-Monday2,05:00PM",
"Tuesday2,09:00AM-Tuesday2,05:00PM",
"Wednesday2,09:00AM-Wednesday2,05:00PM",
"Thursday2,09:00AM-Thursday2,05:00PM",
"Friday2,09:00AM-Friday2,05:00PM"],
},
"Sleep" : {
"avoid" : [ "Monday1,09:00AM-Monday1,05:00PM",
"Tuesday1,09:00AM-Tuesday1,05:00PM",
"Wednesday1,09:00AM-Wednesday1,05:00PM",
"Thursday1,09:00AM-Thursday1,05:00PM",
"Friday1,09:00AM-Friday1,05:00PM",
"Monday2,09:00AM-Monday2,05:00PM",
"Tuesday2,09:00AM-Tuesday2,05:00PM",
"Wednesday2,09:00AM-Wednesday2,05:00PM",
"Thursday2,09:00AM-Thursday2,05:00PM",
"Friday2,09:00AM-Friday2,05:00PM"],
"inconvenient" : [],
"neutral" : [],
"convenient" : [],
"preferred" : [ "Monday1,10:00PM-Tuesday1,06:00AM",
"Tuesday1,10:00PM-Wednesday1,06:00AM",
"Wednesday1,10:00PM-Thursday1,06:00AM",
"Thursday1,10:00PM-Friday1,06:00AM",
"Friday1,10:00PM-Saturday1,06:00AM",
"Saturday1,10:00PM-Sunday1,06:00AM",
"Sunday1,10:00PM-Monday2,06:00AM",
"Monday2,10:00PM-Tuesday2,06:00AM",
"Tuesday2,10:00PM-Wednesday2,06:00AM",
"Wednesday2,10:00PM-Thursday2,06:00AM",
"Thursday2,10:00PM-Friday2,06:00AM",
"Friday2,10:00PM-Saturday2,06:00AM",
"Saturday2,10:00PM-Sunday2,06:00AM",
"Sunday2,10:00PM-Monday1,06:00AM"],
"required" : []
}
}
return time_scale, time_per_task, min_task_time, preferences | b5f354a17819133c3c29e7652f6b1132599e89b6 | 21,519 |
import numpy as np
import matplotlib.pyplot as plt
def plot_bar_whiskers_jitter_significance(data, comparison_columns,
significant_comparison_columns,
heights, ylabel,
xlabels=None,
ax_handle=None,
median_notch=False,
boxplot_color='black',
boxplot_linewidth=2,
markersize=12,
xtick_rotation=90,
marker=None,
color=None,
alpha=0.2,
whis = [2.5, 97.5]):
"""
Make a jittered boxplot significance test
Parameters
-------------------
data : A pandas dataframe, where each column corresponds to data to be plotted with jitter + boxplot
ylabel : A string, the y-axis label
comparison_columns : A list of lists, where each element corresponds to a pair of columns to compare
significant_comparison_columns : A list of lists, where each element corresponds to a pair of significant column comparisons
heights : A list of floats, the height of each comparison annotation
xlabels : A list of strings, the x-labels
ax_handle : A matplotlib axis handle, for adding onto an existing plot
median_notch : A bool, to plot the lower and upper quartiles of the median
boxplot_color : A string, the boxplot color
boxplot_linewidth : A float, the boxplot linewidth
markersize: An int, the marker size
marker : A string or a list of strings, the marker of the points
color : A string or a list of strings, the color of the points
alpha : A float, transparency
whis : A list of floats, the quantiles for whiskers
Returns
-------------
fig : A matplotlib figure handle (if ax_handle is None)
ax : A matplotlib axis handle (if ax_handle is None)
"""
if ax_handle is None:
fig, ax = plt.subplots(1, 1, figsize=(5, 5))
else:
ax = ax_handle
make_jitter_plots(data, names=data.columns, ylabel=ylabel, ax_handle=ax,
alpha=alpha, markersize=markersize, xlabels=xlabels,
marker=marker, color=color)
bp = data.boxplot(ax=ax,notch=median_notch, grid=False, whis = whis,
showfliers=False, return_type='dict')
for _, line_list in bp.items():
for line in line_list:
line.set_color(boxplot_color)
line.set_linewidth(boxplot_linewidth)
previous_ymaxes = []
for i, comparison in enumerate(comparison_columns):
comp1, comp2 = comparison
x1, x2 = np.nonzero(data.columns==comp1)[0][0]+1, np.nonzero(data.columns==comp2)[0][0]+1
y_max = data.loc[:,[comp1,comp2]].max().values.max()
previous_ymaxes.append(y_max)
y, h, col = max(previous_ymaxes) + heights[i], 2, 'k'
ax.plot([x1, x1, x2, x2], [y, y+h, y+h, y], lw=1.5, c=col)
if comparison in significant_comparison_columns:
ax.text((x1+x2)*.5, y+h, "*", ha='center', va='bottom', color=col, fontsize=20)
else:
ax.text((x1+x2)*.5, y+h, "ns", ha='center', va='bottom', color=col, fontsize=20)
if xlabels is not None:
ax.set_xticklabels(xlabels, rotation=xtick_rotation)
if ax_handle is None:
return fig, ax | dfdaf95034d3d53fac7c79eb4c7b387f9ac18f5b | 21,520 |
import numpy as np
def _is_trans_valid(seed, mutate_sample):
"""
Check a mutated sample is valid. If the number of changed pixels in
a seed is less than pixels_change_rate*size(seed), this mutate is valid.
Else check the infinite norm of seed changes, if the value of the
infinite norm less than pixel_value_change_rate*255, this mutate is
valid too. Otherwise the opposite.
"""
is_valid = False
pixels_change_rate = 0.02
pixel_value_change_rate = 0.2
diff = np.array(seed - mutate_sample).flatten()
size = np.shape(diff)[0]
l0_norm = np.linalg.norm(diff, ord=0)
linf = np.linalg.norm(diff, ord=np.inf)
if l0_norm > pixels_change_rate * size:
if linf < 256:
is_valid = True
else:
if linf < pixel_value_change_rate * 255:
is_valid = True
return is_valid | 118dc0e566fc4f5c481d21f8c8aec7fe4f1ece29 | 21,521 |
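A small sketch with made-up arrays: changing 3 of 64 pixels exceeds the 2% pixel-count threshold while the infinity norm stays below 256, so the mutation is accepted:
seed = np.zeros((8, 8))
mutated = seed.copy()
mutated[0, :3] = 10.0
print(_is_trans_valid(seed, mutated))  # True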
def split_axis(x, indices_or_sections, axis):
"""Splits given variables along an axis.
Args:
x (tuple of Variables): Variables to be split.
indices_or_sections (int or 1-D array): If this argument is an integer,
N, the array will be divided into N equal arrays along axis.
If it is a 1-D array of sorted integers, it
indicates the positions where the array is split.
axis (int): Axis that the input array is split along.
Returns:
``tuple`` or ``Variable``: Tuple of :class:`~chainer.Variable` objects
if the number of outputs is more than 1 or
:class:`~chainer.Variable` otherwise.
.. note::
This function raises ``ValueError`` if at least
one of the outputs is split to zero-size
(i.e. `axis`-th value of its shape is zero).
"""
return SplitAxis(indices_or_sections, axis)(x) | 245841aaef14ea130b20254775152a9199d63c41 | 21,522 |
from subprocess import check_output
def status(**kwargs):
"""Execute \"git status\" on the repository."""
status = check_output(["git", "status"]).decode("utf-8")
repo_clean = True
for keyword in ["ahead", "modified", "untracked"]:
if keyword in status:
repo_clean = False
return {"clean": repo_clean, "status": status} | 8e81264579628407e8560a6d89be884179636ea9 | 21,523 |
from collections import Counter
import pandas as pd
def find_listener_frequent_words(df, num):
"""
Given a conversation dataframe from a certain subreddit, find the top frequent words spoken by listeners.
Args:
df: A specified dataframe from a subreddit.
num: A ranking number used for finding the top frequent words.
Return:
result: A dataframe showing the top frequent words.
"""
# extract listeners' turn
df_listener = df[df['dialog turn'] != 1]
# compute tokens
df_listener_filtered = compute_tokens(df_listener)
# find top (num) frequent words
result = pd.DataFrame(Counter(df_listener_filtered.sum()).most_common(num), columns = ["word", "count"])
return result | 80bc4b95e5713429751ff1725afce4ac02f0bddd | 21,525 |
def is_rescue_entry(boot_entry):
"""
Determines whether the given boot entry is rescue.
:param BootEntry boot_entry: Boot entry to assess
:return: True is the entry is rescue
:rtype: bool
"""
return 'rescue' in boot_entry.kernel_image.lower() | ba456c2724c3ad4e35bef110ed8c4cc08147b42c | 21,526 |
import base64
import numpy as np
import tensorflow as tf
def estimate_cost(features, ssd):
"""Generate a TensorFlow subgraph to estimate the cost of an architecture.
Args:
features: A 1D float tensor containing features for a single network
architecture.
ssd: The name of the search space definition to use for the cost model.
Returns:
A scalar float tensor containing the estimated cost for the specified
network architecture
"""
kernel_data = cost_model_data.KERNEL_DATA[ssd]
kernel_data = base64.decodebytes(kernel_data)
kernel = np.frombuffer(kernel_data, cost_model_data.SERIALIZATION_DTYPE)
kernel = kernel.reshape([-1, 1]).astype(np.float32)
bias_data = cost_model_data.BIAS_DATA[ssd]
bias_data = base64.decodebytes(bias_data)
bias = np.frombuffer(bias_data, cost_model_data.SERIALIZATION_DTYPE)
bias = bias.reshape([1]).astype(np.float32)
with tf.name_scope('estimate_cost'):
batch_features = tf.expand_dims(features, axis=0)
batch_prediction = tf.linalg.matmul(batch_features, kernel)
batch_prediction = tf.nn.bias_add(batch_prediction, bias)
return tf.squeeze(batch_prediction, axis=[0, 1]) | 2c85cc5d320cd214dae260793a1c779d8019177c | 21,527 |
import yaml
def IsResourceLike(item):
"""Return True if item is a dict like object or list of dict like objects."""
return yaml.dict_like(item) or (yaml.list_like(item) and
all(yaml.dict_like(x) for x in item)) | bc6ae6c4d84949511c679116454343731e8d8bd2 | 21,529 |
import math
def rad_to_gon(angle: float) -> float:
"""Converts from radiant to gon (grad).
Args:
angle: Angle in rad.
Returns:
Converted angle in gon.
"""
return angle * 200 / math.pi | cbf7070a9c3a9796dfe4bffe39fdf2421f7279ed | 21,530 |
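A one-line check of the conversion; a right angle of pi/2 rad corresponds to 100 gon:
print(rad_to_gon(math.pi / 2))  # 100.0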
def check_interface_status(conn_obj, interface, state, device="dut"):
"""
API to check the interface state
Author: Chaitanya Vella ([email protected])
:param conn_obj:
:param interface:
:param state:
:param device:
:return:
"""
interface_state = get_interface_status(conn_obj, interface, device=device)
return interface_state == state
import numpy as np
def hungarian_match(self, y_true, y_pred):
"""Matches predicted labels to original using hungarian algorithm."""
y_true = self.adjust_range(y_true)
y_pred = self.adjust_range(y_pred)
D = max(y_pred.max(), y_true.max()) + 1
w = np.zeros((D, D), dtype=np.int64)
# Confusion matrix.
for i in range(y_pred.size):
w[y_pred[i], y_true[i]] += 1
ind = linear_assignment(-w)
d = {i:j for i, j in ind}
y_pred = np.array([d[v] for v in y_pred])
return y_true, y_pred | 29a9976edcfa4a935d451f6471641b3836343d83 | 21,532 |
import torch
def orthantree(scaled, capacity=8):
"""Constructs a :ref:`tree <presolve>` for the given :func:`~pybbfmm.scale`'d problem.
This is a bit of a mess of a function, but long story short it starts with all the sources allocated to the root
and repeatedly subdivides overfull boxes, constructing the various tree tensors as it goes.
:param scaled: :func:`~pybbfmm.scale`'d problem.
:param capacity: the max number of sources or targets per box.
:return: A :ref:`tree <presolve>`.
"""
D = scaled.sources.shape[1]
points = torch.cat([scaled.sources, scaled.targets])
indices = points.new_zeros((len(points),), dtype=torch.long)
tree = arrdict.arrdict(
parents=indices.new_full((1,), -1),
depths=indices.new_zeros((1,)),
centers=points.new_zeros((1, D)),
terminal=indices.new_ones((1,), dtype=torch.bool),
children=indices.new_full((1,) + (2,)*D, -1),
descent=indices.new_zeros((1, D)))
bases = 2**torch.flip(torch.arange(D, device=indices.device), (0,))
subscript_offsets = sets.cartesian_product(torch.tensor([0, 1], device=indices.device), D)
center_offsets = sets.cartesian_product(torch.tensor([-1, +1], device=indices.device), D)
depthcounts = [torch.as_tensor([1], device=indices.device)]
depth = 0
while True:
used, used_inv = torch.unique(indices, return_inverse=True)
source_idxs, target_idxs = indices[:len(scaled.sources)], indices[-len(scaled.targets):]
tree.terminal[used] = underoccupied(source_idxs, target_idxs, tree.terminal, capacity)[used]
used_is_active = ~tree.terminal[used]
point_is_active = used_is_active[used_inv]
if not point_is_active.any():
break
depth += 1
active = used[used_is_active]
active_inv = (used_is_active.cumsum(0) - used_is_active.long())[used_inv[point_is_active]]
first_child = len(tree.parents) + 2**D*torch.arange(len(active), device=active.device)
point_offset = ((points[point_is_active] >= tree.centers[active][active_inv])*bases).sum(-1)
child_box = first_child[active_inv] + point_offset
indices[point_is_active] = child_box
trailing_ones = (slice(None),) + (None,)*D
tree.children[active] = first_child[trailing_ones] + (subscript_offsets*bases).sum(-1)
centers = tree.centers[active][trailing_ones] + center_offsets.float()/2**depth
descent = center_offsets[None].expand_as(centers)
n_children = len(active)*2**D
children = arrdict.arrdict(
parents=active.repeat_interleave(2**D),
depths=tree.depths.new_full((n_children,), depth),
centers=centers.reshape(-1, D),
descent=descent.reshape(-1, D),
terminal=tree.terminal.new_ones((n_children,)),
children=tree.children.new_full((n_children,) + (2,)*D, -1))
tree = arrdict.cat([tree, children])
depthcounts.append(n_children)
tree['id'] = torch.arange(len(tree.parents), device=points.device)
indices = arrdict.arrdict(
sources=indices[:len(scaled.sources)],
targets=indices[-len(scaled.targets):])
depths = ragged.Ragged(
torch.arange(len(tree.id), device=points.device),
torch.as_tensor(depthcounts, device=points.device))
return tree, indices, depths | 9813697be3b19a2d7e3b71f28b1212c91a590fd3 | 21,533 |
from sklearn.feature_extraction.text import CountVectorizer
def make_sparse(
docs_to_fit, min_df=50, stop_words=None,
docs_to_transform=None, ngram_range=None,
):
"""
Take a pre-tokenized document and turn into a sparse matrix.
:param docs_to_fit: A list of lists of tokenized words to build the vocabulary from.
:param min_df: Number of records that a word should appear in to be stored as a feature.
:param stop_words: List of words to exclude, if any.
:param docs_to_transform: A list of lists of tokenized words to transform. If none, we transform the first argument.
:return:
"""
cv = CountVectorizer(
tokenizer=no_tokenization, preprocessor=None, ngram_range=ngram_range,
stop_words=stop_words, lowercase=False, min_df=min_df
)
if docs_to_transform is None:
return cv, cv.fit_transform(docs_to_fit)
elif docs_to_transform is not None:
cv.fit(docs_to_fit)
return cv, cv.transform(docs_to_transform) | 467d04f465ed4c19b4e20aa69c05508f6faafdc6 | 21,534 |
import random
def weightedPriorityReliabilityScore(service_instances, last_records):
"""
Algorithm to find highest priority of the service based on
reliability score achieved in past discovery results
"""
priority_list = []
for i in range(0, len(service_instances)):
single_instance = {}
single_instance['ip'] = service_instances[i][1]
single_instance['port'] = service_instances[i][2]
score = 0.0
discovery_instances = sharkradarDbutils.getLatestRecordsDiscoveryLogs(
service_instances[i][0],
service_instances[i][1],
service_instances[i][2],
last_records)
len_discovery = len(discovery_instances)
for i in range(0, len_discovery):
if discovery_instances[i][0] == "FAIL":
score = score + ((-1.0) * (len_discovery - i))
if discovery_instances[i][0] == "SUCCESS":
score = score + ((1.0) * (len_discovery - i))
single_instance['score'] = score
priority_list.append(single_instance)
priority_list.sort(key=lambda x: x['score'], reverse=True)
res = priority_list[0]
res_list = list(
filter(
lambda x: x['score'] == res['score'],
priority_list))
res = random.choice(res_list)
return str(res['ip']), str(res['port']) | e215ae3e4009de7e8e6e8a8a0b66a66238e30f16 | 21,535 |
import tensorflow as tf
from google.protobuf import text_format
def calculate_output(param_dict, select_device, input_example):
"""Calculate the output of the imported graph given the input.
Load the graph def from graph file on selected device, then get the tensors based on the input and output name from the graph,
then feed the input_example to the graph and retrieves the output vector.
Args:
param_dict: The dictionary contains all the user-input data in the json file.
select_device: "NGRAPH" or "CPU".
input_example: A map with key is the name of the input tensor, and value is the random generated example
Returns:
The output vector obtained from running the input_example through the graph.
"""
graph_filename = param_dict["graph_location"]
output_tensor_name = param_dict["output_tensor_name"]
if not tf.gfile.Exists(graph_filename):
raise Exception("Input graph file '" + graph_filename +
"' does not exist!")
graph_def = tf.GraphDef()
if graph_filename.endswith("pbtxt"):
with open(graph_filename, "r") as f:
text_format.Merge(f.read(), graph_def)
else:
with open(graph_filename, "rb") as f:
graph_def.ParseFromString(f.read())
set_os_env(select_device)
with tf.Graph().as_default() as graph:
tf.import_graph_def(graph_def)
if len(output_tensor_name) == 0:
# if no outputs are specified, then compare for all tensors
output_tensor_name = sum(
[[j.name for j in i.outputs] for i in graph.get_operations()],
[])
# Create the tensor to its corresponding example map
tensor_to_example_map = {}
for item in input_example:
t = graph.get_tensor_by_name(item)
tensor_to_example_map[t] = input_example[item]
#input_placeholder = graph.get_tensor_by_name(input_tensor_name)
output_tensor = [graph.get_tensor_by_name(i) for i in output_tensor_name]
config = tf.ConfigProto(
allow_soft_placement=True,
# log_device_placement=True,
inter_op_parallelism_threads=1)
with tf.Session(graph=graph, config=config) as sess:
output_tensor = sess.run(output_tensor, feed_dict=tensor_to_example_map)
return output_tensor, output_tensor_name | e98bf63743d7f940170ca7ab4dcd97b751be178f | 21,536 |
def is_instance_failed_alarm(alarms, instance, guest_hb=False):
"""
Check if an instance failed alarm has been raised
"""
expected_alarm = {'alarm_id': fm_constants.FM_ALARM_ID_VM_FAILED,
'severity': fm_constants.FM_ALARM_SEVERITY_CRITICAL}
return _instance_alarm_raised(alarms, expected_alarm, instance) | 5c886bea0b72d52392ed38217af20b7ebc87bd91 | 21,537 |
def detected(numbers, mode):
"""
Returns a Boolean result indicating whether the last member in a numeric array is the max or
min, depending on the setting.
Arguments
- numbers: an array of numbers
- mode: 'max' or 'min'
"""
call_dict = {'min': min, 'max': max}
if mode not in call_dict.keys():
print('Must specify either max or min')
return
return numbers[-1] == call_dict[mode](numbers) | b0a5b19e7d97db99769f28c4b8ce998dbe318c5b | 21,539 |
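A couple of quick checks:
print(detected([3, 1, 5], 'max'))  # True: the last element 5 is the maximum
print(detected([3, 1, 5], 'min'))  # False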
import math
def calculate_compass_bearing(point_a, point_b):
"""
Calculates the bearing between two points.
The formulae used is the following:
θ = atan2(sin(Δlong).cos(lat2),
cos(lat1).sin(lat2) − sin(lat1).cos(lat2).cos(Δlong))
:Parameters:
- `pointA: The tuple representing the latitude/longitude for the
first point. Latitude and longitude must be in decimal degrees
- `pointB: The tuple representing the latitude/longitude for the
second point. Latitude and longitude must be in decimal degrees
:Returns:
The bearing in degrees
:Returns Type:
float
"""
# LICENSE: public domain from https://gist.github.com/jeromer/2005586
if (type(point_a) != tuple) or (type(point_b) != tuple):
raise TypeError("Only tuples are supported as arguments")
lat1 = math.radians(point_a[0])
lat2 = math.radians(point_b[0])
diff_long = math.radians(point_b[1] - point_a[1])
x = math.sin(diff_long) * math.cos(lat2)
y = math.cos(lat1) * math.sin(lat2) - (math.sin(lat1)
* math.cos(lat2) * math.cos(diff_long))
initial_bearing = math.atan2(x, y)
# Now we have the initial bearing but math.atan2 return values
# from -180° to + 180° which is not what we want for a compass bearing
# The solution is to normalize the initial bearing as shown below
initial_bearing = math.degrees(initial_bearing)
compass_bearing = (initial_bearing + 360) % 360
return compass_bearing | 535fc0cfc086974b1e329df297bdbea4aab1f127 | 21,540 |
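A quick sanity check: a point due east of the origin should give a bearing of 90 degrees, and a point due north should give 0.
print(calculate_compass_bearing((0.0, 0.0), (0.0, 1.0)))  # 90.0
print(calculate_compass_bearing((0.0, 0.0), (1.0, 0.0)))  # 0.0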
import re
def parse_instructions(instruction_list):
"""
Parses the instruction strings into a dictionary
"""
instruction_dict = []
for instruction in instruction_list:
regex_match = re.match(r"(?P<direction>\w)(?P<value>\d*)",instruction)
if regex_match:
instruction_dict.append(regex_match.groupdict())
return instruction_dict | 67b773bae0cb2cc0509503f2ea27f3312ce9d41c | 21,541 |
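For example, direction-plus-distance strings are split into string-valued groups:
moves = parse_instructions(["N3", "E10", "S1"])
# [{'direction': 'N', 'value': '3'}, {'direction': 'E', 'value': '10'}, {'direction': 'S', 'value': '1'}]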
import glob
import os
import pandas as pd
def calc_elapsed_sleep(in_num, hyp_file, fpath, savedir, export=True):
"""
Calculate minutes of elapsed sleep from a hypnogram file & concatenate stage 2 sleep files
Parameters
----------
in_num: str
patient identifier
hyp_file: str (format: *.txt)
file with hypnogram at 30-second intervals
fpath: str
path to EEG files cut by sleep stage
savedir: str
path to save EEG files cut by hrs elapsed sleep
export: bool (default: True)
whether to export blocked dataframes
Returns
-------
.csv files with EEG data blocked in two-hour chunks (according to Purcell et al. 2017)
OR
pd.dataframes blocked in two-hour chunks (according to Purcell et al. 2017)
"""
# calculate elapsed sleep for each 30-second time interval
print('Loading hypnogram...')
sleep_scores = [1, 2, 3, 4, 5] # exclude 0 and 6 for awake and record break
hyp = pd.read_csv(hyp_file, header=None, index_col=[0], sep='\t', names=['time', 'score'], parse_dates=True)
mins_elapsed = hyp.score.isin(sleep_scores).cumsum()/2
# get a list of all matching files
glob_match = f'{fpath}/{in_num}*_s2_*'
files = glob.glob(glob_match)
# make list of dfs for concat
print('Reading data...')
data = [pd.read_csv(file, header = [0, 1], index_col = 0, parse_dates=True) for file in files]
# add NaN to the end of each df
data_blocked = [df.append(pd.Series(name=df.iloc[-1].name + pd.Timedelta(milliseconds=1))) for df in data]
# concatenate the dfs
print('Concatenating data...')
s2_df = pd.concat(data_blocked).sort_index()
# assign indices to hours elapsed sleep
print('Assigning minutes elapsed...')
idx0_2 = mins_elapsed[mins_elapsed.between(0, 120)].index
idx2_4 = mins_elapsed[mins_elapsed.between(120.5, 240)].index
idx4_6 = mins_elapsed[mins_elapsed.between(240.5, 360)].index
idx6_8 = mins_elapsed[mins_elapsed.between(360.5, 480)].index
# cut dataframe into blocks by elapsed sleep (0-2, 2-4, 4-6, 6-8)
df_two = s2_df[(s2_df.index > idx0_2[0]) & (s2_df.index < idx0_2[-1])]
df_four = s2_df[(s2_df.index > idx2_4[0]) & (s2_df.index < idx2_4[-1])]
df_six = s2_df[(s2_df.index > idx4_6[0]) & (s2_df.index < idx4_6[-1])]
df_eight = s2_df[(s2_df.index > idx6_8[0]) & (s2_df.index < idx6_8[-1])]
if export:
# export blocked data
if not os.path.exists(savedir):
print(savedir + ' does not exist. Creating directory...')
os.makedirs(savedir)
print('Saving files...')
for df, hrs in zip([df_two, df_four, df_six, df_eight], ['0-2hrs', '2-4hrs', '4-6hrs', '6-8hrs']):
date = df.index[0].strftime('%Y-%m-%d')
savename = in_num + '_' + date + '_s2_' + hrs + '.csv'
df.to_csv(os.path.join(savedir, savename))
print(f'Files saved to {savedir}')
else:
        return df_two, df_four, df_six, df_eight
print('Done') | 61390b205c7dcbd65884e8f073f1b1395f1d1ca2 | 21,542 |
def valid_pairs(pairs, chain):
"""
Determine if the chain contains any invalid pairs (e.g. ETH_XMR)
"""
for primary, secondary in zip(chain[:-1], chain[1:]):
if not (primary, secondary) in pairs and \
not (secondary, primary) in pairs:
return False
return True | c9e36d0490893e1b1a6cd8c3fb0b14b382d69515 | 21,543 |
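For example, with a small set of tradable pairs (hypothetical symbols):
pairs = {("BTC", "ETH"), ("BTC", "XMR")}
print(valid_pairs(pairs, ["ETH", "BTC", "XMR"]))  # True: ETH_BTC and BTC_XMR both exist
print(valid_pairs(pairs, ["ETH", "XMR", "BTC"]))  # False: ETH_XMR is not listed either way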
from typing import Any
def fqname_for(obj: Any) -> str:
"""
Returns the fully qualified name of ``obj``.
Parameters
----------
obj
The class we are interested in.
Returns
-------
str
The fully qualified name of ``obj``.
"""
if "<locals>" in obj.__qualname__:
raise RuntimeError(
"Can't get fully qualified name of locally defined object. "
f"{obj.__qualname__}"
)
return f"{obj.__module__}.{obj.__qualname__}" | 6d4e5db255715c999d1bb40533f3dbe03b948b07 | 21,545 |
def symbol_size(values):
""" Rescale given values to reasonable symbol sizes in the plot. """
max_size = 50.0
min_size = 5.0
# Rescale max.
slope = (max_size - min_size)/(values.max() - values.min())
return slope*(values - values.max()) + max_size | a33f77ee8eeff8d0e63035c5c408a0788b661886 | 21,547 |
from datetime import datetime
def delete(id):
"""Soft delete a patient."""
check_patient_permission(id)
patient = Patient.query.get(id)
    patient.deleted = datetime.now()
patient.deleted_by = current_user
db.session.commit()
return redirect(url_for('screener.index')) | 0e9c984bb8bf8429c662f1af14945089789b8bc8 | 21,548 |
import torch
import tensorflow as tf
def _to_tensor(args, data):
"""Change data to tensor."""
if vega.is_torch_backend():
data = torch.tensor(data)
if args.device == "GPU":
return data.cuda()
else:
return data
elif vega.is_tf_backend():
data = tf.convert_to_tensor(data)
return data | 77d87982c81232bac4581eb5269629c268a7fe16 | 21,551 |
def materialize_jupyter_deployment(
config: ClusterConfig,
uuid: str,
definition: DeploymentDefinition) -> JupyterDeploymentImpl: # noqa
"""Materializes the Jupyter deployment definition.
:param config: Cluster to materialize the Jupyter deployment with.
:param uuid: Unique deployment id.
:param definition: Deployment definition to materialize.
"""
jupyter_deployment = deserialize_jupyter_deployment_impl(
config=config,
uuid=uuid,
serialized=definition.value)
return jupyter_deployment | d4a12efd7d4f55d5261734cf3eb0dd3b230c363d | 21,552 |
import tensorflow as tf
def _CreateLSTMPruneVariables(lstm_obj, input_depth, h_depth):
"""Function to create additional variables for pruning."""
mask = lstm_obj.add_variable(
name="mask",
shape=[input_depth + h_depth, 4 * h_depth],
initializer=tf.ones_initializer(),
trainable=False,
dtype=lstm_obj.dtype)
threshold = lstm_obj.add_variable(
name="threshold",
shape=[],
initializer=tf.zeros_initializer(),
trainable=False,
dtype=lstm_obj.dtype)
# Add old_weights, old_old_weights, gradient for gradient
# based pruning.
old_weight = lstm_obj.add_variable(
name="old_weight",
shape=[input_depth + h_depth, 4 * h_depth],
initializer=tf.zeros_initializer(),
trainable=False,
dtype=lstm_obj.dtype)
old_old_weight = lstm_obj.add_variable(
name="old_old_weight",
shape=[input_depth + h_depth, 4 * h_depth],
initializer=tf.zeros_initializer(),
trainable=False,
dtype=lstm_obj.dtype)
gradient = lstm_obj.add_variable(
name="gradient",
shape=[input_depth + h_depth, 4 * h_depth],
initializer=tf.zeros_initializer(),
trainable=False,
dtype=lstm_obj.dtype)
return mask, threshold, old_weight, old_old_weight, gradient | 398dd89a9b8251f11aef3ba19523e26861ff5874 | 21,553 |
import numpy as np
def get_index_fredkin_gate(N, padding = 0):
"""Get paramaters for log2(N) Fredkin gates
Args:
- N (int): dimensional of states
- padding (int, optional): Defaults to 0.
Returns:
- list of int: params for the second and third Frekin gates
"""
indices = []
for i in range(0, int(np.log2(N))):
indices.append(2**i + padding)
return indices | d7ab1f4bc414ad741533d5fabfb0f7c8b4fe0959 | 21,554 |
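For an 8-dimensional state the indices are the first log2(N) powers of two, shifted by the padding:
print(get_index_fredkin_gate(8))             # [1, 2, 4]
print(get_index_fredkin_gate(8, padding=3))  # [4, 5, 7]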
def import_by_name(name):
"""
动态导入
"""
tmp = name.split(".")
module_name = ".".join(tmp[0:-1])
obj_name = tmp[-1]
module = __import__(module_name, globals(), locals(), [obj_name])
return getattr(module, obj_name) | 714ca90704d99a8eafc8db08a5f3df8e17bc6da4 | 21,555 |
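For example, resolving a standard-library function by its dotted path:
join = import_by_name("os.path.join")
print(join("a", "b"))  # 'a/b' on POSIX ('a\\b' on Windows)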
def f1_score(y_true, y_pred):
"""F-measure."""
p = precision(y_true, y_pred)
r = true_positive_rate(y_true, y_pred)
return 2 * (p * r) / (p + r) | e5f79def2db902bb0aa1efd9ea1ccef52b62072a | 21,556 |
def hexColorToInt(rgb):
"""Convert rgb color string to STK integer color code."""
r = int(rgb[0:2],16)
g = int(rgb[2:4],16)
b = int(rgb[4:6],16)
color = format(b, '02X') + format(g, '02X') + format(r, '02X')
return int(color,16) | 59b8815d647b9ca3e90092bb6ee7a0ca19dd46c2 | 21,557 |
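The RGB hex string is repacked as a BGR integer, so for example:
print(hexColorToInt("FF0000"))  # 255 (pure red -> 0x0000FF)
print(hexColorToInt("00FF00"))  # 65280 (pure green -> 0x00FF00)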
import numpy as np
import torch
def test(model, X, model_type, test_type, counter=False):
"""Test functions."""
if model_type == 'notear-mlp':
X = np.vstack(X)
y = model(torch.from_numpy(X))
y = y.cpu().detach().numpy()
mse = mean_squared_loss(y.shape[0], y[:, 0], X[:, 0])
elif model_type == 'notear-castle':
X = np.vstack(X)
y = model(torch.from_numpy(X))
y = y.cpu().detach().numpy()
mse = mean_squared_loss(y.shape[0], y[:, 0], X[:, 0])
elif model_type == 'ISL':
y = model.test(X)
mse = mean_squared_loss(y.shape[0] * y.shape[1], y, X[:, :, 0][:, :,
np.newaxis])
if not counter:
if test_type == 'ID':
metrics[f'{model_type}_testID_MSE'] = mse
elif test_type == 'OOD':
metrics[f'{model_type}_testOOD_MSE'] = mse
else:
if test_type == 'ID':
metrics[f'{model_type}_counter_testID_MSE'] = mse
elif test_type == 'OOD':
metrics[f'{model_type}_counter_testOOD_MSE'] = mse
return mse | cb98e2096052270e786bbb81fafc328076b1aa40 | 21,558 |
def scale():
"""
Returns class instance of `Scale`.
For more details, please have a look at the implementations inside `Scale`.
Returns
-------
Scale :
Class instance implementing all 'scale' processes.
"""
return Scale() | f5fb9daf9baaf86674be110aae78b1bf91f09371 | 21,559 |
import cv2
def imread_rgb(filename):
"""Read image file from filename and return rgb numpy array"""
bgr = cv2.imread(filename)
rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
return rgb | 6fcef3f9f5d8b02c28c596f706f3e1fcf685dd24 | 21,560 |
def insert_at_index(rootllist, newllist, index):
""" Insert newllist in the llist following rootllist such that newllist is at the provided index in the resulting llist"""
# At start
if index == 0:
newllist.child = rootllist
return newllist
# Walk through the list
curllist = rootllist
for i in range(index-1):
curllist = curllist.child
# Insert
newllist.last().child=curllist.child
curllist.child=newllist
return rootllist | 767cde29fbc711373c37dd3674655fb1bdf3fedf | 21,561 |
def kpi_value(request, body):
"""kpi值接口 根据 indicator 传入参数不同请求不同的 handler"""
params = {
"indicator": body.indicator
}
handler = KpiFactory().create_handler(params["indicator"])
result = handler(params=params)
return DashboardResult(content=result) | 5f242028d7f95b1ffa81690c3ed1b1d7006cf97c | 21,562 |
def safeReplaceOrder( references ):
"""
When inlining a variable, if multiple instances occur on the line, then the
last reference must be replaced first. Otherwise the remaining intra-line
references will be incorrect.
"""
    result = list(references)
    result.sort(key=lambda ref: ref.colno, reverse=True)
    return result | dae29bf1c8da84c77c64210c4d897ac4a9d0c098 | 21,563 |
import typing as t
def clean_value(value: str) -> t.Union[int, float, str]:
"""Return the given value as an int or float if possible, otherwise as the original string."""
try:
return int(value)
except ValueError:
pass
try:
return float(value)
except ValueError:
pass
return value | 52c09e2aaf77cb22e62f47e11226350112390eb2 | 21,565 |
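A few sample conversions:
print(clean_value("42"))     # 42 (int)
print(clean_value("3.14"))   # 3.14 (float)
print(clean_value("hello"))  # 'hello' (left as a string)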
def sumstat(*L):
    """
    Sums a list or a tuple L
    Modified from pg 80 of Web Programming in Python
    """
    if len(L) == 1 and isinstance(L[0], (list, tuple)):
        L = L[0]
s = 0.0
for k in L:
s = s + k
return s | c37aa0aa0b7dbf6adbe77d82b27b25e469891795 | 21,566 |
from urllib.parse import urlparse, urlunparse
def halref_to_data_url(halref: str) -> str:
"""
Given a HAL or HAL-data document URIRef, returns the corresponding HAL-data URL
halref: str
HAL document URL
(Most important!) https://hal.archives-ouvertes.fr/hal-02371715v2 -> https://data.archives-ouvertes.fr/document/hal-02371715v2
https://data.archives-ouvertes.fr/document/hal-02371715v2.rdf -> https://data.archives-ouvertes.fr/document/hal-02371715v2.rdf
https://data.archives-ouvertes.fr/document/hal-02371715 -> https://data.archives-ouvertes.fr/document/hal-02371715
"""
parsed_ref = urlparse(halref)
assert "archives-ouvertes.fr" in parsed_ref.netloc, "Expected HAL (or HAL-data) document URL"
if "hal.archives-ouvertes.fr" in parsed_ref.netloc:
parsed_ref = parsed_ref._replace(netloc="data.archives-ouvertes.fr",
path=f"/document{parsed_ref.path}")
return urlunparse(parsed_ref) | 48ef6629fc3198af2c8004a3dbcbbde6e700cb12 | 21,567 |
def find_best_rate():
"""
Input: Annual salary, semi-annual raise, cost of home
Assumes: a time frame of three years (36 months), a down payment of 25% of the total cost,
current savings starting from 0 and annual return of 4%
Returns the best savings rate within (plus/minus) $100 of the downpayment, and bisection search
else returns false if result is not possible
"""
annual_salary = float(input("Enter your annual salary: "))
total_cost = float(1000000)
semi_annual_raise = float(0.07)
monthly_salary = annual_salary/12
r = 0.04
down_payment = 0.25 * total_cost
current_savings = 0
time = 36
epsilon = 100
low = 0
high = 10000
savings_rate = (low + high)//2
num = 0
while abs(current_savings - down_payment) >= epsilon:
mod_annual_salary = annual_salary #The annual salary we will use to modify/ make changes
current_savings = 0
portion_saved = savings_rate/10000 #Converting our floor/ int division to decimal (as a portion to save)
for month in range(1, time+1):
if month % 6 == 0:
mod_annual_salary += (annual_salary * semi_annual_raise)
monthly_salary = mod_annual_salary/12
monthly_savings = monthly_salary * portion_saved
additional = monthly_savings + (current_savings * r/12) #Additional return considering monthly and current savings
current_savings += additional
#Bisection search
if current_savings < down_payment:
low = savings_rate
else:
high = savings_rate
savings_rate = (low + high)//2
num += 1
if num > 15: #Log_2 (10000) is 13.28... it will not make sense to keep searching after this point
break
if num < 15:
print("Best Savings Rate: {} or {}%".format(portion_saved, portion_saved*100)),
print("Steps in bisection Search: {}".format(num))
return portion_saved
else:
return("It is not possible to pay the down payment in three years") | 451fc72c006182b63233376a701b4cbb855ad39a | 21,568 |
def q_inv(a):
"""Return the inverse of a quaternion."""
return [a[0], -a[1], -a[2], -a[3]] | e8d06e7db6d5b23efab10c07f4b9c6088190fa07 | 21,569 |
import numpy as np
from math import pi
def divide_hex_grid_flower(points, hex_radius=None):
"""Partitions a hexagonal grid into a flower pattern (this is what I used for the final product. Returns a list of partition indices for each point."""
if hex_radius is None: # copied from build_mirror_array()
mini_hex_radius = (10 * 2.5 / 2) + 1
hex_radius = mini_hex_radius * 1.1
points = np.array(points)
# Divide into quarters
partition_indices = np.ones(len(points)) * -1
for i, point in enumerate(points):
x, y, z = point
if np.sqrt(x**2 + y**2) <= 3 * (2*hex_radius + 1) * np.sqrt(3)/2:
partition_indices[i] = 0
else:
θ = np.arctan2(x,y) + pi - 1e-10
partition_indices[i] = 1 + np.floor(6 * θ / (2 * pi))
return partition_indices | 9f77e85d4bbfd00ea5eff6905209aad84e3a9191 | 21,570 |
def fis_gauss2mf(x:float, s1:float, c1:float, s2:float, c2:float):
"""Split Gaussian Member Function"""
t1 = 1.0
t2 = 1.0
if x < c1:
t1 = fis_gaussmf(x, s1, c1)
if x > c2:
t2 = fis_gaussmf(x, s2, c2)
return (t1 * t2) | 443e02dff7ab3827ac0006443964f45a6f9f4ce2 | 21,571 |
from math import sqrt
def _is_trigonal_prism(vectors, dev_cutoff=15):
"""
Triangular prisms are defined by 3 vertices in a triangular pattern on two
aligned planes. Unfortunately, the angles are dependent on the length and
width of the prism. Need more examples to come up with a better way of
detecting this shape.
For now, this code is experimental.
Parameters
----------
vectors : list scitbx.matrix.col
dev_cutoff : float, optional
Returns
-------
bool
"""
if len(vectors) != 6:
return
angles = _bond_angles(vectors)
a_85s, a_135s = [], []
for angle in angles:
if abs(angle[-1] - 85) < abs(angle[-1] - 135):
a_85s.append(angle[-1] - 85)
else:
a_135s.append(angle[-1] - 135)
if len(a_85s) != 9 and len(a_135s) != 6:
return
deviation = sqrt(sum(i ** 2 for i in a_85s + a_135s) / len(angles))
if deviation < dev_cutoff:
return deviation, 6 - len(vectors) | 2b3ffb318b3201923828eea8f4769a6ce854dd58 | 21,572 |
def priority(n=0):
"""
Sets the priority of the plugin.
Higher values indicate a higher priority.
This should be used as a decorator.
Returns a decorator function.
:param n: priority (higher values = higher priority)
:type n: int
:rtype: function
"""
def wrapper(cls):
cls._plugin_priority = n
return cls
return wrapper | 58ab19fd88e9e293676943857a0fa04bf16f0e93 | 21,575 |
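Used as a decorator, e.g.:
@priority(10)
class HighPriorityPlugin:
    pass

print(HighPriorityPlugin._plugin_priority)  # 10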
import math
def vecangle(u,v):
"""
Calculate as accurately as possible the angle between two 3-component vectors u and v.
This formula comes from W. Kahan's advice in his paper "How Futile are Mindless Assessments
of Roundoff in Floating-Point Computation?" (https://www.cs.berkeley.edu/~wkahan/Mindless.pdf),
section 12 "Mangled Angles."
θ=2 atan2(|| ||v||u−||u||v ||, || ||v||u+||u||v ||)
"""
modu = modvec(u)
modv = modvec(v)
vmodu = [modu*v[0] , modu*v[1], modu*v[2] ]
umodv = [modv*u[0] , modv*u[1], modv*u[2] ]
term1 = [umodv[0]-vmodu[0], umodv[1]-vmodu[1], umodv[2]-vmodu[2]]
modterm1 = modvec(term1)
term2 = [umodv[0]+vmodu[0], umodv[1]+vmodu[1], umodv[2]+vmodu[2]]
modterm2 = modvec(term2)
return (2.0*math.atan2(modterm1,modterm2)) | dde6ebed830130f122b0582d4c19963a061a3d31 | 21,576 |
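A quick check, assuming `modvec` (referenced above but not shown) returns the Euclidean norm of a 3-vector; perpendicular unit vectors should give pi/2:
def modvec(v):
    # Stand-in for the helper referenced above.
    return math.sqrt(v[0]**2 + v[1]**2 + v[2]**2)

print(vecangle([1.0, 0.0, 0.0], [0.0, 1.0, 0.0]))  # 1.5707963... (pi/2)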