content | sha1 | id |
---|---|---|
from collections import Counter
from pathlib import Path
from typing import Dict
def get_word_counts(filepath: str) -> Dict[str, int]:
"""
Return a dictionary of key-value pairs where keys are words
from the given file and values are their counts. If there is
no such file, return an empty dictionary.
:param filepath: path to the file
:return: a dictionary of word counts
>>> get_word_counts(Path('scripts/The Invisible Man 1933.txt'))['snow']
6
>>> get_word_counts(Path('scripts/The Time Machine 2002.txt'))['high']
10
"""
filepath = Path(__file__).parent / filepath
if not filepath.exists():
return {}
with open(filepath, 'r') as file:
words = [word.strip('.,!?;:-').lower() for word in file.read().split()]
word_counts = dict(Counter(words))
return word_counts | 30fd14031b00766320ad0f1728042438cb05fd4e | 13,800 |
import ctypes
import numpy as np
def getVanHoveDistances(positions, displacements, L):
"""
Compute van Hove distances between particles of a system of size `L', with
`positions' and `displacements'.
Parameters
----------
positions : (*, 2) float array-like
Positions of the particles.
displacements : (*, 2) float array-like
Displacements of the particles.
L : float
Size of the system box.
Returns
-------
distances : (*^2,) float Numpy array
Van Hove distances.
"""
positions = np.array(positions, dtype=np.double)
N = len(positions)
assert positions.shape == (N, 2)
displacements = np.array(displacements, dtype=np.double)
assert displacements.shape == (N, 2)
distances = np.empty((N**2,), dtype=np.double)
_pycpp.getVanHoveDistances.argtypes = [
ctypes.c_int,
ctypes.c_double,
np.ctypeslib.ndpointer(dtype=np.double, ndim=1, flags='C_CONTIGUOUS'),
np.ctypeslib.ndpointer(dtype=np.double, ndim=1, flags='C_CONTIGUOUS'),
np.ctypeslib.ndpointer(dtype=np.double, ndim=1, flags='C_CONTIGUOUS'),
np.ctypeslib.ndpointer(dtype=np.double, ndim=1, flags='C_CONTIGUOUS'),
np.ctypeslib.ndpointer(dtype=np.double, ndim=1, flags='C_CONTIGUOUS')]
_pycpp.getVanHoveDistances(
N,
L,
np.ascontiguousarray(positions[:, 0]),
np.ascontiguousarray(positions[:, 1]),
np.ascontiguousarray(displacements[:, 0]),
np.ascontiguousarray(displacements[:, 1]),
np.ascontiguousarray(distances))
return distances | a66150cfe238b151f098733d4570c438f1c93906 | 13,801 |
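For reference, a minimal pure-NumPy sketch of the same computation, assuming the C++ routine returns |r_j + Δr_j − r_i| for all particle pairs under the minimum-image convention of the periodic box (the exact convention used by `_pycpp` is an assumption here):
import numpy as np

def van_hove_distances_numpy(positions, displacements, L):
    """Pairwise |r_j(t) - r_i(0)| with periodic wrapping; returns shape (N*N,)."""
    positions = np.asarray(positions, dtype=float)          # (N, 2)
    displacements = np.asarray(displacements, dtype=float)  # (N, 2)
    # separation between displaced particle j and the initial position of particle i
    diff = (positions + displacements)[None, :, :] - positions[:, None, :]
    diff -= L * np.round(diff / L)  # minimum-image convention
    return np.sqrt((diff**2).sum(axis=-1)).ravel()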
from typing import Any
from typing import Union
from typing import List
def plot_local_coordinate_system_matplotlib(
lcs,
axes: plt.Axes.axes = None,
color: Any = None,
label: str = None,
time: Union[pd.DatetimeIndex, pd.TimedeltaIndex, List[pd.Timestamp]] = None,
time_ref: pd.Timestamp = None,
time_index: int = None,
show_origin: bool = True,
show_trace: bool = True,
show_vectors: bool = True,
) -> plt.Axes.axes:
"""Visualize a `weldx.transformations.LocalCoordinateSystem` using matplotlib.
Parameters
----------
lcs : weldx.transformations.LocalCoordinateSystem
The coordinate system that should be visualized
axes : matplotlib.axes.Axes
The target matplotlib axes. If `None` is provided, a new one will be created
color : Any
An arbitrary color. The data type must be compatible with matplotlib.
label : str
Name of the coordinate system
time : pandas.DatetimeIndex, pandas.TimedeltaIndex, List[pandas.Timestamp], or \
LocalCoordinateSystem
The time steps that should be plotted
time_ref : pandas.Timestamp
A reference timestamp that can be provided if the ``time`` parameter is a
`pandas.TimedeltaIndex`
time_index : int
Index of a specific time step that should be plotted
show_origin : bool
If `True`, the origin of the coordinate system will be highlighted in the
color passed as another parameter
show_trace : bool
If `True`, the trace of a time dependent coordinate system will be visualized in
the color passed as another parameter
show_vectors : bool
If `True`, the coordinate axes of the coordinate system are visualized
Returns
-------
matplotlib.axes.Axes :
The axes object that was used as canvas for the plot
"""
if axes is None:
_, axes = plt.subplots(subplot_kw={"projection": "3d", "proj_type": "ortho"})
if lcs.is_time_dependent and time is not None:
lcs = lcs.interp_time(time, time_ref)
if lcs.is_time_dependent and time_index is None:
for i, _ in enumerate(lcs.time):
draw_coordinate_system_matplotlib(
lcs,
axes,
color=color,
label=label,
time_idx=i,
show_origin=show_origin,
show_vectors=show_vectors,
)
label = None
else:
draw_coordinate_system_matplotlib(
lcs,
axes,
color=color,
label=label,
time_idx=time_index,
show_origin=show_origin,
show_vectors=show_vectors,
)
if show_trace and lcs.coordinates.values.ndim > 1:
coords = lcs.coordinates.values
if color is None:
color = "k"
axes.plot(coords[:, 0], coords[:, 1], coords[:, 2], ":", color=color)
return axes | 2df77f0e3343f6ac541ff37991208fb894b44660 | 13,802 |
def saved_searches_list(request):
"""
Renders the saved_searches_list html
"""
args = get_saved_searches_list(request.user)
return render('saved_searches_list.html', args, request) | a2f92c06733113f05501cb242d2ee2bad91917be | 13,803 |
import os
def upload():
"""Upload files. This endpoint is used to upload "trusted" files;
E.i. files created by CERT-EU
E.g. CITAR, CIMBL, IDS signatures, etc.
**Example request**:
.. sourcecode:: http
POST /api/1.0/upload HTTP/1.1
Host: do.cert.europa.eu
Accept: application/json
Content-Type: multipart/form-data; boundary=----WebKitFormBoundaryklDA9
------WebKitFormBoundaryklDA94BtcALil3R2
Content-Disposition: form-data; name="files[0]"; filename="test.gz"
Content-Type: application/x-gzip
------WebKitFormBoundaryklDA94BtcALil3R2--
**Example response**:
.. sourcecode:: http
HTTP/1.0 201 CREATED
Content-Type: application/json
{
"files": [
"test.gz"
],
"message": "Files uploaded"
}
:reqheader Accept: Content type(s) accepted by the client
:reqheader Content-Type: multipart/form-data required
:resheader Content-Type: this depends on `Accept` header or request
:>json array files: List of files saved to disk
:>json string message: Status message
:statuscode 201: Files successfully saved
"""
uploaded_files = []
for idx, file in request.files.items():
filename = secure_filename(file.filename)
file.save(os.path.join(current_app.config['APP_UPLOADS'], filename))
uploaded_files.append(filename)
return ApiResponse({
'message': 'Files uploaded',
'files': uploaded_files
}, 201) | 5da57c64928c9212162667d036423070acbdb4f7 | 13,804 |
def get_active_loan_by_item_pid(item_pid):
"""Return any active loans for the given item."""
return search_by_pid(
item_pid=item_pid,
filter_states=current_app.config.get(
"CIRCULATION_STATES_LOAN_ACTIVE", []
),
) | 6922336876fddd72ce7655bf2cfee298fdc4a766 | 13,805 |
from typing import Set
from typing import TypeVar
X = TypeVar("X")
def _get_szymkiewicz_simpson_coefficient(a: Set[X], b: Set[X]) -> float:
"""Calculate the Szymkiewicz–Simpson coefficient.
.. seealso:: https://en.wikipedia.org/wiki/Overlap_coefficient
"""
if a and b:
return len(a.intersection(b)) / min(len(a), len(b))
return 0.0 | 42d39edf9fa2465605717e0892bcbca05df7799b | 13,806 |
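A quick worked check of the overlap coefficient |A ∩ B| / min(|A|, |B|):
a, b = {"a", "b", "c"}, {"b", "c", "d"}
assert abs(_get_szymkiewicz_simpson_coefficient(a, b) - 2 / 3) < 1e-12
assert _get_szymkiewicz_simpson_coefficient(set(), b) == 0.0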
import pandas as pd
from sklearn.model_selection import train_test_split
from zenml.steps import Output
def data_splitter(
input: pd.DataFrame,
) -> Output(train=pd.DataFrame, test=pd.DataFrame,):
"""Splits the input dataset into train and test slices."""
train, test = train_test_split(input, test_size=0.1, random_state=13)
return train, test | ddcc28b4430a8901fcde540cf321d4ea43f123d7 | 13,807 |
def reload(module, exclude=('sys', 'os.path', 'builtins', '__main__',
'numpy', 'numpy._globals')):
"""Recursively reload all modules used in the given module. Optionally
takes a list of modules to exclude from reloading. The default exclude
list contains sys, __main__, and __builtin__, to prevent, e.g., resetting
display, exception, and io hooks.
"""
global found_now
for i in exclude:
found_now[i] = 1
try:
with replace_import_hook(deep_import_hook):
return deep_reload_hook(module)
finally:
found_now = {} | c97fd1942dae583ff236ed73d33b53b685cafd32 | 13,808 |
def get_words_for_board(words, board_size, packing_constant=1.1):
"""Pick a cutoff which is just beyond limit of the board size."""
# Order the words by length. It's easier to pack shorter words, so prioritize them.
# This is SUPER hacky, should have a Word class that handles these representational differences.
words = sorted(words, key=lambda w: len(w.replace(" ", "").replace("-", "")))
cum_len = np.cumsum([len(word.replace(" ", "").replace("-", "")) for word in words])
num_words = None
for word_idx, cum_letters in enumerate(cum_len):
# Try to pack in slightly more letters than would fit on the word without overlaps,
# as governed by the packing constant.
if cum_letters > packing_constant * board_size**2:
num_words = word_idx
break
if not num_words:
raise ValueError(f"Too few semantic neighbor words to pack a {board_size}x{board_size} board.")
return words[:num_words] | e5f74806fa15c1f1fbe78e0ac218d6d808611dfe | 13,809 |
def booleans(key, val):
"""returns ucsc formatted boolean"""
if val in (1, True, "on", "On", "ON"):
val = "on"
else:
val = "off"
return val | f210a2ce6b998e65d2e5934f1318efea0f96c709 | 13,810 |
def merge_param_classes(*cls_list,
merge_positional_params: bool = True) -> type(Params):
"""
Merge multiple Params classes into a single merged params class and return the merged class.
Note that this will not flatten the nested classes.
:param cls_list: A list of Params subclasses or classes to merge into a single
Params class
:param merge_positional_params: Whether or not to merge the positional params in the classes
"""
if len(cls_list) == 1:
return cls_list[0]
class MergedParams(Params):
__doc__ = f'A Combination of {len(cls_list)} Params Classes:\n'
append_params_attributes(MergedParams, *cls_list)
for params_cls in cls_list:
MergedParams.__doc__ += f'\n\t {params_cls.__name__} - {params_cls.__doc__}'
# resolve positional arguments:
if merge_positional_params:
params_to_delete, positional_param = _merge_positional_params(
[(k, v) for k, v in MergedParams.__dict__.items() if not k.startswith('_')])
if positional_param is None and params_to_delete == []:
return MergedParams
setattr(MergedParams, 'positionals', positional_param)
positional_param.__set_name__(MergedParams, 'positionals')
for k in params_to_delete:
delattr(MergedParams, k)
return MergedParams | c42907652f971d7cd6d208017b8faaacacddb5b2 | 13,811 |
import random
import collections
def make_pin_list(eff_cnt):
"""Generates a pin list with an effect pin count given by eff_cnt."""
cards = [1] * eff_cnt
cards.extend([0] * (131 - len(cards)))
random.shuffle(cards)
deck = collections.deque(cards)
pin_list = []
for letters, _ in KEY_WHEEL_DATA:
pins = [c for c in letters if deck.pop()]
pin_list.append(''.join(pins))
return pin_list | 2c15a09928231993f09a373354ee29723463280d | 13,812 |
def drop(cols, stmt):
"""
Function: Drops columns from the statement.
Input: List of columns to drop.
Output: Statement with columns that are not dropped.
"""
col_dict = column_dict(stmt)
col_names = [c for c in col_dict.keys()]
colintention = [c.evaluate(stmt).name if isinstance(c, Intention) else c for c in cols]
new_cols = list(filter(lambda c: c not in colintention, col_names))
undrop = select(new_cols, stmt)
return undrop | 73ecf35077824281a5ebc4e26776b963e0cb378e | 13,813 |
def ConvertVolumeSizeString(volume_size_gb):
"""Converts the volume size defined in the schema to an int."""
volume_sizes = {
"500 GB (128 GB PD SSD x 4)": 500,
"1000 GB (256 GB PD SSD x 4)": 1000,
}
return volume_sizes[volume_size_gb] | b1f90e5ded4d543d88c4f129ea6ac03aeda0c04d | 13,814 |
def render_template_with_system_context(value):
"""
Render provided template with a default system context.
:param value: Template string.
:type value: ``str``
:param context: Template context.
:type context: ``dict``
"""
context = {
SYSTEM_KV_PREFIX: KeyValueLookup(),
}
rendered = render_template(value=value, context=context)
return rendered | 6df2e7a652595b35919638791aae5465258edf0f | 13,815 |
def ToTranslation(tree, placeholders):
"""Converts the tree back to a translation, substituting the placeholders
back in as required.
"""
text = tree.ToString()
assert text.count(PLACEHOLDER_STRING) == len(placeholders)
transl = tclib.Translation()
for placeholder in placeholders:
index = text.find(PLACEHOLDER_STRING)
if index > 0:
transl.AppendText(text[:index])
text = text[index + len(PLACEHOLDER_STRING):]
transl.AppendPlaceholder(placeholder)
if text:
transl.AppendText(text)
return transl | 36fca25dfc78e0f37ddc6193a17f2d29c6192228 | 13,816 |
import torch
def complex(real, imag):
"""Return a 'complex' tensor
- If the `fft` module is present, returns a proper complex tensor
- Otherwise, stacks the real and imaginary components along the last
dimension.
Parameters
----------
real : tensor
imag : tensor
Returns
-------
complex : tensor
"""
if _torch_has_complex:
return torch.complex(real, imag)
else:
return torch.stack([real, imag], -1) | 272a293e3918e5e067f251a7dae10a4d2c56abf4 | 13,817 |
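A usage sketch (assuming PyTorch is installed): on a build with complex support the result is a complex tensor of shape (2,), otherwise a real tensor of shape (2, 2).
real = torch.tensor([1.0, 2.0])
imag = torch.tensor([0.5, -0.5])
z = complex(real, imag)  # note: this helper shadows Python's builtin complex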
def get_snps(x: str) -> tuple:
"""Parse a SNP line and return name, chromsome, position."""
snp, loc = x.split(' ')
chrom, position = loc.strip('()').split(':')
return snp, chrom, int(position) | 52672c550c914d70033ab45fd582fb9e0f97f023 | 13,818 |
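The expected line format, inferred from the parsing logic (the SNP name and coordinates below are illustrative):
assert get_snps("rs12345 (7:117559590)") == ("rs12345", "7", 117559590)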
def qr_match(event, context, user=None):
"""
Function used to associate a given QR code with the given email
"""
user_coll = coll('users')
result = user_coll.update_one({'email': event["link_email"]}, {'$push': {'qrcode': event["qr_code"]}})
if result.matched_count == 1:
return {"statusCode": 200, "body": "success"}
else:
return {"statusCode": 404, "body": "User not found"} | 7af48bc9fc97d34eb182eb8f429d93396079db87 | 13,819 |
def update_has_started(epoch, settings):
"""
Tells whether update has started or not
:param epoch: epoch number
:param settings: settings dictionary
:return: True if the update has started, False otherwise
"""
return is_baseline_with_update(settings['baseline']) and epoch >= settings['update']['start_epoch'] | d2d2c8d7d8de0a13414a116121fb3cec47bc1d3f | 13,820 |
import torch
def compute_i_th_moment_batches(input, i):
"""
compute the i-th moment for every feature map in the batch
:param input: tensor
:param i: the moment to be computed
:return:
"""
n, c, h, w = input.size()
input = input.view(n, c, -1)
mean = torch.mean(input, dim=2).view(n, c, 1, 1)
eps = 1e-5
var = torch.var(input, dim=2).view(n, c, 1, 1) + eps
std = torch.sqrt(var)
if i == 1:
return mean
elif i == 2:
return std
else:
sol = ((input.view(n, c, h, w) - mean.expand(n, c, h, w)) / std).pow(i)
sol = torch.mean(sol.view(n, c, -1), dim=2).view(n, c, 1, 1)
return sol | 2ab3b7bfd34b482cdf55d5a066b57852182b5b6a | 13,821 |
def hs_online_check(onion, put_url):
"""Online check for hidden service."""
try:
print(onion)
return hs_http_checker(onion, put_url)
except Exception as error:
print "Returned nothing."
print error
return "" | 19b7b2f45581e2bdb907d416be1885f569841a86 | 13,822 |
def plotfile(fname, cols=(0,), plotfuncs=None,
comments='#', skiprows=0, checkrows=5, delimiter=',',
names=None, subplots=True, newfig=True, **kwargs):
"""
Plot the data in a file.
*cols* is a sequence of column identifiers to plot. An identifier
is either an int or a string. If it is an int, it indicates the
column number. If it is a string, it indicates the column header.
matplotlib will make column headers lower case, replace spaces with
underscores, and remove all illegal characters; so ``'Adj Close*'``
will have name ``'adj_close'``.
- If len(*cols*) == 1, only that column will be plotted on the *y* axis.
- If len(*cols*) > 1, the first element will be an identifier for
data for the *x* axis and the remaining elements will be the
column indexes for multiple subplots if *subplots* is *True*
(the default), or for lines in a single subplot if *subplots*
is *False*.
*plotfuncs*, if not *None*, is a dictionary mapping identifier to
an :class:`~matplotlib.axes.Axes` plotting function as a string.
Default is 'plot', other choices are 'semilogy', 'fill', 'bar',
etc. You must use the same type of identifier in the *cols*
vector as you use in the *plotfuncs* dictionary, e.g., integer
column numbers in both or column names in both. If *subplots*
is *False*, then including any function such as 'semilogy'
that changes the axis scaling will set the scaling for all
columns.
- *comments*: the character used to indicate the start of a comment
in the file, or *None* to switch off the removal of comments
- *skiprows*: is the number of rows from the top to skip
- *checkrows*: is the number of rows to check to validate the column
data type. When set to zero all rows are validated.
- *delimiter*: is the character(s) separating row items
- *names*: if not None, is a list of header names. In this case, no
header will be read from the file
If *newfig* is *True*, the plot always will be made in a new figure;
if *False*, it will be made in the current figure if one exists,
else in a new figure.
kwargs are passed on to plotting functions.
Example usage::
# plot the 2nd and 4th column against the 1st in two subplots
plotfile(fname, (0,1,3))
# plot using column names; specify an alternate plot type for volume
plotfile(fname, ('date', 'volume', 'adj_close'),
plotfuncs={'volume': 'semilogy'})
Note: plotfile is intended as a convenience for quickly plotting
data from flat files; it is not intended as an alternative
interface to general plotting with pyplot or matplotlib.
"""
if newfig:
fig = figure()
else:
fig = gcf()
if len(cols) < 1:
raise ValueError('must have at least one column of data')
if plotfuncs is None:
plotfuncs = {}
with cbook._suppress_matplotlib_deprecation_warning():
r = mlab._csv2rec(fname, comments=comments, skiprows=skiprows,
checkrows=checkrows, delimiter=delimiter,
names=names)
def getname_val(identifier):
'return the name and column data for identifier'
if isinstance(identifier, str):
return identifier, r[identifier]
elif isinstance(identifier, Number):
name = r.dtype.names[int(identifier)]
return name, r[name]
else:
raise TypeError('identifier must be a string or integer')
xname, x = getname_val(cols[0])
ynamelist = []
if len(cols) == 1:
ax1 = fig.add_subplot(1, 1, 1)
funcname = plotfuncs.get(cols[0], 'plot')
func = getattr(ax1, funcname)
func(x, **kwargs)
ax1.set_ylabel(xname)
else:
N = len(cols)
for i in range(1, N):
if subplots:
if i == 1:
ax = ax1 = fig.add_subplot(N - 1, 1, i)
else:
ax = fig.add_subplot(N - 1, 1, i, sharex=ax1)
elif i == 1:
ax = fig.add_subplot(1, 1, 1)
yname, y = getname_val(cols[i])
ynamelist.append(yname)
funcname = plotfuncs.get(cols[i], 'plot')
func = getattr(ax, funcname)
func(x, y, **kwargs)
if subplots:
ax.set_ylabel(yname)
if ax.is_last_row():
ax.set_xlabel(xname)
else:
ax.set_xlabel('')
if not subplots:
ax.legend(ynamelist)
if xname == 'date':
fig.autofmt_xdate() | 493fccdf7d3661b9acffd22dbfd5799126a3d4f8 | 13,823 |
import argparse
def parse_options():
"""Parses and checks the command-line options.
Returns:
A tuple containing the options structure.
"""
usage = 'Usage: ./update_mapping.py [options]'
desc = ('Example: ./update_mapping.py -o mapping.json.\n'
'This script generates and stores a file that gives the\n'
'mapping between phone serial numbers and BattOr serial numbers\n'
'Mapping is based on which physical ports on the USB hubs the\n'
'devices are plugged in to. For instance, if there are two hubs,\n'
'the phone connected to port N on the first hub is mapped to the\n'
'BattOr connected to port N on the second hub, for each N.')
parser = argparse.ArgumentParser(usage=usage, description=desc)
parser.add_argument('-o', '--output', dest='out_file',
default='mapping.json', type=str,
action='store', help='mapping file name')
parser.add_argument('-u', '--hub', dest='hub_types',
action='append', choices=['plugable_7port'],
help='USB hub types.')
options = parser.parse_args()
if not options.hub_types:
options.hub_types = ['plugable_7port']
return options | 7f7ee6a90e152023dbf6c6e163361a8c327108ae | 13,824 |
def retrieve(object_type, **kwargs):
"""Get objects from the Metatlas object database.
This will automatically select only objects created by the current
user unless `username` is provided. Use `username='*'` to search
against all users.
Parameters
----------
object_type: string
The type of object to search for (i.e. "Groups").
**kwargs
Specific search queries (i.e. name="Sargasso").
Use '%' for wildcard patterns (i.e. description='Hello%').
If you want to match a '%' character, use '%%'.
Returns
-------
objects: list
List of Metatlas Objects meeting the criteria. Will return the
latest version of each object.
"""
workspace = Workspace.get_instance()
out = workspace.retrieve(object_type, **kwargs)
workspace.close_connection()
return out | 54d35c23dd92ad65c5911d8c451b5b1fcbd131da | 13,825 |
def read_plot_pars() :
"""
Parameters are (in this order):
Minimum box width,
Maximum box width,
Box width iterations,
Minimum box length,
Maximum box length,
Box length iterations,
Voltage difference
"""
def extract_parameter_from_string(string):
# returns the part of the string after the ':' sign, without the trailing newline
return string[string.find(':') + 1:].rstrip('\n')
f = open("input.txt", "r")
pars = []
line_counter = 0
for line in f:
if ((line_counter > 0) and (line_counter < 8)):
pars.append(extract_parameter_from_string(line))
line_counter += 1
return pars | c78dc8e2a86b20eb6007850a70c038de5bf9f841 | 13,826 |
from rx.core import Observable, typing
def create(subscribe: typing.Subscription) -> Observable:
"""Creates an observable sequence object from the specified
subscription function.
.. marble::
:alt: create
[ create(a) ]
---1---2---3---4---|
Args:
subscribe: Subscription function.
Returns:
An observable sequence that can be subscribed to via the given
subscription function.
"""
return Observable(subscribe) | 79c149545475a7686f8f8dffaed8f343604dd4aa | 13,827 |
def tocl(d):
"""Generate TOC, in-page links to the IDs we're going to define below"""
anchors = sorted(d.keys(), key=_lower)
return TemplateData(t='All The Things', e=[a for a in anchors]) | 8c27c42f05e4055a8e195d4d352345acc7821bae | 13,828 |
def get_upper_parentwidget(widget, parent_position: int):
"""This function replaces this:
self.parentWidget().parentWidget().parentWidget()
with this:
get_upper_parentwidget(self, 3)
:param widget: QWidget
:param parent_position: Which parent
:return: Wanted parent widget
"""
while parent_position > 0:
widget = widget.parentWidget()
parent_position -= 1
return widget | ff010f3d9e000cfa3c58160e150c858490f2412d | 13,829 |
def DirectorySizeAsString(directory):
"""Returns size of directory as a string."""
return SizeAsString(DirectorySize(directory)) | 3e3d3b029da40502c2f0e7e5867786d586ad8109 | 13,830 |
from functools import wraps
from peltak.core import shell
def patch_is_tty(value):
""" Wrapped test function will have peltak.core.shell.is_tty set to *value*. """
def decorator(fn): # pylint: disable=missing-docstring
@wraps(fn)
def wrapper(*args, **kw): # pylint: disable=missing-docstring
is_tty = shell.is_tty
shell.is_tty = value
try:
return fn(*args, **kw)
finally:
shell.is_tty = is_tty
return wrapper
return decorator | 77655d32a5572824978910a12378a54d83b7e81e | 13,831 |
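A usage sketch for the decorator; the test name and assertion are hypothetical:
@patch_is_tty(False)
def test_plain_output():
    # inside the wrapped call, peltak.core.shell.is_tty is temporarily False
    assert shell.is_tty is False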
import torch
import math
def uniform_unit_scaling(tensor: torch.Tensor, nonlinearity: str = "linear"):
"""
An initialiser which preserves output variance for approximately gaussian
distributed inputs. This boils down to initialising layers using a uniform
distribution in the range `(-sqrt(3/dim[0]) * scale, sqrt(3 / dim[0]) * scale)`, where
`dim[0]` is equal to the input dimension of the parameter and the `scale`
is a constant scaling factor which depends on the non-linearity used.
See `Random Walk Initialisation for Training Very Deep Feedforward Networks
<https://www.semanticscholar.org/paper/Random-Walk-Initialization-for-Training-Very-Deep-Sussillo-Abbott/be9728a0728b6acf7a485225b1e41592176eda0b>`_
for more information.
# Parameters
tensor : `torch.Tensor`, required.
The tensor to initialise.
nonlinearity : `str`, optional (default = `"linear"`)
The non-linearity which is performed after the projection that this
tensor is involved in. This must be the name of a function contained
in the `torch.nn.functional` package.
# Returns
The initialised tensor.
"""
size = 1.0
# Estimate the input size. This won't work perfectly,
# but it covers almost all use cases where this initialiser
# would be expected to be useful, i.e in large linear and
# convolutional layers, as the last dimension will almost
# always be the output size.
for dimension in list(tensor.size())[:-1]:
size *= dimension
activation_scaling = torch.nn.init.calculate_gain(nonlinearity, tensor)
max_value = math.sqrt(3 / size) * activation_scaling
return tensor.data.uniform_(-max_value, max_value) | aa14ec45c389c55c141d9bffd6ef370313fdf446 | 13,832 |
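A usage sketch, assuming PyTorch is available; the layer shape is arbitrary:
weight = torch.empty(256, 128)
uniform_unit_scaling(weight, nonlinearity="linear")
# values now lie in (-sqrt(3 / 256), sqrt(3 / 256)), since the gain for "linear" is 1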
def get_results(elfFile):
"""Converts and returns collected data."""
staticSizes = parseElf(elfFile)
romSize = sum([size for key, size in staticSizes.items() if key.startswith("rom_")])
ramSize = sum([size for key, size in staticSizes.items() if key.startswith("ram_")])
results = {
"rom": romSize,
"rom_rodata": staticSizes["rom_rodata"],
"rom_code": staticSizes["rom_code"],
"rom_misc": staticSizes["rom_misc"],
"ram": ramSize,
"ram_data": staticSizes["ram_data"],
"ram_zdata": staticSizes["ram_zdata"],
}
return results | b60052f702e53655ab1a109ea2bb039e78aabaf5 | 13,833 |
def path_element_to_dict(pb):
"""datastore.entity_pb.Path_Element converter."""
return {
'type': pb.type(),
'id': pb.id(),
'name': pb.name(),
} | 2a4e757dedf6707dc412248f84b377c2f375e70c | 13,834 |
import tempfile
import os
def copy_to_tmp(in_file):
"""Copies a file to a tempfile.
The point of this is to copy small files from CNS to tempdirs on
the client when using code that's that hasn't been Google-ified yet.
Examples of files are the vocab and config files of the Hugging Face
tokenizer.
Arguments:
in_file: Path to the object to be copied, likely in CNS
Returns:
Path where the object ended up (inside of the tempdir).
"""
# We just want to use Python's safe tempfile name generation algorithm
with tempfile.NamedTemporaryFile(delete=False) as f_out:
target_path = os.path.join(tempfile.gettempdir(), f_out.name)
gfile.Copy(in_file, target_path, overwrite=True)
return target_path | 97b98214df23079d5aa9ee0ece072204d36d2f33 | 13,835 |
import numpy as np
def get_orr_tensor(struct):
""" Gets orientation of all molecules in the struct """
molecule_list = get_molecules(struct)
orr_tensor = np.zeros((len(molecule_list),3,3))
for i,molecule_struct in enumerate(molecule_list):
orr_tensor[i,:,:] = get_molecule_orientation(molecule_struct)
return orr_tensor | faf42cf76168191835d9dd354ae9bc03198829ad | 13,836 |
import frappe
from frappe.utils import nowdate
def make_request_for_quotation(supplier_data=None):
"""
:param supplier_data: List containing supplier data
"""
supplier_data = supplier_data if supplier_data else get_supplier_data()
rfq = frappe.new_doc('Request for Quotation')
rfq.transaction_date = nowdate()
rfq.status = 'Draft'
rfq.company = '_Test Company'
rfq.message_for_supplier = 'Please supply the specified items at the best possible rates.'
for data in supplier_data:
rfq.append('suppliers', data)
rfq.append("items", {
"item_code": "_Test Item",
"description": "_Test Item",
"uom": "_Test UOM",
"qty": 5,
"warehouse": "_Test Warehouse - _TC",
"schedule_date": nowdate()
})
rfq.submit()
return rfq | ee0663231fc0bb06f6f43fa5abecdced048c1458 | 13,837 |
import numpy as np
def mlrPredict(W, data):
"""
mlrObjFunction predicts the label of data given the data and parameter W
of Logistic Regression
Input:
W: the matrix of weight of size (D + 1) x 10. Each column is the weight
vector of a Logistic Regression classifier.
X: the data matrix of size N x D
Output:
label: vector of size N x 1 representing the predicted label of
corresponding feature vector given in data matrix
"""
label = np.zeros((data.shape[0], 1))
row = data.shape[0]
##################
# YOUR CODE HERE #
##################
# HINT: Do not forget to add the bias term to your input data
# Adding biases
biases = np.full((row,1),1)
X = np.concatenate((biases,data), axis=1)
t = np.sum(np.exp(np.dot(X,W)),axis=1)
t = t.reshape(t.shape[0],1)
theta_value = np.exp(np.dot(X,W))/t
label = np.argmax(theta_value,axis=1)
label = label.reshape(row,1)
return label | a37359433b020eb625b37ea57cb15282c4f82c8d | 13,838 |
import glob
import filecmp
import subprocess
import pandas as pd
def write_urdb_rate_data(urdb_rate_data, urdb_filepath='./', overwrite_identical=True):
"""
Takes Pandas DataFrame containing URDB rate data and stores as .csv at
urdb_filepath. The 'overwrite_identical' variable indicates whether
'urdb_rate_data' should be compared to previous URDB download and replace it
if they are found to be identical. This avoids data duplication when it is
unnecessary. Function returns True if data previously exists, False if it is
unique, and None if the comparison was never performed (overwrite_identical
== False).
"""
todays_date = helpers.todays_date()
new_urdb_file = urdb_filepath+'usurdb_{}.csv'.format(todays_date)
urdb_rate_data.to_csv(new_urdb_file, index=False)
# Check if db has changed since last download
if overwrite_identical:
prev_urdb_files = glob.glob(urdb_filepath+'*.csv')
if len(prev_urdb_files)>1:
prev_urdb_dates = [fp.split('usurdb_')[1].split('.csv')[0] for fp in prev_urdb_files]
prev_urdb_dates.remove(todays_date)
most_recent_date = pd.Series(prev_urdb_dates).map(int).max()
most_recent_urdb_file = urdb_filepath+'usurdb_{}.csv'.format(most_recent_date)
if filecmp.cmp(new_urdb_file, most_recent_urdb_file, shallow=True):
subprocess.run('rm {}'.format(most_recent_urdb_file), shell=True)
prev_exists = True
else:
prev_exists = False
else:
prev_exists = False
else:
prev_exists = None
return prev_exists | cde6f16f4ba8abf2c744fb6ef579c0beddd407fd | 13,839 |
def add(n):
"""Add 1."""
return n + 1 | c62cee4660540ae62b5b73369bdeb56ccb0088d6 | 13,840 |
import psutil
import os
def get_system_status(memory_total=False,
memory_total_actual=False,
memory_total_usage=False,
memory_total_free=False,
all_pids=False,
swap_memory=False,
pid=False):
"""
Parameters
----------
threads: bool
return dict {id: (user_time, system_time)}
memory_maps: bool
return dict {path: rss}
Note
----
All memory is returned in `MiB`
To calculate memory_percent:
get_system_status(memory_usage=True) / get_system_status(memory_total=True) * 100
"""
# ====== general system query ====== #
if memory_total:
return psutil.virtual_memory().total / float(2**20)
if memory_total_actual:
return psutil.virtual_memory().available / float(2**20)
if memory_total_usage:
return psutil.virtual_memory().used / float(2**20)
if memory_total_free:
return psutil.virtual_memory().free / float(2**20)
if swap_memory:
tmp = psutil.swap_memory()
# psutil returns an immutable namedtuple, so build a rescaled copy in MiB
mib = float(2**20)
return tmp._replace(total=tmp.total / mib, used=tmp.used / mib,
free=tmp.free / mib, sin=tmp.sin / mib, sout=tmp.sout / mib)
if all_pids:
return psutil.pids()
if pid:
return os.getpid() | 79b3b43a3e046c2fc1237cde103c6f416ae1f01b | 13,841 |
import numpy as np
def _area(x1, y1, x2, y2, x3, y3):
"""Heron's formula."""
a = np.sqrt(pow(x1 - x2, 2) + pow(y1 - y2, 2))
b = np.sqrt(pow(x3 - x2, 2) + pow(y3 - y2, 2))
c = np.sqrt(pow(x1 - x3, 2) + pow(y3 - y1, 2))
s = (a + b + c) / 2
return np.sqrt(s * (s - a) * (s - b) * (s - c)) | 456ffe56a76fbea082939c278b5f0f2ebaf8c395 | 13,842 |
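A quick sanity check of Heron's formula on a 3-4-5 right triangle, whose area is 6:
assert abs(_area(0.0, 0.0, 3.0, 0.0, 0.0, 4.0) - 6.0) < 1e-9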
def plot_coarray(array, ax=None, show_location_errors=False):
"""Visualizes the difference coarray of the input array.
Args:
array (~doatools.model.arrays.ArrayDesign): A sensor array.
ax (~matplotlib.axes.Axes): Matplotlib axes used for the plot. If not
specified, a new figure will be created. Default value is ``None``.
show_location_errors (bool): If set to ``True``, will visualized the
perturbed array if the input array has location errors.
Returns:
The axes object containing the plot.
"""
return _plot_array_impl(array, ax, True, show_location_errors) | e4a0d1fe4ab48b5050c55d44bd4ca4342cc9f9a9 | 13,843 |
def get_publicKey(usrID): # TODO: from barbican
"""
Get the user's public key
Returns:
Public key from meta-container (Keys) in meta-tenant
"""
auth = v3.Password(auth_url=AUTH_URL,username=SWIFT_USER,password=SWIFT_PASS,project_name='demo',project_domain_id="Default",user_domain_name='Default')
sess = session.Session(auth=auth)
barbican = bc.Client(session=sess)
keystone = kc.Client(session=sess)
try:
user = keystone.users.get(usrID)
dict_keys = json.loads(user.description)
ref = dict_keys.get('Public_Key','')
ref = "%s/secrets/%s" %(BARBICAN_URL,ref)
secret_node = barbican.secrets.get(ref)
except Exception as err:
return
return secret_node.payload | 545dec9826273830767f395903cae878df8213b0 | 13,844 |
def pad_sequence(sequences, batch_first=False, padding_value=0.0):
"""Pad a list of variable-length Variables.
This method stacks a list of variable-length :obj:`nnabla.Variable` s with the padding_value.
:math:`T_i` is the length of the :math:`i`-th Variable in the sequences.
:math:`B` is the batch size equal to the length of the sequences.
:math:`T` is the max of :math:`T_i` for all :math:`i`.
:math:`*` is the remaining dimensions including none.
.. note::
This function **must** be used in the dynamic computation mode.
Example:
.. code-block:: python
import numpy as np
import nnabla as nn
import nnabla.functions as F
import nnabla.utils.rnn as rnn_utils
nn.set_auto_forward(True)
l2v = lambda ldata: nn.Variable.from_numpy_array(np.asarray(ldata))
a = l2v([1, 1, 1, 1])
b = l2v([2, 2, 2])
c = l2v([2, 2, 2])
d = l2v([3, 3])
e = l2v([3, 3])
sequences = [a, b, c, d, e]
padded_sequence = rnn_utils.pad_sequence(sequences)
print(padded_sequence.d)
Args:
sequences (list of :obj:`nnabla.Variable`): Sequence of the variable of (:math:`T_i`, :math:`*`) shape.
batch_first (bool): If False, output is of (:math:`T`, :math:`B`, :math:`*`) shape,
otherwise (:math:`B`, :math:`T`, :math:`*`).
padding_value (float): Padding value.
Returns:
:obj:`nnabla.Variable` of (:math:`T`, :math:`B`, :math:`*`) or (:math:`B`, :math:`T`, :math:`*`) shape
"""
B = len(sequences)
T = max([e.shape[0] for e in sequences])
shape0 = (B, T) if batch_first else (T, B)
shape1 = sequences[0].shape[1:]
padded_sequence = F.constant(padding_value, shape0 + shape1)
for b, s in enumerate(sequences):
l = s.shape[0]
if batch_first:
padded_sequence[b, :l, ...] = s
else:
padded_sequence[:l, b, ...] = s
return padded_sequence | 449c7681d39edc0494269aefd488aa44548a68df | 13,845 |
import urllib.request
import yaml
import requests
def _fetch_global_config(config_url, github_release_url, gh_token):
"""
Fetch the index_runner_spec configuration file from the Github release
using either the direct URL to the file or by querying the repo's release
info using the GITHUB API.
"""
if config_url:
print('Fetching config from the direct url')
# Fetch the config directly from config_url
with urllib.request.urlopen(config_url) as res: # nosec
return yaml.safe_load(res) # type: ignore
else:
print('Fetching config from the release info')
# Fetch the config url from the release info
if gh_token:
headers = {'Authorization': f'token {gh_token}'}
else:
headers = {}
tries = 0
# Sometimes Github returns usage errors and a retry will solve it
while True:
release_info = requests.get(github_release_url, headers=headers).json()
if release_info.get('assets'):
break
if tries == _FETCH_CONFIG_RETRIES:
raise RuntimeError(f"Cannot fetch config from {github_release_url}: {release_info}")
tries += 1
for asset in release_info['assets']:
if asset['name'] == 'config.yaml':
download_url = asset['browser_download_url']
with urllib.request.urlopen(download_url) as res: # nosec
return yaml.safe_load(res)
raise RuntimeError("Unable to load the config.yaml file from index_runner_spec") | c436bfb7692ce0d100367691588d511ed95bce99 | 13,846 |
def parse_color(c, desc):
"""Check that a given value is a color."""
return c | ebabefbd56de120a753723f1dccb0f7c12af2fe6 | 13,847 |
import os
import ndextcgaloader
def get_package_dir():
"""
Gets directory where package is installed
:return:
"""
return os.path.dirname(ndextcgaloader.__file__) | 447b6ed962119787c3c11f457fcf81bc31b0ae0d | 13,848 |
def __virtual__():
"""Only load gnocchiv1 if requirements are available."""
if REQUIREMENTS_MET:
return 'gnocchiv1'
else:
return False, ("The gnocchiv1 execution module cannot be loaded: "
"os_client_config or keystoneauth are unavailable.") | 5dc2a83ba6a93a37f037978bfe89edf6ec2fe103 | 13,849 |
from selenium import webdriver
def setup():
"""Start headless Chrome in docker container."""
options = webdriver.ChromeOptions()
options.add_argument('--no-sandbox')
options.add_argument('--headless')
options.add_argument('--disable-gpu')
driver = webdriver.Chrome(options=options)
driver.implicitly_wait(5)
return driver | 79c135732b39513f270ac0f670ffddc89b576f75 | 13,850 |
import json
def lambdaResponse(statusCode,
body,
headers={},
isBase64Encoded=False):
"""
A utility to wrap the lambda function call returns with the right status code,
body, and switches.
"""
# Make sure the body is a json object
if not isinstance(body, str):
body = json.dumps(body)
# Make sure the content type is json
header = headers
header["Content-Type"] = "application/json"
header["Access-Control-Allow-Origin"]= "*"
response = {
"isBase64Encoded": isBase64Encoded,
"statusCode": statusCode,
"headers": header,
"body": body
}
return response | 0159ba871c38ce550752d47ffea536c33a5d6b3e | 13,851 |
def singleton(class_):
"""
Specify that a class is a singleton
:param class_:
:return:
"""
instances = {}
def getinstance(*args, **kwargs):
if class_ not in instances:
instances[class_] = class_(*args, **kwargs)
return instances[class_]
return getinstance | 678205d133783f6b0720876546deed9ed7c59d72 | 13,852 |
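A usage sketch: every call to a decorated class returns the same instance.
@singleton
class Config:
    def __init__(self):
        self.values = {}

assert Config() is Config()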
from typing import Optional
from typing import Union
from typing import List
from datetime import datetime
from dateutil.parser import parse
def is_datetime(
value: Scalar, formats: Optional[Union[str, List[str]]] = None,
typecast: Optional[bool] = True
) -> bool:
"""Test if a given string value can be converted into a datetime object for
a given data format. The function accepts a single date format or a list of
formates. If no format is given, ISO format is assumed as the default.
Parameters
----------
value: scalar
Scalar value that is tested for being a date.
formats: string or list(string)
Date format string using Python strptime() format directives. This
can be a list of date formats.
typecast: bool, default=True
Attempt to parse string values as dates if True.
Returns
-------
bool
"""
if isinstance(value, datetime):
return True
elif not typecast or not isinstance(value, str):
return False
# Try to convert the given string to a datatime object with the format
# that was specified at object instantiation. This will raise an
# exception if the value does not match the datetime format string.
# Duplicate code depending on whether format is a list of a string.
if formats is None:
# Issue \#39: dateutil.parse (falsely?) identifies the following
# strings as dates. For column profiling we want to exclude these:
# 14A; 271 1/2; 41-05; 6-8
#
# As a work-around for now we expect a valid date to have at least six
# characters (one for day, month, two for year and at least two
# non-alphanumeric characters.
#
# An alternative solution was pointed out by @remram44:
# https://gitlab.com/ViDA-NYU/datamart/datamart/-/blob/39462a5dca533a1e55596ddcbfc0ac7e98dce4de/lib_profiler/datamart_profiler/temporal.py#L63 # noqa: E501
#
# All solutions seem to suffer from the problem that values like
# 152-12 are valid dates (e.g., 152-12-01 in this case) but also
# valid house numbers, for example. There is no good solution here.
# For now we go with the assumption that if someone wants to specify
# a date it should have at least a day, month and year separated by
# some special (non-alphanumeric) charcater.
if len(value) >= 6 and has_two_spec_chars(value):
try:
parse(value, fuzzy=False)
return True
except (OverflowError, TypeError, ValueError):
pass
else:
return to_datetime_format(value=value, formats=formats) is not None
return False | 642fbe509c7b13a905dc4c65b43dcec20f36fb7e | 13,853 |
def sortkey(d):
"""Split d on "_", reverse and return as a tuple."""
parts=d.split("_")
parts.reverse()
return tuple(parts) | 1d8f8864a3d0bfd7dae8711bca183317e0f3fc0e | 13,854 |
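For example, names are compared by their trailing component first:
assert sortkey("alpha_beta_gamma") == ("gamma", "beta", "alpha")
assert sorted(["a_2", "b_1"], key=sortkey) == ["b_1", "a_2"]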
def resolve_stream_name(streams, stream_name):
"""Returns the real stream name of a synonym."""
if stream_name in STREAM_SYNONYMS and stream_name in streams:
for name, stream in streams.items():
if stream is streams[stream_name] and name not in STREAM_SYNONYMS:
return name
return stream_name | 48fe2f5eca72b30bd669477807c9b7476eb4ef18 | 13,855 |
def get_split_cifar100_tasks(num_tasks, batch_size,run,paradigm,dataset):
"""
Returns data loaders for all tasks of split CIFAR-100
:param num_tasks:
:param batch_size:
:return:
datasets = {}
# convention: tasks starts from 1 not 0 !
# task_id = 1 (i.e., first task) => start_class = 0, end_class = 4
cifar_transforms = torchvision.transforms.Compose([torchvision.transforms.ToTensor(),])
cifar_train = torchvision.datasets.CIFAR100('./data/', train=True, download=True, transform=cifar_transforms)
cifar_test = torchvision.datasets.CIFAR100('./data/', train=False, download=True, transform=cifar_transforms)
for task_id in range(1, num_tasks+1):
train_loader, test_loader = get_split_cifar100(task_id, batch_size, cifar_train, cifar_test)
datasets[task_id] = {'train': train_loader, 'test': test_loader}
return datasets
"""
"""
datasets = {}
paradigm = 'class_iid'
run = 0
dataset = core50( paradigm, run)
for task_id in range(0, num_tasks):
train_loader, val, test_loader = dataset.getNextClasses(task_id) #get_split_cifar100(task_id, batch_size, cifar_train, cifar_test)
datasets[task_id] = {'train': train_loader, 'test': test_loader}
return datasets
"""
datasets = {}
#paradigm = 'class_iid'
#run = 0
#dataset = load_datasets( paradigm, run)
if dataset == 'core50':
for task_id in range(0, num_tasks):
train_loader, test_loader = dataset_core50(task_id,batch_size,run,paradigm,dataset) #get_split_cifar100(task_id, batch_size, cifar_train, cifar_test)
datasets[task_id] = {'train': train_loader, 'test': test_loader}
if dataset == 'toybox':
for task_id in range(0, num_tasks):
train_loader, test_loader = dataset_toybox(task_id,batch_size,run,paradigm,dataset) #get_split_cifar100(task_id, batch_size, cifar_train, cifar_test)
datasets[task_id] = {'train': train_loader, 'test': test_loader}
if dataset == 'ilab':
for task_id in range(0, num_tasks):
train_loader, test_loader = dataset_ilab(task_id,batch_size,run,paradigm,dataset) #get_split_cifar100(task_id, batch_size, cifar_train, cifar_test)
datasets[task_id] = {'train': train_loader, 'test': test_loader}
if dataset == 'cifar100':
for task_id in range(0, num_tasks):
train_loader, test_loader = dataset_cifar100(task_id,batch_size,run,paradigm,dataset) #get_split_cifar100(task_id, batch_size, cifar_train, cifar_test)
datasets[task_id] = {'train': train_loader, 'test': test_loader}
return datasets | 003c74a55a4e9a1f645a6bc930abf65342abd0fc | 13,856 |
def is_point_in_triangle(pt, v1, v2, v3):
"""Returns True if the 2D point pt is within the triangle defined by v1-3.
https://www.gamedev.net/forums/topic/295943-is-this-a-better-point-in-triangle-test-2d/
"""
b1 = sign(pt, v1, v2) < 0.0
b2 = sign(pt, v2, v3) < 0.0
b3 = sign(pt, v3, v1) < 0.0
return ((b1 == b2) and (b2 == b3)) | 2ff58dfb4efe939513cc901772aa744296ebb960 | 13,857 |
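The snippet relies on a `sign` helper that is not shown above; a common definition (an assumption here) is the 2D cross product of the edge vectors, so the three booleans record on which side of each edge the point lies:
def sign(pt, v1, v2):
    return (pt[0] - v2[0]) * (v1[1] - v2[1]) - (v1[0] - v2[0]) * (pt[1] - v2[1])

# e.g. the centroid of a triangle is always inside it
assert is_point_in_triangle((1.0, 1.0), (0.0, 0.0), (3.0, 0.0), (0.0, 3.0))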
import numpy as np
import sklearn.model_selection
def precomputed_aug_experiment(
clf,
auged_featurized_x_train,
auged_featurized_y_train,
auged_featurized_x_train_to_source_idxs,
auged_featurized_x_test,
auged_featurized_y_test,
auged_featurized_x_test_to_source_idxs,
aug_iter,
train_idxs_scores,
n_aug_sample_points,
update_scores=False,
weight_aug_samples=False,
use_loss=False,
stratified_sampling_x_train_ks=None,
):
"""
This is a precomputed version of the aug_experiment.
Here, we expect training sets to be augmented and featurized up front.
This function will index into the augmented set (with featurization)
to get the input that would be fed into the classifier.
@param clf The classifier to use (e.g., logistic regression)
@param auged_featurized_x_train The augmented and featurized training set.
@param auged_featurized_y_train The labels of the training set.
@param auged_featurized_x_train_to_source_idxs A list of idxs corresponding
to the source of augmented images from the original training set. -1 means
that the point is an original point.
@param auged_featurized_x_test The augmented and featurized test set.
@param auged_featurized_y_test The labels of the test set.
@param auged_featurized_x_test_to_source_idxs A list of idxs corresponding
to the source of augmented images from the original test set. -1 means
that the point is an original point.
@param aug_iter The policy to use.
@param train_idxs_scores The scores to use for the policies (e.g.,
LOO influence or loss).
@param stratified_sampling_x_train_ks The population type of each train
sample for stratified sampling. Sampling is round robin in numeric order.
@return An list of accuracies on the test set and a list of the points that
were chosen for augmentation.
"""
influence_acc = []
aug_iter_idxs = []
original_mask_train = auged_featurized_x_train_to_source_idxs < 0
original_x_train = auged_featurized_x_train[original_mask_train]
original_y_train = auged_featurized_y_train[original_mask_train]
auged_x_train = np.copy(original_x_train)
auged_y_train = np.copy(original_y_train)
n_aug_sample_points = set(n_aug_sample_points)
if weight_aug_samples:
sample_weight = np.ones(len(original_x_train))
else:
sample_weight = None
if stratified_sampling_x_train_ks is not None:
aug_idxs = stratified_sampling_to_aug_idxs(
train_idxs_scores,
aug_iter,
stratified_sampling_x_train_ks,
)
else:
aug_idxs = np.array(list(aug_iter(train_idxs_scores))).flatten()
assert len(np.unique(aug_idxs)) == len(aug_idxs)
already_auged = set()
while len(already_auged) < len(original_x_train):
assert len(train_idxs_scores) == len(original_x_train)
next_idxs = [idx for idx in aug_idxs if idx not in already_auged]
idx = next_idxs[0]
already_auged.add(idx)
aug_mask = auged_featurized_x_train_to_source_idxs == idx
x_aug_ = auged_featurized_x_train[aug_mask]
auged_x_train = np.concatenate(
[
auged_x_train,
x_aug_,
],
axis=0)
y_aug_ = auged_featurized_y_train[aug_mask]
auged_y_train = np.concatenate(
[
auged_y_train,
y_aug_,
],
axis=0)
if weight_aug_samples:
# We downweight all points from the original train point
rescale_weight = 1.0 / (len(x_aug_) + 1)
weight_aug_ = np.full(len(x_aug_), rescale_weight)
sample_weight = np.concatenate([
sample_weight,
weight_aug_,
],
axis=0)
sample_weight[idx] = rescale_weight
if len(already_auged) in n_aug_sample_points:
fit_params = {"logistic_reg__sample_weight": sample_weight}
clf.fit(auged_x_train, auged_y_train, **fit_params)
aug_train_poisoned_acc = clf.score(
auged_featurized_x_test,
auged_featurized_y_test)
influence_acc.append(aug_train_poisoned_acc)
aug_iter_idxs.append(idx)
if update_scores:
if isinstance(clf, sklearn.model_selection.GridSearchCV):
if use_loss:
train_idxs_scores = (clf
.best_estimator_
.named_steps["logistic_reg"]
.log_losses(L2_alpha=0.0))
else:
train_idxs_scores = (clf
.best_estimator_
.named_steps["logistic_reg"]
.LOO_influence())
else:
if use_loss:
train_idxs_scores = (clf
.named_steps["logistic_reg"]
.log_losses(L2_alpha=0.0))
else:
train_idxs_scores = (clf
.named_steps["logistic_reg"]
.LOO_influence())
train_idxs_scores = train_idxs_scores[:len(original_x_train)]
if stratified_sampling_x_train_ks is not None:
aug_idxs = stratified_sampling_to_aug_idxs(
train_idxs_scores,
aug_iter,
stratified_sampling_x_train_ks,
)
else:
aug_idxs = np.array(
list(aug_iter(train_idxs_scores))
).flatten()
return influence_acc, aug_iter_idxs | 50f03f08c7ce0777658ca3f84691b940f190e4cd | 13,858 |
import pandas as pd
import yfinance as yf
def get_yahoo_data(symbol, start_date, end_date):
"""Returns pricing data for a YAHOO stock symbol.
Parameters
----------
symbol : str
Symbol of the stock on Yahoo Finance. You can refer to this link:
https://www.nasdaq.com/market-activity/stocks/screener?exchange=nasdaq.
start_date : str
Starting date (YYYY-MM-DD) of the period that you want to get data on
end_date : str
Ending date (YYYY-MM-DD) of the period you want to get data on
Returns
-------
pandas.DataFrame
Stock data (in OHLCAV format) for the specified company and date range
"""
df = yf.download(symbol, start=start_date, end=end_date)
df = df.reset_index()
rename_dict = {
"Date": "dt",
"Open": "open",
"High": "high",
"Low": "low",
"Close": "close",
"Adj Close": "adj_close",
"Volume": "volume",
}
rename_list = ["dt", "open", "high", "low", "close", "adj_close", "volume"]
df = df.rename(columns=rename_dict)[rename_list].drop_duplicates()
df["dt"] = pd.to_datetime(df.dt)
return df.set_index("dt") | adc2a6186d96c76a75a62391c7f8d7534836f5bd | 13,859 |
def first_n(m: dict, n: int):
"""Return first n items of dict"""
return {k: m[k] for k in list(m.keys())[:n]} | 57ccc9f8913c60c592b38211900fe8d28feffb4c | 13,860 |
import numpy as np
from typing import List
from typing import Dict
from typing import Union
def listdictnp_combine(
lst: List,
method: str = "concatenate",
axis: int = 0,
keep_nested: bool = False,
allow_error: bool = False,
) -> Dict[str, Union[np.ndarray, List]]:
"""Concatenate or stack a list of dictionaries contains numpys along with error handling
Parameters
----------
lst : list
list of dicts containing np arrays
method : str
'concatenate' or 'stack'
axis : int
axis to concat or stack over
keep_nested : bool
keep nested structure of list or not
allow_error : bool
allow for error handling. If the op does not succeed, a list is provided
Returns
-------
np.array OR list of np.array in case of error
"""
for k in range(len(lst)):
assert (
lst[0].keys() == lst[k].keys()
), "Dict keys do not match in listdictnp_combine fct"
# get keys
keys = lst[0].keys()
output_dict = dict()
for key in keys:
# merge nested list
if keep_nested:
tmp = [None] * len(lst)
for k in range(len(lst)):
tmp[k] = lst[k][key]
else:
tmp = list()
for k in range(len(lst)):
tmp = [*tmp, *lst[k][key]]
# convert to numpy if possible
output_dict[key] = listnp_combine(
tmp, method=method, axis=axis, allow_error=allow_error
)
return output_dict | b4527342c8a3b90c797e7ef88326c97b4933d1b0 | 13,861 |
def find_pure_symbol(symbols, clauses):
"""Find a symbol and its value if it appears only as a positive literal
(or only as a negative) in clauses.
>>> find_pure_symbol([A, B, C], [A|~B,~B|~C,C|A])
(A, True)
"""
for s in symbols:
found_pos, found_neg = False, False
for c in clauses:
if not found_pos and s in disjuncts(c): found_pos = True
if not found_neg and ~s in disjuncts(c): found_neg = True
if found_pos != found_neg: return s, found_pos
return None, None | 657c011fb0ee865252e7deed4672cde08c6db2e9 | 13,862 |
import tensorflow as tf
def cross_entropy_emphasized_loss(labels,
predictions,
corrupted_inds,
axis=0,
alpha=0.3,
beta=0.7,
regularizer=None):
"""
Compute cross entropy loss over training examples that have been
corrupted along certain dimensions
:param labels: tensor of training example with no corruption added
:param predictions: output tensor of autoencoder
:param corrupted_inds: indices of corrupted dimensions (if any)
:param axis: axis along which components are taken
:param alpha: weight for error on components that were corrupted
:param beta: weight for error on components that were not corrupted
:return: cross entropy loss, emphasized by corrupted component weight
"""
assert (labels.shape[axis] == predictions.shape[axis])
assert (labels.dtype == predictions.dtype)
num_elems = labels.shape[axis].value * FLAGS.batch_size
# corrupted features
x_c = tf.boolean_mask(labels, corrupted_inds)
z_c = tf.boolean_mask(predictions, corrupted_inds)
# uncorrupted features
x = tf.boolean_mask(labels, ~corrupted_inds)
z = tf.boolean_mask(predictions, ~corrupted_inds)
# if training on examples with corrupted indices
if x_c is not None:
lhs = alpha * (-tf.reduce_sum(tf.add(tf.multiply(x_c, tf.log(z_c)),
tf.multiply(1.0 - x_c, tf.log(1.0 - z_c)))))
rhs = beta * (-tf.reduce_sum(tf.add(tf.multiply(x, tf.log(z)),
tf.multiply(1.0 - x, tf.log(1.0 - z)))))
else:
lhs = 0
rhs = -tf.reduce_sum(tf.add(tf.multiply(labels, tf.log(predictions)),
tf.multiply(1.0 - labels, tf.log(1.0 - predictions))))
return tf.add(lhs, rhs) / num_elems | e4ebb4e3198dea085789c81388522130ed867e3f | 13,863 |
def get_process_list(node: Node):
"""Analyse the process description and return the Actinia process chain and the name of the processing result
:param node: The process node
:return: (output_objects, actinia_process_list)
"""
input_objects, process_list = check_node_parents(node=node)
output_objects = []
# First analyse the data entry
if "id" not in node.arguments:
raise Exception("Process %s requires parameter <id>" % PROCESS_NAME)
input_object = DataObject.from_string(node.arguments["id"])
spatial_extent = None
if "spatial_extent" in node.arguments:
spatial_extent = node.arguments["spatial_extent"]
temporal_extent = None
if "temporal_extent" in node.arguments:
temporal_extent = node.arguments["temporal_extent"]
bands = None
if "bands" in node.arguments:
bands = node.arguments["bands"]
if input_object.is_strds() and \
(temporal_extent is not None or bands is not None):
output_object = DataObject(
name=create_output_name(input_object.name, PROCESS_NAME),
datatype=input_object.datatype)
else:
output_object = input_object
output_objects.append(output_object)
node.add_output(output_object)
pc = create_process_chain_entry(input_object,
spatial_extent,
temporal_extent,
bands,
output_object)
process_list.extend(pc)
return output_objects, process_list | 00f5e6c767975def09fbea800a8b74cfcd12f935 | 13,864 |
def _validate_image_formation(the_sicd):
"""
Validate the image formation.
Parameters
----------
the_sicd : sarpy.io.complex.sicd_elements.SICD.SICDType
Returns
-------
bool
"""
if the_sicd.ImageFormation is None:
the_sicd.log_validity_error(
'ImageFormation attribute is not populated, and ImageFormType is {}. This '
'cannot be valid.'.format(the_sicd.ImageFormType))
return False # nothing more to be done.
alg_types = []
for alg in ['RgAzComp', 'PFA', 'RMA']:
if getattr(the_sicd, alg) is not None:
alg_types.append(alg)
if len(alg_types) > 1:
the_sicd.log_validity_error(
'ImageFormation.ImageFormAlgo is set as {}, and multiple SICD image formation parameters {} are set.\n'
'Only one image formation algorithm should be set, and ImageFormation.ImageFormAlgo '
'should match.'.format(the_sicd.ImageFormation.ImageFormAlgo, alg_types))
return False
elif len(alg_types) == 0:
if the_sicd.ImageFormation.ImageFormAlgo is None:
the_sicd.log_validity_warning(
'ImageFormation.ImageFormAlgo is not set, and there is no corresponding\n'
'RgAzComp, PFA, or RMA SICD parameters set. Setting ImageFormAlgo '
'to "OTHER".'.format(the_sicd.ImageFormation.ImageFormAlgo))
the_sicd.ImageFormation.ImageFormAlgo = 'OTHER'
return True
elif the_sicd.ImageFormation.ImageFormAlgo != 'OTHER':
the_sicd.log_validity_error(
'No RgAzComp, PFA, or RMA SICD parameters populated, but ImageFormation.ImageFormAlgo '
'is set as {}.'.format(the_sicd.ImageFormation.ImageFormAlgo))
return False
return True
# there is exactly one algorithm type populated
return _validate_image_form_parameters(the_sicd, alg_types[0]) | b68c9a767e2499b8149389e2e207a0f05d20bf44 | 13,865 |
def handle_closet(player, level, reward_list):
"""
Handle a closet
:param player: The player object for the player
:param level: The level that the player is on
:return reward: The reward given to the player
"""
# Print the dialogue for the closet
print "You found a closet. It appears to be unlocked."
print "Should you open it?"
# Get the players move
player_move = handle_options(player, ["Open the Closet!", "No! Its a trap!"])
reward = None
if player_move == 1:
# Decide what happens when the person opens the closet
closet_outcome = randint(0, 5)
if closet_outcome < level: # There is a rat inside the closet
print "OH NO! There is a giant man eating rat in there!"
handle_fight(player, 3, 10)
else: # You get a helpful reward from the closet
reward = reward_list[randint(0, len(reward_list)-1)]
print "Congratulations! You found a " + reward + "!"
print "This item increases your damage points by", 2 * level
player.add_damage_points(2*level)
return reward | ab170cb556fd688edeac80eac9bb7577df771a33 | 13,866 |
def module_path_to_test_path(module):
"""Convert a module locator to a proper test filename.
"""
return "test_%s.py" % module_path_to_name(module) | 17997d17d64686deec97d4aa9f23a14f04ff5516 | 13,867 |
def inspect_bom(filename):
"""Inspect file for bom."""
encoding = None
try:
with open(filename, "rb") as f:
encoding = has_bom(f.read(4))
except Exception: # pragma: no cover
# print(traceback.format_exc())
pass
return encoding | 84da40bc941053c4e6d18934c27b3e1d63318762 | 13,868 |
from packaging.specifiers import SpecifierSet
def parse_requirement(text):
"""
Parse a requirement such as 'foo>=1.0'.
Returns a (name, specifier) named tuple.
"""
match = REQUIREMENT_RE.match(text)
if not match:
raise ValueError("Invalid requirement: %s" % text)
name = match.group('name').strip()
spec = SpecifierSet(match.group('specifier') or '')
return Requirement(name, spec) | 95dab6f3dd6784bf73233e80cfb946f904984a1d | 13,869 |
import numpy as np
def H_split(k, N, eps):
    """Entropy of the split in binary search including overlap, specified by
    eps"""
    return (k / N) * (np.log(k) + H_epsilon(k, eps)) + ((N - k) / N) * (np.log(N - k) + H_epsilon(N - k, eps)) | 555d5e56550851084fdfa148dc7936c75649a197 | 13,870
import tensorflow as tf
def date_features(inputs, features_slice, columns_index) -> tf.Tensor:
    """Return a date string tensor assembled from the selected feature columns."""
date = features(inputs, features_slice, columns_index)
date = tf.cast(date, tf.int32)
date = tf.strings.as_string(date)
return tf.strings.reduce_join(date, separator="-", axis=-1, keepdims=True) | 3362019b24a6f3104d858d2ddf17f0fae4060d7b | 13,871 |
import pickle
def save_calib(filename, calib_params):
""" Saves calibration parameters as '.pkl' file.
Parameters
----------
filename : str
Path to save file, must be '.pkl' extension
calib_params : dict
Calibration parameters to save
Returns
-------
saved : bool
Saved successfully.
"""
    if not isinstance(calib_params, dict):
        raise TypeError("calib_params must be 'dict'")
    if not filename.endswith('.pkl'):
        raise IOError("filename must be '.pkl' extension")
    with open(filename, 'wb') as output:
        pickle.dump(calib_params, output)
    saved = True
    return saved | 6735c8a6e96158b9fc580b6e61609b5ae7733fe0 | 13,872
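A short usage sketch for the function above; the calibration keys are illustrative only.

calib = {'focal_length': 1000.0, 'principal_point': (320.0, 240.0)}
assert save_calib('camera_calibration.pkl', calib) is True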
def context_to_dict(context):
"""convert a django context to a dict"""
the_dict = {}
for elt in context:
the_dict.update(dict(elt))
return the_dict | b319c6be4efa83c91eefa249c8be90824bc0158f | 13,873 |
from collections import defaultdict
def returnItemsWithMinSupport(itemSet, transactionList, minSupport, freqSet):
"""calculates the support for items in the itemSet and returns a subset
of the itemSet each of whose elements satisfies the minimum support"""
_itemSet = set()
localSet = defaultdict(int)
for item in itemSet:
for transaction in transactionList:
if item.issubset(transaction):
freqSet[item] += 1
localSet[item] += 1
for item, count in list(localSet.items()):
support = float(count)/len(transactionList)
if support >= minSupport:
_itemSet.add(item)
return _itemSet | e1290778548f198f87fc210c8a78bbfadaf0de9f | 13,874 |
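A small worked example, assuming items are frozensets as in typical Apriori implementations:

from collections import defaultdict

transactions = [frozenset(['milk', 'bread']),
                frozenset(['milk']),
                frozenset(['bread', 'butter'])]
candidates = {frozenset(['milk']), frozenset(['bread']), frozenset(['butter'])}
freqSet = defaultdict(int)
frequent = returnItemsWithMinSupport(candidates, transactions, 0.5, freqSet)
# 'milk' and 'bread' each appear in 2 of 3 transactions (support ~0.67) and are kept;
# 'butter' appears only once (support ~0.33) and is filtered out.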
def create_P(P_δ, P_ζ, P_ι):
"""
Combine `P_δ`, `P_ζ` and `P_ι` into a single matrix.
Parameters
----------
P_δ : ndarray(float, ndim=1)
Probability distribution over the values of δ.
P_ζ : ndarray(float, ndim=2)
Markov transition matrix for ζ.
P_ι : ndarray(float, ndim=1)
Probability distribution over the values of ι.
Returns
----------
    P : ndarray(float, ndim=4)
        Joint probability distribution over the values of δ, ζ and ι.
        Probabilities vary by δ on the first axis, by the current ζ on the second
        axis, by the next ζ on the third axis, and by ι on the fourth axis.
"""
P = \
P_δ[:, None, None, None] * P_ζ[None, :, :, None] * \
P_ι[None, None, None, :]
return P | 0afdef50c50563421bb7c6f3f928fa6b3e5f4733 | 13,875 |
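A quick shape check with tiny inputs; the ASCII variable names below are stand-ins for the Greek parameters.

import numpy as np

P_delta = np.array([0.3, 0.7])               # distribution over 2 values of δ
P_zeta = np.array([[0.9, 0.1], [0.2, 0.8]])  # 2x2 Markov matrix for ζ
P_iota = np.array([0.5, 0.5])                # distribution over 2 values of ι
P = create_P(P_delta, P_zeta, P_iota)
# P.shape == (2, 2, 2, 2); P.sum() == 2.0 because each row of P_zeta sums to one.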
import attrs
import numpy as np
def sel_nearest(
dset,
lons,
lats,
tolerance=2.0,
unique=False,
exact=False,
dset_lons=None,
dset_lats=None,
):
"""Select sites from nearest distance.
Args:
dset (Dataset): Stations SpecDataset to select from.
lons (array): Longitude of sites to interpolate spectra at.
lats (array): Latitude of sites to interpolate spectra at.
tolerance (float): Maximum distance to use site for interpolation.
unique (bool): Only returns unique sites in case of repeated inexact matches.
exact (bool): Require exact matches.
dset_lons (array): Longitude of stations in dset.
dset_lats (array): Latitude of stations in dset.
Returns:
Selected SpecDataset at locations defined by (lons, lats).
Note:
Args `dset_lons`, `dset_lats` are not required but can improve performance when
`dset` is chunked with site=1 (expensive to access station coordinates) and
improve precision if projected coordinates are provided at high latitudes.
"""
coords = Coordinates(dset, lons=lons, lats=lats, dset_lons=dset_lons, dset_lats=dset_lats)
station_ids = []
for lon, lat in zip(coords.lons, coords.lats):
closest_id, closest_dist = coords.nearest(lon, lat)
if closest_dist > tolerance:
raise AssertionError(
f"Nearest site from (lat={lat}, lon={lon}) is {closest_dist:g} "
f"deg away but tolerance is {tolerance:g} deg."
)
if exact and closest_dist > 0:
raise AssertionError(
f"Exact match required but no site at (lat={lat}, lon={lon}), "
f"nearest site is {closest_dist} deg away."
)
station_ids.append(closest_id)
if unique:
station_ids = list(set(station_ids))
dsout = dset.isel(**{attrs.SITENAME: station_ids})
# Return longitudes in the convention provided
if coords.consistent is False:
dsout.lon.values = coords._swap_longitude_convention(dsout.lon.values)
dsout = dsout.assign_coords({attrs.SITENAME: np.arange(len(station_ids))})
return dsout | ebf22cdeb30215a76312f2cdd8223a2d24bf6af6 | 13,876 |
import datasets
def evaluate(dataset, predictions, gts, output_folder):
"""evaluate dataset using different methods based on dataset type.
Args:
dataset: Dataset object
predictions(dict): each item in the list represents the
prediction results for one image.
gt(dict): Ground truth for each batch
output_folder: output folder, to save evaluation files or results.
Returns:
evaluation result
"""
args = dict(
predictions=predictions, gts=gts, output_folder=output_folder,
)
if isinstance(dataset, datasets.MNIST):
return do_mnist_evaluation(**args)
elif isinstance(dataset, datasets.MWPose):
return do_mwpose_evaluation(dataset=dataset, **args)
elif isinstance(dataset, datasets.ModelNetHdf):
return do_modelnet_evaluation(**args)
else:
dataset_name = dataset.__class__.__name__
raise NotImplementedError("Unsupported dataset type {}.".format(dataset_name)) | 85c0232c53de091f2293d042b944fe8768a9ac91 | 13,877 |
from lxml import etree
from vlescrapertools import getAuthedSession
def html_xml_save(
s=None, possible_sc_link=None, table="htmlxml", course_presentation=None
):
"""Save the HTML and XML for a VLE page page."""
if not possible_sc_link:
# should really raise error here
print("need a link")
if not s:
if "learn2.open.ac.uk" in possible_sc_link:
s = getAuthedSession()
else:
s = possible_sc_link
typ, html_page_url, rawxml, html_src = get_sc_page(possible_sc_link, s)
if typ:
dbrowdict = {
"possible_sc_link": possible_sc_link,
"doctype": typ,
"html_url": html_page_url,
"xml": rawxml,
"html_src": html_src,
"course_presentation": course_presentation,
"courseCode": "",
"courseTitle": "",
"itemTitle": "",
}
else:
dbrowdict = {}
# Get some metadata from the XML
# Item/CourseCode
# Item/CourseTitle
# Item/ItemTitle
if typ == "XML":
root = etree.fromstring(rawxml.encode("utf-8"))
# If the course code is contaminated by a presentation suffix, get rid of the presentation code
dbrowdict["courseCode"] = flatten(root.find("CourseCode")).split("-")[0]
dbrowdict["courseTitle"] = flatten(root.find("CourseTitle"))
dbrowdict["itemTitle"] = flatten(root.find("ItemTitle"))
if dbrowdict:
DB[table].insert(dbrowdict)
return typ, html_page_url, rawxml, html_src | 37bb86769c86d851e3fec8dabc17534dfdecde60 | 13,878 |
import os
import pytest
def app(request):
"""An instance of the Flask app that points at a test database.
If the TEST_DATABASE environment variable is set to "postgres", launch a temporary PostgreSQL
server that gets torn down at the end of the test run.
"""
database = os.environ.get('TEST_DATABASE', 'sqlite')
if database == 'postgres':
try:
psql = request.getfixturevalue('postgresql_proc')
uri = f'postgresql+psycopg2://{psql.user}:@{psql.host}:{psql.port}/'
except pytest.FixtureLookupError as error:
raise Exception('TEST_POSTGRESQL was set but pytest-postgresql was not installed') from error
else:
uri = 'sqlite://'
main.app.app.config['SQLALCHEMY_DATABASE_URI'] = uri
return main.app.app | 52cac3b340ef0fb5a92449242644c1061a5cba2b | 13,879 |
def htmr(t,axis="z"):
"""
Calculate the homogeneous transformation matrix of a rotation
respect to x,y or z axis.
"""
    from sympy import sin, cos, Matrix, eye
if axis in ("z","Z",3):
M = Matrix([[cos(t),-sin(t),0,0],
[sin(t),cos(t),0,0],
[0,0,1,0],
[0,0,0,1]])
elif axis in ("y","Y",2):
M = Matrix([[cos(t),0,sin(t),0],
[0,1,0,0],
[-sin(t),0,cos(t),0],
[0,0,0,1]])
elif axis in ("x","X",1):
M = Matrix([[1,0,0,0],
[0,cos(t),-sin(t),0,],
[0,sin(t),cos(t),0],
[0,0,0,1]])
else:
return eye(4)
return M | b3941680f22b2eb48da15b2bb1a6e39c05e3b5c3 | 13,880 |
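A quick sanity check: a 90-degree rotation about z maps the x-axis onto the y-axis.

from sympy import pi, Matrix

Rz = htmr(pi/2, axis="z")
assert Rz * Matrix([1, 0, 0, 1]) == Matrix([0, 1, 0, 1])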
import numpy as np
from scipy.special import erfc
def vt(n, gm, gsd, dmin=None, dmax=10.):
"""Evaluate the total volume of the particles between two diameters.
The CDF of the lognormal distribution is calculated using equation 8.12
from Seinfeld and Pandis.
Mathematically, it is represented as:
.. math::
V_t=\\frac{π}{6}∫_{-∞}^{∞}D_p^3n_N^e(ln D_p)d lnD_p \\;\\;(\mu m^3 cm^{-3})
Parameters
----------
n : float
Total aerosol number concentration in units of #/cc
gm : float
Median particle diameter (geometric mean) in units of :math:`\mu m`.
gsd : float
Geometric Standard Deviation of the distribution.
    dmin : float
        The minimum particle diameter in microns. Default value is None, which is
        treated as 0 :math:`\mu m`.
dmax : float
The maximum particle diameter in microns. Default value is 10 :math:`\mu m`.
Returns
-------
Volume | float
Returns the total volume of particles between :math:`D_{min}` and
:math:`D_{max}` in units of :math:`\mu m^3 cm^{-3}`
See Also
--------
opcsim.equations.pdf.dv_ddp
opcsim.equations.pdf.dv_dlndp
opcsim.equations.pdf.dv_dlogdp
Examples
--------
Integrate a sample distribution between 0 and 2.5 microns:
>>> d = opcsim.AerosolDistribution()
>>> d.add_mode(1e3, 100, 1.5, "mode 1")
>>> n = opcsim.equations.cdf.vt(1e3, 0.1, 1.5, dmax=2.5)
"""
res = (np.pi/12.)*n*(gm**3) * np.exp(9./2.*(np.log(gsd)**2)) * \
erfc((1.5*np.sqrt(2) * np.log(gsd)) - (np.log(dmax/gm) / (np.sqrt(2) * np.log(gsd))))
if dmin is not None and dmin > 0.0:
res -= vt(n, gm, gsd, dmin=None, dmax=dmin)
return res | ba407dc86bbf3201bd597f729f2397ef9428e72b | 13,881 |
def core_value_encode(origin):
"""
转换utf-8编码为社会主义核心价值观编码
:param origin:
:return:
"""
hex_str = str2hex(origin)
twelve = hex2twelve(hex_str)
core_value_iter = twelve_2_core_value(twelve)
return ''.join(core_value_iter) | 7b81540f7e7184ec60fb6820e3548201d67eec29 | 13,882 |
def user_query_ahjs_is_ahj_official_of(self, request, queryset):
"""
Admin action for the User model. Redirects the admin to
a change list of AHJs the selected users are AHJ officials of.
"""
model_name = 'ahj'
field_key_pairs = [field_key_pair('AHJPK', 'AHJPK')]
queryset = AHJUserMaintains.objects.filter(UserID__in=queryset, MaintainerStatus=True)
return load_change_list_with_queryset(request, queryset, model_name, field_key_pairs) | 4d97f25f2647a92a9690bf3360bd3fd63b03d631 | 13,883 |
def get_cache_node_count(
cluster_id: str, configuration: Configuration = None, secrets: Secrets = None
) -> int:
"""Returns the number of cache nodes associated to the cluster
:param cluster_id: str: the name of the cache cluster
:param configuration: Configuration
:param secrets: Secrets
:example:
{
"type": "probe",
"name": "validate cache node count",
"tolerance": 3,
"provider": {
"type": "python",
"module": "chaosaws.elasticache.probes",
"func": "get_cache_node_count",
"arguments": {
"cluster_id": "MyTestCluster"
}
}
}
"""
response = describe_cache_cluster(
cluster_id, configuration=configuration, secrets=secrets
)
return response["CacheClusters"][0].get("NumCacheNodes", 0) | e4a4b3cd6d0bf7416ffe5a3d86725a614ad1c41c | 13,884 |
import string
import nltk
from operator import itemgetter
def top_sentences(query, sentences, idfs, n):
"""
Given a `query` (a set of words), `sentences` (a dictionary mapping
sentences to a list of their words), and `idfs` (a dictionary mapping words
to their IDF values), return a list of the `n` top sentences that match
the query, ranked according to idf. If there are ties, preference should
be given to sentences that have a higher query term density.
"""
# Process query.
query = set(
[
word.lower()
for word in query
if word not in string.punctuation
and word not in nltk.corpus.stopwords.words("english")
]
)
# Create a list tuples (sentence, sum_idfs, qt_density) to sort the sentences.
results = []
for sentence, words in sentences.items():
# Determine the total sum of IDF values and query term density for each
# sentence.
sum_idfs = 0
for word in query:
if word in words:
sum_idfs += idfs[word]
qt_density = sum(words.count(word) for word in query) / len(words)
results.append((sentence, sum_idfs, qt_density))
# Sort sentences by their total sum of IDF values and query term density.
ranked_sentences = [
sentence
for sentence, sum_idfs, qt_density in sorted(
results, key=itemgetter(1, 2), reverse=True
)
]
# Return the 'n' top sentences.
return ranked_sentences[:n] | 5533b96848baea5afa614e691d2d2ae07c4a16a9 | 13,885 |
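A minimal sketch of how the function is called; it assumes the NLTK stopwords corpus has been downloaded.

sentences = {
    "Python is a programming language.": ["python", "is", "a", "programming", "language", "."],
    "The weather is nice today.": ["the", "weather", "is", "nice", "today", "."],
}
idfs = {"python": 2.0, "programming": 1.5, "weather": 1.0, "nice": 1.0}
top_sentences({"python", "programming"}, sentences, idfs, n=1)
# -> ["Python is a programming language."]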
def load_distribution(label):
"""Load sample distributions as described by Seinfeld+Pandis Table 8.3.
There are currently 7 options including: Urban, Marine, Rural, Remote
continental, Free troposphere, Polar, and Desert.
Parameters
----------
label : {'Urban' | 'Marine' | 'Rural' | 'Remote Continental' | 'Free Troposphere' | 'Polar' | 'Desert'}
Choose which sample distribution to load.
Returns
-------
An instance of the AerosolDistribution class
Examples
--------
>>> d = opcsim.load_distribution("Urban")
"""
label = label.lower()
if label not in DISTRIBUTION_DATA.keys():
raise ValueError("Invalid label.")
_tmp = AerosolDistribution(label)
for each in DISTRIBUTION_DATA[label]:
_tmp.add_mode(each[0], each[1], 10**each[2], each[3])
return _tmp | 3dfd2fea5c165c331255e3b350e1f92a37919726 | 13,886 |
from typing import List
import numpy as np
import SimpleITK as sitk
def split_4d_itk(img_itk: sitk.Image) -> List[sitk.Image]:
"""
Helper function to split 4d itk images into multiple 3 images
Args:
img_itk: 4D input image
Returns:
List[sitk.Image]: 3d output images
"""
img_npy = sitk.GetArrayFromImage(img_itk)
spacing = img_itk.GetSpacing()
origin = img_itk.GetOrigin()
direction = np.array(img_itk.GetDirection()).reshape(4, 4)
spacing = tuple(list(spacing[:-1]))
assert len(spacing) == 3
origin = tuple(list(origin[:-1]))
assert len(origin) == 3
direction = tuple(direction[:-1, :-1].reshape(-1))
assert len(direction) == 9
images_new = []
for i, t in enumerate(range(img_npy.shape[0])):
img = img_npy[t]
images_new.append(
create_itk_image_spatial_props(img, spacing, origin, direction))
return images_new | 21ad4f6c0cbdb05cf6f67469e3d32e732d1500ee | 13,887 |
from bs4 import BeautifulSoup
import pandas as pd
def parse_results(html, keyword):
"""[summary]
Arguments:
html {str} -- google search engine html response
keyword {str} -- search term
Returns:
pandas.DataFrame -- Dataframe with the following columns ['keyword', 'rank', 'title', 'link', 'domain']
"""
soup = BeautifulSoup(html, 'html.parser')
found_results = []
rank = 1
result_block = soup.find_all('div', attrs={'class': 'g'})
for result in result_block:
link = result.find('a', href=True)
title = result.find('h3')
# description = result.find('span', attrs={'class': 'st'})
if link and title:
link = link['href']
title = title.get_text()
# if description:
# description = description.get_text()
if link != '#':
domain = DOMAIN_RE.findall(link)[0]
found_results.append(
{'keyword': keyword, 'rank': rank, 'title': title, 'link': link, 'domain': domain})
rank += 1
return pd.DataFrame(found_results, columns=['keyword', 'rank', 'title', 'link', 'domain']) | 4c89e919b3f3285565efe5bdf5c4ec5b87664c79 | 13,888 |
from copy import deepcopy
def maybe_iter_configs_with_path(x, with_params=False):
"""
Like x.maybe_iter_configs_with_path(), but returns [(x, [{}])] or [(x, {}, [{}])] if x is just a config object and not a Tuner object.
"""
if is_tuner(x):
return x.iter_configs_with_path(with_params=with_params)
else:
if with_params:
return [(deepcopy(x), {}, [{}])]
else:
return [(deepcopy(x), {})] | 947a62067f3eacb4d5c8ba419d8018ad2ab3320c | 13,889 |
import typing
def median(vals: typing.List[float]) -> float:
"""Calculate median value of `vals`
Arguments:
vals {typing.List[float]} -- list of values
Returns:
float -- median value
"""
    ordered = sorted(vals)
    mid = len(ordered) // 2
    if len(ordered) % 2 == 1:
        return ordered[mid]
    return (ordered[mid - 1] + ordered[mid]) / 2 | 9f840d11409a570a718fdfe56d7a282af43bc798 | 13,890
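Usage examples for the corrected implementation above:

assert median([3.0, 1.0, 2.0]) == 2.0
assert median([4.0, 1.0, 3.0, 2.0]) == 2.5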
def melody_mapper(notes):
"""
Makes a map of a melody to be played
    Each item in the list 'notes' should be a tuple of:
    note - the note to play
    duration - length in seconds the sound will be played
    sleep - time in seconds to pause after the note
    (note, duration, sleep)
    example:
    [('A4', 1, 0.5), ('C3', 0.5, 0)]
    :param notes: List of note tuples
    :return: dict mapping note positions (as strings) to note info
"""
m_map = {}
num_of_notes = 1
for note_info in notes:
note, duration, sleep = note_info
m_map[str(num_of_notes)] = {'note': note,
'frequency': get_note(note)[1],
'duration': duration,
'sleep': sleep}
num_of_notes += 1
return m_map | cf4c8f7864e91e771d3a70bfc4d8a7f4edb38967 | 13,891 |
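Example input using the (note, duration, sleep) tuples that the loop unpacks; get_note is not shown in the snippet, so it is assumed to return a (name, frequency) pair.

notes = [('A4', 1.0, 0.2), ('C3', 0.5, 0.0)]
m_map = melody_mapper(notes)
# m_map['1'] == {'note': 'A4', 'frequency': <from get_note>, 'duration': 1.0, 'sleep': 0.2}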
import numpy as np
def sample_bounding_box_scale_balanced_black(landmarks):
"""
Samples a bounding box for cropping so that the distribution of scales in the training data is uniform.
"""
bb_min = 0.9
bb_old = image.get_bounding_box(landmarks)
bb_old_shape = np.array((bb_old[2] - bb_old[0], bb_old[3] - bb_old[1]))
bb_old_size = np.max(bb_old_shape)
margin = (1 - bb_min) / 2
bb_old_min = np.round([bb_old[0] + bb_old_shape[0] * margin,
bb_old[1] + bb_old_shape[1] * margin,
bb_old[2] - bb_old_shape[0] * margin,
bb_old[3] - bb_old_shape[1] * margin])
scale = np.random.random_sample() * 0.94 + 0.08
bb_crop_size = int(round(bb_old_size / scale))
bb_crop_start_x = np.random.random_integers(low=bb_old_min[2] - bb_crop_size,
high=bb_old_min[0] + 1)
bb_crop_start_y = np.random.random_integers(low=bb_old_min[3] - bb_crop_size,
high=bb_old_min[1] + 1)
bb_crop_end_x = bb_crop_start_x + bb_crop_size
bb_crop_end_y = bb_crop_start_y + bb_crop_size
bb_crop = [bb_crop_start_x,
bb_crop_start_y,
bb_crop_end_x,
bb_crop_end_y]
return np.array(bb_crop) | 789cbe92803b77614ab8a018434745b2d9bba3a4 | 13,892 |
import glob
import os
def get_files(data_path):
"""
获取目录下以及子目录下的图片
:param data_path:
:return:
"""
files = []
exts = ['jpg', 'png', 'jpeg', 'JPG','bmp']
for ext in exts:
        # glob.glob returns all matching file names,
        # from both the top level and one subdirectory level down
files.extend(glob.glob(os.path.join(data_path, '*.{}'.format(ext))))
files.extend(glob.glob(os.path.join(data_path, '*', '*.{}'.format(ext))))
return files | 1a81aa7679eb2c70d29d3e80423c4b2e860c307d | 13,893 |
def get_trainable_layers(layers):
"""Returns a list of layers that have weights."""
    trainable_layers = []
    # Loop through all layers
    for l in layers:
        # If layer is a wrapper, find inner trainable layer
        l = find_trainable_layer(l)
        # Include layer if it has weights
        if l.get_weights():
            trainable_layers.append(l)
    return trainable_layers | 2d3f00cb061a6c2ee7081468be564f0b8621441d | 13,894
import outcome
import qtrio
def outcome_from_application_return_code(return_code: int) -> outcome.Outcome:
"""Create either an :class:`outcome.Value` in the case of a 0 `return_code` or an
:class:`outcome.Error` with a :class:`ReturnCodeError` otherwise.
Args:
return_code: The return code to be processed.
Returns:
The outcome wrapping the passed in return code.
"""
if return_code == 0:
return outcome.Value(return_code)
return outcome.Error(qtrio.ReturnCodeError(return_code)) | c5b786906e0f3fd99ed6660c55213b18139003c0 | 13,895 |
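Behavior sketch (requires the outcome and qtrio packages to be installed):

result = outcome_from_application_return_code(0)
assert result == outcome.Value(0)

failure = outcome_from_application_return_code(2)
# failure is an outcome.Error wrapping qtrio.ReturnCodeError(2)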
import re
from collections import defaultdict
def group_by_scale(labels):
""" Utility that groups attribute labels by time scale """
groups = defaultdict(list)
# Extract scales from labels (assumes that the scale is given by the last numeral in a label)
for s in labels:
        m = re.findall(r"\d+", s)
if m:
groups[m[-1]].append(s)
else:
print("Bad attribute: ", s)
return list(groups.values()) | 661ea03f8d463b1e0d5746df60e9e2cb969737ab | 13,896 |
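Example: attribute labels carrying two time scales; labels without a numeral are reported and skipped.

group_by_scale(["temp_1", "humidity_1", "temp_24", "pressure"])
# prints "Bad attribute:  pressure" and returns [['temp_1', 'humidity_1'], ['temp_24']]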
def FontMapper_GetEncodingDescription(*args, **kwargs):
"""FontMapper_GetEncodingDescription(int encoding) -> String"""
return _gdi_.FontMapper_GetEncodingDescription(*args, **kwargs) | 0f154eaa616c3b18bc8828f63137c26c75397d56 | 13,897 |
from collections import Counter
def create_merged_ngram_dictionaries(indices, n):
"""Generate a single dictionary for the full batch.
Args:
indices: List of lists of indices.
n: Degree of n-grams.
Returns:
Dictionary of hashed(n-gram tuples) to counts in the batch of indices.
"""
ngram_dicts = []
for ind in indices:
ngrams = n_gram.find_all_ngrams(ind, n=n)
ngram_counts = n_gram.construct_ngrams_dict(ngrams)
ngram_dicts.append(ngram_counts)
merged_gen_dict = Counter()
for ngram_dict in ngram_dicts:
merged_gen_dict += Counter(ngram_dict)
return merged_gen_dict | bd313ea7eab835102e94f6c7d66fec8882531385 | 13,898 |
import base64
def compute_hash_base64(*fields):
"""bytes -> base64 string"""
value = compute_hash(*fields)
return base64.b64encode(value).decode() | b29b77b44a51417d63f8cae1970b5c1f4fb40317 | 13,899 |