content (string, 35–762k chars) | sha1 (string, 40 chars) | id (int64, 0–3.66M) |
---|---|---|
import random
def new_match(request, cmd_args):
"""
Return a slack message with a link to a new match
"""
# Could potentially add arguments to allow game configuration here
serializer = LiveMatchSerializer(data={"config": cmd_args})
if serializer.is_valid():
live_match = serializer.save()
return {
"response_type": "in_channel",
"text": request.build_absolute_uri(
"/matches/live/{}".format(live_match.id)
),
"attachments": [{"text": random.choice(NEW_MATCH_MESSAGES)}],
}
else:
error_str = "\n".join(
f" {field}: {', '.join(errors)}"
for field, errors in serializer.errors["config"].items()
)
return {"response_type": "in_channel", "text": f"Error:\n{error_str}"} | fe5a1f57a23188eaf000c32623eb5ce4386e4dc0 | 20,902 |
import tensorflow as tf
def head(feats, anchors, num_classes):
"""Convert final layer features to bounding box parameters.
Parameters
----------
feats : tensor
Final convolutional layer features.
anchors : array-like
Anchor box widths and heights.
num_classes : int
Number of target classes.
Returns
-------
box_xy : tensor
x, y box predictions adjusted by spatial location in conv layer.
box_wh : tensor
w, h box predictions adjusted by anchors and conv spatial resolution.
box_conf : tensor
Probability estimate for whether each box contains any object.
box_class_pred : tensor
Probability distribution estimate for each box over class labels.
"""
num_anchors = len(anchors)
# Reshape to batch, height, width, num_anchors, box_params.
anchors_tensor = tf.reshape(
tf.Variable(anchors, dtype=tf.float32, name='anchors'),
[1, 1, 1, num_anchors, 2])
# Dynamic implementation of conv dims for fully convolutional model.
conv_dims = tf.shape(feats)[1:3] # assuming channels last
    # In YOLO the height index is the innermost iteration.
conv_height_index = tf.range(0, conv_dims[0])
conv_width_index = tf.range(0, conv_dims[1])
conv_height_index = tf.tile(conv_height_index, [conv_dims[1]])
conv_width_index = tf.tile(tf.expand_dims(conv_width_index, 0),
[conv_dims[0], 1])
conv_width_index = tf.reshape(tf.transpose(conv_width_index), [-1])
conv_index = tf.transpose(tf.stack([conv_height_index, conv_width_index]))
conv_index = tf.reshape(conv_index, [1, conv_dims[0], conv_dims[1], 1, 2])
conv_index = tf.cast(conv_index, feats.dtype)
feats = tf.reshape(
feats, [-1, conv_dims[0], conv_dims[1], num_anchors, num_classes + 5])
conv_dims = tf.cast(tf.reshape(conv_dims, [1, 1, 1, 1, 2]), feats.dtype)
box_xy = tf.nn.sigmoid(feats[..., :2])
box_wh = tf.exp(feats[..., 2:4])
box_confidence = tf.sigmoid(feats[..., 4:5])
box_class_probs = tf.nn.softmax(feats[..., 5:])
    # Adjust predictions to each spatial grid point and anchor size.
# Note: YOLO iterates over height index before width index.
box_xy = (box_xy + conv_index) / conv_dims
box_wh = box_wh * anchors_tensor / conv_dims
return box_xy, box_wh, box_confidence, box_class_probs | d05f47f199d500bb8938396ef9052abfe6815a1f | 20,903 |
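A minimal NumPy sketch (an illustration, not part of the row above) of the grid-offset arithmetic in `head`: the sigmoid output lies in (0, 1), is added to the cell index, and is divided by the grid size, giving box centres as fractions of the image. The 13x13 grid and the raw outputs are made-up values.

```python
import numpy as np

conv_dims = np.array([13.0, 13.0])          # assumed 13x13 feature map
cell_index = np.array([4.0, 7.0])           # (row, col) of one grid cell
raw_xy = np.array([0.2, -1.0])              # hypothetical raw network outputs

sigmoid_xy = 1.0 / (1.0 + np.exp(-raw_xy))  # offset inside the cell, in (0, 1)
box_xy = (sigmoid_xy + cell_index) / conv_dims
print(box_xy)                               # ~[0.35 0.56], fractions of the image
```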
def socfaker_azurevmtopology_get():
"""
    Return the Azure VM topology from socfaker as a JSON response.
"""
if validate_request(request):
return jsonify(str(socfaker.products.azure.vm.topology)) | c4041c2cb3d124b1ed3a2b124292b669caba3595 | 20,904 |
import json
def read_json_file(filename):
"""Load json object from a file."""
with open(filename, 'r') as f:
content = json.load(f)
return content | b575891ac4b5a5484c9dcf7966e63a7cd9ca748c | 20,905 |
import copy
import itertools as it
def createVectorisedTargValObjFunction(functTypeStr:str, averageMethod="mean",catchOverflow=True, errorRetVal=1e30, normToErrorRetVal=False, greaterThanIsOk=False, lessThanIsOk=False, useAbsVals=False,
divideErrorsByNormFactor=None):
""" Creates a comparison function that operators on (iterA,iterB) and returns a single value representing their similarity
Args:
functTypeStr(str): Key for selecting a base function for comparing two single numbers. e.g. "absdev" means a function returning the absolute difference.
All possible values can be found in OBJ_FUNCT_DICT. The function this gives has interface cmpFunct(expVal,actVal)->objVal
averageMethod(str): Determines how we convert an array of errors (obtained by applying functTypeStr function to all pairs of values) to a single error value
catchOverflow(bool): If True we catch overflow errors when comparing numbers, we replace the (overflowed) error value with errorRetVal
errorRetVal(float): see catchOverflow
        normToErrorRetVal(bool): If True we ensure that all output values are between 0 and 1. We do this by dividing values by errorRetVal, and still setting the answer
            to 1 even if they go above that value
        greaterThanIsOk(bool): If True then the cmpFunct(expVal,actVal) returns 0 if expVal>=actVal, regardless of the actual type of cmpFunct (which is determined by functTypeStr)
        lessThanIsOk(bool): If True then the cmpFunct(expVal, actVal) returns 0 if expVal<=actVal, regardless of the actual type of cmpFunct (which is determined by functTypeStr)
        useAbsVals(bool): If True then the cmpFunct(expVal,actVal) will use abs(expVal) and abs(actVal) as inputs. Useful if you only care about the magnitude of your errors. Note: This is applied BEFORE the less than/greater than functionality; so if mixed, the <,> operators are applied to abs(expVal) and abs(actVal)
divideErrorsByNormFactor(float): If not None, then we divide the output error by this value. The original purpose is to get a normalised error based on target values;
this is accomplished by setting this arg to the average expVal, and using the absdev cmp function.
Returns
        outFunct(targIter,actIter)->error: Single function that works on two input iterators. Order of targIter and actIter probably won't matter.
"""
baseFunct = createSimpleTargValObjFunction(functTypeStr, catchOverflow=False, greaterThanIsOk=greaterThanIsOk, lessThanIsOk=lessThanIsOk, useAbsVals=useAbsVals)
def vectorizedFunct(targVals,actVals):
outVals = list()
tVals, aVals = copy.deepcopy(targVals), copy.deepcopy(actVals)
for t,a in it.zip_longest(tVals,aVals):
outVals.append( baseFunct(t,a) )
return outVals
outFunct = vectorizedFunct #Currently takes in lists, and returns a list
if averageMethod.lower()=="mean":
outFunct = applyMeanDecorator(outFunct)
else:
raise ValueError("{} is not a supported option for averageMethod".format(averageMethod))
if divideErrorsByNormFactor is not None:
outFunct = applyDivByConstantDecorator(outFunct, divideErrorsByNormFactor)
if catchOverflow:
outFunct = catchOverflowDecorator(outFunct,errorRetVal)
#Important this comes after catchOverflow, which essentially caps the value
if normToErrorRetVal:
outFunct = applyNormDecorator(outFunct, errorRetVal)
return outFunct | 1e9f0da8ce87c40f1dd461fd052a7c9de69ce86f | 20,906 |
def get_nonoverlap_ra_dataset_conf(dataset_conf):
"""extract segments by shifting segment length"""
if dataset_conf["if_rand"]:
info("disabled dataset_conf if_rand")
dataset_conf["if_rand"] = False
if dataset_conf["seg_rand"]:
info("disabled dataset_conf seg_rand")
dataset_conf["seg_rand"] = False
if dataset_conf["seg_shift"] != dataset_conf["seg_len"]:
info("change seg_shift from %s to %s" % (
dataset_conf["seg_shift"], dataset_conf["seg_len"]))
dataset_conf["seg_shift"] = dataset_conf["seg_len"]
return dataset_conf | 8aba162200d2f020e72a29d9d9c5676792ae0405 | 20,907 |
import pandas as pd
def _f_model_snaive_wday(a_x, a_date, params, is_mult=False, df_actuals=None):
"""Naive model - takes last valid weekly sample"""
if df_actuals is None:
raise ValueError('model_snaive_wday requires a df_actuals argument')
# df_actuals_model - table with actuals samples,
# adding y_out column with naive model values
df_actuals_model = _fillna_wday(df_actuals.drop_duplicates('x'))
# df_last_week - table with naive model values from last actuals week,
# to use in extrapolation
df_last_week = (
df_actuals_model
# Fill null actual values with data from previous weeks
.assign(y=df_actuals_model.y.fillna(df_actuals_model.y_out))
.drop_duplicates('wday', keep='last')
[['wday', 'y']]
.rename(columns=dict(y='y_out'))
)
# Generate table with extrapolated samples
df_out_tmp = pd.DataFrame({'date': a_date, 'x': a_x})
df_out_tmp['wday'] = df_out_tmp.date.dt.weekday
df_out_extrapolated = (
df_out_tmp
.loc[~df_out_tmp.date.isin(df_actuals_model.date)]
.merge(df_last_week, how='left')
.sort_values('x')
)
# Filter actuals table - only samples in a_x, a_date
df_out_actuals_filtered = (
# df_actuals_model.loc[df_actuals_model.x.isin(a_x)]
# Using merge rather than simple filtering to account for
# dates with multiple samples
df_actuals_model.merge(df_out_tmp, how='inner')
.sort_values('x')
)
df_out = (
pd.concat(
[df_out_actuals_filtered, df_out_extrapolated],
sort=False, ignore_index=True)
)
return df_out.y_out.values | f7668b6e73f1e985304198f1996f3374994e10cf | 20,908 |
def union_exprs(La, Lb):
"""
Union two lists of Exprs.
"""
b_strs = set([node.unique_str() for node in Lb])
a_extra_nodes = [node for node in La if node.unique_str() not in b_strs]
return a_extra_nodes + Lb | 2bd634a22b27314f6d03c8e52c0b09f7f4b692db | 20,910 |
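A hedged, self-contained illustration of the same dedup-by-key union idea, with plain strings standing in for Expr nodes and a key function standing in for `unique_str()`:

```python
def union_by_key(la, lb, key=lambda x: x):
    # keep items of la whose key is not already present in lb, then append lb
    b_keys = {key(item) for item in lb}
    return [item for item in la if key(item) not in b_keys] + lb

print(union_by_key(["a", "b", "c"], ["b", "d"]))  # ['a', 'c', 'b', 'd']
```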
def programs_reload():
"""Reload programs from config file
Parameters (default):
- do_create (True)
- do_update (True)
- do_pause (False)
"""
try:
result = dar.reload_programs(**request.args)
except TypeError as e:
log.info("Caught TypeError: %s" % (str(e)))
return "Error: " + str(e), 400
return jsonify({i: list(j) for i, j in result.items()}) | 06beff9b2f67bb7978ddd31ff338accb90d259bf | 20,911 |
import requests
def generate_urls():
"""Gathers clinical trials from clinicaltrials.gov for search term
defined in build_url() function and downloads to specified file format.
"""
api_call = build_url(expr='Cancer', max_rnk=1, fmt='json')
r = requests.get(api_call)
data = r.json()
n_studies = data['StudyFieldsResponse']['NStudiesFound']
print(f'{n_studies} studies found.\n')
print('\nGenerating request urls...')
urls = []
for i in range(1, n_studies, 1000):
url = build_url(expr='Cancer', field_names=['EligibilityCriteria'],
min_rnk=f'{i}', max_rnk=f'{i+999}',
fmt='csv')
urls.append(url)
return urls | 7de042ff2031d1b49e4fc02535f151d5e1dda913 | 20,912 |
def xyz2luv(xyz, illuminant="D65", observer="2"):
"""XYZ to CIE-Luv color space conversion.
Parameters
----------
xyz : (M, N, [P,] 3) array_like
The 3 or 4 dimensional image in XYZ format. Final dimension denotes
channels.
illuminant : {"A", "D50", "D55", "D65", "D75", "E"}, optional
The name of the illuminant (the function is NOT case sensitive).
observer : {"2", "10"}, optional
The aperture angle of the observer.
Returns
-------
out : (M, N, [P,] 3) ndarray
The image in CIE-Luv format. Same dimensions as input.
Raises
------
ValueError
If `xyz` is not a 3-D or 4-D array of shape ``(M, N, [P,] 3)``.
ValueError
If either the illuminant or the observer angle are not supported or
unknown.
Notes
-----
By default XYZ conversion weights use observer=2A. Reference whitepoint
for D65 Illuminant, with XYZ tristimulus values of ``(95.047, 100.,
108.883)``. See function 'get_xyz_coords' for a list of supported
illuminants.
References
----------
.. [1] http://www.easyrgb.com/index.php?X=MATH&H=16#text16
.. [2] http://en.wikipedia.org/wiki/CIELUV
Examples
--------
>>> from skimage import data
>>> from skimage.color import rgb2xyz, xyz2luv
>>> img = data.astronaut()
>>> img_xyz = rgb2xyz(img)
>>> img_luv = xyz2luv(img_xyz)
"""
arr = _prepare_colorarray(xyz)
# extract channels
x, y, z = arr[..., 0], arr[..., 1], arr[..., 2]
    eps = np.finfo(float).eps
# compute y_r and L
xyz_ref_white = get_xyz_coords(illuminant, observer)
L = y / xyz_ref_white[1]
mask = L > 0.008856
L[mask] = 116. * np.power(L[mask], 1. / 3.) - 16.
L[~mask] = 903.3 * L[~mask]
u0 = 4 * xyz_ref_white[0] / np.dot([1, 15, 3], xyz_ref_white)
v0 = 9 * xyz_ref_white[1] / np.dot([1, 15, 3], xyz_ref_white)
# u' and v' helper functions
def fu(X, Y, Z):
return (4. * X) / (X + 15. * Y + 3. * Z + eps)
def fv(X, Y, Z):
return (9. * Y) / (X + 15. * Y + 3. * Z + eps)
# compute u and v using helper functions
u = 13. * L * (fu(x, y, z) - u0)
v = 13. * L * (fv(x, y, z) - v0)
return np.concatenate([q[..., np.newaxis] for q in [L, u, v]], axis=-1) | 194f0e76c35257fb1d87fd58f507cc11da45eab4 | 20,913 |
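A small hedged check of the reference-white terms u0 and v0 computed above, evaluated at the D65 tristimulus values quoted in the docstring:

```python
X, Y, Z = 95.047, 100.0, 108.883   # D65 reference white from the docstring
denom = X + 15 * Y + 3 * Z         # same weighting as np.dot([1, 15, 3], ...)
u0 = 4 * X / denom
v0 = 9 * Y / denom
print(round(u0, 4), round(v0, 4))  # 0.1978 0.4683
```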
def today():
"""
Today Page:
Displays all the notifications like the word of the day, news highlights or friends who added you or shared a
note with you
"""
return render_template('main/today.html', username=session['username']) | ada12c04456967bba1e506ca0fe27d3efe252307 | 20,915 |
import re
import itertools
def compile_read_regex(read_tags, file_extension):
"""Generate regular expressions to disern direction in paired-end reads."""
read_regex = [re.compile(r'{}\.{}$'.format(x, y))\
for x, y in itertools.product(read_tags, [file_extension])]
return read_regex | e677b8ff622eb31ea5f77bc662845ba0aef91770 | 20,916 |
def filter(start=None, stop=None, **kwargs):
"""
Get commands with ``start`` <= date < ``stop``. Additional ``key=val`` pairs
can be supplied to further filter the results. Both ``key`` and ``val``
    are case insensitive. In addition to any of the command parameters
such as TLMSID, MSID, SCS, STEP, or POS, the ``key`` can be:
date : Exact date of command e.g. '2013:003:22:11:45.530'
type : Command type e.g. COMMAND_SW, COMMAND_HW, ACISPKT, SIMTRANS
Examples::
>>> from kadi import cmds
>>> cs = cmds.filter('2012:001', '2012:030')
>>> cs = cmds.filter('2012:001', '2012:030', type='simtrans')
>>> cs = cmds.filter(type='acispkt', tlmsid='wsvidalldn')
>>> cs = cmds.filter(msid='aflcrset')
>>> print(cs.table)
Parameters
----------
start : DateTime format (optional)
Start time, defaults to beginning of available commands (2002:001)
stop : DateTime format (optional)
Stop time, defaults to end of available commands
**kwargs : any key=val keyword argument pairs
Returns
-------
cmds : CmdList object (list of commands)
"""
cmds = _find(start, stop, **kwargs)
return CmdList(cmds) | 95471d3a69f7566b7e079e04e38fc21324425e90 | 20,917 |
import numpy as np
def _get_energy_at_time(masses, pos, vel, time_idx):
"""
Internal function used to calculate kinetic energy and potential energy at
a give time index using a vectorized direct sum approach. This function is
necessary to facilitate the parallelization of the energy calculation
across multiple CPU cores.
:param masses: Array of masses.
:param pos: Array of positions over time.
:param vel: Array of velocities over time.
:param time_idx: Time index at which the energy is to be calculated.
:return: Tuple of kinetic energy and potential energy
    at the given time index.
"""
# kinetic energy
kin_energy = 0.5 * np.sum(masses * np.sum(vel[:, :, time_idx] ** 2, axis=1))
# potential energy
# extract x & y coordinates to a (N, 1) array
x = pos[:, 0:1, time_idx]
y = pos[:, 1:2, time_idx]
# matrices that store pairwise body distances
dx = x.T - x
dy = y.T - y
# calculate pairwise inverse norm of distances
# mask operation to avoid divide by zero
norm = np.sqrt(dx ** 2 + dy ** 2)
inv = np.zeros_like(norm) # ensure that diagonal of inv will only contain zeros
np.divide(1, norm, where=norm != 0, out=inv)
# multiply matrix element ij with the masses of bodies i and j
energy_per_body = np.transpose(inv * masses) * masses
# sum energies
pot_energy = -0.5 * energy_per_body.sum()
return kin_energy, pot_energy | 76c72be399cb31e949024123c635ee76e9f4ab3a | 20,918 |
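A tiny hedged demo of the broadcasting trick used above for pairwise distances: a column vector minus its own transpose yields the full (N, N) matrix of coordinate differences.

```python
import numpy as np

x = np.array([[0.0], [1.0], [3.0]])  # x-coordinates of 3 bodies, shape (3, 1)
dx = x.T - x                         # dx[i, j] = x_j - x_i via broadcasting
print(dx)
# [[ 0.  1.  3.]
#  [-1.  0.  2.]
#  [-3. -2.  0.]]
```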
def get_root_url_for_date(date):
"""
Returns the root URL of the TMCDB web I/F for the given date.
The argument date should be an ISO-8601 date string (YYYY-MM-DD).
The returned URL already contains the date.
"""
year = date[:4]
mm = date[5:7]
hostname = get_host_name()
return "%s/index.php?dir=%s/%s/%s/" % (hostname, year, mm, date) | 32a223d672e4878dfb4766ac9fc86a581826701b | 20,919 |
def dummy_annotation_txt_one_segment(tmpdir_factory):
"""Create empty TXT annotations."""
content = ("# MNE-Annotations\n"
"# onset, duration, description\n"
"3.14, 42, AA")
fname = tmpdir_factory.mktemp('data').join('one-annotations.txt')
fname.write(content)
return fname | 4e127391b3f43fe8db7cceca54c5001697483fc9 | 20,920 |
def article_title_meets_posting_requirements(website, article_title):
"""
Validates that the article title meets all requirements to post the list to Reddit.
The validations below check if:
(1) The article contains a number
(2) The article title doesn't contain certain pre-defined keywords
(3) The article title is in english (BuzzFeed only)
Returns True if all validations are met. Returns False otherwise.
"""
if website == ArticleType.BuzzFeed:
try:
if not detect(article_title) == 'en':
return False
except lang_detect_exception.LangDetectException:
return False
if get_article_list_count(article_title) == 0:
return False
if any(words in article_title.lower() for words in get_title_exclusion_words(website)):
return False
return True | c1a36592eff9144ab2dc9703cfdf669294dac229 | 20,921 |
import json
import csv
import pandas as pd
def about():
""" returns about page """
counter = json.load(open(r'filter/OXAs_dict/counter.txt'))
ids = [*counter]
r = csv.reader(open(r'Training_data/Training_data_IC.csv'))
df = pd.DataFrame(data=list(r))
svm_table = df.to_html(index=False, header=False)
return render_template('About.html', svm_table=svm_table, oxa_ids=ids) | 5565fdd2616df38e223fa4e3dbc6d8963fcf285d | 20,922 |
from typing import List
def phys_mem_regions_from_elf(elf: ElfFile, alignment: int) -> List[MemoryRegion]:
"""Determine the physical memory regions for an ELF file with a given
alignment.
The returned region shall be extended (if necessary) so that the start
and end are congruent with the specified alignment (usually a page size).
"""
assert alignment > 0
return [
MemoryRegion(
round_down(segment.phys_addr, alignment),
round_up(segment.phys_addr + len(segment.data), alignment)
)
for segment in elf.segments
] | 611b5b5d89c999ee5136f98bf5555d9c71e14048 | 20,923 |
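The `round_down` / `round_up` helpers are not shown in this row; a plausible sketch of what they are assumed to do (align an address down or up to a boundary such as a 4 KiB page) is:

```python
def round_down(value: int, align: int) -> int:
    # largest multiple of `align` that is <= value (align must be positive)
    return value - (value % align)

def round_up(value: int, align: int) -> int:
    # smallest multiple of `align` that is >= value
    return round_down(value + align - 1, align)

print(hex(round_down(0x1234, 0x1000)), hex(round_up(0x3456, 0x1000)))  # 0x1000 0x4000
```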
def force_float_to_int_in_any_way(x):
"""This force a float to be converted to an int.
Any float is fine. The result is truncated.
Like PHP, if the input float is greater than 2**63, then the result
is 0, even if intmask(int(f)) would return some bits."""
# magic values coming from pypy.rlib.rarithmetic.ovfcheck_float_to_int
# on 64-bit.
if isnan(x):
return -maxint - 1
if -9223372036854776832.0 <= x < 9223372036854775296.0:
x = r_longlong(x)
return intmask(x)
return 0 | 7ad932c213f4c4c3f4195383d87268ba803cc533 | 20,924 |
from typing import Union
def where(condition : pdarray, A : Union[Union[int,float], pdarray],
B : Union[Union[int,float], pdarray]) -> pdarray:
"""
Returns an array with elements chosen from A and B based upon a
conditioning array. As is the case with numpy.where, the return array
consists of values from the first array (A) where the conditioning array
elements are True and from the second array (B) where the conditioning
array elements are False.
Parameters
----------
condition : pdarray
Used to choose values from A or B
A : scalar or pdarray
Value(s) used when condition is True
B : scalar or pdarray
Value(s) used when condition is False
Returns
-------
pdarray
Values chosen from A where the condition is True and B where
the condition is False
Raises
------
TypeError
Raised if the condition object is not a pdarray, if pdarray dtypes are
not supported or do not match, or if multiple condition clauses (see
Notes section) are applied
ValueError
Raised if the shapes of the condition, A, and B pdarrays are unequal
Examples
--------
>>> a1 = ak.arange(1,10)
>>> a2 = ak.ones(9, dtype=np.int64)
>>> cond = a1 < 5
>>> ak.where(cond,a1,a2)
array([1, 2, 3, 4, 1, 1, 1, 1, 1])
>>> a1 = ak.arange(1,10)
>>> a2 = ak.ones(9, dtype=np.int64)
>>> cond = a1 == 5
>>> ak.where(cond,a1,a2)
array([1, 1, 1, 1, 5, 1, 1, 1, 1])
>>> a1 = ak.arange(1,10)
>>> a2 = 10
>>> cond = a1 < 5
>>> ak.where(cond,a1,a2)
array([1, 2, 3, 4, 10, 10, 10, 10, 10])
Notes
-----
A and B must have the same dtype and only one conditional clause
is supported e.g., n < 5, n > 1, which is supported in numpy
is not currently supported in Arkouda
"""
if isinstance(A, pdarray) and isinstance(B, pdarray):
repMsg = generic_msg("efunc3vv {} {} {} {}".\
format("where",
condition.name,
A.name,
B.name))
# For scalars, try to convert it to the array's dtype
elif isinstance(A, pdarray) and np.isscalar(B):
repMsg = generic_msg("efunc3vs {} {} {} {} {}".\
format("where",
condition.name,
A.name,
A.dtype.name,
A.format_other(B)))
elif isinstance(B, pdarray) and np.isscalar(A):
repMsg = generic_msg("efunc3sv {} {} {} {} {}".\
format("where",
condition.name,
B.dtype.name,
B.format_other(A),
B.name))
elif np.isscalar(A) and np.isscalar(B):
# Scalars must share a common dtype (or be cast)
dtA = resolve_scalar_dtype(A)
dtB = resolve_scalar_dtype(B)
# Make sure at least one of the dtypes is supported
if not (dtA in DTypes or dtB in DTypes):
raise TypeError(("Not implemented for scalar types {} " +
"and {}").format(dtA, dtB))
# If the dtypes are the same, do not cast
if dtA == dtB: # type: ignore
dt = dtA
# If the dtypes are different, try casting one direction then the other
elif dtB in DTypes and np.can_cast(A, dtB):
A = np.dtype(dtB).type(A)
dt = dtB
elif dtA in DTypes and np.can_cast(B, dtA):
B = np.dtype(dtA).type(B)
dt = dtA
# Cannot safely cast
else:
raise TypeError(("Cannot cast between scalars {} and {} to " +
"supported dtype").format(A, B))
repMsg = generic_msg("efunc3ss {} {} {} {} {} {}".\
format("where",
condition.name,
dt,
A,
dt,
B))
return create_pdarray(type_cast(str,repMsg)) | 26f42d3e45d81108b5fa2cdeed018f509432854e | 20,925 |
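For intuition, the third docstring example above corresponds to this selection in plain NumPy (a hedged analogue, not Arkouda itself):

```python
import numpy as np

a1 = np.arange(1, 10)
print(np.where(a1 < 5, a1, 10))  # [ 1  2  3  4 10 10 10 10 10]
```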
def pre_training_configs(m):
"""
Before training the model, configure it
"""
ordering = range(m.n_visible)
np.random.shuffle(ordering)
trainer = Optimization.MomentumSGD(m.nade, m.nade.__getattribute__(m.loss_function))
trainer.set_datasets([m.training_dataset, m.masks_dataset])
trainer.set_learning_rate(m.options.lr)
trainer.set_datapoints_as_columns(True)
trainer.add_controller(TrainingController.AdaptiveLearningRate(
m.options.lr, 0, epochs=m.options.epochs))
trainer.add_controller(TrainingController.MaxIterations(m.options.epochs))
if m.options.training_ll_stop < np.inf:
# Assumes that we're doing minimization so negative ll
trainer.add_controller(
TrainingController.TrainingErrorStop(-m.options.training_ll_stop))
trainer.add_controller(TrainingController.ConfigurationSchedule(
"momentum", [(2, 0), (float('inf'), m.options.momentum)]))
trainer.set_updates_per_epoch(m.options.epoch_size)
trainer.set_minibatch_size(m.options.batch_size)
# trainer.set_weight_decay_rate(options.wd)
trainer.add_controller(TrainingController.NaNBreaker())
# Instrument the training
trainer.add_instrumentation(Instrumentation.Instrumentation(
[m.console, m.textfile_log, m.hdf5_backend], Instrumentation.Function("training_loss", lambda ins: ins.get_training_loss())))
if not m.options.no_validation:
trainer.add_instrumentation(Instrumentation.Instrumentation([m.console],
m.validation_loss_measurement))
trainer.add_instrumentation(Instrumentation.Instrumentation([m.hdf5_backend],
m.validation_loss_measurement,
at_lowest=[Instrumentation.Parameters()]))
trainer.add_instrumentation(Instrumentation.Instrumentation(
[m.console, m.textfile_log, m.hdf5_backend], Instrumentation.Configuration()))
# trainer.add_instrumentation(Instrumentation.Instrumentation([hdf5_backend], Instrumentation.Parameters(), every = 10))
trainer.add_instrumentation(Instrumentation.Instrumentation(
[m.console, m.textfile_log, m.hdf5_backend], Instrumentation.Timestamp()))
return trainer | 9e726f8b9b136d0ee8f8015b533111f30e6447ce | 20,926 |
import torch
def loadClusterModule(pathCheckpoint):
"""
Load CPC Clustering Module from Clustering checkpoint file.
"""
state_dict = torch.load(pathCheckpoint, map_location=torch.device('cpu'))
clusterModule = kMeanCluster(torch.zeros(1, state_dict["n_clusters"], state_dict["dim"]))
clusterModule.load_state_dict(state_dict["state_dict"])
return clusterModule | e6e950c651e890a60b356b97266f3198f7e0a07d | 20,928 |
import base64
from shlex import quote
def bash_inline_create_file(name, contents):
"""
Turns a file into bash command.
Parameters
----------
name : str
File name.
contents : bytes
File contents.
Returns
-------
result : str
The resulting command that creates this file.
"""
return f"echo {quote(base64.b64encode(contents).decode())} | base64 -d > {quote(name)}" | 6ecabcb9a35dd760d762f3c4b7e1a15502ae5637 | 20,929 |
import html
def rtf_encode(unicode_string):
"""
Converts HTML encoding and Unicode encoding to RTF.
Be sure that autoescaping is off in the template. Autoescaping converts <, >, ", ', &
The unescape function used here is helpful for catching additional escape sequences used for special
characters, greek letters, symbols, and accents.
:param unicode_string:
:return:
"""
result = None
if unicode_string:
html_parser = html.parser.HTMLParser() # Create an HTML parser
        unicode_string = html.unescape(unicode_string) # Convert html encodings to unicode e.g. &eacute; -> '\xe9'
        rtf_bytestring = unicode_string.encode('rtfunicode') # Convert unicode to rtf e.g. '\xe9' -> \u233?
rtf_string = rtf_bytestring.decode('utf-8')
rtf_string = replace_tags(rtf_string) # replaces common tags with rtf encodings
result = rtf_string
return result | bd9cc1b8b9d736c4746a381ae6ab547af30452af | 20,930 |
def get_P(A):
"""
Markov matrix.
P = D^{-1}*A
"""
D = get_D(A)
return np.linalg.inv(D).dot(A) | bea5a3f10a5e6f6d592fd6df651ce09f5f4d4939 | 20,931 |
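A hedged worked example, assuming `get_D(A)` returns the diagonal degree matrix: each row of the resulting P is a probability distribution (rows sum to 1).

```python
import numpy as np

A = np.array([[1.0, 1.0],
              [1.0, 0.0]])   # adjacency with a self-loop on node 0
D = np.diag(A.sum(axis=1))   # assumed form of get_D: degree matrix
P = np.linalg.inv(D) @ A
print(P)                     # [[0.5 0.5]
                             #  [1.  0. ]]
print(P.sum(axis=1))         # [1. 1.]
```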
def get_movie_and_zmw_from_name(name):
"""Given a string of pacbio zmw name or read name, return movie and zmw"""
try:
fs = name.strip().split(' ')[0].split('/')
movie, zmw = fs[0], fs[1]
return movie, int(zmw)
    except (ValueError, IndexError):
raise ValueError("Read %r is not a PacBio read." % name) | 8dfb5e8d85b9e4a8a5ecfc0115416ce7c23bb6db | 20,932 |
def create_sp_history_copy(sp):
"""
Create a history copy of SP, with end_at value and new pk
return: created service provider object
"""
admins = sp.admins.all()
admin_groups = sp.admin_groups.all()
nameidformat = sp.nameidformat.all()
grant_types = sp.grant_types.all()
response_types = sp.response_types.all()
oidc_scopes = sp.oidc_scopes.all()
sp.history = sp.pk
sp.pk = None
sp.end_at = timezone.now()
sp.save()
sp.admins.set(admins)
sp.admin_groups.set(admin_groups)
sp.nameidformat.set(nameidformat)
sp.grant_types.set(grant_types)
sp.response_types.set(response_types)
sp.oidc_scopes.set(oidc_scopes)
return sp | 0508a1763c46e7f57c34647af2d5b00c2dd0f9cf | 20,933 |
def onehot(arr, num_classes=None, safe=True):
"""
Function to take in a 1D label array and returns the one hot encoded
transformation.
"""
arr = exactly_1d(arr)
if num_classes is None:
num_classes = np.unique(arr).shape[0]
if safe:
if num_classes != np.unique(arr).shape[0]:
raise Exception('Number of unique values does not match num_classes argument.')
return np.squeeze(np.eye(num_classes)[arr.reshape(-1)]) | ddeba80964712d85a8933c748fc973577a8fe4a5 | 20,934 |
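A quick hedged check of the identity-matrix indexing trick used above: `np.eye(C)[labels]` picks row `label` for each element, i.e. its one-hot code.

```python
import numpy as np

labels = np.array([0, 2, 1])
print(np.eye(3)[labels])
# [[1. 0. 0.]
#  [0. 0. 1.]
#  [0. 1. 0.]]
```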
def scale_relative_sea_level_rise_rate(mmyr: float, If: float = 1) -> float:
"""Scale a relative sea level rise rate to model time.
This function scales any relative sea level rise rate (RSLR) (e.g., sea
level rise, subsidence) to a rate appropriate for the model time. This is
helpful, because most discussion of RSLR uses units of mm/yr, but the
model (and model configuration) require units of m/s. Additionally, the
model framework needs to assume an "intermittency factor" to convert from
real-world time to model time.
    Relative sea level rise (subsidence and/or sea level rise) is scaled from
real world dimensions of mm/yr to model input as:
.. math::
\\widehat{RSLR} = (RSLR / 1000) \\cdot \\dfrac{1}{I_f \\cdot 365.25 \\cdot 86400}
This conversion makes it such that when one real-world year has elapsed
(:math:`I_f \\cdot 365.25 \\cdot 86400` seconds in model time), the relative
sea level has changed by the number of millimeters specified in the input
:obj:`mmyr`.
.. note::
Users should use this function to determine the value to specify in
an input YAML configuration file; no scaling is performed
internally.
Parameters
----------
mmyr : :obj:`float`
Millimeters per year, relative sea level rise rate.
If : :obj:`float`, optional
Intermittency factor, fraction of time represented by morphodynamic
activity. Should be in interval (0, 1). Defaults to 1 if not provided,
i.e., no scaling is performed.
Returns
-------
scaled : :obj:`float`
Scaled relative sea level rise rate, in meters per second.
"""
return (mmyr / 1000) * (1 / (shared_tools._scale_factor(
If, units='years'))) | 2150f1e0e9aa5f7f7c0c4c5b1d876676b6556d08 | 20,935 |
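A hedged arithmetic check of the docstring formula, assuming `_scale_factor(If, units='years')` equals `If * 365.25 * 86400`: 3 mm/yr with an intermittency factor of 0.02 becomes roughly 4.75e-9 m/s of model time.

```python
mmyr, If = 3.0, 0.02
seconds_per_year = 365.25 * 86400
scaled = (mmyr / 1000) / (If * seconds_per_year)
print(scaled)  # ~4.75e-09 m/s
```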
def get_covalent1_radius(Z):
"""
    Converts array of nuclear charges to array of corresponding covalent radii (in angstrom).
Args:
Z (numpy ndarray): array with nuclear charges
Returns:
        numpy ndarray: array of the same size as Z with the covalent radius (in angstrom) of the corresponding atom
"""
global _elements
if _elements is None:
_elements=_load_elements()
V=np.array([0]+[line[6] for line in _elements])
ret=V[Z]/100. #angstrom conversion
# assert (ret>0).all()
return ret | c655a5253a088379abdf171383ea8a2dedd52aa7 | 20,936 |
from typing import List
def get_bank_sizes(num_constraints: int,
beam_size: int,
candidate_counts: List[int]) -> List[int]:
"""
Evenly distributes the beam across the banks, where each bank is a portion of the beam devoted
to hypotheses having met the same number of constraints, 0..num_constraints.
After the assignment, banks with more slots than candidates are adjusted.
:param num_constraints: The number of constraints.
:param beam_size: The beam size.
:param candidate_counts: The empirical counts of number of candidates in each bank.
:return: A distribution over banks.
"""
num_banks = num_constraints + 1
bank_size = beam_size // num_banks
remainder = beam_size - bank_size * num_banks
# Distribute any remainder to the end
assigned = [bank_size for x in range(num_banks)]
assigned[-1] += remainder
# Now, moving right to left, push extra allocation to earlier buckets.
# This encodes a bias for higher buckets, but if no candidates are found, space
# will be made in lower buckets. This may not be the best strategy, but it is important
# that you start pushing from the bucket that is assigned the remainder, for cases where
# num_constraints >= beam_size.
for i in reversed(range(num_banks)):
overfill = assigned[i] - candidate_counts[i]
if overfill > 0:
assigned[i] -= overfill
assigned[(i - 1) % num_banks] += overfill
return assigned | 7a515b1e7762d01b7f7a1405a943f03babe26520 | 20,937 |
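A hedged worked example: a beam of 5 over 3 banks starts as [1, 1, 3] (remainder pushed to the last bank); banks 2 and 1 each have only one candidate, so their surplus slots cascade left to bank 0.

```python
print(get_bank_sizes(num_constraints=2, beam_size=5, candidate_counts=[4, 1, 1]))
# [3, 1, 1]
```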
from time import sleep
from paramiko import SSHClient, AutoAddPolicy, HostKeys
from paramiko.ssh_exception import NoValidConnectionsError
def ssh_connect(openstack_properties):
"""Create a connection to a server via SSH.
Args:
openstack_properties (dict): OpenStack facts and variables from Ansible
which can be used to manipulate OpenStack objects.
Returns:
def: A factory function object.
"""
connections = [] # Track inventory of SSH connections for teardown.
def _factory(hostname,
username,
retries=10,
key_filename=None,
auth_timeout=180):
"""Connect to a server via SSH.
        Note: this function waits ``attempt`` seconds between retries (a
        linearly increasing back-off), so the more retries specified the longer
        the wait between retries and the total wait grows roughly quadratically.
Args:
hostname (str): The server to connect to.
username (str): The username to authenticate as.
(defaults to the current local username)
retries (int): The maximum number of validation retry attempts.
key_filename (str): The filename, or list of filenames, of optional
private key(s) and/or certs to try for authentication. (Default
is to use the 'rpc_support' key.
auth_timeout (float): An optional timeout (in seconds) to wait for
an authentication response.
Returns:
paramiko.client.SSHClient: A client already connected to the target
server.
Raises:
paramiko.BadHostKeyException: If the server’s host key could not be
verified.
paramiko.AuthenticationException: If authentication failed.
paramiko.SSHException: If there was any other error connecting or
establishing an SSH session.
paramiko.ssh_exception.NoValidConnectionsError: Connection refused
by host. (SSH service is probably not running or host is not
fully booted)
socket.error: If a socket error occurred while connecting.
"""
temp_connection = SSHClient()
temp_connection.set_missing_host_key_policy(AutoAddPolicy())
for attempt in range(1, retries + 1):
try:
temp_connection.connect(
hostname=hostname,
username=username,
key_filename=(
key_filename or openstack_properties['private_key_path']
),
auth_timeout=auth_timeout
)
except NoValidConnectionsError:
                if attempt != retries:
sleep(attempt)
else:
raise # Re-raise
connections.append(temp_connection)
return temp_connection
yield _factory
# Teardown
for connection in connections:
connection.close()
HostKeys().clear() | 03d05c78d8ed516f581f01b4c093b05bc1ddcc1c | 20,938 |
def prepare(args: dict, overwriting: bool):
"""Load config and key file,create output directories and setup log files.
Args:
args (dict): argparser dictionary
Returns:
Path: output directory path
"""
output_dir = make_dir(args, "results_tmp", "activity_formatting", overwriting)
mapping_table_dir = make_dir(args, "mapping_table", None, overwriting)
create_log_files(output_dir)
return output_dir, mapping_table_dir | 0da7d1e98cd1a518fd8aa33b53204dce8de63fde | 20,939 |
import ast
def parse_data(data):
"""Takes a string from a repr(WSGIRequest) and transliterates it
This is incredibly gross "parsing" code that takes the WSGIRequest
string from an error email and turns it into something that
vaguely resembles the original WSGIRequest so that we can send
it through the system again.
"""
BEGIN = '<WSGIRequest'
data = data.strip()
data = data[data.find(BEGIN) + len(BEGIN):]
if data.endswith('>'):
data = data[:-1]
container = {}
key = ''
for line in data.splitlines():
# Lines that start with 'wsgi.' have values which are
# objects. E.g. a logger. This won't fly with ast.literal_eval
# so we just ignore all the wsgi. meta stuff.
if not line or line.startswith(' \'wsgi.'):
continue
if line.startswith(' '):
# If it starts with a space, then it's a continuation of
# the current dict.
container[key] += line
else:
key, val = line.split(':', 1)
container[key.strip()] = val.strip()
QUERYDICT = '<QueryDict: '
for key, val in container.items():
val = val.strip(',')
if val.startswith(QUERYDICT):
# GET and POST are both QueryDicts, so we nix the
# QueryDict part and pretend they're regular dicts.
#
# <QueryDict: {...}> -> {...}
val = val[len(QUERYDICT):-1]
elif val.startswith('{'):
# Regular dict that might be missing a } because we
# dropped it when we were weeding out wsgi. lines.
#
# {... -> {...}
val = val.strip()
if not val.endswith('}'):
val = val + '}'
else:
# This needs to have the string ornamentation added so it
# literal_evals into a string.
val = 'u"' + val + '"'
# Note: We use ast.literal_eval here so that we're guaranteed
# only to be getting out strings, lists, tuples, dicts,
# booleans or None and not executing arbitrary Python code.
val = ast.literal_eval(val)
container[key] = val
return container | 63470f1a935ef6218c239f99228eaf7919b9f09c | 20,940 |
from typing import List
def create_slack_context_block(elements: List[SlackBlock]) -> dict:
"""
Creates a "context block" as described in the slack documentation here:
https://api.slack.com/reference/messaging/blocks#context
"""
return {
'type': 'context',
'elements': [element.get_formatted_block() for element in elements],
} | c2d4400f910cdc6756ec94bcd14661d76bc3dfa1 | 20,941 |
def sample_bar_report(request):
"""
Demonstrates a basic horizontal bar chart report.
"""
profile = request.get_user_profile()
if not profile.super_admin:
raise PermissionDenied('Only super admins can view this report.')
# Run your custom report logic to build the following lists:
# categories = ['Tullys', 'Tyrell', 'Lannister', 'Stark', 'Baratheon']
# values = [85, 100, 250, 75, 42]
categories = []
values = []
for group in Group.objects.all():
active_servers = group.server_set.exclude(status='HISTORICAL')
if active_servers:
categories.append(group.name)
values.append(active_servers.count())
# This sample extension renders a generic template for bar charts,
# which requires this view to return just a few context variables.
#
# You could also define your own template that extends one of the following
# and adds customizations:
# 'reports/bar.html' if you want a more customized pie chart
# 'reports/simple_base.html' for more advanced customization, e.g. more
# than one chart or table.
# 'base.html' to start from scratch from the basic CloudBolt template
return render(request, 'reports/bar.html', dict(
pagetitle='Server Counts by Group (Bar)',
subtitle='Excludes historical servers',
report_slug='Server Counts by Group',
intro="""
Sample report extension draws a bar chart.
""",
# Chart data
categories=categories,
values=values,
series_name='Servers',
# Optionally support exporting as CSV by including this dict
export=dict(
csv_headings=['Group', 'Active Servers']
)
)) | ae4fcf560ca3d1ebac149b6ae3a65ea8f7db2190 | 20,943 |
def _is_ref_path(path_elements):
"""
Determine whether the given object path, expressed as an element list
(see _element_list_to_object_path()), ends with a reference and is
therefore eligible for continuation through the reference. The given
object path is assumed to be "completed" down to a single STIX property
value. This means that a *_ref property will be the last component, and
*_refs will be second-to-last, because it requires a subsequent index step.
:param path_elements: An object path, as a list
:return: True if a continuable reference path; False if not
"""
result = False
if path_elements:
last_elt = path_elements[-1]
if isinstance(last_elt, str) and last_elt.endswith("_ref"):
result = True
elif len(path_elements) > 1:
# for _refs properties, the ref property itself must be
# second-to-last, and the last path element must be an index step,
# either "*" or an int. Maybe not necessary to check the index
# step; all we need is to check the second-to-last property.
second_last_elt = path_elements[-2]
if isinstance(second_last_elt, str) \
and second_last_elt.endswith("_refs"):
result = True
return result | da8bc8eb7611ce7b5361209873e871f2a4656a03 | 20,944 |
import numpy
import dask.array
import dask.utils
def apply_along_axis(func1d, axis, arr, *args, **kwargs):
"""Apply the harmonic analysis to 1-D slices along the given axis."""
arr = dask.array.core.asarray(arr)
# Validate and normalize axis.
arr.shape[axis]
axis = len(arr.shape[:axis])
# Rechunk so that analyze is applied over the full axis.
arr = arr.rechunk(arr.chunks[:axis] + (arr.shape[axis:axis + 1], ) +
arr.chunks[axis + 1:])
# Test out some data with the function.
test_data = numpy.ones(args[0].shape[1], dtype=arr.dtype)
test_result = numpy.array(func1d(test_data, *args, **kwargs))
# Map analyze over the data to get the result
# Adds other axes as needed.
result = arr.map_blocks(
_apply_along_axis,
name=dask.utils.funcname(func1d) + '-along-axis',
dtype=test_result.dtype,
chunks=(arr.chunks[:axis] + test_result.shape + arr.chunks[axis + 1:]),
drop_axis=axis,
new_axis=list(range(axis, axis + test_result.ndim, 1)),
func1d=func1d,
func1d_axis=axis,
func1d_args=args,
func1d_kwargs=kwargs,
)
return result | 09e08b00c9df157e1cf3122f34413e2c4d8e3cb3 | 20,945 |
import pytz
from datetime import datetime
def current_time_utc(conf):
""" Get time in UTC """
UTC = pytz.utc
curr_time = datetime.now(UTC)
return curr_time | e4d4a76cbf04ff059b6913d632d3e2636cfa2b17 | 20,946 |
def CORe50(root=expanduser("~") + "/.avalanche/data/core50/",
scenario="nicv2_391",
run=0,
train_transform=None,
eval_transform=None):
"""
Creates a CL scenario for CORe50.
If the dataset is not present in the computer, this method will
automatically download and store it.
This generator can be used to obtain the NI, NC, NIC and NICv2-* scenarios.
The scenario instance returned by this method will have two fields,
`train_stream` and `test_stream`, which can be iterated to obtain
training and test :class:`Experience`. Each Experience contains the
`dataset` and the associated task label.
The task label "0" will be assigned to each experience.
The scenario API is quite simple and is uniform across all scenario
generators. It is recommended to check the tutorial of the "benchmark" API,
which contains usage examples ranging from "basic" to "advanced".
:param root: Path indicating where to store the dataset and related
metadata. By default they will be stored in
"~/.avalanche/datasets/core50/data/".
:param scenario: CORe50 main scenario. It can be chosen between 'ni', 'nc',
'nic', 'nicv2_79', 'nicv2_196' or 'nicv2_391.'
:param run: number of run for the scenario. Each run defines a different
ordering. Must be a number between 0 and 9.
:param train_transform: The transformation to apply to the training data,
e.g. a random crop, a normalization or a concatenation of different
transformations (see torchvision.transform documentation for a
comprehensive list of possible transformations). Defaults to None.
:param eval_transform: The transformation to apply to the test data,
e.g. a random crop, a normalization or a concatenation of different
transformations (see torchvision.transform documentation for a
comprehensive list of possible transformations). Defaults to None.
:returns: a properly initialized :class:`GenericCLScenario` instance.
"""
assert (0 <= run <= 9), "Pre-defined run of CORe50 are only 10. Indicate " \
"a number between 0 and 9."
    assert (scenario in nbatch.keys()), "The selected scenario is not " \
"recognized: it should be 'ni', 'nc'," \
"'nic', 'nicv2_79', 'nicv2_196' or " \
"'nicv2_391'."
if root is None:
core_data = CORE50_DATA()
else:
core_data = CORE50_DATA(root)
root = core_data.data_folder
root_img = root + "core50_128x128/"
filelists_bp = scen2dirs[scenario] + "run" + str(run) + "/"
train_failists_paths = []
for i in range(nbatch[scenario]):
train_failists_paths.append(
root + filelists_bp + "train_batch_" +
str(i).zfill(2) + "_filelist.txt")
scenario_obj = create_generic_benchmark_from_filelists(
root_img, train_failists_paths,
[root + filelists_bp + "test_filelist.txt"],
task_labels=[0 for _ in range(nbatch[scenario])],
complete_test_set_only=True,
train_transform=train_transform,
eval_transform=eval_transform)
return scenario_obj | 16e1772d7b9d627fdec34657041570b7f4ccfe49 | 20,947 |
def merge(line):
"""
Function that merges a single row or column in 2048.
"""
res = []
for ele in line:
res.append(ele)
for num in res:
if num == 0:
res = shift(res,res.index(num))
for inde in range(len(res)-1):
if res[inde] == res[inde+1]:
res[inde] = res[inde] + res[inde+1]
res[inde+1] = 0
for num in res:
if num == 0:
res = shift(res,res.index(num))
return res | 55418ab717333060455c4f9f41eeafd10e0a9940 | 20,948 |
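The snippet above leans on an external `shift()` helper; a self-contained reference sketch of the intended 2048 merge semantics (slide tiles together, combine each equal pair once, pad with zeros) is:

```python
def merge_reference(line):
    nums = [n for n in line if n != 0]          # slide non-zero tiles together
    out = []
    i = 0
    while i < len(nums):
        if i + 1 < len(nums) and nums[i] == nums[i + 1]:
            out.append(nums[i] * 2)             # combine a pair at most once
            i += 2
        else:
            out.append(nums[i])
            i += 1
    return out + [0] * (len(line) - len(out))   # pad back to original length

print(merge_reference([2, 0, 2, 4]))  # [4, 4, 0, 0]
print(merge_reference([2, 2, 2, 2]))  # [4, 4, 0, 0]
```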
import PIL
from io import BytesIO
def image_to_bytes(image: "PIL.Image.Image") -> bytes:
"""Convert a PIL Image object to bytes using native compression if possible, otherwise use PNG compression."""
buffer = BytesIO()
format = image.format if image.format in list_image_compression_formats() else "PNG"
image.save(buffer, format=format)
return buffer.getvalue() | aa05f41967dd02dbb544e6d503f555d491d6891e | 20,949 |
def add_transform(transform_type, transform_tag=None, priority=0, status=TransformStatus.New, locking=TransformLocking.Idle,
retries=0, expired_at=None, transform_metadata=None, workprogress_id=None, session=None):
"""
Add a transform.
:param transform_type: Transform type.
:param transform_tag: Transform tag.
:param priority: priority.
:param status: Transform status.
:param locking: Transform locking.
:param retries: The number of retries.
:param expired_at: The datetime when it expires.
:param transform_metadata: The metadata as json.
:raises DuplicatedObject: If a transform with the same name exists.
:raises DatabaseException: If there is a database error.
:returns: transform id.
"""
transform_id = orm_transforms.add_transform(transform_type=transform_type, transform_tag=transform_tag,
priority=priority, status=status, locking=locking, retries=retries,
expired_at=expired_at, transform_metadata=transform_metadata,
workprogress_id=workprogress_id, session=session)
return transform_id | ec6bfe98cecf14b80be5c8c08b86c3ee196b4255 | 20,950 |
import joblib
def load_memmap(path):
"""load_memmap方法用于读取共享数据
Parameters
----------
path : str
文件路径
Returns
----------
"""
memmap_data = joblib.load(path, mmap_mode='r+')
return memmap_data | f17120650ecc232be6f4fd8962c8a5a97dd59e1f | 20,951 |
from typing import Tuple
import enum
from typing import Optional
def _classical_routine_on_result(
N: int, t: int, x: int, measurement
) -> Tuple[enum.Enum, Optional[Tuple[int, ...]]]:
"""Try to find factors, given x,N,t and the result of a single quantum measurement.
:param N: number to factorise
:param t: number of qubits
:param x: random integer between 0 < x < N
:param measurement: a single measurement of the first measurement after the quantum part of Shor's algorithm
:return: Tuple of exit status and a tuple of any factors found
"""
try:
continued_fraction_ints = classical.continued_fraction(measurement, 2 ** t)
convergents_vals = classical._convergents(continued_fraction_ints)
rs = classical.possible_orders(convergents_vals)
r = classical.first_order(N, x, rs)
factor = classical.find_factor_from_order(N, x, r)
except util.ShorError as e:
if e.fail_reason == util.ExitStatus.FAILED_FACTOR:
return (util.ExitStatus.FAILED_FACTOR, e.failed_factors)
return e.fail_reason, None
return (util.ExitStatus.SUCCESS, factor) | c9a7c9fedfc95cffc766fee0814e43487bb374ad | 20,952 |
def get_rst_cls_file_header(collection_name, class_name):
"""produce the rst content to begin an attribute-level *.rst file"""
# use :doc:`class_name<index>` syntax to create reference back to the index.rst file
title = ":doc:`%s<../index>` %s" % (collection_name.capitalize(), class_name)
return get_rst_file_header(title) | e914ed9d18c2f248e928f2e6482eee6e0461cfd5 | 20,953 |
def is_leap(year):
"""
Simply returns true or false depending on if it's leap or not.
"""
    return year % 400 == 0 or (year % 4 == 0 and year % 100 != 0) | 5cb40664b2e8aa9aea647a356b63708f00891a2c | 20,954 |
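A quick sanity check of the Gregorian rule encoded above: century years are leap years only when divisible by 400.

```python
for y in (1996, 1900, 2000, 2021):
    print(y, is_leap(y))  # True, False, True, False
```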
def ForwardDynamics(thetalist, dthetalist, taulist, g, Ftip, Mlist, \
Glist, Slist):
"""Computes forward dynamics in the space frame for an open chain robot
:param thetalist: A list of joint variables
:param dthetalist: A list of joint rates
:param taulist: An n-vector of joint forces/torques
:param g: Gravity vector g
:param Ftip: Spatial force applied by the end-effector expressed in frame
{n+1}
:param Mlist: List of link frames i relative to i-1 at the home position
:param Glist: Spatial inertia matrices Gi of the links
:param Slist: Screw axes Si of the joints in a space frame, in the format
of a matrix with axes as the columns
:return: The resulting joint accelerations
This function computes ddthetalist by solving:
Mlist(thetalist) * ddthetalist = taulist - c(thetalist, dthetalist) \
- g(thetalist) - Jtr(thetalist) * Ftip
Example Input (3 Link Robot):
thetalist = np.array([0.1, 0.1, 0.1])
dthetalist = np.array([0.1, 0.2, 0.3])
taulist = np.array([0.5, 0.6, 0.7])
g = np.array([0, 0, -9.8])
Ftip = np.array([1, 1, 1, 1, 1, 1])
M01 = np.array([[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0.089159],
[0, 0, 0, 1]])
M12 = np.array([[ 0, 0, 1, 0.28],
[ 0, 1, 0, 0.13585],
[-1, 0, 0, 0],
[ 0, 0, 0, 1]])
M23 = np.array([[1, 0, 0, 0],
[0, 1, 0, -0.1197],
[0, 0, 1, 0.395],
[0, 0, 0, 1]])
M34 = np.array([[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0.14225],
[0, 0, 0, 1]])
G1 = np.diag([0.010267, 0.010267, 0.00666, 3.7, 3.7, 3.7])
G2 = np.diag([0.22689, 0.22689, 0.0151074, 8.393, 8.393, 8.393])
G3 = np.diag([0.0494433, 0.0494433, 0.004095, 2.275, 2.275, 2.275])
Glist = np.array([G1, G2, G3])
Mlist = np.array([M01, M12, M23, M34])
Slist = np.array([[1, 0, 1, 0, 1, 0],
[0, 1, 0, -0.089, 0, 0],
[0, 1, 0, -0.089, 0, 0.425]]).T
Output:
np.array([-0.97392907, 25.58466784, -32.91499212])
"""
return np.dot(np.linalg.inv(MassMatrix(thetalist, Mlist, Glist, \
Slist)), \
np.array(taulist) \
- VelQuadraticForces(thetalist, dthetalist, Mlist, \
Glist, Slist) \
- GravityForces(thetalist, g, Mlist, Glist, Slist) \
- EndEffectorForces(thetalist, Ftip, Mlist, Glist, \
Slist)) | 3b6e23e301db5c81ed6b19109c203f177d2a3f82 | 20,955 |
def union(dataframe1, dataframe2) -> pd.DataFrame:
"""The set union between dataframe1 (S) and dataframe2 (T), i.e. it returns the elements that are both in dataframe1
and dataframe2. Formally S ∩ T = {s|s ∈ S and s ∈ T}.
If duplicates exists in either dataframe they are dropped and a UserWarning is issued.
Does not alter the original DataFrame.
Syntactic sugar for the pandas dataframe append method.
Parameters
----------
dataframe1 : pd.DataFrame\n
dataframe2 : pd.DataFrame\n
Returns
-------
pandas DataFrame\n
        The set union of dataframe1 and dataframe2
Raises
------
ValueError\n
Raises ValueError if the columns in datframe1 and dataframe2 are not identical.
Example
-------
```python
import panda as pd
import neat_panda
print(df1)
country continent year actual
0 Sweden Europe 2018 1
1 Denmark Not known 2018 3
2 Iceleand Europe 2019 0
print(df2)
country continent year actual
0 Sweden Europe 2020 1
1 Denmark Not known 2020 3
df3 = df1.union(df2)
print(df3)
country continent year actual
0 Sweden Europe 2018 1
1 Denmark Not known 2018 3
2 Iceleand Europe 2019 0
3 Sweden Europe 2020 1
4 Denmark Not known 2020 3
```
"""
return SetOperations(dataframe1, dataframe2).union() | 8f0f9a86bcb75efdc31615f2e1a9e568c58af3ee | 20,956 |
def get_axis_periodic(self, Nper, is_antiperiod=False):
"""Returns the vector 'axis' taking symmetries into account.
Parameters
----------
self: Data1D
a Data1D object
Nper: int
number of periods
is_antiperiod: bool
return values on a semi period (only for antiperiodic signals)
Returns
-------
New Data1D
"""
# Dynamic import to avoid loop
module = __import__("SciDataTool.Classes.Data1D", fromlist=["Data1D"])
Data1D = getattr(module, "Data1D")
values = self.values
N = self.get_length()
if N % Nper != 0:
raise AxisError(
"ERROR: length of axis is not divisible by the number of periods"
)
values_per = values[: int(N / Nper)]
if is_antiperiod:
sym = "antiperiod"
else:
sym = "period"
New_axis = Data1D(
values=values_per,
name=self.name,
unit=self.unit,
symmetries={sym: Nper},
normalizations=self.normalizations,
is_components=self.is_components,
symbol=self.symbol,
)
return New_axis | aa9c9b14cbe27b2f3b64ca2a1c3fe606ba921686 | 20,957 |
def knapsack_fractional(weights,values,capacity):
""" takes weights and values of items and capacity of knapsack as input
and returns the maximum profit possible for the given capacity of knapsack
using the fractional knapsack algorithm"""
    #initialising the value of max_profit variable
    max_profit=0
    for pair in sorted(zip(weights,values),key=lambda x:-x[1]/x[0]): # sort pairs by value-to-weight ratio, descending
        #if the remaining capacity cannot hold the whole item, add the value of the fraction that fits and stop
        if pair[0]>capacity:
            max_profit+=int(pair[1]*capacity/pair[0])
            capacity=0
            break
        #otherwise the whole item fits: add its full value and reduce the remaining capacity
        else:
            max_profit+=pair[1]
            capacity-=pair[0]
#returns nearest possible integer value of profit
return int(max_profit) | 8cb05c199baf65c24512fa97882085e7bb66a98d | 20,958 |
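A hedged worked example: with capacity 50 and items (weight, value) = (10, 60), (20, 100), (30, 120), the greedy-by-ratio strategy takes the first two items whole and two thirds of the third, for 60 + 100 + 80 = 240.

```python
print(knapsack_fractional([10, 20, 30], [60, 100, 120], 50))  # 240
```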
import numpy as np
def invert(values: np.array, inversion: int) -> np.array:
"""Return the specified musical inversion of the values."""
if np.abs(inversion) > (len(values) - 1):
raise ValueError("Inversion out of range")
return np.hstack([values[inversion:], values[:inversion]]).astype(int) | 625d60633efdb44b99c93194f5bdc6dd159df9e7 | 20,959 |
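A small usage example: the first inversion of a C-major triad written as pitch classes (0, 4, 7) rotates the lowest note to the end.

```python
import numpy as np

print(invert(np.array([0, 4, 7]), 1))  # [4 7 0]
```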
from typing import Any
from sqlalchemy import create_engine
from sqlalchemy.engine import Engine, make_url
from sqlalchemy.pool import NullPool
def engine_factory(database_url: str) -> Engine:
"""Construct database connection pool."""
url = make_url(database_url)
engine_kwargs: dict[str, Any] = {}
if url.host == "localhost":
engine_kwargs = {
**engine_kwargs,
"connect_args": {"connect_timeout": 1},
"echo": True,
}
return create_engine(
database_url,
future=True,
poolclass=NullPool,
executemany_mode="values",
**engine_kwargs,
) | e2eec7c7cfa20426708d02429aee565b070df760 | 20,961 |
import torch
def _shear_x(video: torch.Tensor, factor: float, **kwargs):
"""
Shear the video along the horizontal axis.
Args:
video (torch.Tensor): Video tensor with shape (T, C, H, W).
factor (float): How much to shear along the horizontal axis using the affine
matrix.
"""
_check_fill_arg(kwargs)
translation_offset = video.size(-2) * factor / 2
return F_t.affine(
video,
[1, factor, translation_offset, 0, 1, 0],
fill=kwargs["fill"],
interpolation="bilinear",
) | e09d539bd8f8481bff3376f7b6dd5a21849fb6cd | 20,962 |
from typing import Optional
from typing import Sequence
def get_smtp_credentials(filters: Optional[Sequence[pulumi.InputType['GetSmtpCredentialsFilterArgs']]] = None,
user_id: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSmtpCredentialsResult:
"""
This data source provides the list of Smtp Credentials in Oracle Cloud Infrastructure Identity service.
Lists the SMTP credentials for the specified user. The returned object contains the credential's OCID,
the SMTP user name but not the SMTP password. The SMTP password is returned only upon creation.
## Example Usage
```python
import pulumi
import pulumi_oci as oci
test_smtp_credentials = oci.identity.get_smtp_credentials(user_id=oci_identity_user["test_user"]["id"])
```
:param str user_id: The OCID of the user.
"""
__args__ = dict()
__args__['filters'] = filters
__args__['userId'] = user_id
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('oci:identity/getSmtpCredentials:getSmtpCredentials', __args__, opts=opts, typ=GetSmtpCredentialsResult).value
return AwaitableGetSmtpCredentialsResult(
filters=__ret__.filters,
id=__ret__.id,
smtp_credentials=__ret__.smtp_credentials,
user_id=__ret__.user_id) | 3ce0fbd2ce03d072d1b0903018901ef30f360b1f | 20,963 |
def coord(row, col):
""" returns coordinate values of specific cell within Sudoku Puzzle"""
return row*9+col | 14cda1489215a2b36d61ac6eac56c14981290b16 | 20,964 |
def post_authn_parse(request, client_id, endpoint_context, **kwargs):
"""
:param request:
:param client_id:
:param endpoint_context:
:param kwargs:
:return:
"""
if endpoint_context.args["pkce"]["essential"] is True:
if not "code_challenge" in request:
raise ValueError("Missing required code_challenge")
if not "code_challenge_method" in request:
if "plain" not in endpoint_context.args["pkce"]["code_challenge_method"]:
raise ValueError("No support for code_challenge_method=plain")
request["code_challenge_method"] = "plain"
else: # May or may not
if "code_challenge" in request:
if not "code_challenge_method" in request:
if (
"plain"
not in endpoint_context.args["pkce"]["code_challenge_method"]
):
raise ValueError("No support for code_challenge_method=plain")
request["code_challenge_method"] = "plain"
return request | 6e9e00a5d073a57cf0245b2506abfd822b5f6ff5 | 20,965 |
import numpy as np
import torch
import torch.nn.functional as F
def token2hot(seq, max_length, n_features):
"""
takes in tokenized sequences and returns 1-hot encoded
[1 2 2] -> [1 0 0 0], [0 1 0 0 ], [0 1 0 0]
"""
N = max_length - len(seq)
x = np.pad(seq, (0, N), 'constant')
x = F.one_hot(torch.tensor(x),num_classes=n_features)
return x | 8a662703448b4cf8f4e1fddc853436c9677cc664 | 20,967 |
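A hedged usage example: tokens [1, 2, 2] padded to length 4 and one-hot encoded over 4 classes (the padding token 0 maps to class 0).

```python
print(token2hot([1, 2, 2], max_length=4, n_features=4))
# tensor([[0, 1, 0, 0],
#         [0, 0, 1, 0],
#         [0, 0, 1, 0],
#         [1, 0, 0, 0]])
```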
import re
def is_repo_on_known_branch(path):
"""Check if we're on the most recent commit in a well known branch, 'master' or
a version branch."""
remote = find_upstream_remote(None, path)
branches = execute_git(
None,
path,
[
"for-each-ref",
"--format=%(refname:short)",
"--points-at",
"HEAD",
"refs/remotes/%s/*" % remote,
"refs/tags/*",
],
capture=True,
).split()
return any(
[re.search(r"([0-9]+\.[0-9]+\.[0-9x]+|master)$", branch) for branch in branches]
) | 34f96c625c4864d303be84ec6c78f1c3b45761a0 | 20,968 |
from itertools import chain
def make_key(ns_sep, namespace, *names):
"""Make a redis namespaced key.
>>> make_key(":", "YOO:HOO", "a", "b", "c") == "YOO:HOO:a:b:c"
True
"""
return ns_sep.join(chain((namespace,), names)) | 2b73b00819f888c31c4b21a7ba21536b98a9ab26 | 20,971 |
from typing import Counter
def directly_follows(logs_traces, all_activs, noise_threshold=0):
"""
Gets the allowed directly-follows relations given the traces of the log
Parameters
-------------
logs_traces
Traces of the log
all_activs
All the activities
noise_threshold
Noise threshold
Returns
--------------
rel
List of relations in the log
"""
ret0 = Counter()
for trace in logs_traces:
rs = Counter(trace_skel.directly_follows(list(trace)))
for k in rs:
rs[k] = rs[k] * logs_traces[trace]
ret0 += rs
ret = set(x for x, y in ret0.items() if y >= all_activs[x[0]] * (1.0 - noise_threshold))
return ret | 6b0922113f0774eb7ac074ce65fd29b81ecf946f | 20,973 |
def hfc(x, framesize=1024, hopsize=512, fs=44100):
"""
Calculate HFC (High Frequency Content)
Parameters:
inData: ndarray
input signal
framesize: int
framesize
hopsize: int
hopsize
fs: int
samplingrate
Returns:
result: ndarray
HFC
Notes:
        The only difference from the spectral centroid is whether or not the result is normalized by the spectrogram energy.
"""
S,F,T = stft(x, framesize, hopsize, fs, 'hann')
S = sp.absolute(S)
n_frames = S.shape[1]
hfc_data = sp.zeros(n_frames)
hfc_data = (F * S.T).T.sum(0)
return hfc_data | dc28cfd8376b585bf26d85ceaf12777c0f81c471 | 20,975 |
from functools import reduce
def acronym_buster(message):
"""
    Replace every acronym found in the ACRONYMS dict with its full expansion.
    If an all-caps acronym that is not in the dict remains, return a complaint
    naming the first such acronym; otherwise return the message after applying
    the CAPITAL_PATTERN/CAPITAL_FIX substitution.
    :param message: The message to check
    :return: new string with the acronyms replaced with full words
    :rtype: str
"""
message = reduce(lambda msg, item: msg.replace(*item), ACRONYMS.items(), message)
try:
# find all matching groups with .finditer using next and get the first acronym that is not allows
acronym = next(ACRONYM_PATTERN.finditer(message)).group(0)
return "{} is an acronym. I do not like acronyms. Please remove them from your email.".format(
acronym
)
except StopIteration:
return CAPITAL_PATTERN.sub(CAPITAL_FIX, message) | 0867462af314be01d68e4764f2d4462f12ec1c1f | 20,976 |
import logging
def get_backup_temp_2():
"""
This is the third way to get the temperature
"""
try:
temp = RFM69.temperature
logging.warning("Got second backup temperature")
return temp
except RuntimeError:
logging.error("RFM69 not connected")
return 2222
except Exception as error:
logging.error(error)
return 9999 | cc22a21cf319d30330e32cb70ff54b9117be87b6 | 20,977 |
def constructed(function):
"""A decorator function for calling when a class is constructed."""
def store_constructed(class_reference):
"""Store the key map."""
setattr(class_reference, "__deserialize_constructed__", function)
return class_reference
return store_constructed | 29101fe6deb1112b5e69291377a3d8ab12082268 | 20,978 |
def generate_functions(
function,
parameters,
name,
name_func,
tag_dict,
tag_func,
docstring_func,
summarize,
num_passing,
num_failing,
key_combs_limit,
execution_group,
timeout,
):
"""
Generate test cases using the given parameter context, use the name_func
to generate the name.
If parameters is of type ``tuple`` / ``list`` then a new testcase method
will be created for each item.
If parameters is of type ``dict`` (of ``tuple``/``list``), then a new
method will be created for each item in the Cartesian product of all
combinations of values.
:param function: A testcase method, with extra arguments
for parametrization.
:type function: ``callable``
:param parameters: Parametrization context for the test case method.
:type parameters: ``list`` or ``tuple`` of ``dict`` or ``tuple`` / ``list``
OR a ``dict`` of ``tuple`` / ``list``.
:param name: Customized readable name for testcase.
:type name: ``str``
:param name_func: Function for generating names of parametrized testcases,
should accept ``func_name`` and ``kwargs`` as parameters.
:type name_func: ``callable``
:param docstring_func: Function that will generate docstring,
should accept ``docstring`` and ``kwargs`` as parameters.
:type docstring_func: ``callable``
:param tag_func: Function that will be used for generating tags via
parametrization kwargs. Should accept ``kwargs`` as
parameter.
:type tag_func: ``callable``
:param tag_dict: Tag annotations to be used for each generated testcase.
:type tag_dict: ``dict`` of ``set``
:param summarize: Flag for enabling testcase level
summarization of all assertions.
:type summarize: ``bool``
:param num_passing: Max number of passing assertions
for testcase level assertion summary.
:type num_passing: ``int``
:param num_failing: Max number of failing assertions
for testcase level assertion summary.
:type num_failing: ``int``
:param key_combs_limit: Max number of failed key combinations on fix/dict
summaries that contain assertion details.
:type key_combs_limit: ``int``
:param execution_group: Name of execution group in which the testcases
can be executed in parallel.
:type execution_group: ``str`` or ``NoneType``
:param timeout: Timeout in seconds to wait for testcase to be finished.
:type timeout: ``int``
:return: List of functions that is testcase compliant
(accepts ``self``, ``env``, ``result`` as arguments) and have
unique names.
:rtype: ``list``
"""
if not parameters:
        raise ParametrizationError('"parameters" cannot be empty.')
_check_name_func(name_func)
argspec = callable_utils.getargspec(function)
args = argspec.args[3:] # get rid of self, env, result
defaults = argspec.defaults or []
required_args = args[: -len(defaults)] if defaults else args
default_args = dict(zip(args[len(required_args) :], defaults))
kwarg_list = _generate_kwarg_list(
parameters, args, required_args, default_args
)
functions = [
_generate_func(
function=function,
name=name,
name_func=name_func,
tag_func=tag_func,
docstring_func=docstring_func,
tag_dict=tag_dict,
kwargs=kwargs,
)
for kwargs in kwarg_list
]
for idx, func in enumerate(functions):
# Users request the feature that when `name_func` set to `None`,
# then simply append integer suffixes to the names of testcases
if name_func is None:
func.name = "{} {}".format(func.name, idx)
func.summarize = summarize
func.summarize_num_passing = num_passing
func.summarize_num_failing = num_failing
func.summarize_key_combs_limit = key_combs_limit
func.execution_group = execution_group
func.timeout = timeout
return functions | 16c8dbabb94e377b034445213a18e379f3bc58ea | 20,979 |
from typing import List
def post_multi_tag_datapoints(timeseries_with_datapoints: List[TimeseriesWithDatapoints], **kwargs):
"""Insert data into multiple timeseries.
Args:
timeseries_with_datapoints (List[v04.dto.TimeseriesWithDatapoints]): The timeseries with data to insert.
Keyword Args:
api_key (str): Your api-key.
project (str): Project name.
Returns:
An empty response.
"""
api_key, project = config.get_config_variables(kwargs.get("api_key"), kwargs.get("project"))
url = config.get_base_url(api_version=0.4) + "/projects/{}/timeseries/data".format(project)
headers = {"api-key": api_key, "content-type": "application/json", "accept": "application/json"}
ul_dps_limit = 100000
# Make sure we only work with TimeseriesWithDatapoints objects that has a max number of datapoints
timeseries_with_datapoints_limited = []
for entry in timeseries_with_datapoints:
timeseries_with_datapoints_limited.extend(_split_TimeseriesWithDatapoints_if_over_limit(entry, ul_dps_limit))
# Group these TimeseriesWithDatapoints if possible so that we upload as much as possible in each call to the API
timeseries_to_upload_binned = _utils.first_fit(
list_items=timeseries_with_datapoints_limited, max_size=ul_dps_limit, get_count=lambda x: len(x.datapoints)
)
for bin in timeseries_to_upload_binned:
body = {
"items": [
{"tagId": ts_with_data.tagId, "datapoints": [dp.__dict__ for dp in ts_with_data.datapoints]}
for ts_with_data in bin
]
}
res = _utils.post_request(url, body=body, headers=headers)
return res.json() | 6bb73c3896fbad250574dfd5c5ea5c74f6059521 | 20,981 |
from re import search

def get_version(pname: str, url: str) -> str:
"""Extract the package version from the url returned by `get_url`."""
match = search(r"/(\d+\.\d+\.\d+)/(\w+(?:-\w+)?)-(\d+\.\d+\.\d+)\.tar\.gz$", url)
# Assert that the package name matches.
assert match[2] == pname
# As a sanity check, also assert that the versions match.
assert match[1] == match[3]
# Return the version.
return match[1] | dc51ad64f4c47f35b5e6944754ccf4225829047f | 20,982 |
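# Illustrative call to get_version above; the URL is made up but follows the
# expected ".../<version>/<pname>-<version>.tar.gz" layout.
assert get_version("mypkg", "https://example.org/releases/1.2.3/mypkg-1.2.3.tar.gz") == "1.2.3"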
import numpy
from scipy.interpolate import griddata
from scipy.ndimage import gaussian_filter

def SmoothMu(trimset, smoothing):
""" Smooth mu or P(mu>thresh). """
Stot = trimset[0]
newmu = trimset[1]
maxflux = Stot.max()
xfine = numpy.exp(numpy.linspace(0, numpy.log(maxflux), 300))
#tck = interpolate.splrep(Stot, newmu, s=1, k=5)
#yfine = interpolate.splev(xfine, tck, der=0)
yfine = griddata(Stot, newmu, xfine, method='nearest')
bad = yfine * 0 != 0
good = yfine * 0 == 0
replacement = yfine[good][0]
yfine[bad] = replacement
if newmu.max() == 1:
buffmax = 1.
else:
buffmax = 10.
buff0 = numpy.zeros(100) + newmu.min()
buff1 = numpy.zeros(100) + buffmax
yfinebig = numpy.append(buff0, yfine)
yfinebig = numpy.append(yfinebig, buff1)
yfinebig = numpy.log10(yfinebig)
smoothedyfine = gaussian_filter(yfinebig, smoothing)
smoothedyfine = 10 ** (smoothedyfine)
buff0 = numpy.arange(100) - 100
buff1 = numpy.arange(100) + xfine.max()
xfinebig = numpy.append(buff0, xfine)
xfinebig = numpy.append(xfinebig, buff1)
#plt.clf()
#plt.plot(Stot, newmu, 'o')
#plt.plot(xfine, yfine, '.')
#plt.plot(xfinebig, smoothedyfine, '.')
#plt.loglog()
return xfinebig, smoothedyfine | 47dfdcb6b2db607fec65708f03cecaf56b6e3836 | 20,983 |
from scipy.sparse import dia_matrix

def spdiags(data, diags, m, n, format=None):
"""
Return a sparse matrix from diagonals.
Parameters
----------
data : array_like
matrix diagonals stored row-wise
diags : diagonals to set
- k = 0 the main diagonal
- k > 0 the k-th upper diagonal
- k < 0 the k-th lower diagonal
m, n : int
shape of the result
format : format of the result (e.g. "csr")
By default (format=None) an appropriate sparse matrix
format is returned. This choice is subject to change.
See Also
--------
dia_matrix : the sparse DIAgonal format.
Examples
--------
>>> data = array([[1,2,3,4],[1,2,3,4],[1,2,3,4]])
>>> diags = array([0,-1,2])
>>> spdiags(data, diags, 4, 4).todense()
matrix([[1, 0, 3, 0],
[1, 2, 0, 4],
[0, 2, 3, 0],
[0, 0, 3, 4]])
"""
return dia_matrix((data, diags), shape=(m,n)).asformat(format) | ba0301389ab8ca2ef85cdf32aca7851464386ff7 | 20,984 |
from sklearn.metrics import precision_recall_curve, precision_score

def precision_with_fixed_recall(y_true, y_pred_proba, fixed_recall):
""" Compute precision with a fixed recall, for class 1. The chosen threshold for this couple precision is also returned.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred_proba : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier, should be probabilities
fixed_recall : float
Fixed recall, recall will be calculated with respect to this precision
Returns
-------
precision_score : float
threshold : float
"""
if is_valid_y_true(y_true):
_, recall, threshold = precision_recall_curve(y_true, y_pred_proba)
threshold = max([threshold[i] for i in range(len(threshold)) if recall[i] >= fixed_recall])
y_pred_binary = binarize(y_pred_proba, threshold)
return precision_score(y_true, y_pred_binary), threshold
else:
raise ValueError('y_true should not contain only zeros') | 8ff70df3a01a2403735ce68e1158d700b5dd3e6f | 20,987 |
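# Minimal sklearn-only sketch of the fixed-recall thresholding idea used by
# precision_with_fixed_recall above. The tiny dataset is made up, and the helpers
# binarize/is_valid_y_true from the original module are not used here.
import numpy as np
from sklearn.metrics import precision_recall_curve, precision_score

y_true = np.array([0, 0, 1, 1, 1, 0, 1, 0])
y_score = np.array([0.1, 0.4, 0.35, 0.8, 0.65, 0.2, 0.9, 0.5])
precisions, recalls, thresholds = precision_recall_curve(y_true, y_score)
fixed_recall = 0.75
# Largest threshold that still reaches the required recall.
threshold = max(t for t, r in zip(thresholds, recalls) if r >= fixed_recall)
y_pred = (y_score >= threshold).astype(int)
print(precision_score(y_true, y_pred), threshold)  # precision 1.0 at threshold 0.65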
def sample_kollege(user, name='Miguel', crm='222'):
"""Create and return a sample tag"""
return Kollege.objects.create(user=user, name=name, crm=crm) | 6d663e06a33ec576d9d2d96e29abb1522629f11b | 20,988 |
import numpy as np
import pandas as pd
import scipy.sparse as sps
from course_lib.Base.Evaluation.metrics import average_precision, precision, recall
def get_singular_user_metrics(URM_test, recommender_object: BaseRecommender, cutoff=10):
"""
Return a pandas.DataFrame containing the precision, recall and average precision of all the users
:param URM_test: URM to be tested on
:param recommender_object: recommender system to be tested
:param cutoff: the cutoff to be evaluated on
:return: pandas.DataFrame containing the precision, recall and average precision of all the users
"""
URM_test = sps.csr_matrix(URM_test)
n_users = URM_test.shape[0]
average_precision_list = []
precision_list = []
recall_list = []
user_list = []
for user_id in range(n_users):
if user_id % 10000 == 0:
print("Evaluated user {} of {}".format(user_id, n_users))
start_pos = URM_test.indptr[user_id]
end_pos = URM_test.indptr[user_id + 1]
if end_pos - start_pos > 0:
relevant_items = URM_test.indices[start_pos: end_pos]
recommended_items = recommender_object.recommend(user_id, cutoff=cutoff)
is_relevant = np.in1d(recommended_items, relevant_items, assume_unique=True)
user_list.append(user_id)
average_precision_list.append(average_precision(is_relevant, relevant_items))
precision_list.append(precision(is_relevant))
recall_list.append(recall(is_relevant, relevant_items))
return pd.DataFrame(data={'user_id': user_list, 'precision': precision_list, 'recall': recall_list,
'AP': average_precision_list}) | ed2e4c8cfa79967b6423f2af214aac4d0b817dd9 | 20,989 |
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity

def detect_license(document, cleaned=False):
"""
    Finds the license that is most similar to the provided `document`.

    :document: a license, whose name should be identified
    :type document: string

    :cleaned: shows whether a `document` is prepared for vectorization.

    Returns the name of the license document as a string.
"""
vectorizer = TfidfVectorizer(stop_words='english',
strip_accents='unicode',
use_idf=True,
smooth_idf=True,
norm='l2')
names, licenses = load_license_templates()
# `tfidf` is a docXvocab matrix, where each row is a document and each
# column is a token in vocabulary
cleaned_doc = document if cleaned else clean(document)
tfidf = vectorizer.fit_transform(licenses + [cleaned_doc])
# Last row in this matrix is our `document`
vectorized_document = tfidf[-1]
index_of_most_similar_license = 0
max_similarity = -1
# Searching for most similar license
for i in range(0, len(licenses)):
next_license = tfidf[i]
cos = cosine_similarity(vectorized_document, next_license)
if cos > max_similarity:
max_similarity = cos
index_of_most_similar_license = i
return names[index_of_most_similar_license] | 66ae25e7d1d29ed2d0541e7e8136aa9c6d53f6a6 | 20,990 |
from models import Participant
def user_profile(request):
"""
Puts user_profile into context if available.
"""
profile = None
if request.user.is_authenticated():
try:
profile = Participant.objects.get(user__id=request.user.id)
except ObjectDoesNotExist:
pass
return {'user_profile': profile} | 3ffdd2a851caebc865901fa118b19c813b4bffed | 20,991 |
from copy import deepcopy

import torch
def get_reduced_model(model: torch.nn.Module, x_sample: torch.Tensor,
bias: bool = True, activation: bool = True) -> torch.nn.Module:
"""
Get 1-layer model corresponding to the firing path of the model for a specific sample.
:param model: pytorch model
:param x_sample: input sample
:param bias: True if model has bias
:param activation: True if you want to add a sigmoid activation on top
:return: reduced model
"""
x_sample_copy = deepcopy(x_sample)
n_linear_layers = 0
for i, module in enumerate(model.children()):
if isinstance(module, torch.nn.Linear):
n_linear_layers += 1
# compute firing path
count_linear_layers = 0
weights_reduced = None
bias_reduced = None
b = None
for i, module in enumerate(model.children()):
if isinstance(module, torch.nn.Linear):
weight = deepcopy(module.weight.detach())
if bias:
b = deepcopy(module.bias.detach())
# linear layer
hi = module(x_sample_copy)
# relu activation
ai = torch.relu(hi)
# prune nodes that are not firing
# (except for last layer where we don't have a relu!)
if count_linear_layers != n_linear_layers - 1:
weight[hi <= 0] = 0
if bias:
b[hi <= 0] = 0
# compute reduced weight matrix
if i == 0:
weights_reduced = weight
if bias:
bias_reduced = b
else:
weights_reduced = torch.matmul(weight, weights_reduced)
if bias:
                    bias_reduced = torch.matmul(weight, bias_reduced) + b
# the next layer will have the output of the current layer as input
x_sample_copy = ai
count_linear_layers += 1
# build reduced network
linear = torch.nn.Linear(weights_reduced.shape[1],
weights_reduced.shape[0])
state_dict = linear.state_dict()
state_dict['weight'].copy_(weights_reduced.clone().detach())
if bias:
state_dict['bias'].copy_(bias_reduced.clone().detach())
layers = [linear]
if activation:
layers.append(torch.nn.Sigmoid())
model_reduced = torch.nn.Sequential(*layers)
model_reduced.eval()
return model_reduced | d46c4d6194b77b689618f3980d56ec459247f033 | 20,992 |
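# Hedged sanity check for get_reduced_model above. The two-layer MLP and the
# random sample are placeholders; for a firing-path reduction the single
# reduced linear layer should reproduce the original output on that sample.
torch.manual_seed(0)
mlp = torch.nn.Sequential(torch.nn.Linear(4, 8), torch.nn.ReLU(), torch.nn.Linear(8, 1))
x0 = torch.randn(4)
reduced = get_reduced_model(mlp, x0, bias=True, activation=True)
# The two printed values should agree up to floating-point error.
print(torch.sigmoid(mlp(x0)), reduced(x0))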
import numpy as np
from numpy import inner
from numpy.linalg import norm

def cosine_sim(a: np.ndarray, b: np.ndarray) -> float:
"""
Computes the cosine similarity between two vectors
Parameters
----------
a: np.ndarray
the first vector
b: np.ndarray
the second vector
Returns
-------
float: the cosine similarity of the two vectors
"""
a_norm = norm(a)
b_norm = norm(b)
if a_norm == 0 or b_norm == 0:
return 0
else:
return inner(a, b) / (a_norm * b_norm) | bc8bd6c156f11e3ea16fb3ad20e1a4740ad2e22d | 20,993 |
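# Small numerical check for cosine_sim above (vectors chosen for illustration):
# orthogonal vectors give 0.0 and parallel vectors give 1.0.
print(cosine_sim(np.array([1.0, 0.0]), np.array([0.0, 1.0])))  # 0.0
print(cosine_sim(np.array([1.0, 2.0]), np.array([2.0, 4.0])))  # 1.0 (within float error)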
def datetime_to_hours(dt):
"""Converts datetime.timedelta to hours
Parameters:
-----------
dt: datetime.timedelta
Returns:
--------
float
"""
return dt.days * 24 + dt.seconds / 3600 | e7373cbb49e21340fef1590a655059fd39c6ce88 | 20,994 |
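# Worked example for datetime_to_hours above: one day plus two hours is 26 hours.
from datetime import timedelta
print(datetime_to_hours(timedelta(days=1, hours=2)))  # 26.0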
import gdcm
import pydicom.encaps

def create_data_element(ds: "Dataset") -> "DataElement":
"""Return a ``gdcm.DataElement`` for the *Pixel Data*.
Parameters
----------
ds : dataset.Dataset
The :class:`~pydicom.dataset.Dataset` containing the *Pixel
Data*.
Returns
-------
gdcm.DataElement
The converted *Pixel Data* element.
"""
tsyntax = ds.file_meta.TransferSyntaxUID
data_element = gdcm.DataElement(gdcm.Tag(0x7fe0, 0x0010))
if tsyntax.is_compressed:
if getattr(ds, 'NumberOfFrames', 1) > 1:
pixel_data_sequence = (
pydicom.encaps.decode_data_sequence(ds.PixelData)
)
else:
pixel_data_sequence = [
pydicom.encaps.defragment_data(ds.PixelData)
]
fragments = gdcm.SequenceOfFragments.New()
for pixel_data in pixel_data_sequence:
fragment = gdcm.Fragment()
fragment.SetByteStringValue(pixel_data)
fragments.AddFragment(fragment)
data_element.SetValue(fragments.__ref__())
else:
data_element.SetByteStringValue(ds.PixelData)
return data_element | c45370201de36b7a5ea41613bc1387185ce80567 | 20,995 |
def thermo_paths(spc_dct, spc_locs_dct, spc_mods, run_prefix):
""" Set up the path for saving the pf input and output.
Placed in a MESSPF, NASA dirs high in run filesys.
"""
thm_path_dct = {}
for spc_name in spc_locs_dct:
spc_thm_path_dct = {}
spc_info = sinfo.from_dct(spc_dct[spc_name])
spc_formula = automol.inchi.formula_string(spc_info[0])
thm_prefix = [spc_formula, automol.inchi.inchi_key(spc_info[0])]
spc_locs_lst = spc_locs_dct[spc_name]
for sidx, spc_locs in enumerate(spc_locs_lst, start=1):
spc_mod_thm_path_dct = {}
for midx, mod in enumerate(spc_mods):
idx = sidx * 10 + midx
spc_mod_thm_path_dct[mod] = (
job_path(
run_prefix, 'MESS', 'PF',
thm_prefix, locs_idx=idx),
job_path(
run_prefix, 'THERM', 'NASA',
thm_prefix, locs_idx=idx)
)
spc_mod_thm_path_dct['mod_total'] = (
job_path(
run_prefix, 'MESS', 'PF',
thm_prefix, locs_idx=sidx),
job_path(
run_prefix, 'THERM', 'NASA',
thm_prefix, locs_idx=sidx)
)
spc_thm_path_dct[tuple(spc_locs)] = spc_mod_thm_path_dct
spc_thm_path_dct['spc_total'] = (
job_path(
run_prefix, 'MESS', 'PF',
thm_prefix, locs_idx=0),
job_path(
run_prefix, 'THERM', 'NASA',
thm_prefix, locs_idx=0)
)
thm_path_dct[spc_name] = spc_thm_path_dct
return thm_path_dct | 43e7746e38cd7617c0f4168afa59fe557d67316e | 20,996 |
def create_da_model_std(filename, eta_rho=10, xi_rho=10, s_rho=1,
reftime=default_epoch, clobber=False,
cdl=None, title="My Model STD"):
"""
Create an time varying model standard deviation file
Parameters
----------
filename : string
name and path of file to create
eta_rho: int, optional
number of rows in the eta direction
xi_rho: int, optional
number of columns in the xi direction
s_rho: int, optional
number of s-levels
reftime: datetime, optional
date of epoch for time origin in netcdf
clobber: bool, optional
If True, clobber any existing files and recreate. If False, use
the existing file definition
cdl: string, optional,
Use the specified CDL file as the definition for the new
netCDF file.
title: string, optional
netcdf attribute title
Returns
-------
nc, netCDF4 object
"""
# Generate the Structure
dims, vars, attr = cdl_parser(
_cdl_dir + "s4dvar_std_m.cdl" if cdl is None else cdl)
# Fill in the appropriate dimension values
dims = _set_grid_dimensions(dims, eta_rho, xi_rho, s_rho)
vars = _set_time_ref(vars, "ocean_time", reftime)
# Create the file
_nc = ncgen(filename, dims=dims, vars=vars, attr=attr, clobber=clobber,
title=title)
# Return the new file
return _nc | c22f3c023d266a5b53cff2f64afe10975c08464d | 20,997 |
from rmgpy.chemkin import writeThermoEntry
def thermoEntry(request, section, subsection, index):
"""
A view for showing an entry in a thermodynamics database.
"""
# Load the thermo database if necessary
loadDatabase('thermo', section)
# Determine the entry we wish to view
try:
database = getThermoDatabase(section, subsection)
except ValueError:
raise Http404
index = int(index)
if index != 0 and index != -1:
for entry in database.entries.values():
if entry.index == index:
break
else:
raise Http404
else:
if index == 0:
index = min(entry.index for entry in database.entries.values() if entry.index > 0)
else:
index = max(entry.index for entry in database.entries.values() if entry.index > 0)
return HttpResponseRedirect(reverse(thermoEntry,
kwargs={'section': section,
'subsection': subsection,
'index': index,
}))
# Get the structure of the item we are viewing
structure = getStructureInfo(entry.item)
# Prepare the thermo data for passing to the template
# This includes all string formatting, since we can't do that in the template
if isinstance(entry.data, str):
thermo = ['Link', database.entries[entry.data].index]
else:
thermo = entry.data
# Get the thermo data for the molecule
nasa_string = None
if isinstance(entry.item, Molecule):
species = Species(molecule=[entry.item])
species.generateResonanceIsomers()
ThermoDatabase().findCp0andCpInf(species, thermo)
nasa_string = ''
try:
if isinstance(thermo,NASA):
nasa = thermo
else:
nasa = thermo.toNASA(Tmin=100.0, Tmax=5000.0, Tint=1000.0)
species.thermo = nasa
nasa_string = writeThermoEntry(species)
except:
pass
referenceType = ''
reference = entry.reference
return render_to_response('thermoEntry.html', {'section': section, 'subsection': subsection, 'databaseName': database.name, 'entry': entry, 'structure': structure, 'reference': reference, 'referenceType': referenceType, 'thermo': thermo, 'nasa_string':nasa_string}, context_instance=RequestContext(request)) | a13fb8236ef9778b65d29358452234a32ba53a14 | 20,998 |
import io
def read_template(filename):
"""[summary]
This function is for reading a template from a file
[description]
Arguments:
filename {[type]} -- [description]
Returns:
[type] -- [description]
"""
with io.open(filename, encoding = 'utf-8') as template_file:
content = template_file.read()
return Template(content) | f4eceb6d2b9075d0cf61d31842b754d6f3c01ce4 | 21,000 |
import numpy as np

def S(a):
"""
Return the 3x3 cross product matrix
such that S(a)*b = a x b.
"""
assert a.shape == (3,) , "Input vector is not a numpy array of size (3,)"
S = np.asarray([[ 0.0 ,-a[2], a[1] ],
[ a[2], 0.0 ,-a[0] ],
[-a[1], a[0], 0.0 ]])
return S | b71f2529ccdafcc2b27f28c030ec2e3be9bf43ea | 21,001 |
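# Quick check of the cross-product matrix S above: S(a) @ b equals np.cross(a, b).
# The vectors are arbitrary illustrative values.
a = np.array([1.0, 2.0, 3.0])
b = np.array([4.0, 5.0, 6.0])
assert np.allclose(S(a) @ b, np.cross(a, b))  # both give [-3., 6., -3.]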
from typing import Collection
from typing import Mapping
from typing import Any
from typing import Set
from typing import Dict
from typing import List
import itertools
from operator import itemgetter

from django.db import connection
def bulk_get_subscriber_user_ids(
stream_dicts: Collection[Mapping[str, Any]],
user_profile: UserProfile,
subscribed_stream_ids: Set[int],
) -> Dict[int, List[int]]:
"""sub_dict maps stream_id => whether the user is subscribed to that stream."""
target_stream_dicts = []
for stream_dict in stream_dicts:
stream_id = stream_dict["id"]
is_subscribed = stream_id in subscribed_stream_ids
try:
validate_user_access_to_subscribers_helper(
user_profile,
stream_dict,
lambda user_profile: is_subscribed,
)
except JsonableError:
continue
target_stream_dicts.append(stream_dict)
recip_to_stream_id = {stream["recipient_id"]: stream["id"] for stream in target_stream_dicts}
recipient_ids = sorted(stream["recipient_id"] for stream in target_stream_dicts)
result: Dict[int, List[int]] = {stream["id"]: [] for stream in stream_dicts}
if not recipient_ids:
return result
"""
The raw SQL below leads to more than a 2x speedup when tested with
20k+ total subscribers. (For large realms with lots of default
streams, this function deals with LOTS of data, so it is important
to optimize.)
"""
query = SQL(
"""
SELECT
zerver_subscription.recipient_id,
zerver_subscription.user_profile_id
FROM
zerver_subscription
WHERE
zerver_subscription.recipient_id in %(recipient_ids)s AND
zerver_subscription.active AND
zerver_subscription.is_user_active
ORDER BY
zerver_subscription.recipient_id,
zerver_subscription.user_profile_id
"""
)
cursor = connection.cursor()
cursor.execute(query, {"recipient_ids": tuple(recipient_ids)})
rows = cursor.fetchall()
cursor.close()
"""
Using groupby/itemgetter here is important for performance, at scale.
It makes it so that all interpreter overhead is just O(N) in nature.
"""
for recip_id, recip_rows in itertools.groupby(rows, itemgetter(0)):
user_profile_ids = [r[1] for r in recip_rows]
stream_id = recip_to_stream_id[recip_id]
result[stream_id] = list(user_profile_ids)
return result | 49c6fc717340523ef4bdc1d66de111c4c86ce777 | 21,002 |
def adjust_learning_rate(optimizer, step):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
if step == 500000:
for param_group in optimizer.param_groups:
param_group['lr'] = 0.0005
elif step == 1000000:
for param_group in optimizer.param_groups:
param_group['lr'] = 0.0003
elif step == 2000000:
for param_group in optimizer.param_groups:
param_group['lr'] = 0.0001
return optimizer | 729c6650eb9b88102b68ba5e8d356e1cfa8b6632 | 21,003 |
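# Hedged usage sketch for adjust_learning_rate above; the tiny linear model and
# Adam optimizer are placeholders chosen only to show the step-based schedule.
import torch
sched_model = torch.nn.Linear(4, 2)
sched_opt = torch.optim.Adam(sched_model.parameters(), lr=0.001)
sched_opt = adjust_learning_rate(sched_opt, step=500000)
print(sched_opt.param_groups[0]["lr"])  # 0.0005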
def get_menu_permissions(obj):
"""
    Receives the id of the user object taken from the request.
    :param obj: the user id from the request
    :return: the menus granted to the user, or to the groups the user belongs to, resolved through the related tables
    """
    menu_obj = Menu.objects  # menu manager
    umids = [x.id for x in UserMenu.objects.get(user=obj).menu.all()]
    isgroups = [x.id for x in User.objects.get(id=obj).groups.all()]  # groups the user belongs to
    if isgroups:
        gmids = [[m.id for m in x.menu.all()]
                 for x in GroupMenu.objects.filter(group__in=isgroups)
                 if x.menu.all()][0]  # menus granted to the user's groups
        menus = menu_obj.filter(Q(id__in=gmids) | Q(id__in=umids))
    else:
        menus = menu_obj.filter(id__in=umids)  # menus granted to the user
return menus | 45b68b8f39b5507aa1aa1a58c3cc16eb5a1a983a | 21,004 |
import typing
def process_package(
package_path: str,
include_patterns: typing.Optional[typing.List[str]] = None,
exclude_patterns: typing.Optional[typing.List[str]] = None,
) -> SchemaMap:
"""
Recursively process a package source folder and return all json schemas from the top level functions it can find.
You can use optional include/exclude patterns to filter the functions you want to process. These patterns are also
applied to the file names that are processed, with the exception of __init__.py, which is always processed.
    :param package_path: The path to your python package
:param include_patterns: A list of wildcard patterns to match the function names you want to include
:param exclude_patterns: A list of wildcard patterns to match the function names you want to exclude
:return: A dictionary containing your function names and their json schemas
"""
function_schema_map = {}
for package_chain, package_file_path in package_iterator(package_path, include_patterns, exclude_patterns):
function_schema_map.update(
**{
f"{package_chain}.{func_name}": func_schema
for func_name, func_schema in process_file(
package_file_path, include_patterns, exclude_patterns
).items()
}
)
return function_schema_map | f0297d8d93161dc481f8d2aca81a4618ced603fe | 21,005 |
def remove_suffix(input_string, suffix):
"""From the python docs, earlier versions of python does not have this."""
if suffix and input_string.endswith(suffix):
return input_string[: -len(suffix)]
return input_string | af4af2442f42121540de00dfaece13831a27cc57 | 21,008 |
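# Example calls for remove_suffix above.
print(remove_suffix("report.csv", ".csv"))  # "report"
print(remove_suffix("report.csv", ".txt"))  # "report.csv" (unchanged)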
def ais_TranslatePointToBound(*args):
"""
:param aPoint:
:type aPoint: gp_Pnt
:param aDir:
:type aDir: gp_Dir
:param aBndBox:
:type aBndBox: Bnd_Box &
:rtype: gp_Pnt
"""
return _AIS.ais_TranslatePointToBound(*args) | a45701c8a35fdd07e870ec850467a49145acd644 | 21,009 |
def unet_deepflash2(pretrained=None, **kwargs):
"""
U-Net model optimized for deepflash2
pretrained (str): specifies the dataset for pretrained weights
"""
model = _unet_deepflash2(pretrained=pretrained, **kwargs)
return model | 40b11641e3e2c418458c7e1d7e6180d4015ab2b9 | 21,010 |
import requests
def get_bga_game_list():
"""Gets a geeklist containing all games currently on Board Game Arena."""
result = requests.get("https://www.boardgamegeek.com/xmlapi2/geeklist/252354")
return result.text | 61418d5c0e0ad12c3f7af8a7831d02f94153ac84 | 21,011 |
def artifact(name: str, path: str):
"""Decorate a step to create a KFP HTML artifact.
Apply this decorator to a step to create a Kubeflow Pipelines artifact
(https://www.kubeflow.org/docs/pipelines/sdk/output-viewer/).
In case the path does not point to a valid file, the step will fail with
an error.
To generate more than one artifact per step, apply the same decorator
    multiple times, as shown in the example below.
```python
@artifact(name="artifact1", path="./figure.html")
@artifact(name="artifact2", path="./plot.html")
@step(name="artifact-generator")
def foo():
# ...
# save something to plot.html and figure.html
# ...
```
**Note**: Currently the only supported format is HTML.
Args:
name: Artifact name
path: Absolute path to an HTML file
"""
def _(step: Step):
if not isinstance(step, Step):
raise ValueError("You should decorate functions that are decorated"
" with the @step decorator!")
step.artifacts.append(Artifact(name, path))
return step
return _ | b5033a66612d0f2aa5b138b368ca0f1acb7c2b21 | 21,012 |
import http
def build_status(code: int) -> str:
"""
Builds a string with HTTP status code and reason for given code.
:param code: integer HTTP code
:return: string with code and reason
"""
status = http.HTTPStatus(code)
def _process_word(_word: str) -> str:
if _word == "OK":
return _word
return _word.capitalize()
reason = " ".join(_process_word(word) for word in status.name.split("_"))
text = f"{code} {reason}"
return text | 9730abf472ddc3d5e852181c9d60f8c42fee687d | 21,013 |
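# Example calls for build_status above.
print(build_status(200))  # "200 OK"
print(build_status(404))  # "404 Not Found"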
from prettytable import PrettyTable

def pretty_picks_players(picks):
"""Formats a table of players picked for the gameweek, with live score information"""
fields = ["Team", "Position", "Player", "Gameweek score", "Chance of playing next game",
"Player news", "Sub position", "Id"]
table = PrettyTable(field_names=fields)
table.title = "GW points: " + str(picks.score) \
+ " - Average GW points: " + str(picks.event.average_entry_score) \
+ " - Overall arrow: " + picks.entry.overall_arrow["unicode"] \
+ " - GW rank: " + str(picks.entry.summary_event_rank) \
+ " - Overall rank: " + str(picks.entry.summary_overall_rank)
for p in picks.picks:
if picks.player_fielded[p.pick_position]:
table.add_row([p.team_name, p.position, p.displayname, p.gw_points,
p.chance_of_playing_next_round, p.news, p.pick_position, p.id_])
table.add_row(["===", "===", "=======", "==", "", "", "==", "==="])
for p in picks.picks:
if not picks.player_fielded[p.pick_position]:
table.add_row([p.team_name, p.position, p.displayname, p.gw_points,
p.chance_of_playing_next_round, p.news, p.pick_position, p.id_])
table.align = "l"
table.align["Gameweek score"] = "r"
table.align["Sub position"] = "r"
table.align["Chance of playing next game"] = "c"
return table | f4269fedad07b3302f724ba38f3afc1d8d9afc9f | 21,014 |
from django.db import IntegrityError
from django.http import HttpResponse, HttpResponseBadRequest, JsonResponse
from rest_framework import status

def open_path(request):
"""
handles paths authors/
"""
if(request.method == "POST"):
json_data = request.data
new_author = Author(is_active=False)
# Creating new user login information
if "password" in json_data:
password = json_data["password"]
json_data.pop("password")
new_author.set_password(password)
new_author.username = json_data["username"]
for k, v in json_data.items():
setattr(new_author, k, v)
new_author.host = HOST_URL
url = new_author.host + "author/" + str(new_author.id)
new_author.url = url
# Try creating user,
# if duplicate user, return Bad Request
try:
new_author.save()
except IntegrityError:
return HttpResponseBadRequest("username taken")
return HttpResponse(str(new_author.id), status=status.HTTP_200_OK)
if(request.method == "GET"):
data = Author.objects.all()
ser = AuthorSerializer(data, many=True)
return JsonResponse(ser.data, safe=False) | 3c6a8d8fa6ac03a0bdd2b805fa348c43cf088f35 | 21,015 |
def encode_task(task):
""" Encodes a syllogistic task.
Parameters
----------
task : list(list(str))
List representation of the syllogism (e.g., [['All', 'A', 'B'], ['Some', 'B', 'C']]).
Returns
-------
str
Syllogistic task encoding (e.g., 'AI1').
"""
return SyllogisticTaskEncoder.encode_task(task) | b05b9e691d045bcc4877e1d9b9875902b9201bf7 | 21,016 |
import tensorflow as tf

def resize_small(image, resolution):
    """Shrink an image so that its shorter side matches the given resolution."""
    h, w = image.shape[0], image.shape[1]
    ratio = resolution / min(h, w)
    h = tf.cast(tf.round(h * ratio), tf.int32)
    w = tf.cast(tf.round(w * ratio), tf.int32)
return tf.image.resize(image, [h, w], antialias=True) | c44f615c788f300c62eef617f47b81c761ce63bc | 21,017 |
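# Hedged usage sketch for resize_small above: a random 64x48 RGB image (the
# values are placeholders) shrunk so that its shorter side becomes 32 pixels.
small = resize_small(tf.random.uniform([64, 48, 3]), resolution=32)
print(small.shape)  # (43, 32, 3)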