content (stringlengths 35–762k) | sha1 (stringlengths 40) | id (int64 0–3.66M) |
---|---|---|
def is_available():
"""Return true if a pdfjs installation is available."""
try:
get_pdfjs_res('build/pdf.js')
get_pdfjs_res('web/viewer.html')
except PDFJSNotFound:
return False
else:
return True
|
d345ca0b881ecc749fcea8ec4f579f9ba05f25c4
| 32,947 |
def reissueMissingJobs(updatedJobFiles, jobBatcher, batchSystem,
childJobFileToParentJob, childCounts, config,
killAfterNTimesMissing=3):
"""Check all the current job ids are in the list of currently running batch system jobs.
If a job is missing, we mark it as so, if it is missing for a number of runs of
this function (say 10).. then we try deleting the job (though its probably lost), we wait
then we pass the job to processFinishedJob.
"""
runningJobs = set(batchSystem.getIssuedJobIDs())
jobIDsSet = set(jobBatcher.getJobIDs())
#Clean up the reissueMissingJobs_missingHash hash, getting rid of jobs that have turned up
missingJobIDsSet = set(reissueMissingJobs_missingHash.keys())
for jobID in missingJobIDsSet.difference(jobIDsSet):
reissueMissingJobs_missingHash.pop(jobID)
logger.critical("Job id %s is no longer missing" % str(jobID))
assert runningJobs.issubset(jobIDsSet) #Assert checks we have no unexpected jobs running
jobsToKill = []
for jobID in set(jobIDsSet.difference(runningJobs)):
jobFile = jobBatcher.getJob(jobID)
        if jobID in reissueMissingJobs_missingHash:
            reissueMissingJobs_missingHash[jobID] += 1
else:
reissueMissingJobs_missingHash[jobID] = 1
timesMissing = reissueMissingJobs_missingHash[jobID]
logger.critical("Job %s with id %s is missing for the %i time" % (jobFile, str(jobID), timesMissing))
if timesMissing == killAfterNTimesMissing:
reissueMissingJobs_missingHash.pop(jobID)
jobsToKill.append(jobID)
killJobs(jobsToKill, updatedJobFiles, jobBatcher, batchSystem, childJobFileToParentJob, childCounts, config)
return len(reissueMissingJobs_missingHash) == 0
|
6ac051049f1e454fdc92a1a136ef4736e602d121
| 32,948 |
def all_children(wid):
"""Return all children of a widget."""
_list = wid.winfo_children()
for item in _list:
if item.winfo_children():
_list.extend(item.winfo_children())
return _list
|
ca52791b06db6f2dd1aeedc3656ecf08cb7de6d8
| 32,949 |
def logout():
"""------------- Log out -----------------------"""
# remove user session cookies
flash("You Have Been Logged Out")
session.pop("user")
return redirect(url_for("login"))
|
66577a335a3e86c56c2aa78afacef4817786ac30
| 32,950 |
import typing as ty
def listtodict(l: ty.Sequence) -> ty.Mapping:
"""Converts list to dictionary"""
return dict(zip(l[::2], l[1::2]))
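# Hedged usage sketch (not part of the original snippet): alternating elements
# become key/value pairs; any trailing odd element is silently dropped.
assert listtodict(['a', 1, 'b', 2]) == {'a': 1, 'b': 2}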
|
80e645c3b7834e4fd5980fdb3e5df75114e0da82
| 32,951 |
import numpy as np
def sqrtspace(a, b, n_points):
"""
:return: Distribute n_points quadratically from point a to point b, inclusive
"""
return np.linspace(0, 1, n_points)**2*(b-a)+a
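# Hedged usage sketch (hypothetical values): points cluster near `a` and spread
# toward `b` because the unit grid is squared before rescaling.
print(sqrtspace(0.0, 10.0, 5))  # -> [ 0.     0.625  2.5    5.625 10.   ]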
|
d88f3cd808dbab7447cf9609e3770a15e703e515
| 32,952 |
from datetime import datetime
def tstr2iso(input_string: str) -> datetime:
"""
    Convert an ISO-like timestamp string that is compliant with file-path naming
    requirements (colons removed) to a datetime object.
    :return: the parsed datetime
"""
no_colon_input_string = input_string.replace(":", "")
iso_datetime = tstr2iso_nocolon(no_colon_input_string)
return iso_datetime
|
bb591dceef294c36eb9c028b5e28979c37f05a16
| 32,953 |
def test_processing_hooks_are_inherited():
"""Processing hooks are inherited from base classes if missing.
"""
class TestView(DummyBase):
def __call__(self, *args, **kwargs):
return self.count
testview = create_view(TestView)
assert [testview(), testview(), testview()] == [2, 4, 6]
|
465303560f95c098c891361b504238dc4fe22adb
| 32,954 |
def lesson(request, order, slug):
"""
One lesson can be viewed in two different ways:
(1) as independent lesson
(2) as part of one course
As (1) it is well, independent. And it is not really
important to jump to next in order lesson or not.
    It is more important in this context to display 'related lessons'.
As (2) this lesson is within logical sequence of group of lessons
- a course.
In this context, lesson title from within a course may override
the title of the lesson.
E.g. L#18, lesson.title = Django Deployment from Zero to Hero Part 1
L#19, lesson.title = Django Deployment from Zero to Hero Part 2
L#20, lesson.title = Django Deployment from Zero to Hero Part 3
Within course, those lessons will be titled differently:
course.title = Django Deployment from Zero to Hero
lesson#1 - Setup VPS host
lesson#2 - Setup Nginx
lesson#3 - Prepare Database
where lesson#1 is same 'thing' as L#18
lesson#2 is same 'thing' as L#19.
they are just within different context.
Long story short, if user clicks on lesson from course view - lesson
will be displayed differently - as lesson within course.
To switch between views - pass http parameter view = course | lesson
"""
try:
lesson = Lesson.objects.get(order=order)
except Lesson.DoesNotExist:
logger.warning(f"Lesson #{order} not found")
raise Http404("Lesson not found")
user = request.user
if lesson.lesson_type == PRO and not user.is_authenticated:
return login_with_pro(lesson_order=order)
elif lesson.lesson_type == PRO and user.is_authenticated:
if user.profile and not user.profile.is_pro_user():
# means an authenticated user which is not PRO
# wants to access a PRO lesson => he will be redirected
# to upgrade view with lesson_ord argument
return upgrade_with_pro(lesson_order=order)
view = request.GET.get('view', 'lesson')
if view == 'lesson':
template_name = 'lessons/lesson.html'
else:
template_name = 'lessons/lesson_within_course.html'
course = None
lesson_group = None
if view == 'course':
if lesson.lesson_groups.count() > 0:
lesson_group = lesson.lesson_groups.first()
course = lesson_group.course
similar_lessons = []
lesson_groups = LessonGroup.objects.filter(
course=course
).order_by('order')
next_item = lesson_group.get_next_lesson_group_obj()
prev_item = lesson_group.get_prev_lesson_group_obj()
else:
lesson_groups = []
similar_lessons = [
sim_lesson.post
for sim_lesson in lesson.similar_lessons.all()
]
next_item = lesson.get_next_lesson_obj()
prev_item = lesson.get_prev_lesson_obj()
return render(
request,
template_name,
{
'page': lesson,
'course': course,
'lesson_group': lesson_group,
'similar_lessons': similar_lessons,
'all_course_lessons': lesson_groups,
'next_item': next_item,
'prev_item': prev_item
}
)
|
7365179a033728f6208d26e666929f8b414c8d72
| 32,956 |
def _run_prospector(filename,
stamp_file_name,
disabled_linters,
show_lint_files):
"""Run prospector."""
linter_tools = [
"pep257",
"pep8",
"pyflakes"
]
if can_run_pylint():
linter_tools.append("pylint")
# Run prospector on tests. There are some errors we don't care about:
# - invalid-name: This is often triggered because test method names
# can be quite long. Descriptive test method names are
# good, so disable this warning.
# - super-on-old-class: unittest.TestCase is a new style class, but
# pylint detects an old style class.
# - too-many-public-methods: TestCase subclasses by definition have
# lots of methods.
test_ignore_codes = [
"invalid-name",
"super-on-old-class",
"too-many-public-methods"
]
kwargs = dict()
if _file_is_test(filename):
kwargs["ignore_codes"] = test_ignore_codes
else:
if can_run_frosted():
linter_tools += ["frosted"]
return _stamped_deps(stamp_file_name,
_run_prospector_on,
[filename],
linter_tools,
disabled_linters,
show_lint_files,
**kwargs)
|
393435da9d7d638be0e7461ec5251a1485649d7f
| 32,958 |
def dEuler212(q, w):
"""
dEuler212(Q,W)
dq = dEuler212(Q,W) returns the (2-1-2) euler angle derivative
vector for a given (2-1-2) euler angle vector Q and body
angular velocity vector w.
dQ/dt = [B(Q)] w
"""
return np.dot(BmatEuler212(q), w)
|
567a7a452c1e86a01854d63b0fd2efb0ea951fcd
| 32,959 |
from typing import Callable
import functools
def with_zero_out_padding_outputs(
graph_net: Callable[[gn_graph.GraphsTuple], gn_graph.GraphsTuple]
) -> Callable[[gn_graph.GraphsTuple], gn_graph.GraphsTuple]:
"""A wrapper for graph to graph functions that zeroes padded d output values.
See `zero_out_padding` for a full explanation of the method.
Args:
graph_net: A Graph Neural Network.
Returns:
A Graph Neural Network that will zero out all output padded values.
"""
@functools.wraps(graph_net)
def wrapper(graph: gn_graph.GraphsTuple) -> gn_graph.GraphsTuple:
return zero_out_padding(graph_net(graph))
return wrapper
|
5f23defb49df229b2edec46f1f018a25401ca3f4
| 32,960 |
def as_bytes(x) -> bytes:
"""Convert a value to bytes by converting it to string and encoding in utf8."""
if _is_bytes(x):
return bytes(x)
if not isinstance(x, str):
x = str(x)
return x.encode('utf8')
|
2c1c48bd1b02f290ec33dc427ebc4536ba2f2caf
| 32,961 |
def getGeneCount(person, geneSetDictionary):
"""
determines how many genes a person is assumed to have based upon the query information provided
"""
if person in geneSetDictionary["no_genes"]:
gene_count = 0
elif person in geneSetDictionary["one_gene"]:
gene_count = 1
else:
gene_count = 2
return gene_count
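# Hedged usage sketch with a hypothetical query dictionary: anyone not listed
# in "no_genes" or "one_gene" is assumed to carry two genes.
example_sets = {"no_genes": {"Lily"}, "one_gene": {"James"}}
assert getGeneCount("James", example_sets) == 1
assert getGeneCount("Harry", example_sets) == 2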
|
0fef236dd805ae77f04a22670752031af15ca5b2
| 32,962 |
import json
def merge_json(*args):
"""
Take a list of json files and merges them together
Input: list of json file
Output: dictionary of merged json
"""
json_out = dict()
for json_file in args:
try:
if isinstance(json_file, dict):
json_out = {**json_out, **json_file}
else:
with open(json_file) as fn:
json_out = {**json_out, **json.load(fn)}
except OSError as error:
raise error
return json_out
|
37d5e29468d2de2aa11e5a92dc59b7b7b28a170d
| 32,963 |
def get_KPP_PL_tag(last_tag, tag_prefix='T'):
""" Get the next P/L tag in a format T??? """
    assert (len(last_tag) == 4), "Tag must be 4 characters long! (e.g. T???)"
last_tag_num = int(last_tag[1:])
return '{}{:0>3}'.format(tag_prefix, last_tag_num+1)
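# Hedged usage sketch: the numeric part increments and stays zero-padded to three digits.
assert get_KPP_PL_tag('T009') == 'T010'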
|
feb9cedce1fe4dd17aac3d28df25c951bb24cc3f
| 32,965 |
from ibmsecurity.appliance.ibmappliance import IBMError
def update(isamAppliance, description, properties, check_mode=False, force=False):
"""
Update a specified Attribute Matcher
"""
id, update_required, json_data = _check(isamAppliance, description, properties)
if id is None:
raise IBMError("999", "Cannot update data for unknown Attribute Matcher: {0}".format(description))
if force is True or update_required is True:
if check_mode is True:
return isamAppliance.create_return_object(changed=True)
else:
return isamAppliance.invoke_put(
"Update a specified Attribute Matcher",
"{0}/{1}".format(am_uri, id), json_data)
return isamAppliance.create_return_object()
|
2b7d90a15a65035aa623fc16dada0e76076221c1
| 32,966 |
def get_all_themes(config, brand_id):
"""
Get all themes for the given brand id.
:param config: context config
:param brand_id: the brand id for the relevant help center
:return list: list of all themes
"""
url = f"https://{config['subdomain']}.zendesk.com/api/guide/theming/{brand_id}/themes.json"
res = get(config, url)
return res['themes']
|
54e846e8cfbafc418fae3b57818632d1ef8bbb42
| 32,968 |
import logging
def maximum_radius_test(gpu_memory=None, number_of_gpu=None):
"""
:return:
"""
if gpu_memory is None and number_of_gpu is None:
gpu_memory, number_of_gpu = tfu.client.read_gpu_memory()
logging.info('GPU Memory={:.2f} Number of GPU={}'.format(gpu_memory, number_of_gpu))
if 10 <= gpu_memory < 11:
r_input = 120
r_output = 78
elif 11 <= gpu_memory < 12:
r_input = '!'
r_output = '!'
elif 12 <= gpu_memory < 16:
r_input = 136
r_output = 94
    else:
        # Guard against unsupported memory sizes instead of failing with UnboundLocalError.
        raise ValueError('Unsupported GPU memory size: {:.2f} GB'.format(gpu_memory))
    return r_input, r_output
|
a422dd94e8003e25a011ff6f604c20c6b75a203f
| 32,969 |
def tca_plus(source, target):
"""
TCA: Transfer Component Analysis
    :param source: iterable of source dataset names
    :param target: iterable of target dataset names
    :return: pandas DataFrame of per (source, target) pair performance statistics
"""
    result = dict()
    metric = 'process'
    stats = []
    for src_name in source:
        try:
src = prepare_data(src_name, metric)
for tgt_name in target:
try:
tgt = prepare_data(tgt_name, metric)
loc = tgt['file_la'] + tgt['file_lt']
dcv_src, dcv_tgt = get_dcv(src, tgt)
norm_src, norm_tgt = smart_norm(src, tgt, dcv_src, dcv_tgt)
_train, _test = map_transform(norm_src, norm_tgt)
clf = create_model(_train)
actual, predicted, predicted_proba = predict_defects(clf=clf, test=_test)
abcd = metrics.measures(actual,predicted,loc)
recall = abcd.calculate_recall()
pf = abcd.get_pf()
g = abcd.get_g_score()
f = abcd.calculate_f1_score()
pci_20 = abcd.get_pci_20()
print([src_name, tgt_name, recall, pf, g, f, pci_20])
stats.append([src_name, tgt_name,recall, pf, g, f, pci_20])
except Exception as e:
print(src_name, tgt_name, e)
continue
except Exception as e:
print(src_name, tgt_name, e)
continue
print('completed',len(source))
    stats_df = pd.DataFrame(stats, columns = ['source', 'target', 'recall', 'pf', 'g', 'f', 'pci_20'])
# result.update({tgt_name: stats_df})
return stats_df
|
95242aa64db7b88a7f170abf619677c1d4acde57
| 32,971 |
def local_2d_self_attention_spatial_blocks(query_antecedent,
kv_channels,
heads,
memory_h_dim=None,
memory_w_dim=None,
mask_right=False,
master_dtype=tf.float32,
slice_dtype=tf.float32,
name=None):
"""Attention to the source position and a neighborhood to the left or right.
The sequence is divided into blocks of length block_size.
Attention for a given query position can only see memory positions
less than or equal to the query position, in the corresponding block
and the previous block.
Args:
query_antecedent: a mtf.Tensor with shape [batch, num_h_blocks,
num_w_blocks, h_dim, w_dim, io_channels] must have the same size as
query_length, but a different name.
kv_channels: a mtf.Dimension (the size of the key and value vectors)
heads: a mtf.Dimension (the number of heads)
memory_h_dim: mtf Dimension, for the memory height block.
memory_w_dim: mtf Dimension, for the memory width block.
mask_right: bool, flag specifying whether we mask out attention to the right
for the decoder.
master_dtype: a tf.dtype
slice_dtype: a tf.dtype
name: an optional string.
Returns:
a Tensor of shape
[batch, num_h_blocks, num_w_blocks, h_dim, w_dim, io_channels]
Raises:
ValueError: if channels or depth don't match.
"""
with tf.variable_scope(
name, default_name="multihead_attention", values=[query_antecedent]):
h_dim, w_dim, io_channels = query_antecedent.shape.dims[-3:]
batch, num_h_blocks, num_w_blocks = query_antecedent.shape.dims[:3]
wq, wk, wv, wo = multihead_attention_vars(
query_antecedent.mesh, heads, io_channels, kv_channels,
master_dtype, slice_dtype, query_antecedent.dtype)
# Rename dimensions for the memory height and width.
memory_antecedent = mtf.rename_dimension(query_antecedent, h_dim.name,
"memory_" + h_dim.name)
memory_antecedent = mtf.rename_dimension(memory_antecedent, w_dim.name,
"memory_" + w_dim.name)
memory_h_dim, memory_w_dim = memory_antecedent.shape.dims[-3:-1]
# Call einsum over the query and memory to get query q, keys k and values v.
q = mtf.einsum([query_antecedent, wq],
mtf.Shape([
batch, heads, num_h_blocks, num_w_blocks, h_dim, w_dim,
kv_channels
]))
k = mtf.einsum([memory_antecedent, wk],
mtf.Shape([batch, heads, num_h_blocks, num_w_blocks,
memory_h_dim, memory_w_dim, kv_channels]))
v = mtf.einsum([memory_antecedent, wv],
mtf.Shape([batch, heads, num_h_blocks, num_w_blocks,
memory_h_dim, memory_w_dim, kv_channels]))
# Halo exchange for memory blocks.
k, v = local_2d_halo_exchange(k, v, num_h_blocks, memory_h_dim,
num_w_blocks, memory_w_dim, mask_right)
# Calculate the causal mask to avoid peeking into the future. We compute
# this once and reuse it for all blocks since the block_size is known.
mask = None
if mask_right:
mask = attention_bias_local_2d_block(query_antecedent.mesh, h_dim, w_dim,
memory_h_dim, memory_w_dim)
output = dot_product_attention(q, k, v, mask=mask)
return mtf.einsum(
[output, wo],
mtf.Shape(
[batch, num_h_blocks, num_w_blocks, h_dim, w_dim, io_channels]))
|
6295dff8753f4b577086fd414a386271ed6e1a1a
| 32,972 |
def polling_locations_import_from_structured_json(structured_json):
"""
This pathway in requires a we_vote_id, and is not used when we import from Google Civic
:param structured_json:
:return:
"""
polling_location_manager = PollingLocationManager()
polling_locations_saved = 0
polling_locations_updated = 0
polling_locations_not_processed = 0
for one_polling_location in structured_json:
we_vote_id = one_polling_location['we_vote_id'] if 'we_vote_id' in one_polling_location else ''
line1 = one_polling_location['line1'] if 'line1' in one_polling_location else ''
city = one_polling_location['city'] if 'city' in one_polling_location else ''
state = one_polling_location['state'] if 'state' in one_polling_location else ''
if positive_value_exists(we_vote_id) and positive_value_exists(line1) and positive_value_exists(city) and \
positive_value_exists(state):
proceed_to_update_or_create = True
else:
proceed_to_update_or_create = False
if proceed_to_update_or_create:
# Values that are not required
polling_location_id = one_polling_location['polling_location_id'] \
if 'polling_location_id' in one_polling_location else ''
location_name = one_polling_location['location_name'] if 'location_name' in one_polling_location else ''
polling_hours_text = one_polling_location['polling_hours_text'] \
if 'polling_hours_text' in one_polling_location else ''
directions_text = one_polling_location['directions_text'] \
if 'directions_text' in one_polling_location else ''
line2 = one_polling_location['line2'] if 'line2' in one_polling_location else ''
zip_long = one_polling_location['zip_long'] if 'zip_long' in one_polling_location else ''
results = polling_location_manager.update_or_create_polling_location(
we_vote_id, polling_location_id, location_name, polling_hours_text, directions_text,
line1, line2, city, state, zip_long)
else:
polling_locations_not_processed += 1
results = {
'success': False,
'status': 'Required value missing, cannot update or create'
}
if results['success']:
if results['new_polling_location_created']:
polling_locations_saved += 1
else:
polling_locations_updated += 1
else:
polling_locations_not_processed += 1
polling_locations_results = {
'success': True,
'status': "POLLING_LOCATIONS_IMPORT_PROCESS_COMPLETE",
'saved': polling_locations_saved,
'updated': polling_locations_updated,
'not_processed': polling_locations_not_processed,
}
return polling_locations_results
|
868062d4dac4a56073c832f7d2a2919a37a12203
| 32,973 |
def _mgSeqIdToTaxonId(seqId):
"""
Extracts a taxonId from sequence id used in the Amphora or Silva mg databases (ends with '|ncbid:taxonId")
@param seqId: sequence id used in mg databases
@return: taxonId
@rtype: int
"""
return int(seqId.rsplit('|', 1)[1].rsplit(':', 1)[1])
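# Hedged usage sketch with a made-up sequence id in the expected '|ncbid:taxonId' format:
assert _mgSeqIdToTaxonId('AB001.1|ncbid:562') == 562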
|
2ce74f453e3496c043a69b4205f258f06bfd0452
| 32,974 |
def has_progress(toppath):
"""Return `True` if there exist paths that have already been
imported under `toppath`.
"""
with progress_state() as state:
return len(state[toppath]) != 0
|
862c20336c7dd3b1b7d93022d4b633a9de89f336
| 32,975 |
def run_both_transfers(model: BiVAE, *args, **kwargs):
"""
    Run both content-transfer and style-transfer on each pair of the content-representative tensor images
:param model: Trained BiVAE model
:param class_reps: a dictionary of string class_id <-> a single 3dim Tensor (C,H,W)
:param log_dir: Path to the model.logger's log_dir (Eg. '..../exp_name/version7')
:param train_mean: Original datamodule's training set's mean
    :param train_std: Original datamodule's training set's std
:param linearlize: (bool). If true, linearlize the output image to range [0,1] for better viz. contrast
:return:
"""
return (save_content_transfers(model, *args, **kwargs), save_style_transfers(model, *args, **kwargs))
|
6d791931cda68b99701ccf407d78f1c470b124f0
| 32,976 |
def se_mobilenet_075():
"""
Construct SE_MobileNet.
"""
model = SE_MobileNet(widen_factor=0.75, num_classes=1000)
return model
|
277d00141576f55dc6c41896725dcd2ee7c5a1d1
| 32,977 |
from pathlib import Path
def canonicalize_lookup_info(
lookup: SshPubKeyLookupInfo,
ssh_auth_dir_root: Path,
template_vars: SshPubKeyFileTemplateVars
) -> SshPubKeyLookupInfo:
"""Expand the template variables and ensure that paths are made absolute.
"""
ad_root = ssh_auth_dir_root
expd = expand_file_template_vars
canz_path = _canonicalize_potentially_rel_path
return SshPubKeyLookupInfo(
[expd(ft, template_vars) for ft in lookup.file_template],
[canz_path(sp, ad_root) for sp in lookup.file_search_path],
canz_path(lookup.file, ad_root) if lookup.file is not None else None
)
|
24cb791ce5f0ea58daea268dc0e1804a3b056892
| 32,978 |
import numpy as np
def reprojection_rms(impoints_known, impoints_reprojected):
"""
Compute root mean square (RMS) error of points
reprojection (cv2.projectPoints).
Both input NumPy arrays should be of shape (n_points, 2)
"""
diff = impoints_known - impoints_reprojected
squared_distances = np.sum(np.square(diff), axis=1)
rms = np.sqrt(np.mean(squared_distances))
return rms
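# Hedged usage sketch with made-up points: a constant offset of (1, 0) yields an RMS of 1.
_known = np.array([[0.0, 0.0], [1.0, 1.0]])
_reprojected = _known + np.array([1.0, 0.0])
assert abs(reprojection_rms(_known, _reprojected) - 1.0) < 1e-12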
|
11bfbd994df21eb81581012313b838cf5e44424d
| 32,979 |
import torch
def macro_accuracy_one_sub(**kwargs: dict) -> bool:
"""
Calculates whether the predicted output, after the postprocessing step of
selecting the single most 'changed' substation has been applied, wholly
matches the true output.
Differs from micro_accuracy_one_sub in that it doesn't check the
element-wise accuracy.
Differs from macro_accuracy and macro_accuracy_valid in the
postprocessing that has been applied to the prediction.
Parameters
----------
**kwargs['one_sub_P'] : torch.Tensor[float]
The output of the model after the postprocessing step of
selecting the single most 'changed' substation has been applied.
Elements are floats in the range (0,1). A value below 0.5 represents
no change; above 0.5, change.
**kwargs['Y'] : torch.Tensor[float]
The label of the datapoints Elements are floats in {0,1}.
A value below 0 represents no change; of 1, change.
Returns
-------
bool
Whether the post-processed predicted output matches the true output.
"""
one_sub_P = kwargs['one_sub_P']
Y = kwargs['Y']
return torch.equal(torch.round(one_sub_P), torch.round(Y))
|
e21dcc6b2781abe9c7e5c0d03220d0624f68546c
| 32,980 |
def modified_euler(f, y0, t0, t1, n):
""" Use the modified Euler method to compute an approximate solution
to the ODE y' = f(t, y) at n equispaced parameter values from t0 to t1
with initial conditions y(t0) = y0.
y0 is assumed to be either a constant or a one-dimensional numpy array.
    t0 and t1 are assumed to be constants.
f is assumed to accept two arguments.
The first is a constant giving the value of t.
The second is a one-dimensional numpy array of the same size as y.
This function returns an array Y of shape (n,) if
y is a constant or an array of size 1.
It returns an array of shape (n, y.size) otherwise.
In either case, Y[i] is the approximate value of y at
    the i'th value of np.linspace(t0, t1, n).
"""
Y, T, h = initialize_all(y0, t0, t1, n)
    for i in range(1, n):
Y[i] = Y[i-1] + (h / 2.) * (f(T[i-1], Y[i-1]) + f(T[i-1], Y[i-1] + h * f(T[i-1], Y[i-1])))
return Y
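# Hedged sketch of the helper this snippet assumes (not the original initialize_all):
# allocate the solution array, the parameter grid and the step size.
import numpy as np
def initialize_all(y0, t0, t1, n):
    T = np.linspace(t0, t1, n)
    h = T[1] - T[0]
    Y = np.empty((n,) + np.shape(y0))
    Y[0] = y0
    return Y, T, h
# Usage sketch: y' = -y with y(0) = 1 approximates exp(-t) on [0, 1].
approx = modified_euler(lambda t, y: -y, 1.0, 0.0, 1.0, 11)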
|
c5549f194ee8fc446561967a49e89072abdad830
| 32,981 |
def read_tree(sha1=None, data=None):
"""Read tree object with given SHA-1 (hex string) or data, and return list
of (mode, path, sha1) tuples.
"""
if sha1 is not None:
obj_type, data = read_object(sha1)
assert obj_type == 'tree'
elif data is None:
raise TypeError('must specify "sha1" or "data"')
i = 0
entries = []
for _ in range(1000):
end = data.find(b'\x00', i)
if end == -1:
break
mode_str, path = data[i:end].decode().split()
mode = int(mode_str, 8)
digest = data[end + 1:end + 21]
entries.append((mode, path, digest.hex()))
i = end + 1 + 20
return entries
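# Hedged usage sketch with hand-built tree data and a dummy 20-byte digest:
_fake_sha = bytes(range(20))
_raw = b'100644 hello.txt\x00' + _fake_sha
assert read_tree(data=_raw) == [(0o100644, 'hello.txt', _fake_sha.hex())]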
|
6d3fed787ba0e817ee67e9bfd99f5e2b6984684f
| 32,982 |
def cluster(T, m):
"""
Runs PCCA++ [1] to compute a metastable decomposition of MSM states.
(i.e. find clusters using transition matrix and PCCA)
Parameters
----------
T: a probability transition matrix.
m : Desired number of metastable sets (int).
Notes
-----
The metastable decomposition is done using the PCCA method of the pyemma.msm.MSM class.
For more details and references: https://github.com/markovmodel/PyEMMA/blob/devel/pyemma/msm/models/msm.py
"""
# Compute membership vectors.
memberships = mana.pcca_memberships(T, m)
assignments = cluster_assignments(memberships)
clusters = cluster_sets(assignments)
return clusters
|
7ba6f19d519d681b4b36c59409e617a9f1b385e5
| 32,983 |
def fine_tune_class_vector(nr_class, *, exclusive_classes=True, **cfg):
"""Select features from the class-vectors from the last hidden state,
softmax them, and then mean-pool them to produce one feature per vector.
The gradients of the class vectors are incremented in the backward pass,
to allow fine-tuning.
"""
return chain(
get_pytt_class_tokens,
flatten_add_lengths,
with_getitem(0, Softmax(nr_class, cfg["token_vector_width"])),
Pooling(mean_pool),
)
|
21359e128124f075ce4cf0768a24d1d5daaba4c2
| 32,984 |
def _recommend_aals_annoy(est, userid, R, n, filter_items,
recalculate_user, filter_previously_rated,
return_scores, recommend_function,
scaling_function, *args, **kwargs):
"""Produce recommendations for Annoy and NMS ALS algorithms"""
user = est._user_factor(userid, R, recalculate_user)
# Calculate the top N items, only removing the liked items from the
# results if specified
filter_out = _get_filter_items(filter_previously_rated,
# Don't use user, since it might have
# been re-estimated:
R[userid].indices,
filter_items)
# If N is None, we set it to the number of items. The item_factors attr
# exists in all ALS models here
if n is None:
n = est.item_factors.shape[0]
# The count to produce
count = n + len(filter_out)
# See [1] in docstring for why we do this...
query = np.append(user, 0) # (is this the fastest way?)
ids, dist = map(np.asarray, # Need to be a Numpy array
recommend_function(query, count, *args, **kwargs))
# Only compute the dist scaling if we care about them, since it's
# expensive
if return_scores:
# convert the distances from euclidean to cosine distance,
# and then rescale the cosine distance to go back to inner product.
scaling = est.max_norm * np.linalg.norm(query)
dist = scaling_function(dist, scaling) # sig: f(dist, scaling)
# if we're filtering anything out...
return _do_filter(ids, dist, filter_out=filter_out,
return_scores=return_scores, n=n)
|
76e258b64d080ef9804577c92bf41e4f4621f6c2
| 32,985 |
def url_to_filename(url):
"""Converts a URL to a valid filename."""
return url.replace('/', '_')
|
db3023c582590a47a6adc32501a2e3f5fd72f24f
| 32,986 |
def _parallel_dict_from_expr_if_gens(exprs, opt):
"""Transform expressions into a multinomial form given generators."""
indices = {g: i for i, g in enumerate(opt.gens)}
zero_monom = [0]*len(opt.gens)
polys = []
for expr in exprs:
poly = {}
for term in Add.make_args(expr):
coeff, monom = [], zero_monom.copy()
for factor in Mul.make_args(term):
base, exp = decompose_power(factor)
if exp < 0:
exp, base = -exp, Pow(base, -1)
try:
monom[indices[base]] += exp
continue
except KeyError:
if factor.free_symbols & set(opt.gens):
raise PolynomialError(f'{factor} contains an element'
' of the generators set')
coeff.append(factor)
monom = tuple(monom)
poly[monom] = Mul(*coeff) + poly.get(monom, 0)
polys.append(poly)
return polys
|
81dec70ff041cb31062877e8b18823b8e4d283e0
| 32,987 |
def getMObjectHandle(value):
"""
Method used to get an MObjectHandle from any given value.
:type value: Union[str, om.MObject, om.MObjectHandle, om.MDagPath]
:rtype: om.MObjectHandle
"""
# Check for redundancy
#
if isinstance(value, om.MObjectHandle):
return value
else:
return om.MObjectHandle(getMObject(value))
|
e41c7ccd48a5b8eb3b692730d4c6c8a74240f7dd
| 32,988 |
import numpy as np
def remove_dupes(inds1, inds2, inds3=None, inds4=None, tol=1e-6):
"""
    Remove duplicates so as to not break the interpolator.
Parameters
----------
    inds1, inds2 : list or np.array()
        to find unique values, must be same length
    inds3, inds4 : list or np.array(), optional
        additional arrays to intersect when provided
    tol : float
        minimum separation below which consecutive values are treated as duplicates
Returns
-------
non_dupes : list
indices of input arrays that are not duplicates
"""
def unique_seq(seq, tol=1e-6):
'''
Not exactly unique, but only points that are farther
apart than some tol
'''
return np.nonzero(np.abs(np.diff(seq)) >= tol)[0]
un_ind1 = unique_seq(inds1, tol=tol)
un_ind2 = unique_seq(inds2, tol=tol)
non_dupes = list(set(un_ind1) & set(un_ind2))
if inds3 is not None:
un_ind3 = unique_seq(inds3, tol=tol)
non_dupes = list(set(un_ind1) & set(un_ind2) & set(un_ind3))
if inds4 is not None:
un_ind4 = unique_seq(inds4, tol=tol)
non_dupes = list(set(un_ind1) & set(un_ind2) &
set(un_ind3) & set(un_ind4))
return non_dupes
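# Hedged usage sketch: index 1 is dropped because both arrays repeat a value there
# (consecutive entries closer than tol count as duplicates).
assert sorted(remove_dupes([0.0, 1.0, 1.0, 2.0], [5.0, 6.0, 6.0, 7.0])) == [0, 2]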
|
6164e35d0b2c3b33d4e7a4f1737e356c096f2059
| 32,989 |
def filter_punctuation(fst: 'pynini.FstLike') -> 'pynini.FstLike':
"""
Helper function for parsing number strings. Converts common cardinal strings (groups of three digits delineated by 'cardinal_separator' - see graph_utils)
and converts to a string of digits:
"1 000" -> "1000"
"1.000.000" -> "1000000"
Args:
fst: Any pynini.FstLike object. Function composes fst onto string parser fst
Returns:
fst: A pynini.FstLike object
"""
exactly_three_digits = NEMO_DIGIT ** 3 # for blocks of three
up_to_three_digits = pynini.closure(NEMO_DIGIT, 1, 3) # for start of string
cardinal_string = pynini.closure(
NEMO_DIGIT, 1
) # For string w/o punctuation (used for page numbers, thousand series)
cardinal_string |= (
up_to_three_digits
+ pynutil.delete(cardinal_separator)
+ pynini.closure(exactly_three_digits + pynutil.delete(cardinal_separator))
+ exactly_three_digits
)
return cardinal_string @ fst
|
6e78d197fd4b05b66470622a0714bea0c4a935b4
| 32,990 |
def pixbuf2image(pix):
"""Convert gdkpixbuf to PIL image"""
data = pix.get_pixels()
w = pix.props.width
h = pix.props.height
stride = pix.props.rowstride
mode = "RGB"
if pix.props.has_alpha == True:
mode = "RGBA"
im = Image.frombytes(mode, (w, h), data, "raw", mode, stride)
return im
|
a44720fa3e40571d86e65b7f73cd660270919e67
| 32,991 |
def setup_s3_client(job_data):
"""Creates an S3 client
Uses the credentials passed in the event by CodePipeline. These
credentials can be used to access the artifact bucket.
Args:
job_data: The job data structure
Returns:
An S3 client with the appropriate credentials
"""
key_id = job_data['artifactCredentials']['accessKeyId']
key_secret = job_data['artifactCredentials']['secretAccessKey']
session_token = job_data['artifactCredentials']['sessionToken']
session = Session(aws_access_key_id=key_id,
aws_secret_access_key=key_secret,
aws_session_token=session_token)
return session.client('s3', config=botocore.client.Config(signature_version='s3v4'))
|
98ff4d514734a5326dd709274bf0354e7d7cc255
| 32,992 |
import json
def unique_doc_key(doc):
"""
Creates a key that allows to check for record uniqueness
"""
keyparts = [doc['type']]
for attr in ('level', 'country', 'state', 'region', 'district', 'city'):
if attr in doc:
keyparts.append(doc[attr])
key = json.dumps(keyparts)
return key
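# Hedged usage sketch with a hypothetical record: only the attributes that are
# present end up in the JSON key, in a fixed order.
_doc = {'type': 'place', 'country': 'DE', 'city': 'Berlin'}
assert unique_doc_key(_doc) == '["place", "DE", "Berlin"]'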
|
a2584c4628ffd4b0f433c2f85c8c4e7132ed05ea
| 32,994 |
def parse_tag(vt):
"""
Get a VTag from a label
Parameters
----------
vt : str
A label that we want to get the VTag
Raises
------
UnknownTypeError
If the label is not known in VTag
"""
vt = vt.strip()
if vt == "C":
return TAG_CRITICAL
if vt == "L":
return TAG_LEAF
if vt == "N":
return TAG_NODE
raise UnknownTypeError('Type of value unknown: ' + str(vt))
|
ae551ca27f9c3cf542bf4c253c25731ffd8a6097
| 32,995 |
from datetime import datetime
import time
import random
def fetch_stock_revive_info(start_date: date = None, end_date: date = None, retry: int = 10) -> list:
"""
    Table of historical OTC (TPEx) capital-reduction information over the years.
    Output format: [{'code': '4153', 'name': '鈺緯', 'revive_date': date(2020-10-19), 'old_price': 27.20, 'new_price': 30.62}]
"""
result = []
if not start_date:
start_date = date(2013, 1, 1)
if not end_date:
end_date = datetime.today()
req = HttpRequest()
kwargs = dict()
kwargs['headers'] = req.default_headers()
kwargs['params'] = {
'o': 'json', 'l': 'zh',
'd': '{}/{:02d}/{:02d}'.format(start_date.year - 1911, start_date.month, start_date.day),
'ed': '{}/{:02d}/{:02d}'.format(end_date.year - 1911, end_date.month, end_date.day),
'_': int(time.time() * 1000)}
for i in range(retry):
req.wait_interval = random.randint(3, 5)
resp = req.send_data(method='GET', url=STOCK_REVIVE_URL, **kwargs)
if resp.status_code == 200:
try:
data = resp.json()
if not data:
continue
except Exception as e:
logger.warning(str(e))
continue
rows = data.get('aaData', [])
for r in rows:
code = r[1]
                # Only keep entries whose stock code is 4 characters long
if len(code) != 4:
continue
str_zh_date = str(r[0])
if len(str_zh_date) != 7:
continue
year = 1911 + int(str_zh_date[0:3])
month = int(str_zh_date[3:5])
day = int(str_zh_date[5:7])
revive_date = date(year, month, day)
name = r[2]
old_price = round(float(r[3]), 2)
new_price = round(float(r[4]), 2)
reason = r[9]
data = {
'code': code, 'name': name, 'revive_date': revive_date,
'old_price': old_price, 'new_price': new_price, 'reason': reason}
full_href = r[10]
if full_href:
url_list = full_href.split("\'")
if len(url_list) > 1:
param = url_list[1].split('?')
if len(param) == 2:
patch_param = param[1].replace('amp;', '')
patch_param = patch_param.replace('%', '%25')
url = "%s?%s" % (STOCK_REVIVE_DETAIL_URL, patch_param)
detail_data = fetch_stock_revive_detail_info(url)
for k, v in detail_data.items():
data[k] = v
logger.info("取得減資資料: %s" % (data, ))
result.append(data)
break
else:
logger.warning("無法取得所有上櫃減資歷史資資料")
return result
|
12e493accdcd8c6896e23a0c592c284c90e53de3
| 32,996 |
def _read_one(stream: BytesIO) -> int:
"""
Read 1 byte, converting it into an int
"""
c = stream.read(1)
if c == b"":
raise EOFError("Unexpected EOF while reading bytes")
return ord(c)
|
d3f8d22b2d2d3ff08cec42ffcf81cafe9192c707
| 32,997 |
def new_thread_mails(post, users_and_watches):
"""Return an interable of EmailMessages to send when a new thread is
created."""
c = {'post': post.content,
'post_html': post.content_parsed,
'author': post.creator.username,
'host': Site.objects.get_current().domain,
'thread': post.thread.title,
'forum': post.thread.document.title,
'post_url': post.thread.get_absolute_url()}
return emails_with_users_and_watches(
subject=_lazy(u'{forum} - {thread}'),
text_template='kbforums/email/new_thread.ltxt',
html_template='kbforums/email/new_thread.html',
context_vars=c,
users_and_watches=users_and_watches)
|
5a6e0bfbaf87f68d6010c84c0cb8c876042c0027
| 32,998 |
def known(words):
"""The subset of `words` that appear in the dictionary of WORDS."""
return set(w for w in words if w in WORDS)
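# Hedged usage sketch with a hypothetical WORDS vocabulary (the snippet expects
# WORDS to be defined at module level elsewhere):
WORDS = {'hello': 10, 'world': 3}
assert known(['hello', 'hellp', 'world']) == {'hello', 'world'}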
|
c6665115d9cece679cef0cace8d4037aa4a8e47c
| 32,999 |
import pprint
import numpy as np
import pandas as pd
def load_transform_data(human_dataset, bot_dataset, drop_features, bins, logger, **kwargs):
"""
Load and preprocess data, returning the examples and labels as numpy.
"""
# Load data for humans.
df1 = pd.read_csv(human_dataset)
df1 = df1.drop("screen_name", axis=1) # remove screen_name column
df1 = df1.assign(is_bot=0)
# Load data for bots.
df2 = pd.read_csv(bot_dataset)
df2 = df2.drop("screen_name", axis=1) # remove screen_name column
df2 = df2.assign(is_bot=1)
# Concatenate dataframes.
    df = pd.concat([df1, df2], ignore_index=True)
# Drop unwanted features.
df = df.drop(drop_features, axis=1)
for column in df:
# Source identity and is_bot are not quantizable.
if column == "source_identity" or column == "is_bot":
continue
# Drop feature if there is only 1 distinct value.
if np.unique(df[column]).size == 1:
logger.warn("Dropping feature because only one unique value: %s" % column)
df = df.drop(column, axis=1)
continue
df[column] = pd.qcut(df[column], bins, duplicates="drop")
logger.info("Features:\n %s" % pprint.pformat(list(df.columns)))
# Encode 'source_identity' field by setting '1's if source is present.
transformed = _transform_source_identity(df.loc[:, "source_identity"])
df = df.drop("source_identity", axis=1)
df["source_identity_other_present"] = transformed[:, 0]
df["source_identity_other_absent"] = transformed[:, 1]
df["source_identity_browser_present"] = transformed[:, 2]
df["source_identity_browser_absent"] = transformed[:, 3]
df["source_identity_mobile_present"] = transformed[:, 4]
df["source_identity_mobile_absent"] = transformed[:, 5]
df["source_identity_osn_present"] = transformed[:, 6]
df["source_identity_osn_absent"] = transformed[:, 7]
df["source_identity_automation_present"] = transformed[:, 8]
df["source_identity_automation_absent"] = transformed[:, 9]
df["source_identity_marketing_present"] = transformed[:, 10]
df["source_identity_marketing_absent"] = transformed[:, 11]
df["source_identity_news_present"] = transformed[:, 12]
df["source_identity_news_absent"] = transformed[:, 13]
# Perform one-hot encoding
df = pd.get_dummies(df)
# Separate features from targets
df_X = df.drop("is_bot", axis=1)
df_y = df["is_bot"]
# Convert to numpy.
X = df_X.values.astype("float")
y = df_y.values.astype("float")
return X, y, df_X.columns
|
666b9161f1309f3e9765a123773318f55b9f6662
| 33,000 |
def latest():
"""
Latest route returns latest performed searches.
"""
return jsonify(get_latest_searches())
|
a9cc69921566ecb9f97eab008ffc8f7fea167273
| 33,001 |
def actions(board):
"""
Returns set of all possible actions (i, j) available on the board.
"""
PossActions = []
# Find empty positions
for i in range(3):
for j in range(3):
if(board[i][j] == EMPTY):
PossActions.append((i,j))
return PossActions
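# Hedged usage sketch with a hypothetical 3x3 board; the snippet expects EMPTY
# to be defined at module level elsewhere.
EMPTY = None
_board = [['X', EMPTY, 'O'],
          [EMPTY, 'X', EMPTY],
          ['O', EMPTY, 'X']]
assert actions(_board) == [(0, 1), (1, 0), (1, 2), (2, 1)]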
|
51f8b37c7b50b655c33a9ea73ded5e175c21670c
| 33,003 |
import re
import json
def getCity(html):
"""This function uses the ``html`` passed to it as a string to extract, parse and return a City object
Parameters
----------
html : str
the html returned when a get request to view the city is made. This request can be made with the following statement: ``s.get(urlCiudad + id)``, where urlCiudad is a string defined in ``config.py`` and id is the id of the city.
Returns
-------
city : dict
this function returns a json parsed City object. For more information about this object refer to the github wiki page of Ikabot.
"""
city = re.search(r'"updateBackgroundData",\s?([\s\S]*?)\],\["updateTemplateData"', html).group(1)
city = json.loads(city, strict=False)
city['Id'] = city.pop('ownerId')
city['Name'] = city.pop('ownerName')
city['x'] = city.pop('islandXCoord')
city['y'] = city.pop('islandYCoord')
city['cityName'] = city['name']
i = 0
for position in city['position']:
position['position'] = i
i += 1
if 'level' in position:
position['level'] = int(position['level'])
position['isBusy'] = False
if 'constructionSite' in position['building']:
position['isBusy'] = True
position['building'] = position['building'][:-17]
elif 'buildingGround ' in position['building']:
position['name'] = 'empty'
position['type'] = position['building'].split(' ')[-1]
position['building'] = 'empty'
city['id'] = str(city['id'])
city['propia'] = True
city['recursos'] = getAvailableResources(html, num=True)
city['storageCapacity'] = getWarehouseCapacity(html)
city['ciudadanosDisp'] = getFreeCitizens(html)
city['consumo'] = getWineConsumption(html)
city['enventa'] = onSale(html)
city['freeSpaceForResources'] = []
for i in range(5):
city['freeSpaceForResources'].append( city['storageCapacity'] - city['recursos'][i] - city['enventa'][i] )
return city
|
77af6a1c49f254f08ab226138b9a5ddc4abbc9b3
| 33,004 |
def server_hostname(config):
"""
Reads the ambari server name from the config or using the supplied script
"""
global cached_server_hostname
if cached_server_hostname is not None:
return cached_server_hostname
if config.has_option('server', 'hostname_script'):
scriptname = config.get('server', 'hostname_script')
try:
osStat = subprocess.Popen([scriptname], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = osStat.communicate()
if (0 == osStat.returncode and 0 != len(out.strip())):
cached_server_hostname = out.strip()
logger.info("Read server hostname '" + cached_server_hostname + "' using server:hostname_script")
        except Exception as err:
logger.info("Unable to execute hostname_script for server hostname. " + str(err))
if cached_server_hostname is None:
cached_server_hostname = config.get('server', 'hostname')
return cached_server_hostname
|
bb6f0311566d47b32be855bcd33964b28425143e
| 33,005 |
from typing import Callable
import json
from pathlib import Path
import types
def cache_instance(get_instance_fn: Callable[..., data.TrainingInstance] = None, *, cache_dir, **instance_config):
"""Decorator to automatically cache training instances."""
if get_instance_fn is None:
return partial(cache_instance, cache_dir=cache_dir)
cache_data_id = create_hash_id(json.dumps(instance_config).encode())
cache_dir = Path(cache_dir) / cache_data_id
if not Path(cache_dir).exists():
cache_dir.mkdir(parents=True, exist_ok=True)
config_path = cache_dir / "config.json"
if not config_path.exists():
with config_path.open("w") as f:
json.dump(instance_config, f, indent=4, default=str)
@lru_cache
def get_instance(instance_id, location_query, target_date):
cache_path = cache_dir / f"{instance_id}.json"
if cache_path.exists():
with cache_path.open("r") as f:
logger.info(f"getting training instance {instance_id} from cache dir {cache_dir}")
return data.TrainingInstance(**json.load(f))
instance = get_instance_fn(location_query, target_date, instance_id=instance_id, **instance_config)
if not pd.isna(instance.target) and instance.target_is_complete:
with cache_path.open("w") as f:
json.dump(asdict(instance), f, default=str)
return instance
@wraps(get_instance_fn)
def wrapped_instance_fn(
location_query: str,
target_date: types.DateType,
):
return get_instance(
create_id(
target_date,
location_query.encode(),
target_date.isoformat().encode(),
json.dumps(instance_config).encode(),
),
location_query,
target_date,
)
return wrapped_instance_fn
|
50bab385439550eca541f4d18915c031c41a8107
| 33,006 |
def get_definitions_query_filter(request_args):
""" Get query_filter for alert_alarm_definition list route.
"""
query_filters = None
display_retired = False
valid_args = ['array_name', 'platform_name', 'instrument_name', 'reference_designator']
# Process request arguments
if 'retired' in request_args:
if (request_args.get('retired')).lower() == 'true':
display_retired = True
key = None
key_value = None
for key in valid_args:
if key in request_args:
tmp = request_args.get(key)
if tmp:
key_value = str(tmp)
break
# If query_filter to be created, create it
if key_value is not None or display_retired:
query_filters = {}
if key_value is not None:
query_filters[key] = key_value
if display_retired:
query_filters['retired'] = True
return query_filters
|
a087cbd9ca6ffe9b38afc2d8802c12e4dfd47e50
| 33,007 |
import io
def _read_dictionary_page(file_obj, schema_helper, page_header, column_metadata):
"""Read a page containing dictionary data.
Consumes data using the plain encoding and returns an array of values.
"""
raw_bytes = _read_page(file_obj, page_header, column_metadata)
io_obj = io.BytesIO(raw_bytes)
values = encoding.read_plain(
io_obj,
column_metadata.type,
page_header.dictionary_page_header.num_values
)
# convert the values once, if the dictionary is associated with a converted_type.
schema_element = schema_helper.schema_element(column_metadata.path_in_schema[-1])
return convert_column(values, schema_element) if schema_element.converted_type is not None else values
|
f4c0bf36b23238f79bfcc11821e47f88186524e0
| 33,008 |
def num_songs(t):
"""Return the number of songs in the pyTunes tree, t.
>>> pytunes = make_pytunes('i_love_music')
>>> num_songs(pytunes)
3
"""
"*** YOUR CODE HERE ***"
if is_leaf(t):
return 1
else:
sum_songs = 0
for subt in branches(t):
sum_songs += num_songs(subt)
return sum_songs
# better : sum([num_songs(b) for b in branches(t)])
|
ffba78cccbd98963daa6c1ba29650c624fdba29f
| 33,009 |
def _get_parameter_value(potential: Potential, handler: str, parameter: str) -> float:
"""Returns the value of a parameter in its default units"""
return (
potential.parameters[parameter].to(_DEFAULT_UNITS[handler][parameter]).magnitude
)
|
2fef58b3018737975e96deb4d58d54f55407c624
| 33,010 |
from math import sqrt
def triplets_in_range(mini, maxi):
"""
Finds all the triplets in a given range that meet the condition a ** 2 + b ** 2 = c ** 2
>>> triplets_in_range(2, 10)
{(3, 4, 5), (6, 8, 10)}
:param mini: The minimum in the range
    :param maxi: The maximum in the range
:return: a set of tuples (with length 3) of numbers that meet the given condition
:rtype: set
"""
res = set()
for a in range(mini, maxi + 1):
for b in range(a + 1, maxi + 1):
c = int(sqrt(a * a + b * b) + 0.5)
if c * c == a * a + b * b and mini <= c <= maxi:
res.update([(a, b, c,)])
return res
|
1dbe7c64d483d87b2eab1f652a77e346f0ffefec
| 33,011 |
import re
def targetInCol(df, target):
"""
Return meta information (Line or Area) from information in a column of DF.
Arguments:
doc -- csv Promax geometry file
target -- meta information to get (Line or Area)
"""
c = list(df.columns)
ptarget = r''+re.escape(target)
i = [i for i, x in enumerate(c) if re.search(ptarget, x, re.I)]
return df.iloc[0][i[0]] if i else None
|
5d40cf251bd2a7593a46a5b63b5de3a56f8cec29
| 33,012 |
def default_monitor(verbose=1):
"""Returns very simple monitor object to summarize training progress.
Args:
verbose: Level of verbosity of output.
Returns:
Default monitor object.
"""
return BaseMonitor(verbose=verbose)
|
fbc5494d2545439daaeb12a4d3215295226b064e
| 33,013 |
import numpy as np
from sklearn.decomposition import PCA
def pca(X, k = 30, optim = "fastest"):
"""Use PCA to project X to k dimensions."""
# Center/scale the data.
s = np.std(X, axis=0)
s = np.where(s==0, 1, s)
X = (X - np.mean(X, axis=0))/s
if optim == "none":
# Compute covariance eigenvectors with numpy.
#
# TO BE DONE
#
print("Custom PCA is broken; defaulting to sklearn.")
#_, U = np.linalg.eigh(X.T@X)
#return X@U[:,:k]
pca_ = PCA(n_components=k)
return pca_.fit_transform(X)
else:
# Run PCA with sklearn.
pca_ = PCA(n_components=k)
return pca_.fit_transform(X)
|
2e5e9b82ec770aa1cda80519f7d392d68c6949a6
| 33,014 |
def get_range(a_list):
"""
=================================================================================================
get_range(a_list)
This is meant to find the maximal span of a list of values.
=================================================================================================
Arguments:
a_list -> A list of floats/ints. [1,2,-3]
=================================================================================================
Returns: a tuple of the values that are either at the end/beginning of the list. (-3,3)
=================================================================================================
"""
# Make sure the input list is correctly formatted
assert type(a_list) == list, "The input a_list should be of type list..."
# First, unpack the list of lists. This makes one list with all values from
# the lists within the input list.
#print(a_list)
unpacked = gh.unpack_list(a_list)
#print(unpacked)
# Next, float the items in the unpacked list. This will fail if any
# strings that are not floatable are in the list.
unpacked = [float(item) for item in unpacked if float(item) == float(item)]
# Then we can get the max and min of the list.
maxi = max(unpacked)
mini = min(unpacked)
# If the max value is greater than or equal to the minimum value
if abs(maxi) >= abs(mini):
# Then the bound is the int of max plus 1
bound = int(abs(maxi)) + 1
# We can then return the bounds, plus and minus bound
return (-bound, bound)
# If the min value is greater than the max value
elif abs(maxi) < abs(mini):
# Then the bound is the int of the absolute value
# of mini plus 1
bound = int(abs(mini)) + 1
# We can then return the bounds, plus and minus bound
return (-bound, bound)
# If something goes wrong,
else:
# Then raise an error
raise ValueError("This is an unexpected outcome...")
|
36e0cc78d2f45b25af56c1af51292f00c2f2623b
| 33,015 |
def create_config(solution, nodes, description_info):
"""Creates compact string representing input data file
Parameters:
solution (list) List of solutions
nodes (list) List of node specification
description_info (tuple) CSP description in form of tuple: (algorithm name, domains, constraints)
Returns:
ret_string (string) String representing input data file
"""
ret_string = write_description(description_info)
ret_string += "path:=R0;name:=none;order:=1\n"
for i, node in enumerate(nodes, start=2):
partial_string = "path:=R0"
domains = node[0]
node_name = None
for domain in domains:
variable = domain[0]
value = domain[1]
partial_string += variable + str(value)
node_name = value
partial_string += ";name:=" + str(node_name)
partial_string += ";order:=" +str(i)
if node[1] == "d":
partial_string += ";shape:=square;color:=red"
if any(node[0] == s for s in solution):
partial_string += ";color:=blank"
if node[2]:
partial_string += ";bottom_label:="
for l in node[2]:
if l != node[2][0]:
partial_string += ","
partial_string += "(c" + str(l+1) + ")"
if node[3]:
partial_string += ";side_label:="
for l in node[3]:
if l!=node[3][0]:
partial_string += "&&"
for c in l[2]:
if c != l[2][0]:
partial_string += ","
partial_string += "c" + str(c+1)
if l[1]:
partial_string += "->" + l[0] + ":" + str(set(l[1]))
else:
partial_string += "->" + l[0] + ":{}"
ret_string += partial_string
if node != nodes[len(nodes)-1]:
ret_string += "\n"
return ret_string
|
3ccf76ca36b92ceb698aafea43414fe014258b0e
| 33,016 |
def get_effective_option(metadata, settings, key):
"""
Return option with highest priority:
not-defined key < default < pelican config settings < file metadata
"""
return metadata.get(key, settings[DP_KEY].get(key))
|
4b617bd9c7fb0f0533014fae0533c0500f64c9bb
| 33,017 |
def positions_sync_out_doc_view(request):
"""
Show documentation about positionsSyncOut
"""
url_root = WE_VOTE_SERVER_ROOT_URL
template_values = positions_sync_out_doc.positions_sync_out_doc_template_values(url_root)
template_values['voter_api_device_id'] = get_voter_api_device_id(request)
return render(request, 'apis_v1/api_doc_page.html', template_values)
|
f775b6eddf1419a781e7a43d047f288c56566b3b
| 33,018 |
import gettext
def __build_caj_q_html_view__(data: object) -> any:
"""
popup's table for Caju Quality Information
"""
satellite_est = gettext("Satellite Estimation")
tns_survey = gettext("TNS Survey")
nut_count_average = gettext("Nut Count Average")
defective_rate_average = gettext("Defective Rate Average")
kor_average = gettext("KOR Average")
return f'''
<h4>Caju Quality Informations</h4>
<table>
<tr>
<th></th>
<th>{tns_survey}</th>
</tr>
<tr>
<td>{nut_count_average}</td>
<td>{__get_average_nut_count__(data.qars)}</td>
</tr>
<tr>
<td>{defective_rate_average}</td>
<td>{__get_average_defective_rate__(data.qars)}</td>
</tr>
<tr>
<td>{kor_average}</td>
<td>{__get_average_kor__(data.qars)}</td>
</tr>
</table>
'''
|
a4442f4ba486991ea3b1c75168f8ba921d9459c7
| 33,019 |
def code(email):
"""
Returns the one-time password associated with the given user for the
current time window. Returns empty string if user is not found.
"""
print("route=/code/<email> : email:", email)
u = User.get_user(email)
if u is None:
print("user not found, returning ''")
return ''
t = pyotp.TOTP(u.key)
result = str(t.now())
print("result:", result)
return result
|
4479f6af448f6c91ab6d1c563d6baa94542826a3
| 33,020 |
import numpy as np
def blend_image_with_masks(image, masks, colors, alpha=0.5):
"""Add transparent colored mask to an image.
Args:
image: `np.ndarray`, the image of shape (width, height, channel) or (width, height).
masks: `np.ndarray`, the mask of shape (n, width, height).
colors: list, a list of RGB colors (from 0 to 1).
alpha: float, transparency to apply to masks.
Returns:
`np.ndarray`
"""
if image.dtype != "uint8":
raise Exception("The image needs to be of type uint8. "
f"Current type is: {image.dtype}.")
image = image.copy()
image = image / 255
if image.ndim == 2:
image = np.stack([image, image, image], axis=-1)
for color, mask in zip(colors, masks):
rgb_mask = np.stack([mask, mask, mask], axis=-1)
rgb_mask = rgb_mask.astype('float32') * alpha
colored = np.ones(image.shape, dtype='float32') * color[:3]
image = colored * rgb_mask + image * (1 - rgb_mask)
image = image * 255
return image.astype("uint8")
|
9a733d9a6721c2139a64e2e718c4bb5648dbb759
| 33,021 |
import functools
def get_trainee_and_group(func):
"""Decorator to insert trainee and group as arguments to the given function.
Creates new Trainee if did not exist in DB.
Creates new Group if did not exist in DB.
Adds the trainee to the group if it was not part of it.
Appends the trainee and group as last argument of the function.
Example:
@get_trainee_and_group
def run(bot, update, trainee, group):
....
Notes:
func has to be used in dispatcher as handler in order to receive the bot and the update arguments.
"""
@get_group
@functools.wraps(func)
def wrapper(*args, **kwargs):
bot, update = get_bot_and_update_from_args(args)
trainee_id = update.effective_user.id
trainee = Trainee.objects.get(id=trainee_id)
if trainee is None: # new trainee.
trainee = Trainee.objects.create(id=trainee_id,
first_name=update.effective_user.first_name)
group = args[-1]
if trainee not in group.trainees:
group.add_trainee(new_trainee=trainee)
args_with_trainee_and_group = args[:-1] + (trainee, group)
return func(*args_with_trainee_and_group, **kwargs)
return wrapper
|
76fb80e90b36c0264e50510c7226587a131095f5
| 33,022 |
def generate_chromatogram(
ms_data: dict,
chromatogram: str,
ms_level: int = 1
) -> list:
"""
Generates a either a Base Peak Chromatogram (BPC) or Total Ion Chromatogram
(TIC) from ripper data.
Args:
ms_data (dict): mzml ripper data in standard ripper format.
chromatogram (str): specifies type of chromatogram. Must be either "tic"
or "bpc" for Total Ion Chromatogram or Base Peak Chromatogram,
respectively.
ms_level (int, optional): specifies ms level for BPC. Defaults to 1.
Raises:
Exception: raised if chromatogram.lower() is not in ["tic", "bpc"]
Returns:
BPC (List[Tuple[float, float, float]]): list of base peaks in format:
[(retention time, m/z, intensity), ...]
TIC (List[Tuple[float, float]]): list of total ion currents in format:
[(retention_time, total_intensity), ...]
"""
if f"ms{ms_level}" not in ms_data:
return []
ms_data = ms_data[f"ms{ms_level}"]
if chromatogram.lower() == "bpc":
return [
find_max_peak(spectrum=spectrum) for spectrum in ms_data.values()
]
elif chromatogram.lower() == "tic":
return [
sum_intensity_peaks(spectrum) for spectrum in ms_data.values()
]
else:
raise Exception(
f"{chromatogram} not valid chromatogram type. Please choose "
"'tic' or 'bpc' for Total Ion Chromatogram or Base Peak "
"Chromatogram, repspectively"
)
|
901ab7c350ccb00ee277ec96c7496675274ac0f1
| 33,024 |
import tensorflow as tf
def get_tensor_batch_size(values):
"""Extracts batch size from tensor"""
return tf.gather(params=tf.shape(input=values), indices=tf.constant([0]))
|
c1a7d0cb789526310c332d1e2a24697d1357ceb5
| 33,025 |
def generate_particle_timestamp(time_2000):
"""
This function calculates and returns a timestamp in epoch 1900
based on an ASCII hex time in epoch 2000.
Parameter:
time_2000 - number of seconds since Jan 1, 2000
Returns:
number of seconds since Jan 1, 1900
"""
return int(time_2000, 16) + zulu_timestamp_to_ntp_time("2000-01-01T00:00:00.00Z")
|
9c05fc809953e371b756a389d98f3a74c1ea5975
| 33,026 |
import numpy as np
def clip_histogram(hist, clip_limit):
"""Perform clipping of the histogram and redistribution of bins.
The histogram is clipped and the number of excess pixels is counted.
Afterwards the excess pixels are equally redistributed across the
whole histogram (providing the bin count is smaller than the cliplimit).
Parameters
----------
hist : ndarray
Histogram array.
clip_limit : int
Maximum allowed bin count.
Returns
-------
hist : ndarray
Clipped histogram.
"""
# calculate total number of excess pixels
excess_mask = hist > clip_limit
excess = hist[excess_mask]
n_excess = excess.sum() - excess.size * clip_limit
# Second part: clip histogram and redistribute excess pixels in each bin
bin_incr = int(n_excess / hist.size) # average binincrement
upper = clip_limit - bin_incr # Bins larger than upper set to cliplimit
hist[excess_mask] = clip_limit
low_mask = hist < upper
n_excess -= hist[low_mask].size * bin_incr
hist[low_mask] += bin_incr
mid_mask = (hist >= upper) & (hist < clip_limit)
mid = hist[mid_mask]
n_excess -= mid.size * clip_limit - mid.sum()
hist[mid_mask] = clip_limit
prev_n_excess = n_excess
while n_excess > 0: # Redistribute remaining excess
index = 0
while n_excess > 0 and index < hist.size:
under_mask = hist < 0
step_size = int(hist[hist < clip_limit].size / n_excess)
step_size = max(step_size, 1)
indices = np.arange(index, hist.size, step_size)
under_mask[indices] = True
under_mask = under_mask & (hist < clip_limit)
hist[under_mask] += 1
n_excess -= under_mask.sum()
index += 1
# bail if we have not distributed any excess
if prev_n_excess == n_excess:
break
prev_n_excess = n_excess
return hist
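# Hedged usage sketch: counts from the over-tall first bin are redistributed so
# that no bin ends up above the clip limit.
_hist = np.array([50, 2, 2, 2, 2, 2, 2, 2, 2, 2])
_clipped = clip_histogram(_hist.copy(), clip_limit=10)
assert _clipped.max() <= 10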
|
0947568a36024dfdfd9fc37385676e924aedb603
| 33,027 |
from typing import Iterable
from typing import Callable
from typing import Tuple
def aggregate_precision_recall(
labels_pred_iterable: Iterable,
precision_recall_fn: Callable = buffered_precision_recall,
) -> Tuple[float, float]:
"""
Computes aggregate range-based precision recall metrics for the given prediction labels.
Parameters
----------
labels_pred_iterable
An iterable that gives 2-tuples of boolean lists corresponding to `true_labels` and
`pred_labels` respectively.
precision_recall_fn
Function to call in order to get the precision, recall metrics.
Returns
-------
A tuple containing average precision and recall in that order.
"""
total_prec, total_reca, total_prec_w, total_reca_w = 0.0, 0.0, 0.0, 0.0
for true_labels, pred_labels in labels_pred_iterable:
true_ranges = labels_to_ranges(true_labels)
pred_ranges = labels_to_ranges(pred_labels)
_prec, _reca = precision_recall_fn(true_ranges, pred_ranges)
_prec_w, _reca_w = len(pred_ranges), len(true_ranges)
total_prec += _prec * _prec_w
total_prec_w += _prec_w
total_reca += _reca * _reca_w
total_reca_w += _reca_w
return (
total_prec / total_prec_w if total_prec_w > 0 else 0,
total_reca / total_reca_w if total_reca_w > 0 else 0,
)
|
d777832230ae84ff86c0ad60dced8a1c007ed90f
| 33,028 |
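A hedged sketch for aggregate_precision_recall above; labels_to_ranges and the default buffered_precision_recall are assumed to come from the same module as the entry and are not reproduced here.

labels_pred_iterable = [
    ([False, True, True, False], [False, True, False, False]),   # (true_labels, pred_labels) for series 1
    ([True, True, False, False], [True, True, True, False]),     # series 2
]
prec, reca = aggregate_precision_recall(labels_pred_iterable)
print(prec, reca)   # precision/recall aggregated over both series, weighted by range counts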
def find_tag_for(t):
"""If transaction matches a rule, returns corresponding tuple
(tag, ruler, match).
"""
res = []
for (tag, rulers) in list(TAGS.items()):
for ruler in rulers:
m, matches = match(ruler, t)
if m:
res.append((tag, ruler, matches))
if res:
# Return rule with the most fields.
        # If several, pick the one with the longer rules.
return max(
res,
key=lambda tag_ruler_matches: (
len(list(rulify(tag_ruler_matches[1]).keys())),
sum([len(v) for v in list(tag_ruler_matches[2].values()) if v]),
),
)
return None, None, None
|
1b0afd086f428606dfc993d61a0753da98ea176d
| 33,029 |
from typing import Union
from typing import Dict
from typing import Any
import typing
def DOMWidget(
layout: Union[Dict[str, Any], Element[ipywidgets.widgets.widget_layout.Layout]] = {},
on_layout: typing.Callable[[Union[Dict[str, Any], Element[ipywidgets.widgets.widget_layout.Layout]]], Any] = None,
) -> Element[ipywidgets.widgets.domwidget.DOMWidget]:
"""Widget that can be inserted into the DOM"""
kwargs: Dict[Any, Any] = without_default(DOMWidget, locals())
if isinstance(kwargs.get("layout"), dict):
kwargs["layout"] = Layout(**kwargs["layout"])
widget_cls = ipywidgets.widgets.domwidget.DOMWidget
comp = react.core.ComponentWidget(widget=widget_cls)
return Element(comp, **kwargs)
|
cee1f61b5eb57582fae65e28ca8823d13bcdff51
| 33,030 |
def load(path, element_spec=None, compression=None, reader_func=None):
"""Loads a previously saved dataset.
Example usage:
>>> import tempfile
>>> path = os.path.join(tempfile.gettempdir(), "saved_data")
>>> # Save a dataset
>>> dataset = tf.data.Dataset.range(2)
>>> tf.data.experimental.save(dataset, path)
>>> new_dataset = tf.data.experimental.load(path)
>>> for elem in new_dataset:
... print(elem)
tf.Tensor(0, shape=(), dtype=int64)
tf.Tensor(1, shape=(), dtype=int64)
Note that to load a previously saved dataset, you need to specify
`element_spec` -- a type signature of the elements of the saved dataset, which
can be obtained via `tf.data.Dataset.element_spec`. This requirement exists so
that shape inference of the loaded dataset does not need to perform I/O.
If the default option of sharding the saved dataset was used, the element
order of the saved dataset will be preserved when loading it.
The `reader_func` argument can be used to specify a custom order in which
elements should be loaded from the individual shards. The `reader_func` is
expected to take a single argument -- a dataset of datasets, each containing
elements of one of the shards -- and return a dataset of elements. For
example, the order of shards can be shuffled when loading them as follows:
```python
def custom_reader_func(datasets):
datasets = datasets.shuffle(NUM_SHARDS)
return datasets.interleave(lambda x: x, num_parallel_calls=AUTOTUNE)
dataset = tf.data.experimental.load(
path="/path/to/data", ..., reader_func=custom_reader_func)
```
Args:
path: Required. A path pointing to a previously saved dataset.
element_spec: Optional. A nested structure of `tf.TypeSpec` objects matching
the structure of an element of the saved dataset and specifying the type
of individual element components. If not provided, the nested structure of
`tf.TypeSpec` saved with the saved dataset is used.
compression: Optional. The algorithm to use to decompress the data when
reading it. Supported options are `GZIP` and `NONE`. Defaults to `NONE`.
reader_func: Optional. A function to control how to read data from shards.
If present, the function will be traced and executed as graph computation.
Returns:
A `tf.data.Dataset` instance.
Raises:
FileNotFoundError: If `element_spec` is not specified and the saved nested
structure of `tf.TypeSpec` can not be located with the saved dataset.
"""
return _LoadDataset(
path=path,
element_spec=element_spec,
compression=compression,
reader_func=reader_func)
|
d3ec8a97cab7897658758f42486e6f4f3b605e6d
| 33,031 |
def test_confusion_PRFAS():
"""
Line=True class, column=Prediction
TR_B [[1585 109 4]
TR_I [ 126 1233 17]
TR_O [ 20 12 82]]
(unweighted) Accuracy score = 90.97 % trace=2900 sum=3188
precision recall f1-score support
TR_B 0.916 0.933 0.924 1698
TR_I 0.911 0.896 0.903 1376
TR_O 0.796 0.719 0.756 114
avg / total 0.909 0.910 0.909 3188
"""
cm = np.array([ [1585, 109, 4]
, [ 126, 1233, 17]
, [ 20, 12, 82]])
p, r, f1, a, s = confusion_PRFAS(cm)
def ok(a,b): return abs(a-b) < 0.001
assert ok(p,0.909), ("p",p)
assert ok(r,0.910), ("r",r)
assert ok(f1,0.909), ("f",f1)
assert ok(a,0.9097), ("a",a)
assert ok(s,3188), ("s",s)
|
257736819e3dd6a1c4f2644a15bc74cde2f4c49b
| 33,032 |
def wrap(x, m, M):
"""
:param x: a scalar
:param m: minimum possible value in range
:param M: maximum possible value in range
Wraps ``x`` so m <= x <= M; but unlike ``bound()`` which
truncates, ``wrap()`` wraps x around the coordinate system defined by m,M.\n
For example, m = -180, M = 180 (degrees), x = 360 --> returns 0.
"""
diff = M - m
while x > M:
x = x - diff
while x < m:
x = x + diff
return x
|
274017550a39a79daacdcc96c76c09116093f47a
| 33,033 |
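Quick checks for wrap above (function assumed in scope), using the degree example from its docstring.

print(wrap(360, -180, 180))    # 0
print(wrap(-190, -180, 180))   # 170
print(wrap(90, -180, 180))     # 90 (already inside the range)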
from typing import Iterable
from typing import List
from typing import Any
import click
from typing import cast
from typing import Callable
def execute_processors(processors: Iterable[ProcessorType], state: State) -> None:
"""Execute a sequence of processors to generate a Document structure. For block handling,
we use a recursive approach. Only top-level blocks are extracted and processed by block
processors, which, in turn, recursively call this function.
Args:
processors: iterable of processors
state: state structure
Returns:
generated geometries
"""
outer_processors: List[Any] = [] # gather commands outside of top-level blocks
top_level_processors: List[Any] = [] # gather commands inside of top-level blocks
block = None # save the current top-level block's block layer_processor
nested_count = 0 # block depth counter
expect_block = False # set to True by `begin` command
for proc in processors:
if getattr(proc, "__vpype_block_processor__", False):
if not expect_block:
# `begin` was omitted
nested_count += 1
else:
expect_block = False
            # if we are in a top-level block, we save the block layer_processor
# (nested block are ignored for the time being)
if nested_count == 1:
block = proc
else:
top_level_processors.append(proc)
elif expect_block:
raise click.BadParameter("A block command must always follow 'begin'")
elif isinstance(proc, BeginBlock):
# entering a block
nested_count += 1
expect_block = True
elif isinstance(proc, EndBlock):
if nested_count < 1:
raise click.BadParameter(
"A 'end' command has no corresponding 'begin' command"
)
nested_count -= 1
if nested_count == 0:
# We're closing a top level block. The top-level sequence [BeginBlock,
# block_processor, *top_level_processors, EndBlock] is now replaced by a
# placeholder closure that will execute the corresponding block processor on
# the top_level_processors sequence.
#
# Note: we use the default argument trick to copy the *current* value of
# block and top_level_processor "inside" the placeholder function.
# noinspection PyShadowingNames
def block_processor_placeholder(
state: State, block=block, processors=tuple(top_level_processors)
) -> State:
return cast(Callable, block)(state, processors)
outer_processors.append(block_processor_placeholder)
# reset the top level layer_processor list
top_level_processors = list()
else:
top_level_processors.append(proc)
else:
# this is a 'normal' layer_processor, we can just add it to the top of the stack
if nested_count == 0:
outer_processors.append(proc)
else:
top_level_processors.append(proc)
# at this stage, the stack must have a single frame, otherwise we're missing end commands
if nested_count > 0:
raise click.ClickException("An 'end' command is missing")
# the (only) frame's processors should now be flat and can be chain-called
for proc in outer_processors:
cast(Callable, proc)(state)
|
4af44e41c02184286c4c038143e1461d8fbe044d
| 33,034 |
def date(repo, subset, x):
"""Changesets within the interval, see :hg:`help dates`.
"""
# i18n: "date" is a keyword
ds = getstring(x, _("date requires a string"))
dm = util.matchdate(ds)
return subset.filter(lambda x: dm(repo[x].date()[0]),
condrepr=('<date %r>', ds))
|
91d6cea81861791daed3220bc03e3002a47a959d
| 33,035 |
def addAuthor(author):
"""
Creates an Author dictionary
:param author: Author instance
:return: Dict
"""
author_dict = dict()
# author_dict['id'] = "{}/api/{}".format(DOMAIN, author.id)
author_dict['id'] = "{}/api/author/{}".format(DOMAIN, author.id)
author_dict['host'] = "{}/api/".format(author.host_url)
author_dict['displayName'] = author.username
author_dict['github'] = author.github_url
author_dict['url'] = "{}/api/author/{}".format(DOMAIN, author.id)
# Optional Attributes
if author.github_url:
author_dict['github'] = author.github_url
if author.user.first_name:
author_dict['firstName'] = author.user.first_name
if author.user.last_name:
author_dict['lastName'] = author.user.last_name
if author.user.email:
author_dict['email'] = author.user.email
if author.bio:
author_dict['bio'] = author.bio
return author_dict
|
f6b35909e223987eb37178d1f6722eaffacc94cd
| 33,036 |
def add_transformer_enc_hyperparams_args(parser):
"""Only applicable when args.model_name is 'transformer_enc'"""
parser.add_argument('--hid_dim', type=int, default=128)
parser.add_argument('--num_enc_layers', type=int, default=3)
parser.add_argument('--num_enc_heads', type=int, default=8)
parser.add_argument('--enc_pf_dim', type=int, default=256)
parser.add_argument('--enc_dropout', type=float, default=0.1)
parser.add_argument('--fc_dim', type=int, default=64, help='hidden size of the linear layer added on top')
return parser
|
bc38c3cc1d9fc7e87cebfbf7bdc74f8e9d0a124e
| 33,037 |
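A minimal usage sketch for add_transformer_enc_hyperparams_args above (function assumed in scope); the command-line values are illustrative.

import argparse

parser = argparse.ArgumentParser()
parser = add_transformer_enc_hyperparams_args(parser)
args = parser.parse_args(["--hid_dim", "256", "--enc_dropout", "0.2"])
print(args.hid_dim, args.num_enc_layers, args.enc_dropout)   # 256 3 0.2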
def make_length(value):
""" Make a kicad length measurement from an openjson measurement """
return int(round(float(value) * MULT))
|
1fe311b94eaaf123f7a028d3a06232185903179d
| 33,038 |
def with_metaclass(meta, *bases):
"""copied from https://github.com/Byron/bcore/blob/master/src/python/butility/future.py#L15"""
class metaclass(meta):
__call__ = type.__call__
__init__ = type.__init__
def __new__(cls, name, nbases, d):
if nbases is None:
return type.__new__(cls, name, (), d)
# There may be clients who rely on this attribute to be set to a reasonable value, which is why
# we set the __metaclass__ attribute explicitly
            if not PY3 and '__metaclass__' not in d:
d['__metaclass__'] = meta
# end
return meta(name, bases, d)
# end
# end metaclass
return metaclass(meta.__name__ + 'Helper', None, {})
# end handle py2
|
e0d9c4d580125cc60ab8319cc9a2ca918ef40291
| 33,040 |
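A small sketch of the with_metaclass helper above; PY3 is a module-level flag in the original source that is not part of this entry, so it is assumed here to be True and defined in the same module as the function.

PY3 = True   # assumption: the surrounding module defines this flag

class Meta(type):
    pass

class Base(object):
    pass

class MyClass(with_metaclass(Meta, Base)):
    pass

print(type(MyClass) is Meta)         # True: the metaclass is applied
print(MyClass.__bases__ == (Base,))  # True: the temporary helper base is discarded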
import math
def calc_mupen_res(N,region_w,region_h):
"""find res to fit N mupen instances in region"""
results = []
for row_length in range(1,N+1):
col_length = math.ceil(N/float(row_length))
instance_width = int(math.floor( min(640, region_w/float(row_length) )))
instance_height = int(math.floor(instance_width*(480.0/640.0)))
if instance_height*col_length <= region_h and instance_width*row_length <= region_w:
results.append((instance_width, instance_height))
return max(results)
|
35b5e739102097d856b7c2e154516d4e866a1567
| 33,041 |
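Worked examples for calc_mupen_res above (function assumed in scope).

print(calc_mupen_res(4, 1280, 960))    # (640, 480): a 2x2 grid of full-size 640x480 instances fits
print(calc_mupen_res(9, 1920, 1080))   # (480, 360): 4 columns x 3 rows is the best fit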
from typing import List
import nltk
from nltk.corpus import wordnet
def pos_tag_wordnet(text: List) -> List:
"""Create pos_tag with wordnet format
:rtype: object
    :param (List) text: tokens to be nltk pos_tagged for syntactically similar synonyms
    :return (List[Tuple[str, 'pos_tag']]) tagged_text: tokens paired with their wordnet pos tags
"""
pos_tagged_text = nltk.pos_tag(text)
# map the pos tagging output with wordnet output
tagged_text = []
wordnet_map = {
"N": wordnet.NOUN,
"V": wordnet.VERB,
"J": wordnet.ADJ,
"R": wordnet.ADV
}
for (word, pos_tag) in pos_tagged_text:
tagged_text.append((word, wordnet_map.get(pos_tag[0])) if pos_tag[0] in wordnet_map.keys() else (word, wordnet.NOUN))
return tagged_text
|
7da0081c37064678ce70590cecc313ee6ec60673
| 33,042 |
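A hedged usage sketch for pos_tag_wordnet above; it assumes the function is in scope and that the NLTK 'averaged_perceptron_tagger' data has been downloaded, and the exact tags depend on the tagger model.

# nltk.download("averaged_perceptron_tagger")   # required once
tokens = ["dogs", "run", "quickly"]
print(pos_tag_wordnet(tokens))
# e.g. [('dogs', 'n'), ('run', 'v'), ('quickly', 'r')]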
def create_app():
"""
Create an app with config file
:return: Flask App
"""
# init a flask app
app = Flask(__name__)
    # Load configuration from the yaml file; this loading approach makes the settings take effect
    # Initialize the app
    _config_app(app)
    # Allow cross-origin requests
    if app.config.get('CORS_ENABLE'):
        CORS(app)
    # Configure blueprints
    configure_blueprints(app)
    # Configure middleware
    configure_middleware(app)
return app
|
74d7e7beab4e86faec1fbf8dc357791ac50874dd
| 33,043 |
import json
def img_to_json(img, decimals=2, swap=False, save=None):
""" Convert an image volume to web-ready JSON format suitable for import into
the Neurosynth viewer.
Args:
img: An image filename.
round: Optional integer giving number of decimals to round values to.
swap: A temporary kludge to deal with some orientation problems. For some reason
the switch from PyNifti to NiBabel seems to produce images that load in a
different orientation given the same header. In practice this can be addressed
by flipping the x and z axes (swap = True), but need to look into this and
come up with a permanent solution.
Returns:
a JSON-formatted string.
"""
try:
data = nb.load(img).get_data()
except Exception as e:
raise Exception("Error loading %s: %s" % (img, str(e)))
dims = list(data.shape)
# Convenience method to package and output the converted data;
# also handles cases where image is blank.
def package_json(contents=None):
if contents is None:
contents = {
'thresh': 0.0,
'max': 0.0,
'min': 0.0,
'dims': dims,
'values': [],
'indices': []
}
# Write to file or return string
if save is None:
return json.dumps(contents)
else:
json.dump(contents, open(save, 'w'))
# Skip empty images
data = np.nan_to_num(data)
if np.sum(data) == 0:
return package_json()
# Round values to save space. Note that in practice the resulting JSON file will
# typically be larger than the original nifti unless the image is relatively
# dense (even when compressed). More reason to switch from JSON to nifti reading
# in the viewer!
data = np.round_(data, decimals)
# Temporary kludge to fix orientation issue
if swap:
data = np.swapaxes(data, 0, 2)
# Identify threshold--minimum nonzero value
thresh = np.min(np.abs(data[np.nonzero(data)]))
# compress into 2 lists, one with values, the other with list of indices
# for each value
uniq = list(np.unique(data))
# uniq = np.unique()
uniq.remove(0)
if len(uniq) == 0:
return package_json()
contents = {
'thresh': round(thresh, decimals),
'max': round(np.max(data), decimals),
'min': round(np.min(data), decimals),
'dims': dims,
'values': [float('%.2f' % u) for u in uniq]
}
ds_flat = data.ravel()
all_inds = []
for val in uniq:
if val == 0:
continue
ind = [int(x) for x in list(np.where(ds_flat == val)[0])] # UGH
all_inds.append(ind)
contents['indices'] = all_inds
return package_json(contents)
|
18e1d92d73493e69efaf055616ccb2f5d55fc835
| 33,044 |
import ctypes
import typing
import array
def encode_float(
encoder_state: ctypes.Structure,
pcm_data: bytes,
frame_size: int,
max_data_bytes: int
) -> typing.Union[bytes, typing.Any]:
"""Encodes an Opus frame from floating point input"""
pcm_pointer = ctypes.cast(pcm_data, opuslib.api.c_float_pointer)
opus_data = (ctypes.c_char * max_data_bytes)()
result = libopus_encode_float(
encoder_state,
pcm_pointer,
frame_size,
opus_data,
max_data_bytes
)
if result < 0:
raise opuslib.OpusError(result)
return array.array('b', opus_data[:result]).tobytes()
|
fc349b4eae1c330444114b3df86f1603c931a30a
| 33,045 |
import datetime
def _login(use_cookie):
"""User login helper function.
The request data should contain at least 'email' and 'password'.
The cookie expiration duration is defined in flask app config.
If user is not authenticated, it raises Unauthorized exception.
"""
data = _get_request_data()
if 'email' not in data or 'password' not in data:
raise exception_handler.BadRequest(
'missing email or password in data'
)
expire_timestamp = (
datetime.datetime.now() + app.config['REMEMBER_COOKIE_DURATION']
)
data['expire_timestamp'] = expire_timestamp
user = auth_handler.authenticate_user(**data)
if not login_user(user, remember=data.get('remember', False)):
raise exception_handler.UserDisabled('failed to login: %s' % user)
user_log_api.log_user_action(user.id, request.path)
response_data = user_api.record_user_token(
user.token, user.expire_timestamp, user=user
)
return utils.make_json_response(200, response_data)
|
084238d593b95901fcb260088f730fd7e9ac3f64
| 33,046 |
def get_highlightjs_setting(setting, default=None):
"""
Read a setting
"""
return HIGHLIGHTJS.get(setting, default)
|
86b3b52fc7e95448a2ce6e860d4b261d78d68a38
| 33,049 |
def package_dir_path(path):
"""Return package path to package install directory"""
return path + '/.pkg'
|
edd4b97256ccf02a3f1165b99cae746826e8aee0
| 33,051 |
def sort_cluster_data(cluster_data, cluster_accuracy):
"""
sort cluster data based on GDT_mean values of cluster_accuracy.
-> cluster 0 will have highest GDT_mean
-> cluster <max> will have lowest GDT_mean
    .. note:: if cluster_data has a noise_label assigned, this label is moved to the end of the sorted cluster data.
Args:
cluster_data (CLUSTER_DATA): output of apply_DBSCAN(), apply_KMeans(), or heat_KMeans()
cluster_accuracy (CLUSTER_DATA_ACCURACY): output of map_cluster_accuracy()
Returns:
sorted_cluster_data (CLUSTER_DATA)
sorted cluster_data
"""
if not isinstance(cluster_data, CLUSTER_DATA):
raise TypeError("cluster_data has wrong data type.")
if not isinstance(cluster_accuracy, CLUSTER_DATA_ACCURACY):
raise TypeError("cluster_accuracy has wrong data type.")
### rank and test if labels have same range
ranked_array, ranked_ndx = _misc.get_ranked_array(cluster_accuracy.GDT_mean, verbose=False)
if set(cluster_data.labels) != set(ranked_ndx):
raise ValueError("labels of cluster_data and cluster_accuracy do not match.")
if cluster_data.noise_label is not None:
# move noise label to the very end
noise_ndx = np.where(ranked_ndx == cluster_data.noise_label)[0]
other_ndx = np.where(ranked_ndx != cluster_data.noise_label)[0]
ranked_array = np.append(ranked_array[other_ndx], ranked_array[noise_ndx])
ranked_ndx = np.append(ranked_ndx[other_ndx], ranked_ndx[noise_ndx])
### REMOVE ###
# # algorithms with -1 label for noise (e.g. DBSCAN, OPTICS)
# else:
# ranked_array, ranked_ndx = _misc.get_ranked_array(cluster_accuracy.GDT_mean, verbose=False)
# if set(cluster_data.labels + 1) != set(ranked_ndx):
# raise ValueError("labels of cluster_data and cluster_accuracy do not match.")
# # move noise label (here: max(ranked_ndx)) to the very end
# noise_ndx = np.where(ranked_ndx == max(ranked_ndx))[0]
# other_ndx = np.where(ranked_ndx != max(ranked_ndx))[0]
# ranked_array = np.append(ranked_array[other_ndx], ranked_array[noise_ndx])
# ranked_ndx = np.append(ranked_ndx[other_ndx], ranked_ndx[noise_ndx])
### REMOVE ###
### remap data
sorted_labels = [ranked_ndx.tolist().index(i) for i in cluster_data.labels]
### REMOVE ###
# # algorithms with -1 label for noise (e.g. DBSCAN, OPTICS)
# else:
# _misc.cprint(f"Note: shifted labels from {min(cluster_data.labels)}..{max(cluster_data.labels)} to {min(ranked_ndx)}..{max(ranked_ndx)} with {max(ranked_ndx)} being the 'noise'.", "red")
# sorted_labels = [ranked_ndx.tolist().index(i) for i in cluster_data.labels + 1] # shift labels
### REMOVE ###
### create new object
sorted_wss_data = WSS_DATA_obj(wss=cluster_data.wss_data.wss, # float
sse=cluster_data.wss_data.sse[ranked_ndx], # sorted
se_mean=cluster_data.wss_data.se_mean[ranked_ndx], # sorted
se_std=cluster_data.wss_data.se_std[ranked_ndx]) # sorted
sorted_cluster_data = CLUSTER_DATA(centers=cluster_data.centers[ranked_ndx], # sorted
counts=cluster_data.counts[ranked_ndx], # sorted
labels=sorted_labels, # sorted
noise_label=cluster_data.noise_label, # reassign
inertia=cluster_data.inertia, # float
wss_data=sorted_wss_data, # sorted
compact_score=cluster_data.compact_score[ranked_ndx]) # sorted
return sorted_cluster_data
|
964e9a646da025ae6819bc902319f7f1b6c9ae9c
| 33,052 |
import random
def reprintClean(pack):
"""
Helper function specifically for reprint packs.
:param pack: List, contains the 12 cards in a pack
:return: temppack, the pack with the higher rarity cards implanted in
"""
temppack = pack
rarity = random.randint(0, 12)
if rarity == 0:
card = randCard(ultra)
if ("Monster" in card["type"]):
temppack[5] = card
elif ("Spell" in card["type"]):
temppack[8] = card
elif ("Trap" in card["type"]):
temppack[11] = card
else:
rarity = random.randint(0, 6)
if rarity == 0:
card = randCard(super)
if ("Monster" in card["type"]):
temppack[5] = card
elif ("Spell" in card["type"]):
temppack[8] = card
elif ("Trap" in card["type"]):
temppack[11] = card
card = randCard(rare)
if ("Monster" in card["type"]):
temppack[4] = card
elif ("Spell" in card["type"]):
temppack[7] = card
elif ("Trap" in card["type"]):
temppack[10] = card
else:
card = randCard(rare)
if ("Monster" in card["type"]):
temppack[4] = card
elif ("Spell" in card["type"]):
temppack[7] = card
elif ("Trap" in card["type"]):
temppack[10] = card
return temppack
|
3b73ab930197e482699b340b4cb9c0f068e63985
| 33,053 |
def logout():
"""Logs the current user out"""
del session['user_id']
return redirect('/')
|
7584ceceb2f6afa95a82d212ca4b9b537a1d4ad2
| 33,054 |
def main() -> int:
"""
Main function. Executed if script is called standalone.
"""
args = _parse_cmd_args()
try:
return _search_symantec(args.keyword, args.limit)
except KeyboardInterrupt:
_warn("Keyboard interrupt detected\n", True)
return 1
|
b8f06d95ab08ca25b55a8f0256ac51902383dfe0
| 33,055 |
def _check_imgs_array(imgs):
"""Check input image if it is an array
Parameters
----------
imgs : array of str, shape=[n_subjects, n_sessions]
Element i, j of the array is a path to the data of subject i
collected during session j.
Data are loaded with numpy.load and expected
shape is [n_voxels, n_timeframes]
n_timeframes and n_voxels are assumed to be the same across
subjects
n_timeframes can vary across sessions
Each voxel's timecourse is assumed to have mean 0 and variance 1
Returns
-------
shapes : array
Shape of input images
"""
assert_array_2axis(imgs, "imgs")
n_subjects, n_sessions = imgs.shape
shapes = np.zeros((n_subjects, n_sessions, 2))
for i in range(n_subjects):
for j in range(n_sessions):
            if not (isinstance(imgs[i, j], str) or isinstance(
                    imgs[i, j], np.str_)):
raise ValueError("imgs[%i, %i] is stored using "
"type %s which is not a str" %
(i, j, type(imgs[i, j])))
shapes[i, j, :] = get_shape(imgs[i, j])
return shapes
|
43705a4467a27df3027d9ebed4b8f5eec2866916
| 33,056 |
def smoothline(xs, ys=None, interpol=3, window=1, verbose=3):
"""Smoothing 1D vector.
Description
-----------
    Smoothing a 1D vector can be challenging if the data is sparsely sampled.
    This smoothing function therefore performs two steps: first an interpolation of
    the input line, followed by a convolution.
Parameters
----------
xs : array-like
Data points for the x-axis.
ys : array-like
Data points for the y-axis.
interpol : int, (default : 3)
        The interpolation factor. The data is interpolated by a factor n before the smoothing step.
window : int, (default : 1)
Smoothing window that is used to create the convolution and gradually smoothen the line.
verbose : int [1-5], default: 3
Print information to screen. A higher number will print more.
Returns
-------
xnew : array-like
Data points for the x-axis.
ynew : array-like
Data points for the y-axis.
"""
if window is not None:
if verbose>=3: print('[smoothline] >Smoothing by interpolation..')
# Specify number of points to interpolate the data
# Interpolate
extpoints = np.linspace(0, len(xs), len(xs) * interpol)
spl = make_interp_spline(range(0, len(xs)), xs, k=3)
# Compute x-labels
xnew = spl(extpoints)
xnew[window:-window]
# First smoothing on the raw input data
ynew=None
if ys is not None:
ys = _smooth(ys,window)
# Interpolate ys line
spl = make_interp_spline(range(0, len(ys)), ys, k=3)
ynew = spl(extpoints)
ynew[window:-window]
else:
xnew, ynew = xs, ys
return xnew, ynew
|
7e7d50e55f801a14394dc2c9fab4e8f392dee546
| 33,057 |
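A small sketch for smoothline above; it assumes the function and its helpers (_smooth, scipy's make_interp_spline) are importable from the same module, and the data points are made up.

import numpy as np

xs = np.array([1.0, 3.0, 2.0, 5.0, 4.0, 6.0])
xnew, ynew = smoothline(xs, ys=None, interpol=3, window=1)
print(len(xnew), ynew)   # 18 None: x is interpolated by a factor of 3, no y data given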
def prune_model(keras_model, prun_factor_dense=10, prun_factor_conv=10, metric='L1', comp=None, num_classes=None, label_one_hot=None):
"""
A given keras model get pruned. The factor for dense and conv says how many percent
of the dense and conv layers should be deleted.
Args:
keras_model: Model which should be pruned
        prun_factor_dense: Integer giving the percentage of dense-layer neurons to delete
        prun_factor_conv: Integer giving the percentage of conv-layer filters to delete
metric: Metric which should be used to prune the model
comp: Dictionary with compiler settings
num_classes: Number of different classes of the model
label_one_hot: Boolean value if labels are one hot encoded or not
Return:
pruned_model: New model after pruning
"""
    if callable(getattr(keras_model, "predict", None)):
model = keras_model
elif isinstance(keras_model, str) and ".h5" in keras_model:
model = load_model(keras_model)
else:
print("No model given to prune")
if num_classes <= 2 and comp == None:
comp = {
"optimizer": 'adam',
"loss": tf.keras.losses.BinaryCrossentropy(),
"metrics": 'accuracy'}
    elif num_classes > 2 and comp == None:
if label_one_hot == True:
comp = {
"optimizer": 'adam',
"loss": tf.keras.losses.CategoricalCrossentropy(),
"metrics": 'accuracy'}
else:
comp = {
"optimizer": 'adam',
"loss": tf.keras.losses.SparseCategoricalCrossentropy(),
"metrics": 'accuracy'}
layer_types, layer_params, layer_output_shape, layer_bias = load_model_param(model)
num_new_neurons = np.zeros(shape=len(layer_params), dtype=np.int16)
num_new_filters = np.zeros(shape=len(layer_params), dtype=np.int16)
layer_params, num_new_neurons, num_new_filters, layer_output_shape = model_pruning(layer_types, layer_params, layer_output_shape, layer_bias, num_new_neurons, num_new_filters, prun_factor_dense, prun_factor_conv,metric)
print("Finish with pruning")
pruned_model = build_pruned_model(model, layer_params, layer_types, num_new_neurons, num_new_filters, comp)
print("Model built")
return pruned_model
|
90e01b5e1de4acc4649f48f0931f8db5cdc6867c
| 33,058 |