content (string, 35–762k chars) | sha1 (string, 40 chars) | id (int64, 0–3.66M)
---|---|---|
def log_report():
""" The log report shows the log file. The user can filter and search the log. """
    with open(main_log, 'r') as log_file:
        log_main = log_file.readlines()
data_main = []
for line in log_main:
split_line = line.split(' ')
data_main.append([' '.join(split_line[:2]), ' '.join(split_line[2:])])
return render_template(
'log.html',
title="Logs",
data_main=data_main) | 53905c90bed2666c7e668bf76ff03a6ba93eca5b | 6,520 |
from typing import Optional
def int_or_none(x) -> Optional[int]:
"""Either convert x to an int or return None."""
try:
return int(x)
except TypeError:
return None
except ValueError:
return None | e7fbd422a6c61293c9f4f71df211a85570d4400e | 6,521 |
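A quick usage sketch of the helper above, with illustrative values only:
print(int_or_none("42"), int_or_none("4.2"), int_or_none(None))  # 42 None None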
def needed_to_build_multi(deriv_outputs, existing=None, on_server=None):
"""
:param deriv_outputs: A mapping from derivations to sets of outputs.
:type deriv_outputs: ``dict`` of ``Derivation`` to ``set`` of ``str``
"""
if existing is None:
existing = {}
if on_server is None:
on_server = {}
needed, need_fetch = {}, {}
for deriv, outputs in deriv_outputs.items():
needed_to_build(deriv, outputs, needed=needed, need_fetch=need_fetch,
existing=existing, on_server=on_server)
return needed, need_fetch | d05083ea9c71c982d312e8b420b21bba92b80ee4 | 6,523 |
def iscode(c):
"""
Tests if argument type could be lines of code,
i.e. list of strings
"""
if type(c) == type([]):
if c:
return type(c[0]) == type('')
else:
return True
    else:
        return False
def get_comment_list(request, thread_id, endorsed, page, page_size, requested_fields=None):
"""
Return the list of comments in the given thread.
Arguments:
request: The django request object used for build_absolute_uri and
determining the requesting user.
thread_id: The id of the thread to get comments for.
endorsed: Boolean indicating whether to get endorsed or non-endorsed
comments (or None for all comments). Must be None for a discussion
thread and non-None for a question thread.
page: The page number (1-indexed) to retrieve
page_size: The number of comments to retrieve per page
requested_fields: Indicates which additional fields to return for
each comment. (i.e. ['profile_image'])
Returns:
A paginated result containing a list of comments; see
discussion.rest_api.views.CommentViewSet for more detail.
"""
response_skip = page_size * (page - 1)
cc_thread, context = _get_thread_and_context(
request,
thread_id,
retrieve_kwargs={
"with_responses": True,
"recursive": False,
"user_id": request.user.id,
"response_skip": response_skip,
"response_limit": page_size,
}
)
# Responses to discussion threads cannot be separated by endorsed, but
# responses to question threads must be separated by endorsed due to the
# existing comments service interface
if cc_thread["thread_type"] == "question":
if endorsed is None: # lint-amnesty, pylint: disable=no-else-raise
raise ValidationError({"endorsed": ["This field is required for question threads."]})
elif endorsed:
# CS does not apply resp_skip and resp_limit to endorsed responses
# of a question post
responses = cc_thread["endorsed_responses"][response_skip:(response_skip + page_size)]
resp_total = len(cc_thread["endorsed_responses"])
else:
responses = cc_thread["non_endorsed_responses"]
resp_total = cc_thread["non_endorsed_resp_total"]
else:
if endorsed is not None:
raise ValidationError(
{"endorsed": ["This field may not be specified for discussion threads."]}
)
responses = cc_thread["children"]
resp_total = cc_thread["resp_total"]
# The comments service returns the last page of results if the requested
    # page is beyond the last page, but we want to be consistent with DRF's general
# behavior and return a PageNotFoundError in that case
if not responses and page != 1:
raise PageNotFoundError("Page not found (No results on this page).")
num_pages = (resp_total + page_size - 1) // page_size if resp_total else 1
results = _serialize_discussion_entities(request, context, responses, requested_fields, DiscussionEntity.comment)
paginator = DiscussionAPIPagination(request, page, num_pages, resp_total)
return paginator.get_paginated_response(results) | 980e52645e96853339df0525359ddba4698bf7e7 | 6,525 |
from typing import List
def files(name: str, dependencies=False, excludes=None) -> List[PackagePath]:
"""
List all files belonging to a distribution.
Arguments:
name:
The name of the distribution.
dependencies:
Recursively collect files of dependencies too.
excludes:
Distributions to ignore if **dependencies** is true.
Returns:
All filenames belonging to the given distribution.
With ``dependencies=False``, this is just a shortcut for::
conda_support.distribution(name).files
"""
return [file
for dist in _iter_distributions(name, dependencies, excludes)
for file in dist.files] | cfda01bb7e6858e378aadeea47e6e4a0d76dda2f | 6,526 |
def ready_to_delete_data_node(name, has_executed, graph):
"""
Determines if a DataPlaceholderNode is ready to be deleted from the
cache.
Args:
name:
The name of the data node to check
has_executed: set
A set containing all operations that have been executed so far
graph:
The networkx graph containing the operations and data nodes
Returns:
A boolean indicating whether the data node can be deleted or not.
"""
data_node = get_data_node(name, graph)
return set(gr.successors(graph, data_node)).issubset(has_executed) | 7da3c6053146a1772223e29e1eca15107e0347b6 | 6,527 |
import hashlib
def extract_hash_parts(repo):
"""Extract hash parts from repo"""
full_hash = hashlib.sha1(repo.encode("utf-8")).hexdigest()
return full_hash[:2], full_hash[2:] | aa1aebaf9b8330539eb0266c4ff97fd3459753c8 | 6,528 |
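A usage sketch, assuming the function above; the repo string is hypothetical, and the split always yields a 2-character prefix plus the remaining 38 characters of the 40-character SHA-1 hex digest:
prefix, rest = extract_hash_parts("github.com/example/repo")
assert len(prefix) == 2 and len(rest) == 38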
def create_cloud_mask(im_QA, satname, cloud_mask_issue):
"""
Creates a cloud mask using the information contained in the QA band.
KV WRL 2018
Arguments:
-----------
im_QA: np.array
Image containing the QA band
satname: string
short name for the satellite: ```'L5', 'L7', 'L8' or 'S2'```
cloud_mask_issue: boolean
True if there is an issue with the cloud mask and sand pixels are being
erroneously masked on the images
Returns:
-----------
cloud_mask : np.array
boolean array with True if a pixel is cloudy and False otherwise
"""
# convert QA bits (the bits allocated to cloud cover vary depending on the satellite mission)
if satname == 'L8':
cloud_values = [2800, 2804, 2808, 2812, 6896, 6900, 6904, 6908]
elif satname == 'L7' or satname == 'L5' or satname == 'L4':
cloud_values = [752, 756, 760, 764]
elif satname == 'S2':
cloud_values = [1024, 2048] # 1024 = dense cloud, 2048 = cirrus clouds
# find which pixels have bits corresponding to cloud values
cloud_mask = np.isin(im_QA, cloud_values)
# remove cloud pixels that form very thin features. These are beach or swash pixels that are
# erroneously identified as clouds by the CFMASK algorithm applied to the images by the USGS.
if sum(sum(cloud_mask)) > 0 and sum(sum(~cloud_mask)) > 0:
morphology.remove_small_objects(cloud_mask, min_size=10, connectivity=1, in_place=True)
if cloud_mask_issue:
elem = morphology.square(3) # use a square of width 3 pixels
cloud_mask = morphology.binary_opening(cloud_mask,elem) # perform image opening
# remove objects with less than 25 connected pixels
morphology.remove_small_objects(cloud_mask, min_size=25, connectivity=1, in_place=True)
return cloud_mask | 5143c1c61425a131bdb3b0f91018643c9a9d4123 | 6,529 |
import re
def split_bucket(s3_key):
"""
Returns the bucket name and the key from an s3 location string.
"""
match = re.match(r'(?:s3://)?([^/]+)/(.*)', s3_key, re.IGNORECASE)
if not match:
return None, s3_key
return match.group(1), match.group(2) | 6b854bdc9d105643a9fa528e6fefd19672451e63 | 6,530 |
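Illustrative calls against the regex above, with hypothetical S3 locations; a string without a bucket/key separator falls through to (None, original):
print(split_bucket("s3://my-bucket/path/to/object.csv"))  # ('my-bucket', 'path/to/object.csv')
print(split_bucket("my-bucket/path/to/object.csv"))       # ('my-bucket', 'path/to/object.csv')
print(split_bucket("object.csv"))                         # (None, 'object.csv')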
def contains_chinese(ustr):
"""
将字符串中的中文去除
Args:
ustr: 字符串
Returns: 去除中文的字符串
"""
return any('\u4e00' <= char <= '\u9fff' for char in ustr) | 8d53a214e1754e1c129f1583a298f5a19e1f76d3 | 6,531 |
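A minimal check of the predicate above:
print(contains_chinese("hello 世界"))   # True
print(contains_chinese("hello world"))  # False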
def payment_activity():
"""Request for extra-curricular activity"""
try:
req_json = request.get_json(force=True)
except TypeError:
return jsonify(message='Invalid json input'), 400
activity_info = req_json['activity']
student = req_json['student']
envelope_args = {
'signer_client_id': 1000,
'ds_return_url': req_json['callback-url'],
'gateway_account_id': session.get('payment_gateway_account_id'),
'gateway_name': session.get('payment_gateway'),
'gateway_display_name': session.get('payment_display_name')
}
try:
# Create envelope with payment
envelope = DsDocument.create_with_payment(
'payment-activity.html', student, activity_info, envelope_args
)
# Submit envelope to Docusign
envelope_id = Envelope.send(envelope, session)
except ApiException as exc:
return process_error(exc)
user_documents = session.get('ds_documents')
if not user_documents:
session['ds_documents'] = [envelope_id]
else:
session['ds_documents'].append(envelope_id)
try:
# Get the recipient view
result = Envelope.get_view(envelope_id, envelope_args, student, session)
except ApiException as exc:
return process_error(exc)
return jsonify({'envelope_id': envelope_id, 'redirect_url': result.url}) | a313b6e5ed00ffc9b3685ce28a9c640e96276347 | 6,532 |
def gdc_to_dos_list_response(gdcr):
"""
Takes a GDC list response and converts it to GA4GH.
    :param gdcr: the GDC list response
    :return: the converted GA4GH (DOS) list response
"""
mres = {}
mres['data_objects'] = []
for id_ in gdcr.get('ids', []):
mres['data_objects'].append({'id': id_})
if len(gdcr.get('ids', [])) > 0:
mres['next_page_token'] = gdcr['ids'][-1:][0]
return mres | a237a64f55c15fb10070d76b6f3cc4f283460a96 | 6,533 |
def get_labels_by_db_and_omic_from_graph(graph):
"""Return labels by db and omic given a graph."""
db_subsets = defaultdict(set)
db_entites = defaultdict(dict)
entites_db = defaultdict(dict)
# entity_type_map = {'Gene':'genes', 'mirna_nodes':'mirna', 'Abundance':'metabolites', 'BiologicalProcess':'bps'}
for u, v, k in graph.edges(keys=True):
if ANNOTATIONS not in graph[u][v][k]:
continue
if 'database' not in graph[u][v][k][ANNOTATIONS]:
continue
for database in graph[u][v][k][ANNOTATIONS]['database']:
db_subsets[database].add(u)
db_subsets[database].add(v)
for database, nodes in db_subsets.items():
        database_sets = calculate_database_sets_as_dict(nodes, database)
        db_entites[database] = database_sets
for entity_type, entities in database_sets.items():
entites_db[entity_type][database] = entities
return db_entites, entites_db | 14374977afb09fded25f78e14fced607bb8f9ea1 | 6,534 |
import logging
def covid_API_england ():
"""Function retrieves date, hospital admissions, total deaths
and daily cases using government API"""
england_only = [
'areaType=nation',
'areaName=England'
]
dates_and_cases = {
"date": "date",
"newCasesByPublishDate": "newCasesByPublishDate",
"newAdmissions": "newAdmissions",
"cumDailyNsoDeathsByDeathDate":"cumDailyNsoDeathsByDeathDate"
}
api = Cov19API(filters=england_only, structure=dates_and_cases)
logging.info('API has received query for national data')
global DATA2
DATA2 = api.get_json()
return DATA2 | a13090a052a35dd675c1fb31b861cbcc4b9e7c4a | 6,536 |
from meerschaum.actions.shell import default_action_completer
from typing import Optional
from typing import List
from typing import Any
def _complete_uninstall(
action : Optional[List[str]] = None,
**kw : Any
) -> List[str]:
"""
Override the default Meerschaum `complete_` function.
"""
if action is None:
action = []
options = {
'plugin': _complete_uninstall_plugins,
'plugins': _complete_uninstall_plugins,
}
if len(action) > 0 and action[0] in options:
sub = action[0]
del action[0]
return options[sub](action=action, **kw)
return default_action_completer(action=(['uninstall'] + action), **kw) | 1cfdc5694a069c924316f57e4804ae04d63bb4af | 6,537 |
def test_bucket():
"""Universal bucket name for use throughout testing"""
return 'test_bucket' | 2f78b1b1bf7ccfff07ca29213d975f3b20f0e9a5 | 6,538 |
def us_send_code():
"""
Send code view.
This takes an identity (as configured in USER_IDENTITY_ATTRIBUTES)
and a method request to send a code.
"""
form_class = _security.us_signin_form
if request.is_json:
if request.content_length:
form = form_class(MultiDict(request.get_json()), meta=suppress_form_csrf())
else:
form = form_class(formdata=None, meta=suppress_form_csrf())
else:
form = form_class(meta=suppress_form_csrf())
form.submit_send_code.data = True
if form.validate_on_submit():
# send code
user = form.user
if not user.us_totp_secret:
after_this_request(_commit)
user.us_totp_secret = _security._totp_factory.generate_totp_secret()
_datastore.put(user)
send_security_token(
user,
form.chosen_method.data,
user.us_totp_secret,
user.us_phone_number,
send_magic_link=True,
)
if _security._want_json(request):
# Not authenticated yet - so don't send any user info.
return base_render_json(form, include_user=False)
return _security.render_template(
config_value("US_SIGNIN_TEMPLATE"),
us_signin_form=form,
methods=config_value("US_ENABLED_METHODS"),
chosen_method=form.chosen_method.data,
code_sent=True,
skip_loginmenu=True,
**_security._run_ctx_processor("us_signin")
)
# Here on GET or failed validation
if _security._want_json(request):
payload = {"methods": config_value("US_ENABLED_METHODS")}
return base_render_json(form, include_user=False, additional=payload)
return _security.render_template(
config_value("US_SIGNIN_TEMPLATE"),
us_signin_form=form,
methods=config_value("US_ENABLED_METHODS"),
skip_loginmenu=True,
**_security._run_ctx_processor("us_signin")
) | 7ca09dc6d6fdc7840d893e01b4166d65a1b9cc02 | 6,541 |
def merge_texts(files, file_index, data_type):
""" merge the dataframes in your list """
dfs, filenames = get_dataframe_list(files, file_index, data_type)
# enumerate over the list, merge, and rename columns
try:
df = dfs[0]
# print(*[df_.columns for df_ in dfs],sep='\n')
for i, frame in enumerate(dfs[1:]):
if data_type == 'gene':
try:
# rename first columns to metadata value
df = df.rename(columns={'raw_counts': get_metadata_tag(filenames[0])})
df = df.merge(frame, on='gene').rename(columns={'raw_counts':'raw_counts_' + get_metadata_tag(filenames[i-1])})
except:
continue
elif data_type == 'transcript':
try:
df = df.merge(frame, on='transcript')
# df = df.merge(frame, on=frame.index)
except:
continue
return df
except:
print("Could not merge dataframe") | 4e336a240afd100797b707efc9ccc96feb8d2919 | 6,542 |
def create_dictionary(names, months, years, max_sustained_winds, areas_affected, updated_damages, deaths):
"""Create dictionary of hurricanes with hurricane name as the key and a dictionary of hurricane data as the value."""
hurricanes = dict()
num_hurricanes = len(names)
for i in range(num_hurricanes):
hurricanes[names[i]] = {"Name": names[i],
"Month": months[i],
"Year": years[i],
"Max Sustained Wind": max_sustained_winds[i],
"Areas Affected": areas_affected[i],
"Damage": updated_damages[i],
"Deaths": deaths[i]}
return hurricanes | 5a27d5349113f29d2af55df27a2ee2c2cc524549 | 6,543 |
def create_variable_type(parent, nodeid, bname, datatype):
"""
Create a new variable type
args are nodeid, browsename and datatype
or idx, name and data type
"""
nodeid, qname = _parse_nodeid_qname(nodeid, bname)
if datatype and isinstance(datatype, int):
datatype = ua.NodeId(datatype, 0)
if datatype and not isinstance(datatype, ua.NodeId):
raise RuntimeError("Data type argument must be a nodeid or an int refering to a nodeid, received: {}".format(datatype))
return node.Node(parent.server, _create_variable_type(parent.server, parent.nodeid, nodeid, qname, datatype)) | b2202b929bc51e2a2badfef6ec31df45e9736268 | 6,544 |
def load_NWP(input_nc_path_decomp, input_path_velocities, start_time, n_timesteps):
"""Loads the decomposed NWP and velocity data from the netCDF files
Parameters
----------
input_nc_path_decomp: str
Path to the saved netCDF file containing the decomposed NWP data.
input_path_velocities: str
Path to the saved numpy binary file containing the estimated velocity
fields from the NWP data.
start_time: numpy.datetime64
The start time of the nowcasting. Assumed to be a numpy.datetime64 type
n_timesteps: int
Number of time steps to forecast
Returns
-------
R_d: list
A list of dictionaries with each element in the list corresponding to
a different time step. Each dictionary has the same structure as the
output of the decomposition function
uv: array-like
Array of shape (timestep,2,m,n) containing the x- and y-components
of the advection field for the (NWP) model field per forecast lead time.
"""
if not NETCDF4_IMPORTED:
raise MissingOptionalDependency(
"netCDF4 package is required to load the decomposed NWP data, "
"but it is not installed"
)
# Open the file
ncf_decomp = netCDF4.Dataset(input_nc_path_decomp, "r", format="NETCDF4")
velocities = np.load(input_path_velocities)
# Initialise the decomposition dictionary
decomp_dict = dict()
decomp_dict["domain"] = ncf_decomp.domain
decomp_dict["normalized"] = bool(ncf_decomp.normalized)
decomp_dict["compact_output"] = bool(ncf_decomp.compact_output)
# Convert the start time and the timestep to datetime64 and timedelta64 type
zero_time = np.datetime64("1970-01-01T00:00:00", "ns")
analysis_time = np.timedelta64(int(ncf_decomp.analysis_time), "ns") + zero_time
timestep = ncf_decomp.timestep
timestep = np.timedelta64(timestep, "m")
valid_times = ncf_decomp.variables["valid_times"][:]
valid_times = np.array(
[np.timedelta64(int(valid_times[i]), "ns") for i in range(len(valid_times))]
)
valid_times = valid_times + zero_time
# Find the indices corresponding with the required start and end time
start_i = (start_time - analysis_time) // timestep
assert analysis_time + start_i * timestep == start_time
end_i = start_i + n_timesteps + 1
# Add the valid times to the output
decomp_dict["valid_times"] = valid_times[start_i:end_i]
# Slice the velocity fields with the start and end indices
uv = velocities[start_i:end_i, :, :, :]
# Initialise the list of dictionaries which will serve as the output (cf: the STEPS function)
R_d = list()
for i in range(start_i, end_i):
decomp_dict_ = decomp_dict.copy()
cascade_levels = ncf_decomp.variables["pr_decomposed"][i, :, :, :]
# In the netcdf file this is saved as a masked array, so we're checking if there is no mask
assert not cascade_levels.mask
means = ncf_decomp.variables["means"][i, :]
assert not means.mask
stds = ncf_decomp.variables["stds"][i, :]
assert not stds.mask
# Save the values in the dictionary as normal arrays with the filled method
decomp_dict_["cascade_levels"] = np.ma.filled(cascade_levels)
decomp_dict_["means"] = np.ma.filled(means)
decomp_dict_["stds"] = np.ma.filled(stds)
# Append the output list
R_d.append(decomp_dict_)
return R_d, uv | d96dacc14404f59b15a428e62608765486623460 | 6,545 |
def get_ts(fn, tc, scale=0):
"""Returns timestamps from a frame number and timecodes file or cfr fps
fn = frame number
tc = (timecodes list or Fraction(fps),tc_type)
scale default: 0 (ns)
examples: 3 (µs); 6 (ms); 9 (s)
"""
scale = 9 - scale
tc, tc_type = tc
if tc_type == 'cfr':
ts = round(10 ** scale * fn * Fraction(tc.denominator, tc.numerator))
return ts
elif tc_type == 'vfr':
ts = round(float(tc[fn]) * 10 ** (scale - 3))
return ts | 845b2600268a2942ca0fe2b09336ab724b00e299 | 6,546 |
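An illustrative constant-frame-rate call, assuming the function above; the fps value is a hypothetical NTSC-film rate, and scale=6 selects milliseconds:
from fractions import Fraction
fps = Fraction(24000, 1001)
print(get_ts(24, (fps, 'cfr'), scale=6))  # 1001 (milliseconds)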
import numpy
from io import BytesIO
def adapt_array(array):
"""
Using the numpy.save function to save a binary version of the array,
and BytesIO to catch the stream of data and convert it into a BLOB.
:param numpy.array array: NumPy array to turn into a BLOB
:return: NumPy array as BLOB
:rtype: BLOB
"""
out = BytesIO()
numpy.save(out, array)
out.seek(0)
return out.read() | 36a62c745de0e933b520821ea7cce70f5013c5d2 | 6,547 |
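A round-trip sketch, assuming the helper above: the BLOB can be read back with numpy.load over a BytesIO wrapper.
import numpy
from io import BytesIO
blob = adapt_array(numpy.arange(6).reshape(2, 3))
restored = numpy.load(BytesIO(blob))
assert (restored == numpy.arange(6).reshape(2, 3)).all()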
def make_queue(paths_to_image, labels, num_epochs=None, shuffle=True):
"""returns an Ops Tensor with queued image and label pair"""
images = tf.convert_to_tensor(paths_to_image, dtype=tf.string)
labels = tf.convert_to_tensor(labels, dtype=tf.uint8)
input_queue = tf.train.slice_input_producer(
tensor_list=[images, labels],
num_epochs=num_epochs,
shuffle=shuffle)
return input_queue | 7a2ad9338642a5d6c7af59fe972ee9fd07f128b8 | 6,548 |
def display_import(request, import_id):
"""Display the details of an import."""
import_object = get_object_or_404(RegisteredImport, pk=import_id)
context_data = {'import': import_object}
return render(request, 'eats/edit/display_import.html', context_data) | b5676dd5da1791fb6eda3d6989b9c7c0c8b02b8c | 6,549 |
def TransformContainerAnalysisData(image_name, occurrence_filter=None,
deployments=False):
"""Transforms the occurrence data from Container Analysis API."""
analysis_obj = container_analysis_data_util.ContainerAndAnalysisData(
image_name)
occs = FetchOccurrencesForResource(image_name, occurrence_filter)
for occ in occs:
analysis_obj.add_record(occ)
if deployments:
depl_occs = FetchDeploymentsForImage(image_name, occurrence_filter)
for depl_occ in depl_occs:
analysis_obj.add_record(depl_occ)
analysis_obj.resolveSummaries()
return analysis_obj | d7021dde08a77ac6922274f3e69d841983728f4e | 6,550 |
def bilinear_initializer(shape, dtype, partition_info):
"""
Bilinear initializer for deconvolution filters
"""
kernel = get_bilinear_kernel(shape[0], shape[1], shape[2])
broadcasted_kernel = np.repeat(kernel.reshape(shape[0], shape[1], shape[2], -1), repeats=shape[3], axis=3)
return broadcasted_kernel | 48a7cc2808e72df816c9b6ff7a8975eb52e4185e | 6,552 |
def pdf():
"""
    Demo version of the PDF report; it opens directly in the browser,
    which is more convenient than downloading it every time
"""
render_pdf(sample_payload_obj, './output.pdf')
upload_file('./output.pdf')
return send_file('./output.pdf', attachment_filename='output.pdf') | a2a60c26df9844e605606538d40d1402cd5a4985 | 6,553 |
def run_clear_db_es(app, arg_env, arg_skip_es=False):
"""
This function actually clears DB/ES. Takes a Pyramid app as well as two flags. _Use with care!_
For safety, this function will return without side-effect on any production system.
Also does additional checks based on arguments supplied:
If an `arg_env` (default None) is given as a non-empty string value,
this function will return without side-effect if the current app environment does not match the given value.
If `arg_skip_es` (default False) is True, this function will return after DB clear
and before running create_mapping.
Args:
app: Pyramid application
arg_env (str): if provided, only run if environment matches this value
arg_skip_es (bool): if True, do not run create_mapping after DB clear
Returns:
bool: True if DB was cleared (regardless of ES)
"""
env = app.registry.settings.get('env.name', '')
# for now, do NOT allow clearing of production systems
if is_stg_or_prd_env(env):
log.error('clear_db_es_contents: will NOT run on env %s. Exiting...' % env)
return False
if arg_env and arg_env != env:
log.error('clear_db_es_contents: environment mismatch! Given --env %s '
'does not match current env %s. Exiting....' % (arg_env, env))
return False
log.info('clear_db_es_contents: clearing DB tables...')
db_success = clear_db_tables(app)
if not db_success:
log.error('clear_db_es_contents: clearing DB failed! Try to run again.'
' This command can fail if there are external DB connections')
return False
log.info('clear_db_es_contents: successfully cleared DB')
# create mapping after clear DB to remove ES contents
if not arg_skip_es:
log.info('clear_db_es_contents: clearing ES with create_mapping...')
run_create_mapping(app, purge_queue=True)
log.info('clear_db_es_contents: done!')
return True | e7d865dec8691c4d0db7bef71b68bab9bc5174a2 | 6,554 |
def init_total_population():
"""
Real Name: b'init total population'
Original Eqn: b'init Infected asymptomatic+init Susceptible'
Units: b'person'
Limits: (None, None)
Type: component
b''
"""
return init_infected_asymptomatic() + init_susceptible() | cf742a00d0140c48dbdb4692dabbb8bbd6c5c6b2 | 6,555 |
def one_hot(dim: int, idx: int):
""" Get one-hot vector """
v = np.zeros(dim)
v[idx] = 1
return v | 84b87b357dc7b7bf54af4718885aa1d6fbcb35e4 | 6,556 |
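For example (assuming numpy is imported as np, as the snippet implies):
print(one_hot(4, 2))  # [0. 0. 1. 0.]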
import re
def process_priors(prior_flat, initial_fit):
"""Process prior input array into fit object."""
if any(
[float(val) <= 0 for key, val in prior_flat.items() if key.endswith("sdev")]
):
raise ValueError("Standard deviations must be larger than zero.")
prior = {}
for key, val in initial_fit.prior.items():
if hasattr(val, "__len__"):
nmax = len(
[k for k in prior_flat if re.match(f"{key}__array_[0-9]+-mean", k)]
)
prior[key] = gv.gvar(
[prior_flat[f"{key}__array_{n}-mean"] for n in range(nmax)],
[prior_flat[f"{key}__array_{n}-sdev"] for n in range(nmax)],
)
else:
prior[key] = gv.gvar(prior_flat[f"{key}-mean"], prior_flat[f"{key}-sdev"])
fit = nonlinear_fit(initial_fit.data, initial_fit.fcn, prior)
for attr in ["models", "meta"]:
if hasattr(initial_fit, attr):
setattr(fit, attr, getattr(initial_fit, attr))
return fit | 32358fb494a221e5e7d5d4d73776993f1c363f0f | 6,557 |
def _sample_data(ice_lines, frac_to_plot):
"""
Get sample ice lines to plot
:param ice_lines: all ice lines
:param frac_to_plot: fraction to plot
:return: the sampled ice lines
"""
if frac_to_plot < 1.:
ice_plot_data = ice_lines.sample(int(ice_lines.shape[0] * frac_to_plot))
elif frac_to_plot > 1:
ice_plot_data = ice_lines.sample(frac_to_plot)
else:
ice_plot_data = ice_lines.copy()
ice_plot_data = ice_plot_data.reset_index(drop=True)
return ice_plot_data | e5da9b1ecaf615863504e81cdd246336de97b319 | 6,558 |
def fast_dot(M1, M2):
"""
Specialized interface to the numpy.dot function
    This assumes that M1 and M2 are both 2D arrays (in practice)
    When M1 or M2 are represented by 1D arrays, they are assumed to represent
diagonal arrays
This function then exploits that to provide faster multiplication
"""
if len(M1.shape) in [1, 2] and len(M2.shape) == 1:
return M1*M2
elif len(M1.shape) == 1 and len(M2.shape) == 2:
return M1[:,None]*M2
elif len(M1.shape) == 2 and len(M2.shape) == 2:
return M1.dot(M2)
else:
raise Exception('fast_dot requires shapes to be 1 or 2') | b34e44787f48dfb25af4975e74262f3d8eaa5096 | 6,559 |
async def autoredeem(
bot: commands.Bot,
guild_id: int
) -> bool:
"""Iterates over the list of users who have
    enabled autoredeem for this server and, for each
    of them with enough credits, redeems some of those credits
    and alerts the user."""
await bot.wait_until_ready()
conn = bot.db.conn
guild = bot.get_guild(guild_id)
if guild is None:
return False
async with bot.db.lock:
async with conn.transaction():
ar_members = await conn.fetch(
"""SELECT * FROM members
WHERE guild_id=$1
AND autoredeem=True""",
guild_id
)
redeemed = False
for m in ar_members:
ms = await get_members([int(m['user_id'])], guild)
if len(ms) == 0:
continue
current_credits = await get_credits(
bot, int(m['user_id'])
)
if current_credits < bot_config.PREMIUM_COST:
continue
try:
await alert_user(
bot, int(m['user_id']),
f"You have autoredeem enabled in {guild.name}, "
f"so {bot_config.PREMIUM_COST} credits were taken "
"from your account since they ran out of premium."
)
except Exception:
continue
try:
await redeem(
bot, int(m['user_id']),
guild_id, 1
)
redeemed = True
except errors.NotEnoughCredits:
pass
return redeemed | ee0a34e4aa9d85e9402dcbec8a1ecce5a2ca58e1 | 6,560 |
from typing import Optional
from typing import Sequence
def plot_heatmap(
data: DataFrame,
columns: Optional[Sequence[str]] = None,
droppable: bool = True,
sort: bool = True,
cmap: Optional[Sequence[str]] = None,
names: Optional[Sequence[str]] = None,
yaxis: bool = False,
xaxis: bool = True,
legend_kws: dict = None,
sb_kws: dict = None) -> SubplotBase:
"""NA heatmap. Plots NA values as red lines and normal values
as black lines.
Parameters
----------
data : DataFrame
Input data.
columns : Optional[Sequence[str]], optional
Columns names.
droppable : bool, optional
Show values to be dropped by :py:meth:`pandas.DataFrame.dropna()`
method.
sort : bool, optional
Sort DataFrame by selected columns.
cmap : Optional[Sequence[str]], optional
Heatmap and legend colormap: non-missing values, droppable values,
NA values, correspondingly. Passed to :py:meth:`seaborn.heatmap()`
method.
names : Optional[Sequence[str]], optional
Legend labels: non-missing values, droppable values,
NA values, correspondingly.
yaxis : bool, optional
Show Y axis.
xaxis : bool, optional
Show X axis.
legend_kws : dict, optional
Keyword arguments passed to
:py:meth:`matplotlib.axes._subplots.AxesSubplot()` method.
sb_kws : dict, optional
Keyword arguments passed to
:py:meth:`seaborn.heatmap` method.
Returns
-------
matplotlib.axes._subplots.AxesSubplot
AxesSubplot object.
"""
if not cmap:
cmap = ['green', 'orange', 'red']
if not names:
names = ['Filled', 'Droppable', 'NA']
if not sb_kws:
sb_kws = {'cbar': False}
cols = _select_cols(data, columns).tolist()
data_na = data.loc[:, cols].isna().copy()
if sort:
data_na.sort_values(by=cols, inplace=True)
if droppable:
non_na_mask = ~data_na.values
na_rows_mask = data_na.any(axis=1).values[:, None]
droppable_mask = non_na_mask & na_rows_mask
data_na = data_na.astype(float)
data_na.values[droppable_mask] = 0.5
labels = names
else:
labels = [names[0], names[-1]]
if not legend_kws:
legend_kws = {'bbox_to_anchor': (0.5, 1.15), 'loc': 'upper center', 'ncol': len(labels)}
ax_heatmap = heatmap(data_na, cmap=cmap, **sb_kws)
ax_heatmap.yaxis.set_visible(yaxis)
ax_heatmap.xaxis.set_visible(xaxis)
legend_elements = [Patch(facecolor=cmap[0]), Patch(facecolor=cmap[-1])]
if droppable:
legend_elements.insert(1, Patch(facecolor=cmap[1]))
ax_heatmap.legend(legend_elements, labels, **legend_kws)
return ax_heatmap | 84233ee9293131ce98072f880a3c1a57fc71b321 | 6,562 |
def iadd_tftensor(left, right, scale=1):
"""This function performs an in-place addition. However, TensorFlow returns
a new object after a mathematical operation. This means that in-place here
only serves to avoid the creation of a TfTensor instance. We do not have
any control over the memory where the Tensor is stored."""
_check_shape(left, right)
# If scale=1 we obtain a x2 speed-up if we do not multiply by the scale.
if scale == 1:
left._tf = left._tf + right._tf
else:
left._tf = left._tf + scale*right._tf
return left | 3f14de3df3544b74f0a900fca33eb6cdf6e11c00 | 6,563 |
def encode(string_):
"""Change String to Integers"""
return (lambda f, s: f(list( ord(c) for c in str(string_) ) , \
s))(lambda f, s: sum(f[i] * 256 ** i for i in \
range(len(f))), str(string_)) | da3a729c2024d80792e08424745dc267ca67dff7 | 6,565 |
def generate_file_prefix(bin_params):
""" Use the bin params to generate a file prefix."""
prefix = "bin_"
for j in range(0, len(bin_params)):
if (j + 1) % 2 != 0:
prefix += str(bin_params[j]) + "-"
else:
prefix += str(bin_params[j]) + "_"
return prefix | cc058a64fcab77f6a4794a8bf7edb1e0e86c040c | 6,566 |
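With hypothetical (lower, upper) bin pairs, the parameters are joined as dash-separated pairs delimited by underscores:
print(generate_file_prefix([0, 10, 2, 20]))  # bin_0-10_2-20_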
def extract_features_from_html(html, depth, height):
"""Given an html text, extract the node based features
including the descendant and ancestor ones if depth and
height are respectively nonzero."""
root = etree.HTML(html.encode('utf-8')) # get the nodes, serve bytes, unicode fails if html has meta
features = extract_features_from_nodes(list(root.iter()), depth, height)
# add the paths to the elements for identification
features.loc[:, 'path'] = pd.Series((node.getroottree().getpath(node) for node in root.iter()))
return features | ee7b627bf7c859fc886eab10f6a8b6b793653262 | 6,567 |
def __clean_field(amazon_dataset, option):
"""Cleanes the Text field from the datset """
clean = []
if option == 1:
for i in amazon_dataset['Text']:
clean.append(__one(i))
elif option == 2:
for i in amazon_dataset['Summary']:
clean.append(__one(i))
else:
pass
return clean | 1e8ef28c810413b87804a42514059c347d715972 | 6,568 |
import warnings
def _read_atom_line(line):
"""
COLUMNS DATATYPE FIELD DEFINITION
-------------------------------------------------------------------------------------
1 - 6 RecordName "ATOM "
7 - 11 Integer serial Atom serial number.
13 - 16 Atom name Atom name.
17 Character altLoc Alternate location indicator.
18 - 20 Residue name resName Residue name.
22 Character chainID Chain identifier.
23 - 26 Integer resSeq Residue sequence number.
27 AChar iCode Code for insertion of residues.
31 - 38 Real(8.3) x Orthogonal coordinates for X in Angstroms.
39 - 46 Real(8.3) y Orthogonal coordinates for Y in Angstroms.
47 - 54 Real(8.3) z Orthogonal coordinates for Z in Angstroms.
55 - 60 Real(6.2) occupancy Occupancy.
61 - 66 Real(6.2) tempFactor Temperature factor.
77 - 78 LString(2) element Element symbol, right-justified.
79 - 80 LString(2) charge Charge on the atom.
"""
lineInfo = {}
lineInfo['RecordName'] = line[0:6]
lineInfo['serial'] = int(line[7:12].strip())
lineInfo['name'] = line[12:16].strip()
lineInfo['altLoc'] = line[16].strip()
lineInfo['resName'] = line[17:21].strip()
lineInfo['chainID'] = line[21].strip()
lineInfo['resSeq'] = int(line[22:26].strip())
lineInfo['iCode'] = line[26].strip()
try:
lineInfo['position'] = np.array(
[float(line[30:38]), float(line[38:46]), float(line[46:54])],
)
except ValueError:
raise ValueError("Invalid or missing coordinate(s)")
try:
lineInfo['occupancy'] = float(line[54:60])
except ValueError:
lineInfo['occupancy'] = None # Rather than arbitrary zero or one
if lineInfo['occupancy'] is not None and lineInfo['occupancy'] < 0:
warnings.warn("Negative occupancy in one or more atoms")
try:
lineInfo['bfactor'] = float(line[60:66])
except ValueError:
# The PDB use a default of zero if the data is missing
lineInfo['bfactor'] = 0.0
lineInfo['segid'] = line[72:76].strip()
lineInfo['element'] = line[76:78].strip().upper()
lineInfo['charge'] = line[79:81].strip()
return lineInfo | e511352dcc0bfcdec98035673adf759256c13e4c | 6,570 |
from typing import List
def semantic_parse_entity_sentence(sent: str) -> List[str]:
"""
@param sent: sentence to grab entities from
@return: noun chunks that we consider "entities" to work with
"""
doc = tnlp(sent)
ents_ke = textacy.ke.textrank(doc, normalize="lemma")
entities = [ent for ent, _ in ents_ke]
return entities | c65fa1d8da74b86b3e970cbf7f351e03d5a3fcec | 6,571 |
from numpy import array
def match_cam_time(events, frame_times):
"""
Helper function for mapping ephys events to camera times. For each event in events, we return the nearest
camera frame before the event.
Parameters
----------
events : 1D numpy array
Events of interest. Sampled at a higher rate than frame_times.
frame_times : 1D numpy array
Timepoints of camera frames to be assigned to events. Sampled at a lower rate than events.
"""
output = []
for a in events:
lags = array(a - frame_times)
before = len(lags[lags > 0]) - 1
if before >= 0:
output.append(before)
return array(output) | 3f086a0f65a34183a429cf3c50e90fdc742672d3 | 6,574 |
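A small worked example with hypothetical timestamps; each event maps to the index of the last camera frame preceding it, and events before the first frame are dropped:
from numpy import array
events = array([0.5, 1.2, 2.9])
frame_times = array([0.0, 1.0, 2.0])
print(match_cam_time(events, frame_times))  # [0 1 2]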
from typing import Optional
def _glibc_version_string_ctypes() -> Optional[str]:
    """
    Fallback implementation of glibc_version_string using ctypes.
    """
    try:
        import ctypes
    except ImportError:
        return None
# ctypes.CDLL(None) internally calls dlopen(NULL), and as the dlopen
# manpage says, "If filename is NULL, then the returned handle is for the
# main program". This way we can let the linker do the work to figure out
# which libc our process is actually using.
#
# We must also handle the special case where the executable is not a
# dynamically linked executable. This can occur when using musl libc,
# for example. In this situation, dlopen() will error, leading to an
# OSError. Interestingly, at least in the case of musl, there is no
# errno set on the OSError. The single string argument used to construct
# OSError comes from libc itself and is therefore not portable to
# hard code here. In any case, failure to call dlopen() means we
# can proceed, so we bail on our attempt.
try:
process_namespace = ctypes.CDLL(None)
except OSError:
return None
try:
gnu_get_libc_version = process_namespace.gnu_get_libc_version
except AttributeError:
# Symbol doesn't exist -> therefore, we are not linked to
# glibc.
return None
# Call gnu_get_libc_version, which returns a string like "2.5"
gnu_get_libc_version.restype = ctypes.c_char_p
version_str: str = gnu_get_libc_version()
# py2 / py3 compatibility:
if not isinstance(version_str, str):
version_str = version_str.decode("ascii")
return version_str | 86ae885182585eeb1c5e53ee8109036dd93d06d3 | 6,575 |
from typing import Sequence
from typing import List
from typing import Tuple
def encode_instructions(
stream: Sequence[Instruction],
func_pool: List[bytes],
string_pool: List[bytes],
) -> Tuple[bytearray, List[bytes], List[bytes]]:
"""
Encode the bytecode stream as a single `bytes` object that can be
written to file or kept in memory.
Parameters
----------
stream: Sequence[Instruction]
The bytecode instruction objects to be encoded.
func_pool: List[bytes]
Where the generated bytecode for function objects is stored
before being put in the final bytecode stream.
string_pool: List[bytes]
Where string objects are stored before being put in the final
bytecode stream.
Returns
-------
bytes
The encoded stream of bytecode instructions. It is guaranteed
to have a length proportional to the length of `stream`.
"""
result_stream = bytearray(len(stream) * 8)
for index, instruction in enumerate(stream):
start = index * 8
end = start + 8
opcode_space = instruction.opcode.value.to_bytes(1, BYTE_ORDER)
operand_space = encode_operands(
instruction.opcode, instruction.operands, func_pool, string_pool
)
operand_space = operand_space.ljust(7, b"\x00")
result_stream[start:end] = opcode_space + operand_space
return result_stream, func_pool, string_pool | 0a371731f627b96ca3a07c5ac992fd46724a7817 | 6,576 |
def get_random(selector):
"""Return one random game"""
controller = GameController
return controller.get_random(MySQLFactory.get(), selector) | 89f458a434cd20e10810d03e7addb1c5d6f1475a | 6,577 |
def get_ssh_dispatcher(connection, context):
"""
:param Message context: The eliot message context to log.
:param connection: The SSH connection run commands on.
"""
@deferred_performer
def perform_run(dispatcher, intent):
context.bind(
message_type="flocker.provision.ssh:run",
command=intent.log_command_filter(intent.command),
).write()
endpoint = SSHCommandClientEndpointWithTTY.existingConnection(
connection, intent.command)
d = Deferred()
connectProtocol(endpoint, CommandProtocol(
deferred=d, context=context))
return d
return TypeDispatcher({
Run: perform_run,
Sudo: perform_sudo,
Put: perform_put,
Comment: perform_comment,
}) | 1cb965c4e175276672173d5696e3196da5725fce | 6,578 |
def read_ac(path, cut_off, rnalen):
"""Read the RNA accessibility file and output its positions and values
The file should be a simple table with two columns:
The first column is the position and the second one is the value
'#' will be skipped
"""
access = []
with open(path) as f:
i = 0
while i < rnalen:
for line in f:
line = line.split()
if not line:
continue
elif line[0][0] == "#":
continue
elif len(line) < 2:
continue
else:
v = line[1]
if v == "NA":
access.append(0)
else:
try:
v = 2 ** (-float(v))
except:
continue
if v >= cut_off:
access.append(1)
else:
access.append(0)
i += 1
return access | 0a8b6c2ff6528cf3f21d3b5efce14d59ff8ad2b6 | 6,579 |
def subtableD0(cxt: DecoderContext, fmt: Format):
""" ORI """
fmt = FormatVI(fmt)
return MNEM.ORI, [Imm(fmt.imm16, width=16, signed=False), Reg(fmt.reg1), Reg(fmt.reg2)], 2 | 2bb307bd74568745b7f453365f7667c383cae9ff | 6,580 |
from datetime import datetime, timezone
def format_date(unix_timestamp):
""" Return a standardized date format for use in the two1 library.
This function produces a localized datetime string that includes the UTC timezone offset. This offset is
computed as the difference between the local version of the timestamp (python's datatime.fromtimestamp)
and the utc representation of the input timestamp.
Args:
unix_timestamp (float): a floating point unix timestamp
Returns:
string: A string formatted with "%Y-%m-%d %H:%M:%S %Z"
"""
local_datetime = datetime.fromtimestamp(unix_timestamp)
utz_offset = local_datetime - datetime.utcfromtimestamp(unix_timestamp)
local_date = local_datetime.replace(
tzinfo=timezone(utz_offset)
).strftime("%Y-%m-%d %H:%M:%S %Z")
return local_date | cc1a6ee0c604e14f787741ff2cb0e118134c9b92 | 6,581 |
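An illustrative call, assuming the helper above; the printed offset and timezone label depend on the local clock of the machine running it:
ts = 1609459200.0  # 2021-01-01 00:00:00 UTC
print(format_date(ts))  # e.g. '2021-01-01 00:00:00 UTC' on a machine set to UTC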
import copy
def or_(kb, goals, substitutions=dict(), depth=0, mask=None,
k_max=None, max_depth=1):
"""Base function of prover, called recursively.
Calls and_, which in turn calls or_, in order to recursively calculate scores for every possible proof in proof
tree.
Args:
kb: dict of facts / rules
goals: goal to be proved
substitutions: dict which contains current variable substitutions and scores of current proof path
depth: current proof depth
mask: mask to apply so that goal facts (which are drawn from kb) cannot be proved by unifying with themselves
k_max: number of fact unifications to retain from unifications with all facts in kb
max_depth: maximum allowed proof depth before termination
Returns:
List of proof paths of goal with corresponding scores
"""
proofs = []
# initialize history and substitutions as empty
if substitutions == {}:
substitutions['VARSUBS'] = {}
substitutions['HISTORY'] = []
for struct in kb:
# avoid fake added struct
if struct == 'goal':
continue
# Check if struct order matches
if len(struct[0]) != len(goals):
continue
rule = rule_struct_form(kb[struct], struct)
head = substitute(rule[0], substitutions, kb)
body = rule[1:]
mask_id = None
if mask is not None:
mask_key, mask_id = mask
mask_id = mask_id if mask_key == struct else None
is_fact = len(struct) == 1 and all([not is_variable(x)
for x in struct[0]])
if not is_fact and depth == max_depth:
# maximum depth reached
continue
# rule has been applied before
elif applied_before(rule, substitutions, kb):
continue
substitutions_copy = copy.deepcopy(substitutions)
substitutions_copy['HISTORY'].append([struct, depth])
substitutions_ = unify(head, goals, substitutions_copy, kb, depth, mask_id,
transpose=is_fact)
if is_fact and k_max is not None:
new_success, success_indices = tf.nn.top_k(substitutions_["SUCCESS"], k_max)
substitutions_["SUCCESS"] = new_success
for value in substitutions_['VARSUBS'].values():
if value['struct'] != 'goal' and not 'subset' in value:
value['subset'] = success_indices
if substitutions_ != 'FAILURE':
proof = and_(kb, body, substitutions_, depth, mask, k_max=k_max, max_depth=max_depth)
if not isinstance(proof, list):
proof = [proof]
else:
proof = flatten_proofs(proof)
for proof_substitutions in proof:
if proof_substitutions != 'FAILURE':
proofs.append(proof_substitutions)
return flatten_proofs(proofs) | d19382167143ffc3b5267fda126cc4f8d45fc86c | 6,582 |
from copy import copy
def print_term(thy, t):
"""More sophisticated printing function for terms. Handles printing
of operators.
Note we do not yet handle name collisions in lambda terms.
"""
def get_info_for_operator(t):
return thy.get_data("operator").get_info_for_fun(t.head)
def get_priority(t):
if nat.is_binary(t) or hol_list.is_literal_list(t):
return 100 # Nat atom case
elif t.is_comb():
op_data = get_info_for_operator(t)
if op_data is not None:
return op_data.priority
elif t.is_all() or logic.is_exists(t) or logic.is_if(t):
return 10
else:
return 95 # Function application
elif t.is_abs():
return 10
else:
return 100 # Atom case
def helper(t, bd_vars):
LEFT, RIGHT = OperatorData.LEFT_ASSOC, OperatorData.RIGHT_ASSOC
# Some special cases:
# Natural numbers:
if nat.is_binary(t):
return N(str(nat.from_binary(t)))
if hol_list.is_literal_list(t):
items = hol_list.dest_literal_list(t)
res = N('[') + commas_join(helper(item, bd_vars) for item in items) + N(']')
if hasattr(t, "print_type"):
return N("(") + res + N("::") + print_type(thy, t.T) + N(")")
else:
return res
if set.is_literal_set(t):
empty_set = "∅" if settings.unicode() else "{}"
if hasattr(t, "print_type"):
return N("(") + N(empty_set) + N("::") + print_type(thy, t.T) + N(")")
else:
return N(empty_set)
if logic.is_if(t):
P, x, y = t.args
return N("if ") + helper(P, bd_vars) + N(" then ") + helper(x, bd_vars) + \
N(" else ") + helper(y, bd_vars)
if t.is_var():
return V(t.name)
elif t.is_const():
if hasattr(t, "print_type") and t.print_type:
return N("(" + t.name + "::") + print_type(thy, t.T) + N(")")
else:
return N(t.name)
elif t.is_comb():
op_data = get_info_for_operator(t)
# First, we take care of the case of operators
if op_data and op_data.arity == OperatorData.BINARY and t.is_binop():
arg1, arg2 = t.args
# Obtain output for first argument, enclose in parenthesis
# if necessary.
if (op_data.assoc == LEFT and get_priority(arg1) < op_data.priority or
op_data.assoc == RIGHT and get_priority(arg1) <= op_data.priority):
str_arg1 = N("(") + helper(arg1, bd_vars) + N(")")
else:
str_arg1 = helper(arg1, bd_vars)
if settings.unicode() and op_data.unicode_op:
str_op = N(' ' + op_data.unicode_op + ' ')
else:
str_op = N(' ' + op_data.ascii_op + ' ')
# Obtain output for second argument, enclose in parenthesis
# if necessary.
if (op_data.assoc == LEFT and get_priority(arg2) <= op_data.priority or
op_data.assoc == RIGHT and get_priority(arg2) < op_data.priority):
str_arg2 = N("(") + helper(arg2, bd_vars) + N(")")
else:
str_arg2 = helper(arg2, bd_vars)
return str_arg1 + str_op + str_arg2
# Unary case
elif op_data and op_data.arity == OperatorData.UNARY:
if settings.unicode() and op_data.unicode_op:
str_op = N(op_data.unicode_op)
else:
str_op = N(op_data.ascii_op)
if get_priority(t.arg) < op_data.priority:
str_arg = N("(") + helper(t.arg, bd_vars) + N(")")
else:
str_arg = helper(t.arg, bd_vars)
return str_op + str_arg
# Next, the case of binders
elif t.is_all():
all_str = "!" if not settings.unicode() else "∀"
if hasattr(t.arg, "print_type"):
var_str = B(t.arg.var_name) + N("::") + print_type(thy, t.arg.var_T)
else:
var_str = B(t.arg.var_name)
body_repr = helper(t.arg.body, [t.arg.var_name] + bd_vars)
return N(all_str) + var_str + N(". ") + body_repr
elif logic.is_exists(t):
exists_str = "?" if not settings.unicode() else "∃"
if hasattr(t.arg, "print_type"):
var_str = B(t.arg.var_name) + N("::") + print_type(thy, t.arg.var_T)
else:
var_str = B(t.arg.var_name)
body_repr = helper(t.arg.body, [t.arg.var_name] + bd_vars)
return N(exists_str) + var_str + N(". ") + body_repr
# Function update
elif function.is_fun_upd(t):
f, upds = function.strip_fun_upd(t)
upd_strs = [helper(a, bd_vars) + N(" := ") + helper(b, bd_vars) for a, b in upds]
return N("(") + helper(f, bd_vars) + N(")(") + commas_join(upd_strs) + N(")")
# Finally, usual function application
else:
if get_priority(t.fun) < 95:
str_fun = N("(") + helper(t.fun, bd_vars) + N(")")
else:
str_fun = helper(t.fun, bd_vars)
if get_priority(t.arg) <= 95:
str_arg = N("(") + helper(t.arg, bd_vars) + N(")")
else:
str_arg = helper(t.arg, bd_vars)
return str_fun + N(" ") + str_arg
elif t.is_abs():
lambda_str = "%" if not settings.unicode() else "λ"
if hasattr(t, "print_type"):
var_str = B(t.var_name) + N("::") + print_type(thy, t.var_T)
else:
var_str = B(t.var_name)
body_repr = helper(t.body, [t.var_name] + bd_vars)
return N(lambda_str) + var_str + N(". ") + body_repr
elif t.is_bound():
if t.n >= len(bd_vars):
raise OpenTermException
else:
return B(bd_vars[t.n])
else:
raise TypeError()
t = copy(t) # make copy here, because infer_printed_type may change t.
infertype.infer_printed_type(thy, t)
res = helper(t, [])
if settings.highlight():
res = optimize_highlight(res)
return res | 745b378dac77411ba678911c478b6f5c8915c762 | 6,583 |
def build_model():
"""Builds the model."""
return get_model()() | f843bce4edf099efd138a198f12c392aa2e723cd | 6,584 |
def truncate_field_data(model, data):
"""Truncate all data fields for model by its ``max_length`` field
attributes.
:param model: Kind of data (A Django Model instance).
:param data: The data to truncate.
"""
fields = dict((field.name, field) for field in model._meta.fields)
return dict((name, truncate_by_field(fields[name], value))
for name, value in data.items()) | 3f0c77d279e712258d3a064ca9fed06cd64d9eaf | 6,585 |
import io
import zipfile as zf
def get_all_students(zip):
"""Returns student tuple for all zipped submissions found in the zip file."""
students = []
# creating all the student objects that we can zip files of
for filename in zip.namelist():
if not filename.endswith(".zip"):
continue
firstname, surname = split_zipname(filename)
student_zip_data = io.BytesIO(zip.open(filename).read())
student_zipfile = zf.ZipFile(student_zip_data)
students.append(Student(firstname, surname, student_zipfile))
return students | d5088ecf43275664e8420f30f508e70fad7cef77 | 6,586 |
def is_shipping_method_applicable_for_postal_code(
customer_shipping_address, method
) -> bool:
"""Return if shipping method is applicable with the postal code rules."""
results = check_shipping_method_for_postal_code(customer_shipping_address, method)
if not results:
return True
if all(
map(
lambda rule: rule.inclusion_type == PostalCodeRuleInclusionType.INCLUDE,
results.keys(),
)
):
return any(results.values())
if all(
map(
lambda rule: rule.inclusion_type == PostalCodeRuleInclusionType.EXCLUDE,
results.keys(),
)
):
return not any(results.values())
# Shipping methods with complex rules are not supported for now
return False | cca519a35ab01dddac71ac18bdf8a40e1b032b83 | 6,587 |
def populate_api_servers():
""" Find running API servers. """
def api_server_info(entry):
prefix, port = entry.rsplit('-', 1)
project_id = prefix[len(API_SERVER_PREFIX):]
return project_id, int(port)
global api_servers
monit_entries = yield monit_operator.get_entries()
server_entries = [api_server_info(entry) for entry in monit_entries
if entry.startswith(API_SERVER_PREFIX)]
for project_id, port in server_entries:
api_servers[project_id] = port | 0543e350917c3fe419022aebdd9002098021923a | 6,588 |
def create_recipe_json(image_paths: list) -> dict:
"""
Orchestrate the various services to respond to a create recipe request.
"""
logger.info('Creating recipe json from image paths: {}'.format(image_paths))
full_text = load_images_return_text(image_paths)
recipe_json = assign_text_to_recipe(full_text)
return recipe_json | 25ef26d15bf20384df81f46da519c07ea883d5a7 | 6,590 |
def rstrip_extra(fname):
"""Strip extraneous, non-discriminative filename info from the end of a file.
"""
to_strip = ("_R", "_", "fastq", ".", "-")
while fname.endswith(to_strip):
for x in to_strip:
if fname.endswith(x):
fname = fname[:len(fname) - len(x)]
break
return fname | 281ff6dcfae1894dd4685acf433bde89538fe87e | 6,591 |
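For example, trailing separator and extension fragments are stripped one by one:
print(rstrip_extra("sample-lib_.fastq"))  # sample-lib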
def run(preprocessors, data, preprocessing=defaultdict(lambda: None), fit=True):
"""Applies preprocessing to data. It currently suppoerts StandardScaler and
OneHotEncoding
Parameters
----------
preprocessors : list
preprocessors to be applied
data : pd.DataFrame
data to be preprocessed
preprocessing : dict, optional
encoders of each preprocessor, by default defaultdict(lambda: None)
fit : bool, optional
if False, it applies to current encoder, by default True
Returns
-------
pd.DataFrame dict
preprocessed data and preprocessors used
"""
scaler_to_data_type = {
'StandardScaler': 'numeric',
'OneHotEncoder': 'object'}
if len(preprocessors) == 0:
return data, preprocessing
preprocessor = preprocessors[0]
data_type = scaler_to_data_type[preprocessor]
splited_data = split_by_type(data)
splited_data[data_type], preprocessing[preprocessor] = \
apply_preprocessor(splited_data[data_type],
preprocessor,
fit=fit,
encoder=preprocessing[preprocessor])
processed_data = pd.concat(splited_data.values(), axis=1)
return run(preprocessors[1:], processed_data, preprocessing, fit) | 94b10007896062760a278cdeaf60388152c96f73 | 6,592 |
def vouchers_tab(request, voucher_group, deleted=False, template_name="manage/voucher/vouchers.html"):
"""Displays the vouchers tab
"""
vouchers = voucher_group.vouchers.all()
paginator = Paginator(vouchers, 20)
page = paginator.page((request.POST if request.method == 'POST' else request.GET).get("page", 1))
taxes = Tax.objects.all()
if (request.method == "POST") and (deleted is False):
voucher_form = VoucherForm(data=request.POST)
else:
voucher_form = VoucherForm()
return render_to_string(template_name, request=request, context={
"voucher_group": voucher_group,
"taxes": taxes,
"form": voucher_form,
"vouchers_inline": vouchers_inline(request, voucher_group, vouchers, paginator, page),
}) | f488c21a83b6b22e3c0e4d5fa2e35156435bada7 | 6,593 |
def spots_rmsd(spots):
""" Calculate the rmsd for a series of small_cell_spot objects
  @param spots list of small_cell_spot objects
  @return RMSD (pixels) over all spots with predictions
"""
rmsd = 0
count = 0
  print('Spots with no preds', [spot.pred is None for spot in spots].count(True), 'of', len(spots))
for spot in spots:
if spot.pred is None:
continue
rmsd += measure_distance(col((spot.spot_dict['xyzobs.px.value'][0],spot.spot_dict['xyzobs.px.value'][1])),col(spot.pred))**2
count += 1
if count == 0: return 0
return math.sqrt(rmsd/count) | 13809a7a0353dc18b037cd2d78944ed5f9cdc596 | 6,595 |
def sanitize_df(data_df, schema, setup_index=True, missing_column_procedure='fill_zero'):
""" Sanitize dataframe according to provided schema
Returns
-------
data_df : pandas DataFrame
Will have fields provided by schema
Will have field types (categorical, datetime, etc) provided by schema.
"""
data_df = data_df.reset_index()
for ff, field_name in enumerate(schema.field_names):
type_ff = schema.fields[ff].descriptor['type']
if field_name not in data_df.columns:
if missing_column_procedure == 'fill_zero':
if type_ff == 'integer':
data_df[field_name] = 0
elif type_ff == 'number':
data_df[field_name] = 0.0
# Reorder columns to match schema
data_df = data_df[schema.field_names]
# Cast fields to required type (categorical / datetime)
for ff, name in enumerate(schema.field_names):
ff_spec = schema.descriptor['fields'][ff]
if 'pandas_dtype' in ff_spec and ff_spec['pandas_dtype'] == 'category':
data_df[name] = data_df[name].astype('category')
elif 'type' in ff_spec and ff_spec['type'] == 'datetime':
data_df[name] = pd.to_datetime(data_df[name])
if hasattr(schema, 'primary_key'):
data_df = data_df.sort_values(schema.primary_key)
if setup_index:
data_df = data_df.set_index(schema.primary_key)
return data_df | 8664f9dd8feea60044397072d85d21c8b5dfd6d4 | 6,596 |
def get_id_argument(id_card):
"""
    Extract the component fields of a Chinese resident ID card number
    :param id_card: the 15- or 18-digit ID card number
    :return: dict of the body, address, birthday, order and check-bit codes plus the type (15 or 18)
"""
id_card = id_card.upper()
id_length = len(id_card)
if id_length == 18:
code = {
'body': id_card[0:17],
'address_code': id_card[0:6],
'birthday_code': id_card[6:14],
'order_code': id_card[14:17],
'check_bit': id_card[17:18],
'type': 18
}
else:
code = {
'body': id_card,
'address_code': id_card[0:6],
'birthday_code': '19' + id_card[6:12],
'order_code': id_card[12:15],
'check_bit': '',
'type': 15
}
return code | ae4cad97e787fe1b0697b6a0f842f0da09795d6a | 6,597 |
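A usage sketch with a made-up (not real) 18-digit ID number, showing the sliced fields:
info = get_id_argument("11010119900101123X")
print(info['address_code'], info['birthday_code'], info['order_code'], info['check_bit'])
# 110101 19900101 123 X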
def rhypergeometric(n, m, N, size=None):
"""
Returns hypergeometric random variates.
"""
if n == 0:
return np.zeros(size, dtype=int)
elif n == N:
out = np.empty(size, dtype=int)
out.fill(m)
return out
return np.random.hypergeometric(n, N - n, m, size) | e8ea95bb742891037de264462be168fab9d68923 | 6,598 |
def my_model_builder(my_model: MyModel) -> KerasModel:
"""Build the siamese network model """
input_1 = layers.Input(my_model.input_shape)
input_2 = layers.Input(my_model.input_shape)
# As mentioned above, Siamese Network share weights between
# tower networks (sister networks). To allow this, we will use
# same embedding network for both tower networks.
embedding_network = build_model_tower(my_model)
tower_1 = embedding_network(input_1)
tower_2 = embedding_network(input_2)
merge_layer = layers.Lambda(euclidean_distance)([tower_1, tower_2])
normal_layer = tf.keras.layers.BatchNormalization()(merge_layer)
output_layer = layers.Dense(1, activation="sigmoid")(normal_layer)
keras_model = keras.Model(inputs=[input_1, input_2], outputs=output_layer)
keras_model.compile(
loss=my_model.loss,
optimizer=my_model.optimizer,
metrics=my_model.metrics
)
return keras_model | 53b32468469e7fc8cbc8f2776a1711181363bf60 | 6,600 |
from datetime import datetime
def create_embed(
title,
description,
fields = None,
colour = None,
timestamp = datetime.utcnow(),
author = None,
author_icon = None,
thumbnail = None,
image = None,
footer = None
):
"""Create an Embed
Args:
title (str): Set title
description (str): Set description
fields (list of tuples): Set fields
colour (int, optional): Set color. Defaults to None.
timestamp (datetime, optional): Set timestamp. Defaults to current time.
author (str, optional): Set author. Defaults to None.
author_icon (str, optional): Set author icon using image url. Defaults to None.
thumbnail (str, optional): Set thumbnail using image url. Defaults to None.
image (str, optional): Set image using image url. Defaults to None.
footer (str, optional): Set footer. Defaults to None.
Returns:
embed: returns an embed
"""
embed = Embed(
title=title,
description=description,
colour=colour,
timestamp=timestamp
)
if fields is not None:
for name, value, inline in fields:
embed.add_field(name=name, value=value, inline=inline)
embed.set_author(name=author, icon_url=author_icon)
embed.set_footer(text=footer)
embed.set_thumbnail(url=thumbnail)
embed.set_image(url=image)
return embed | 741238fc50e2eda98a5cbfeab0c5c4d5cb3adbf2 | 6,601 |
def autoCalibration(I):
"""Returns horizontal and vertical factors by which every distance in
pixels should be multiplied in order to obtain the equivalent distance in
millimeters. This program assumes that the scale presents clear axis ticks and
that the distance between two biggest ticks is equal to 10 mm.
It also assumes that both horizontal and vertical scales are present in the
up right quarter of image I.
Args:
I (array): one canal image. If I is a RGB image, it is transformed
to a grayscale image.
Returns:
calibFactorX (double) and calibFactorY (double) are respectively the
horizontal and vertical calibration factors
"""
#Check if I is a 1-canal image
if len(I.shape) > 2:
I = cv2.cvtColor(I, cv2.COLOR_RGB2GRAY)
length, width = I.shape[0], I.shape[1]
#Cropping with empirical percentages and binarization of the selection
# !!! EMPIRICAL
TCP = 0.1 #Top cropping percentage - #empirical percentage
LCP = 0.5 #Left cropping percentage
BCP = 0.65 #Bottom cropping percentage
RCP = 0.1 #Right cropping percentage
Scale_image = I[int(TCP * length):length-int(BCP * length),\
int(LCP * width):width-int(RCP * width)]
Binar_I = cv2.threshold(Scale_image, 220., 255, cv2.THRESH_BINARY)[1]
#Selection of the biggest axis ticks: contours of white objects are found as
#well as minimal rectangles encapsulating each object. Conditions on the
#size of these contours/bounding rectangles enable the removal of objects
#that are not the biggest ticks
contours = cv2.findContours(Binar_I, cv2.RETR_EXTERNAL, \
cv2.CHAIN_APPROX_NONE)[0]
contours_size = [contours[i].size for i in range (len(contours))]
BoundingRectangles = []
for i in range(len(contours)):
if contours_size[i]<=1.7*np.mean(contours_size): #condition to stop considering the objects corresponding to figures
p1, p2, l1, l2 = cv2.boundingRect(contours[i]) #rectangles identified with point (p1,p2) and vectors (l2,0), (0,l1)
BoundingRectangles.append([i, (p1,p2,l1,l2), 2.*l1+2.*l2])
MeanPerim = np.mean([BoundingRectangles[i][2] for i in range(len(BoundingRectangles))])
Dashes = [BoundingRectangles[i] for i in range(len(BoundingRectangles)) if BoundingRectangles[i][2]>MeanPerim] #removal of points and small dashes
#Calculation of the minimal distances between two horizontal ticks and
#two vertical ticks
#browse all detected axis ticks
horiz = 10000000.
vertic = 10000000.
for i in range (0, len(Dashes)-1):
ref_Dash = Dashes[i][1]
for j in range(i+1,len(Dashes)):
if len(set(list(range(Dashes[j][1][0],Dashes[j][1][0]+Dashes[j][1][2])))\
.intersection(list(range(ref_Dash[0],ref_Dash[0]+ref_Dash[2]))))>2:
h = abs(ref_Dash[1]+ref_Dash[3]-Dashes[j][1][1]-Dashes[j][1][3])
if h<vertic:
vertic = h
if len(set(list(range(Dashes[j][1][1],Dashes[j][1][1]+Dashes[j][1][3])))\
.intersection(list(range(ref_Dash[1],ref_Dash[1]+ref_Dash[3]))))>2:
h = abs(ref_Dash[0]-Dashes[j][1][0])
if h<horiz:
horiz = h
#Factors to convert distance in pixels into distance in millimeters
if horiz == 10000000. or horiz == 0:
calibFactorX = None
else:
calibFactorX = 10./horiz
if vertic == 10000000. or vertic == 0:
calibFactorY = None
else:
calibFactorY = 10./vertic
''' visual check
for d in range(len(Dashes)):
p1 = Dashes[d][1][0]
p2 = Dashes[d][1][1]
l1 = Dashes[d][1][2]
l2 = Dashes[d][1][3]
for l in range(p1,p1+l1+1):
Binar_I[p2,l] = 150
Binar_I[p2+l2,l] = 150
for c in range(p2,p2+l2+1):
Binar_I[c,p1] = 150
Binar_I[c,p1+l1] = 150
cv2.imshow('Binary image', Binar_I)
cv2.waitKey(0) & 0xFF
cv2.destroyAllWindows()
'''
return calibFactorX, calibFactorY | 30016c41a8b21531cfd277a669a6b16b01322387 | 6,602 |
def reverse_handler(handler_input):
"""Check if a verb is provided in slot values. If provided, then
looks for the paradigm in the irregular_verbs file.
If not, then it asks user to provide the verb again.
"""
# iterate over the dictionaries in irregular_verbs.py and looks for
# the verb in the slot. If it finds it, it returns the dictionary
def get_verb(irregular_verbs, filled_verboconiugato_slot):
for dictionary in IRREGULAR_VERBS["verbs"]:
if dictionary["PS"] == verboconiugato or dictionary["PP"] == verboconiugato:
return dictionary
# type: (HandlerInput) -> Response
attribute_manager = handler_input.attributes_manager
session_attr = attribute_manager.session_attributes
slots = handler_input.request_envelope.request.intent.slots
if verboconiugato_slot in slots: # if slot is filled
verboconiugato = slots[verboconiugato_slot].value
handler_input.attributes_manager.session_attributes[
verboconiugato_slot_key] = verboconiugato # verbo is equal to what i said ex. know
# execute the function based on the verb the user asks for. askedVerb
# becomes equal to the dictionary returned by the function
askedVerb = get_verb(irregular_verbs.IRREGULAR_VERBS, verboconiugato)
if verboconiugato == "read" and askedVerb:
baseVerb = askedVerb["Base"]
pastSimple = askedVerb["PS"]
pastPart = askedVerb["PP"]
traduzione = askedVerb["Italiano"]
speech = ("<lang xml:lang='en-GB'>{}</lang> è il verbo <voice name='Emma'><lang xml:lang='en-GB'>to {}</lang></voice>. Il suo paradigma è <voice name='Emma'><lang xml:lang='en-GB'>to {}, <phoneme alphabet='ipa' ph='rɛd'>{}</phoneme>, <phoneme alphabet='ipa' ph='rɛd'>{}</phoneme></lang></voice>. Significa <phoneme alphabet='ipa' ph='ˈlɛdʤere'>{}</phoneme>.".format(verboconiugato, baseVerb, baseVerb, pastSimple, pastPart, traduzione))
reprompt = ("Cosa vuoi chiedermi?")
handler_input.response_builder.set_should_end_session(True)
elif askedVerb:
baseVerb = askedVerb["Base"]
pastSimple = askedVerb["PS"]
pastPart = askedVerb["PP"]
traduzione = askedVerb["Italiano"]
speech = ("<lang xml:lang='en-GB'>{}</lang> è il verbo <voice name='Emma'><lang xml:lang='en-GB'>to {}</lang></voice>. Il suo paradigma è <voice name='Emma'><lang xml:lang='en-GB'>to {}, {}, {}</lang></voice>. Significa {}.".format(
verboconiugato, baseVerb, baseVerb, pastSimple, pastPart, traduzione))
reprompt = ("Cosa vuoi chiedermi?")
handler_input.response_builder.set_should_end_session(True)
else:
speech = (
"Non trovo il verbo <lang xml:lang='en-GB'>{}</lang>. Se è corretto, allora la sua coniugazione è regolare".format(verboconiugato))
reprompt = ("Cosa vuoi chiedermi?")
handler_input.response_builder.set_should_end_session(True)
# if slot isn't filled, repeat helptext
else:
speech = ("Non ho capito." + help_text)
handler_input.response_builder.ask(help_text)
handler_input.response_builder.speak(speech).ask(
reprompt).set_should_end_session(True)
return handler_input.response_builder.response | f1b49b88314f3218af03c6910d72729f888f2a11 | 6,603 |
def load_wrf_data(filename):
"""Load required data form the WRF output file : filename"""
base_data=load_vars(filename,wrfvars)
skin_t=load_tskin(filename,tsvar,landmask)
base_data.append(skin_t)
atts=mygis.read_atts(filename,global_atts=True)
return Bunch(data=base_data,global_atts=atts) | da6439d3d4adfc8b84d5bf5911aa5e4b9d628baa | 6,604 |
def sanitize_date(date_dict: dict):
"""
Function to take the date values entered by the user and check their validity. If valid it returns True,
otherwise it sets the values to None and returns False
:param date_dict:
:return:
"""
month = date_dict["month"]
day = date_dict["day"]
year = date_dict["year"]
date = [month, day, year]
date_is_valid = not any([component is None for component in date])
if date_is_valid:
date_is_valid &= not (month == 2 and day > 29)
date_is_valid &= not (month in [4, 6, 9, 11] and day > 30)
is_leap_year = (year % 4) == 0
is_leap_year &= ((year % 100) != 0 or (year % 400) == 0)
date_is_valid &= not (month == 2 and day == 29 and not is_leap_year)
if not date_is_valid:
date_dict["month"] = date_dict["day"] = date_dict["year"] = None
return False
return True | c8cc01c8c1259ab8c4b263e36ae9f85a95356017 | 6,606 |
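A minimal usage sketch for sanitize_date, showing the leap-year handling (the dates are illustrative):

feb_29_2021 = {"month": 2, "day": 29, "year": 2021}
sanitize_date(feb_29_2021)   # False; 2021 is not a leap year, so the dict is reset to None values
feb_29_2020 = {"month": 2, "day": 29, "year": 2020}
sanitize_date(feb_29_2020)   # True; 2020 is a leap year, so the dict is left untouched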
def create_scale(tonic, pattern, octave=1):
"""
Create an octave-repeating scale from a tonic note
and a pattern of intervals
Args:
tonic: root note (midi note number)
pattern: pattern of intervals (list of numbers representing
intervals in semitones)
octave: span of scale (in octaves)
Returns:
list of midi notes in the scale
"""
assert(sum(pattern)==12)
scale = [tonic]
note = tonic
for o in range(octave):
for i in pattern:
note += i
if note <= 127:
scale.append(note)
return scale | f9337289fda2e1b08cd371d3e91cc5a23c9c9822 | 6,607 |
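A short example of create_scale, assuming standard MIDI numbering where 60 is middle C:

major_pattern = [2, 2, 1, 2, 2, 2, 1]      # whole/half steps, must sum to 12
create_scale(60, major_pattern)            # [60, 62, 64, 65, 67, 69, 71, 72]
create_scale(60, major_pattern, octave=2)  # C major spanning two octaves, up to 84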
def _qfloat_append(qf, values, axis=None):
"""Implement np.append for qfloats."""
# First, convert to the same unit.
qf1, qf2 = same_unit(qf, values, func=np.append)
nominal = np.append(qf1.nominal, qf2.nominal, axis)
std = np.append(qf1.uncertainty, qf2.uncertainty, axis)
return QFloat(nominal, std, qf1.unit) | 46049a2ba43997578ae502acd395cfa767e623ca | 6,608 |
from typing import List
import numpy as np
def filter_by_mean_color(img:np.ndarray, circles:List[Circle], threshold=170) -> List[Circle]:
"""Filter circles to keep only those who covers an area which high pixel mean than threshold"""
filtered = []
for circle in circles:
box = Box(circle=circle)
area = box.get_region(img)
if np.mean(area) > threshold:
filtered.append(circle)
return filtered | d23f92d363cd4df70ba0d0d01450865546d7f289 | 6,609 |
def ParseSortByArg(sort_by=None):
"""Parses and creates the sort by object from parsed arguments.
Args:
sort_by: list of strings, passed in from the --sort-by flag.
Returns:
A parsed sort by string ending in asc or desc.
"""
if not sort_by:
return None
fields = []
for field in sort_by:
if field.startswith('~'):
field = field.lstrip('~') + ' desc'
else:
field += ' asc'
fields.append(field)
return ','.join(fields) | cc2c40d8d810396420e5c3ede0d65159ed21d6bc | 6,610 |
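A small usage sketch for ParseSortByArg (the field names are arbitrary examples):

ParseSortByArg(['~create_time', 'name'])  # 'create_time desc,name asc'
ParseSortByArg(None)                      # None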
import numpy as np
def dense_to_text(decoded, originals):
"""
Convert a dense, integer encoded `tf.Tensor` into a readable string.
Create a summary comparing the decoded plaintext with a given original string.
Args:
decoded (np.ndarray):
Integer array, containing the decoded sequences.
originals (np.ndarray):
String tensor, containing the original input string for comparision.
`originals` can be an empty tensor.
Returns:
np.ndarray:
1D string Tensor containing only the decoded text outputs.
[decoded_string_0, ..., decoded_string_N]
np.ndarray:
2D string Tensor with layout:
[[decoded_string_0, original_string_0], ...
[decoded_string_N, original_string_N]]
"""
decoded_strings = []
original_strings = []
for d in decoded:
decoded_strings.append(''.join([itoc(i) for i in d]))
if len(originals) > 0:
for o in originals:
original_strings.append(''.join([c for c in o.decode('utf-8')]))
else:
original_strings = ['n/a'] * len(decoded_strings)
    decoded_strings = np.array(decoded_strings, dtype=object)
    original_strings = np.array(original_strings, dtype=object)
summary = np.vstack([decoded_strings, original_strings])
return np.array(decoded_strings), summary | d7d4ec6ef2653a4e9665711201cef807a6c9830b | 6,611 |
def admin_view_all_working_curriculums(request):
""" views all the working curriculums offered by the institute """
user_details = ExtraInfo.objects.get(user = request.user)
des = HoldsDesignation.objects.all().filter(user = request.user).first()
if str(des.designation) == "student" or str(des.designation) == "Associate Professor" or str(des.designation) == "Professor" or str(des.designation) == "Assistant Professor" :
return HttpResponseRedirect('/programme_curriculum/mainpage/')
elif str(request.user) == "acadadmin" :
pass
curriculums = Curriculum.objects.filter(working_curriculum=1)
return render(request,'programme_curriculum/acad_admin/admin_view_all_working_curriculums.html',{'curriculums':curriculums}) | 8ba99fe5712c8a93b62e2ab0c9e22594a442d9bd | 6,612 |
def getEmuAtVa(vw, va, maxhit=None):
"""
Build and run an emulator to the given virtual address
from the function entry point.
(most useful for state analysis. kinda heavy though...)
"""
fva = vw.getFunction(va)
    if fva is None:
return None
cbva,cbsize,cbfva = vw.getCodeBlock(va)
fgraph = v_graphutil.buildFunctionGraph(vw, fva)
# Just take the first one off the iterator...
for path in v_graphutil.getCodePathsTo(fgraph, cbva):
emu = vw.getEmulator()
opcodes = v_graphutil.getOpsFromPath(vw, fgraph, path)
for op in opcodes:
if op.va == va:
break
emu.executeOpcode(op)
return emu | bea812a1d74b39e9ba83fde56bf90e4055425b89 | 6,613 |
def _create_test_validity_conditional(metric):
"""Creates BigQuery SQL clauses to specify validity rules for an NDT test.
Args:
metric: (string) The metric for which to add the conditional.
Returns:
(string) A set of SQL clauses that specify conditions an NDT test must
meet to be considered a valid, completed test.
"""
# NDT test is supposed to last 10 seconds, give some buffer for tests that
# ended slighly before 10 seconds.
MIN_DURATION = _seconds_to_microseconds(9)
# Tests that last > 1 hour are likely erroneous.
MAX_DURATION = _seconds_to_microseconds(3600)
# A test that did not exchange at least 8,192 bytes is likely erroneous.
MIN_BYTES = 8192
# web100 state variable constants from
# http://www.web100.org/download/kernel/tcp-kis.txt
STATE_CLOSED = 1
STATE_ESTABLISHED = 5
STATE_TIME_WAIT = 11
# For RTT metrics, exclude results of tests with 10 or fewer round trip time
# samples, because there are not enough samples to accurately estimate the
# RTT.
MIN_RTT_SAMPLES = 10
conditions = []
# Must have completed the TCP three-way handshake.
conditions.append((
'(web100_log_entry.snap.State = {state_closed}\n\t'
'\tOR (web100_log_entry.snap.State >= {state_established}\n\t'
'\t\tAND web100_log_entry.snap.State <= {state_time_wait}))').format(
state_closed=STATE_CLOSED,
state_established=STATE_ESTABLISHED,
state_time_wait=STATE_TIME_WAIT))
# Must have been determined to be unaffected by platform error.
conditions.append(('blacklist_flags == 0'))
if _is_server_to_client_metric(metric):
# Must leave slow start phase of TCP, indicated by reaching
# congestion at least once.
conditions.append('web100_log_entry.snap.CongSignals > 0')
# Must send at least the minimum number of bytes.
conditions.append('web100_log_entry.snap.HCThruOctetsAcked >= %d' %
MIN_BYTES)
# Must last for at least the minimum test duration.
conditions.append(
('(web100_log_entry.snap.SndLimTimeRwin +\n\t'
'\tweb100_log_entry.snap.SndLimTimeCwnd +\n\t'
'\tweb100_log_entry.snap.SndLimTimeSnd) >= %u') % MIN_DURATION)
# Must not exceed the maximum test duration.
conditions.append(
('(web100_log_entry.snap.SndLimTimeRwin +\n\t'
'\tweb100_log_entry.snap.SndLimTimeCwnd +\n\t'
'\tweb100_log_entry.snap.SndLimTimeSnd) < %u') % MAX_DURATION)
# Exclude results of tests with fewer than 10 round trip time samples,
# because there are not enough samples to accurately estimate the RTT.
if metric == 'minimum_rtt' or metric == 'average_rtt':
conditions.append('web100_log_entry.snap.CountRTT > %u' %
MIN_RTT_SAMPLES)
else:
# Must receive at least the minimum number of bytes.
conditions.append('web100_log_entry.snap.HCThruOctetsReceived >= %u' %
MIN_BYTES)
# Must last for at least the minimum test duration.
conditions.append('web100_log_entry.snap.Duration >= %u' % MIN_DURATION)
# Must not exceed the maximum test duration.
conditions.append('web100_log_entry.snap.Duration < %u' % MAX_DURATION)
return '\n\tAND '.join(conditions) | 8c65150bdbed3ba75546fc64d8b322d9950339c1 | 6,614 |
from typing import Union
from typing import Sequence
def tfds_train_test_split(
tfds: tf.data.Dataset,
test_frac: float,
dataset_size: Union[int, str],
buffer_size: int = 256,
seed: int = 123,
) -> Sequence[Union[tf.data.Dataset, tf.data.Dataset, int, int]]:
"""
    !!! does not properly work, seems to be dependent on hardware; open issue on
github/tensorflow?
Split tf-dataset into a train and test dataset.
https://stackoverflow.com/questions/48213766/split-a-dataset-created-by-tensorflow-dataset-api-in-to-train-and-test
Args:
        tfds (tf.data.Dataset): TF dataset that will be split into a train and
            test set.
        test_frac (float): Fraction of the samples to put into the test split.
Returns:
[tf.data.Dataset, tf.data.Dataset, int, int]: Returns train and test datasets
as well as the absolut sizes of the full and the train dataset.
"""
    logger.warning(
        "This method of data splitting does not guarantee the same split on every machine.")
full_ds_size = None
if dataset_size == "auto":
logger.warning(
"dataset_size='auto': In order to calculate the size of the dataset, all "
"samples will be loaded.")
full_ds_size = get_tfds_size(tfds)
elif isinstance(dataset_size, int):
full_ds_size = dataset_size
logger.info(f"Using following seed to shuffle data: {seed}")
tfds = tfds.shuffle(buffer_size, reshuffle_each_iteration=False, seed=seed)
train_ds_fraction = 1.0 - test_frac
train_ds_size = int(train_ds_fraction * full_ds_size)
logger.info(f"train dataset size: {train_ds_size}, val dataset size: "
"{full_ds_size - train_ds_size}")
train_dataset = tfds.take(train_ds_size)
test_dataset = tfds.skip(train_ds_size)
return train_dataset, test_dataset, full_ds_size, train_ds_size | 8bbb554eca8a09716279a5d818e1cc3e7bd5ad16 | 6,615 |
from datetime import timedelta
def seconds_to_hms(seconds):
    """
    Convert seconds to H:M:S format.
    Works for periods over 24H also.
    """
    return timedelta(seconds=seconds) | e862be76c6ef6b76f8e4f6351e033193ddefd5b8 | 6,616
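Example usage of seconds_to_hms; str() of the returned timedelta gives the H:M:S form, rolling past 24 hours into a day count:

str(seconds_to_hms(3661))   # '1:01:01'
str(seconds_to_hms(90061))  # '1 day, 1:01:01'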
def _add_spot_2d(image, ground_truth, voxel_size_yx, precomputed_gaussian):
"""Add a 2-d gaussian spot in an image.
Parameters
----------
image : np.ndarray, np.uint
A 2-d image with shape (y, x).
ground_truth : np.ndarray
Ground truth array with shape (nb_spots, 4).
- coordinate_y
- coordinate_x
- sigma_yx
- amplitude
voxel_size_yx : int or float
Size of a voxel on the yx plan, in nanometer.
precomputed_gaussian : Tuple[np.ndarray]
Tuple with one tables of precomputed values for the erf, with shape
(nb_value, 2). One table per dimension.
Returns
-------
new_image : np.ndarray, np.uint
A 2-d image with simulated spots and shape (y, x).
"""
# cast image
original_dtype = image.dtype
image = image.astype(np.float64)
# compute reference spot shape
max_sigma = max(ground_truth[:, 2])
radius_yx, _ = stack.get_radius(
voxel_size_z=None, voxel_size_yx=voxel_size_yx,
psf_z=None, psf_yx=max_sigma)
radius_yx = np.ceil(radius_yx).astype(np.int64)
yx_shape = radius_yx * 2 + 1
# build a grid to represent a spot image
image_spot = np.zeros((yx_shape, yx_shape), dtype=np.uint8)
grid = detection.initialize_grid(
image_spot=image_spot,
voxel_size_z=None,
voxel_size_yx=voxel_size_yx,
return_centroid=False)
# pad image
image_padded = np.pad(image, ((radius_yx, radius_yx),
(radius_yx, radius_yx)), mode="constant")
# loop over every spot
for (coord_y, coord_x, sigma_yx, amp) in ground_truth:
# simulate spot signal
position_spot = np.asarray((radius_yx, radius_yx), dtype=np.int64)
position_spot = np.ravel_multi_index(
position_spot, dims=image_spot.shape)
position_spot = list(grid[:, position_spot])
simulated_spot = detection.gaussian_2d(
grid=grid,
mu_y=position_spot[0],
mu_x=position_spot[1],
sigma_yx=sigma_yx,
voxel_size_yx=voxel_size_yx,
psf_amplitude=amp,
psf_background=0,
precomputed=precomputed_gaussian)
simulated_spot = np.reshape(simulated_spot, image_spot.shape)
# add spot
coord_y_min = int(coord_y)
coord_y_max = int(coord_y + 2 * radius_yx + 1)
coord_x_min = int(coord_x)
coord_x_max = int(coord_x + 2 * radius_yx + 1)
image_padded[coord_y_min:coord_y_max,
coord_x_min:coord_x_max] += simulated_spot
# unpad image
image = image_padded[radius_yx:-radius_yx, radius_yx:-radius_yx]
image_raw = np.reshape(image, image.size)
# sample Poisson distribution from gaussian values
image_raw = np.random.poisson(lam=image_raw, size=image_raw.size)
# reshape and cast image
new_image = np.reshape(image_raw, image.shape)
new_image = np.clip(new_image, 0, np.iinfo(original_dtype).max)
new_image = new_image.astype(original_dtype)
return new_image | 45c32a181df1d0239b0ad872d7c0ad83862338ed | 6,618 |
import requests
def bing(text, bot):
"""<query> - returns the first bing search result for <query>"""
api_key = bot.config.get("api_keys", {}).get("bing_azure")
# handle NSFW
show_nsfw = text.endswith(" nsfw")
# remove "nsfw" from the input string after checking for it
if show_nsfw:
text = text[:-5].strip().lower()
rating = NSFW_FILTER if show_nsfw else DEFAULT_FILTER
if not api_key:
return "Error: No Bing Azure API details."
# why are these all differing formats and why does format have a $? ask microsoft
params = {
"Sources": bingify("web"),
"Query": bingify(text),
"Adult": bingify(rating),
"$format": "json"
}
request = requests.get(API_URL, params=params, auth=(api_key, api_key))
# I'm not even going to pretend to know why results are in ['d']['results'][0]
j = request.json()['d']['results'][0]
if not j["Web"]:
return "No results."
result = j["Web"][0]
# not entirely sure this even needs un-escaping, but it wont hurt to leave it in
title = formatting.truncate(unescape(result["Title"]), 60)
desc = formatting.truncate(unescape(result["Description"]), 150)
url = unescape(result["Url"])
return colors.parse('{} -- $(b){}$(b): "{}"'.format(url, title, desc)) | 5aa5fe7acdc64c815d4a8727b06c13f1e3e3b2ce | 6,619 |
from typing import List
import numpy as np
def single_length_RB(
RB_number: int, RB_length: int, target: int = 0
) -> List[List[str]]:
"""Given a length and number of repetitions it compiles Randomized Benchmarking
sequences.
Parameters
----------
RB_number : int
The number of sequences to construct.
RB_length : int
The number of Cliffords in each individual sequence.
target : int
Index of the target qubit
Returns
-------
list
List of RB sequences.
"""
S = []
for _ in range(RB_number):
seq = np.random.choice(24, size=RB_length - 1) + 1
seq = np.append(seq, inverseC(seq))
seq_gates = []
for cliff_num in seq:
g = [f"{c}[{target}]" for c in cliffords_decomp[cliff_num - 1]]
seq_gates.extend(g)
S.append(seq_gates)
return S | dda3f5a191c666460fc4c791c33530940986b623 | 6,620 |
def decode(text_file_abs_path, threshold=10):
"""
Decodes a text into a ciphertext.
Parameters
---------
text_file_abs_path: str
Returns
-------
ciphertext: str
"""
try:
with open(text_file_abs_path, "rb") as f:
text = f.read()
except Exception:
return None
freq_limit = limit_freq_threshold(threshold)
renamed_ciphertext = ''.join(
MarkovToolbox.derive_first_letter_of_every_sentence(text))
ciphertext = revert_renamed_number(renamed_ciphertext, freq_limit)
if threshold != 10:
ciphertext = NumericalToolbox.change_base(ciphertext, threshold, 10,
standard_formatting=False)
return ciphertext | c0c8c96438baedda43940e2373edc4714511b507 | 6,621 |
def templatetag(parser, token):
"""
Outputs one of the bits used to compose template tags.
Since the template system has no concept of "escaping", to display one of
the bits used in template tags, you must use the ``{% templatetag %}`` tag.
The argument tells which template bit to output:
================== =======
Argument Outputs
================== =======
``openblock`` ``{%``
``closeblock`` ``%}``
``openvariable`` ``{{``
``closevariable`` ``}}``
``openbrace`` ``{``
``closebrace`` ``}``
``opencomment`` ``{#``
``closecomment`` ``#}``
================== =======
"""
# token.split_contents() isn't useful here because this tag doesn't accept variable as arguments
bits = token.contents.split()
if len(bits) != 2:
raise TemplateSyntaxError("'templatetag' statement takes one argument")
tag = bits[1]
if tag not in TemplateTagNode.mapping:
raise TemplateSyntaxError("Invalid templatetag argument: '%s'."
" Must be one of: %s" %
(tag, list(TemplateTagNode.mapping)))
return TemplateTagNode(tag) | 3b441ec3035f8efde9fd2507ab83c83ec5940a7a | 6,622 |
import numpy as np
import jax.numpy as jnp
def reflected_phase_curve(phases, omega, g, a_rp):
"""
Reflected light phase curve for a homogeneous sphere by
Heng, Morris & Kitzmann (2021).
Parameters
----------
phases : `~np.ndarray`
Orbital phases of each observation defined on (0, 1)
omega : tensor-like
        Single-scattering albedo as defined in Heng, Morris & Kitzmann (2021).
g : tensor-like
Scattering asymmetry factor, ranges from (-1, 1).
a_rp : float, tensor-like
Semimajor axis scaled by the planetary radius
Returns
-------
flux_ratio_ppm : tensor-like
Flux ratio between the reflected planetary flux and the stellar flux in
units of ppm.
A_g : tensor-like
Geometric albedo derived for the planet given {omega, g}.
q : tensor-like
Integral phase function
"""
# Convert orbital phase on (0, 1) to "alpha" on (0, np.pi)
alpha = jnp.asarray(2 * np.pi * phases - np.pi)
abs_alpha = jnp.abs(alpha)
alpha_sort_order = jnp.argsort(alpha)
sin_abs_sort_alpha = jnp.sin(abs_alpha[alpha_sort_order])
sort_alpha = alpha[alpha_sort_order]
gamma = jnp.sqrt(1 - omega)
eps = (1 - gamma) / (1 + gamma)
# Equation 34 for Henyey-Greestein
P_star = (1 - g ** 2) / (1 + g ** 2 +
2 * g * jnp.cos(alpha)) ** 1.5
# Equation 36
P_0 = (1 - g) / (1 + g) ** 2
# Equation 10:
Rho_S = P_star - 1 + 0.25 * ((1 + eps) * (2 - eps)) ** 2
Rho_S_0 = P_0 - 1 + 0.25 * ((1 + eps) * (2 - eps)) ** 2
Rho_L = 0.5 * eps * (2 - eps) * (1 + eps) ** 2
Rho_C = eps ** 2 * (1 + eps) ** 2
alpha_plus = jnp.sin(abs_alpha / 2) + jnp.cos(abs_alpha / 2)
alpha_minus = jnp.sin(abs_alpha / 2) - jnp.cos(abs_alpha / 2)
# Equation 11:
Psi_0 = jnp.where(
(alpha_plus > -1) & (alpha_minus < 1),
jnp.log((1 + alpha_minus) * (alpha_plus - 1) /
(1 + alpha_plus) / (1 - alpha_minus)),
0
)
Psi_S = 1 - 0.5 * (jnp.cos(abs_alpha / 2) -
1.0 / jnp.cos(abs_alpha / 2)) * Psi_0
Psi_L = (jnp.sin(abs_alpha) + (np.pi - abs_alpha) *
jnp.cos(abs_alpha)) / np.pi
Psi_C = (-1 + 5 / 3 * jnp.cos(abs_alpha / 2) ** 2 - 0.5 *
jnp.tan(abs_alpha / 2) * jnp.sin(abs_alpha / 2) ** 3 * Psi_0)
# Equation 8:
A_g = omega / 8 * (P_0 - 1) + eps / 2 + eps ** 2 / 6 + eps ** 3 / 24
# Equation 9:
Psi = ((12 * Rho_S * Psi_S + 16 * Rho_L *
Psi_L + 9 * Rho_C * Psi_C) /
(12 * Rho_S_0 + 16 * Rho_L + 6 * Rho_C))
flux_ratio_ppm = 1e6 * (a_rp ** -2 * A_g * Psi)
q = _integral_phase_function(
Psi, sin_abs_sort_alpha, sort_alpha, alpha_sort_order
)
return flux_ratio_ppm, A_g, q | e6f9fadaec4614b5ea0058d13956cb5ef13d57b5 | 6,623 |
from operator import pos
def costspec(
currencies: list[str] = ["USD"],
) -> s.SearchStrategy[pos.CostSpec]:
"""Generates a random CostSpec.
Args:
currencies: An optional list of currencies to select from.
Returns:
A new search strategy.
"""
return s.builds(pos.CostSpec, currency=common.currency(currencies)) | 4147a7046e5d4b16a6f919d77683a849ceb2ce54 | 6,624 |
import json
import base64
def _process_input(data, context):
""" Pre-process request input before it is sent to TensorFlow Serving REST API
Args:
data (obj): the request data stream
context (Context): an object containing request and configuration details
Returns:
(dict): a JSON-serializable dict that contains request body and headers
"""
read_data = data.read()
# endpoint API
if context.request_content_type == 'application/json':
# read as numpy array
image_np = np.asarray(json.loads(read_data)).astype(np.dtype('uint8'))
image_np = np.array(Image.fromarray(image_np).resize((INPUT_HEIGHT,INPUT_WIDTH)))
# batch transform of jpegs
elif context.request_content_type == 'application/x-image':
# load image from bytes and resize
image_from_bytes = Image.open(BytesIO(read_data)).convert('RGB')
image_from_bytes = image_from_bytes.resize((INPUT_HEIGHT,INPUT_WIDTH))
image_np = np.array(image_from_bytes)
# batch transform of tfrecord
elif context.request_content_type == 'application/x-tfexample':
example = tf.train.Example()
example.ParseFromString(read_data)
example_feature = MessageToDict(example.features)
image_encoded = str.encode(example_feature['feature']['image']['bytesList']['value'][0])
image_b64 = base64.decodebytes(image_encoded)
image_np = np.frombuffer(image_b64, dtype=np.dtype('uint8')).reshape(32,32,3)
image_np = np.array(Image.fromarray(image_np).resize((INPUT_HEIGHT,INPUT_WIDTH)))
# raise error if content type is not supported
else:
print("")
_return_error(415, 'Unsupported content type "{}"'.format(
context.request_content_type or 'Unknown'))
# preprocess for resnet50
image_np = tf.keras.applications.resnet_v2.preprocess_input(image_np)
# json serialize
data_np_json = {"instances": [image_np.tolist()]}
data_np_json_serialized = json.dumps(data_np_json)
return data_np_json_serialized | 542e9d04a8e93cb835f049ebd3c9105e24e3b5ac | 6,626 |
import numpy as np
def similarity_matrix_2d(X, Y, metric='cos'):
"""
Calculate similarity matrix
Parameters:
X: ndarray
input matrix 1
Y: ndarray
input matrix 2
distFunc: function
distance function
Returns:
result: ndarray
similarity matrix
"""
n_X = len(X)
n_Y = len(Y)
if metric == 'cos':
dist_func = cos_dist_2d
elif metric == 'euclid':
dist_func = euclid_dist_2d
elif metric == 'mahal':
dist_func = mahal_dist_2d
else:
dist_func = cos_dist_2d
    # pairwise distances between every row of X and every row of Y
    SM = [[dist_func(X[i], Y[j]) for j in range(n_Y)] for i in range(n_X)]
    SM = np.array(SM)
    return SM | 02d78531347c3acb90049505297c009c845e27d2 | 6,627
def issue_list_with_tag(request, tag=None, sortorder=None):
"""
For a tag. display list of issues
"""
if tag:
stag = "\"%s\"" % tag
issues = Issue.tagged.with_any(stag)
tag_cloud = []
if issues:
tag_cloud = get_tagcloud_issues(issues)
issues = issues.filter(is_draft=False)
return issue_list(
request,
issues=issues,
sortorder=sortorder,
min_tv=1,
subset=True,
extra_context = {
'selected_tag' : tag,
'issue_tags' : tag_cloud,
'sort_url' : reverse('issue_with_tag', args=[tag,]),
})
else:
return issue_list(request) | 1f51db85a0b5008819fda73d3d86fa184b56b327 | 6,628 |
def update_deal(id, deal_dict):
"""
Runs local validation on the given dict and gives passing ones to the API to update
"""
if utils.validate_deal_dict(utils.UPDATE, deal_dict, skip_id=True):
resp = utils.request(utils.UPDATE, 'deals', {'id': id}, data=deal_dict)
return utils.parse(resp)
else:
# validation failed but the exception was suppressed
pass | 82355c1a0204128f30b66a91fc22e3650b99f74d | 6,629 |
def plot_tilt_hist(series, ntile: str, group_name: str, extra_space: bool = True):
"""
Plots the histogram group tilts for a single ntile
:param series: frame containing the avg tilts, columns: group, index: pd.Period
:param ntile: the Ntile we are plotting for
:param group_name: the name of the group
:return: None
"""
if extra_space:
fig, ax = plt.subplots(1, 2, figsize=LARGE_FIGSIZE)
    else:
        _, ax = plt.subplots(1, 1, figsize=(4.5, 4.5))
        ax = [ax]  # keep the ax[0] indexing below valid when only one axes is created
title = 'Weight Relative to Universe' if 'Ntile' in group_name else 'Group Exposure'
plotter_frame = series.to_frame('weight')
plotter_frame['colors'] = [TILTS_COLOR_MAP(i) for i in np.linspace(0, 1, len(series))]
plotter_frame = plotter_frame.sort_values('weight')
ax[0].barh(plotter_frame.index.tolist(), plotter_frame['weight'].tolist(), align='center',
color=plotter_frame['colors'].tolist())
ax[0].set(title=f'{ntile}, {group_name}'.title(), ylabel='Group', xlabel=title)
ax[0].axvline(0, linestyle='-', color='black', lw=1)
if extra_space:
return ax[1]
plt.show() | 8f3077831cd11092e2a14bc60152ba693c0da6a6 | 6,630 |
def get_constraints_for_x(cell, board):
"""
Get the constraints for a given cell cell
@param cell Class instance of Variable; a cell of the Sudoku board
@param board
@return Number of constraints
"""
nconstraints = 0
# Row
for cellj in board[cell.row][:cell.col]:
if cellj.get_domain_size() > 1:
nconstraints += 1
for cellj in board[cell.row][cell.col+1:]:
if cellj.get_domain_size() > 1:
nconstraints += 1
# Col
for irow in range(cell.row):
if board[irow][cell.col].get_domain_size() > 1:
nconstraints += 1
for irow in range(cell.row+1, cell.max_domain_val):
if board[irow][cell.col].get_domain_size() > 1:
nconstraints += 1
# .. This would not generalize to a new board, but leave for now
ibox_row = int(cell.row/3) * 3
ibox_col = int(cell.col/3) * 3
if board[ibox_row+1][ibox_col+1].get_domain_size() > 1 \
or board[ibox_row+1][ibox_col+2].get_domain_size() > 1 \
or board[ibox_row+2][ibox_col+1].get_domain_size() > 1 \
or board[ibox_row+2][ibox_col+2].get_domain_size() > 1:
nconstraints += 1
return nconstraints | a46cda54569a12e80b9d52896f07335480799cb1 | 6,631 |
def average(values):
"""Computes the arithmetic mean of a list of numbers.
    >>> print(average([20, 30, 70]))
40.0
"""
try:
return stats.mean(values)
except ZeroDivisionError:
return None | 85d02529404301891e0ecd1f2a9b76695a357504 | 6,632 |
def get_subgraph_pos(G, pos):
""" Returns the filtered positions for subgraph G. If subgraph = original graph then pos will be returned.
Parameters
----------
G : nx.Graph
A graph object.
Pos : dict
A dictionary with nodes as keys and positions as values.
Example
-------
>>> pos = nx.spring_layout(G)
>>> subgraph_nodes = ['1','2','3']
>>> subgraph = G.subgraph(subgraph_nodes)
>>> subgraph_positions = get_subgraph_pos(subgraph,pos)
Returns
-------
dict
Assuming positions were generated earlier for a larger graph with some layout algorithm
this functions returns the filtered positions by the subgraph.
"""
return {k: v for k, v in pos.items() if k in G.nodes()} | ca7fc389cc51aaace7a751f2107fe5cfbfd22e6c | 6,633 |
def _calculateWindowPosition(screenGeometry, iconGeometry, windowWidth, windowHeight):
"""
Calculate window position near-by the system tray using geometry of a system tray
and window geometry
@param screenGeometry: geometry of the screen where system tray is located
@type screenGeometry: QRect
@param iconGeometry: geometry of the system tray icon in screen coordinates
@type iconGeometry: QRect
@param windowWidth: width of the main window
@type windowWidth: int
@param windowHeight: height of the main window including header
@type windowHeight: int
@return: coordinates for main window positioning
@rtype: QPoint
"""
possibleWindowPositions = {
LEFT: {
'x': iconGeometry.x() + iconGeometry.width(),
'y': iconGeometry.y() + iconGeometry.height() / 2 - windowHeight / 2
},
BOTTOM: {
'x': iconGeometry.x() + iconGeometry.width() / 2 - windowWidth / 2,
'y': iconGeometry.y() - windowHeight
},
RIGHT: {
'x': iconGeometry.x() - windowWidth,
'y': iconGeometry.y() + iconGeometry.height() / 2 - windowHeight / 2
},
TOP: {
'x': iconGeometry.x() + iconGeometry.width() / 2 - windowWidth / 2,
'y': iconGeometry.y() + iconGeometry.height()
},
}
position = possibleWindowPositions[_guessTrayPosition(screenGeometry, iconGeometry)]
return QPoint(position['x'], position['y']) | 112011828dcfd0a6a54b6fe2c3d8acd92baf6c64 | 6,634 |
def build_from_config(config: dict, name: str) -> HomingMotor:
"""Build the named HomingMotor from data found in config"""
def check_for_key(key, cfg):
if key not in cfg:
raise RuntimeError('Key "{}" for HomingMotor "{}" not found.'.format(key, name))
else:
return cfg[key]
if name not in config:
raise RuntimeError('Config for HomingMotor "{}" not found.'.format(name))
my_config = config[name]
inverted = check_for_key('inverted', my_config)
max_steps = check_for_key('max_steps', my_config)
name = check_for_key('name', my_config)
pulse_delay = float(check_for_key('pulse_delay', my_config))
sensor = check_for_key('sensor', my_config)
stepper = check_for_key('stepper', my_config)
dir_pin = int(check_for_key("dir_pin", stepper))
ms1_pin = int(check_for_key("ms1_pin", stepper))
ms2_pin = int(check_for_key("ms2_pin", stepper))
ms3_pin = int(check_for_key("ms3_pin", stepper))
step_pin = int(check_for_key("step_pin", stepper))
step_size = int(check_for_key("step_size", stepper))
input_pin = int(check_for_key('input_pin', sensor))
m = build(name, dir_pin, step_pin, ms1_pin, ms2_pin, ms3_pin, input_pin, max_steps, inverted, pulse_delay)
m.set_step_size(step_size)
# print('{} built from config OK'.format(m.get_name()))
return m | ce39fc8db48da8145b9221120c3ec02f3bdda40f | 6,635 |
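A sketch of the nested config layout build_from_config expects; the section name, pin numbers and values below are placeholders, not a real wiring map:

sample_config = {
    "x_axis": {
        "name": "x_axis",
        "inverted": False,
        "max_steps": 4000,
        "pulse_delay": 0.001,
        "sensor": {"input_pin": 17},
        "stepper": {"dir_pin": 5, "step_pin": 6, "ms1_pin": 13,
                    "ms2_pin": 19, "ms3_pin": 26, "step_size": 8},
    }
}
motor = build_from_config(sample_config, "x_axis")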