content | sha1 | id
---|---|---
stringlengths 35 to 762k | stringlengths 40 to 40 | int64 0 to 3.66M
def get_list_channels(sc):
"""Get list of channels."""
# https://api.slack.com/methods/channels.list
response = sc.api_call(
"channels.list",
)
return response['channels'] | d31271bcc065b4a212e298c6283c4d658e5547da | 21,133 |
def error_handler(error):
"""エラーメッセージを生成するハンドラ"""
response = jsonify({ 'cause': error.description['cause'] })
return response, error.code | 282b1a11d8e7326be1fa2d0b1b2457dc5d5d5ca1 | 21,134 |
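As a hedged usage sketch (not from the source), the handler above could be registered on a Flask app like this; the route and error cause are made up, and `abort(code, description=...)` is what populates `error.description`:

```python
from flask import Flask, abort

app = Flask(__name__)
app.register_error_handler(400, error_handler)  # reuse the handler defined above

@app.route("/items")
def items():
    # the description dict becomes error.description inside error_handler
    abort(400, description={"cause": "missing required query parameter"})
```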
def search_records(
name: str,
search: TextClassificationSearchRequest = None,
common_params: CommonTaskQueryParams = Depends(),
include_metrics: bool = Query(
False, description="If enabled, return related record metrics"
),
pagination: PaginationParams = Depends(),
service: TextClassificationService = Depends(
TextClassificationService.get_instance
),
datasets: DatasetsService = Depends(DatasetsService.get_instance),
current_user: User = Security(auth.get_user, scopes=[]),
) -> TextClassificationSearchResults:
"""
Search records in the given dataset
Parameters
----------
name:
The dataset name
search:
The search query request
common_params:
Common query params
include_metrics:
Flag to include related record metrics in the response
pagination:
The pagination params
service:
The dataset records service
datasets:
The dataset service
current_user:
The current request user
Returns
-------
The search results data
"""
search = search or TextClassificationSearchRequest()
query = search.query or TextClassificationQuery()
dataset = datasets.find_by_name(
user=current_user, name=name, task=TASK_TYPE, workspace=common_params.workspace
)
result = service.search(
dataset=dataset,
query=query,
sort_by=search.sort,
record_from=pagination.from_,
size=pagination.limit,
exclude_metrics=not include_metrics,
)
return result | 7dd932131f5fda1680fd419697df9c0a04d19fa5 | 21,135 |
def guess_udic(dic,data):
"""
Guess parameters of universal dictionary from dic, data pair.
Parameters
----------
dic : dict
Dictionary of JCAMP-DX, acqu, proc and spectrum parameters.
data : ndarray
Array of NMR data.
Returns
-------
udic : dict
Universal dictionary of spectral parameters.
"""
# Create an empty universal dictionary
udic = fileiobase.create_blank_udic(1)
# Update default parameters; acqu.par parameters in dic are tried first, then JCAMP-DX header parameters
# size
if data is not None:
udic[0]["size"] = len(data)
else:
warn('No data, cannot set udic size')
# sw
try:
udic[0]['sw'] = float(dic['acqu']['bandwidth']) * 1000
except KeyError:
try:
udic[0]['sw'] = float(dic['dx']['$SW'][0]) * float(dic['dx']['$BF1'][0])
except KeyError:
try:
if dic["spectrum"]["freqdata"]:
udic[0]['sw'] = dic["spectrum"]["xaxis"][-1] - dic["spectrum"]["xaxis"][0]
elif data is not None:
udic[0]['sw'] = len(data) / dic["spectrum"]["xaxis"][-1]
else:
warn("Cannot set spectral width - set manually using: 'udic[0]['sw'] = x' where x is the spectral width in Hz")
except KeyError:
warn("Cannot set spectral width - set manually using: 'udic[0]['sw'] = x' where x is the spectral width in Hz")
# obs
try:
udic[0]['obs'] = float(dic['acqu']['b1Freq'])
except KeyError:
try:
udic[0]['obs'] = float(dic['dx']['$BF1'][0])
except KeyError:
warn("Cannot set observe frequency - set manually using: 'udic[0]['obs'] = x' where x is magnetic field in MHz")
# car
try:
udic[0]['car'] = float(dic['acqu']['lowestFrequency']) + (float(dic['acqu']['bandwidth']) * 1000 / 2)
except KeyError:
try:
udic[0]['car'] = (float(dic['dx']['$REFERENCEPOINT'][0]) * -1 ) + (float(dic['dx']['$SW'][0]) * udic[0]['obs'] / 2)
except KeyError:
try:
udic[0]['car'] = (float(dic['dx']['$BF1'][0]) - float(dic['dx']['$SF'][0])) * 1000000
except KeyError:
warn("Cannot set carrier - try: 'udic[0]['car'] = x * udic[0]['obs']' where x is the center of the spectrum in ppm")
# label
try:
udic[0]['label'] = dic['acqu']['rxChannel']
except KeyError:
try:
label_value = dic['dx'][".OBSERVENUCLEUS"][0].replace("^", "")
udic[0]["label"] = label_value
except KeyError:
warn("Cannot set observed nucleus label")
#keys left to default
# udic[0]['complex']
# udic[0]['encoding']
# udic[0]['time'] = True
# udic[0]['freq'] = False
return udic | a8d79255b34f407ea54766ec2e4aedaf2ae42df9 | 21,136 |
def matchPosAny (msg, pos, rules, subrules):
"""Indicates whether or not `msg` matches any (i.e. a single) `subrule`
in `rules`, starting at position `pos`.
Returns the position in `msg` just after a successful match, or -1
if no match was found.
"""
index = -1
for rule in subrules:
if (index := matchPos(msg, pos, rules, rule)) != -1:
break
return index | 6ad053cdb61d7cc917e3acb896ea5d23cc042de9 | 21,137 |
def compute_accuracy(model, loader):
"""
:param model: a model which returns classifier_output and segmentator_output
:param loader: data loader
"""
model.eval() # enter evaluation mode
score_accum = 0
count = 0
for x, y, _, _ in loader:
classifier_output, _ = model(x)
score_accum += accuracy(classifier_output.data.cpu().numpy(), y.data.cpu().numpy()) * y.shape[0]
count += y.shape[0]
return float(score_accum / count) | ecc86c3c9c2429843bdd25023b3c6f0393c83db9 | 21,138 |
from operator import and_
def create_base_query_grouped_fifo(rse_id, filter_by_rse='destination', session=None):
"""
Build the sqlalchemy queries to filter relevant requests and to group them in datasets.
Group requests either by same destination RSE or source RSE.
:param rse_id: The RSE id.
:param filter_by_rse: Decide whether to filter by transfer destination or source RSE (`destination`, `source`).
:param session: The database session.
"""
# query DIDs that are attached to a collection and add a column indicating the order of attachment in case of multiple attachments
attachment_order_subquery = session.query(models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.child_scope, models.DataIdentifierAssociation.name, models.DataIdentifierAssociation.scope,
func.row_number().over(partition_by=(models.DataIdentifierAssociation.child_name, models.DataIdentifierAssociation.child_scope),
order_by=models.DataIdentifierAssociation.created_at).label('order_of_attachment'))\
.subquery()
# query transfer requests and join with according datasets
filtered_requests_subquery = None
grouped_requests_subquery = None
dialect = session.bind.dialect.name
if dialect == 'mysql' or dialect == 'sqlite':
filtered_requests_subquery = session.query(models.Request.id.label('id'),
func.ifnull(attachment_order_subquery.c.name, models.Request.name).label('dataset_name'),
func.ifnull(attachment_order_subquery.c.scope, models.Request.scope).label('dataset_scope'))
combined_attached_unattached_requests = session.query(func.ifnull(attachment_order_subquery.c.scope, models.Request.scope).label('scope'),
func.ifnull(attachment_order_subquery.c.name, models.Request.name).label('name'),
models.Request.bytes,
models.Request.requested_at)
elif dialect == 'postgresql':
filtered_requests_subquery = session.query(models.Request.id.label('id'),
func.coalesce(attachment_order_subquery.c.name, models.Request.name).label('dataset_name'),
func.coalesce(attachment_order_subquery.c.scope, models.Request.scope).label('dataset_scope'))
combined_attached_unattached_requests = session.query(func.coalesce(attachment_order_subquery.c.scope, models.Request.scope).label('scope'),
func.coalesce(attachment_order_subquery.c.name, models.Request.name).label('name'),
models.Request.bytes,
models.Request.requested_at)
elif dialect == 'oracle':
filtered_requests_subquery = session.query(models.Request.id.label('id'),
func.nvl(attachment_order_subquery.c.name, models.Request.name).label('dataset_name'),
func.nvl(attachment_order_subquery.c.scope, models.Request.scope).label('dataset_scope'))
combined_attached_unattached_requests = session.query(func.nvl(attachment_order_subquery.c.scope, models.Request.scope).label('scope'),
func.nvl(attachment_order_subquery.c.name, models.Request.name).label('name'),
models.Request.bytes,
models.Request.requested_at)
filtered_requests_subquery = filtered_requests_subquery.join(attachment_order_subquery, and_(models.Request.name == attachment_order_subquery.c.child_name,
models.Request.scope == attachment_order_subquery.c.child_scope,
attachment_order_subquery.c.order_of_attachment == 1), isouter=True)
combined_attached_unattached_requests = combined_attached_unattached_requests.join(attachment_order_subquery, and_(models.Request.name == attachment_order_subquery.c.child_name,
models.Request.scope == attachment_order_subquery.c.child_scope,
attachment_order_subquery.c.order_of_attachment == 1), isouter=True)
# depending if throttler is used for reading or writing
if filter_by_rse == 'source':
filtered_requests_subquery = filtered_requests_subquery.filter(models.Request.source_rse_id == rse_id)
combined_attached_unattached_requests = combined_attached_unattached_requests.filter(models.Request.source_rse_id == rse_id)
elif filter_by_rse == 'destination':
filtered_requests_subquery = filtered_requests_subquery.filter(models.Request.dest_rse_id == rse_id)
combined_attached_unattached_requests = combined_attached_unattached_requests.filter(models.Request.dest_rse_id == rse_id)
filtered_requests_subquery = filtered_requests_subquery.filter(models.Request.state == RequestState.WAITING).subquery()
combined_attached_unattached_requests = combined_attached_unattached_requests.filter(models.Request.state == RequestState.WAITING).subquery()
# group requests and calculate properties like oldest requested_at, amount of children, volume
grouped_requests_subquery = session.query(func.sum(combined_attached_unattached_requests.c.bytes).label('volume'),
func.min(combined_attached_unattached_requests.c.requested_at).label('oldest_requested_at'),
func.count().label('amount_childs'),
combined_attached_unattached_requests.c.name,
combined_attached_unattached_requests.c.scope)\
.group_by(combined_attached_unattached_requests.c.scope, combined_attached_unattached_requests.c.name)\
.subquery()
return grouped_requests_subquery, filtered_requests_subquery | e4399a447e767610c7451f61ad543553168de1d6 | 21,139 |
def then(state1, state2):
"""
Like ``bind``, but instead of a function that returns a stateful action,
just bind a new stateful action.
Equivalent to bind(state1, lambda _: state2)
"""
return bind(state1, lambda _: state2) | ef6200f8776b84a5a9893b894b3d7cd406598f7d | 21,140 |
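For context, a minimal sketch of the `unit`/`bind` pair that `then` relies on, under the assumption that a stateful action is represented as a callable `state -> (value, new_state)`; that representation is an assumption, not taken from the snippet:

```python
def unit(value):
    """Stateful action that returns `value` and leaves the state unchanged."""
    return lambda state: (value, state)

def bind(action, f):
    """Run `action`, pass its result to `f`, then run the action `f` returns."""
    def composed(state):
        value, new_state = action(state)
        return f(value)(new_state)
    return composed

# `then` from above sequences two actions, discarding the first result:
tick = lambda state: (state, state + 1)   # return the counter, then increment it
value, final_state = then(tick, tick)(0)  # value == 1, final_state == 2
```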
from typing import Optional
from typing import Dict
from typing import Any
def get_skyregions_collection(run_id: Optional[int]=None) -> Dict[str, Any]:
"""
Produce Sky region geometry shapes JSON object for d3-celestial.
Args:
run_id (int, optional): Run ID to filter on if not None.
Returns:
skyregions_collection (dict): Dictionary representing a JSON object
containing the sky regions.
"""
skyregions = SkyRegion.objects.all()
if run_id is not None:
skyregions = skyregions.filter(run=run_id)
features = []
for skr in skyregions:
ra_fix = 360. if skr.centre_ra > 180. else 0.
ra = skr.centre_ra - ra_fix
dec = skr.centre_dec
width_ra = skr.width_ra / 2.
width_dec = skr.width_dec / 2.
id = skr.id
features.append(
{
"type": "Feature",
"id": f"SkyRegion{id}",
"properties": {
"n": f"{id:02d}",
"loc": [ra, dec]
},
"geometry": {
"type": "MultiLineString",
"coordinates": [[
[ra+width_ra, dec+width_dec],
[ra+width_ra, dec-width_dec],
[ra-width_ra, dec-width_dec],
[ra-width_ra, dec+width_dec],
[ra+width_ra, dec+width_dec]
]]
}
}
)
skyregions_collection = {
"type": "FeatureCollection",
"features" : features
}
return skyregions_collection | 8d8fe2e46a9d37e774dbdab506f012a0560796e1 | 21,141 |
def construct_sru_query(keyword, keyword_type=None, mat_type=None, cat_source=None):
"""
Creates a readable SRU/CQL query; it does not encode white spaces or parentheses -
this is handled by the session object.
"""
query_elems = []
if keyword is None:
raise TypeError("query argument cannot be None.")
if keyword_type is None:
# take as straight sru query and pass to sru_query method
query_elems.append(keyword.strip())
elif keyword_type == "ISBN":
query_elems.append('srw.bn = "{}"'.format(keyword))
elif keyword_type == "UPC":
query_elems.append('srw.sn = "{}"'.format(keyword))
elif keyword_type == "ISSN":
query_elems.append('srw.in = "{}"'.format(keyword))
elif keyword_type == "OCLC #":
query_elems.append('srw.no = "{}"'.format(keyword))
elif keyword_type == "LCCN":
query_elems.append('srw.dn = "{}"'.format(keyword))
if mat_type is None or mat_type == "any":
pass
elif mat_type == "print":
query_elems.append('srw.mt = "bks"')
elif mat_type == "large print":
query_elems.append('srw.mt = "lpt"')
elif mat_type == "dvd":
query_elems.append('srw.mt = "dvv"')
elif mat_type == "bluray":
query_elems.append('srw.mt = "bta"')
if cat_source is None or cat_source == "any":
pass
elif cat_source == "DLC":
query_elems.append('srw.pc = "dlc"')
return " AND ".join(query_elems) | fbe28156beca73339fa88d200777e25172796864 | 21,142 |
def sitemap_xml():
"""Default Sitemap XML"""
show_years = retrieve_show_years(reverse_order=False)
sitemap = render_template("sitemaps/sitemap.xml",
show_years=show_years)
return Response(sitemap, mimetype="text/xml") | e6be9c98d1a1cd4bbfb04e9ad9676cc4b8521d79 | 21,143 |
def remove_property(product_id, property_id):
"""
Remove the property
"""
property = db.db.session.query(TypeProperty).\
filter_by(product_id = product_id, product_property_id = property_id).first()
# capture the serialized form before deleting; after the commit the instance is
# detached/expired and to_dict() would no longer work
tmp = property.to_dict()
try:
db.db.session.delete(property)
db.db.session.commit()
except Exception as e:
db.db.session.rollback()
print(e)
raise TypePropertyException("Could not remove type property")
return json_util.dumps(tmp)
def format_solution_table_calc(solution, node_ids_to_nodes):
"""
:type solution: dict[int, list[int]]
:type node_ids_to_nodes: dict[int, int]
:rtype: dict[int, str]
"""
new_solution = {}
for (color, path) in solution.items():
new_path = []
for p in path:
back_p = node_ids_to_nodes[p]
new_p = "{0}{1}".format(
chr(back_p % width + ord("A")),
chr(back_p // width + ord("0")),
)
new_path.append(new_p)
new_solution[color] = " ".join(new_path)
return new_solution | 335bc0e90860a9181d5b819a5bb9e44cd44f750d | 21,145 |
import functools
def hashable(func):
"""Decorator for functions with numpy arrays as input arguments that will benefit from caching
Example:
from midgard.math import nputil
from functools import lru_cache
@nputil.hashable
@lru_cache()
def test_func(a: np.ndarray, b: np.ndarray = None):
do_something
return something
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
new_args_list = list()
for arg in args:
if isinstance(arg, np.ndarray):
arg = HashArray(arg)
new_args_list.append(arg)
for k, v in kwargs.items():
if isinstance(v, np.ndarray):
kwargs[k] = HashArray(v)
return func(*new_args_list, **kwargs)
return wrapper | db23cc12f9a322aaae6a585068a5c30194d7be7b | 21,146 |
def object_hash(fd, fmt, repo=None):
""" Function to read the content of a open file, create appropiate object
and write the object to vcs directory and return the hash of the file"""
data = fd.read()
# choosing constructor on the basis of the object type found in header
if fmt == b'commit' : obj = vcsCommit(repo, data)
elif fmt == b'tree' : obj = vcsTree(repo, data)
elif fmt == b'tag' : obj = vcsTag(repo, data)
elif fmt == b'blob' : obj = vcsBlob(repo, data)
else:
raise Exception('Unknown type %s!' % fmt)
return object_write(obj, repo) | b55a4da36934843c111e4d66dc552c556c8d0ba4 | 21,148 |
import io
from pdfminer.converter import TextConverter
from pdfminer.layout import LAParams
from pdfminer.pdfinterp import PDFResourceManager, process_pdf  # legacy pdfminer API
def read_from_pdf(pdf_file):
"""
Read the contents of a PDF file and process them.
:param pdf_file: path to the PDF file
:return: the extracted text content of the PDF file
"""
# read the PDF file contents in binary mode
with open(pdf_file, 'rb') as file:
resource_manage = PDFResourceManager()
return_str = io.StringIO()
lap_params = LAParams()
# convert the content to text
device = TextConverter(
resource_manage,
return_str,
laparams = lap_params
)
process_pdf(resource_manage, device, file)
device.close()
# get the converted text content of the PDF
pdf_content = return_str.getvalue()
print(pdf_content)
return pdf_content
def _descending(dbus_object):
"""
Verify levels of variant values always descend by one.
:param object dbus_object: a dbus object
:returns: None if there was a failure of the property, otherwise the level
:rtype: int or NoneType
None is a better choice than False, for 0, a valid variant level, is always
interpreted as False.
"""
# pylint: disable=too-many-return-statements
if isinstance(dbus_object, dbus.Dictionary):
key_levels = [_descending(x) for x in dbus_object.keys()]
value_levels = [_descending(x) for x in dbus_object.values()]
if any(k is None for k in key_levels) or \
any(v is None for v in value_levels):
return None
max_key_level = max(key_levels) if key_levels != [] else 0
max_value_level = max(value_levels) if value_levels != [] else 0
max_level = max(max_key_level, max_value_level)
variant_level = dbus_object.variant_level
if variant_level == 0:
return max_level
if variant_level != max_level + 1:
return None
else:
return variant_level
elif isinstance(dbus_object, (dbus.Array, dbus.Struct)):
levels = [_descending(x) for x in dbus_object]
if any(l is None for l in levels):
return None
max_level = max(levels) if levels != [] else 0
variant_level = dbus_object.variant_level
if variant_level == 0:
return max_level
if variant_level != max_level + 1:
return None
else:
return variant_level
else:
variant_level = dbus_object.variant_level
return variant_level if variant_level in (0, 1) else None | 55de473807c22c50d8f65597cde390a56dcb9cd6 | 21,150 |
def _is_avconv():
"""
Returns `True` if the `ffmpeg` binary is really `avconv`.
"""
out = _run_command(['ffmpeg', '-version'])
return out and isinstance(out, strtype) and 'DEPRECATED' in out | dc9003623b4497b75d37f4e759f31401ad6261e1 | 21,151 |
def countries(request):
"""
Returns all valid countries and their country codes
"""
return JsonResponse({
"countries": [{
"id": unicode(code),
"name": unicode(name)
} for code, name in list(django_countries.countries)]
}) | 20296279dea898741950715a41b4188f7f5e6724 | 21,152 |
def generate_monomer(species, monomerdict, initlen, initnames, tbobs):
"""
generate a PySB monomer based on species
:param species: a Species object
:param monomerdict: a dictionary with all monomers linked to their species id
:param initlen: number of the initial species
:param initnames: names of the initial species
:param tbobs: ids of the species for which observables should be created
:return: the updated monomerdict
"""
if species.id <= initlen:
name = initnames[species.id - 1]
else:
name = 'sp_' + str(species.id)
sites = ['init']
m = Monomer(name, sites)
monomerdict[species.id] = m
if species.id in tbobs:
Observable('obs' + name, m(init=None))
return monomerdict | 2966473ef084991d0c589c16a8479f6395702b43 | 21,154 |
import logging
from tqdm import tqdm
def convert_bert_tokens(outputs):
"""
Converts BERT tokens into a readable format for the parser, i.e. using Penn Treebank tokenization scheme.
Does the heavy lifting for this script.
"""
logging.info("Adjusting BERT indices to align with Penn Treebank.")
mapped_outputs = [] # Will hold the final results: sentences and mapped span indices
for output in tqdm(outputs):
comb_text = [word for sentence in output['sentences'] for word in sentence]
sentence_start_idx = 0
sent_so_far = []
word_so_far = []
sentence_map = output['sentence_map']
subtoken_map = output['subtoken_map']
clusters = output['clusters']
# preds = output['predicted_clusters']
# top_mentions = output['top_spans']
for i, subword in enumerate(comb_text):
if i != 0 and sentence_map[i - 1] != sentence_map[i]: # New sentence
sent_so_far.append(convert_bert_word(''.join(word_so_far)))
word_so_far = []
mapped_outputs.append({'doc_key': output['doc_key'],
'num_speakers': num_speakers(output['speakers']),
'words': sent_so_far,
'clusters': adjust_cluster_indices(clusters, subtoken_map, sentence_start_idx, i - 1)
# 'predicted_clusters': adjust_cluster_indices(preds, subtoken_map, sentence_start_idx, i - 1),
# 'top_mentions': adjust_top_mentions(top_mentions, subtoken_map, sentence_start_idx, i - 1)
})
sent_so_far = []
sentence_start_idx = i
elif i != 0 and subtoken_map[i - 1] != subtoken_map[i]: # New word
fullword = ''.join(word_so_far)
if fullword != '[SEP][CLS]': # Need this because sentences indices increment at SEP and CLS tokens
sent_so_far.append(convert_bert_word(fullword))
else:
sentence_start_idx += 2 # The sentence actually starts two tokens later due to [SEP] and [CLS]
word_so_far = []
word_so_far.append(subword)
return mapped_outputs | 27cef75fc48fb87e20f77af95e265406c6b8c520 | 21,155 |
def calculate_iou(ground_truth_path, prediction_path):
""" Calculate the intersection over union of two raster images.
Args:
ground_truth_path (str): Path to the ground truth raster image.
prediction_path (str): Path to the prediction raster image.
Returns:
float: The intersection over union of the two raster datasets.
"""
with rasterio.open(ground_truth_path) as ground_truth_dataset:
with rasterio.open(prediction_path) as prediction_dataset:
ground_truth_array = ground_truth_dataset.read(1)
prediction_array = prediction_dataset.read(1)
intersection = np.logical_and(ground_truth_array, prediction_array)
union = np.logical_or(ground_truth_array, prediction_array)
iou = np.sum(intersection) / np.sum(union)
return iou | 70e49e787fe57f5c4d94a043d41b96de1b14fd39 | 21,157 |
from bokeh.models import ColumnDataSource
import pandas as pd
import warnings
def bokeh_scatter(x,
y=None,
*,
xlabel='x',
ylabel='y',
title='',
figure=None,
data=None,
saveas='scatter',
copy_data=False,
**kwargs):
"""
Create an interactive scatter plot with bokeh
:param x: arraylike or key for data for the x-axis
:param y: arraylike or key for data for the y-axis
:param data: source for the data of the plot (pandas Dataframe for example)
:param xlabel: label for the x-axis
:param ylabel: label for the y-axis
:param title: title of the figure
:param figure: bokeh figure (optional), if provided the plot will be added to this figure
:param saveas: filename of the output file
:param copy_data: bool, if True the data argument will be copied
Kwargs will be passed on to :py:class:`masci_tools.vis.bokeh_plotter.BokehPlotter`.
If the arguments are not recognized they are passed on to the bokeh function `scatter`
"""
if isinstance(x, (dict, pd.DataFrame, ColumnDataSource)) or x is None:
warnings.warn(
'Passing the source as first argument is deprecated. Please pass in source by the keyword data'
'and xdata and ydata as the first arguments', DeprecationWarning)
data = x
x = kwargs.pop('xdata', 'x')
y = kwargs.pop('ydata', 'y')
plot_data = process_data_arguments(data=data,
x=x,
y=y,
copy_data=copy_data,
single_plot=True,
same_length=True,
use_column_source=True)
entry, source = plot_data.items(first=True)
plot_params.set_defaults(default_type='function', name=entry.y)
kwargs = plot_params.set_parameters(continue_on_error=True, **kwargs)
p = plot_params.prepare_figure(title, xlabel, ylabel, figure=figure)
plot_kwargs = plot_params.plot_kwargs(plot_type='scatter')
res = p.scatter(x=entry.x, y=entry.y, source=source, **plot_kwargs, **kwargs)
plot_params.add_tooltips(p, res, entry)
if plot_params['level'] is not None:
res.level = plot_params['level']
plot_params.draw_straight_lines(p)
plot_params.set_limits(p)
plot_params.save_plot(p, saveas)
return p | d2bf64efcd751f3dea0d63c1c02af14952684bd7 | 21,158 |
from core.exceptions import RuleArgumentsError
from utils.format import format_output
from utils.profile import clean_profiles
from utils.rule import (add_rule, check_rule, get_all_rules, get_rules, link_rule,
move_rule, remove_rule, unlink_rule, update_rule)
from utils.type import check_type, update_type
def rulesActionsHandler(args):
""" Check rule action and execute associates functions.
:param args: Rule action
:return: Return result from the executed functions.
"""
if 'get' == args.action:
# get rule arguments :
# - id:
# type: int
# args number : 1 or more
# required: False
# - type:
# type: str
# args number: 1 or more
# required: False
if not args.id and not args.type:
rules = get_all_rules()
else:
if args.type:
check_type(type=args.type)
rules = get_rules(type=args.type, id=args.id)
return format_output(rules)
if 'add' == args.action:
# add rule arguments :
# - type:
# type: str
# args number : 1
# required: True
# - desc:
# type: str
# args number: 1
# required: True
# - auditcmd:
# type: str
# args number: 1 or more
# required: False
# note: can't be set with auditscript
# - remedcmd:
# type: str
# args number: 1 or more
# required: False
# note: can't be set with remedscript
# - auditscript:
# type: str
# args number: 1
# required: False
# note: can't be set with auditcmd
# - remedscript:
# type: str
# args number: 1
# required: False
# note: can't be set with remedcmd
try:
if args.audit_cmd and args.audit_script:
raise RuleArgumentsError('Rule cant have auditscript AND auditcmd at the same time')
# elif args.remed_cmd and args.remed_script:
# raise RuleArgumentsError('Rule cant have remedscript AND remedcmd at the same time')
elif not (args.audit_cmd or args.audit_script):
raise RuleArgumentsError('Rule must have at least one auditcmd OR one auditscript')
# elif not (args.remed_cmd or args.remed_script):
# raise RuleArgumentsError('Rule must have at least one remedcmd OR one remedscript')
except RuleArgumentsError as rvd:
print rvd
exit(rvd.code)
check_type(type=args.type)
updated_type = add_rule(desc=args.desc, type=args.type, audit_cmd=args.audit_cmd,
audit_script=args.audit_script, remed_cmd=args.remed_cmd,
remed_script=args.remed_script)
return update_type(type=updated_type)
if 'update' == args.action:
# update rule arguments :
# - type:
# type: str
# args number : 1
# required: True
# - id:
# type: int
# args number : 1
# required: True
# - desc:
# type: str
# args number: 1
# required: False
# - auditcmd:
# type: str
# args number: 1 or more
# required: False
# note: can't be set with auditscript
# - remedcmd:
# type: str
# args number: 1 or more
# required: False
# note: can't be set with remedscript
# - auditscript:
# type: str
# args number: 1
# required: False
# note: can't be set with auditcmd
# - remedscript:
# type: str
# args number: 1
# required: False
# note: can't be set with remedcmd
try:
# if args.audit_cmd and args.audit_script:
# raise RuleArgumentsError('Rule cant have auditscript AND auditcmd at the same time')
# elif args.remed_cmd and args.remed_script:
# raise RuleArgumentsError('Rule cant have remedscript AND remedcmd at the same time')
if not (args.audit_cmd or args.audit_script):
raise RuleArgumentsError('Rule must have at least one auditcmd OR one auditscript')
except RuleArgumentsError as rvd:
print rvd
exit(rvd.code)
check_rule(type=args.type, id=args.id)
updated_type = update_rule(desc=args.desc, type=args.type, audit_cmd=args.audit_cmd,
audit_script=args.audit_script, remed_cmd=args.remed_cmd,
remed_script=args.remed_script, id=args.id)
return update_type(updated_type)
if 'remove' == args.action:
# remove rule arguments :
# - type:
# type: str
# args number : 1
# required: True
# - id:
# type: int
# args number : 1
# required: True
# - all
try:
if args.id:
if args.all:
raise RuleArgumentsError("--all option doesn't need an id (all rules will be deleted)")
else:
check_rule(type=args.type, id=args.id)
clean_profiles(type=args.type, id=args.id)
updated_type = remove_rule(type=args.type, id=args.id)
return update_type(updated_type)
else:
if args.all:
clean_profiles(type=args.type)
updated_type = remove_rule(type=args.type)
return update_type(updated_type)
else:
raise RuleArgumentsError("For removing one rule, id must be set !")
except RuleArgumentsError as rae:
print rae
exit(rae.code)
if 'link' == args.action:
# link rule arguments :
# - profile:
# type: str
# args number : 1 or more
# required: True
# - type:
# type: str
# args number : 1
# required: True
# - id:
# type: int
# args number: 1 or more
# required: False
# - all
try:
if args.all and not args.type:
raise RuleArgumentsError("--all options can't be used without rule type")
if args.id:
if args.all:
raise RuleArgumentsError("--all option doesn't need an id (all rules will be added)")
check_rule(type=args.type, id=args.id)
return link_rule(profile=args.profile, type=args.type, id=args.id)
else:
return link_rule(profile=args.profile, type=args.type, id=-1)
except RuleArgumentsError as rae:
print rae
exit(rae.code)
if 'unlink' == args.action:
# unlink rule arguments :
# - profile:
# type: str
# args number : 1 or more
# required: True
# - type:
# type: str
# args number : 1
# required: True
# - id:
# type: int
# args number: 1 or more
# required: False
# - all
try:
if args.id:
if args.all:
raise RuleArgumentsError("--all option doesn't need an id (all rules will be added)")
else:
check_rule(type=args.type, id=args.id)
return unlink_rule(profile=args.profile, type=args.type, id=args.id)
else:
return unlink_rule(profile=args.profile, type=args.type, id=-1)
except RuleArgumentsError as rae:
print rae
exit(rae.code)
if 'move' == args.action:
# move rule arguments :
# - type:
# type: str
# args number : 1
# required: True
# - id:
# type: int
# args number: 1 or more
# required: True
# - newtype:
# type: str
# args number : 1
# required: True
# - all
check_type(args.type)
check_type(args.newtype)
check_rule(type=args.type, id=args.id)
updated_oldtype, updated_newtype = move_rule(oldtype=args.type, id=args.id, newtype=args.newtype)
update_type(updated_oldtype)
return update_type(updated_newtype)
return | 991873d489486b5e6ffc9676a2d9a6e5af9e944b | 21,159 |
def superposition_training_mnist(model, X_train, y_train, X_test, y_test, num_of_epochs, num_of_tasks, context_matrices, nn_cnn, batch_size=32):
"""
Train model for 'num_of_tasks' tasks, each task is a different permutation of input images.
Check how accuracy for original images is changing through tasks using superposition training.
:param model: Keras model instance
:param X_train: train input data
:param y_train: train output labels
:param X_test: test input data
:param y_test: test output labels
:param num_of_epochs: number of epochs to train the model
:param num_of_tasks: number of different tasks (permutations of original images)
:param context_matrices: multidimensional numpy array with random context (binary superposition)
:param nn_cnn: usage of (convolutional) neural network (possible values: 'nn' or 'cnn')
:param batch_size: batch size - number of samples per gradient update (default = 32)
:return: list of test accuracies for 10 epochs for each task
"""
original_accuracies = []
# context_multiplication(model, context_matrices, 0)
# first training task - original MNIST images
history, _, accuracies = train_model(model, X_train, y_train, X_test, y_test, num_of_epochs, nn_cnn, batch_size, validation_share=0.1,
mode='superposition', context_matrices=context_matrices, task_index=0)
original_accuracies.extend(accuracies)
print_validation_acc(history, 0)
# other training tasks - permuted MNIST data
for i in range(num_of_tasks - 1):
print("\n\n Task: %d \n" % (i + 1))
# multiply current weights with context matrices for each layer (without changing weights from bias node)
if nn_cnn == 'nn':
context_multiplication(model, context_matrices, i + 1)
elif nn_cnn == 'cnn':
context_multiplication_CNN(model, context_matrices, i + 1)
permuted_X_train = permute_images(X_train)
history, _, accuracies = train_model(model, permuted_X_train, y_train, X_test, y_test, num_of_epochs, nn_cnn, batch_size, validation_share=0.1,
mode='superposition', context_matrices=context_matrices, task_index=i + 1)
original_accuracies.extend(accuracies)
print_validation_acc(history, i + 1)
return original_accuracies | e0e837c3a92e047ce894c166d09e2ce6a58b3035 | 21,160 |
import json
def json2dict(astr: str) -> dict:
"""将json字符串转为dict类型的数据对象
Args:
astr: json字符串转为dict类型的数据对象
Returns:
返回dict类型数据对象
"""
return json.loads(astr) | f13b698dcf7dda253fd872bb464594901280f03b | 21,161 |
from typing import Any
def any(wanted_type=None):
"""Matches against type of argument (`isinstance`).
If you want to match *any* type, use either `ANY` or `ANY()`.
Examples::
when(mock).foo(any).thenReturn(1)
verify(mock).foo(any(int))
"""
return Any(wanted_type) | 4c92d19a2168f815a88f2fa8aa56f0d656a5a534 | 21,162 |
def list_top_level_blob_folders(container_client):
"""
List all top-level folders in the ContainerClient object *container_client*
"""
top_level_folders,_ = walk_container(container_client,max_depth=1,store_blobs=False)
return top_level_folders | baf41750aae23df6d051986f24814d0f286afb6b | 21,163 |
import string
def keyword_encipher(message, keyword, wrap_alphabet=KeywordWrapAlphabet.from_a):
"""Enciphers a message with a keyword substitution cipher.
wrap_alphabet controls how the rest of the alphabet is added
after the keyword.
0 : from 'a'
1 : from the last letter in the sanitised keyword
2 : from the largest letter in the sanitised keyword
>>> keyword_encipher('test message', 'bayes')
'rsqr ksqqbds'
>>> keyword_encipher('test message', 'bayes', KeywordWrapAlphabet.from_a)
'rsqr ksqqbds'
>>> keyword_encipher('test message', 'bayes', KeywordWrapAlphabet.from_last)
'lskl dskkbus'
>>> keyword_encipher('test message', 'bayes', KeywordWrapAlphabet.from_largest)
'qspq jsppbcs'
"""
cipher_alphabet = keyword_cipher_alphabet_of(keyword, wrap_alphabet)
cipher_translation = ''.maketrans(string.ascii_lowercase, cipher_alphabet)
return unaccent(message).lower().translate(cipher_translation) | 155e997e1199f4adb25e20ad2c6e0047ffc7f7fd | 21,164 |
import PIL
def plt_to_img(dummy: any = None, **kwargs) -> PIL.Image.Image:
"""
Render the current figure as a (PIL) image
- Take dummy arg to support expression usage `plt_to_img(...)` as well as statement usage `...; plt_to_img()`
"""
return PIL.Image.open(plot_to_file(**kwargs)) | d07be803a2f3c71fa62b920c0a72954578d24f59 | 21,165 |
def _escape_char(c, escape_char=ESCAPE_CHAR):
"""Escape a single character"""
buf = []
for byte in c.encode('utf8'):
buf.append(escape_char)
buf.append('%X' % _ord(byte))
return ''.join(buf) | a4f4c69eb51a338d54b685336c036d991c295666 | 21,166 |
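An illustrative call, passing the escape character explicitly and assuming the module-level `_ord` behaves like the builtin `ord`:

```python
_escape_char("é", escape_char="_")   # -> '_C3_A9'  ('é' is 0xC3 0xA9 in UTF-8)
```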
def error_log_to_html(error_log):
"""Convert an error log into an HTML representation"""
doc = etree.Element('ul')
for l in error_log:
if l.message.startswith('<runtrace '):
continue
el = etree.Element('li')
el.attrib['class'] = 'domain_{domain_name} level_{level_name} type_{type_name}'.format( # NOQA: E501
domain_name=l.domain_name,
level_name=l.level_name,
type_name=l.type_name,
)
el.text = '{msg:s} [{line:d}:{column:d}]'.format(
msg=l.message,
line=l.line,
column=l.column,
)
doc.append(el)
return doc | d2df223a0be82c5f58cf57be504833061d1afd40 | 21,167 |
def get_scheduler(optimizer, opt):
"""Return a learning rate scheduler
Parameters:
optimizer -- the optimizer of the network
opt (option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions.
opt.lr_policy is the name of learning rate policy: linear | step | plateau | cosine
For 'linear', we keep the same learning rate for the first <opt.n_epochs> epochs
and linearly decay the rate to zero over the next <opt.n_epochs_decay> epochs.
For other schedulers (step, plateau, and cosine), we use the default PyTorch schedulers.
See https://pytorch.org/docs/stable/optim.html for more details.
"""
if opt.lr_policy == "linear":
def lambda_rule(epoch):
lr_l = 1.0 - max(0, epoch + opt.epoch_count - opt.n_epochs) / float(opt.n_epochs_decay + 1)
return lr_l
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
elif opt.lr_policy == "step":
scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
elif opt.lr_policy == "plateau":
scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode="min", factor=0.2, threshold=0.01, patience=5)
elif opt.lr_policy == "cosine":
scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.n_epochs, eta_min=0)
else:
raise NotImplementedError("learning rate policy [%s] is not implemented" % opt.lr_policy)
return scheduler | b8996d9963533249f1b387a36bac3f209e70daff | 21,168 |
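A minimal usage sketch, assuming a CycleGAN-style options object carrying the attributes the function reads (`lr_policy`, `epoch_count`, `n_epochs`, `n_epochs_decay`) and an ordinary PyTorch optimizer:

```python
from types import SimpleNamespace

import torch
from torch.optim import lr_scheduler  # name referenced inside get_scheduler

model = torch.nn.Linear(4, 2)
optimizer = torch.optim.Adam(model.parameters(), lr=2e-4)
opt = SimpleNamespace(lr_policy="linear", epoch_count=1, n_epochs=100, n_epochs_decay=100)

scheduler = get_scheduler(optimizer, opt)
for epoch in range(opt.n_epochs + opt.n_epochs_decay):
    # ... train for one epoch ...
    scheduler.step()  # decay the learning rate once per epoch
```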
def train_PCA(data, num_components):
"""
Normalize the face by subtracting the mean image
Calculate the eigenValue and eigenVector of the training face, in descending order
Keep only num_components eigenvectors (corresponding to the num_components largest eigenvalues)
Each training face is represented in this basis by a vector
Calculate the weight vectors for training images
Normalized training face = F - mean = w1*u1 + w2*u2 + ... + wk*uk => w = u.T * face
:param data: M * N^2, each row corresponding to one image reshaped into a 1-D vector
:param num_components: The number of the largest eigenVector to be kept
:return:
mean_image: 1 * N^2
eigenVectors: num_components * N^2 matrix, each row represents each eigenface, in descending order
weiVec_train: M * K matrix, each row is the weight vectors used to represent the training face
"""
mean_image = np.mean(data, axis=0)
data = data - mean_image
eigenValues, eigenVectors = eigen(data)
eigenVectors = eigenVectors[:num_components]
weiVec_train = np.dot(data, eigenVectors.T)
return mean_image, eigenVectors, weiVec_train | 2404fca9fe053c275b187e2435b497166ed7f4d8 | 21,171 |
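A hedged sketch of how the returned basis might be used on held-out faces, following the w = u.T * (face - mean) relation in the docstring; `test_faces` is a hypothetical M_test * N^2 array, not part of the original code:

```python
import numpy as np

# project held-out faces onto the learned eigenface basis
weiVec_test = np.dot(test_faces - mean_image, eigenVectors.T)

# approximate reconstruction back in pixel space
reconstructed = mean_image + np.dot(weiVec_test, eigenVectors)
```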
def count_revoked_tickets_for_party(party_id: PartyID) -> int:
"""Return the number of revoked tickets for that party."""
return db.session \
.query(DbTicket) \
.filter_by(party_id=party_id) \
.filter_by(revoked=True) \
.count() | 6ad857a4630d2add7d2d46b9e930178a18f89e29 | 21,172 |
def get_data(data_x, data_y):
"""
split data from loaded data
:param data_x:
:param data_y:
:return: Arrays
"""
print('Data X Length', len(data_x), 'Data Y Length', len(data_y))
print('Data X Example', data_x[0])
print('Data Y Example', data_y[0])
train_x, test_x, train_y, test_y = train_test_split(data_x, data_y, test_size=0.4, random_state=40)
dev_x, test_x, dev_y, test_y, = train_test_split(test_x, test_y, test_size=0.5, random_state=40)
print('Train X Shape', train_x.shape, 'Train Y Shape', train_y.shape)
print('Dev X Shape', dev_x.shape, 'Dev Y Shape', dev_y.shape)
print('Test X Shape', test_x.shape, 'Test Y Shape', test_y.shape)
return train_x, train_y, dev_x, dev_y, test_x, test_y | a40406da641b36784719da3c3e375130e013e889 | 21,175 |
async def api_download_profile() -> str:
"""Downloads required files for the current profile."""
global download_status
assert core is not None
download_status = {}
def update_status(url, path, file_key, done, bytes_downloaded, bytes_expected):
bytes_percent = 100
if (bytes_expected is not None) and (bytes_expected > 0):
bytes_percent = int(bytes_downloaded / bytes_expected * 100)
download_status[file_key] = {"done": done, "bytes_percent": bytes_percent}
await rhasspyprofile.download_files(
core.profile,
status_fun=update_status,
session=get_http_session(),
ssl_context=ssl_context,
)
download_status = {}
return "OK" | 0dd6aaf17b49f8e48eb72c8adf726f2852937f18 | 21,176 |
def cumulative_segment_wrapper(fun):
"""Wrap a cumulative function such that it can be applied to segments.
Args:
fun: The cumulative function
Returns:
Wrapped function.
"""
def wrapped_segment_op(x, segment_ids, **kwargs):
with tf.compat.v1.name_scope(
None, default_name=fun.__name__+'_segment_wrapper', values=[x]):
segments, _ = tf.unique(segment_ids)
n_segments = tf.shape(segments)[0]
output_array = tf.TensorArray(
x.dtype, size=n_segments, infer_shape=False)
def loop_cond(i, out):
return i < n_segments
def execute_cumulative_op_on_segment(i, out):
segment_indices = tf.where(tf.equal(segment_ids, segments[i]))
seg_begin = tf.reduce_min(segment_indices)
seg_end = tf.reduce_max(segment_indices)
segment_data = x[seg_begin:seg_end+1]
out = out.write(i, fun(segment_data, **kwargs))
return i+1, out
i_end, filled_array = tf.while_loop(
loop_cond,
execute_cumulative_op_on_segment,
loop_vars=(tf.constant(0), output_array),
parallel_iterations=10,
swap_memory=True
)
output_tensor = filled_array.concat()
output_tensor.set_shape(x.get_shape())
return output_tensor
return wrapped_segment_op | 5471e525ab73855927fe04530b8ec6e14a4436d9 | 21,177 |
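As an illustrative (not source-provided) use of the wrapper, it can turn `tf.cumsum` into a per-segment cumulative sum; this assumes contiguous segment ids, as the index-range logic above requires, and the TF1-style API the snippet is written against:

```python
import tensorflow as tf

segment_cumsum = cumulative_segment_wrapper(tf.cumsum)

x = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0])
segment_ids = tf.constant([0, 0, 1, 1, 1])

out = segment_cumsum(x, segment_ids)
# expected result: [1., 3., 3., 7., 12.]  (cumulative sum restarted at each segment)
```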
from typing import Any
def read_pet_types(
skip: int = 0,
limit: int = 100,
db: Session = Depends(deps.get_db),
current_user: models.User = Depends(deps.get_current_active_superuser)
) -> Any:
"""
Read pet types
:return:
"""
if not crud.user.is_superuser(current_user):
raise HTTPException(status_code=403, detail="Not enough permissions")
return crud.pettype.get_all(db=db, skip=skip, limit=limit) | 60de7c25b305bbb1a628db27559bcdb3abc5fb24 | 21,178 |
def get_approves_ag_request():
"""Creates the prerequisites for - and then creates and returns an instance of - ApprovesAgRequest."""
# Creates an access group request and an approver (required to create an instance of ApprovesAgRequest).
agr = AccessGroupRequest(reader=None, ag=None, justification=MAGIC_STRING)
approver = Approver(email="[email protected]", password="abc123", name="Peter", surname="Parker")
db.session.add(agr)
db.session.add(approver)
# Returns a ApprovesAgRequest object.
return ApprovesAgRequest(ag_request=agr, approver=approver) | 139900d7948b4bd836410be09ba35a21954f2dc4 | 21,179 |
import requests
def currency_history(
base: str = "USD", date: str = "2020-02-03", api_key: str = ""
) -> pd.DataFrame:
"""
Latest data from currencyscoop.com
https://currencyscoop.com/api-documentation
:param base: The base currency you would like to use for your rates
:type base: str
:param date: Specific date, e.g., "2020-02-03"
:type date: str
:param api_key: Account -> Account Details -> API KEY (use as password in external tools)
:type api_key: str
:return: Latest data of base currency
:rtype: pandas.DataFrame
"""
payload = {"base": base, "date": date, "api_key": api_key}
url = "https://api.currencyscoop.com/v1/historical"
r = requests.get(url, params=payload)
temp_df = pd.DataFrame.from_dict(r.json()["response"])
temp_df["date"] = pd.to_datetime(temp_df["date"])
return temp_df | ed8c547e433a7f08e67863aca86b991bd746ccbb | 21,180 |
from typing import Type
from typing import Dict
def _ref_tier_copy(source_eaf: Type[Eaf] = None,
target_eaf: Type[Eaf] = None,
source_tier_name: str = "",
target_tier_name: str = "",
target_parent_tier_name: str = "",
override_params: Dict[str, str] = {}):
"""
Copy annotations from a ref tier in one EAF to a new ref tier in another EAF
:param source_eaf: The Eaf object to copy from
:param target_eaf: The Eaf object to write to
:param source_tier_name: Name of the tier to get
:param target_tier_name: The name to call this tier in the destination
:param target_parent_tier_name: The name of the parent for the ref tier in the destination object
:param override_params: Use this to change tier params from what the tier has in the source file
:return:
"""
params = override_params if override_params else source_eaf.get_parameters_for_tier(source_tier_name)
target_eaf.add_tier(target_tier_name, ling=params["LINGUISTIC_TYPE_REF"], parent=target_parent_tier_name, tier_dict=params)
annotations = source_eaf.get_ref_annotation_data_for_tier(source_tier_name)
for annotation in annotations:
target_eaf.add_ref_annotation(id_tier=target_tier_name,
tier2=target_parent_tier_name,
time=annotation[0]+1,
value=annotation[2])
return target_eaf | f0c2fe27446d4a1f992f33c7610bc177d0e2c896 | 21,184 |
def fibonacci(length=10):
"""Get fibonacci sequence given it length.
Parameters
----------
length : int
The length of the desired sequence.
Returns
-------
sequence : list of int
The desired Fibonacci sequence
"""
if length < 1:
raise ValueError("Sequence length must be > 0")
sequence = [0] * (length + 2)
sequence[0] = 0
sequence[1] = 1
for i in range(2, len(sequence)):
sequence[i] = sequence[i - 1] + sequence[i - 2]
return sequence[: -2] | afa3ef63a663b4e89e5c4a694315083debdbab59 | 21,185 |
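A quick check of the function above:

```python
fibonacci(10)
# -> [0, 1, 1, 2, 3, 5, 8, 13, 21, 34]
fibonacci(1)
# -> [0]
```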
def get_direct_hit_response(request, query, snuba_params, referrer):
"""
Checks whether a query is a direct hit for an event, and if so returns
a response. Otherwise returns None
"""
event_id = normalize_event_id(query)
if event_id:
snuba_args = get_snuba_query_args(
query=u'id:{}'.format(event_id),
params=snuba_params)
results = raw_query(
selected_columns=SnubaEvent.selected_columns,
referrer=referrer,
**snuba_args
)['data']
if len(results) == 1:
response = Response(
serialize([SnubaEvent(row) for row in results], request.user)
)
response['X-Sentry-Direct-Hit'] = '1'
return response | 4ffc0dcd5dbac56fc60e2414c1952e629f1fc951 | 21,187 |
from typing import List
from typing import Tuple
from typing import Set
def _canonicalize_clusters(clusters: List[List[Tuple[int, int]]]) -> List[List[Tuple[int, int]]]:
"""
The data might include 2 annotated spans which are identical,
but have different ids. This checks all clusters for spans which are
identical, and if it finds any, merges the clusters containing the
identical spans.
"""
merged_clusters: List[Set[Tuple[int, int]]] = []
for cluster in clusters:
cluster_with_overlapping_mention = None
for mention in cluster:
# Look at clusters we have already processed to
# see if they contain a mention in the current
# cluster for comparison.
for cluster2 in merged_clusters:
if mention in cluster2:
# first cluster in merged clusters
# which contains this mention.
cluster_with_overlapping_mention = cluster2
break
# Already encountered overlap - no need to keep looking.
if cluster_with_overlapping_mention is not None:
break
if cluster_with_overlapping_mention is not None:
# Merge cluster we are currently processing into
# the cluster in the processed list.
cluster_with_overlapping_mention.update(cluster)
else:
merged_clusters.append(set(cluster))
return [list(c) for c in merged_clusters] | d8435e6859e1f9720d7a6f1ec7dd1e2d51df5502 | 21,189 |
def remove_outliers(matches, keypoints):
"""
Calculate fundamental matrix between 2 images to remove incorrect matches.
Return matches with outlier removed. Rejects matches between images if there are < 20
:param matches: List of lists of lists where matches[i][j][k] is the kth cv2.Dmatch object for images i and j
:param keypoints: List of lists of cv2.Keypoint objects. keypoints[i] is list for image i.
"""
for i in range(len(matches)):
for j in range(len(matches[i])):
if j <= i: continue
if len(matches[i][j]) < 20:
matches[i][j] = []
continue
kpts_i = []
kpts_j = []
for k in range(len(matches[i][j])):
kpts_i.append(keypoints[i][matches[i][j][k].queryIdx].pt)
kpts_j.append(keypoints[j][matches[i][j][k].trainIdx].pt)
kpts_i = np.int32(kpts_i)
kpts_j = np.int32(kpts_j)
F, mask = cv2.findFundamentalMat(kpts_i, kpts_j, cv2.FM_RANSAC, ransacReprojThreshold=3)
if np.linalg.det(F) > 1e-7: raise ValueError(f"Bad F_mat between images: {i}, {j}. Determinant: {np.linalg.det(F)}")
matches[i][j] = np.array(matches[i][j])
if mask is None:
matches[i][j] = []
continue
matches[i][j] = matches[i][j][mask.ravel() == 1]
matches[i][j] = list(matches[i][j])
if len(matches[i][j]) < 20:
matches[i][j] = []
continue
return matches | 53b70f98389a33ba6a28c65fab8862bc629d2f0d | 21,190 |
def err_comp(uh, snap, times_offline, times_online):
"""
Computes the absolute l2 error norm and the rms error
norm between the true solution and the nirom solution projected
on to the full dimensional space
"""
err = {}
w_rms = {}
soln_names = uh.keys()
# ky = list(uh.keys())[0]
N = snap[list(uh.keys())[0]].shape[0]
tstep = np.searchsorted(times_offline, times_online)
for key in soln_names:
interp = uh[key]
true = snap[key][:, tstep]
err[key] = np.linalg.norm(true - interp, axis=0)
w_rms[key] = err[key]/(np.sqrt(N))
return w_rms | d64b061ec1cb3f8d7e9247cc5a74c4b6b852bc3b | 21,191 |
from datetime import datetime
def calc_stock_state(portfolio,code:int,date:datetime,stocks,used_days:int):
"""
Compute the state, consisting of:
- time-series information on stock prices, technical indicators, and trading volume
- total assets and number of shares held
Args:
stocks: dictionary containing the unit share count and the open, close, high, low, and volume data
used_days: number of days of information to use
"""
stock_df=stocks[code]['prices']
date=datetime(date.year,date.month,date.day) #convert to datetime
try:
time_series_array=stock_df[stock_df.index<=date][-used_days:].values
except Exception as e:
logger.error("datetime comparison error")
logger.error(e)
time_series_array=time_series_array/time_series_array[0] #normalization
time_series_list=list(time_series_array.flatten())
s1=portfolio.initial_deposit
s2=portfolio.stocks[code].total_cost # total acquisition cost
s3=portfolio.stocks[code].current_count # number of shares currently held
s4=portfolio.stocks[code].average_cost # average acquisition price
return time_series_list+[s1,s2,s3,s4] | 7aec335e15d5c169bfbaf7995614c532c31bd353 | 21,192 |
def lowercase_words(words):
"""
Lowercases a list of words
Parameters
-----------
words: list of words to process
Returns
-------
Processed list of words where words are now all lowercase
"""
return [word.lower() for word in words] | b6e8658f35743f6729a9f8df229b382797b770f6 | 21,193 |
def convert_images_to_arrays_train(file_path, df):
"""
Converts each image to an array, and appends each array to a new NumPy
array, based on the image column equaling the image file name.
INPUT
file_path: Specified file path for resized test and train images.
df: Pandas DataFrame being used to assist file imports.
OUTPUT
NumPy array of image arrays.
"""
lst_imgs = [l for l in df['train_image_name']]
return np.array([np.array(Image.open(file_path + img)) for img in lst_imgs]) | bab9ccc350c891d8c8dc634a431309490533f8ad | 21,194 |
def get_projection_matrix(X_src, X_trg, orthogonal, direction='forward', out=None):
"""
X_src: ndarray
X_trg: ndarray
orthogonal: bool
direction: str
returns W_src if 'forward', W_trg otherwise
"""
xp = get_array_module(X_src, X_trg)
if orthogonal:
if direction == 'forward':
u, s, vt = xp.linalg.svd(xp.dot(X_trg.T, X_src))
W = xp.dot(vt.T, u.T, out=out)
elif direction == 'backward':
u, s, vt = xp.linalg.svd(xp.dot(X_src.T, X_trg))
W = xp.dot(vt.T, u.T, out=out)
else:
if direction == 'forward':
W = xp.dot(xp.linalg.pinv(X_src), X_trg, out=out)
elif direction == 'backward':
W = xp.dot(xp.linalg.pinv(X_trg), X_src, out=out)
return W | ef7f722e6beeb652069270afd81315a951d2a925 | 21,195 |
def _standardize_df(data_frame):
"""
Helper function which subtracts the column mean and divides by the column standard deviation.
:param data_frame: (pd.DataFrame): to standardize
:return: (pd.DataFrame): standardized data frame
"""
return data_frame.sub(data_frame.mean(), axis=1).div(data_frame.std(), axis=1) | cbe0e1f5c507181a63193a4e08f4ed8139d9e129 | 21,196 |
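A small illustration of the helper above (values chosen arbitrarily); note that pandas `std` uses the sample standard deviation (ddof=1):

```python
import pandas as pd

df = pd.DataFrame({"a": [1.0, 2.0, 3.0], "b": [10.0, 20.0, 40.0]})
z = _standardize_df(df)
# each column of z now has mean 0 and (sample) standard deviation 1
```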
def has_edit_metadata_permission(user, record):
"""Return boolean whether user can update record."""
return EditMetadataPermission(user, record).can() | fd2a60d27151181c02d5a0fb6548f28beaa5b2b3 | 21,197 |
def truncate_chars_middle(text, limit, sep="..."):
"""
Truncates a given string **text** in the middle, so that **text** has length **limit** if the number of characters
is exceeded, or else **len(text)** if it isn't.
Since this is a template filter, no exceptions are raised when they would normally do.
:param text: the text to truncate.
:param limit: the maximum length of **text**.
:param sep: the separator to display in place of the (**len(text) - limit**) truncated characters.
:return: a truncated version of **text**.
"""
if not text or limit < 0:
return ""
length = len(text)
if length < limit:
return text
else:
first_half = ceil(limit / 2)
second_half = length - floor(limit / 2)
return text[:first_half] + sep + text[second_half:] | e08e6ec0b3522104d54e6690361d6ecf297f5566 | 21,198 |
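For example, with the default separator and assuming the module-level `ceil`/`floor` imports:

```python
truncate_chars_middle("abcdefghij", 6)
# -> 'abc...hij'
truncate_chars_middle("short", 10)
# -> 'short'
```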
import re
def parse_block(block, site_name, site_num, year):
"""Parse a main data block from a BBC file"""
# Cleanup difficult issues manually
# Combination of difficult \n's and OCR mistakes
replacements = {'Cemus': 'Census',
'Description of plot': 'Description of Plot',
'Description Oi Plot': 'Description of Plot',
'Acknowledgmentsz': 'Acknowledgments: ',
'Other Observers:]': 'Other Observers: ',
'Other 0berservers': 'Other Observers: ',
'0ther Observerers': 'Other Observers: ',
'Other 0bservers': 'Other Observers: ',
'Other Observers.': 'Other Observers:',
'Other Observers]': 'Other Observers:',
'Continnity': 'Continuity',
'lViagnolia': 'Magnolia',
'lVildlife': 'Wildlife',
'Mallard ): American Black Duck hybrid': 'Mallard x American Black Duck hybrid',
'Observerszj': 'Observers',
'Bobolink; 9.0 territories': 'Bobolink, 9.0 territories',
"37°38'N, 121°46lW": "37°38'N, 121°46'W",
'Common Yellowthroat, 4.5, Northern Flicker, 3.0': 'Common Yellowthroat, 4.5; Northern Flicker, 3.0',
'Red-bellied Woodpecker, 2.0, Carolina Chickadee, 2.0': 'Red-bellied Woodpecker, 2.0; Carolina Chickadee, 2.0',
'Winter 1992': ' ', #One header line in one file got OCR'd for some reason
'nuLquu “1:10': ' ',
'nululuu 1:1:1.)': ' ',
'20.9 h; 8 Visits (8 sunrise), 8, 15, 22, 29 April; 6, 13, 20, 27 May.': '20.9 h; 8 Visits (8 sunrise); 8, 15, 22, 29 April; 6, 13, 20, 27 May.',
'19.3 h; 11 visits (11 sunrise;': '19.3 h; 11 visits (11 sunrise);',
'Foster Plantation; 42"7’N': 'Foster Plantation; 42°7’N',
'Hermit Thrush, 4.5 (18), Black-throatcd Green Warbler': 'Hermit Thrush, 4.5 (18); Black-throated Green Warbler', # Fixes both delimiter and selling of throated
'39"] 2‘N, 76°54’W': '39°12‘N, 76°54’W',
"42°“7'N, 77°45’W": "42°7'N, 77°45’W",
'41°4\'N, 76"7’W': "41°4'N, 76°7’W",
'w‘sits': 'visits',
'79513’W': '79°13’W',
'Continuity.': 'Continuity:',
'Continuity"': 'Continuity:',
"40°44'N, 7 D50’W": "40°44'N, 75°50’W",
"41350'N, 71°33'W": "41°50'N, 71°33'W",
'44°57’N, 68D41’W': '44°57’N, 68°41’W',
'18.8 11; 11 Visits': '18.8 h; 11 Visits',
"Descripn'on of Plot": "Description of Plot",
'41 c’42’N, 73°13’VV': "41°42'N, 73°13'W",
'Northern Rough-winged Swallow. 0.5': 'Northern Rough-winged Swallow, 0.5',
'Warbling Vireo, 1.0, Northern Cardinal, 1.0': 'Warbling Vireo, 1.0; Northern Cardinal, 1.0',
'Wood Thrush, 3.0 (18), American Redstart, 3.0': 'Wood Thrush, 3.0; American Redstart, 3.0',
'study-hrs': 'study-hours',
'studyhours': 'study-hours',
'Nuttall’s Woodpecker, 3 (9; 2N),':'Nuttall’s Woodpecker, 3 (9; 2N);',
'38°35’45”N\', 76°45’46"W': '38°35’45”N, 76°45’46"W',
'Northern Parula 8': 'Northern Parula, 8',
'47°08’N, 99°] 5’ W': '47°08’N, 99°15’ W',
'Yellow Warbler, 1,’ Clay-colored Sparrow, 1,Savannah Sparrow, 1;': 'Yellow Warbler, 1; Clay-colored Sparrow, 1; Savannah Sparrow, 1;',
'Established 1993; 2 )n‘.': 'Established 1993; 2.',
'Established l983': 'Established 1983',
'Established 1978; 18 you': 'Established 1978; 18 yr.',
'This plot is part of a larger plot that was first censused in 1981.': '',
'Ruby-throatcd Hummingbird': 'Ruby-throated Hummingbird',
'RuHed Grouse': 'Ruffed Grouse',
'\Varbler': "Warbler",
'VVarbler': "Warbler",
'Common Yellowthroat 3': 'Common Yellowthroat, 3',
'all known to breed in immediate vicinity': '',
'and a number of vagrants': '',
"Utner Ubservers": "Other Observers",
'Dovmy': 'Downy',
"W'oodpecker": "Woodpecker",
"\700d Thrush": "Wood Thrush",
"\form-eating Warbler": "Worm-eating Warbler",
"Clifl' Swallow": "Cliff Swallow",
'Clifl\ Swallow"': 'Cliff Swallow',
'Downy Woodpecknululuu I JHJ er': 'Downy Woodpecker',
'unidentified Accipiter': 'Accipiter sp.',
"Traill’s Flycatcher": "Willow Flycatcher",
'Eastern Titmouse': 'Tufted Titmouse',
'Common Barn Owl': 'Barn Owl',
'Common Bushtit': 'Bushtit',
'Yellow-shafted Flicker': 'Northern Flicker',
'Yellowshafted Flicker': 'Northern Flicker',
'Common Barn-Owl': 'Barn Owl',
'Northern Parula Warbler': 'Northern Parula',
'Yellow-rumped,': 'Yellow-rumped Warbler,',
'Common Crow': 'American Crow',
', Raven,': ', Common Raven,',
'; Raven,': '; Common Raven,',
'+_': '+',
'chickadee sp.;': 'chickadee sp.,',
'Yellow Warbler, 0.5, Common Yellowthroat, 0.5.': 'Yellow Warbler, 0.5; Common Yellowthroat, 0.5.',
'Whip-poor-will, 1.0, European Starling, 1.0': 'Whip-poor-will, 1.0; European Starling, 1.0',
'80(9\'45"': '80°9\'45"',
'American Crow; 1.0;': 'American Crow, 1.0;',
"47°08'N7 99°15'W;": "47°08'N 99°15'W;",
"', 7'6°45": ", 76°45",
"43°] 6’N": "43°16'N",
"121°461W": "121°46'W",
"39.] h;": "39.1 h;",
"74°ll": "74°11",
"40°] 1": "40°11",
"Estao lished": "Established",
"Estabo lished": "Established",
"Estab lished": "Established",
"79°O": "79°0",
"79°]": "79°1",
"12.] h;": "12.1 h;",
"terfitories": "territories"
}
block = get_cleaned_string(block)
for replacement in replacements:
if replacement in block:
print("Replacing {} with {}".format(replacement, replacements[replacement]))
block = block.replace(replacement, replacements[replacement])
block = get_clean_block(block)
p = re.compile(r'((?:Site Number|Location|Continuity|Previously called|Size|Description of Plot|Edge|Topography and Elevation|Weather|Coverage|Census|Fledglings|Nests and Fledglings|Fledglings Seen|Fledglings Noted|Total|Visitors|Nests Found|Remarks|Observers|Other Observers|Other Observer|Acknowledgments)):')
split_block = p.split(block)[1:] #discard first value; an empty string
block_dict = {split_block[i]: split_block[i+1] for i in range(0, len(split_block), 2)}
block_dict['SiteName'] = site_name
block_dict['SiteNumInCensus'] = site_num * 10000 + year
return block_dict | 0a367e9163d1136ec725560156d67c05ca1c1d38 | 21,199 |
def isempty(s):
"""
    Return True if the input object (string) is empty (None, "", "-", or []), otherwise False.
"""
if s in (None, "", "-", []):
return True
return False | 9c3ffd6ab818e803c1c0129588c345361c58807f | 21,200 |
def raw(text):
"""Returns a raw string representation of text"""
new_str = ''
for char in text:
try:
new_str += trans_map[char]
except KeyError:
new_str += char
return new_str | 528e88837bba76411b44044b566e2a645db4433e | 21,202 |
def airtovac(wave_air):
"""
taken from idl astrolib
;+
; NAME:
; AIRTOVAC
; PURPOSE:
; Convert air wavelengths to vacuum wavelengths
; EXPLANATION:
; Wavelengths are corrected for the index of refraction of air under
; standard conditions. Wavelength values below 2000 A will not be
; altered. Uses relation of Ciddor (1996).
;
; CALLING SEQUENCE:
; AIRTOVAC, WAVE_AIR, [ WAVE_VAC]
;
; INPUT/OUTPUT:
; WAVE_AIR - Wavelength in Angstroms, scalar or vector
; If this is the only parameter supplied, it will be updated on
; output to contain double precision vacuum wavelength(s).
; OPTIONAL OUTPUT:
; WAVE_VAC - Vacuum wavelength in Angstroms, same number of elements as
; WAVE_AIR, double precision
;
; EXAMPLE:
; If the air wavelength is W = 6056.125 (a Krypton line), then
; AIRTOVAC, W yields an vacuum wavelength of W = 6057.8019
;
; METHOD:
; Formula from Ciddor 1996, Applied Optics 62, 958
;
; NOTES:
; Take care within 1 A of 2000 A. Wavelengths below 2000 A *in air* are
; not altered.
; REVISION HISTORY
; Written W. Landsman November 1991
; Use Ciddor (1996) formula for better accuracy in the infrared
; Added optional output vector, W Landsman Mar 2011
; Iterate for better precision W.L./D. Schlegel Mar 2011
;-
"""
wave_vac = wave_air * 1.0
g = wave_vac > 2000 #Only modify above 2000 A
if np.sum(g):
for iter in [0, 1]:
if isinstance(g, np.ndarray):
sigma2 = (1e4/wave_vac[g])**2. #Convert to wavenumber squared
# Compute conversion factor
fact = 1. + 5.792105e-2 / (238.0185 - sigma2) + \
1.67917e-3 / (57.362 - sigma2)
wave_vac[g] = wave_air[g] * fact #Convert Wavelength
else: # scalar version
sigma2 = (1e4/wave_vac)**2. #Convert to wavenumber squared
# Compute conversion factor
fact = 1. + 5.792105e-2 / (238.0185 - sigma2) + \
1.67917e-3 / (57.362 - sigma2)
wave_vac = wave_air * fact #Convert Wavelength
return wave_vac | 68d71855f0fa8256acc23bfd24d68985cfc1f3a7 | 21,203 |
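# Usage sketch for airtovac (illustrative, not part of the original snippet; the
# function above assumes numpy is available as np). The Krypton air line from the
# docstring converts to roughly 6057.8019 A; values below 2000 A pass through.
import numpy as np
print(airtovac(6056.125))                      # scalar input -> ~6057.8019
print(airtovac(np.array([1900.0, 6056.125])))  # array input; 1900.0 is unchanged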
def clamp(val, min_, max_):
"""clamp val to between min_ and max_ inclusive"""
if val < min_:
return min_
if val > max_:
return max_
return val | 31f2441ba03cf765138a7ba9b41acbfe21b7bda7 | 21,204 |
def GetUserLink(provider, email):
"""Retrieves a url to the profile of the specified user on the given provider.
Args:
provider: The name of the provider
email: The email alias of the user.
Returns:
Str of the url to the profile of the user.
"""
user_link = ''
if email and provider == Provider.ISSUETRACKER:
user_link = 'http://code.google.com/u/' + email.split('@')[0]
return encoding_util.EncodeToAscii(user_link) | ad5f30e7e04000369d45d242b18afc59922da9bc | 21,205 |
def _as_bytes0(path):
"""Crashes translation if the path contains NUL characters."""
res = _as_bytes(path)
rstring.check_str0(res)
return res | 76c9c130d1a74f9cacb34e30141db74400f6ea33 | 21,206 |
def get_ip(request):
"""Determines user IP address
Args:
request: resquest object
Return:
ip_address: requesting machine's ip address (PUBLIC)
"""
ip_address = request.remote_addr
return ip_address | 84e1540bc8b79fd2043a8fb6f107f7bcd8d7cc8c | 21,207 |
def _is_valid_new_style_arxiv_id(identifier):
"""Determine if the given identifier is a valid new style arXiv ID."""
split_identifier = identifier.split('v')
if len(split_identifier) > 2:
return False
elif len(split_identifier) == 2:
identifier, version = split_identifier
if not version.isnumeric():
return False
else:
identifier = split_identifier[0]
split_identifier = identifier.split('.')
if len(split_identifier) != 2:
return False
prefix, suffix = split_identifier
if not prefix.isnumeric() or not suffix.isnumeric():
return False
if len(prefix) != 4 or len(suffix) not in {4, 5}:
return False
month = prefix[2:4]
if int(month) > 12:
return False
return True | 71171984ad1497fa45e109b9657352c20bfe7682 | 21,208 |
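# Illustrative checks for _is_valid_new_style_arxiv_id (added as a sketch; the
# expected results follow directly from the rules coded above).
assert _is_valid_new_style_arxiv_id('2101.00001')          # 4-digit prefix, 5-digit suffix
assert _is_valid_new_style_arxiv_id('1501.1234v2')         # optional version suffix
assert not _is_valid_new_style_arxiv_id('2113.00001')      # month 13 is rejected
assert not _is_valid_new_style_arxiv_id('hep-th/9901001')  # old-style IDs are rejected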
def download_suite(request, domain, app_id):
"""
See Application.create_suite
"""
if not request.app.copy_of:
request.app.set_form_versions(None)
return HttpResponse(
request.app.create_suite()
) | 382817e3a790d59c33c69eb5334841d2d9a1a7af | 21,209 |
def get_graph(mol):
""" Converts `rdkit.Chem.Mol` object to `PreprocessingGraph`.
"""
if mol is not None:
if not C.use_aromatic_bonds:
rdkit.Chem.Kekulize(mol, clearAromaticFlags=True)
molecular_graph = PreprocessingGraph(molecule=mol, constants=C)
return molecular_graph | 3d105de313ab1aed6ed0fff598e791cd903e94de | 21,210 |
def dict_fetchall(cursor):
"""
Returns all rows from a cursor as a dict
"""
desc = cursor.description
return [
dict(zip([col[0] for col in desc], row))
for row in cursor.fetchall()
] | 6d5e6621ac2cb6229f7caf6714cbc0124a33c271 | 21,211 |
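# Minimal usage sketch for dict_fetchall with sqlite3 (illustrative; any DB-API
# cursor that populates `description` works the same way).
import sqlite3
conn = sqlite3.connect(':memory:')
cur = conn.cursor()
cur.execute('CREATE TABLE person (id INTEGER, name TEXT)')
cur.execute("INSERT INTO person VALUES (1, 'alice'), (2, 'bob')")
cur.execute('SELECT id, name FROM person')
print(dict_fetchall(cur))  # [{'id': 1, 'name': 'alice'}, {'id': 2, 'name': 'bob'}]
conn.close()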
def count_vowels(s):
"""Used to count the vowels in the sequence"""
s = s.lower()
    counter = 0
    for x in s:
        if x in ['a', 'e', 'i', 'o', 'u']:
            counter += 1
return counter | 236500c76b22510e6f0d97a4200865e2a18b47c3 | 21,212 |
def _is_valid_dtype(matrix, complex_dtype=False, all_dtype=False):
""" Check to see if it's a usable float dtype """
if all_dtype:
return matrix.dtype in NUMPY_FLOAT_DTYPES + NUMPY_COMPLEX_DTYPES
elif complex_dtype:
return matrix.dtype in NUMPY_COMPLEX_DTYPES
else:
return matrix.dtype in NUMPY_FLOAT_DTYPES | 1ca4a79082f170f53a905773b42fa8cb833e4016 | 21,213 |
def assess_edge(self, edge, fsmStack, request, **kwargs):
"""
Try to transition to ASSESS, or WAIT_ASSESS if not ready,
or jump to ASK if a new question is being asked.
"""
fsm = edge.fromNode.fsm
if not fsmStack.state.linkState: # instructor detached
return fsm.get_node('END')
elif fsmStack.state.linkState.fsmNode.node_name_is_one_of('QUESTION'):
if fsmStack.state.unitLesson == fsmStack.state.linkState.unitLesson:
return fsm.get_node('WAIT_ASSESS')
else: # jump to the new question
fsmStack.state.unitLesson = fsmStack.state.linkState.unitLesson
fsmStack.state.save()
return fsm.get_node('TITLE')
else: # pragma: no cover
if not fsmStack.next_point.response_to_check.selfeval:
return edge.toNode
if fsmStack.next_point.response_to_check.selfeval != 'correct':
return fsm.get_node('INCORRECT_ANSWER')
elif fsmStack.next_point.response_to_check.selfeval == 'correct':
return fsm.get_node('CORRECT_ANSWER')
return edge.toNode | e09d07afbc37c73188bbb5e6fa3436c88fce9bd7 | 21,214 |
def viable_source_types_for_generator_real (generator):
""" Returns the list of source types, which, when passed to 'run'
method of 'generator', has some change of being eventually used
(probably after conversion by other generators)
"""
source_types = generator.source_types ()
if not source_types:
# If generator does not specify any source types,
# it might be special generator like builtin.lib-generator
# which just relays to other generators. Return '*' to
# indicate that any source type is possibly OK, since we don't
# know for sure.
return ['*']
else:
result = []
for s in source_types:
viable_sources = viable_source_types(s)
if viable_sources == "*":
result = ["*"]
break
else:
result.extend(type.all_derived(s) + viable_sources)
return unique(result) | e946663241fb77d3632f88b2f879d650e65f6d73 | 21,215 |
def negSamplingCostAndGradient(predicted, target, outputVectors, dataset,
K=10):
""" Negative sampling cost function for word2vec models
Implement the cost and gradients for one predicted word vector
and one target word vector as a building block for word2vec
models, using the negative sampling technique. K is the sample
size.
Note: See test_word2vec below for dataset's initialization.
Arguments/Return Specifications: same as softmaxCostAndGradient
"""
# Sampling of indices is done for you. Do not modify this if you
# wish to match the autograder and receive points!
indices = [target]
indices.extend(getNegativeSamples(target, dataset, K))
    ### YOUR CODE HERE
    # Standard negative-sampling objective:
    #   cost = -log(sigmoid(u_o . v_c)) - sum_k log(sigmoid(-u_k . v_c))
    gradPred = np.zeros(predicted.shape)
    grad = np.zeros(outputVectors.shape)
    # Positive (target) word term.
    z = sigmoid(np.dot(outputVectors[target], predicted))
    cost = -np.log(z)
    grad[target] += predicted * (z - 1.0)
    gradPred += outputVectors[target] * (z - 1.0)
    # Negative samples (indices may repeat; gradients accumulate).
    for k in indices[1:]:
        z = sigmoid(np.dot(outputVectors[k], predicted))
        cost -= np.log(1.0 - z)
        grad[k] += predicted * z
        gradPred += outputVectors[k] * z
    ### END YOUR CODE
    return cost, gradPred, grad | d4b8a16166406f9e13296b8b5c53c56f95ff9d6b | 21,216
def get_start_block(block):
"""
Gets the deepest block to use as the starting block.
"""
if not block.get('children'):
return block
first_child = block['children'][0]
return get_start_block(first_child) | e658954bb69f88f10c2f328c605d6da094ba065d | 21,217 |
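# Usage sketch for get_start_block (illustrative): the recursion follows the
# first child at each level until a block with no children is reached.
blocks = {
    'id': 'course',
    'children': [
        {'id': 'chapter-1', 'children': [{'id': 'section-1-1'}]},
        {'id': 'chapter-2'},
    ],
}
print(get_start_block(blocks)['id'])  # -> 'section-1-1'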
def transform_rank_list(lam_ref, A, b, rank):
"""
A is a list here. We sum the first `rank` elements of it
to return a matrix with the desired rank.
"""
_A = sum(A[0:rank])
_b = b
_d = _A @ lam_ref + _b
assert np.linalg.matrix_rank(_A) == rank, "Unexpected rank mismatch"
return _A, _b, _d | 2b77db3cb27ce3b66d0038042d649226e5a231d2 | 21,218 |
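# Usage sketch for transform_rank_list (illustrative; assumes numpy as np, as the
# function itself does). A is supplied as a list of rank-1 matrices, so summing
# the first `rank` independent terms yields a matrix of exactly that rank.
import numpy as np
lam_ref = np.array([1.0, 2.0, 3.0])
A = [np.outer(np.eye(3)[i], np.eye(3)[i]) for i in range(3)]  # three rank-1 pieces
b = np.zeros(3)
_A, _b, _d = transform_rank_list(lam_ref, A, b, rank=2)
print(np.linalg.matrix_rank(_A), _d)  # 2 [1. 2. 0.]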
def FindWindowsWithTitle(title_to_search):
"""Finds windows with given title.
Args:
title_to_search: Window title substring to search, case-insensitive.
Returns:
A list of HWND that match the search condition.
"""
desktop_handle = None
return FindWindowsWithText(desktop_handle, title_to_search) | bcc75a4351969cfdbb475032d556e73a4b0ceb92 | 21,219 |
import time
def main_update(next_image_step):
"""
    Advance the model by one step, including image / file writing at a specified frequency.
    Assumes global variables: time, step, files_freq, next_image_step.
    If the numerical dt would overshoot the next scheduled writing point, dt is
    overridden so that point is hit exactly, and flags are set so that image / file
    writing proceeds.
"""
if md.thermal:
dt = advDiff.get_max_dt()*md.courantFac #additional md.courantFac helps stabilise advDiff
advDiff.integrate(dt)
else:
dt = advector.get_max_dt()
#This relates to file writing at set period:
#override dt make sure we hit certain time values
#Set some flags so that image / file writing proceeds
if step == 0:
files_this_step = True
else:
files_this_step = False
if time + dt >= next_image_step:
dt = next_image_step - time
files_this_step = True
next_image_step += files_freq #increment time for our next image / file dump
#Do advection
advector.integrate(dt)
marker.advection(dt)
#remove drift in pressure
pressureSurf = _pressure.evaluate()[0]
pressureField.data[:] -= pressureSurf/surfLength
return time+dt, step+1, files_this_step, next_image_step | 6a402c70948dc90aade783b94b054ef5abf8225c | 21,220 |
def assemble_batches(inputs, crop_mode='center_only'):
"""
Assemble DataFrame of image crops for feature computation.
Input:
inputs: list of filenames (center_only, corners, and selective_search mode)
OR input DataFrame (list mode)
mode: string
'list': take the image windows from the input as-is
'center_only': take the CROPPED_DIM middle of the image windows
'corners': take CROPPED_DIM-sized boxes at 4 corners and center of
the image windows, as well as their flipped versions: a total of 10.
'selective_search': run Selective Search region proposal on the
image windows, and take each enclosing subwindow.
Output:
df_batches: list of DataFrames, each one of BATCH_SIZE rows.
Each row has 'image', 'filename', and 'window' info.
Column 'image' contains (X x 3 x CROPPED_DIM x CROPPED_IM) ndarrays.
Column 'filename' contains source filenames.
Column 'window' contains [ymin, xmin, ymax, xmax] ndarrays.
If 'filename' is None, then the row is just for padding.
Note: for increased efficiency, increase the batch size (to the limit of gpu
memory) to avoid the communication cost
"""
if crop_mode == 'list':
images_df = _assemble_images_list(inputs)
elif crop_mode == 'center_only':
images_df = _assemble_images_center_only(inputs)
elif crop_mode == 'corners':
images_df = _assemble_images_corners(inputs)
elif crop_mode == 'selective_search':
images_df = _assemble_images_selective_search(inputs)
else:
raise Exception("Unknown mode: not in {}".format(CROP_MODES))
# Make sure the DataFrame has a multiple of BATCH_SIZE rows:
# just fill the extra rows with NaN filenames and all-zero images.
N = images_df.shape[0]
remainder = N % BATCH_SIZE
if remainder > 0:
zero_image = np.zeros_like(images_df['image'].iloc[0])
zero_window = np.zeros((1, 4), dtype=int)
remainder_df = pd.DataFrame([{
'filename': None,
'image': zero_image,
'window': zero_window
}] * (BATCH_SIZE - remainder))
images_df = images_df.append(remainder_df)
N = images_df.shape[0]
# Split into batches of BATCH_SIZE.
    ind = np.arange(N) // BATCH_SIZE
    df_batches = [images_df[ind == i] for i in range(N // BATCH_SIZE)]
return df_batches | f22f3ed33b339a4375a1e3319d26cb2946762978 | 21,222 |
def __VF2_feasible(graph1, graph2, vertex1, vertex2, map21, map12, terminals1,
terminals2, subgraph):
"""
Returns :data:`True` if two vertices `vertex1` and `vertex2` from graphs
`graph1` and `graph2`, respectively, are feasible matches. `mapping21` and
`mapping12` are the current state of the mapping from `graph1` to `graph2`
and vice versa, respectively. `terminals1` and `terminals2` are lists of
the vertices that are directly connected to the already-mapped vertices.
`subgraph` is :data:`True` if graph2 is to be treated as a potential
subgraph of graph1. i.e. graph1 is a specific case of graph2.
Uses the VF2 algorithm of Vento and Foggia. The feasibility is assessed
through a series of semantic and structural checks. Only the combination
of the semantic checks and the level 0 structural check are both
necessary and sufficient to ensure feasibility. (This does *not* mean that
vertex1 and vertex2 are always a match, although the level 1 and level 2
checks preemptively eliminate a number of false positives.)
"""
cython.declare(vert1=Vertex, vert2=Vertex, edge1=Edge, edge2=Edge, edges1=dict, edges2=dict)
cython.declare(i=cython.int)
cython.declare(term1Count=cython.int, term2Count=cython.int, neither1Count=cython.int, neither2Count=cython.int)
if not subgraph:
# To be feasible the connectivity values must be an exact match
if vertex1.connectivity1 != vertex2.connectivity1: return False
if vertex1.connectivity2 != vertex2.connectivity2: return False
if vertex1.connectivity3 != vertex2.connectivity3: return False
# Semantic check #1: vertex1 and vertex2 must be equivalent
if subgraph:
if not vertex1.isSpecificCaseOf(vertex2): return False
else:
if not vertex1.equivalent(vertex2): return False
# Get edges adjacent to each vertex
edges1 = graph1.edges[vertex1]
edges2 = graph2.edges[vertex2]
# Semantic check #2: adjacent vertices to vertex1 and vertex2 that are
# already mapped should be connected by equivalent edges
for vert2 in edges2:
if vert2 in map12:
vert1 = map12[vert2]
if not vert1 in edges1: # atoms not joined in graph1
return False
edge1 = edges1[vert1]
edge2 = edges2[vert2]
if subgraph:
if not edge1.isSpecificCaseOf(edge2): return False
else: # exact match required
if not edge1.equivalent(edge2): return False
# there could still be edges in graph1 that aren't in graph2.
# this is ok for subgraph matching, but not for exact matching
if not subgraph:
for vert1 in edges1:
if vert1 in map21:
vert2 = map21[vert1]
if not vert2 in edges2: return False
# Count number of terminals adjacent to vertex1 and vertex2
term1Count = 0; term2Count = 0; neither1Count = 0; neither2Count = 0
for vert1 in edges1:
if vert1 in terminals1: term1Count += 1
elif vert1 not in map21: neither1Count += 1
for vert2 in edges2:
if vert2 in terminals2: term2Count += 1
elif vert2 not in map12: neither2Count += 1
# Level 2 look-ahead: the number of adjacent vertices of vertex1 and
# vertex2 that are non-terminals must be equal
if subgraph:
if neither1Count < neither2Count: return False
else:
if neither1Count != neither2Count: return False
# Level 1 look-ahead: the number of adjacent vertices of vertex1 and
# vertex2 that are terminals must be equal
if subgraph:
if term1Count < term2Count: return False
else:
if term1Count != term2Count: return False
# Level 0 look-ahead: all adjacent vertices of vertex2 already in the
# mapping must map to adjacent vertices of vertex1
for vert2 in edges2:
if vert2 in map12:
vert1 = map12[vert2]
if vert1 not in edges1: return False
# Also, all adjacent vertices of vertex1 already in the mapping must map to
# adjacent vertices of vertex2, unless we are subgraph matching
if not subgraph:
for vert1 in edges1:
if vert1 in map21:
vert2 = map21[vert1]
if vert2 not in edges2: return False
# All of our tests have been passed, so the two vertices are a feasible
# pair
return True | f3ebfa379d710f5e1c6651713c15e9c6148d576d | 21,223 |
async def place_rectangle(
interface, element, x, y, width, height, include_all_sides=True, variant=None
):
"""Place a rectangle of an element.
Parameters
----------
interface
        The editor interface.
    element
        The element to place.
x
X coordinate of the upper left corner.
y
Y coordinate of the upper left corner.
width
Width of the rectangle.
height
Height of the rectangle.
include_all_sides
If False, skip returning the left, right and lower sides. Use this
when placing walls and you are only interested in the insides.
variant
The variant of element to place.
Returns
-------
A list of tuples (element, direction, x, y, variant) of placed elements.
Some may be omitted, if `include_all_sides` is false.
"""
await interface.place_element(
element,
Direction.NONE,
(x, y),
(x + width - 1, y + height - 1),
variant=variant,
)
return_elements = []
if include_all_sides:
x_range = range(x, x + width)
y_range = range(y, y + height)
else:
x_range = range(x + 1, x + width - 1)
y_range = range(y, y + height - 1)
for placed_x in x_range:
for placed_y in y_range:
return_elements.append(
(element, Direction.NONE, placed_x, placed_y, variant)
)
return return_elements | e9b2c16e77627dc3e0cac5f0abfcdce23db5eb29 | 21,224 |
def calc_Q_hat_hs_d_t(Q, A_A, V_vent_l_d_t, V_vent_g_i, mu_H, mu_C, J_d_t, q_gen_d_t, n_p_d_t, q_p_H, q_p_CS, q_p_CL, X_ex_d_t, w_gen_d_t, Theta_ex_d_t, L_wtr, region):
"""(40-1a)(40-1b)(40-2a)(40-2b)(40-2c)(40-3)
Args:
        Q: Heat loss coefficient of the dwelling unit (W/(m2*K))
        A_A: Total floor area (m2)
        V_vent_l_d_t: Local ventilation volume at time t on date d (m3/h)
        V_vent_g_i: General ventilation volume of heating/cooling zone i (m3/h)
        mu_H: Solar heat gain coefficient of the dwelling unit for the heating season ((W/m2)/(W/m2))
        mu_C: Solar heat gain coefficient of the dwelling unit for the cooling season ((W/m2)/(W/m2))
        J_d_t: Horizontal global solar radiation at time t on date d (W/m2)
        q_gen_d_t: Internal heat generation at time t on date d (W)
        n_p_d_t: Number of occupants at time t on date d (persons)
        q_p_H: Sensible heat generation per person from occupants during the heating season (W/person)
        q_p_CS: Sensible heat generation per person from occupants during the cooling season (W/person)
        q_p_CL: Latent heat generation per person from occupants during the cooling season (W/person)
        X_ex_d_t: Absolute humidity of outdoor air at time t on date d (kg/kg(DA))
        w_gen_d_t: Internal moisture generation at time t on date d
        Theta_ex_d_t: Outdoor air temperature at time t on date d (deg C)
        L_wtr: Latent heat of vaporization of water (kJ/kg)
        region: Climate region classification
    Returns:
        Heating output of the heat source unit, used to calculate its hourly air volume, at time t on date d (MJ/h)
"""
H, C, M = get_season_array_d_t(region)
c_p_air = get_c_p_air()
rho_air = get_rho_air()
Theta_set_H = get_Theta_set_H()
Theta_set_C = get_Theta_set_C()
X_set_C = get_X_set_C()
Q_hat_hs_d_t = np.zeros(24 * 365)
Q_hat_hs_H_d_t = np.zeros(24 * 365)
Q_hat_hs_CS_d_t = np.zeros(24 * 365)
Q_hat_hs_CL_d_t = np.zeros(24 * 365)
    # Heating season (40-1b)
if mu_H is not None:
Q_hat_hs_H_d_t[H] = (((Q - 0.35 * 0.5 * 2.4) * A_A + (c_p_air * rho_air * (V_vent_l_d_t[H] + np.sum(V_vent_g_i[:5]))) / 3600) * (Theta_set_H - Theta_ex_d_t[H]) \
- mu_H * A_A * J_d_t[H] - q_gen_d_t[H] - n_p_d_t[H] * q_p_H) * 3600 * 10 ** -6
# (40-1a)
Q_hat_hs_d_t[H] = np.clip(Q_hat_hs_H_d_t[H], 0, None)
    # Cooling season (40-2b)
Q_hat_hs_CS_d_t[C] = (((Q - 0.35 * 0.5 * 2.4) * A_A + (c_p_air * rho_air * (V_vent_l_d_t[C] + np.sum(V_vent_g_i[:5]))) / 3600) * (Theta_ex_d_t[C] - Theta_set_C) \
+ mu_C * A_A * J_d_t[C] + q_gen_d_t[C] + n_p_d_t[C] * q_p_CS) * 3600 * 10 ** -6
# (40-2c)
Q_hat_hs_CL_d_t[C] = ((rho_air * (V_vent_l_d_t[C] + np.sum(V_vent_g_i[:5])) * (X_ex_d_t[C] - X_set_C) * 10 ** 3 + w_gen_d_t[C]) \
* L_wtr + n_p_d_t[C] * q_p_CL * 3600) * 10 ** -6
# (40-2a)
Q_hat_hs_d_t[C] = np.clip(Q_hat_hs_CS_d_t[C], 0, None) + np.clip(Q_hat_hs_CL_d_t[C], 0, None)
    # Intermediate season (40-3)
Q_hat_hs_d_t[M] = 0
return Q_hat_hs_d_t | 64dd272673507b15a2d2c1782a0c3db88c3f8d76 | 21,226 |
def get_all_infoproviders():
"""
    Endpoint `/infoproviders`.
    The response contains information about all infoproviders stored in the database.
"""
try:
return flask.jsonify(queries.get_infoprovider_list())
except Exception:
logger.exception("An error occurred: ")
err = flask.jsonify({"err_msg": "An error occurred while loading all infoproviders"})
return err, 400 | 73966c2a5b171baead9edccec27f6380f61fb2ab | 21,227 |
import math
def maidenhead(dec_lat, dec_lon):
"""Convert latitude and longitude to Maidenhead grid locators."""
try:
dec_lat = float(dec_lat)
dec_lon = float(dec_lon)
except ValueError:
return ''
if _non_finite(dec_lat) or _non_finite(dec_lon):
return ''
if 90 < math.fabs(dec_lat) or 180 < math.fabs(dec_lon):
return ''
if 89.99999 < dec_lat:
# force North Pole to just inside lat_sq 'R'
dec_lat = 89.99999
if 179.99999 < dec_lon:
# force 180 to just inside lon_sq 'R'
dec_lon = 179.99999
adj_lat = dec_lat + 90.0
adj_lon = dec_lon + 180.0
# divide into 18 zones (fields) each 20 degrees lon, 10 degrees lat
grid_lat_sq = chr(int(adj_lat / 10) + 65)
grid_lon_sq = chr(int(adj_lon / 20) + 65)
# divide into 10 zones (squares) each 2 degrees lon, 1 degrees lat
grid_lat_field = str(int(adj_lat % 10))
grid_lon_field = str(int((adj_lon / 2) % 10))
# remainder in minutes
adj_lat_remainder = (adj_lat - int(adj_lat)) * 60
adj_lon_remainder = ((adj_lon) - int(adj_lon / 2) * 2) * 60
# divide into 24 zones (subsquares) each 5 degrees lon, 2.5 degrees lat
grid_lat_subsq = chr(97 + int(adj_lat_remainder / 2.5))
grid_lon_subsq = chr(97 + int(adj_lon_remainder / 5))
# remainder in seconds
adj_lat_remainder = (adj_lat_remainder % 2.5) * 60
adj_lon_remainder = (adj_lon_remainder % 5.0) * 60
# divide into 10 zones (extended squares) each 30 secs lon, 15 secs lat
grid_lat_extsq = chr(48 + int(adj_lat_remainder / 15))
grid_lon_extsq = chr(48 + int(adj_lon_remainder / 30))
return (grid_lon_sq + grid_lat_sq +
grid_lon_field + grid_lat_field +
grid_lon_subsq + grid_lat_subsq +
grid_lon_extsq + grid_lat_extsq) | 63e44fffbf113f7c8a195b58556eef80a66690f7 | 21,228 |
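# Usage sketch for maidenhead (illustrative). The function above relies on a
# `_non_finite` helper that is not shown in this snippet; a reasonable stand-in
# is defined here so the call runs.
def _non_finite(x):
    return math.isnan(x) or math.isinf(x)

print(maidenhead(48.1465, 11.6083))  # -> 'JN58td25' (8-character locator, Munich area)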
def parse_worker_string(miner, worker):
"""
Parses a worker string and returns the coin address and worker ID
Returns:
String, String
"""
worker_part_count = worker.count(".") + 1
if worker_part_count > 1:
if worker_part_count == 2:
coin_address, worker = worker.split('.')
else:
worker_parts = worker.split('.')
coin_address = worker_parts[0]
worker = worker_parts[worker_part_count - 1]
else:
coin_address = worker
if coin_address is not None:
if miner.coin_address is None or len(miner.coin_address) == 0:
miner.coin_address = coin_address
elif miner.coin_address != coin_address:
miner.coin_address = coin_address
if worker is not None:
if miner.worker_name is None or len(miner.worker_name) == 0:
miner.worker_name = worker
elif miner.worker_name != worker:
miner.worker_name = worker
return coin_address, worker | 3492716fc9f5290a161de0b46e7af87afbe6b348 | 21,229 |
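# Usage sketch for parse_worker_string (illustrative): any object exposing
# `coin_address` and `worker_name` attributes can stand in for the miner.
from types import SimpleNamespace
miner = SimpleNamespace(coin_address=None, worker_name=None)
print(parse_worker_string(miner, 'WALLETADDRESS.rig01'))
# -> ('WALLETADDRESS', 'rig01'); miner.coin_address and miner.worker_name are updated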
import json
def get_inference_sequence(file_path):
"""
:param file_path: path of 2D bounding boxes
:return:
"""
with open(file_path + '.json', 'r') as f:
detected_bdbs = json.load(f)
f.close()
boxes = list()
for j, bdb2d in enumerate(detected_bdbs):
box = bdb2d['bbox']
box = {'x1': box[0], 'y1': box[1], 'x2': box[2], 'y2': box[3]}
box['score'] = bdb2d['score']
box['classname'] = bdb2d['class']
boxes.append({'2dbdb': box})
camera = dict()
camera['K'] = np.array([[529.5, 0., 365.], [0, 529.5, 265.], [0, 0, 1]])
boxes_out = list()
for box in boxes:
box_set = dict()
box_set['bdb_pos'] = [box['2dbdb']['x1'], box['2dbdb']['y1'], box['2dbdb']['x2'], box['2dbdb']['y2']]
if box['2dbdb']['classname'] not in OBJ_CATEGORY_TEST:
continue
box_set['size_cls'] = OBJ_CATEGORY_CLEAN.index(box['2dbdb']['classname'])
boxes_out.append(box_set)
data = dict()
data['rgb_path'] = file_path + '.jpg'
data['camera'] = camera
data['boxes'] = list_of_dict_to_dict_of_list(boxes_out)
data['sequence_id'] = int(file_path.split('/')[-1])
return data | a77b5f24004acf9839881cd52ce06b6f785f9bfb | 21,230 |
def _DC_GetBoundingBox(self):
"""
GetBoundingBox() -> (x1,y1, x2,y2)
Returns the min and max points used in drawing commands so far.
"""
return (self.MinX(), self.MinY(), self.MaxX(), self.MaxY()) | 47dc9e8bbc429dbd079695844c9bbcfc79b26229 | 21,231 |
from typing import Union
from typing import Iterable
from typing import List
def map_text(
text: Union[str, Text, Iterable[str], Iterable[Text]],
mapping: StringMapper
) -> Union[str, List[str]]:
"""
Replace text if it matches one of the dictionary keys.
:param text: Text instance(s) to map.
:param mapping: Mappings to replace text.
"""
if isinstance(text, Text):
text = text.get_text()
if not isinstance(text, str):
return [map_text(str(t), mapping) for t in text]
if mapping is None:
return text
if isinstance(mapping, dict) or isinstance(mapping, Series):
if text in mapping.keys():
return mapping[text]
else:
return text
elif callable(mapping):
return mapping(text)
else:
raise TypeError('mapping must be a dict or callable') | 63c9dc6803d1aad572e76cb2a6554363ae358e9c | 21,232 |
def _load_components(config: ConfigType) -> ConfigType:
"""Load the different componenets in a config
Args:
config (ConfigType)
Returns:
ConfigType
"""
special_key = "_load"
if config is not None and special_key in config:
loaded_config = read_config_file(config.pop(special_key))
updated_config = OmegaConf.merge(loaded_config, config)
assert isinstance(updated_config, ConfigType)
return updated_config
return config | b00e2225df4d493636c509380c3c19c107ad32e6 | 21,233 |
import typing
def _value_to_variant(value: typing.Union[bytes, int, float, str]) -> GLib.Variant:
"""
Automatically convert a Python value to a GLib.Variant by guessing the
matching variant type.
"""
if isinstance(value, bool):
return GLib.Variant("b", value)
elif isinstance(value, bytes):
return GLib.Variant("y", value)
elif isinstance(value, int):
return GLib.Variant("x", value)
elif isinstance(value, float):
return GLib.Variant("d", value)
elif isinstance(value, str):
return GLib.Variant("s", value)
else:
raise ValueError("Unknown value type", value) | 8b16bad781954238174a160df5239c0b8cb88e2e | 21,234 |
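# Usage sketch for _value_to_variant (illustrative; requires PyGObject so that
# GLib is importable, which the snippet above assumes).
from gi.repository import GLib
print(_value_to_variant(True))     # boolean -> GLib.Variant('b', True)
print(_value_to_variant(3.14))     # float   -> GLib.Variant('d', 3.14)
print(_value_to_variant('hello'))  # str     -> GLib.Variant('s', 'hello')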
import math
def ha_rise_set(el_limit, lat, dec):
"""
Hour angle from transit for rising and setting.
Returns pi for a source that never sets and 0 for a source always below
the horizon.
@param el_limit : the elevation limit in radians
@type el_limit : float
@param lat : the observatory latitude in radians
@type lat : float
@param dec : the source declination in radians
@type dec : float
@return: hour angle from transit in radians
"""
cos_ha = (math.sin(el_limit) - math.sin(lat)*math.sin(dec)) \
/(math.cos(lat)*math.cos(dec))
if cos_ha <= -1:
# never sets
        return math.pi
elif cos_ha >= 1:
# never visible
return 0
else:
return math.acos(cos_ha) | 648de7a69039d73f3947706ecc4ee90e1d05597e | 21,235 |
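# Usage sketch for ha_rise_set (illustrative): a source at declination +20 deg,
# observed from latitude +35 deg with a 10 deg elevation limit, stays above the
# limit for roughly +/- 92 degrees of hour angle (about 6.1 hours) around transit.
el_limit = math.radians(10.0)
lat = math.radians(35.0)
dec = math.radians(20.0)
print(math.degrees(ha_rise_set(el_limit, lat, dec)))  # -> ~91.7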
def create(transactions, user=None):
"""# Create Transactions
Send a list of Transaction objects for creation in the Stark Bank API
## Parameters (required):
- transactions [list of Transaction objects]: list of Transaction objects to be created in the API
## Parameters (optional):
- user [Project object]: Project object. Not necessary if starkbank.user was set before function call
## Return:
- list of Transaction objects with updated attributes
"""
return rest.post_multi(resource=_resource, entities=transactions, user=user) | 32573a0e569fde73c6eaf228ad6a07849297c7b9 | 21,236 |
def get_quote(symbol):
"""
Returns today's stock price
"""
contents = get_content(symbol)
return contents('.time_rtq_ticker span').text() | 546ac10e5f7d5b3cc661dde5dceec8c4a8b0fae0 | 21,237 |
def load_pil(data, is_file = False):
""" Parses a string or file written in PIL notation! """
# We only assign reactions in a postprocessing step,
# because there are no macrostates in nuskell.
set_io_objects(D = NuskellDomain, C = NuskellComplex)
out = dsd_read_pil(data, is_file)
clear_io_objects()
cxs = {k: v for k, v in out['complexes'].items()}
rms = {k: v for k, v in out['macrostates'].items()}
det = set(list(out['det_reactions']))
con = set(list(out['con_reactions']))
[o.clear() for o in out.values()]
out.clear()
return cxs, rms, det, con | 0fe0b507d19595f71d18d24e1f003fbaa59485fc | 21,238 |
def get(obj, key, default=None, pattern_default=(), apply_transforms=True):
"""
Get a value specified by the dotted key. If dotted is a pattern,
return a tuple of all matches
>>> d = {'hello': {'there': [1, '2', 3]}}
>>> get(d, 'hello.there[1]|int')
2
>>> get(d, 'hello.there[1:]')
['2', 3]
>>> get([{'a': 1}, {'a':2}], '[*].a')
(1, 2)
"""
ops = parse(key)
vals = el.gets(ops, obj)
if apply_transforms:
vals = ( ops.apply(v) for v in vals )
found = tuple(vals)
if not is_pattern(ops):
return found[0] if found else default
return found if found else pattern_default | b6b84a357e18fa0e78d6520ba50ff5668a97067c | 21,239 |
def str_view(request):
"""
A simple test view that returns a string.
"""
return '<Response><Message>Hi!</Message></Response>' | fd9d150afdf0589cdb4036bcb31243b2e22ef1e2 | 21,240 |
import atexit
def _run_script(script, start_with_ctty, args, kwargs):
"""
Meant to be called inside a python subprocess, do NOT call directly.
"""
enter_pty(start_with_ctty)
result = script(*args, **kwargs)
# Python-spawned subprocesses do not call exit funcs - https://stackoverflow.com/q/34506638/2907819
atexit._run_exitfuncs()
return result | 15507307bb85013d9354b7506569b69806bdf06a | 21,241 |
def get_feed_list(feeds):
""" Return List of Proto Feed Object
"""
feeds_pb_list = [feeds_pb2.Feed(**_get_valid_fields_feed(feed)) for feed in feeds]
return feeds_pb2.FeedList(data=feeds_pb_list) | 6e79c563649aef60396f0c8944d3532fabc17bc0 | 21,242 |
def group_interpellet_interval_plot(FEDs, groups, kde, logx, **kwargs):
"""
FED3 Viz: Plot the interpellet intervals as a histogram, first aggregating
the values for devices in a Groups.
Parameters
----------
FEDs : list of FED3_File objects
FED3 files (loaded by load.FED3_File)
groups : list of strings
Groups to plot (based on the group attribute of each FED3_File)
kde : bool
Whether or not to include kernel density estimation, which plots
probability density (rather than count) and includes a fit line (see
seaborn.distplot)
logx : bool
When True, plots on a logarithmic x-axis
**kwargs :
ax : matplotlib.axes.Axes
Axes to plot on, a new Figure and Axes are
created if not passed
date_filter : array
A two-element array of datetimes (start, end) used to filter
the data
**kwargs also allows FED3 Viz to pass all settings to all functions.
Returns
-------
fig : matplotlib.figure.Figure
"""
if not isinstance(FEDs, list):
FEDs = [FEDs]
for FED in FEDs:
assert isinstance(FED, FED3_File),'Non FED3_File passed to interpellet_interval_plot()'
if 'ax' not in kwargs:
fig, ax = plt.subplots(figsize=(4,5), dpi=125)
else:
ax = kwargs['ax']
bins=[]
if logx:
lowest = -2
highest = 5
ax.set_xticks(range(lowest,highest))
ax.set_xticklabels([10**num for num in range(-2,5)])
c=0
while c <= highest:
bins.append(round(lowest+c,2))
c+=0.1
else:
ax.set_xticks([0,300,600,900])
div = 900/50
bins = [i*div for i in range(50)]
ax.set_xlim(-100,1000)
for group in groups:
all_vals = []
for FED in FEDs:
if group in FED.group:
df = FED.data
if 'date_filter' in kwargs:
s, e = kwargs['date_filter']
df = df[(df.index >= s) &
(df.index <= e)].copy()
y = list(df['Interpellet_Intervals'][df['Interpellet_Intervals'] > 0])
if logx:
y = [np.log10(val) for val in y if not pd.isna(val)]
all_vals += y
sns.distplot(all_vals,bins=bins,label=group,ax=ax,norm_hist=False,
kde=kde)
ax.legend(fontsize=8)
ylabel = 'Density Estimation' if kde else 'Count'
ax.set_ylabel(ylabel)
ax.set_xlabel('minutes between pellets')
ax.set_title('Interpellet Interval Plot')
plt.tight_layout()
return fig if 'ax' not in kwargs else None | 5c0ada4fdf71af7cfed8ffe7ec8b656c8984de9b | 21,243 |
def _beta(x, p):
"""Helper function for `pdf_a`, beta = pi * d(1 - omega(x), omega(p))."""
omega = _amplitude_to_angle
return np.pi * _circ_dist(1 - omega(x), omega(p)) | 9f0defbff0567ba8c181a9565570d0c7444ddc94 | 21,244 |
def set_reporting_max_width(w):
"""
Set the max width for reported parameters. This is used to that failures don't overflow
terminals in the event arguments are dumped.
:param w: The new max width to enforce for the module
:type w: int
:return: True
"""
_REPR_MAX_WIDTH[0] = int(w)
return True | 5da03b359fc823919bf2782907a0717c1d303a31 | 21,245 |
import re
def get_version():
"""Get LanguageTool version."""
version = _get_attrib().get('version')
if not version:
match = re.search(r"LanguageTool-?.*?(\S+)$", get_directory())
if match:
version = match.group(1)
return version | 1223b13b23eb4dadafbc5a3e8bf3b6e7f521ab5b | 21,246 |
def get_mnsp_offer_index(data) -> list:
"""Get MNSP offer index"""
interconnectors = (data.get('NEMSPDCaseFile').get('NemSpdInputs')
.get('PeriodCollection').get('Period')
.get('InterconnectorPeriodCollection')
.get('InterconnectorPeriod'))
# Container for offer index
offer_index = []
for i in interconnectors:
# Non-MNSP interconnectors do not have an MNSPOfferCollection attribute
if i.get('MNSPOfferCollection') is None:
continue
# Extract InterconnectorID and RegionID for each offer entry
for j in i.get('MNSPOfferCollection').get('MNSPOffer'):
offer_index.append((i['@InterconnectorID'], j['@RegionID']))
return offer_index | 46211e9a29f1fd1fd3148deaaaa064b6d6b05ca7 | 21,247 |
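# Usage sketch for get_mnsp_offer_index with a minimal hand-built case dictionary
# that mirrors the keys the function walks (illustrative only; real NEMDE case
# files contain far more data, and the IDs below are just plausible examples).
case = {
    'NEMSPDCaseFile': {'NemSpdInputs': {'PeriodCollection': {'Period': {
        'InterconnectorPeriodCollection': {'InterconnectorPeriod': [
            {'@InterconnectorID': 'T-V-MNSP1',
             'MNSPOfferCollection': {'MNSPOffer': [{'@RegionID': 'TAS1'},
                                                   {'@RegionID': 'VIC1'}]}},
            {'@InterconnectorID': 'VIC1-NSW1'},
        ]}}}}}}
print(get_mnsp_offer_index(case))  # [('T-V-MNSP1', 'TAS1'), ('T-V-MNSP1', 'VIC1')]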