content (string, lengths 35–762k) | sha1 (string, length 40) | id (int64, 0–3.66M) |
---|---|---|
import io
def inscription_summary(request, pk):
"""
Print a PDF summary of inscription
"""
candidat = get_object_or_404(Candidate, pk=pk)
buff = io.BytesIO()
pdf = InscriptionSummaryPDF(buff)
pdf.produce(candidat)
filename = slugify('{0}_{1}'.format(candidat.last_name, candidat.first_name)) + '.pdf'
buff.seek(0)
return FileResponse(buff, as_attachment=True, filename=filename)
|
aeedaaff96e3d08ca0dc503ac6641951a4fa804a
| 29,011 |
def hello(name='persona', lastname='exposito'):
    """Function that returns a greeting built from 'name' and 'lastname'.
    name: string,
    lastname: string,
    return: string,
    """
    if name != 'persona':
        return f'¿Como estas {name}?'
    return f'Hola {name} {lastname}'
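# Hedged usage sketch (added example, not part of the original snippet):
# exercises both branches of hello().
if __name__ == "__main__":
    print(hello())             # -> 'Hola persona exposito'
    print(hello(name='Ana'))   # -> '¿Como estas Ana?'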
|
537ffb6beab2ad2c65c4cbf556738c9be1a8467d
| 29,012 |
def get_num(prompt: str) -> float:
    """Prompt the user until the input can be parsed as a number."""
    while True:
        try:
            num = float(input(prompt))
            return num
        except ValueError as e:
            print(e)
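# Hedged usage sketch (added example, not part of the original snippet):
# get_num() keeps re-prompting until the input parses as a number.
if __name__ == "__main__":
    value = get_num("Enter a number: ")  # e.g. 'abc' re-prompts, '3.5' returns 3.5
    print(value)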
|
39fdc0588d20cc2268a98db0a729e7c6044e29a0
| 29,013 |
import random
def create_negative_mentions(doc, pos_mention_spans, neg_mention_count,
max_span_size, context_size, overlap_ratio=0.5):
""" Creates negative samples of entity mentions, i.e. spans that do not match a ground truth mention """
neg_dist_mention_spans, neg_dist_mention_sizes = [], []
neg_overlap_mention_spans, neg_overlap_mention_sizes = [], []
for sentence in doc.sentences:
sentence_token_count = len(sentence.tokens)
for size in range(1, max_span_size + 1):
for i in range(0, (sentence_token_count - size) + 1):
span = sentence.tokens[i:i + size].span
if span not in pos_mention_spans:
ov = False
# check if span is inside a ground truth span
for s1, s2 in pos_mention_spans:
if span[0] >= s1 and span[1] <= s2:
ov = True
break
if ov:
neg_overlap_mention_spans.append(span)
neg_overlap_mention_sizes.append(size)
else:
neg_dist_mention_spans.append(span)
neg_dist_mention_sizes.append(size)
# count of (inside) overlapping negative mentions and distinct negative mentions
overlap_neg_count = min(len(neg_overlap_mention_spans), int(neg_mention_count * overlap_ratio))
dist_neg_count = neg_mention_count - overlap_neg_count
# sample negative entity mentions
neg_overlap_mention_samples = random.sample(list(zip(neg_overlap_mention_spans, neg_overlap_mention_sizes)),
overlap_neg_count)
neg_overlap_mention_spans, neg_overlap_mention_sizes = zip(
*neg_overlap_mention_samples) if neg_overlap_mention_samples else ([], [])
neg_overlap_mention_masks = [create_span_mask(*span, context_size) for span in neg_overlap_mention_spans]
neg_dist_mention_samples = random.sample(list(zip(neg_dist_mention_spans, neg_dist_mention_sizes)),
min(len(neg_dist_mention_spans), dist_neg_count))
neg_dist_mention_spans, neg_dist_mention_sizes = zip(*neg_dist_mention_samples) if neg_dist_mention_samples else (
[], [])
neg_dist_mention_masks = [create_span_mask(*span, context_size) for span in neg_dist_mention_spans]
neg_mention_spans = list(neg_overlap_mention_spans) + list(neg_dist_mention_spans)
neg_mention_sizes = list(neg_overlap_mention_sizes) + list(neg_dist_mention_sizes)
neg_mention_masks = list(neg_overlap_mention_masks) + list(neg_dist_mention_masks)
return neg_mention_spans, neg_mention_sizes, neg_mention_masks
|
570c431a3a02505b3c81c2242ae4d3448a17f744
| 29,014 |
def show_item(item_id):
"""Show individual item."""
try:
item = db_session.query(Item).filter_by(id=item_id).one()
    except Exception:
flash("This item does not exist")
return redirect(url_for('index'))
# Make sure that user is authorised to see the item
if item.public or item.user_id == login_session.get('user_id'):
return render_template('showitem.html', item=item)
else:
flash("This item is not public and belongs to somebody else.")
return redirect(url_for('index'))
|
5502d904fa02af975af6a52f3c3892133d62061d
| 29,015 |
def yaw_from_pose(pose):
""" Extract the yaw (orientation) from a pose message. """
quat = np.array([pose.orientation.x, pose.orientation.y,
pose.orientation.z, pose.orientation.w])
euler = tf.transformations.euler_from_quaternion(quat)
return euler[2]
|
5112ef95693655af1dce50a2fa51de3d7c42006c
| 29,016 |
import logging
def create_influxdb_datasource_config(influxdb_parameters, cert, key) -> dict:
"""
:param influxdb_parameters: The retrieved InfluxDB parameter JSON
:param cert: The InfluxDB cert for HTTPS.
:param key: The InfluxDB key for HTTPS.
:return: data: The datasource JSON to add.
"""
data = {}
# InfluxDB port inside the container is always 8086 unless overridden inside the InfluxDB config
# We reference the InfluxDB container name in the provided URL instead of using localhost/127.0.0.1
# since this will be interpreted from inside the Grafana container
if influxdb_parameters['InfluxDBServerProtocol'] == HTTP_SERVER_PROTOCOL:
data = {
"name": DATA_SOURCE_NAME,
"type": DATA_SOURCE_TYPE,
"access": DATA_SOURCE_DIRECT_ACCESS,
"editable": False,
"url": "http://{}:{}".format(influxdb_parameters['InfluxDBContainerName'], INFLUXDB_CONTAINER_PORT),
"jsonData": {
"version": DATA_SOURCE_JSONDATA_VERSION,
"organization": influxdb_parameters['InfluxDBOrg'],
"defaultBucket": influxdb_parameters['InfluxDBBucket'],
},
"secureJsonData": {
"token": influxdb_parameters['InfluxDBToken']
}
}
elif influxdb_parameters['InfluxDBServerProtocol'] == HTTPS_SERVER_PROTOCOL:
data = {
"name": DATA_SOURCE_NAME,
"type": DATA_SOURCE_TYPE,
"access": DATA_SOURCE_PROXY_ACCESS,
"editable": False,
"url": "https://{}:{}".format(influxdb_parameters['InfluxDBContainerName'], INFLUXDB_CONTAINER_PORT),
"jsonData": {
"version": DATA_SOURCE_JSONDATA_VERSION,
"organization": influxdb_parameters['InfluxDBOrg'],
"defaultBucket": influxdb_parameters['InfluxDBBucket'],
"tlsSkipVerify": (influxdb_parameters['InfluxDBSkipTLSVerify'] == 'true'),
"tlsAuth": True,
"serverName": "https://{}:{}".format(influxdb_parameters['InfluxDBContainerName'],
INFLUXDB_CONTAINER_PORT)
},
"secureJsonData": {
"token": influxdb_parameters['InfluxDBToken'],
"tlsClientCert": cert,
"tlsClientKey": key
}
}
else:
logging.error("Received invalid InfluxDBServerProtocol! Should be http or https, but was: {}"
.format(influxdb_parameters['InfluxDBServerProtocol']))
logging.info("Generated InfluxDB datasource config")
return data
|
e57f26336ddad2c56919e776239edaba0fc2ebe7
| 29,017 |
def render_incrementals(iterable, **kwds):
"""helper function for simple incremental_expansion calls
:param iterable: sequence of items to incrementally stack
    :param kwds: options to pass to incremental_expansion
:return: a set of the rendered results from incremental_expansion
"""
s = set()
incremental_expansion(s, iterable, **kwds)
return s
|
d545fc237ba6ddaf59c20795cfae1f60197938fc
| 29,019 |
import re
def get_queues_labels(queue_labels_data):
"""Returns parsed data for main metrics.
Converts input string with raw data to a dictionary."""
queue_regexp = r'QUEUE\(([^)]+)\)'
curdepth_regexp = r'CURDEPTH\(([^)]+)\)'
maxdepth_regexp = r'MAXDEPTH\(([^)]+)\)'
queue_type_regexp = r'TYPE\(([^)]+)\)'
queues_labels = {}
for item in queue_labels_data.split('Display Queue details'):
if not item:
continue
queue = re.search(queue_regexp, item)
curdepth = re.search(curdepth_regexp, item)
maxdepth = re.search(maxdepth_regexp, item)
queue_type = re.search(queue_type_regexp, item)
if all(label is not None for label in [queue, curdepth, maxdepth, queue_type]):
queues_labels[queue.group(1)] = {
'curdepth': curdepth.group(1),
'maxdepth': maxdepth.group(1),
'type': queue_type.group(1)}
return queues_labels
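# Hedged usage sketch (added example, not part of the original snippet); the raw
# text below only mimics 'display queue' output, and the queue name is illustrative.
if __name__ == "__main__":
    raw = ("Display Queue details QUEUE(APP.Q1) TYPE(QLOCAL) "
           "CURDEPTH(3) MAXDEPTH(5000)")
    print(get_queues_labels(raw))
    # -> {'APP.Q1': {'curdepth': '3', 'maxdepth': '5000', 'type': 'QLOCAL'}}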
|
908aa435c95028d8f34fb35a6cb02da2aa7ca706
| 29,020 |
def is_auto(item):
"""
Checks if a parameter should be automatically determined
"""
if isinstance(item, float):
if item == 9999.9:
return True
elif isinstance(item, str):
if 'auto' in item.lower():
return True
return False
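# Hedged usage sketch (added example, not part of the original snippet).
if __name__ == "__main__":
    print(is_auto(9999.9))  # True: sentinel float means "determine automatically"
    print(is_auto('AUTO'))  # True: case-insensitive substring match
    print(is_auto(1.5))     # False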
|
fe6320adef43c51cdffd5b5d4a0bf34ac43d9c5a
| 29,021 |
from typing import Union
import numpy as np
def black_scholes_price(S: float,
K: Union[float, np.ndarray],
is_call: bool,
vol: Union[float, np.ndarray],
disc: float,
T: float,
div_disc: float = 1.0):
"""
    Price strikes of a common parity (i.e. only calls or only puts). Use black_scholes_price_strikes to price a mix of calls/puts
:param S: float, spot price
:param K: float or array, the Strike(s)
:param is_call: bool, determines if ALL strikes are call or all are put
:param vol: float or array, the Volatility(ies) ... if float, all strikes get same vol, else a vol smile
:param disc: float, the discount factor, e.g. 0.99
:param T: float, time to maturity of option
    :param div_disc: float, the dividend discount factor
:return: float or np.ndarray, same shape as strikes
"""
return black76_price(S * div_disc / disc, K, is_call, vol, disc, T)
|
888845c47fc7d5c17921f83321b6b966adfaaf33
| 29,022 |
def has_group_perm(user_level, obj, ctnr, action):
"""
Permissions for groups
Groups are assigned a subnet
"""
    if obj.subnet not in [ip_range.subnet for ip_range in ctnr.ranges.all()]:
return False
return {
'cyder_admin': True, #?
'ctnr_admin': action == 'view', #?
'user': action == 'view', #?
'guest': action == 'view',
}.get(user_level, False)
|
aa55bcf846389492734046b8d33c2676ed165763
| 29,023 |
import git
def get_tags_with_sha(repo_dir):
"""
_get_tags_with_sha_
Get list of tags for a repo and return a map of
tag:sha
"""
repo = git.Repo(repo_dir)
return {tag.name: tag.commit.hexsha for tag in repo.tags}
|
d1443ea8289f5588e29517e42da4564cbcc65177
| 29,024 |
from typing import List
from typing import Tuple
def get_reference_sample_name(sample_list: List[Tuple[str, str]]) -> str:
"""Gets the name from the reference sample, raising an exception if it does not exist in the sample table."""
for sample, sample_type in sample_list:
if sample_type == 'yes':
return sample
else:
raise NoReferenceError
|
5b702c1395fe9c19135279cee7deff870f4707c4
| 29,025 |
def form_check(form_field: BoundField, col: str = '') -> dict:
"""
    Build a Bootstrap checkbox from a Django form field.
    Features:
    - if validation found errors, show them
    - if the field has help text, show it
    - optionally pass a CSS class into the form-group for laying fields out in a grid (e.g. col, col-3, etc.)
    """
    # If the form has no such field, report it where it belongs
try:
attributes = form_field.field.widget.attrs
except AttributeError as e:
print('Form does not have such field: %s' % str(form_field))
raise e
    # List of styling CSS classes to attach to the input tag
css_classes = ['form-check-input']
    # Highlight invalid fields in red
if form_field.errors:
css_classes.append('is-invalid')
    # Keep any CSS classes that are already present
if 'class' in attributes:
css_classes.append(attributes['class'])
attributes['class'] = ' '.join(css_classes)
return {'col': col, 'form_field': form_field, 'help': str(form_field.help_text)}
|
0fbfe9a57c1e496f0726c0bcb65f8cf14a85000e
| 29,026 |
import fnmatch
def IncludeFiles(filters, files):
"""Filter files based on inclusion lists
    Return a list of files which match any of the Unix shell-style wildcards
    provided, or return all the files if no filter is provided."""
if not filters:
return files
match = set()
for filter in filters:
match |= set(fnmatch.filter(files, filter))
return [name for name in files if name in match]
|
03d9a6359cd1c88e496f8f8a047f08c868c7b976
| 29,027 |
import numpy as np
def make_fade_window_n(level_start, level_end, N_total, fade_start_end_idx=None):
    """
    Make a fade-in or fade-out window using information on sample amounts and not time.
    fade_start_end_idx defines between which start and stop indexes the fade happens.
    """
if not fade_start_end_idx:
fade_start_idx, fade_end_idx = 0, N_total
else:
fade_start_idx, fade_end_idx = fade_start_end_idx
N_fade = fade_end_idx - fade_start_idx
if N_fade < 0:
raise ValueError("Fade slice is reverse :(")
if N_total > 1:
k = 1 / (N_fade - 1)
fade_window = (level_start**2 + k * np.arange(N_fade) * (level_end**2 - level_start**2))**0.5
total_window = np.empty(N_total)
if fade_start_idx > 0:
# there are some frames in our output that come before the fade starts
total_window[:fade_start_idx].fill(level_start)
if fade_end_idx < N_total:
# there are some frames in our output that come after the fade ends
if fade_end_idx > 0:
total_window[fade_end_idx:].fill(level_end)
else:
total_window.fill(level_end)
if fade_start_idx < N_total and fade_end_idx > 0:
# some part of the fade window is falling into our [0:N_total] range
if fade_start_idx >= 0:
total_window[fade_start_idx:fade_end_idx] = fade_window[:N_total-fade_start_idx]
elif N_total > fade_end_idx:
# fade starts before our output starts and ends within our output
total_window[:fade_end_idx] = fade_window[(0 - fade_start_idx):(fade_end_idx-fade_start_idx)]
else:
# fade starts before our output starts and extends further then the end of our output
total_window[:] = fade_window[(0 - fade_start_idx):(N_total-fade_start_idx)]
elif N_total <= 1:
total_window = np.zeros(N_total)
else:
raise TypeError("Unknown fade type.")
return total_window
|
7566cc95eb58ee03e7cb90da5a03b7e24f8ea114
| 29,028 |
def translate_ethosu_tir_call_extern(tir_call_extern):
"""This is a dispatcher function to dispatch
correct translation call depending on the extern call's
first argument"""
supported_call_extern = {
"ethosu_conv2d": translate_ethosu_conv2d,
"ethosu_copy": translate_ethosu_copy,
"ethosu_depthwise_conv2d": translate_ethosu_depthwise_conv2d,
"ethosu_pooling": translate_ethosu_pooling,
}
ext_call_type = tir_call_extern.args[0].value
assert ext_call_type in supported_call_extern.keys(), f"{ext_call_type} is not yet supported"
npu_op = supported_call_extern[ext_call_type](tir_call_extern)
# Some conversions return additional outputs
# if they are needed, the caller should use the function directly
if isinstance(npu_op, tuple):
return npu_op[0]
return npu_op
|
b9cc6ce7b9db7d311695d7d4d1086f9faefca4f8
| 29,029 |
def _getSinglePosValueKey(valueRecord):
"""otBase.ValueRecord --> (2, ("YPlacement": 12))"""
assert isinstance(valueRecord, ValueRecord), valueRecord
valueFormat, result = 0, []
for name, value in valueRecord.__dict__.items():
if isinstance(value, ot.Device):
result.append((name, _makeDeviceTuple(value)))
else:
result.append((name, value))
valueFormat |= valueRecordFormatDict[name][0]
result.sort()
result.insert(0, valueFormat)
return tuple(result)
|
b49e60fdba99f2deb11c002d1632b34bd82226f0
| 29,030 |
def top_half(bbox=full_frame()):
"""Returns a bounding box covering the top half of ``bbox``."""
return make_bbox(bbox['x1'], bbox['y1'],
bbox['x2'], (bbox['y1'] + bbox['y2']) / 2.)
|
a005568dc4d8ca2a99120ca319e66ebf64371a89
| 29,031 |
def __command(client, command, default_method, use_bytes=False):
""" Private function supporting multiple command formats:
- string
- function
- (function, args, kwargs) """
if is_function(command):
return command(client)
elif is_list(command):
a, kw = (), {}
try:
command, a, kw = command
except ValueError:
pass
try:
command, a = command
except ValueError:
pass
return command(client, *a, **kw)
else:
return getattr(client, default_method)(b(command) if use_bytes else command)
|
ba6b46820756486c81b757da4b04a0e97c7c7cd7
| 29,032 |
def designate_node(fqdn, type):
    """
    Designate the node identified by ``fqdn`` as the given ``type``.
    """
execCmd = ' '.join([BDVAGENT, OPTION, 'designate', type, fqdn])
return executeCmd(execCmd)
|
e1f89a01c182bb1302c6997fb2a5461b09cd5319
| 29,033 |
def get_identity_for_user(user):
"""Get the Identity for the user specified via email or ID."""
identity = None
if user is not None:
# note: this seems like the canonical way to go
# 'as_user' can be either an integer (id) or email address
u = current_accounts.datastore.get_user(user)
if u is not None:
identity = get_identity(u)
else:
raise LookupError("user not found: %s" % user)
if identity is None:
identity = Identity(1)
identity.provides.add(any_user)
return identity
|
a7cdd089ab25ee67de21d22ab1811dca3fbab0df
| 29,035 |
import torch
def MetaOptNetHead_Ridge(query, support, support_labels, n_way, n_shot, lambda_reg=50.0, double_precision=False):
"""
Fits the support set with ridge regression and
returns the classification score on the query set.
Parameters:
query: a (tasks_per_batch, n_query, d) Tensor.
support: a (tasks_per_batch, n_support, d) Tensor.
support_labels: a (tasks_per_batch, n_support) Tensor.
n_way: a scalar. Represents the number of classes in a few-shot classification task.
n_shot: a scalar. Represents the number of support examples given per class.
lambda_reg: a scalar. Represents the strength of L2 regularization.
Returns: a (tasks_per_batch, n_query, n_way) Tensor.
"""
tasks_per_batch = query.size(0)
n_support = support.size(1)
n_query = query.size(1)
assert(query.dim() == 3)
assert(support.dim() == 3)
assert(query.size(0) == support.size(0) and query.size(2) == support.size(2))
assert(n_support == n_way * n_shot) # n_support must equal to n_way * n_shot
#Here we solve the dual problem:
#Note that the classes are indexed by m & samples are indexed by i.
#min_{\alpha} 0.5 \sum_m ||w_m(\alpha)||^2 + \sum_i \sum_m e^m_i alpha^m_i
#where w_m(\alpha) = \sum_i \alpha^m_i x_i,
#\alpha is an (n_support, n_way) matrix
kernel_matrix = computeGramMatrix(support, support)
kernel_matrix += lambda_reg * torch.eye(n_support).expand(tasks_per_batch, n_support, n_support).cuda()
block_kernel_matrix = kernel_matrix.repeat(n_way, 1, 1) #(n_way * tasks_per_batch, n_support, n_support)
block_kernel_matrix += 1.0 * torch.eye(n_support).expand(n_way * tasks_per_batch, n_support, n_support).cuda()
support_labels_one_hot = one_hot(support_labels.view(tasks_per_batch * n_support), n_way) # (tasks_per_batch * n_support, n_way)
support_labels_one_hot = support_labels_one_hot.transpose(0, 1) # (n_way, tasks_per_batch * n_support)
support_labels_one_hot = support_labels_one_hot.reshape(n_way * tasks_per_batch, n_support) # (n_way*tasks_per_batch, n_support)
G = block_kernel_matrix
e = -2.0 * support_labels_one_hot
    #This is a fake inequality constraint as qpth does not support QP without an inequality constraint.
id_matrix_1 = torch.zeros(tasks_per_batch*n_way, n_support, n_support)
C = Variable(id_matrix_1)
h = Variable(torch.zeros((tasks_per_batch*n_way, n_support)))
dummy = Variable(torch.Tensor()).cuda() # We want to ignore the equality constraint.
if double_precision:
G, e, C, h = [x.double().cuda() for x in [G, e, C, h]]
else:
G, e, C, h = [x.float().cuda() for x in [G, e, C, h]]
# Solve the following QP to fit SVM:
# \hat z = argmin_z 1/2 z^T G z + e^T z
# subject to Cz <= h
# We use detach() to prevent backpropagation to fixed variables.
qp_sol = QPFunction(verbose=False)(G, e.detach(), C.detach(), h.detach(), dummy.detach(), dummy.detach())
#qp_sol = QPFunction(verbose=False)(G, e.detach(), dummy.detach(), dummy.detach(), dummy.detach(), dummy.detach())
#qp_sol (n_way*tasks_per_batch, n_support)
qp_sol = qp_sol.reshape(n_way, tasks_per_batch, n_support)
#qp_sol (n_way, tasks_per_batch, n_support)
qp_sol = qp_sol.permute(1, 2, 0)
#qp_sol (tasks_per_batch, n_support, n_way)
# Compute the classification score.
compatibility = computeGramMatrix(support, query)
compatibility = compatibility.float()
compatibility = compatibility.unsqueeze(3).expand(tasks_per_batch, n_support, n_query, n_way)
qp_sol = qp_sol.reshape(tasks_per_batch, n_support, n_way)
logits = qp_sol.float().unsqueeze(2).expand(tasks_per_batch, n_support, n_query, n_way)
logits = logits * compatibility
logits = torch.sum(logits, 1)
return logits
|
07e83a8dd597a332c03f9ef01633c1c6007b9fc8
| 29,036 |
def getUserZaduzenja(userClass):
"""
    Return the items currently checked out by the given user.
"""
_result = []
for k,v in _rented.items():
if int(k) == userClass.GetCardNumber():
_result.append(v)
return _result
|
5dc236baaad480c7169724685d8c6035a9839a61
| 29,038 |
def add_testcase_properties(xml_obj, tcconfig=None):
"""add properties to testcases"""
if xml_obj.tag == "testsuites":
expression = "./testsuite/testcase"
else:
expression = "./testcase"
multile_test_ids = {}
for testcase in xml_obj.findall(expression):
tcproperties = et.Element('properties')
tcname, name = None, testcase.get('name')
if tcconfig.get(name):
tcname = name
elif tcconfig.get(name.lower()):
tcname = name.lower()
else:
continue
polarion_id = tcconfig[tcname]
test_id = ""
if len(polarion_id) == 1:
test_id = test_id.join(polarion_id)
attribs = {'name': 'polarion-testcase-id', 'value': test_id}
element = et.Element('property', attrib=attribs)
tcproperties.append(element)
else:
for i in range(len(polarion_id)-1):
xml_obj.append(deepcopy(testcase))
multile_test_ids[testcase.get('name')] = polarion_id
testcase.insert(0, tcproperties)
for key in multile_test_ids.keys():
for index, testcase in enumerate(xml_obj.findall(expression + "[@name='" + key + "']")):
if (index < len(multile_test_ids[testcase.get('name')])):
tcproperties = et.Element('properties')
test_id = ""
test_id = test_id.join(multile_test_ids[testcase.get('name')][index])
attribs = {'name': 'polarion-testcase-id', 'value': test_id}
element = et.Element('property', attrib=attribs)
tcproperties.append(element)
testcase.insert(0, tcproperties)
return xml_obj
|
ca8b59367e4cff027ca3eae81fd740a7bff121cd
| 29,039 |
def calc_rho(RhoRef,T,S,alpha=2.0E-4, beta=7.4E-4):
"""-----------------------------------------------------------------------------
calc_rho calculates the density profile using a linear equation of state.
    INPUT:
    RhoRef : reference density at the same z as the T and S slices. Can be a scalar or a
    vector, depending on the size of T and S.
    T, S : should be 4D arrays
    alpha = 2.0E-4  # 1/degC, thermal expansion coefficient
    beta = 7.4E-4   # haline expansion coefficient
OUTPUT:
rho - Density [nz]
-----------------------------------------------------------------------------"""
rho = RhoRef*np.ones(np.shape(T)) - alpha*(T[:]) + beta*(S[:])
return rho
|
844a324481fccf2e695caac282a6f8711546c38f
| 29,040 |
import gettext
def get_request_details(request_id=None, srp_request=None):
"""Handles responding to all of the :py:class:`~.models.Request` detail
functions.
The various modifier functions all depend on this function to create the
actual response content.
Only one of the arguments is required. The ``srp_request`` argument is a
    convenience to other functions calling this function that have already
retrieved the request.
:param int request_id: the ID of the request.
:param srp_request: the request.
:type srp_request: :py:class:`~.models.Request`
"""
if srp_request is None:
srp_request = Request.query.get_or_404(request_id)
# A user should always be able to access their own requests, but others
# need fresh sessions.
if current_user != srp_request.submitter and not login_fresh():
return login_manager.needs_refresh()
# Different templates are used for different roles
if current_user.has_permission(PermissionType.review,
srp_request.division):
template = 'request_review.html'
elif current_user.has_permission(PermissionType.pay, srp_request.division):
template = 'request_pay.html'
elif current_user == srp_request.submitter or current_user.has_permission(
PermissionType.audit):
template = 'request_detail.html'
else:
abort(403)
if request.is_json or request.is_xhr:
return jsonify(**srp_request._json(True))
if request.is_xml:
return xmlify('request.xml', srp_request=srp_request)
return render_template(template, srp_request=srp_request,
modifier_form=ModifierForm(formdata=None),
payout_form=PayoutForm(formdata=None),
action_form=ActionForm(formdata=None),
void_form=VoidModifierForm(formdata=None),
details_form=ChangeDetailsForm(formdata=None, obj=srp_request),
note_form=AddNote(formdata=None),
# TRANS: Title for the page showing the details about a single
# TRANS: SRP request.
title=gettext(u"Request #%(request_id)s",
request_id=srp_request.id))
|
e47197af84c88f59b866e0476da32055091dbb2b
| 29,041 |
import numpy
from typing import Tuple
from typing import Optional
from typing import Dict
from typing import Any
import copy
import itertools
def packSpecialData(
data: numpy.ndarray, paramName: str
) -> Tuple[Optional[numpy.ndarray], Dict[str, Any]]:
"""
Reduce data that wouldn't otherwise play nicely with HDF5/numpy arrays to a format
that will.
This is the main entry point for conforming "strange" data into something that will
both fit into a numpy array/HDF5 dataset, and be recoverable to its original-ish
state when reading it back in. This is accomplished by detecting a handful of known
offenders and using various HDF5 attributes to store necessary auxiliary data. It is
important to keep in mind that the data that is passed in has already been converted
to a numpy array, so the top dimension is always representing the collection of
composites that are storing the parameters. For instance, if we are dealing with a
Block parameter, the first index in the numpy array of data is the block index; so
if each block has a parameter that is a dictionary, ``data`` would be a ndarray,
where each element is a dictionary. This routine supports a number of different
"strange" things:
* Dict[str, float]: These are stored by finding the set of all keys for all
instances, and storing those keys as a list in an attribute. The data themselves
are stored as arrays indexed by object, then key index. Dictionaries lacking data
    for a key store a nan in its place. This will work well in instances where most
objects have data for most keys.
* Jagged arrays: These are stored by concatenating all of the data into a single,
one-dimensional array, and storing attributes to describe the shapes of each
object's data, and an offset into the beginning of each object's data.
* Arrays with ``None`` in them: These are stored by replacing each instance of
``None`` with a magical value that shouldn't be encountered in realistic
scenarios.
Parameters
----------
data
An ndarray storing the data that we want to stuff into the database. These are
usually dtype=Object, which is how we usually end up here in the first place.
paramName
The parameter name that we are trying to store data for. This is mostly used for
diagnostics.
See Also
--------
unpackSpecialData
"""
# Check to make sure that we even need to do this. If the numpy data type is
# not "O", chances are we have nice, clean data.
if data.dtype != "O":
return data, {}
attrs: Dict[str, Any] = {"specialFormatting": True}
# make a copy of the data, so that the original is unchanged
data = copy.copy(data)
# find locations of Nones. The below works for ndarrays, whereas `data == None`
# gives a single True/False value
nones = numpy.where([d is None for d in data])[0]
if len(nones) == data.shape[0]:
# Everything is None, so why bother?
return None, attrs
if len(nones) > 0:
attrs["nones"] = True
# XXX: this whole if/then/elif/else can be optimized by looping once and then
# determining the correct action
# A robust solution would need
# to do this on a case-by-case basis, and re-do it any time we want to
# write, since circumstances may change. Not only that, but we may need
    # to perform more than one of these operations to get to an array
# that we want to put in the database.
if any(isinstance(d, dict) for d in data):
# we're assuming that a dict is {str: float}. We store the union of
# all of the keys for all of the objects as a special "keys"
# attribute, and store a value for all of those keys for all
# objects, whether or not there is actually data associated with
# that key (storing a nan when no data). This makes for a simple
# approach that is somewhat digestible just looking at the db, and
# should be quite efficient in the case where most objects have data
# for most keys.
attrs["dict"] = True
keys = sorted({k for d in data for k in d})
data = numpy.array([[d.get(k, numpy.nan) for k in keys] for d in data])
if data.dtype == "O":
# The data themselves are nasty. We could support this, but best to wait for
# a credible use case.
raise TypeError(
"Unable to coerce dictionary data into usable numpy array for "
"{}".format(paramName)
)
attrs["keys"] = numpy.array(keys).astype("S")
return data, attrs
# conform non-numpy arrays to numpy
for i, val in enumerate(data):
if isinstance(val, (list, tuple)):
data[i] = numpy.array(val)
if not any(isinstance(d, numpy.ndarray) for d in data):
# looks like 1-D plain-old-data
data = replaceNonesWithNonsense(data, paramName, nones)
return data, attrs
# check if data is jagged
candidate = next((d for d in data if d is not None))
shape = candidate.shape
ndim = candidate.ndim
isJagged = (
not all(d.shape == shape for d in data if d is not None) or candidate.size == 0
)
if isJagged:
assert all(
val.ndim == ndim for val in data if val is not None
), "Inconsistent dimensions in jagged array for: {}\nDimensions: {}".format(
paramName, [val.ndim for val in data if val is not None]
)
attrs["jagged"] = True
# offsets[i] is the index of the zero-th element of sub-array i
offsets = numpy.array(
[0]
+ list(
itertools.accumulate(val.size if val is not None else 0 for val in data)
)[:-1]
)
# shapes[i] is the shape of the i-th sub-array. Nones are represented by all
# zeros
shapes = numpy.array(
list(val.shape if val is not None else ndim * (0,) for val in data)
)
data = numpy.delete(data, nones)
data = numpy.concatenate(data, axis=None)
attrs["offsets"] = offsets
attrs["shapes"] = shapes
attrs["noneLocations"] = nones
return data, attrs
if any(isinstance(d, (tuple, list, numpy.ndarray)) for d in data):
data = replaceNonesWithNonsense(data, paramName, nones)
return data, attrs
if len(nones) == 0:
raise TypeError(
"Cannot write {} to the database, it did not resolve to a numpy/HDF5 "
"type.".format(paramName)
)
runLog.error("Data unable to find special none value: {}".format(data))
raise TypeError("Failed to process special data for {}".format(paramName))
|
ec07057f4c38b8169cd1bc04ee19dd79a585a050
| 29,042 |
def swapaxes(x, axis1, axis2):
"""Swap two axes of a variable.
Args:
x (~chainer.Variable): Input variable.
axis1 (int): The first axis to swap.
axis2 (int): The second axis to swap.
Returns:
~chainer.Variable: Variable whose axes are swapped.
"""
return Swapaxes(axis1, axis2)(x)
|
274f6960fb463e7660b0bf6e963f51957ab3b728
| 29,043 |
import numpy as np
from PIL import Image, ImageDraw
def add_header(img, labels, mark_midpoints=True, header_height=20):
"""Adds labels to the image, evenly distributed across the top.
This is primarily useful for showing the names of channels.
Args:
img: A PIL Image.
labels: list of strs. Labels for segments to write across the top.
mark_midpoints: bool. Whether to add a small vertical line marking the
center of each segment of the image.
header_height: int. Height of the header in pixels.
Returns:
A new PIL Image, taller than the original img and annotated.
"""
# Create a taller image to make space for a header at the top.
new_height = header_height + img.size[1]
new_width = img.size[0]
if img.mode == 'RGB':
placeholder_size = (new_height, new_width, 3)
else:
placeholder_size = (new_height, new_width)
placeholder = np.ones(placeholder_size, dtype=np.uint8) * 255
# Divide the image width into segments.
segment_width = img.size[0] / len(labels)
# Calculate midpoints for all segments.
midpoints = [int(segment_width * (i + 0.5)) for i in range(len(labels))]
if mark_midpoints:
# For each label, add a small line to mark the middle.
for x_position in midpoints:
placeholder[header_height - 5:header_height, x_position] = 0
# If image has an even width, it will need 2 pixels marked as the middle.
if segment_width % 2 == 0:
placeholder[header_height - 5:header_height, x_position + 1] = 0
bigger_img = Image.fromarray(placeholder, mode=img.mode)
# Place the original image inside the taller placeholder image.
bigger_img.paste(img, (0, header_height))
# Add a label for each segment.
draw = ImageDraw.Draw(bigger_img)
for i in range(len(labels)):
text = labels[i]
text_width = draw.textsize(text)[0]
# xy refers to the left top corner of the text, so to center the text on
# the midpoint, subtract half the text width from the midpoint position.
x_position = int(midpoints[i] - text_width / 2)
draw.text(xy=(x_position, 0), text=text, fill='black')
return bigger_img
|
f3d8d69009dd72f7e7e9c0305b248492cdc076fc
| 29,044 |
def predict_by_lr_model(test_feature, lr_model):
"""
    predict by lr_model (calls the sklearn model's instance method)
    """
    result_list = [] # stores the probability of label 1 for each sample
    prob_list = lr_model.predict_proba(test_feature)
    for index in range(len(prob_list)):
        result_list.append(prob_list[index][1]) # index 0 holds the probability of label 0, index 1 the probability of label 1
return result_list
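# Hedged usage sketch (added example, not part of the original snippet): fits a
# tiny scikit-learn LogisticRegression and scores two samples with the helper above.
if __name__ == "__main__":
    from sklearn.linear_model import LogisticRegression
    X, y = [[0.0], [1.0], [2.0], [3.0]], [0, 0, 1, 1]
    model = LogisticRegression().fit(X, y)
    print(predict_by_lr_model([[0.5], [2.5]], model))  # two P(label=1) values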
|
03ea185aa4398e8ccb7449d9e32006dd391e9c13
| 29,045 |
def _default_schedule(outs):
"""Default schedule for gpu."""
outs = [outs] if isinstance(outs, tvm.tensor.Tensor) else outs
s = tvm.create_schedule([x.op for x in outs])
scheduled_ops = []
def traverse(op):
if tag.is_broadcast(op.tag) or op.tag in ['bbox_score', 'sorted_bbox']:
schedule_injective_from_existing(s, op.output(0))
for tensor in op.input_tensors:
if tensor.op.input_tensors and tensor.op not in scheduled_ops:
traverse(tensor.op)
scheduled_ops.append(op)
traverse(outs[0].op)
return s
|
b2e328e04e2ad522580e8966f66eb52d4f4b0d4d
| 29,046 |
def get_track_reviewer_abstract_counts(event, user):
"""Get the numbers of abstracts per track for a specific user.
Note that this does not take into account if the user is a
reviewer for a track; it just checks whether the user has
reviewed an abstract in a track or not.
:return: A dict mapping tracks to dicts containing the counts.
"""
# COUNT() does not count NULL values so we pass NULL in case an
# abstract is not in the submitted state. That way we still get
# the track - filtering using WHERE would only include tracks
# that have some abstract in the submitted state.
count_total = db.func.count(Abstract.id)
count_reviewable = db.func.count(db.case({AbstractState.submitted.value: Abstract.id}, value=Abstract.state))
count_reviewable_reviewed = db.func.count(db.case({AbstractState.submitted.value: AbstractReview.id},
value=Abstract.state))
count_total_reviewed = db.func.count(AbstractReview.id)
query = (Track.query.with_parent(event)
.with_entities(Track,
count_total,
count_total_reviewed,
count_reviewable - count_reviewable_reviewed)
.outerjoin(Track.abstracts_reviewed)
.outerjoin(AbstractReview, db.and_(AbstractReview.abstract_id == Abstract.id,
AbstractReview.user_id == user.id,
AbstractReview.track_id == Track.id))
.group_by(Track.id))
return {track: {'total': total, 'reviewed': reviewed, 'unreviewed': unreviewed}
for track, total, reviewed, unreviewed in query}
|
c73b00bd641e20c6b6f3c1e51173b6162e8f76ee
| 29,047 |
def fit_logistic(X, w, var_prior, X_test, initial_phi):
"""MAP logistic regression.
Input: X - (D + 1) * I training data matrix, where D is the dimensionality
and I is the number of training examples.
w - I * 1 vector containing world states for each example.
var_prior - scale factor for the prior spherical covariance.
X_test - test examples for which we need to make predictions.
initial_phi - (D + 1) * 1 vector that represents the initial solution.
Output: predictions - 1 * I_test row vector containing the predicted class values for
the input data in X_test.
phi - D + 1 row vector containing the coefficients for the
linear activation function.
"""
# Find the MAP estimate of the parameters phi
phi = optimize.minimize(
_fit_logr_cost,
initial_phi.reshape(initial_phi.size),
args=(X, w, var_prior),
method="Newton-CG",
jac=_fit_logr_jac,
hess=_fit_logr_hess
).x
predictions = sigmoid(phi @ X_test)
return (predictions, phi)
|
ba425ca35cbb1fe5359019ed46d4756daa62779b
| 29,048 |
from typing import Optional
from enum import Enum
def middle_drag_and_drop(
element_path1: UI_Element,
element_path2: UI_Element,
duration: Optional[float] = None,
mode: Enum = MoveMode.linear,
timeout: Optional[float] = None) -> UI_Element:
"""
Drags and drop with middle button pressed from element_path1 to element_path2.
:param element_path1: element path
:param element_path2: element path
:param duration: duration in seconds of the mouse move (if duration is -1 the mouse cursor doesn't move)
:param mode: move mouse mode
:param timeout: period of time in seconds that will be allowed to find the element
:return: Pywinauto wrapper with element_path2
"""
move(element_path1, duration=duration, mode=mode, timeout=timeout)
win32api_mouse_event(MOUSEEVENTF_MIDDLEDOWN, 0, 0)
unique_element = move(element_path2, duration=duration, mode=mode, timeout=timeout)
win32api_mouse_event(MOUSEEVENTF_MIDDLEUP, 0, 0)
return unique_element
|
f7325683af4b0891e68add86a0efc6abc9b5044d
| 29,049 |
def get_pi0(pv, lambdas):
"""
Compute Storey's C{pi0} from p-values C{pv} and C{lambda}.
this function is equivalent to::
m = len(pv)
return [sum(p >= l for p in pv)/((1.0-l) * m) for l in lambdas]
    but the above is C{O(m*n)}, while it need only be C{O(m+n)
(n = len(lambdas))}
@type pv: list
@param pv: B{SORTED} p-values vector
@type lambdas: list
@param lambdas: B{SORTED} lambda values vector
@rtype: list
@return: estimated proportion of null hypotheses C{pi0} for each lambda
"""
m = len(pv)
i = m - 1
pi0 = []
for l in reversed(lambdas):
while i >= 0 and pv[i] >= l:
i -= 1
pi0.append((m-i-1)/((1.0-l)*m))
pi0.reverse()
return pi0
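# Hedged usage sketch (added example, not part of the original snippet): both
# inputs must already be sorted, as the docstring requires.
if __name__ == "__main__":
    pvals = [0.01, 0.04, 0.20, 0.50, 0.95]
    lambdas = [0.0, 0.5, 0.8]
    print(get_pi0(pvals, lambdas))  # one pi0 estimate per lambda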
|
4937aaa13ce3f5dda18fa3669e535c4d88be3ad6
| 29,050 |
def read_accelerometer(serial, calibration):
"""
Reads the raw values from the Arduino, parses them into separate variables
and uses the calibration data to normalize the data
Args:
serial: a reference to the serial connection with the Arduino
calibration: a reference to the calibration object that holds the
values from the accelerometer calibration process
Returns:
(x_cal, y_cal, z_cal): a tuple of the normalized data
"""
components = serial.read_str()
# parses the string from the Arduino into three separate variables
x_raw, y_raw, z_raw = tuple(map(float, components.split(',')))
# normalizes the data using the calibration information
x_cal = (x_raw - calibration.offset[0]) / (calibration.gain[0])
y_cal = (y_raw - calibration.offset[1]) / (calibration.gain[1])
z_cal = (z_raw - calibration.offset[2]) / (calibration.gain[2])
return (x_cal, y_cal, z_cal)
|
3c5537e2a017f57dca8dccd24c2ba083a9c47345
| 29,051 |
def get_access(name):
"""Get access based on name
    By this function's convention, dunder names (__var__) count as private access,
    a single leading underscore (_var) as protected access,
    and any other name as public access
"""
assert isinstance(name, str), "Expecting name to be a string"
if len(name) > 4 and "__" == name[:2] and "__" == name[-2:]:
return "PRIVATE"
elif len(name) > 1 and name[0] == "_":
return "PROTECTED"
else:
return "PUBLIC"
|
ffe072ed1820ce0536533a5882af1e1270780744
| 29,052 |
def run_query_series(queries, conn):
"""
Iterates through a list of queries and runs them through the connection
Args:
-----
queries: list of strings or tuples containing (query_string, kwargs)
conn: the triplestore connection to use
"""
results = []
for item in queries:
qry = item
kwargs = {}
if isinstance(item, tuple):
qry = item[0]
kwargs = item[1]
result = conn.update_query(qry, **kwargs)
# pdb.set_trace()
results.append(result)
return results
|
7a3e920663222b57233e9a01d1b3cacb039a02eb
| 29,053 |
def _get_key(block_id, block_dict, extra_args):
"""
Given a dictionary, return an element by ``key``.
block_id:
Block id
block_dict:
key: (Mandatory)
Key value to get
starting_dict: (Optional)
Starting dictionary param
extend_chained: (Default True)
By default, ``chained`` will have ``.update()`` called on it with
``starting_dict`` as the only argument. Set ``extend_chained`` to False
to ignore ``starting_dict``.
:param extra_args:
Extra argument dictionary, (If any)
Example: {'chaining_args': {'result': "Output", 'status': True},
'caller': 'Audit'}
The first return value (status) will be True if the key is found, and
    False otherwise. The second argument will be the value found by the key or
None if the key is not present in the dictionary.
"""
chained = runner_utils.get_chained_param(extra_args)
key = runner_utils.get_param_for_module(block_id, block_dict, 'key')
starting_dict = runner_utils.get_param_for_module(block_id, block_dict, 'starting_dict')
update_chained = runner_utils.get_param_for_module(block_id, block_dict, 'update_chained', True)
if update_chained and starting_dict:
try:
chained.update(starting_dict)
except (TypeError, ValueError):
log.error("Arguments should be of type dict.", exc_info=True)
return runner_utils.prepare_negative_result_for_module(block_id, 'invalid_format')
try:
ret = chained[key]
except KeyError:
log.error("Key not found: %s", key, exc_info=True)
return runner_utils.prepare_negative_result_for_module(block_id, 'key_not_found')
return False, None
except TypeError:
log.error("Arguments should be of type dict.", exc_info=True)
return runner_utils.prepare_negative_result_for_module(block_id, 'invalid_format')
status = bool(ret)
if not status:
return runner_utils.prepare_negative_result_for_module(block_id, 'unknown_error')
return runner_utils.prepare_positive_result_for_module(block_id, ret)
|
1c966f9d4966ff9eaafde60fce13f88a176def3d
| 29,054 |
def AP(predictions, scores):
"""
Computes the average precision per class, the average precision and the interpolated average precision at 11 points
:param predictions: list of lists of every class with tp, fp and fn. fps are zeros, the others one, indicating this is a ground truth
    :param scores: confidence scores with the same lengths
:return: mAPs a classes x 2 matrix, first entry is without interpolation.
The average precision and the interpolated average precision at 11 points
"""
#recall levels
recalls = np.arange(0,1.1,0.1)
#average precisions over all classes
prec = np.zeros_like(recalls)
#average interpolated precision over all classes
iprec = np.zeros_like(recalls)
#average precision
ap = np.zeros((len(predictions), 2))
# count = 0
for i in range(len(predictions)):
# print("predictions ",len(predictions[i]))
# print("scores ",len(scores[i]))
#if this is dummy class with no predictions and gts
if len(predictions[i]) == 0:
ap[i,0] = 0
ap[i,1] = 0
else:
#sort zipped lists
zipped = zip(predictions[i], scores[i])
spreds_and_scores = sorted(zipped, key=lambda x: x[1], reverse=True)
#unzip
spreds, sscores = zip(*spreds_and_scores)
# print("spreds ", spreds)
# print("sscores ",sscores)
#get the indices of gts
npos = [ t[0] for t in enumerate(spreds) if t[1] > 0 ]
#count gts
N = len(npos)
# print("len(npos) ",len(npos))
#compute the precisions at every gt
nprec = np.arange(1,N+1) / (np.array(npos)+1)
# print("nprec ",nprec)
#store the mean
ap[i, 0] = np.mean(nprec)
# print("ap[i,0] ",ap[i, 0])
#interpolated precisions
inprec = np.zeros_like(nprec)
# try :
#maximum
mx = nprec[-1]
inprec[-1] = mx
#go backwards through precisions and check if current precision is bigger than last max
for j in range(len(npos)-2, -1, -1):
if nprec[j] > mx:
mx = nprec[j]
inprec[j] = mx
#mean of interpolated precisions
ap[i,1] = np.mean(inprec)
#get 11 indices
idx = (np.concatenate( (np.zeros((1)), np.maximum(np.zeros(10), np.around((N-1)/(10) * np.arange(1,11))-1)))).astype(int)
iprec += inprec[idx]
prec += nprec[idx]
# except:
# count +=1
# pass
# print('count ',count)
# print("APS\n",ap)
return ap, prec / len(predictions), iprec / len(predictions)
|
bc8b98d91153487261a76c07befe4cce6211d0d3
| 29,055 |
def _sample_optimization_test_problems(
rng):
"""Sample an optimization test function problem."""
is_noise = utils.sample_bool(rng, 0.5)
return {
"problem":
rng.choice(sorted(_opt_test_problems.keys())),
"noise_stdev":
utils.sample_log_float(rng, 0.01, 10.0) if is_noise else 0.0,
}
|
47edaa72d9d0f43e3c9c67684b686f5abbeb74d8
| 29,056 |
def get_domain_id_field(domain_table):
"""
A helper function to create the id field
:param domain_table: the cdm domain table
:return: the id field
"""
return domain_table + '_id'
|
5805da82b4e57d14d4105d92a62cf4b5cc4bc3f2
| 29,057 |
def construct(symbol, strategy, chains, **kwargs):
"""
This is a convenience method to allow for creation of option spreads
from predefined sources.
:param symbol: The symbol of the option chains
:param strategy: The option strategy filter to use
:param chains: Option chains data to use. This data should come from data.get() method
:param kwargs: Parameters used to construct the spreads
:return:
"""
# wrap dataframes into OptionSeries object to be used in backtest
spread_chains = strategy(chains, **kwargs)
return OptionSeries(symbol, strategy.__name__, spread_chains, **kwargs)
|
99ff1d3e9cb6a4718fc06e767904a95340ca9b6f
| 29,058 |
def get_delete_query(table_name: str) -> str:
"""Build a SQL query to delete a RDF triple from a MVCC-PostgreSQL table.
Argument: Name of the SQL table from which the triple will be deleted.
Returns: A prepared SQL query that can be executed with a tuple (subject, predicate, object).
"""
return f"""UPDATE {table_name} SET delete_t = transaction_timestamp()
WHERE subject = %s
AND predicate = %s
AND md5(object) = md5(%s)
AND delete_t = 'infinity'::timestamp"""
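# Hedged usage sketch (added example, not part of the original snippet); the table
# name 'triples' is illustrative. The returned string is meant to be executed with
# a (subject, predicate, object) parameter tuple, e.g. via a psycopg2 cursor.
if __name__ == "__main__":
    print(get_delete_query("triples"))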
|
9ff605f77caa7e8d2e6a7ea1e2b82c22821c86dd
| 29,059 |
from itertools import chain
def _parse_ipv6(a):
"""
Parse IPv6 address. Ideally we would use the ipaddress module in
Python3.3 but can't rely on having this.
Does not handle dotted-quad addresses or subnet prefix
>>> _parse_ipv6("::") == (0,) * 16
True
>>> _parse_ipv6("1234:5678::abcd:0:ff00")
(18, 52, 86, 120, 0, 0, 0, 0, 0, 0, 171, 205, 0, 0, 255, 0)
"""
l, _, r = a.partition("::")
l_groups = list(chain(*[divmod(int(x, 16), 256) for x in l.split(":") if x]))
r_groups = list(chain(*[divmod(int(x, 16), 256) for x in r.split(":") if x]))
zeros = [0] * (16 - len(l_groups) - len(r_groups))
return tuple(l_groups + zeros + r_groups)
|
9a999df1cd352fe175801cbe4476f2080cc5d37d
| 29,060 |
def weighted(weights,metric='categorical_accuracy'):
""" weighted metric
Args:
- weights<list>:
* a weight for each value in y_true
- metric<str|metric>:
* snake-case strings will be turned to camel-case
* if metric is not a string the passed metric will be returned
with the assumption that it is already a keras metric class
Return: (weighted) metric instance
"""
metric=get(metric)()
def _weighted_metric(y_true,y_pred):
sample_weight=tf.reduce_sum(weights*y_true, axis=-1)
metric.reset_states()
metric.update_state(y_true,y_pred,sample_weight=sample_weight)
return metric.result()
_weighted_metric.name=f'{metric.name}-w'
return _weighted_metric
|
ffb356a178c50c517485d126c90724b5bc408ad2
| 29,061 |
from typing import Tuple
def compute_reg_strs(product_graph: TwoPlayerGraph,
coop_str: bool = False,
epsilon: float = -1) -> Tuple[list, dict, TwoPlayerGraph]:
"""
    A method to compute strategies. We control the env's behavior by making it purely cooperative, purely adversarial, or
    epsilon-greedy.
@param coop_str: Set this to be true for purely cooperative behavior from the env
@param epsilon: Set this value to be 0 for purely adversarial behavior or with epsilon probability human picks
random actions.
"""
payoff = payoff_factory.get("cumulative", graph=product_graph)
# build an instance of regret strategy minimization class
reg_syn_handle = RegMinStrSyn(product_graph, payoff)
reg_str, reg_val = reg_syn_handle.edge_weighted_arena_finite_reg_solver(minigrid_instance=None,
purge_states=True,
plot=False)
twa_game = reg_syn_handle.graph_of_alternatives
_init_state = twa_game.get_initial_states()[0][0]
for _n in twa_game._graph.successors(_init_state):
print(f"Reg Val: {_n}: {reg_val[_n]}")
# the reg str is dict that one from one state to another. Lets convert this to print a sequence of edge actions
_next_state = reg_str[_init_state]
_action_seq = []
_action_seq.append(twa_game._graph[_init_state][_next_state][0].get("actions"))
if coop_str:
# compute cooperative strs for the player
_coop_str_dict = compute_cooperative_actions_for_env(twa_game)
_max_coop_actions: int = 1
# print(f"{_init_state}: {reg_val[_init_state]}")
# print(f"{_next_state}: {reg_val[_init_state]}")
while _next_state is not None:
_curr_state = _next_state
if twa_game.get_state_w_attribute(_curr_state, attribute="player") == "eve":
_next_state = reg_str.get(_curr_state)
else:
if _max_coop_actions <= 10:
_next_state = _coop_str_dict[_curr_state]
# only increase the counter when the human moves
_max_coop_actions += 1
else:
_next_state = reg_str.get(_curr_state)
if _next_state is not None:
_edge_act = twa_game._graph[_curr_state][_next_state][0].get("actions")
if _action_seq[-1] != _edge_act:
_action_seq.append(twa_game._graph[_curr_state][_next_state][0].get("actions"))
# print(f"{_next_state}: {reg_val[_init_state]}")
elif 0 <= epsilon <= 1:
# we randomise human strategies
_new_str_dict = compute_epsilon_str_dict(epsilon=epsilon,
reg_str_dict=reg_str,
max_human_int=3, twa_game=twa_game)
while _next_state is not None:
_curr_state = _next_state
# if twa_game.get_state_w_attribute(_curr_state, attribute="player") == "eve":
_next_state = _new_str_dict.get(_curr_state)
# else:
# _new
if _next_state is not None:
_edge_act = twa_game._graph[_curr_state][_next_state][0].get("actions")
if _action_seq[-1] != _edge_act:
_action_seq.append(twa_game._graph[_curr_state][_next_state][0].get("actions"))
for _action in _action_seq:
print(_action)
return _action_seq, reg_val, twa_game
|
99d3632ed6d692c1531a4420f794cd84df5c3264
| 29,062 |
import torch
from typing import Tuple
import functools
def make_tanh_warp_grid(matrix: torch.Tensor, warp_factor: float,
warped_shape: Tuple[int, int],
orig_shape: Tuple[int, int]):
"""
Args:
matrix: bx4x4 matrix.
        warp_factor: The warping factor. `warp_factor=1.0` represents a vanilla Tanh-warping,
        `warp_factor=0.0` represents a cropping.
        warped_shape: The target image shape to transform to.
        orig_shape: The original image shape.
Returns:
torch.Tensor: b x h x w x 2 (x, y).
"""
orig_h, orig_w, *_ = orig_shape
w_h = torch.tensor([orig_w, orig_h]).to(matrix).reshape(1, 1, 1, 2)
return _forge_grid(
matrix.size(0), matrix.device,
warped_shape,
functools.partial(inverted_tanh_warp_transform,
matrix=matrix,
warp_factor=warp_factor,
warped_shape=warped_shape)) / w_h*2-1
|
3f1226ea19006c5cbf683deff15a120ffb7c7af6
| 29,063 |
def insertion_sort(arr):
"""
Returns the list 'arr' sorted in nondecreasing order in O(n^2) time.
"""
for i in range(1,len(arr)):
key = arr[i]
j = i-1
while j >= 0 and arr[j] > key:
arr[j+1] = arr[j]
j = j-1
arr[j+1] = key
return arr
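# Hedged usage sketch (added example, not part of the original snippet).
if __name__ == "__main__":
    print(insertion_sort([5, 2, 4, 6, 1, 3]))  # -> [1, 2, 3, 4, 5, 6]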
|
cafd83cd31cbadcbc0a5c3aaff7d21f3ae907083
| 29,064 |
def tokens_refresh_post(body=None, project_name=None, scope=None): # noqa: E501
"""Refresh tokens for an user
Request to refresh OAuth tokens for an user # noqa: E501
:param body:
:type body: dict | bytes
:param project_name: Project Name
:type project_name: str
:param scope: Scope for which token is requested
:type scope: str
:rtype: Success
"""
if connexion.request.is_json:
body = Request.from_dict(connexion.request.get_json()) # noqa: E501
return rc.tokens_refresh_post(body=body, project_name=project_name, scope=scope)
|
58e59e54cf53ecf23ca2e45f62b00094adfb49ab
| 29,065 |
from xml.dom import minidom
def load_xml_data(path, start_node="header", search_node="name"):
"""
load the XML data
"""
retval = []
fetched_xml = minidom.parse(path)
item_list = fetched_xml.getElementsByTagName(start_node)
for value in item_list:
retval.append(value.attributes[search_node].value)
return retval
|
6a281b2b9ce3fac22ec32afa151438c8259b4f94
| 29,066 |
def saturationcheck(thermal_data,startframe,sat_threshold=0.9):
""" Determine the fraction of thermal_data that is saturated
on or after startframe (0 based). It is assumed the highest
temperature recorded in thermal_data for a particular pixel
is the saturation value and that the thermal data has already
been background-subtracted.
    A pixel is defined as saturated if it exceeds sat_threshold*max_for_that_pixel(thermal_data)
Returns a tuple containing a number between 0 and 1 representing
the fraction of valid pixels (not identically 0, not infinite, not NaN)
that are saturated, followed by a saturation map
"""
saturation_levels = np.max(thermal_data,axis=0)
saturated = np.sum(thermal_data[startframe:,:,:] > sat_threshold*saturation_levels[np.newaxis,:,:],axis=0) > 0
valid = np.isfinite(saturation_levels) & (saturation_levels != 0.0)
fraction_saturated = np.count_nonzero(saturated)*1.0/np.count_nonzero(valid)
return (fraction_saturated,(saturated & valid))
|
d706c14c9ce59ad738cf6817240c5095664ad866
| 29,067 |
def make_image_justification(g, doc_id, boundingbox, system, confidence,
uri_ref=None):
"""
Marks a justification for something appearing in an image.
:param rdflib.graph.Graph g: The underlying RDF model
:param str doc_id: A string containing the document element (child) ID of
the source of the justification
:param Bounding_Box boundingbox: A rectangular box
within the image that bounds the justification
:param rdflib.term.URIRef system: The system object for the system which made
this justification
:param float confidence: The confidence with which to mark the justification
:param str uri_ref: A string URI representation of the image justification
(Default is None)
:returns: The created image justification resource
:rtype: rdflib.term.BNode
"""
justification = _make_aif_justification(
g, doc_id, AIDA_ANNOTATION.ImageJustification, system, confidence,
uri_ref)
mark_boundingbox(g, justification, boundingbox)
return justification
|
8561c8b890cd53318fa3e76ce66fd3ec7e204858
| 29,068 |
def update_u_gates(drag_params, pi2_pulse_schedules=None,
qubits=None, cmd_def=None, drives=None):
"""
Update the cmd_def with new single qubit gate values
Will update U2, U3
Args:
drag_params: list of drag params
pi2_pulse_schedules: list of new pi/2 gate as a pulse schedule
will use the drag_params if this is None
qubits: list of qubits to update
cmd_def: CmdDef object for the device
drives: List of drive chs
Returns:
updated cmd_def
"""
# U2 is -P1.Y90p.-P0
# U3 is -P2.X90p.-P0.X90m.-P1
def parametrized_fc(kw_name, phi0, chan, t_offset):
def _parametrized_fc(**kwargs):
return FrameChange(phase=-kwargs[kw_name]+phi0)(chan) << t_offset
return _parametrized_fc
for qubit in qubits:
drive_ch = drives[qubit]
if pi2_pulse_schedules is None:
x90_pulse = pulse_lib.drag(**drag_params[qubit])
x90_pulse = Schedule(x90_pulse(drive_ch))
else:
x90_pulse = pi2_pulse_schedules[qubit]
pulse_dur = x90_pulse.duration
# find channel dependency for u2
for _u2_group in _find_channel_groups('u2',
qubits=qubit,
cmd_def=cmd_def):
if drive_ch in _u2_group:
break
else:
_u2_group = (drive_ch, )
u2_fc1s = [parametrized_fc('P1', -np.pi/2, ch, 0)
for ch in _u2_group]
u2_fc2s = [parametrized_fc('P0', np.pi/2, ch, pulse_dur)
for ch in _u2_group]
# find channel dependency for u2
for _u3_group in _find_channel_groups('u3',
qubits=qubit,
cmd_def=cmd_def):
if drive_ch in _u3_group:
break
else:
_u3_group = (drive_ch, )
u3_fc1s = [parametrized_fc('P2', 0, ch, 0) for ch in _u3_group]
u3_fc2s = [parametrized_fc('P0', np.pi, ch, pulse_dur)
for ch in _u3_group]
u3_fc3s = [parametrized_fc('P1', -np.pi, ch, 2*pulse_dur)
for ch in _u3_group]
# add commands to schedule
# u2
schedule1 = ParameterizedSchedule(*[*u2_fc1s,
x90_pulse,
*u2_fc2s],
parameters=['P0', 'P1'],
name='u2_%d' % qubit)
# u3
schedule2 = ParameterizedSchedule(*[*u3_fc1s,
x90_pulse,
*u3_fc2s,
x90_pulse << pulse_dur,
*u3_fc3s],
parameters=['P0', 'P1', 'P2'],
name='u3_%d' % qubit)
cmd_def.add(cmd_name='u2', qubits=qubit, schedule=schedule1)
cmd_def.add(cmd_name='u3', qubits=qubit, schedule=schedule2)
|
f5719f9d14c0f37d347629207484e200b0934b1e
| 29,069 |
import select
def make_acc_fun(network_apply_fun, num_outputs=1):
""" Given a network function and number of outputs, returns an accuracy
function """
if num_outputs == 1:
prediction_function = lambda x: (x >= 0.).astype(jnp.int32)
else:
prediction_function = lambda x: x.argmax(axis=-1).astype(jnp.int32)
@jax.jit
def accuracy_fun(params, batch):
all_time_logits = network_apply_fun(params, batch['inputs'])
end_logits = select(all_time_logits, batch['index'] - 1)
predictions = jnp.squeeze(prediction_function(end_logits))
accuracies = (batch['labels'] == predictions).astype(jnp.int32)
return jnp.mean(accuracies)
return accuracy_fun
|
977d18c7b3309abee5ab3233d5355a72712f00dd
| 29,071 |
from typing import Union
from typing import Dict
from typing import Any
from typing import List
from typing import Tuple
def upsert_all(
engine: Engine,
table: Table,
data: Union[Dict[str, Any], List[Dict[str, Any]]],
) -> Tuple[int, int]:
"""
Update data by primary key columns. If not able to update, do insert.
Example::
# define data model
t_user = Table(
"users", metadata,
Column("id", Integer, primary_key=True),
Column("name", String),
)
# suppose in database we already have {"id": 1, "name": "Alice"}
data = [
{"id": 1, "name": "Bob"}, # this will be updated
{"id": 2, "name": "Cathy"}, # this will be added
]
update_count, insert_count = upsert_all(engine, t_user, data)
print(update_count) # number of row updated counter
print(insert_count) # number of row inserted counter
# will return: [{"id": 1, "name": "Bob"}, {"id": 2, "name": "Cathy"}]
with engine.connect() as connection:
print(connection.execute(select([table_user])).all())
    **Notes**
    Bulk update by primary key: if the table defines a primary key, the WHERE clause is
    constrained by it; rows the WHERE clause cannot match are bulk inserted automatically.
"""
return update_all(engine=engine, table=table, data=data, upsert=True)
|
070c0091bdbef01459c0d8a46340e5997bdf0a34
| 29,073 |
def index():
"""Root route test"""
return "Weights route"
|
c2a609f067a8155f16bd2a638a7a5c9f399a1575
| 29,074 |
from typing import Optional
def get_backend_health(backend_name: Optional[str] = None,
backend_set_name: Optional[str] = None,
network_load_balancer_id: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetBackendHealthResult:
"""
This data source provides details about a specific Backend Health resource in Oracle Cloud Infrastructure Network Load Balancer service.
Retrieves the current health status of the specified backend server.
## Example Usage
```python
import pulumi
import pulumi_oci as oci
test_backend_health = oci.networkloadbalancer.get_backend_health(backend_name=oci_network_load_balancer_backend["test_backend"]["name"],
backend_set_name=oci_network_load_balancer_backend_set["test_backend_set"]["name"],
network_load_balancer_id=oci_network_load_balancer_network_load_balancer["test_network_load_balancer"]["id"])
```
:param str backend_name: The name of the backend server for which to retrieve the health status, specified as <ip>:<port> or as <ip> <OCID>:<port>. Example: `10.0.0.3:8080` or `ocid1.privateip..oc1.<var><unique_ID></var>:8080`
:param str backend_set_name: The name of the backend set associated with the backend server for which to retrieve the health status. Example: `example_backend_set`
:param str network_load_balancer_id: The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the network load balancer to update.
"""
__args__ = dict()
__args__['backendName'] = backend_name
__args__['backendSetName'] = backend_set_name
__args__['networkLoadBalancerId'] = network_load_balancer_id
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('oci:networkloadbalancer/getBackendHealth:getBackendHealth', __args__, opts=opts, typ=GetBackendHealthResult).value
return AwaitableGetBackendHealthResult(
backend_name=__ret__.backend_name,
backend_set_name=__ret__.backend_set_name,
health_check_results=__ret__.health_check_results,
id=__ret__.id,
network_load_balancer_id=__ret__.network_load_balancer_id,
status=__ret__.status)
|
6bd328878a45fa128e13bbe2fd6cd9d2c3f80129
| 29,076 |
def calculate_velocity(start_longitude, start_latitude, start_time,
end_longitude, end_latitude, end_time):
"""
    Finds the magnitude of the velocity, in kilometers per hour.
"""
distance_traveled = calculate_distance(start_longitude, start_latitude,
end_longitude, end_latitude)
duration = calculate_duration_hours(start_time, end_time)
if duration == 0:
raise ValueError("Cannot calculate an average velocity when the time"
"interval is 0.")
return distance_traveled / duration
|
1a36fce9340de4fc6271a9d3bc3515b3b8806293
| 29,078 |
def get_raw_html_pbp(season, game):
"""
Loads the html file containing this game's play by play from disk.
:param season: int, the season
:param game: int, the game
:return: str, the html pbp
"""
with open(get_game_pbplog_filename(season, game), 'r') as reader:
page = reader.read()
return page
|
b2bee91be984e8d6782ee0bfabb32fc9e43dc4f4
| 29,079 |
def prepare_inputs_by_partition(
df,
partition_col,
split_date,
categorical_cols=None,
output_col=0,
lookback=12,
num_predictions=12,
):
"""
Lags, splits and normalizes a dataframe based around a partition.
"""
partitions = df[partition_col].unique()
scalers = {}
train_x = None
test_x = None
train_y = None
test_y = None
testset_by_partition = {}
for partition in partitions:
df_part = df.loc[df[partition_col] == partition].copy()
if categorical_cols is None:
df_cat_train = None
df_cat_test = None
else:
train_split_index = find_split_index(df_part, split_date)
df_cat_train = df_part.iloc[
:train_split_index, categorical_cols
].values.astype(np.float32)
df_cat_test = df_part.iloc[
train_split_index:, categorical_cols
].values.astype(np.float32)
df_part.drop(df_part.columns[categorical_cols], axis=1, inplace=True)
df_part.drop([partition_col], axis=1, inplace=True)
scaler, tr_x, te_x, tr_y, te_y = lag_normalize_split(
df_part,
split_date,
output_col=output_col,
lookback=lookback,
num_predictions=num_predictions,
)
scalers[partition] = scaler
testset_by_partition[partition] = {
"test_x": te_x
if df_cat_test is None
else [te_x, df_cat_test[0 : len(te_x)]],
"test_y": remove_last_dim(te_y),
}
if train_x is None:
train_x = tr_x
test_x = te_x
train_y = tr_y
test_y = te_y
            if df_cat_train is not None:
train_x_cat = df_cat_train[: len(tr_x)]
test_x_cat = df_cat_test[: len(te_x)]
else:
train_x = np.concatenate((train_x, tr_x))
test_x = np.concatenate((test_x, te_x))
train_y = np.concatenate((train_y, tr_y))
test_y = np.concatenate((test_y, te_y))
            if df_cat_train is not None:
train_x_cat = np.concatenate((train_x_cat, df_cat_train[: len(tr_x)]))
test_x_cat = np.concatenate((test_x_cat, df_cat_test[: len(te_x)]))
return (
scalers,
train_x if df_cat_train is None else [train_x, train_x_cat],
test_x if df_cat_test is None else [test_x, test_x_cat],
remove_last_dim(train_y),
remove_last_dim(test_y),
testset_by_partition,
)
|
95bdd21be019d7ed9f27797d8268c43c64d2c22e
| 29,080 |
from astropy.io import fits
def read_fits_data(filename, dtype="float32", **kwargs):
""" Read fits image into numpy array.
Args:
        filename (str): The name of the file to read.
dtype (str, optional): The data type for the array. Default: float32.
**kwargs: Parsed to fits.getdata.
Returns:
np.array: The image array.
"""
return fits.getdata(filename, **kwargs).astype(dtype)
|
d8e5fa22c636cfa263b0acfdabfa642e87ef486a
| 29,081 |
def streamplot(UV, ax=None, map=None, geodata=None, drawlonlatlines=False,
basemap_resolution='l', cartopy_scale="50m", lw=0.5,
cartopy_subplot=(1,1,1), axis="on", **kwargs):
"""Function to plot a motion field as streamlines.
Parameters
----------
UV : array-like
Array of shape (2, m,n) containing the input motion field.
ax : axis object
Optional axis object to use for plotting.
map : {'basemap', 'cartopy'}, optional
Optional method for plotting a map: 'basemap' or 'cartopy'. The former
uses `mpl_toolkits.basemap`_, while the latter uses cartopy_.
geodata : dictionary
Optional dictionary containing geographical information about the field.
If geodata is not None, it must contain the following key-value pairs:
drawlonlatlines : bool, optional
If set to True, draw longitude and latitude lines. Applicable if map is
'basemap' or 'cartopy'.
basemap_resolution : str, optional
The resolution of the basemap, see the documentation of
`mpl_toolkits.basemap`_.
Applicable if map is 'basemap'.
cartopy_scale : {'10m', '50m', '110m'}, optional
The scale (resolution) of the map. The available options are '10m',
'50m', and '110m'. Applicable if map is 'cartopy'.
lw: float, optional
Linewidth of the map (administrative boundaries and coastlines).
cartopy_subplot : tuple or SubplotSpec_ instance, optional
Cartopy subplot. Applicable if map is 'cartopy'.
axis : {'off','on'}, optional
Whether to turn off or on the x and y axis.
.. tabularcolumns:: |p{1.5cm}|L|
+-----------------+----------------------------------------------------+
| Key | Value |
+=================+====================================================+
| projection | PROJ.4-compatible projection definition |
+-----------------+----------------------------------------------------+
| x1 | x-coordinate of the lower-left corner of the data |
| | raster (meters) |
+-----------------+----------------------------------------------------+
| y1 | y-coordinate of the lower-left corner of the data |
| | raster (meters) |
+-----------------+----------------------------------------------------+
| x2 | x-coordinate of the upper-right corner of the data |
| | raster (meters) |
+-----------------+----------------------------------------------------+
| y2 | y-coordinate of the upper-right corner of the data |
| | raster (meters) |
+-----------------+----------------------------------------------------+
| yorigin | a string specifying the location of the first |
| | element in the data raster w.r.t. y-axis: |
| | 'upper' = upper border, 'lower' = lower border |
+-----------------+----------------------------------------------------+
Other Parameters
----------------
density : float
Controls the closeness of streamlines.
Default : 1.5
color : string
Optional streamline color. This is a synonym for the PolyCollection
facecolor kwarg in matplotlib.collections.
Default : black
Returns
-------
out : axis object
Figure axes. Needed if one wants to add e.g. text inside the plot.
"""
if map is not None and geodata is None:
raise ValueError("map!=None but geodata=None")
# defaults
density = kwargs.get("density", 1.5)
color = kwargs.get("color", "black")
# prepare x y coordinates
reproject = False
if geodata is not None:
x = np.linspace(geodata['x1'], geodata['x2'], UV.shape[2]) + geodata["xpixelsize"]/2.0
y = np.linspace(geodata['y1'], geodata['y2'], UV.shape[1]) + geodata["ypixelsize"]/2.0
extent = (geodata['x1'],geodata['x2'], geodata['y1'],geodata['y2'])
# check geodata and project if different from axes
if ax is not None and map is None:
if type(ax).__name__ == 'GeoAxesSubplot':
try:
ccrs = utils.proj4_to_cartopy(geodata["projection"])
except UnsupportedSomercProjection:
# Define fall-back projection for Swiss data(EPSG:3035)
# This will work reasonably well for Europe only.
t_proj4str = "+proj=laea +lat_0=52 +lon_0=10 +x_0=4321000 +y_0=3210000 +ellps=GRS80 +units=m +no_defs"
reproject = True
elif type(ax).__name__ == 'Basemap':
utils.proj4_to_basemap(geodata["projection"])
if reproject:
geodata = utils.reproject_geodata(geodata, t_proj4str, return_grid="coords")
extent = (geodata['x1'],geodata['x2'], geodata['y1'],geodata['y2'])
X, Y = geodata["X_grid"], geodata["Y_grid"]
else:
x = np.arange(UV.shape[2])
y = np.arange(UV.shape[1])
if not reproject:
X,Y = np.meshgrid(x,y)
# draw basemaps
if map is not None:
try:
ax = basemaps.plot_geography(map, geodata["projection"],
extent, UV.shape[1:], drawlonlatlines, basemap_resolution,
cartopy_scale, lw, cartopy_subplot)
except UnsupportedSomercProjection:
# Define default fall-back projection for Swiss data(EPSG:3035)
# This will work reasonably well for Europe only.
t_proj4str = "+proj=laea +lat_0=52 +lon_0=10 +x_0=4321000 +y_0=3210000 +ellps=GRS80 +units=m +no_defs"
geodata = utils.reproject_geodata(geodata, t_proj4str, return_grid="coords")
extent = (geodata['x1'],geodata['x2'], geodata['y1'],geodata['y2'])
X, Y = geodata["X_grid"], geodata["Y_grid"]
ax = basemaps.plot_geography(map, geodata["projection"],
extent, UV.shape[1:], drawlonlatlines, basemap_resolution,
cartopy_scale, lw, cartopy_subplot)
else:
ax = plt.gca()
# plot streamplot
ax.streamplot(x, np.flipud(y), UV[0,:,:], -UV[1,:,:], density=density,
color=color, zorder=1e6)
if geodata is None or axis == "off":
axes = plt.gca()
axes.xaxis.set_ticks([])
axes.xaxis.set_ticklabels([])
axes.yaxis.set_ticks([])
axes.yaxis.set_ticklabels([])
return plt.gca()
|
bd634a5602ae0ec2a95a0e9d918a872d91691296
| 29,082 |
import random
import cv2
import numpy as np
def random_add_text(new_canvas: np.ndarray):
"""
:param new_canvas: RGBA image.
:return RGBA image.
"""
# font
font_list = [
cv2.FONT_HERSHEY_SIMPLEX,
cv2.FONT_HERSHEY_PLAIN,
cv2.FONT_HERSHEY_DUPLEX,
cv2.FONT_HERSHEY_COMPLEX,
cv2.FONT_HERSHEY_TRIPLEX,
cv2.FONT_HERSHEY_COMPLEX_SMALL,
cv2.FONT_HERSHEY_SCRIPT_SIMPLEX,
cv2.FONT_HERSHEY_SCRIPT_COMPLEX,
cv2.FONT_ITALIC,
]
font = font_list[random.randint(0, len(font_list) - 1)]
    font_string = random_string(slen=random.randint(4, 7))  # random_string: assumed project-local helper
font_size = random.uniform(1, 2)
font_thickness = random.randint(1, 2)
# position
p1 = random.randint(120, 200)
p21 = random.randint(50, 100)
p22 = random.randint(420, 450)
if random.uniform(0, 1) < 0.5:
p2 = p21
else:
p2 = p22
# color
c1 = random.randint(0, 255)
c2 = random.randint(0, 255)
c3 = random.randint(0, 255)
new_canvas = cv2.putText(
new_canvas,
font_string,
(p1, p2),
font,
font_size,
(c1, c2, c3),
font_thickness,
bottomLeftOrigin=False,
)
return new_canvas
|
dc5f0a63ed8ccfd6cd2aee798f8b70706c6019d0
| 29,083 |
import torch
def len_to_mask(len_seq, max_len=None):
"""len to mask"""
if max_len is None:
max_len = torch.max(len_seq)
mask = torch.zeros((len_seq.size(0), max_len))
for i, l in enumerate(len_seq):
mask[i, :l] = 1
return mask
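# Minimal usage demo: rows are 1 up to each sequence length and 0 afterwards.
if __name__ == "__main__":
    print(len_to_mask(torch.tensor([2, 4, 1])))
    # tensor([[1., 1., 0., 0.],
    #         [1., 1., 1., 1.],
    #         [1., 0., 0., 0.]])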
|
58cf7b3de6e28a541531ce5a6f516671493b5e08
| 29,084 |
def get_games_for_steamid(steamid: str) -> set[tuple[int, str]]:
"""Gets the games owned by a Steam user
Parameters
----------
steamid : str
The user's 64-bit Steam ID
Returns
-------
set[tuple[int, str]]
The set of games this user owns, in tuples of appid and game name
"""
body = steam_request('IPlayerService/GetOwnedGames/v0001', params={
'include_appinfo': True, 'steamid': steamid
})
return set((game['appid'], game['name']) for game in body['response']['games'])
|
7edbf85fcb5a337681c1aa21bb54948be9122fa2
| 29,085 |
from typing import Callable
from typing import Any
import functools
def normalize(how: str) -> Callable[..., Any]:
"""Apply a row or column normalization to a pandas DataFrame argument.
Parameters
----------
how : str
        The normalization method to apply. Can be one of {'row', 'column', 'minmax', 'median_column', 'quantile_column'}.
        'row': Normalize each row by dividing it by its sum.
        'column': Normalize each column by dividing it by its sum.
        'minmax': Apply a min-max normalization.
        'median_column': Scale each column by subtracting the median value.
        'quantile_column': Apply a quantile normalization across columns.
Returns
-------
Callable[..., Any]
The wrapped function.
"""
def normalize_wrap(func: Callable[..., Any]) -> Callable[..., Any]:
"""Apply a row or column normalization to a pandas DataFrame argument.
Parameters
----------
func: Callable[..., Any] :
The input function.
Returns
-------
Callable[..., Any]
The wrapped function.
"""
@functools.wraps(func)
def wrapped_func(*args: str, **kwargs: str) -> Any:
if how.lower() in set(["row", "r"]):
apply_func = lambda df: df.apply(lambda x: x / x.sum(), axis=1)
elif how.lower() in set(["column", "col", "c"]):
apply_func = lambda df: df.apply(lambda x: x / x.sum(), axis=0)
elif how.lower() in set(["minmax", "min-max", "min_max"]):
apply_func = lambda df: (df - df.min()) / (df.max() - df.min())
elif how.lower() in set(["median", "median_column", "median_col"]):
apply_func = lambda df: median_normalize(df)
elif how.lower() in set(["quantile", "quantile_column", "quantile_col"]):
apply_func = lambda df: quantile_normalize(df)
else:
raise ValueError(
"Invalid input value for 'how'. Needs to be one of {'row', 'colum', 'minmax'}."
)
filter_func = lambda arg: type(arg) == pd.DataFrame
args = override_args(args=args, func=apply_func, filter=filter_func)
kwargs = override_kwargs(kwargs=kwargs, func=apply_func, filter=filter_func)
return_value = func(*args, **kwargs)
return return_value
return wrapped_func
return normalize_wrap
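# Usage sketch (illustrative only; the decorator relies on this module's
# override_args/override_kwargs helpers): every DataFrame argument is
# normalized before the wrapped function sees it.
#   @normalize(how="row")
#   def row_sums(df: pd.DataFrame) -> pd.Series:
#       return df.sum(axis=1)  # all 1.0 after row normalization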
|
5e837d0e8754ef4d2fc42d0b9484f1f699fb3b67
| 29,087 |
from typing import Union
import cv2
import numpy as np
def resize_image(
image: np.ndarray,
target_shape: Union[list, tuple],
ground_truth: np.ndarray = None,
):
"""
@param `image`: Dim(height, width, channels)
@param `target_shape`: (height, width, ...)
@param `ground_truth`: [[center_x, center_y, w, h, class_id], ...]
@return resized_image or (resized_image, resized_ground_truth)
Usage:
image = media.resize_image(image, yolo.input_size)
image, ground_truth = media.resize_image(image, yolo.input_size,
ground_truth)
"""
height, width, _ = image.shape
target_height = target_shape[0]
target_width = target_shape[1]
if width / height >= target_width / target_height:
scale = target_width / width
else:
scale = target_height / height
# Resize
if scale != 1:
width = int(round(width * scale))
height = int(round(height * scale))
resized_image = cv2.resize(image, (width, height))
else:
resized_image = np.copy(image)
# Pad
dw = target_width - width
dh = target_height - height
if not (dw == 0 and dh == 0):
dw = dw // 2
dh = dh // 2
# height, width, channels
padded_image = np.full(
(target_height, target_width, 3), 255, dtype=np.uint8
)
padded_image[dh : height + dh, dw : width + dw, :] = resized_image
else:
padded_image = resized_image
if ground_truth is None:
return padded_image
# Resize ground truth
ground_truth = np.copy(ground_truth)
if dw > dh:
scale = width / target_width
ground_truth[:, 0] = scale * (ground_truth[:, 0] - 0.5) + 0.5
ground_truth[:, 2] = scale * ground_truth[:, 2]
elif dw < dh:
scale = height / target_height
ground_truth[:, 1] = scale * (ground_truth[:, 1] - 0.5) + 0.5
ground_truth[:, 3] = scale * ground_truth[:, 3]
return padded_image, ground_truth
|
8a3474728546605a462f4ddbab551af0afa675f5
| 29,090 |
def _get_security_item(security_type, exchanges, code=None):
"""
get the security item.
Parameters
----------
code : str
the security code,default: None
security_type : str
the security type
exchanges : list
the exchanges
Returns
-------
DataFrame
the security item
"""
df = get_security_list(security_type=security_type, exchanges=exchanges)
if not df.empty:
df = df.set_index(df['code'])
return df.loc[code,]
return None
|
a0c8b555dbdf3c5fa1b5f7b69fceeb7bea4f9c8f
| 29,091 |
def generate_launch_description():
"""Launch file for training node to training network."""
return LaunchDescription([
DeclareLaunchArgument(
'yaml_file',
default_value=[get_default_file_path('template.yaml')],
description='Parameter file for experiment.'
),
DeclareLaunchArgument(
'n_workers',
default_value=[''],
description='Number of workers that the Server node will oversee.'
),
DeclareLaunchArgument(
'output_file',
default_value=[''],
description='Name of output file for neural network model'
),
DeclareLaunchArgument(
'policy_type',
default_value=['DQN'],
description='Policy worker will use for training.'
),
Node(
package='mrt_server',
executable='server_async_node',
name='server_node',
output='screen',
parameters=[LaunchConfiguration('yaml_file')],
arguments=[
LaunchConfiguration('n_workers'),
LaunchConfiguration('output_file'),
LaunchConfiguration('policy_type'),
]
),
])
|
ca388302307fcef3e57d672ef3375751a297c9a3
| 29,092 |
def plot_density(
data,
data_labels=None,
var_names=None,
credible_interval=0.94,
point_estimate="mean",
colors="cycle",
outline=True,
hpd_markers="",
shade=0.0,
bw=4.5,
figsize=None,
textsize=None,
):
"""Generate KDE plots for continuous variables and histograms for discrete ones.
Plots are truncated at their 100*(1-alpha)% credible intervals. Plots are grouped per variable
and colors assigned to models.
Parameters
----------
data : obj
Any object that can be converted to an az.InferenceData object
Refer to documentation of az.convert_to_dataset for details
data_labels : list[str]
List with names for the samples in the list of datasets. Useful when
plotting more than one trace.
var_names: list, optional
List of variables to plot (defaults to None, which results in all
variables plotted).
credible_interval : float
Credible intervals. Defaults to 0.94.
point_estimate : str or None
Plot point estimate per variable. Values should be 'mean', 'median' or None.
Defaults to 'mean'.
colors : list or string, optional
List with valid matplotlib colors, one color per model. Alternative a string can be passed.
        If the string is `cycle`, it will automatically choose a color per model from matplotlib's
cycle. If a single color is passed, e.g. 'k', 'C2' or 'red' this color will be used for all
models. Defaults to `cycle`.
outline : boolean
Use a line to draw KDEs and histograms. Default to True
hpd_markers : str
A valid `matplotlib.markers` like 'v', used to indicate the limits of the hpd interval.
Defaults to empty string (no marker).
shade : float
Alpha blending value for the shaded area under the curve, between 0 (no shade) and 1
(opaque). Defaults to 0.
bw : float
Bandwidth scaling factor for the KDE. Should be larger than 0. The higher this number the
smoother the KDE will be. Defaults to 4.5 which is essentially the same as the Scott's rule
of thumb (the default rule used by SciPy).
figsize : tuple
Figure size. If None it will be defined automatically.
textsize: float
Text size scaling factor for labels, titles and lines. If None it will be autoscaled based
on figsize.
Returns
-------
ax : Matplotlib axes
"""
var_names = _var_names(var_names)
if not isinstance(data, (list, tuple)):
datasets = [convert_to_dataset(data, group="posterior")]
else:
datasets = [convert_to_dataset(d, group="posterior") for d in data]
if point_estimate not in ("mean", "median", None):
raise ValueError(
"Point estimate should be 'mean'," "median' or None, not {}".format(point_estimate)
)
n_data = len(datasets)
if data_labels is None:
if n_data > 1:
data_labels = ["{}".format(idx) for idx in range(n_data)]
else:
data_labels = [""]
elif len(data_labels) != n_data:
raise ValueError(
"The number of names for the models ({}) "
"does not match the number of models ({})".format(len(data_labels), n_data)
)
if colors == "cycle":
colors = ["C{}".format(idx % 10) for idx in range(n_data)]
elif isinstance(colors, str):
colors = [colors for _ in range(n_data)]
to_plot = [list(xarray_var_iter(data, var_names, combined=True)) for data in datasets]
all_labels = []
length_plotters = []
for plotters in to_plot:
length_plotters.append(len(plotters))
for var_name, selection, _ in plotters:
label = make_label(var_name, selection)
if label not in all_labels:
all_labels.append(label)
length_plotters = max(length_plotters)
rows, cols = default_grid(length_plotters, max_cols=3)
(figsize, _, titlesize, xt_labelsize, linewidth, markersize) = _scale_fig_size(
figsize, textsize, rows, cols
)
fig, ax = _create_axes_grid(length_plotters, rows, cols, figsize=figsize, squeeze=False)
axis_map = {label: ax_ for label, ax_ in zip(all_labels, ax.flatten())}
for m_idx, plotters in enumerate(to_plot):
for var_name, selection, values in plotters:
label = make_label(var_name, selection)
_d_helper(
values.flatten(),
label,
colors[m_idx],
bw,
titlesize,
xt_labelsize,
linewidth,
markersize,
credible_interval,
point_estimate,
hpd_markers,
outline,
shade,
axis_map[label],
)
if n_data > 1:
for m_idx, label in enumerate(data_labels):
ax[0].plot([], label=label, c=colors[m_idx], markersize=markersize)
ax[0].legend(fontsize=xt_labelsize)
fig.tight_layout()
return ax
|
1d0ef2ea506a6c6013844aa7d43ca867332903ca
| 29,093 |
def _vivid_light(a, b):
"""
:type a: ImageMath._Operand
:type b: ImageMath._Operand
:rtype: ImageMath._Operand
"""
color_burn = _color_burn(a, b * 2)
color_dodge = _color_dodge(a, 2 * (b - 128))
return color_burn * (b < 128) + color_dodge * (b >= 128)
|
7465a4441dd05b0e6d83acaee6a9c387654899fb
| 29,094 |
from requests.cookies import RequestsCookieJar
def normalize_cookies(cookies):
"""Takes cookies from Selenium or from Python Requests and
converts them to dict.
This throws away information that Selenium otherwise has (like the host and
such), but a dict is essentially all we need.
"""
requests_cookies = {}
if type(cookies) == list:
# Selenium cookies
for cookie in cookies:
requests_cookies[cookie["name"]] = cookie["value"]
elif type(cookies) == RequestsCookieJar:
# Requests cookies. Convert to dict.
requests_cookies = dict(cookies)
return requests_cookies
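# Minimal usage demo with a Selenium-style cookie list:
if __name__ == "__main__":
    print(normalize_cookies([{"name": "session", "value": "abc123"}]))  # {'session': 'abc123'}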
|
5a57fc15e7545427e8797439264245f1b7bebe7c
| 29,095 |
def from_file(filename):
"""Create a list structure from a special format of file.
Args:
filename: in which the formated string is located.
Returns:
A 2d list object.
"""
with open(filename, 'r') as f:
return from_text(f.read())
|
1035da5c6709be1a0429b955a6ef88d6877a2101
| 29,096 |
from collections import namedtuple
import matplotlib.pyplot as plt
def plot_data(data):
""" Returns a scatter plot that visualizes the passed dataframe. Currently, tailored to merely
encapsulate very specific visualization. """
# lets play with namedtuples for fun. kinda like a struct-ish
PlotArgs = namedtuple('PlotArgs', ['color', 'label', 'marker'])
plotting = {0: PlotArgs('RoyalBlue', '0', 'x'), 1: PlotArgs('GoldenRod', '1', 'o')}
data.columns = ['exam_1', 'exam_2', 'admission']
# look at how neat the namedtuple is!
fig, ax = plt.subplots(1,1, figsize=(15,10))
for adminStat, grouped in data.groupby('admission'):
adminStatPlotConfig = plotting[adminStat]
grouped.plot.scatter(x='exam_1', y='exam_2', ax=ax,
color=adminStatPlotConfig.color,
label=adminStatPlotConfig.label,
marker=adminStatPlotConfig.marker)
return ax
|
98f4f517894a9e5b16c9b471cf28bbaae72007ff
| 29,097 |
import cv2
def rotate(img):
    """
    Rotation:
    OpenCV provides scaled rotation with an adjustable rotation center, so you can rotate about any location you prefer.
    To find this modified transformation matrix, OpenCV provides a function, cv2.getRotationMatrix2D.
    The example below rotates the image by 90 degrees about its center without any scaling:
    """
    rows, cols = img.shape[:2]
    M = cv2.getRotationMatrix2D((cols / 2, rows / 2), 90, 1)
    return cv2.warpAffine(img, M, (cols, rows))
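# Minimal usage demo on a blank 100x200 test image; warpAffine keeps the
# original canvas size, so the output shape is unchanged.
if __name__ == "__main__":
    import numpy as np
    print(rotate(np.zeros((100, 200, 3), dtype=np.uint8)).shape)  # (100, 200, 3)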
|
89bb652c43d61896db74be1a97bffd492d7ab95f
| 29,098 |
import hashlib
def get_sign(data_dict, key):
"""
    Signing function.
    :param data_dict: the parameters to sign, as a dict
    :param key: the secret key, i.e. the API_KEY mentioned above
    :return: the signature string
    """
    params_list = sorted(data_dict.items(), key=lambda e: e[0], reverse=False)  # sort the parameter dict by key into a list
    params_str = "&".join(u"{}={}".format(k, v) for k, v in params_list) + '&key=' + key
    # build the parameter string and append the merchant transaction key at the end
    md5 = hashlib.md5()  # use MD5 hashing
    md5.update(params_str.encode('utf-8'))  # feed in the parameter string
    sign = md5.hexdigest().upper()  # finish hashing and convert to uppercase
return sign
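# Minimal usage demo with made-up parameters and key (illustrative only):
if __name__ == "__main__":
    demo_params = {"out_trade_no": "20190520", "total_fee": "1", "body": "test"}
    print(get_sign(demo_params, key="MY_SECRET_KEY"))  # 32-char uppercase MD5 digest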
|
ea7ee65cd3ae72e19293dc851255bc0f3ad4b321
| 29,100 |
def string():
"""String representation."""
return "{:s}".format('something')
|
d13ae4fe229f767c515b0f0d6439ac61c6bfdbe8
| 29,101 |
import pandas as pd
def find_films_in_location(films: pd.DataFrame) -> pd.DataFrame:
"""finds films filmed in certain location
Args:
films (pd.DataFrame): films with their locations
Returns:
pd.DataFrame: films which were filmed in certain location
"""
films.dropna(inplace=True)
# change for more precise address for better performance
local_films = films.loc[films["Location"].str.contains("Ukraine")]
if "Cooridinates" not in local_films.columns:
local_films["Coordinates"] = local_films["Location"].apply(find_location)
return local_films
|
91801357aab19e2a263951907fe74f114b833f68
| 29,102 |
def _if_installed(pname):
"""Run if the given program name is installed.
"""
def argcatcher(func):
def decorator(*args, **kwargs):
envs = [x for x in args if hasattr(x, "system_install")]
env = envs[0] if envs else None
if shared.which(pname, env):
return func(*args, **kwargs)
return decorator
return argcatcher
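# Usage sketch (illustrative only; depends on the project's `shared.which`
# helper): the decorated step silently becomes a no-op when `samtools` is not
# installed in the target environment.
#   @_if_installed("samtools")
#   def index_bam(env, bam_path):
#       ...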
|
63eb3a0a3c2b2b6c7370ee4db449e6e3e1d2c84e
| 29,104 |
def check_gym_environments(env: gym.Env) -> None:
"""Checking for common errors in gym environments.
Args:
env: Environment to be checked.
Warning:
If env has no attribute spec with a sub attribute,
max_episode_steps.
Raises:
AttributeError: If env has no observation space.
AttributeError: If env has no action space.
ValueError: Observation space must be a gym.spaces.Space.
ValueError: Action space must be a gym.spaces.Space.
ValueError: Observation sampled from observation space must be
contained in the observation space.
        ValueError: Action sampled from action space must be
            contained in the action space.
        ValueError: If env cannot be reset.
        ValueError: If an observation collected from a call to env.reset()
is not contained in the observation_space.
ValueError: If env cannot be stepped via a call to env.step().
ValueError: If the observation collected from env.step() is not
contained in the observation_space.
AssertionError: If env.step() returns a reward that is not an
int or float.
        AssertionError: If env.step() returns a done that is not a bool.
AssertionError: If env.step() returns an env_info that is not a dict.
"""
# check that env has observation and action spaces
if not hasattr(env, "observation_space"):
raise AttributeError("Env must have observation_space.")
if not hasattr(env, "action_space"):
raise AttributeError("Env must have action_space.")
# check that observation and action spaces are gym.spaces
if not isinstance(env.observation_space, gym.spaces.Space):
raise ValueError("Observation space must be a gym.space")
if not isinstance(env.action_space, gym.spaces.Space):
raise ValueError("Action space must be a gym.space")
# Raise a warning if there isn't a max_episode_steps attribute.
if not hasattr(env, "spec") or not hasattr(env.spec, "max_episode_steps"):
if log_once("max_episode_steps"):
logger.warning(
"Your env doesn't have a .spec.max_episode_steps "
"attribute. This is fine if you have set 'horizon' "
"in your config dictionary, or `soft_horizon`. "
"However, if you haven't, 'horizon' will default "
"to infinity, and your environment will not be "
"reset."
)
# check if sampled actions and observations are contained within their
# respective action and observation spaces.
def get_type(var):
return var.dtype if hasattr(var, "dtype") else type(var)
sampled_action = env.action_space.sample()
sampled_observation = env.observation_space.sample()
# check if observation generated from stepping the environment is
# contained within the observation space
reset_obs = env.reset()
if not env.observation_space.contains(reset_obs):
reset_obs_type = get_type(reset_obs)
space_type = env.observation_space.dtype
error = (
f"The observation collected from env.reset() was not "
f"contained within your env's observation space. Its possible "
f"that There was a type mismatch, or that one of the "
f"sub-observations was out of bounds: \n\n reset_obs: "
f"{reset_obs}\n\n env.observation_space: "
f"{env.observation_space}\n\n reset_obs's dtype: "
f"{reset_obs_type}\n\n env.observation_space's dtype: "
f"{space_type}"
)
temp_sampled_reset_obs = convert_element_to_space_type(
reset_obs, sampled_observation
)
if not env.observation_space.contains(temp_sampled_reset_obs):
raise ValueError(error)
# check if env.step can run, and generates observations rewards, done
# signals and infos that are within their respective spaces and are of
# the correct dtypes
next_obs, reward, done, info = env.step(sampled_action)
if not env.observation_space.contains(next_obs):
next_obs_type = get_type(next_obs)
space_type = env.observation_space.dtype
error = (
f"The observation collected from env.step(sampled_action) was "
f"not contained within your env's observation space. Its "
f"possible that There was a type mismatch, or that one of the "
f"sub-observations was out of bounds:\n\n next_obs: {next_obs}"
f"\n\n env.observation_space: {env.observation_space}"
f"\n\n next_obs's dtype: {next_obs_type}"
f"\n\n env.observation_space's dtype: {space_type}"
)
temp_sampled_next_obs = convert_element_to_space_type(
next_obs, sampled_observation
)
if not env.observation_space.contains(temp_sampled_next_obs):
raise ValueError(error)
_check_done(done)
_check_reward(reward)
_check_info(info)
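# Usage sketch (illustrative only; assumes `gym` and this module's private
# check helpers are importable). A standard environment should pass silently:
#   check_gym_environments(gym.make("CartPole-v1"))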
|
95d3a3b7804981cb8308269580359111b257eefe
| 29,105 |
def WI(bands: dict) -> xr.DataArray:
"""
Water Index (2015): Fisher et al. (2016)
Args:
bands (dict): Bands as {band_name: xr.DataArray}
Returns:
xr.DataArray: Computed index
"""
return (
1.7204
+ 171 * bands[obn.GREEN]
+ 3 * bands[obn.RED]
- 70 * bands[obn.NIR]
- 45 * bands[obn.SWIR_1]
- 71 * bands[obn.SWIR_2]
)
|
400c7277d5d7cca07df7953b0db957f3d4fdfd0a
| 29,106 |
import io
from zipfile import ZipFile
import pandas as pd
def readZipData(filePath):
"""
    Open the zip file in READ mode and transform Scalars.csv into a data frame.
:param filePath: path to zip-file
:return: data frame with scalars.csv content
"""
with ZipFile(filePath.as_posix(), 'r') as zip:
scalars = None
for i in zip.namelist():
if i.endswith('Scalars.csv'):
scalars = i
break
print('Reading', scalars)
if scalars is None:
print('No scalars file exists in zip file!')
return pd.DataFrame()
scalars = zip.read(scalars)
        # allow comma and semicolon as separators
df = pd.read_csv(io.BytesIO(scalars), sep=',|;')
return df
|
2efd90426366754454f13a8c5e9e61ed2a1c150d
| 29,107 |
def calc_relative_scale(skeleton, ref_bone_lengths, joint_tree) -> float:
"""Calculate the factor by which the reference is larger than the query skeleton.
Args:
skeleton (torch.DoubleTensor): The query skeleton.
ref_bone_lengths (torch.DoubleTensor): The reference skeleton bone lengths.
joint_tree (list of int):
Returns:
The average scale factor.
"""
bone_lengths = cartesian_to_spherical(
absolute_to_parent_relative(ensure_cartesian(skeleton, d=3), joint_tree)
)[:, 0]
non_zero = bone_lengths.gt(1e-6)
if non_zero.sum() == 0: return 0
ratio = (ref_bone_lengths / bone_lengths).masked_select(non_zero)
return ratio.median().item()
|
cf1bbf2692666e393eb50eeb4ae9d0724af78c7f
| 29,108 |
def vertical_move(t, v_speed=2/320):
"""Probe moves vertically at v_speed [cm/s]"""
return 0.*t, 0*t, v_speed*t
|
eb6a066bf6b6659728647c78dd7673a3d45b250d
| 29,109 |
def get_favored_peaks(rama_key):
"""
returns exact favored peaks with their score value
"""
assert rama_key in range(6)
if rama_key == RAMA_GENERAL:
return [((-115.0, 131.0), 0.57068),
((-63.0, -43.0), 1.0),
((53.0, 43.0), 0.323004),
((53.0, -127.0), 0.0246619)]
if rama_key == RAMA_GLYCINE:
return [((63.0, 41.0), 1.0),
((-63.0, -41.0), 1.0),
((79.0, -173.0), 0.553852),
# ((-79.0, 173.0), 0.553852),
]
if rama_key == RAMA_CISPRO:
return [((-75.0, 155.0), 1.0),
((-89.0, 5.0), 0.701149)]
if rama_key == RAMA_TRANSPRO:
return [((-57.0, -37.0), 0.99566),
((-59.0, 143.0), 1.0),
((-81.0, 65.0), 0.0896269)]
if rama_key == RAMA_PREPRO:
return [((-57.0, -45.0), 1.0),
((-67.0, 147.0), 0.992025),
((49.0, 57.0), 0.185259)]
if rama_key == RAMA_ILE_VAL:
return [((-63.0, -45.0), 1.0),
((-121.0, 129.0), 0.76163)]
return None
|
79bf814becbbf36796e229f69d0a99cd8ef1716e
| 29,110 |
def get_all_tablespace_acls(conn):
"""
Returns:
List of :class:`~.types.RelationInfo` objects.
"""
return [RelationInfo(**row) for row in conn.execute(_pg_tablespace_stmt)]
|
561514b7986d374ba1dc7a4addf4d0588b53e59b
| 29,111 |
import regex
def chunk_pars(content):
"""Given the context contained between `\\beginnumbering` and
`\\endnumbering`, return list of paragraphs.
This is able to handle paragraphs demarcated by `\\pstart` and `\\pend` as
well as when `\\autopar` is used (see §5.2.2 of the reledmac
documentation). The use of `\\autopar` assumes that the `\\autopar` command
is given right after the `\\beginnumbering` as in the documentation.
"""
if content.find(r"\autopar") is not -1:
positions = [idx.start() for idx in regex.finditer("\n\n", content)]
else:
positions = [idx.start() for idx in regex.finditer(r"\\pstart", content)]
paragraphs = []
paragraphs.append(content[: positions[0]])
for index, par in enumerate(positions):
try:
paragraphs.append(content[par : positions[index + 1]])
except IndexError:
paragraphs.append(content[par:])
return paragraphs
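# Minimal usage demo: two \pstart ... \pend paragraphs. The first element of
# the returned list is whatever precedes the first \pstart (here, empty).
if __name__ == "__main__":
    sample = "\\pstart First paragraph. \\pend\n\\pstart Second paragraph. \\pend"
    print(len(chunk_pars(sample)))  # 3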
|
958890791c67c90a9ed3264e82caca9bfebb5885
| 29,112 |
def bound():
""" Generate boundary for testing"""
bound = data.Boundary()
bound.degree = 3
bound.start = np.array([0.0, 0.0, 0.0])
bound.end = np.array([1.0, 0.0, 0.0])
bound.num_ctrlpts = 5
return bound
|
210636e3e0618ff8b5ffd48b48f4aa035a38e928
| 29,113 |
from typing import Union
def to_tensor(pic: Union[Image, np.ndarray]) -> Tensor:
"""Convert a ``PIL Image`` or ``numpy.ndarray`` to tensor."""
if not (F_pil._is_pil_image(pic) or _is_numpy(pic)):
raise TypeError(f"input pic should be PIL image or numpy.ndarray, Got {type(pic)}")
if _is_numpy(pic) and not _is_numpy_image(pic):
raise ValueError(f"input pic should be 2 or 3 dimensional. Got {pic.ndim} dimensions")
# handle np.ndarray
if isinstance(pic, np.ndarray):
if pic.ndim == 2:
pic = pic[:, :, None]
img = cranet.tensor(pic.transpose(2, 0, 1))
return img
# handle PIL Image
mode_to_nptype = {'I': np.int32, 'I;16': np.int16, 'F': np.float32}
img = cranet.tensor(
np.array(pic, mode_to_nptype.get(pic.mode, np.uint8))
)
if pic.mode == '1':
img = 255 * img
img = img.reshape(pic.size[1], pic.size[0], len(pic.getbands()))
# (H x W x C) -> (C x H x W)
img = img.permute((2, 0, 1))
return img / 255
|
26040d594cee200200945d7561bc9e6bcda95f01
| 29,114 |
import numpy
def dummy_image():
"""Create a dummy image"""
x = numpy.linspace(-1.5, 1.5, 1024)
xv, yv = numpy.meshgrid(x, x)
signal = numpy.exp(- (xv ** 2 / 0.15 ** 2 + yv ** 2 / 0.25 ** 2))
# add noise
signal += 0.3 * numpy.random.random(size=signal.shape)
return signal
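# Minimal usage demo: the generated test image is a noisy 1024x1024 Gaussian blob.
if __name__ == "__main__":
    img = dummy_image()
    print(img.shape, img.dtype)  # (1024, 1024) float64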
|
8cbf5f31cde69b8ac775114277cee8f88d6dd932
| 29,115 |
def update_user(uid, **kwargs):
"""Updates an existing user account with the specified properties.
Args:
uid: A user ID string.
kwargs: A series of keyword arguments (optional).
Keyword Args:
display_name: The user's display name (optional). Can be removed by explicitly passing
None.
email: The user's primary email (optional).
email_verified: A boolean indicating whether or not the user's primary email is
verified (optional).
phone_number: The user's primary phone number (optional). Can be removed by explicitly
passing None.
photo_url: The user's photo URL (optional). Can be removed by explicitly passing None.
password: The user's raw, unhashed password. (optional).
disabled: A boolean indicating whether or not the user account is disabled (optional).
        custom_claims: A dictionary or a JSON string containing the custom claims to be set on the
user account (optional).
valid_since: An integer signifying the seconds since the epoch. This field is set by
``revoke_refresh_tokens`` and it is discouraged to set this field directly.
Returns:
UserRecord: An updated UserRecord instance for the user.
Raises:
ValueError: If the specified user ID or properties are invalid.
AuthError: If an error occurs while updating the user account.
"""
app = kwargs.pop('app', None)
user_manager = _get_auth_service(app).user_manager
try:
user_manager.update_user(uid, **kwargs)
return UserRecord(user_manager.get_user(uid=uid))
except _user_mgt.ApiCallError as error:
raise AuthError(error.code, str(error), error.detail)
|
0b52b7e42f286861b43e6e2e25a9547b1cd354d7
| 29,118 |
def add_center_dist(nusc: NuScenes,
eval_boxes: EvalBoxes):
"""
Adds the cylindrical (xy) center distance from ego vehicle to each box.
:param nusc: The NuScenes instance.
:param eval_boxes: A set of boxes, either GT or predictions.
:return: eval_boxes augmented with center distances.
"""
for sample_token in eval_boxes.sample_tokens:
sample_rec = nusc.get('sample', sample_token)
sd_record = nusc.get('sample_data', sample_rec['data']['LIDAR_TOP'])
pose_record = nusc.get('ego_pose', sd_record['ego_pose_token'])
for box in eval_boxes[sample_token]:
# Both boxes and ego pose are given in global coord system, so distance can be calculated directly.
# Note that the z component of the ego pose is 0.
ego_translation = (box.translation[0] - pose_record['translation'][0],
box.translation[1] - pose_record['translation'][1],
box.translation[2] - pose_record['translation'][2])
if isinstance(box, DetectionBox):
box.ego_dist = np.sqrt(np.sum(np.array(ego_translation[:2]) ** 2))
elif isinstance(box, TrackingBox):
box.ego_translation = ego_translation
else:
raise NotImplementedError
return eval_boxes
|
5a0c09f9de689efe294a6ce500ba4dbf09885149
| 29,119 |
def check_address(btc_addr, network='test'):
""" Checks if a given string is a Bitcoin address for a given network (or at least if it is formatted as if it is).
:param btc_addr: Bitcoin address to be checked.
    :type btc_addr: str
    :param network: Network to be checked (either mainnet or testnet).
    :type network: str
:return: True if the Bitcoin address matches the format, raise exception otherwise.
"""
if network in ['test', "testnet"] and btc_addr[0] not in ['m', 'n']:
raise Exception("Wrong testnet address format.")
elif network in ['main', 'mainnet'] and btc_addr[0] != '1':
raise Exception("Wrong mainnet address format.")
elif network not in ['test', 'testnet', 'main', 'mainnet']:
raise Exception("Network must be test/testnet or main/mainnet")
elif len(btc_addr) not in range(26, 35+1):
raise Exception("Wrong address format, Bitcoin addresses should be 27-35 hex char long.")
else:
return True
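# Minimal usage demo; the address below is the well-known testnet example
# address from the Bitcoin documentation, used here for illustration only.
if __name__ == "__main__":
    print(check_address("mipcBbFg9gMiCh81Kj8tqqdgoZub1ZJRfn", network="test"))  # True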
|
9f236f5d6ccf2f28944c577e2ce8fbfb2c2a58b8
| 29,120 |
import time
def format_time(record):
"""Format time to ISO 8601.
https://en.wikipedia.org/wiki/ISO_8601
"""
utc_time = time.gmtime(record.created)
time_string = time.strftime('%Y-%m-%d %H:%M:%S', utc_time)
return '%s.%03dZ' % (time_string, record.msecs)
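# Minimal usage demo: build a throwaway log record to show the output format.
if __name__ == "__main__":
    import logging
    record = logging.LogRecord("demo", logging.INFO, __file__, 0, "hello", None, None)
    print(format_time(record))  # e.g. '2024-05-01 12:34:56.789Z'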
|
ea07736965711a214a738f5443f68cf02e20fcb2
| 29,121 |
from json import loads
from urllib.parse import unquote
from zlib import decompressobj
def _cb_decode(s, maxsize=8192):
"""Decode a list of IDs from storage in a cookie.
``s`` is text as encoded by ``_cb_encode``.
``maxsize`` is the maximum size of uncompressed data. ``0`` means no limit.
Return a list of text IDs.
"""
dec = decompressobj()
squashed = unquote(s).encode('latin-1')
data = dec.decompress(squashed, maxsize)
if dec.unconsumed_tail:
raise ValueError
json_bytes = data.decode('utf-8')
return loads(json_bytes)
|
bf1239cf33bf83b1163d96641a20e4adc3e83221
| 29,122 |
def validate_model(df, fix=False):
"""
Validates the form of a model dataframe. A model dataframe must look something like this:
pos val_A val_C val_G val_T
3 1.1 4.3 -6.19 5.2
4 0.01 3.40 -10.5 5.3
5 0 1.4 10.9 231.0
    A 'pos' column reports the position within a sequence to which this model applies. 'val_X' columns then describe the values of the model parameters.
Specifications:
0. The dataframe must have at least one row and one column.
1. A 'pos' column is mandatory and must occur first. Values must be nonnegative integers in sequential order.
2. 'val_X' columns must conform to one of the accepted model types. These columns must be arranged in alphabetical order. Parameter values must be finite float values.
Arguments:
df (pd.DataFrame): Dataset in dataframe format
fix (bool): A flag saying whether to fix the dataframe into shape if possible.
Returns:
if fix=True:
df_valid: a valid dataframe that has been fixed by the function
if fix=False:
Nothing
Function:
        Raises a SortSeqError if the dataframe violates the specifications (if fix=False) or if these violations cannot be fixed (fix=True).
"""
# Verify dataframe has at least one row and one column
if not df.shape[0] >= 1:
raise SortSeqError(\
'Dataframe must contain at least one row')
# Validate column names
for col in df.columns:
if not is_col_type(col,['pos','vals']):
raise SortSeqError('Invalid column in dataframe: %s.'%col)
for col in ['pos']:
if not col in df.columns:
raise SortSeqError('%s column missing'%col)
# Validate parameter column names
val_cols = sorted([c for c in df.columns if is_col_type(c,'vals')])
ok = False
for cols in model_parameters_dict.values():
# Check if cols and df.columns are identical
if len(cols)==len(val_cols):
if all([a==b for a,b in zip(cols,val_cols)]):
ok = True
if not ok:
raise SortSeqError('Dataframe represents model with invalid columns: %s'%str(val_cols))
# Validate contents of all columns
df = _validate_cols(df,fix=fix)
return df
|
81c8663934c2ae33318635dd68939cff5652912b
| 29,123 |
def create_training_instances(input_files, tokenizer, max_seq_length,
dupe_factor, short_seq_prob, masked_lm_prob,
max_predictions_per_seq, rng):
"""Create `TrainingInstance`s from raw text."""
all_documents = [[]]
# Input file format:
# (1) One sentence per line. These should ideally be actual sentences, not
# entire paragraphs or arbitrary spans of text. (Because we use the
# sentence boundaries for the "next sentence prediction" task).
# (2) Blank lines between documents. Document boundaries are needed so
# that the "next sentence prediction" task doesn't span between documents.
for input_file in input_files:
if tf.__version__[0] == '1':
with tf.gfile.GFile(input_file, "r") as reader:
while True:
line = tokenization.convert_to_unicode(reader.readline())
if not line:
break
line = line.strip()
# Empty lines are used as document delimiters
if not line:
all_documents.append([])
tokens = tokenizer.tokenize(line)
if tokens:
all_documents[-1].append(tokens)
else:
# in tf == 2.4.1, tf.gfile is moved to tf.compat.v1.gfile
with tf.compat.v1.gfile.GFile(input_file, "r") as reader:
while True:
line = tokenization.convert_to_unicode(reader.readline())
if not line:
break
line = line.strip()
# Empty lines are used as document delimiters
if not line:
all_documents.append([])
tokens = tokenizer.tokenize(line)
if tokens:
all_documents[-1].append(tokens)
# Remove empty documents
all_documents = [x for x in all_documents if x]
rng.shuffle(all_documents)
vocab_words = list(tokenizer.vocab.keys())
instances = []
for _ in range(dupe_factor):
for document_index in range(len(all_documents)):
instances.extend(
create_instances_from_document(
all_documents, document_index, max_seq_length, short_seq_prob,
masked_lm_prob, max_predictions_per_seq, vocab_words, rng))
rng.shuffle(instances)
return instances
|
0274db246e701ac1da78564707c851c9e295a21e
| 29,124 |