content stringlengths 35–762k | sha1 stringlengths 40–40 | id int64 0–3.66M |
---|---|---|
def ConstVal(val):
"""
Creates a LinComb representing a constant without creating a witness or instance variable
Should be used carefully. Using LinCombs instead of integers where not needed will hurt performance
"""
if not isinstance(val, int):
raise RuntimeError("Wrong type for ConstVal")
return LinComb(val, backend.one() * val) | d715564ea09224590be827d3e32043c4b66c5cfd | 18,833 |
def filter_required_flat_tensor_spec(flat_tensor_spec):
"""Process a flat tensor spec structure and return only the required subset.
Args:
flat_tensor_spec: A flattened sequence (result of flatten_spec_structure)
with the joined string paths as OrderedDict. Since we use OrderedDicts we
can safely call flatten_spec_structure multiple times.
Raises:
ValueError: If the passed flat_tensor_spec is not a valid flat tensor_spec
structure.
Returns:
filtered_flat_required_tensor_spec: The same flattened sequence but only
        the {key: tensor_spec} pairs for the non-optional tensor_specs.
"""
if not is_flat_spec_or_tensors_structure(flat_tensor_spec):
raise ValueError('Only flat tensor_spec structures are allowed.')
filtered_flat_required_tensor_spec = TensorSpecStruct()
for key, value in flat_tensor_spec.items():
if hasattr(value, 'is_optional') and value.is_optional:
continue
filtered_flat_required_tensor_spec[key] = value
return filtered_flat_required_tensor_spec | aa55e790cd335030cf2c821dd006213db022b78a | 18,834 |
def callback(photolog_id):
""" twitter로부터 callback url이 요청되었을때
최종인증을 한 후 트위터로 해당 사진과 커멘트를 전송한다.
"""
Log.info("callback oauth_token:" + request.args['oauth_token']);
Log.info("callback oauth_verifier:" + request.args['oauth_verifier']);
# oauth에서 twiter로 부터 넘겨받은 인증토큰을 세션으로 부터 가져온다.
OAUTH_TOKEN = session['OAUTH_TOKEN']
OAUTH_TOKEN_SECRET = session['OAUTH_TOKEN_SECRET']
oauth_verifier = request.args['oauth_verifier']
try:
        # Build a Twython object with the temporary request tokens and verify them.
twitter = Twython(current_app.config['TWIT_APP_KEY'],
current_app.config['TWIT_APP_SECRET'],
OAUTH_TOKEN, OAUTH_TOKEN_SECRET)
final_step = twitter.get_authorized_tokens(oauth_verifier)
        # Recreate the Twython object with the final access tokens obtained via oauth_verifier.
twitter = Twython(current_app.config['TWIT_APP_KEY'],
current_app.config['TWIT_APP_SECRET'],
final_step['oauth_token'],
final_step['oauth_token_secret'])
session['TWITTER'] = twitter
        # Post the photo and comment identified by photolog_id to Twitter.
__send_twit(twitter, photolog_id)
except TwythonError as e:
Log.error("callback(): TwythonError , "+ str(e))
session['TWITTER_RESULT'] = str(e)
return redirect(url_for('.show_all')) | 3dcca97278cf20f819fa357b85e971dae9a6dac8 | 18,835 |
def calc_adjusted_pvalues(adata, method='fdr_by'):
"""Calculates pvalues adjusted per sample with the given method.
    :param adata: AnnData object annotated with model fit results.
:param method: Name of pvalue adjustment method (from
statsmodels.stats.multitest.multipletests).
:return: AnnData object with adjusted pvalues.
"""
assert "X_pvalue" in adata.layers.keys(), (
'No X_pvalue found in AnnData object, calculate pvalues first.')
adata.layers["X_padj"] = (np.array([multiple_testing_nan(row,
method=method)
for row in adata.layers["X_pvalue"]]))
return adata | 0097ceca4918ef4a4c4376c092b040752f408036 | 18,836 |
def create_model(model_type='mobilenet'):
"""
Create a model.
:param model_type: Must be one of 'alexnet', 'vgg16', 'resnet50' or 'mobilenet'.
:return: Model.
"""
    if model_type == 'alexnet':
        net = mdl.alexnet(input_shape, num_breeds, lr=0.001)
    elif model_type == 'vgg16':
        net = mdl.vgg16(input_shape, num_breeds, lr=0.0001)
    elif model_type == 'resnet50':
        net = mdl.resnet50(input_shape, num_breeds, lr=0.0002)  # 0.01
    elif model_type == 'mobilenet':
        net = mdl.mobilenet(input_shape, num_breeds, lr=0.0001)  # 0.01
    else:
        raise ValueError("Model type is not supported.")
return net | 44ab632eff28e40b5255094e2009b479e042b00b | 18,837 |
def generate_voter_groups():
"""Generate all possible voter groups."""
party_permutations = list(permutations(PARTIES, len(PARTIES)))
voter_groups = [VoterGroup(sequence) for sequence in party_permutations]
return voter_groups | 16c55002600bf76178c529f1140fb28831d5065e | 18,838 |
import random
def generator(fields, instance):
"""
Calculates the value needed for a unique ordered representation of the fields
we are paginating.
"""
values = []
for field in fields:
neg = field.startswith("-")
# If the field we have to paginate by is the pk, get the pk field name.
if field == 'pk':
field = instance._meta.pk.name
value = instance._meta.get_field(field.lstrip("-")).value_from_object(instance)
if hasattr(value, "isoformat"):
value = value.isoformat()
value = unicode(value)
if neg:
# this creates the alphabetical mirror of a string, e.g. ab => zy, but for the full
# range of unicode characters, e.g. first unicode char => last unicode char, etc
value = u"".join([ unichr(0xffff - ord(x)) for x in value ])
values.append(value)
values.append(unicode(instance.pk) if instance.pk else unicode(random.randint(0, 1000000000)))
return NULL_CHARACTER.join(values) | 3d6f3837e109720ec78460dcd56b6cf1b3ddc947 | 18,840 |
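# A minimal illustration (Python 3, using chr where the snippet above uses unichr) of
# the "alphabetical mirror" trick: mapping each character x to chr(0xffff - ord(x))
# reverses the lexicographic order of the example strings below, which is what a
# descending sort key needs.
def mirror(value):
    return u"".join(chr(0xffff - ord(x)) for x in value)

words = ["apple", "banana", "cherry"]
assert sorted(words, key=mirror) == sorted(words, reverse=True)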
from typing import Any
from typing import Union
def token_hash(token: Any, as_int: bool = True) -> Union[str, int]:
"""Hash of Token type
Args:
token (Token): Token to hash
as_int (bool, optional): Encode hash as int
Returns:
Union[str, int]: Token hash
"""
return _hash((token.text, token.start, token.end, token.id), as_int=as_int) | 3adfc8dce2b37b86376d47f8299cb6813faab839 | 18,841 |
import six
import base64
from calendar import timegm
from datetime import datetime
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.hashes import SHA1
from cryptography.hazmat.primitives.twofactor.totp import TOTP
def generate_totp_passcode(secret):
"""Generate TOTP passcode.
:param bytes secret: A base32 encoded secret for TOTP authentication
:returns: totp passcode as bytes
"""
if isinstance(secret, six.text_type):
secret = secret.encode('utf-8')
while len(secret) % 8 != 0:
secret = secret + b'='
decoded = base64.b32decode(secret)
totp = TOTP(
decoded, 6, SHA1(), 30, backend=default_backend())
return totp.generate(timegm(datetime.utcnow().utctimetuple())).decode() | 2f0392e86b5d84970ec43bbd4d647ca29345a373 | 18,842 |
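# A minimal usage sketch, assuming the function above is importable; the base32
# secret is illustrative (it decodes to b"1234567890").
passcode = generate_totp_passcode('GEZDGNBVGY3TQOJQ')
print(passcode)  # a six-digit string that changes every 30 seconds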
def all_ndcubes(request):
"""
All the above ndcube fixtures in order.
"""
return request.getfixturevalue(request.param) | 906412ebe9a26de5cfddcb1d1431ab014c8084c6 | 18,843 |
from pathlib import Path
import warnings
def read_xmu(fpath: Path, scan: str='mu', ref: bool=True, tol: float=1e-4) -> Group:
"""Reads a generic XAFS file in plain format.
Parameters
----------
fpath
Path to file.
scan
Requested mu(E). Accepted values are transmission ('mu'), fluorescence ('fluo'),
or None. The default is 'mu'.
ref
Indicates if the transmission reference ('mu_ref') should also be returned.
The default is True.
tol
Tolerance in energy units to remove duplicate values.
Returns
-------
:
Group containing the requested arrays.
Notes
-----
:func:`read_xmu` assumes the following column order in the file:
1. energy.
2. transmission/fluorescence mu(E).
3. transmission reference.
See also
--------
read_file : Reads a XAFS file based on specified columns.
Examples
--------
>>> from araucaria import Group
>>> from araucaria.io import read_xmu
>>> from araucaria.testdata import get_testpath
>>> from araucaria.utils import check_objattrs
>>> fpath = get_testpath('xmu_testfile.xmu')
>>> # extracting mu and mu_ref scans
>>> group_mu = read_xmu(fpath, scan='mu')
>>> check_objattrs(group_mu, Group, attrlist=['mu', 'mu_ref'])
[True, True]
>>> # extracting only fluo scan
>>> group_fluo = read_xmu(fpath, scan='fluo', ref=False)
>>> check_objattrs(group_fluo, Group, attrlist=['fluo'])
[True]
>>> # extracting only mu_ref scan
>>> group_ref = read_xmu(fpath, scan=None, ref=True)
>>> check_objattrs(group_ref, Group, attrlist=['mu_ref'])
[True]
"""
# default modes and channels
scandict = ['mu', 'fluo', None]
coldict = {'fluo':1, 'mu':1, 'mu_ref':2}
# testing that scan exists in the current dictionary
if scan not in scandict:
warnings.warn("scan mode %s not recognized. Retrieving transmission measurement ('mu')." %scan)
scan = 'mu'
if scan is None:
usecols = (0, coldict['mu_ref'])
else:
usecols = (0, coldict[scan], coldict['mu_ref'])
group = read_file(fpath, usecols, scan, ref, tol)
return (group) | e5889fa309b7fb836cc5b7ea50f8987a647f00a2 | 18,844 |
def filter_order_by_oid(order, oid):
"""
:param order:
:type order: :class:`tests.testapp.testapp.trading.models.Order`
:param oid: Order ID
:type oid: int
"""
return order.tid == oid | bf84e2e9f2fa19dc19e1d42ceef92dd3050d1e89 | 18,845 |
from skaldship.passwords.utils import process_password_file, insert_or_update_acct
import logging
def process_pwdump_loot(loot_list=[], msf=None):
"""
Takes an array of loot records in loot_list, downloads the pwdump file and
adds the users.
"""
db = current.globalenv['db']
#cache = current.globalenv['cache']
data = []
for loot_id in loot_list:
loot = msf.loot_download(loot_id)
if loot['ltype'] not in ['host.windows.pwdump', 'windows.hashes']:
log("Loot is not a pwdump, it is a %s" % loot['ltype'], logging.ERROR)
continue
else:
# process the pwdump file
pw_data = loot['data'].split('\n')
accounts = process_password_file(
pw_data=pw_data,
file_type='PWDUMP',
source='Metasploit',
)
# find the info/0 service id for the host
host = get_host_record(loot['host'])
query = (db.t_services.f_number == '0') & (db.t_services.f_proto == 'info') & (db.t_services.f_hosts_id == host.id)
svc_id = db(query).select().first()
if svc_id is None:
# info/0 not found.. add it!
svc_id = db.t_services.insert(f_proto="info", f_number="0", f_status="info", f_hosts_id=host.id)
db.commit()
# insert or update the account records
resp_text = insert_or_update_acct(svc_id.id, accounts)
log("Added pwdump records for host: %s" % host.f_ipaddr)
data.append({loot['host']: resp_text})
return data | 57448b24350dd66271906ba5fcdc0e4453d898e9 | 18,846 |
def has_poor_grammar(token_strings):
"""
Returns whether the output has an odd number of double quotes or if it does not have balanced
parentheses.
"""
has_open_left_parens = False
quote_count = 0
for token in token_strings:
if token == '(':
if has_open_left_parens:
return True
else:
has_open_left_parens = True
elif token == ')':
if has_open_left_parens:
has_open_left_parens = False
else:
return True
elif token == '"':
quote_count += 1
return quote_count % 2 == 1 or has_open_left_parens | b35c6af0ec771ac22ff66d9ca875f5d916cb9489 | 18,847 |
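# A few illustrative calls on pre-tokenised strings: double quotes must come in
# pairs and parentheses must be balanced (and not nested).
assert has_poor_grammar(['I', 'said', '"', 'hi', '"']) is False
assert has_poor_grammar(['"', 'dangling', 'quote']) is True   # odd number of quotes
assert has_poor_grammar(['(', 'never', 'closed']) is True     # unbalanced parenthesis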
import pandas as pd
def csv_dataset_reader(path):
"""
This function reads a csv from a specified path and returns a Pandas dataframe representation of it, and renames
columns.
:param path: Path to and name of the csv file to read.
:return: A Pandas dataframe.
"""
data = pd.read_csv(path, sep=",", header=None)
data.columns = ['age', 'weight', 'height']
return data | 59a298c50bf060809ebbebc5d0ff3d9670e84244 | 18,849 |
def get_daily_blurb_info():
"""Get daily blurb info."""
html, ss_image_1day_file, ss_image_1year_file = _scrape()
return _parse(html, ss_image_1day_file, ss_image_1year_file) | ffe84accebda5780e55d34e58137288d02bc072d | 18,850 |
import cv2
def otsu_binarization(img):
"""
Method to perform Otsu Binarization
:param img: input image
:return: thresholded image
"""
ret2, th2 = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
return th2 | 99953288b893d56e17a9e9654393aa284eaae4b7 | 18,852 |
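# A hypothetical usage sketch: binarise a grayscale image loaded with OpenCV
# (the file name is illustrative).
img = cv2.imread("page_scan.png", cv2.IMREAD_GRAYSCALE)
binary = otsu_binarization(img)
cv2.imwrite("page_scan_binary.png", binary)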
def rosstack_depends_1(s):
"""
@param s: stack name
@type s: str
@return: A list of the names of the stacks which s depends on directly
@rtype: list
"""
return rosstackexec(['depends1', s]).split() | e917c62c628498e1f100c045bf8e966ea3bfd355 | 18,853 |
def _config_file_is_to_update():
"""
Ask the user if the configuration file should be updated or not.
:return: Returns True if the user wants to update the configuration file and False otherwise.
:rtype: bool
"""
if yes_or_no_input("Do you want to save the account on the configuration file?") == USER_INPUT_YES:
return True
return False | e14be78e150e28b87a0e8f179cc86f4a240a60d3 | 18,854 |
def funcScrapeTableWunderground(html_tree, forecast_date_str):
"""
"""
# This will get you the Wunderground table headers for future hour conditions
columns = html_tree.xpath("//table[@id='hourly-forecast-table']/thead//button[@class='tablesaw-sortable-btn']")
rows = html_tree.xpath("//table[@id='hourly-forecast-table']/tbody/tr")
fill_cols = np.asarray([])
for column in columns:
# print etree.tostring(column)
col = column.xpath("text()")[0]
fill_cols = np.append(fill_cols, col)
# print(col)
# Make a DataFrame to fill
dayDf = DataFrame(columns = fill_cols)#.set_index(fill_cols[0])
# This will go through the rows of the table and grab actual values
for row in rows:
values = row.xpath("td")
for i, value in enumerate(values):
col = columns[i].xpath("text()")[0]
val = value.xpath("ng-saw-cell-parser/div//span/text()")
# print(val)
if col == 'Time':
timeVal = val
# Initializing a single row. The goal is to make it look just like what dayDf looks like
hourRow = pd.DataFrame([forecast_date_str + ' ' + (''.join(timeVal))],
columns = [col])#.set_index
elif col == 'Conditions':
hourRow[col] = val[1]
else:
if col == 'Pressure':
val = value.xpath("ng-saw-cell-parser//span/span/text()")
val = [val[0] + ' ' + val[2][0:2]]
if col in ['Precip', 'Amount']: # These are hiding behind hyperlinks. Need to be smart
val = value.xpath("ng-saw-cell-parser/div//span/a/text()")
try:
hourRow[col] = val[0]
except:
hourRow[col] = np.nan
dayDf = dayDf.append(hourRow)
dayDf['Time'] = pd.to_datetime(dayDf['Time'])
# print(columns[i].xpath("text()")[0])
# print value.xpath("ng-saw-cell-parser/div//span/text()")
return dayDf | aa6745565e8fa01df8b8f52f1314ee7bf1a434a8 | 18,856 |
from re import S
def as_finite_diff(derivative, points=1, x0=None, wrt=None):
"""
Returns an approximation of a derivative of a function in
the form of a finite difference formula. The expression is a
weighted sum of the function at a number of discrete values of
(one of) the independent variable(s).
Parameters
==========
    derivative: a Derivative instance (needs to have a variables
        and expr attribute).
points: sequence or coefficient, optional
If sequence: discrete values (length >= order+1) of the
independent variable used for generating the finite
difference weights.
If it is a coefficient, it will be used as the step-size
for generating an equidistant sequence of length order+1
        centered around x0. default: 1 (step-size 1)
x0: number or Symbol, optional
the value of the independent variable (wrt) at which the
derivative is to be approximated. default: same as wrt
wrt: Symbol, optional
"with respect to" the variable for which the (partial)
derivative is to be approximated for. If not provided it
is required that the Derivative is ordinary. default: None
Examples
========
>>> from sympy import symbols, Function, exp, sqrt, Symbol, as_finite_diff
>>> x, h = symbols('x h')
>>> f = Function('f')
>>> as_finite_diff(f(x).diff(x))
-f(x - 1/2) + f(x + 1/2)
The default step size and number of points are 1 and ``order + 1``
respectively. We can change the step size by passing a symbol
as a parameter:
>>> as_finite_diff(f(x).diff(x), h)
-f(-h/2 + x)/h + f(h/2 + x)/h
We can also specify the discretized values to be used in a sequence:
>>> as_finite_diff(f(x).diff(x), [x, x+h, x+2*h])
-3*f(x)/(2*h) + 2*f(h + x)/h - f(2*h + x)/(2*h)
The algorithm is not restricted to use equidistant spacing, nor
do we need to make the approximation around x0, but we can get
an expression estimating the derivative at an offset:
>>> e, sq2 = exp(1), sqrt(2)
>>> xl = [x-h, x+h, x+e*h]
>>> as_finite_diff(f(x).diff(x, 1), xl, x+h*sq2)
2*h*((h + sqrt(2)*h)/(2*h) - (-sqrt(2)*h + h)/(2*h))*f(E*h + x)/\
((-h + E*h)*(h + E*h)) + (-(-sqrt(2)*h + h)/(2*h) - \
(-sqrt(2)*h + E*h)/(2*h))*f(-h + x)/(h + E*h) + \
(-(h + sqrt(2)*h)/(2*h) + (-sqrt(2)*h + E*h)/(2*h))*f(h + x)/(-h + E*h)
Partial derivatives are also supported:
>>> y = Symbol('y')
>>> d2fdxdy=f(x,y).diff(x,y)
>>> as_finite_diff(d2fdxdy, wrt=x)
-f(x - 1/2, y) + f(x + 1/2, y)
See also
========
sympy.calculus.finite_diff.apply_finite_diff
sympy.calculus.finite_diff.finite_diff_weights
"""
if wrt is None:
wrt = derivative.variables[0]
# we need Derivative to be univariate to guess wrt
if any(v != wrt for v in derivative.variables):
raise ValueError('if the function is not univariate' +
' then `wrt` must be given')
order = derivative.variables.count(wrt)
if x0 is None:
x0 = wrt
if not iterable(points):
# points is simply the step-size, let's make it a
# equidistant sequence centered around x0
if order % 2 == 0:
# even order => odd number of points, grid point included
points = [x0 + points*i for i
in range(-order//2, order//2 + 1)]
else:
# odd order => even number of points, half-way wrt grid point
points = [x0 + points*i/S(2) for i
in range(-order, order + 1, 2)]
if len(points) < order+1:
raise ValueError("Too few points for order %d" % order)
return apply_finite_diff(order, points, [
derivative.expr.subs({wrt: x}) for x in points], x0) | 4b76eae0578434a9a087b08f01eefbcd3018bc01 | 18,857 |
from math import sqrt
def is_prime(pp: int) -> bool:
"""
Returns True if pp is prime
otherwise, returns False
Note: not a very sophisticated check
"""
if pp == 2 or pp == 3:
return True
elif pp < 2 or not pp % 2:
return False
odd_n = range(3, int(sqrt(pp) + 1), 2)
return not any(not pp % i for i in odd_n) | f8661a7f625c198dd1d0b5b477aea22f50596a39 | 18,858 |
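# A quick sanity check, assuming the function above is importable.
assert [n for n in range(20) if is_prime(n)] == [2, 3, 5, 7, 11, 13, 17, 19]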
def createChromosome( totQty, menuData ):
"""
    Creates a chromosome with a quantity assigned to each dish such that
    the sum of all quantities equals the number of dishes to be ordered.
    totQty = number of dishes to be ordered
    Returns a chromosome of dish ids and corresponding quantities.
"""
chromosome = []
qtySeq = randSeq2(len(menuData),totQty)
i=0
for key in menuData:
chromosome.append(Dish(key,qtySeq[i]))
i+=1
return chromosome | 6dae9c5a610a50df67e18f2034513a090088e524 | 18,859 |
def add_residual(transformed_inputs, original_inputs, zero_pad=True):
"""Adds a skip branch to residual block to the output."""
original_shape = original_inputs.shape.as_list()
transformed_shape = transformed_inputs.shape.as_list()
delta = transformed_shape[3] - original_shape[3]
stride = int(np.ceil(original_shape[1] / transformed_shape[1]))
if stride > 1:
original_inputs = tf.layers.average_pooling2d(
original_inputs, pool_size=[stride] * 2, strides=stride, padding="same")
if delta != 0:
if zero_pad:
# Pad channels with zeros at the beginning and end.
if delta > 0:
original_inputs = tf.pad(
original_inputs, [[0, 0], [0, 0], [0, 0], [delta // 2, delta // 2]],
mode="CONSTANT",
constant_values=0)
else:
transformed_inputs = tf.pad(
transformed_inputs, [
[0, 0], [0, 0], [0, 0], [-delta // 2, -delta // 2]],
mode="CONSTANT",
constant_values=0)
else:
# Convolution
original_inputs = tf.layers.conv2d(
original_inputs,
filters=transformed_shape[3],
kernel_size=(1, 1),
strides=(1, 1),
padding="same",
activation=None,
use_bias=False)
net = original_inputs + transformed_inputs
return net, original_inputs | e32897c6e80873b863fbc3358eaec8b6191086f0 | 18,860 |
def _find_bad_channels_in_epochs(epochs, picks, use_metrics, thresh, max_iter):
"""Implements the fourth step of the FASTER algorithm.
    This function attempts to automatically mark bad channels in each epoch by
    performing outlier detection.
Additional Parameters
---------------------
use_metrics : list of str
List of metrics to use. Can be any combination of:
'amplitude', 'variance', 'deviation', 'median_gradient'
Defaults to all of them.
thresh : float
The threshold value, in standard deviations, to apply. A channel
crossing this threshold value is marked as bad. Defaults to 3.
max_iter : int
The maximum number of iterations performed during outlier detection
(defaults to 1, as in the original FASTER paper).
"""
metrics = {
'amplitude': lambda x: np.ptp(x, axis=2),
'deviation': lambda x: _deviation(x),
'variance': lambda x: np.var(x, axis=2),
'median_gradient': lambda x: np.median(np.abs(np.diff(x)), axis=2),
'line_noise': lambda x: _freqs_power(x, epochs.info['sfreq'],
[50, 60]),
}
if use_metrics is None:
use_metrics = metrics.keys()
info = pick_info(epochs.info, picks, copy=True)
data = epochs.get_data()[:, picks]
bads = dict((m, np.zeros((len(data), len(picks)), dtype=bool)) for
m in metrics)
for ch_type, chs in _picks_by_type(info):
ch_names = [info['ch_names'][k] for k in chs]
chs = np.array(chs)
for metric in use_metrics:
logger.info('Bad channel-in-epoch detection on %s channels:'
% ch_type.upper())
s_epochs = metrics[metric](data[:, chs])
for i_epochs, epoch in enumerate(s_epochs):
outliers = find_outliers(epoch, thresh, max_iter)
if len(outliers) > 0:
bad_segment = [ch_names[k] for k in outliers]
logger.info('Epoch %d, Bad by %s:\n\t%s' % (
i_epochs, metric, bad_segment))
bads[metric][i_epochs, chs[outliers]] = True
return bads | 6b4a0acc1eb4e1fc4f229cc237e071bf87047b5e | 18,861 |
from collections import deque
def solution(A):  # O(N^2)
"""
For a given value A, compute the number with the fewest number of
squared values and return them within an array.
eg. 26 can be computed with squared values [25, 1] or [16, 9, 1], but the
answer is only [25, 1] as we are looking for the fewest number of
squared values
>>> solution(26)
[25, 1]
>>> solution(128)
[64, 64]
>>> solution(33)
[25, 4, 4]
>>> solution(256)
[256]
"""
queue = deque() # O(1)
ready_queue(A, queue, []) # O(N)
return process_queue(queue) # O(N^2) | 56f899d94cfc07a412a357a305553ad0ed8af092 | 18,862 |
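# ready_queue and process_queue are not shown above; as an independent sketch of the
# same idea (not the author's helpers), a breadth-first search over remainders finds
# a decomposition into the fewest squares.
from collections import deque

def fewest_squares(n):
    parent = {n: None}              # remainder -> (previous remainder, square used)
    queue = deque([n])
    while queue:
        rem = queue.popleft()
        if rem == 0:
            break
        for i in range(int(rem ** 0.5), 0, -1):
            nxt = rem - i * i
            if nxt not in parent:
                parent[nxt] = (rem, i * i)
                queue.append(nxt)
    squares, cur = [], 0
    while parent[cur] is not None:
        prev, sq = parent[cur]
        squares.append(sq)
        cur = prev
    return squares[::-1]            # e.g. fewest_squares(26) -> [25, 1]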
def get_device_path():
"""Return device path."""
if is_gce():
return None
devices = get_devices()
device_serial = environment.get_value('ANDROID_SERIAL')
for device in devices:
if device_serial == device.serial:
return device.path
return None | 5bd8bf47859c3721e47cfc45b49aaa06bed4159e | 18,864 |
import numpy as np
def pattern_maker(size, dynamic):
"""
Generate a pattern with pixel values drawn from the [0, 1] uniform
distribution
"""
def pattern():
return np.random.rand(size)
def static():
a_pattern = pattern()
def fn():
return a_pattern
return fn
return pattern if dynamic else static() | 3fd256fe3f8c7669faec8a7d1757a334a51145ba | 18,865 |
import numpy as np
def RMSE(a, b):
""" Return Root mean squared error """
return np.sqrt(np.square(np.subtract(a, b)).mean()) | 7d853535fb9e4072f983f05ad192cc38f2bbea8e | 18,866 |
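# A small worked example: the squared differences are (0, 0, 4), their mean is 4/3,
# so the result is sqrt(4/3), roughly 1.1547.
print(RMSE([1, 2, 3], [1, 2, 5]))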
def alpha_a_b(coord, N, silent=True):
"""Calculate alpha, a, b for a rectangle with coordinates coord and
truncation at N."""
[x0, x1, y0, y1] = coord
a = 0
for zero in zeros[:N]:
a += exp(-zero*y0)/abs(complex(0.5, zero))
b = 0
for zero in zeros[N:]:
b += exp(-zero*y0)/abs(complex(0.5, zero))
def F_north(x):
return abs(F_N(complex(x, y1), N))
def F_south(x):
return abs(F_N(complex(x, y0), N))
def F_east(y):
return abs(F_N(complex(x1, y), N))
def F_west(y):
return abs(F_N(complex(x0, y), N))
# def x_bounds(f_new, x_new, f_old, x_old):
# return x0 <= x_new[0] <= x1
# def y_bounds(f_new, x_new, f_old, x_old):
# return y0 <= x_new[0] <= y1
ns_kwargs = {"bounds":[(x0, x1)]}
ew_kwargs = {"bounds":[(y0, y1)]}
min_north = basinhopping(F_north, 0.5*(x0 + x1), stepsize=0.5*(x1-x0), minimizer_kwargs=ns_kwargs)
min_south = basinhopping(F_south, 0.5*(x0 + x1), stepsize=0.5*(x1-x0), minimizer_kwargs=ns_kwargs)
min_east = basinhopping(F_east, 0.5*(y0 + y1), stepsize=0.5*(y1-y0), minimizer_kwargs=ew_kwargs)
min_west = basinhopping(F_west, 0.5*(y0 + y1), stepsize=0.5*(y1-y0), minimizer_kwargs=ew_kwargs)
# if not silent:
# print('min_north')
# print(min_north)
# print('min_south')
# print(min_south)
# print('min_east')
# print(min_east)
# print('min_west')
# print(min_west)
min_north = min_north.fun
min_south = min_south.fun
min_east = min_east.fun
min_west = min_west.fun
if not silent:
print((min_north, min_south, min_east, min_west))
alpha = min(min_north, min_south, min_east, min_west)
return alpha, a, b | 41cc57c16a7526bf7a88503ea9315872062b8ac5 | 18,867 |
from typing import Any
from typing import Optional
def asdataset(
dataclass: Any,
reference: Optional[DataType] = None,
dataoptions: Any = None,
) -> Any:
"""Create a Dataset object from a dataclass object.
Args:
dataclass: Dataclass object that defines typed Dataset.
reference: DataArray or Dataset object as a reference of shape.
dataoptions: Options for Dataset creation.
Returns:
Dataset object created from the dataclass object.
"""
if dataoptions is None:
try:
dataoptions = dataclass.__dataoptions__
except AttributeError:
dataoptions = DataOptions(xr.Dataset)
model = DataModel.from_dataclass(dataclass)
dataset = dataoptions.factory()
for entry in model.data_vars:
dataset[entry.name] = entry(reference)
for entry in model.coords:
if entry.name in dataset.dims:
dataset.coords[entry.name] = entry(dataset)
for entry in model.coords:
if entry.name not in dataset.dims:
dataset.coords[entry.name] = entry(dataset)
for entry in model.attrs:
dataset.attrs[entry.name] = entry()
return dataset | 4baf2df39f906f2b1981cb597cb6430e95bb1ca1 | 18,868 |
def get_edge_size(reader: ChkDirReader, chunks: list[ChunkRange], tilesize: int) -> int:
"""Gets the size of an edge tile from an unknown chunk"""
for chunk in chunks:
data: bytes = deflate_range(reader, chunk.start, chunk.end, True)
if data is None:
continue
try:
decompressed: bytes = lzo.decompress(data, False, MAX_BUFFER_LEN)
pixel_count: float = len(decompressed) / 4 # RGBA per-pixel
edge_length = pixel_count / tilesize # rect edge length
return int(edge_length)
except: # pylint: disable=bare-except
continue
return -1 | 54da5c4adafbcccae4cee9112e35470a97172b00 | 18,869 |
import time
import torch
def multi_gpu_test(model, data_loader, tmpdir=None, gpu_collect=False):
"""Test model with multiple gpus.
This method tests model with multiple gpus and collects the results
under two different modes: gpu and cpu modes. By setting 'gpu_collect=True'
it encodes results to gpu tensors and use gpu communication for results
collection. On cpu mode it saves the results on different gpus to 'tmpdir'
and collects them by the rank 0 worker.
Args:
model (nn.Module): Model to be tested.
data_loader (nn.Dataloader): Pytorch data loader.
tmpdir (str): Path of directory to save the temporary results from
different gpus under cpu mode.
gpu_collect (bool): Option to use either gpu or cpu to collect results.
Returns:
list: The prediction results.
"""
model.eval()
results = []
dataset = data_loader.dataset
rank, world_size = get_dist_info()
if rank == 0:
prog_bar = mmcv.ProgressBar(len(dataset))
time.sleep(2) # This line can prevent deadlock problem in some cases.
for i, data in enumerate(data_loader):
with torch.no_grad():
result = model(return_loss=False, rescale=True, **data)
# encode mask results
if isinstance(result[0], tuple):
result = [(bbox_results, encode_mask_results(mask_results))
for bbox_results, mask_results in result]
results.extend(result)
if rank == 0:
batch_size = len(result)
for _ in range(batch_size * world_size):
prog_bar.update()
    # collect results from all ranks
    if gpu_collect:
results = collect_results_gpu(results, len(dataset))
else:
results = collect_results_cpu(results, len(dataset), tmpdir)
return results | 8ec9bf7efcd126485a8066c7d5932b0c84c44b63 | 18,870 |
def toRegexp(exp,terminate=False,lower=False):
""" Case sensitive version of the previous one, for backwards compatibility """
return toCl(exp,terminate,wildcards=('*',),lower=lower) | d550164d7d2a628a0b0bcf37f5ee95de958fc2e5 | 18,871 |
def marker_used_in_game(marker_id: int) -> bool:
"""
Determine whether the marker ID is used in the game.
:param marker_id: An official marker number, mapped to the competitor range.
    :returns: True if the marker is used in the game.
"""
return any([marker_id in marker_range for marker_range in MARKER_SIZES]) | 437d5b8c3ff80683e3f19d5cb3786243c6e430b3 | 18,872 |
import numpy as np
def iround(x):
"""
Round an array to the nearest integer.
Parameters
----------
x : array_like
Input data.
Returns
-------
y : {numpy.ndarray, scalar}
The rounded elements in `x`, with `int` dtype.
"""
return np.round(x).astype(int) | 64837773f12eb096ede5d8963360ab28427b015d | 18,873 |
def get_ec2_conn():
"""
    Required: env.aws_region, env.aws_access_key, env.aws_secret_access_key
    Returns a connection to AWS EC2.
"""
conn = boto.ec2.connect_to_region(
env.aws_region,
aws_access_key_id=env.aws_access_key,
aws_secret_access_key=env.aws_secret_access_key
)
if conn is None:
print(red("Can't connect to ec2 region"))
return conn | 5c2014f7d1a3ba465ec7f205ac34a5c1feeb2aac | 18,875 |
def create_final_comment_objects():
"""Goes through the final comments and returns an array
of objects."""
arr = [] # Stores objects
for line in final_file:
row = line.split(",")
# Set object variables for each object before adding it to the array
comment_number, comment_author, account_karma, comment_score, \
comment_num_replies, comment_permalink, comment_id, \
comment_length = [i.strip('\n') for i in row]
# Add the comment object to the array
arr.append(Final_Comment(comment_number, comment_author, account_karma, \
comment_score, comment_num_replies, \
comment_permalink, comment_id, comment_length))
return arr | 02107ba5ebc23e5a8db1c30fa8709793e1fcbe7e | 18,877 |
import re
def normalise_target_name(name, used=[], max_length=None):
"""
Check that name[:max_length] is not in used and
append a integer suffix if it is.
"""
def generate_name(name, i, ml):
# Create suffix string
i_name = '' if i == 0 else '_' + str(i)
# Return concatenated string if ml is not set
if ml is None:
ml = len(name) + len(i_name)
t_name = name
else:
# Work out amount of name to drop
length = len(name) + len(i_name) - ml
t_name = name if length <= 0 else name[:-length]
# If the length of i_name is greater than ml
# just warn and revert to straight append
if len(i_name) >= ml:
log.warn('Too many repetitions of name %s.', name)
t_name = name
o_name = ''.join(filter(None, [t_name, i_name]))
return '{:{ml}.{ml}}'.format(o_name, ml=ml)
name = re.sub(r'[^-A-Za-z0-9_]', '_', name)
i = 0
test_name = generate_name(name, i, max_length)
while test_name in used:
i += 1
test_name = generate_name(name, i, max_length)
return test_name | bffc78525d766cbb941382b6f7dd9371cffee492 | 18,878 |
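# Illustrative calls, assuming the function above: disallowed characters are replaced
# with underscores, and a numeric suffix is appended when the name is already used.
print(normalise_target_name('my name!', used=[]))            # 'my_name_'
print(normalise_target_name('my name!', used=['my_name_']))  # 'my_name__1'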
import numpy as np
import pandas as pd
def construct_pairwise_df(sr: pd.Series, np_fun):
"""Constructs an upper diagonal df from all pairwise comparisons of a sr"""
sr = sr.sort_index()
_mat = np.triu(np_fun(sr.to_numpy() - sr.to_numpy()[:, None]), k=1)
_mat[np.tril_indices(_mat.shape[0])] = None
return pd.DataFrame(_mat, index=sr.index.get_level_values('qid'),
columns=sr.index.get_level_values('qid')).rename_axis(index='qid_1', columns='qid_2') | bfef4a9c64e619e2d70efb3dea1fde9da5894634 | 18,879 |
def privacy(request):
"""This returns the privacy policy page"""
return render(request=request, template_name="registration/privacy.html") | c3467b0f670facb152c1f2cd793e6dd46301bc25 | 18,880 |
def seq_search(items, key):
"""顺序查找"""
for index, item in enumerate(items):
if item == key:
return index
return -1 | 1271555aea5f7291ebb3679a219d4b3eb81d87a7 | 18,881 |
def parse_prediction_key(key):
"""The "name" or "key" of a predictor is assumed to be like:
`ProHotspotCtsProvider(Weight=Classic(sb=400, tb=8), DistanceUnit=150)`
Parse this into a :class:`PredictionKey` instance, where
- `name` == "ProHotspotCtsProvider"
- `details` will be the dict: {"Weight" : "Classic(sb=400, tb=8)",
"DistanceUnit" : 150}
(Attempts to parse to ints or floats if possible).
"""
if "(" not in key:
return PredictionKey(key, {})
i = key.index("(")
name = key[:i].strip()
dets = key[i+1:-1]
dets = [x.strip() for x in _split_by_comma_not_in_brackets(dets)]
details = {}
for x in dets:
if "=" not in x:
key, value = x, None
else:
i = x.index("=")
key = x[:i].strip()
value = x[i+1:].strip()
try:
value = int(value)
except ValueError:
pass
if isinstance(value, str):
try:
value = float(value)
except ValueError:
pass
details[key] = value
return PredictionKey(name, details) | 4d971da8097a237f6df8d96bb407c9706c6ed8f6 | 18,883 |
def tick2dayfrac(tick, nbTicks):
"""Conversion tick -> day fraction."""
return tick / nbTicks | 50d01778f62203d37e733a6b328455d3ea10e239 | 18,884 |
import datetime
from dateutil.rrule import rrule, MONTHLY, MO, TU, WE, TH, FR
def get_business_day_of_month(year, month, count):
"""
For a given month get the Nth business day by count.
Count can also be negative, e.g. pass in -1 for "last"
"""
r = rrule(MONTHLY, byweekday=(MO, TU, WE, TH, FR),
dtstart=datetime.datetime(year, month, 1),
bysetpos=count)
res = r[0]
if (res == None or res.month != month or res.year != year):
raise ValueError("No dates found in range. is there a flaw in your logic?")
return res.date() | f0322df24f63ee836cf4f98099ccc0e4eff20c67 | 18,886 |
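# Illustrative calls, assuming the function above; January 2021 started on a Friday,
# so its business days run 1, 4, 5, ...
print(get_business_day_of_month(2021, 1, 3))   # 2021-01-05, the third business day
print(get_business_day_of_month(2021, 1, -1))  # 2021-01-29, the last business day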
import shapely.wkt
import shapely.geometry
def inpolygon(wkt, longitude, latitude):
""" To determine whether the longitude and latitude coordinate is within the orbit
:param wkt(str): the orbit wkt info
:param longitude: to determine whether the longitude within the orbit
:param latitude: to determine whether the latitude within the orbit
:return: logical value whether the coordinate within the orbit and multipolygon
"""
multipolygon = shapely.wkt.loads(wkt)
point = shapely.geometry.Point(longitude, latitude)
return multipolygon.contains(point), multipolygon | b844361f2fb3002a1d6df2a0301d19cc5b75470d | 18,887 |
def matrixMultVec(matrix, vector):
"""
Multiplies a matrix with a vector and returns the result as a new vector.
:param matrix: Matrix
:param vector: vector
:return: vector
"""
new_vector = []
x = 0
for row in matrix:
for index, number in enumerate(row):
x += number * vector[index]
new_vector.append(x)
x = 0
return new_vector | 8a03b3acfec0d91fcf0d2c85b4e2bdd4f3053dd2 | 18,888 |
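# A small worked example: [[1, 2], [3, 4]] times [5, 6] gives [1*5 + 2*6, 3*5 + 4*6].
print(matrixMultVec([[1, 2], [3, 4]], [5, 6]))  # [17, 39]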
import numpy as np
def get_dev_value(weight, error):
"""
:param weight: shape [N, 1], the importance weight for N source samples in the validation set
:param error: shape [N, 1], the error value for each source sample in the validation set
(typically 0 for correct classification and 1 for wrong classification)
"""
N, d = weight.shape
_N, _d = error.shape
assert N == _N and d == _d, 'dimension mismatch!'
weighted_error = weight * error
cov = np.cov(np.concatenate((weighted_error, weight), axis=1), rowvar=False)[0][1]
var_w = np.var(weight, ddof=1)
eta = - cov / var_w
return np.mean(weighted_error) + eta * np.mean(weight) - eta | 740dbd755cf540b0133ddf321207ea0bbd74fc83 | 18,889 |
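# A hypothetical usage sketch with random data: 100 validation samples, each with an
# importance weight and a 0/1 classification error, both shaped [N, 1].
import numpy as np
weights = 0.5 + np.random.rand(100, 1)
errors = (np.random.rand(100, 1) > 0.8).astype(float)
print(get_dev_value(weights, errors))  # weighted risk with a variance-control term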
def biLSTM(f_lstm, b_lstm, inputs, dropout_x=0.):
"""Feature extraction through BiLSTM
Parameters
----------
f_lstm : VariationalDropoutCell
Forward cell
b_lstm : VariationalDropoutCell
Backward cell
inputs : NDArray
seq_len x batch_size
dropout_x : float
Variational dropout on inputs
Returns
-------
outputs : NDArray
Outputs of BiLSTM layers, seq_len x 2 hidden_dims x batch_size
"""
for f, b in zip(f_lstm, b_lstm):
inputs = nd.Dropout(inputs, dropout_x, axes=[0]) # important for variational dropout
fo, _ = f.unroll(length=inputs.shape[0], inputs=inputs, layout='TNC', merge_outputs=True)
bo, _ = b.unroll(length=inputs.shape[0], inputs=inputs.flip(axis=0), layout='TNC',
merge_outputs=True)
f.reset()
b.reset()
inputs = nd.concat(fo, bo.flip(axis=0), dim=2)
return inputs | dc3cdc07a20e4ae5fbe257a81d92f15fb51333d9 | 18,890 |
import torch
def refer_expression(captions, n_ground=1, prefix="refer expressions:", sort=True):
"""
n_ground > 1
ground_indices
[1, 0, 2]
source_text
refer expressions: <extra_id_0> red crayon <extra_id_1> Yellow banana <extra_id_2> black cow
target_text
<vis_extra_id_1> <vis_extra_id_0> <vis_extra_id_2>
n_ground == 1
source_text
refer expressions: red crayon
target_text
<vis_extra_id_1>
"""
n_boxes = len(captions)
if sort:
ground_indices = torch.randperm(n_boxes)[:n_ground].sort().values
else:
ground_indices = torch.randperm(n_boxes)[:n_ground]
ground_indices = ground_indices.tolist()
source_text = [prefix]
target_text = []
if n_ground == 1:
idx = ground_indices[0]
source_text.append(f'{captions[idx]}')
target_text.append(f'<vis_extra_id_{idx}>')
else:
for j, idx in enumerate(ground_indices):
source_text.append(f'<extra_id_{j}>')
source_text.append(f'{captions[idx]}')
target_text.append(f'<vis_extra_id_{idx}>')
# target_text.append('</s>')
source_text = " ".join(source_text)
target_text = " ".join(target_text)
# return ground_indices, source_text, target_text
return source_text, target_text | 57919ee416dbb981dbb7f03163beec779785cc2f | 18,891 |
def url_to_filename(base, url):
"""Return the filename to which the page is frozen.
base -- path to the file
url -- web app endpoint of the page
"""
if url.endswith('/'):
url = url + 'index.html'
return base / url.lstrip('/') | 35084e8b5978869bf317073c76bafc356a7d9046 | 18,892 |
def _msd_anom_3d(time, D_alpha, alpha):
"""3d anomalous diffusion function."""
return 6.0*D_alpha*time**alpha | e5204c52368202665e4dd4acd7d86096349c0d29 | 18,893 |
import json
def make_json_response(status_code, json_object, extra_headers=None):
"""
Helper function to serialize a JSON object and add the JSON content type header.
"""
headers = {
"Content-Type": 'application/json'
}
if extra_headers is not None:
headers.update(extra_headers)
return status_code, json.dumps(json_object), headers | 4857b806819e44b7a77e0a9a51df7b4fe6678656 | 18,894 |
from datetime import datetime
def calc_dst_temerin_li(time, btot, bx, by, bz, speed, speedx, density, version='2002n', linear_t_correction=False):
"""Calculates Dst from solar wind input according to Temerin and Li 2002 method.
Credits to Xinlin Li LASP Colorado and Mike Temerin.
Calls _jit_calc_dst_temerin_li. All constants are defined in there.
Note: vx has to be used with a positive sign throughout the calculation.
Parameters
==========
time : np.array
Array containing time variables.
btot : np.array
Array containing Btot.
bx : np.array
Array containing Bx in coordinate system ?.
by : np.array
Array containing By in coordinate system ?.
bz : np.array
Array containing Bz in coordinate system ?.
speed : np.array
Array containing solar wind speed.
speedx : np.array
Array containing solar wind speed in x-direction.
density : np.array
Array containing solar wind density.
    version : str (default='2002n')
String determining which model version should be used.
Returns
=======
    dst_tl : np.array
Array with calculated Dst values over timesteps time.
"""
# Arrays
dst1=np.zeros(len(bz))
dst2=np.zeros(len(bz))
dst3=np.zeros(len(bz))
dst_tl=np.zeros(len(bz))
# Define initial values (needed for convergence, see Temerin and Li 2002 note)
dst1[0:10]=-15
dst2[0:10]=-13
dst3[0:10]=-2
if version == '2002':
newparams = False
else:
newparams = True
if version in ['2002', '2002n']:
# julian_days = [sunpy.time.julian_day(num2date(x)) for x in time]
julian_days = [astropy.time.Time(num2date(x), format='datetime', scale='utc').jd for x in time]
return _jit_calc_dst_temerin_li_2002(time, btot, bx, by, bz, speed, speedx, density, dst1, dst2, dst3, dst_tl, julian_days, newparams=newparams)
elif version == '2006':
dst1[0:10], dst2[0:10], dst3[0:10] = -10, -5, -10
ds1995 = time - date2num(datetime(1995,1,1))
ds2000 = time - date2num(datetime(2000,1,1))
# YEARLY DRIFT CORRECTION TERM (NOT IN PAPER)
if linear_t_correction:
drift_corr = -0.014435865642103548 * ds2000 + 9.57670996872173
else:
drift_corr = 0.
return _jit_calc_dst_temerin_li_2006(ds1995, ds2000, btot, bx, by, bz, speed, speedx, density, dst1, dst2, dst3) + drift_corr | f333217e34656c4566a254c1c383191f11e8c3d0 | 18,896 |
def reconstruct(vars_to_reconstruct, scheme, order_used):
"""
Reconstructs all variables using the requested scheme.
:param vars_to_reconstruct: The variables at the cell centers.
:type vars_to_reconstruct: list of list of double
:param Reconstruction.Scheme scheme: The reconstruction scheme to use.
:param order_used: Filled by the function and is used to return
the order of the reconstruction used.
:type order_used: list of int
:return: (`list of list of double`) The face reconstructed variables.
Each variable is of length `2 * number_of_cells`
"""
reconstructed_vars = [None] * len(vars_to_reconstruct)
for i in range(len(vars_to_reconstruct)):
extents = np.asarray([len(vars_to_reconstruct[i])])
reconstructed_vars[i] = _recons_dispatch[scheme](
vars_to_reconstruct[i], np.asarray(extents), 1, scheme, order_used)
return np.asarray(reconstructed_vars) | b1e3cd8b8ed91b6c7ccdd5d6903fbce3109a3871 | 18,899 |
import json
import re
from datetime import datetime, timedelta
import random
def dev_view(request, slug=""):
"""View for homepage or individual developer."""
if slug == "":
dev_name = list(Dev.objects.all().values_list('dev_name', flat=True))
dev_img_address = list(Dev.objects.values_list('dev_image_address', flat=True))
dev_slug = list(Dev.objects.values_list('dev_slug', flat=True))
dev_order = list(Dev.objects.values_list('dev_order_pop', flat=True))
if len(list(TaskResult.objects.filter(task_name='homepage.tasks.google_fetch_query').values())) == 1:
g_query_datetime_init = list(TaskResult.objects.filter(
task_name='homepage.tasks.google_fetch_query'
).values_list()[0])
else:
task_id_query = list(TaskResult.objects.filter(
task_name='homepage.tasks.google_fetch_query'
).values())[1]['task_id']
g_query_datetime_init = list(TaskResult.objects.filter(
task_id=task_id_query
).values_list()[0])
g_query_datetime = g_query_datetime_init[11]
if len(list(TaskResult.objects.filter(task_name='homepage.tasks.google_fetch_dev').values())) == 1:
g_dev_datetime_init = list(TaskResult.objects.filter(
task_name='homepage.tasks.google_fetch_dev'
).values_list()[0])
else:
task_id_dev = list(TaskResult.objects.filter(
task_name='homepage.tasks.google_fetch_dev'
).values())[1]['task_id']
g_dev_datetime_init = list(TaskResult.objects.filter(
task_id=task_id_dev
).values_list()[0])
g_dev_datetime = g_dev_datetime_init[11]
        if g_dev_datetime > g_query_datetime:
            g_datetime = g_dev_datetime
        else:
            g_datetime = g_query_datetime
if len(list(TaskResult.objects.filter(task_name='homepage.tasks.google_fetch_query').values())) == 1:
g_query = json.loads(TaskResult.objects.filter(
task_name='homepage.tasks.google_fetch_query'
).values()[0]['result'])
else:
task_id_query = list(TaskResult.objects.filter(
task_name='homepage.tasks.google_fetch_query'
).values())[1]['task_id']
g_query = json.loads(TaskResult.objects.filter(
task_id=task_id_query
).values()[0]['result'])
if len(list(TaskResult.objects.filter(task_name='homepage.tasks.google_fetch_dev').values())) == 1:
g_dev = json.loads(TaskResult.objects.filter(
task_name='homepage.tasks.google_fetch_dev'
).values()[0]['result'])
else:
task_id_dev = list(TaskResult.objects.filter(
task_name='homepage.tasks.google_fetch_dev'
).values())[1]['task_id']
g_dev = json.loads(TaskResult.objects.filter(
task_id=task_id_dev
).values()[0]['result'])
# 2-day date filter for homepage 'Latest News'
def date_criteria(g_inp):
dates = [re.search('[0-9]{4}-[0-9]{2}-[0-9]{2}', g_inp[i][8]).group(0) for i in range(len(g_inp))]
dates_datetime = [datetime.strptime(i, '%Y-%m-%d') for i in dates]
today = datetime.today()
            time_criteria = datetime(today.year, today.month, today.day) - timedelta(days=2)
return [g_inp[i] for i in range(len(g_inp)) if dates_datetime[i] >= time_criteria]
entries_for_carousel_init = [date_criteria(g_dev) + date_criteria(g_query)][0]
entries_for_carousel = [i for i in entries_for_carousel_init if i[9] != 'none']
entries_for_latest_news_init = entries_for_carousel
entries_for_latest_news_init = sorted(entries_for_latest_news_init, key=lambda sort: sort[8], reverse=True)
link_latest_news = [i[1] for i in entries_for_latest_news_init]
link_count = [link_latest_news.count(link_latest_news[i]) for i in
range(len(link_latest_news))]
link_zip = list(zip(link_latest_news, link_count))
link_unique = [link_zip[i][0] if link_zip[i][1] == 1 else 'none' for i in
range(len(link_zip))]
nonunique_indices_link = [i for i, x in enumerate(link_unique) if x == "none"]
nonunique_check_link = []
nonunique_entries_nonrepeat_link = []
for i in nonunique_indices_link:
nonunique_check_link.append(link_latest_news[i])
count_inst = nonunique_check_link.count(link_latest_news[i])
if count_inst == 1:
nonunique_entries_nonrepeat_link.append(entries_for_latest_news_init[i])
google_search_results_unique = []
for i in range(len(link_unique)):
try:
if link_unique[i] != 'none':
google_search_results_unique.append(entries_for_latest_news_init[i])
except IndexError:
pass
google_search_results_combined = google_search_results_unique + nonunique_entries_nonrepeat_link
page = request.GET.get('page', 1)
paginator2 = Paginator(google_search_results_combined, 2000)
try:
entries_for_latest_news = paginator2.page(page)
except PageNotAnInteger:
entries_for_latest_news = paginator2.page(1)
except EmptyPage:
entries_for_latest_news = paginator2.page(paginator2.num_pages)
random.shuffle(entries_for_carousel)
if request.user.is_authenticated:
if request.method == "POST":
p_form = FavoriteGamesUpdateForm(data=request.POST)
user_fav = list(FavoriteGames.objects.all().values_list())
user_slug_list = [user_fav[i][2] for i in range(len(user_fav))
if user_fav[i][1] == request.user.profile.id]
if request.POST["dev_user_str"] not in user_slug_list:
if p_form.is_valid():
form_instance = p_form.save(commit=False)
form_instance.profile = Profile.objects.get(user=request.user)
form_instance.dev_user_str = p_form.cleaned_data["dev_user_str"]
form_instance.save()
else:
FavoriteGames.objects.filter(
profile_id=request.user.profile.id
).filter(
dev_user_str=request.POST.get('dev_user_str')
).delete()
fav_game_check = list(FavoriteGames.objects.filter(profile_id=request.user.profile.id).values())
devs_in_favs = [fav_game_check[i]['dev_user_str'] for i in range(len(fav_game_check))]
dev_game_check_list = []
for j, i in enumerate(dev_slug):
if i in devs_in_favs:
dev_game_check_list.append('yes')
else:
dev_game_check_list.append('no')
else:
dev_game_check_list = ""
dev_list_name = sorted(list(zip_longest(dev_name, dev_img_address, dev_slug, dev_game_check_list, dev_order)),
key=lambda lowercase: lowercase[0].lower())
dev_list_pop = sorted(list(zip_longest(dev_name, dev_img_address, dev_slug, dev_game_check_list, dev_order)),
key=lambda dev_order_list: dev_order_list[4])
cache_key = "test_cache_key"
if cache.get(cache_key) is not None:
paginator_for_class_1 = Paginator(cache.get(cache_key), 48)
else:
cache.set(
cache_key,
dev_list_pop,
60 * 60 * 4,
)
context = {
'numbers': dev_list_pop,
'entries': entries_for_carousel,
'latest_news': entries_for_latest_news,
'g_query_datetime': g_query_datetime,
'g_dev_datetime': g_dev_datetime,
'g_datetime': g_datetime,
}
if request.method == "POST":
return redirect("/")
else:
return render(request, "homepage/dev_base.html", context)
else:
dev_query_results_init = TaskResult.objects.filter(task_name='homepage.tasks.rawg_fetch_dev')
dev_query_results = json.loads(dev_query_results_init.values()[0]['result'])
if len(list(TaskResult.objects.filter(task_name='homepage.tasks.google_fetch_dev').values())) == 1:
g_dev_datetime_init = list(TaskResult.objects.filter(
task_name='homepage.tasks.google_fetch_dev'
).values_list()[0])
else:
task_id_dev = list(TaskResult.objects.filter(
task_name='homepage.tasks.google_fetch_dev'
).values())[1]['task_id']
g_dev_datetime_init = list(TaskResult.objects.filter(
task_id=task_id_dev
).values_list()[0])
g_dev_datetime = g_dev_datetime_init[11]
slug_index1 = [dev_query_results][0][0].index(slug)
dev_list = [dev_query_results[0][slug_index1]]
slugs_per_dev_list = dev_query_results[1][slug_index1]
names_per_dev_list = dev_query_results[2][slug_index1]
ratings_per_dev_list = dev_query_results[3][slug_index1]
background_img_per_dev_list = dev_query_results[4][slug_index1]
released_per_dev_list = dev_query_results[5][slug_index1]
full_clip_per_dev_list = dev_query_results[6][slug_index1]
ratings_count_per_dev_list = dev_query_results[7][slug_index1]
dev_game_data = sorted(list(zip_longest(dev_list, slugs_per_dev_list, names_per_dev_list,
ratings_per_dev_list, background_img_per_dev_list,
released_per_dev_list,
full_clip_per_dev_list, ratings_count_per_dev_list)),
key=lambda sort: sort[7], reverse=True)
dev_game_data2 = []
for i in range(len(dev_game_data)):
try:
if dev_game_data[i][4] is not None:
dev_game_data2.append(dev_game_data[i])
except IndexError:
pass
page = request.GET.get('page', 1)
paginator2 = Paginator(dev_game_data2, 2000)
try:
numbers = paginator2.page(page)
except PageNotAnInteger:
numbers = paginator2.page(1)
except EmptyPage:
numbers = paginator2.page(paginator2.num_pages)
if len(list(TaskResult.objects.filter(task_name='homepage.tasks.google_fetch_dev').values())) == 1:
google_query_results = json.loads(TaskResult.objects.filter(
task_name='homepage.tasks.google_fetch_dev'
).values()[0]['result'])
else:
task_id_dev = list(TaskResult.objects.filter(
task_name='homepage.tasks.google_fetch_dev'
).values())[1]['task_id']
google_query_results = json.loads(TaskResult.objects.filter(
task_id=task_id_dev
).values()[0]['result'])
dev_name_list = list(Dev.objects.all().values_list('dev_name', flat=True))
dev_slug_list = list(Dev.objects.all().values_list('dev_slug', flat=True))
dev_img_list = list(Dev.objects.values_list('dev_image_address', flat=True))
dev_slug_index = dev_slug_list.index(slug)
dev_name_for_site = dev_name_list[dev_slug_index]
dev_img_for_site = dev_img_list[dev_slug_index]
google_search_results = [google_query_results[i] if google_query_results[i][6] == slug else 'none'
for i in range(len(google_query_results))]
google_search_results2 = []
for i in range(len(google_search_results)):
try:
if google_search_results[i] != 'none':
google_search_results2.append(google_search_results[i])
except IndexError:
pass
context = {
'numbers': numbers,
'google_search_results': google_search_results2,
'dev_name_for_site': dev_name_for_site,
'dev_img_for_site': dev_img_for_site,
'g_dev_datetime': g_dev_datetime,
}
return render(request, "homepage/dev_iter.html", context) | 363819f854e26b8c62f8fe41fbfbf2e64296246f | 18,901 |
def dpuEnableTaskProfile(task):
"""
Enable profiling facility of DPU Task while running to get its performance metrics
task: DPU Task. This parameter should be gotten from the result of dpuCreatTask()
Returns: 0 on success, or report error in case of any failure
"""
return pyc_libn2cube.pyc_dpuEnableTaskProfile(task) | 5bb1435ca194b214695891d451f2f56a4cdf6857 | 18,902 |
def get_isotopic_distribution(z):
"""
For an element with number ``z``, returns two ``np.ndarray`` objects containing that element's weights and relative abundances.
Args:
z (int): atomic number
Returns:
masses (np.ndarray): list of isotope masses
weights (np.ndarray): list of weights (relative to 1.00 for largest)
"""
z = str(z)
masses = list(ISOTOPE_DICTIONARY[z].keys())
weights = list(ISOTOPE_DICTIONARY[z].values())
return np.array(masses), np.array(weights) | 4b038319c37dfd13f0ef085c2b3286f6fc2749c3 | 18,903 |
def url_root():
"""根路径"""
return """
<p>Hello ! Welcome to Rabbit's WebServer Platform !</p>
<a href="http://www.miibeian.gov.cn/" target="_blank" style="">京ICP备 18018365 号</a> @2018Rabbit
""" | 2e6d1d5301ac67bdec30cdeeaeed3c8638568de9 | 18,904 |
import uuid
def CreateMatrix(args, context, history_id, gcs_results_root, release_track):
"""Creates a new iOS matrix test in Firebase Test Lab from the user's params.
Args:
args: an argparse namespace. All the arguments that were provided to this
gcloud command invocation (i.e. group and command arguments combined).
context: {str:obj} dict containing the gcloud command context, which
includes the Testing API client+messages libs generated by Apitools.
history_id: {str} A history ID to publish Tool Results to.
gcs_results_root: the root dir for a matrix within the GCS results bucket.
release_track: the release track that the command is invoked from.
Returns:
A TestMatrix object created from the supplied matrix configuration values.
"""
creator = MatrixCreator(args, context, history_id, gcs_results_root,
release_track)
return creator.CreateTestMatrix(uuid.uuid4().hex) | e536001e768f2574d6c5d773b70e6b4e58c6c3da | 18,905 |
def map_keys(func, d):
""" Returns a new dict with func applied to keys from d, while values
remain unchanged.
>>> D = {'a': 1, 'b': 2}
>>> map_keys(lambda k: k.upper(), D)
{'A': 1, 'B': 2}
>>> assert map_keys(identity, D) == D
>>> map_keys(identity, {})
{}
"""
return dict((func(k), v) for k, v in d.iteritems()) | 5e9798d208db5e43dad497d64a4b8e469c67eb3b | 18,907 |
from qiniu import Auth
def generate_qiniu_token(object_name, use_type, expire_time=600):
"""
    Generates the token required for uploading to Qiniu Cloud.
    :param object_name: File name to store the upload under on Qiniu.
    :param use_type: Operation type.
    :param expire_time: Token expiry time in seconds; defaults to 600 (ten minutes).
:return:
"""
bucket_name = PRIVATE_QINIU_BUCKET_NAME
    # Fill in your Access Key and Secret Key
access_key = PRIVATE_QINIU_ACCESS_KEY
secret_key = PRIVATE_QINIU_SECRET_KEY
    # Build the authentication object
q = Auth(access_key, secret_key)
    # Example upload policy
# https://developer.qiniu.com/kodo/manual/1206/put-policy
policy = {
# 'callbackUrl':'https://requestb.in/1c7q2d31',
# 'callbackBody':'filename=$(fname)&filesize=$(fsize)'
# 'persistentOps':'imageView2/1/w/200/h/200'
}
token = q.upload_token(bucket_name, object_name, expire_time, policy)
base_url = PRIVATE_MEDIA_URL_PREFIX
return (object_name, token, base_url, expire_time) | 9d0b65fb08032ad557f50cb73c00b4ed0f8eae5a | 18,908 |
def get_s3_object(bucket, key_name, local_file):
"""Download a S3 object to a local file in the execution environment
Parameters
----------
bucket: string, required
S3 bucket that holds the message
key: string, required
S3 key is the email object
Returns
-------
email_msg: email.message.Message object
"""
tracer.put_metadata('object', f's3://{bucket}/{key_name}')
try:
s3_resource.Bucket(bucket).download_file(key_name, local_file)
result = 'ok'
tracer.put_annotation('OBJECT_DOWNLOAD', 'SUCCESS')
except Exception as e:
tracer.put_annotation('OBJECT_DOWNLOAD', 'FAILURE')
result = f'Error: {str(e)}'
return(result) | 02b10623e30eff1ee5093d4e0f1ee51b3b97d0ac | 18,909 |
def jaxpr_eqns_input_sizes(jaxpr) -> np.ndarray:
"""Return a list of input sizes for each equation in the jaxpr.
Args:
jaxpr: Jaxpr to get input sizes for.
Returns:
A #eqns * #eqns numpy array of input sizes. cost[l, r] represents the
input size of the l-th to (r - 1)-th equation in the jaxpr.
"""
length = len(jaxpr.eqns)
input_sizes = np.full((length + 1, length + 1), 0, dtype=np.float32)
outvars = OrderedSet()
for k in range(0, length + 1):
if k > 0:
outvars = outvars.union(jaxpr.eqns[k - 1].outvars)
invars = OrderedSet()
total_size = 0
for r in range(k + 1, length + 1):
for invar in jaxpr.eqns[r - 1].invars:
if (isinstance(invar, Var) and invar in outvars and
invar not in invars):
invars.add(invar)
total_size += invar.aval.size * invar.aval.dtype.itemsize
input_sizes[k, r] = total_size
return input_sizes | 0209c0342725ae83ea8051ef47852134e6ad4502 | 18,910 |
def extract_message(raw_html):
"""Returns the content of the message element.
This element appears typically on pages with errors.
:param raw_html: Dump from any page.
"""
results = re_message.findall(raw_html)
if results:
return results[0]
return None | 498ee1c38c08db365b1bf91ecd32a79c2d2f5f68 | 18,911 |
from typing import Callable
from typing import Tuple
def _weighted_essentially_non_oscillatory_vectorized(
eno_order: int, values: Array, spacing: float, boundary_condition: Callable[[Array, int],
Array]) -> Tuple[Array, Array]:
"""Implements a more "vectorized" but ultimately slower version of `weighted_essentially_non_oscillatory`."""
if eno_order < 1:
raise ValueError(f"`eno_order` must be at least 1; got {eno_order}.")
values = boundary_condition(values, eno_order)
diffs = (values[1:] - values[:-1]) / spacing
if eno_order == 1:
return (diffs[:-1], diffs[1:])
substencil_approximations = _align_substencil_values(
jax.vmap(jnp.correlate, (None, 0), 0)(diffs, _diff_coefficients(eno_order)), jnp)
diffs2 = diffs[1:] - diffs[:-1]
chol_T = jnp.asarray(np.linalg.cholesky(_smoothness_indicator_quad_form(eno_order)).swapaxes(-1, -2))
smoothness_indicators = _align_substencil_values(
jnp.sum(jnp.square(jax.vmap(jax.vmap(jnp.correlate, (None, 0), 1), (None, 0), 0)(diffs2, chol_T)), -1), jnp)
unscaled_weights = 1 / jnp.square(smoothness_indicators + WENO_EPS)
unnormalized_weights = (jnp.asarray(_substencil_coefficients(eno_order)[..., np.newaxis]) *
jnp.stack([unscaled_weights[:, :-1], unscaled_weights[:, 1:]]))
weights = unnormalized_weights / jnp.sum(unnormalized_weights, 1, keepdims=True)
return tuple(jnp.sum(jnp.stack([substencil_approximations[:-1], substencil_approximations[1:]]) * weights, 1)) | debd652ddf02419e191d9d0c5d21640760d3f227 | 18,912 |
def defaults(dictionary, overwriteNone=False, **kwargs):
"""
    Set default values of a given dictionary, with the option to overwrite None values.
    Returns kwargs updated with the dictionary's values; a dictionary value takes
    precedence over a kwarg unless it is None and overwriteNone is True.
:param dict dictionary:
:param overwriteNone: Whether to overwrite None values.
:param kwargs:
"""
for key, value in dictionary.items():
dictValueIsNone = value is None
kwargsHasValue = key in kwargs
if overwriteNone and dictValueIsNone and kwargsHasValue:
continue
# Overwrite kwargs with dictionary
kwargs[key] = value
return kwargs | 6def5bb71839b3b627a5597ea6fa7fa1b48e463b | 18,913 |
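# Illustrative calls, assuming the function above: a dictionary value wins over the
# matching kwarg, unless it is None and overwriteNone=True.
print(defaults({'a': 1, 'b': None}, a=10, b=2, c=3))
# {'a': 1, 'b': None, 'c': 3}
print(defaults({'a': 1, 'b': None}, overwriteNone=True, a=10, b=2, c=3))
# {'a': 1, 'b': 2, 'c': 3}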
from typing import Union
from typing import Optional
from typing import Dict
import tqdm
def expected_average_shortest_distance_to_miner(
crawl_graph: Union[
ProbabilisticWeightedCrawlGraph[CrawledNode], CrawlGraph[CrawledNode]
],
distances: Optional[np.ndarray] = None,
miner_probability: Optional[Dict[CrawledNode, float]] = None,
) -> Dict[CrawledNode, float]:
"""Estimates the average shortest distance to a miner for each node in the graph"""
if not isinstance(crawl_graph, ProbabilisticWeightedCrawlGraph):
crawl_graph = ProbabilisticWeightedCrawlGraph(crawl_graph)
if miner_probability is None:
miner_probability = estimate_miner_probability(crawl_graph)
if distances is None:
distances = crawl_graph.probabilistic_shortest_distances()
elif (
distances.ndim != 2
or distances.shape[0] != len(crawl_graph)
or distances.shape[1] != len(crawl_graph)
):
raise ValueError(
f"distances is expected to be an {len(crawl_graph)}x{len(crawl_graph)} matrix"
)
return {
node: sum(
distances[index][i] * miner_probability[crawl_graph.nodes[i]]
for i in range(len(crawl_graph))
)
for node, index in tqdm(
((n, crawl_graph.node_indexes[n]) for n in crawl_graph),
desc="calculating expected distance to miners",
leave=False,
unit=" nodes",
total=len(crawl_graph),
)
} | 6ea56881dce6d589eebec6422a0a5ffae41fe153 | 18,914 |
from typing import Callable
def dummy_state_sb(dummy_state: State, dummy_train_dataloader: DataLoader, conv_model: MosaicClassifier,
loss_fun_tuple: Callable, epoch: int, batch: int) -> State:
"""Dummy state with required values set for Selective Backprop
"""
dummy_state.train_dataloader = dummy_train_dataloader
dummy_state.epoch = epoch
dummy_state.step = epoch * dummy_state.steps_per_epoch + batch
dummy_state.model = conv_model
dummy_state.model.module.loss = loss_fun_tuple
return dummy_state | 4f4af7193ccf0a4fb883a7d4b42ef58da49333b3 | 18,915 |
import libsbml as sbml
def create_model(species={}, parameters={}, reactions={}, events={}):
"""Returns an SBML Level 3 model.
Example:
species = { 'E': 1, \
'EM': 0, \
'EM2': 0, \
'F': 100, \
}
parameters = {'k': (1e-06,'per_min'), \
}
reactions = { 'Production_E': \
{ 're': [(1,'E'),(1,'F')], \
'pr': [(2,'E')], \
'kin' : 'k * E * F' \
}, \
}
events = {'e': \
{ 'trigger': 'true', \
'delay': '10', \
'assignments': [('M','1'),], \
}, \
}
"""
# Create an empty SBMLDocument object. It's a good idea to check for
# possible errors. Even when the parameter values are hardwired like
# this, it is still possible for a failure to occur (e.g., if the
# operating system runs out of memory).
try:
document = sbml.SBMLDocument(3, 1)
except ValueError:
raise RuntimeError("Could not create SBMLDocumention object")
# Create the basic Model object inside the SBMLDocument object.
model = document.createModel()
check(model, "create model")
check(model.setTimeUnits("second"), "set model-wide time units")
check(model.setExtentUnits("item"), "set model units of extent")
check(
model.setSubstanceUnits("item"), "set model substance units"
) # mole, item, gram, kilogram, dimensionless
# Create a unit definition we will need later.
per_second = model.createUnitDefinition()
check(per_second, "create unit definition")
check(per_second.setId("per_min"), "set unit definition id")
unit = per_second.createUnit()
check(unit, "create unit")
check(unit.setKind(sbml.UNIT_KIND_SECOND), "set unit kind")
check(unit.setExponent(-1), "set unit exponent")
check(unit.setScale(0), "set unit scale")
check(
unit.setMultiplier(1), "set unit multiplier"
)
# Create a compartment inside this model
c1 = model.createCompartment()
check(c1, "create compartment")
check(c1.setId("c1"), "set compartment id")
check(c1.setConstant(True), 'set compartment "constant"')
check(c1.setSize(1), 'set compartment "size"')
check(c1.setSpatialDimensions(3), "set compartment dimensions")
check(
c1.setUnits("dimensionless"), "set compartment size units"
)
# Create species inside this model, set the required attributes
# for each species in SBML Level 3 (which are the 'id', 'compartment',
# 'constant', 'hasOnlySubstanceUnits', and 'boundaryCondition'
# attributes), and initialize the amount of the species along with the
# units of the amount.
for s_str, s_val in species.items():
s = model.createSpecies()
check(s, "create species")
check(s.setId(s_str), "set species id")
check(s.setCompartment("c1"), "set species compartment")
check(s.setConstant(False), 'set "constant" attribute')
check(s.setInitialAmount(float(s_val)), "set initial amount")
check(s.setSubstanceUnits("item"), "set substance units")
check(s.setBoundaryCondition(False), 'set "boundaryCondition"')
check(s.setHasOnlySubstanceUnits(False), 'set "hasOnlySubstanceUnits"')
# Create a parameter object inside this model, set the required
# attributes 'id' and 'constant' for a parameter in SBML Level 3, and
# initialize the parameter with a value along with its units.
for k_str in parameters:
k = model.createParameter()
check(k, "create parameter k")
check(k.setId(k_str), "set parameter id")
check(k.setConstant(True), 'set parameter "constant"')
check(k.setValue(parameters[k_str][0]), "set parameter value")
check(k.setUnits(parameters[k_str][1]), "set parameter units")
# Create a reaction inside this model, set the reactants and products,
# and set the reaction rate expression (the SBML "kinetic law"). We
# set the minimum required attributes for all of these objects. The
# units of the reaction rate are determined from the 'timeUnits' and
# 'extentUnits' attributes on the Model object.
for r_str in reactions:
r = model.createReaction()
check(r, "create reaction")
check(r.setId(r_str), "set reaction id")
check(r.setReversible(False), "set reaction reversibility flag")
check(r.setFast(False), 'set reaction "fast" attribute')
reactants = reactions[r_str]["re"]
for re_val, re_str in reactants:
species_ref = r.createReactant()
check(species_ref, "create reactant")
check(species_ref.setSpecies(re_str), "assign reactant species")
            check(species_ref.setStoichiometry(re_val), "set stoichiometry")
check(species_ref.setConstant(True), 'set "constant" on species')
products = reactions[r_str]["pr"]
for pr_val, pr_str in products:
species_ref = r.createProduct()
check(species_ref, "create product")
check(species_ref.setSpecies(pr_str), "assign product species")
            check(species_ref.setStoichiometry(pr_val), "set stoichiometry")
check(species_ref.setConstant(True), 'set "constant" on species')
math_ast = sbml.parseL3Formula(reactions[r_str]["kin"])
kinetic_law = r.createKineticLaw()
check(math_ast, f"create AST for rate expression")
check(kinetic_law, "create kinetic law")
check(kinetic_law.setMath(math_ast), "set math on kinetic law")
# create events
for e_str in events:
e = model.createEvent()
check(e, "create event")
check(e.setId(e_str), "set id")
check(e.setUseValuesFromTriggerTime(False), "?")
t = model.createTrigger()
check(t, "create trigger")
check(
t.setMath(sbml.parseL3Formula(events[e_str]["trigger"])),
"set trigger condition",
)
check(t.setPersistent(False), "default not persistent")
check(t.setInitialValue(False), "default not initially true")
check(e.getTrigger().getMath(), 'Problem when creating the trigger condition. The trigger will not work.')
# print( '> ' + sbml.formulaToString(e.getTrigger().getMath()) )
d = model.createDelay()
check(d, "create delay")
check(d.setMath(sbml.parseFormula(events[e_str]["delay"])), "set math")
check(e.setDelay(d), "set delay")
for ass in events[e_str]["assignments"]:
ea = model.createEventAssignment()
check(ea, "check event assignment")
check(ea.setVariable(ass[0]), "set variable")
check(ea.setMath(sbml.parseL3Formula(ass[1])), "set math")
return document | 1950509f83b858ef7829aa6f30caaa3734ff2946 | 18,916 |
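# Usage sketch built from the docstring's example; serialization uses libSBML's
# standard writer (writeSBMLToString). Values are illustrative.
doc = create_model(
    species={'E': 1, 'EM': 0, 'EM2': 0, 'F': 100},
    parameters={'k': (1e-06, 'per_min')},
    reactions={'Production_E': {'re': [(1, 'E'), (1, 'F')],
                                'pr': [(2, 'E')],
                                'kin': 'k * E * F'}},
)
print(sbml.writeSBMLToString(doc)[:200])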
def get_network_connection_query(endpoint_ids: str, args: dict) -> str:
"""Create the network connection query.
Args:
endpoint_ids (str): The endpoint IDs to use.
args (dict): The arguments to pass to the query.
Returns:
str: The created query.
"""
remote_ip_list = args.get('remote_ip', '')
if not remote_ip_list:
raise DemistoException('Please provide a remote_ip argument.')
remote_ip_list = wrap_list_items_in_double_quotes(remote_ip_list)
local_ip_filter = ''
if args.get('local_ip'):
local_ip_list = wrap_list_items_in_double_quotes(args.get('local_ip', ''))
local_ip_filter = f'and action_local_ip in({local_ip_list})'
port_list = args.get('port')
port_list_filter = f'and action_remote_port in({port_list})' if port_list else ''
return f'''dataset = xdr_data | filter agent_id in ({endpoint_ids}) and event_type = STORY
{local_ip_filter} and action_remote_ip in({remote_ip_list}) {port_list_filter}|
fields agent_hostname, agent_ip_addresses, agent_id, actor_effective_username, action_local_ip, action_remote_ip,
action_remote_port, dst_action_external_hostname, action_country, actor_process_image_name, actor_process_image_path,
actor_process_command_line, actor_process_image_sha256, actor_process_instance_id, actor_process_causality_id''' | 6390c6ae4436632055fb90687e51cfac2ca09a05 | 18,917 |
import json
def dump_into_json(filename, metrics):
"""Dump the metrics dictionary into a JSON file
It will automatically dump the dictionary:
metrics = {'duration': duration,
'voltage_extremes': voltage_extremes,
'num_beats': num_beats,
'mean_hr_bpm': mean_hr_bpm,
'beats': beats}.
    into a JSON file named after the data file.
:param filename: name of the file being read
:param metrics: a dictionary containing duration,
voltage extremes, number of beats, beats per minute,
and the time where beats occur
:returns:
- successful_JSON - test if it has successfully create JSON
"""
    successful_JSON = False
    try:
        with open(filename + '.json', 'w') as output_file:
            json.dump(metrics, output_file)
        successful_JSON = True
    except TypeError:
        print("Unsuccessfully output JSON file")
    return successful_JSON
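# Illustrative usage with a hypothetical metrics dictionary.
metrics = {'duration': 27.775,
           'voltage_extremes': (-0.68, 1.05),
           'num_beats': 35,
           'mean_hr_bpm': 75.6,
           'beats': [0.214, 1.028, 1.842]}
if dump_into_json('test_data1', metrics):
    print('Wrote test_data1.json')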
import numpy as np
def _costfun(params, pose0, fixed_pt3d, n_cams, n_pts, cam_idxs, pt3d_idxs, pts2d, K, px_err_sd):
"""
Compute residuals.
`params` contains camera parameters and 3-D coordinates.
"""
if isinstance(params, (tuple, list)):
params = np.array(params)
params = np.hstack((pose0, params))
poses, pts3d = _unpack(params, n_cams, n_pts if len(fixed_pt3d) == 0 else 0)
points_3d = fixed_pt3d if len(pts3d) == 0 else pts3d
points_proj = _project(points_3d[pt3d_idxs], poses[cam_idxs], K)
px_err = ((pts2d - points_proj) / px_err_sd[:, None]).ravel()
return px_err | 3e97e7d14712fe8b89b60de958fd743c728e8cba | 18,919 |
import base64
import requests
def get_headers(base_url: str, client_id: str, client_secret: str, grant_type: str, verify: bool):
"""
Create header with OAuth 2.0 authentication information.
:type base_url: ``str``
:param base_url: Base URL of the IdentityIQ tenant.
:type client_id: ``str``
:param client_id: Client Id for OAuth 2.0.
:type client_secret: ``str``
:param client_secret: Client Secret for OAuth 2.0.
:type grant_type: ``str``
    :param grant_type: Grant Type for OAuth 2.0. Defaulted to 'client_credentials' if not provided.
    :type verify: ``bool``
    :param verify: Whether to verify TLS certificates when requesting the token.
    :return: Header with OAuth 2.0 information if client_id & client_secret are provided, else None.
This will return None if the client_id & client_secret were not valid (authorized).
"""
if base_url is None or client_id is None or client_secret is None:
return None
if grant_type is None:
grant_type = 'client_credentials'
auth_cred = client_id + ':' + client_secret
iiq_oauth_body = f'grant_type={grant_type}'
iiq_oauth_headers = {
'Content-Type': 'application/x-www-form-urlencoded',
'Authorization': 'Basic %s' % base64.b64encode(auth_cred.encode()).decode()
}
oauth_response = requests.request("POST", url=f'{base_url}{IIQ_OAUTH_EXT}', data=iiq_oauth_body,
headers=iiq_oauth_headers, verify=verify)
if oauth_response is not None and 200 <= oauth_response.status_code < 300:
return {
'Authorization': 'Bearer %s' % oauth_response.json().get('access_token', None),
'Content-Type': 'application/json'
}
else:
err_msg = 'Failed to get response'
if oauth_response is not None:
err_msg += f' {oauth_response.status_code}'
raise DemistoException(err_msg) | 06ced982595d4abe99e193ec7ab43e366d575f7b | 18,921 |
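# Hedged usage sketch; the tenant URL and credentials below are placeholders.
headers = get_headers('https://sailpoint.example.com/identityiq/', 'my-client-id',
                      'my-client-secret', 'client_credentials', verify=True)
if headers:
    print(headers['Content-Type'])  # application/json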
import logging
import datetime
import matplotlib.pyplot as plt
def draw_pie(fracs, labels):
    """
    This method plots the pie chart of labels, then saves it into the '/tmp/' folder
    """
    logging.info("Drawing the pie chart..")
    fig = plt.figure()
    plt.pie(fracs, labels=labels, autopct=make_autopct(fracs), shadow=True)
    plt.title("Top 10 labels for newly opened issues")
    figname = "piechart_{}_{}.png".format(str(datetime.datetime.today().date()),
                                          str(datetime.datetime.today().time()))
    pic_path = "/tmp/{}".format(figname)
    fig.savefig(pic_path)
    return pic_path
import re
def eval_function_old(param, param_type):
""" Eval Function (Deprecated)
isOwner 0xe982E462b094850F12AF94d21D470e21bE9D0E9C
:param param:
:param param_type:
:return:
"""
    try:
        splitted_input = param.split(' ')
    except TypeError:
        return None
    try:
        print(splitted_input)
        if len(splitted_input[1][2:]) != 40:
            print('launch error, address must be a 40 character hexadecimal hash')
        else:
            re.search('0x[0-9a-fA-F]{40}', splitted_input[1]).group(0)
    except IndexError:
        print('there is not enough data to verify current input')
        return None
    return splitted_input[1]
def _multi_convert(value):
"""
Function try and convert numerical values to numerical types.
"""
try:
value = int(value, 10)
except ValueError:
try:
value = float(value)
except ValueError:
pass
return value | abcd3656fdf5ce7ab1427ee6884a18853bdfaf59 | 18,924 |
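# Illustrative usage.
print([_multi_convert(v) for v in ('42', '3.14', 'abc')])  # [42, 3.14, 'abc']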
def dbinom(n, p):
"""Binomial Distribution
n = number of repetitions
p = success probability
Used when a certain experiment is repeated n times
with a 0 ≤ P ≤ 1 probability to succeed once.
This doesn't return a value, but rather the specified binomial function
"""
def b(k):
"""Returns the probability of k successes"""
if 0 <= k <= n:
q = 1 - p
return rperm(n, k) * p**k * q**(n-k)
else:
return 0
# Allow accessing the used 'n' and 'p' values from the function
b.__dict__['n'] = n
b.__dict__['p'] = p
b.__dict__['expected'] = n * p
b.__dict__['variance'] = (n * p) * (1-p)
return b | 8917b3eb5ce189094f2b129c596a99d20dfcdcc5 | 18,925 |
import numpy as np
from PIL import Image
def array_to_image(x, data_format='channels_last'):
"""Converts a 3D Numpy array to a PIL Image instance.
Args:
x: Input Numpy array.
data_format: Image data format, either "channels_first" or "channels_last".
Returns:
A PIL Image instance.
Raises:
ValueError: if invalid `x` or `data_format` is passed.
"""
if x.ndim != 3:
raise ValueError('Expected image array to have rank 3 (single image). '
'Got array with shape: %s' % (x.shape,))
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Invalid data_format: %s' % data_format)
if data_format == 'channels_first':
x = x.transpose(1, 2, 0)
if x.shape[2] == 4:
return Image.fromarray(x.astype('uint8'), 'RGBA')
elif x.shape[2] == 3:
return Image.fromarray(x.astype('uint8'), 'RGB')
elif x.shape[2] == 1:
if np.max(x) > 255:
return Image.fromarray(x[:, :, 0].astype('int32'), 'I')
return Image.fromarray(x[:, :, 0].astype('uint8'), 'L')
else:
raise ValueError('Unsupported channel number: %s' % (x.shape[2],)) | 2278a317e6d820b9d1aee2d7d796261b14d719f2 | 18,926 |
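# Minimal usage sketch with random pixel data.
rgb = np.random.randint(0, 256, size=(64, 64, 3), dtype=np.uint8)
img = array_to_image(rgb)                  # channels_last by default
img_cf = array_to_image(rgb.transpose(2, 0, 1), data_format='channels_first')
print(img.size, img.mode)                  # (64, 64) RGB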
import time
import logging
import re
from itertools import chain
from multiprocessing import Manager, Process
def worker_process_download_tvtorrent(
tvTorUnit, client = None, maxtime_in_secs = 14400,
num_iters = 1, kill_if_fail = False ):
"""
Used by, e.g., :ref:`get_tv_batch`, to download missing episodes on the Plex_ TV library. Attempts to use the Deluge_ server, specified in :numref:`Seedhost Services Setup`, to download an episode. If successful then uploads the finished episode from the remote SSH server to the Plex_ server and local directory, specified in :numref:`Local and Remote (Seedhost) SSH Setup`.
:param dict tvTorUnit: a :py:class:`dict` representing a summarized magnet link searching operation on an episode. The format and meaning of this data structure is described in :py:meth:`create_tvTorUnits <howdy.tv.tv.create_tvTorUnits>`.
:param DelugeRPC client: optional argument, the `DelugeRPCClient <Deluge RPC client_>`_ object that at a low level uses the Deluge_ server to download the Magnet link at the remote SSH server. If ``None``, then this client is created using :py:meth:`get_deluge_client <howdy.core.core_deluge.get_deluge_client>`.
:param int maxtime_in_secs: optional argument, the maximum time to wait for a Magnet link found by the Jackett_ server to fully download through the Deluge_ server. Must be :math:`\ge 60` seconds. Default is 14400 seconds.
:param int num_iters: optional argument, the maximum number of Magnet links to try and fully download before giving up. The list of Magnet links to try for each missing episode is ordered from *most* seeders + leechers to *least*. Must be :math:`\ge 1`. Default is 1.
:param bool kill_if_fail: optional argument. If ``True``, then on failing operation kill the torrent download on the Deluge_ server and delete any files associated with it. If ``False``, then keep the torrent download on failure.
:returns: If successful, creates a two element :py:class:`tuple`: the first element is the base name of the episode that is uploaded to the Plex_ server, and the second element is a status :py:class:`dictionary <dict>` with three keys.
* the ``status`` is ``"SUCCESS"``.
* the ``message`` describes the final status of the operation.
* the ``time`` tells how long, in seconds, the successful operation took.
If unsuccessful, returns a failing tuple: the first element is ``None``, and the the second element is a status :py:class:`dictionary <dict>` with three keys.
* the ``status`` is ``"FAILURE"``.
* the ``message`` describes the illuminating reason as to how this operation failed.
* the ``time`` tells how long, in seconds, the failing operation took.
:rtype: tuple
.. seealso::
* :ref:`get_tv_batch`.
* :py:meth:`get_remaining_episodes <howdy.tv.tv.get_remaining_episodes>`.
* :py:meth:`create_tvTorUnits <howdy.tv.tv.create_tvTorUnits>`.
* :py:meth:`download_batched_tvtorrent_shows <howdy.tv.tv.download_batched_tvtorrent_shows>`.
.. _`Deluge RPC client`: https://github.com/JohnDoee/deluge-client
.. _Deluge: https://en.wikipedia.org/wiki/Deluge_(software)
"""
time0 = time.time( )
assert( maxtime_in_secs > 0 )
#
if client is None:
client, status = core_deluge.get_deluge_client( )
if client is None:
return None, _create_status_dict(
'FAILURE', 'cannot create or run a valid deluge RPC client.', time0 )
#
## now get list of torrents, choose "top" one
def _process_jackett_items( tvTorUnit, shared_list ):
t0 = time.time( )
torFileName = tvTorUnit[ 'torFname' ]
totFname = tvTorUnit[ 'totFname' ]
minSize = tvTorUnit[ 'minSize' ]
maxSize = tvTorUnit[ 'maxSize' ]
minSize_x265 = tvTorUnit[ 'minSize_x265' ]
maxSize_x265 = tvTorUnit[ 'maxSize_x265' ]
series_name = tvTorUnit[ 'tvshow' ]
mustHaveString = torFileName.split( )[ -1 ]
do_raw = tvTorUnit[ 'do_raw' ]
logging.info( 'jackett start: %s, %s, %s' % (
torFileName, mustHaveString, series_name ) )
#
## try this twice if it can
        torFileNameAlt = re.sub(r'\(([0-9]+)\)', '', torFileName ).strip( )
torFileNames = [ torFileName, ]
if torFileNameAlt != torFileName: torFileNames.append( torFileNameAlt )
for tfn in torFileNames:
logging.info( 'processing jackett from "%s", using "%s" now, at %0.3f seconds after start.' % (
torFileName, tfn, time.time( ) - time0 ) )
data, status = get_tv_torrent_jackett(
tfn, maxnum = 100, keywords = [ 'x264', 'x265', '720p' ],
minsizes = [ minSize, minSize_x265 ],
maxsizes = [ maxSize, maxSize_x265 ],
keywords_exc = [ 'xvid' ], raw = do_raw,
must_have = [ mustHaveString ] )
if status == 'SUCCESS': break
if status != 'SUCCESS':
shared_list.append( ( 'jackett', _create_status_dict( 'FAILURE', status, t0 ), 'FAILURE' ) )
return
logging.info( 'successfully processed jackett on %s in %0.3f seconds.' % (
torFileName, time.time( ) - t0 ) )
shared_list.append( ( 'jackett', data, 'SUCCESS' ) )
#
def _process_eztv_io_items( tvTorUnit, shared_list ):
t0 = time.time( )
torFileName = tvTorUnit[ 'torFname' ]
totFname = tvTorUnit[ 'totFname' ]
minSize = tvTorUnit[ 'minSize' ]
maxSize = tvTorUnit[ 'maxSize' ]
minSize_x265 = tvTorUnit[ 'minSize_x265' ]
maxSize_x265 = tvTorUnit[ 'maxSize_x265' ]
series_name = tvTorUnit[ 'tvshow' ]
mustHaveString = torFileName.split( )[ -1 ]
logging.info( 'eztv.io start: %s' % torFileName )
#
data, status = get_tv_torrent_eztv_io(
torFileName, maxnum = 100, series_name = series_name,
minsizes = [ minSize, minSize_x265],
maxsizes = [ maxSize, maxSize_x265] )
if status != 'SUCCESS':
shared_list.append(
                ( 'eztv.io', _create_status_dict( 'FAILURE', status, t0 ), 'FAILURE' ) )
return
data_filt = list(filter(
lambda elem: any(map(lambda tok: tok in elem['title'].lower( ),
( 'x264', 'x265', '720p' ) ) ) and
'xvid' not in elem['title'].lower( ), data ) )
if len( data_filt ) == 0:
shared_list.append(
( 'eztv.io', _create_status_dict(
'FAILURE', 'ERROR, COULD NOT FIND %s IN EZTV.IO.' % torFileName, t0 ), 'FAILURE' ) )
return
logging.info( 'successfully processed eztv.io on %s in %0.3f seconds.' % (
torFileName, time.time( ) - t0 ) )
shared_list.append( ( 'eztv.io', data_filt, 'SUCCESS' ) )
#
def _process_zooqle_items( tvTorUnit, shared_list ):
t0 = time.time( )
torFileName = tvTorUnit[ 'torFname' ]
totFname = tvTorUnit[ 'totFname' ]
minSize = tvTorUnit[ 'minSize' ]
maxSize = tvTorUnit[ 'maxSize' ]
minSize_x265 = tvTorUnit[ 'minSize_x265' ]
maxSize_x265 = tvTorUnit[ 'maxSize_x265' ]
series_name = tvTorUnit[ 'tvshow' ]
mustHaveString = torFileName.split( )[ -1 ]
logging.info( 'zooqle start: %s' % torFileName )
#
data, status = get_tv_torrent_zooqle( torFileName, maxnum = 100 )
if status != 'SUCCESS':
shared_list.append( ( 'zooqle', _create_status_dict( 'FAILURE', status, t0 ), 'FAILURE' ) )
return
data_filt = list(filter(
lambda elem: any(map(lambda tok: tok in elem['title'].lower( ),
( 'x264', 'x265', '720p' ) ) ) and
'xvid' not in elem['title'].lower( ) and
elem['torrent_size'] >= minSize*1e6 and
elem['torrent_size'] <= maxSize*1e6, data ) )
        if len( data_filt ) == 0:
            shared_list.append(
                ( 'zooqle', _create_status_dict(
                    'FAILURE', 'ERROR, COULD NOT FIND %s IN ZOOQLE.' % torFileName, t0 ), 'FAILURE' ) )
            return
logging.info( 'successfully processed zooqle on %s in %0.3f seconds.' % (
torFileName, time.time( ) - t0 ) )
shared_list.append( ( 'zooqle', data_filt, 'SUCCESS' ) )
m = Manager( )
shared_list = m.list( )
jobs = [ ]
for targ in ( _process_jackett_items, _process_eztv_io_items, _process_zooqle_items ):
job = Process( target = targ, args = ( tvTorUnit, shared_list ) )
job.daemon = False
jobs.append( job )
job.start( )
for job in jobs: job.join( )
for job in jobs: job.close( )
#shared_list = list(map(
# lambda proc: proc( tvTorUnit ),
# ( _process_jackett_items, _process_eztv_io_items, _process_zooqle_items ) ) )
error_tup = list(map(
lambda dat: ( dat[0], dat[1] ), filter(lambda dat: dat[-1] == 'FAILURE', shared_list ) ) )
data = list( chain.from_iterable( map(lambda dat: dat[1],
filter(lambda dat: dat[-1] == 'SUCCESS', shared_list ) ) ) )
#
## status of downloaded elements
torFileName = tvTorUnit[ 'torFname' ]
totFname = tvTorUnit[ 'totFname' ]
if len( data ) == 0:
return None, dict( error_tup )
print( 'got %d candidates for %s in %0.3f seconds.' % (
len(data), torFileName, time.time( ) - time0 ) )
#
## wrapped away in another method
return _worker_process_tvtorrents(
client, data, torFileName, totFname,
maxtime_in_secs, num_iters, kill_if_fail ) | 899b13f70d1673168eab4b533ce7e5219d25d365 | 18,927 |
from operator import or_
def find_fixture(
gameweek,
team,
was_home=None,
other_team=None,
kickoff_time=None,
season=CURRENT_SEASON,
dbsession=session,
):
"""Get a fixture given a gameweek, team and optionally whether
the team was at home or away, the kickoff time and the other team in the
fixture.
"""
fixture = None
if not isinstance(team, str):
team_name = get_team_name(team, season=season, dbsession=dbsession)
else:
team_name = team
if not team_name:
raise ValueError("No team with id {} in {} season".format(team, season))
if other_team and not isinstance(other_team, str):
other_team_name = get_team_name(other_team, season=season, dbsession=dbsession)
else:
other_team_name = other_team
query = (
dbsession.query(Fixture).filter_by(gameweek=gameweek).filter_by(season=season)
)
if was_home is True:
query = query.filter_by(home_team=team_name)
elif was_home is False:
query = query.filter_by(away_team=team_name)
elif was_home is None:
query = query.filter(
or_(Fixture.away_team == team_name, Fixture.home_team == team_name)
)
else:
raise ValueError("was_home must be True, False or None")
if other_team_name:
if was_home is True:
query = query.filter_by(away_team=other_team_name)
elif was_home is False:
query = query.filter_by(home_team=other_team_name)
elif was_home is None:
query = query.filter(
or_(
Fixture.away_team == other_team_name,
Fixture.home_team == other_team_name,
)
)
fixtures = query.all()
if not fixtures or len(fixtures) == 0:
raise ValueError(
"No fixture with season={}, gw={}, team_name={}, was_home={}, other_team_name={}".format(
season, gameweek, team_name, was_home, other_team_name
)
)
if len(fixtures) == 1:
fixture = fixtures[0]
elif kickoff_time:
# team played multiple games in the gameweek, determine the
# fixture of interest using the kickoff time,
kickoff_date = dateparser.parse(kickoff_time)
kickoff_date = kickoff_date.replace(tzinfo=timezone.utc)
kickoff_date = kickoff_date.date()
for f in fixtures:
f_date = dateparser.parse(f.date)
f_date = f_date.replace(tzinfo=timezone.utc)
f_date = f_date.date()
if f_date == kickoff_date:
fixture = f
break
if not fixture:
raise ValueError(
"No unique fixture with season={}, gw={}, team_name={}, was_home={}, kickoff_time={}".format(
season, gameweek, team_name, was_home, kickoff_time
)
)
return fixture | fcf90acd4fd8dd663c5cdf2ec99bd428c8cf7a45 | 18,928 |
import errno
def plat_specific_errors(*errnames):
"""Return error numbers for all errors in errnames on this platform.
The 'errno' module contains different global constants depending on
the specific platform (OS). This function will return the list of
numeric values for a given list of potential names.
"""
errno_names = dir(errno)
nums = [getattr(errno, k) for k in errnames if k in errno_names]
# de-dupe the list
return list(dict.fromkeys(nums).keys()) | acb70b2b7d6b16fbe2cfc9f559606efd504b8e3f | 18,929 |
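# Illustrative usage; names that do not exist on the current platform are skipped.
print(plat_specific_errors('EPIPE', 'ENOTCONN', 'WSAECONNRESET'))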
from pymatgen.util.num import make_symmetric_matrix_from_upper_tri
from typing import Union
def make_symmetric_matrix(d: Union[list, float]) -> list:
"""
d (list or float):
len(d) == 1: Suppose cubic system
len(d) == 3: Suppose tetragonal or orthorhombic system
len(d) == 6: Suppose the other system
"""
if isinstance(d, float):
tensor = [[d, 0, 0], [0, d, 0], [0, 0, d]]
elif len(d) == 9:
tensor = [[d[0], d[1], d[2]], [d[3], d[4], d[5]], [d[6], d[7], d[8]]]
elif len(d) == 1:
tensor = [[d[0], 0, 0], [0, d[0], 0], [0, 0, d[0]]]
elif len(d) == 3:
tensor = [[d[0], 0, 0], [0, d[1], 0], [0, 0, d[2]]]
elif len(d) == 6:
"""
Given a symmetric matrix in upper triangular matrix form as flat array
indexes as:
[A_xx, A_yy, A_zz, A_xy, A_xz, A_yz]
This will generate the full matrix:
[[A_xx, A_xy, A_xz], [A_xy, A_yy, A_yz], [A_xz, A_yz, A_zz]
"""
tensor = make_symmetric_matrix_from_upper_tri(d).tolist()
else:
raise ValueError("{} is not valid to make symmetric matrix".format(d))
return tensor | 318caf380a8f0a0878eac54bae49c86722e532bb | 18,931 |
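# Illustrative usage (the 6-element case additionally relies on the pymatgen
# helper imported above).
print(make_symmetric_matrix(2.0))        # [[2.0, 0, 0], [0, 2.0, 0], [0, 0, 2.0]]
print(make_symmetric_matrix([1, 2, 3]))  # diagonal tetragonal/orthorhombic case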
import numpy as np
def convert_lds_to_block_tridiag(As, bs, Qi_sqrts, ms, Ri_sqrts):
"""
Parameterize the LDS in terms of pairwise linear Gaussian dynamics
and per-timestep Gaussian observations.
p(x_{1:T}; theta)
= [prod_{t=1}^{T-1} N(x_{t+1} | A_t x_t + b_t, Q_t)]
* [prod_{t=1}^T N(x_t | m_t, R_t)]
We can rewrite this as a Gaussian with a block tridiagonal precision
matrix J. The blocks of this matrix are:
J_{t,t} = A_t.T Q_t^{-1} A_t + Q_{t-1}^{-1} + R_t^{-1}
J_{t,t+1} = -Q_t^{-1} A_t
The linear term is h_t
h_t = -A_t.T Q_t^{-1} b_t + Q_{t-1}^{-1} b_{t-1} + R_t^{-1} m_t
We parameterize the model in terms of
theta = {A_t, b_t, Q_t^{-1/2}}_{t=1}^{T-1}, {m_t, R_t^{-1/2}}_{t=1}^T
"""
T, D = ms.shape
assert As.shape == (T-1, D, D)
assert bs.shape == (T-1, D)
assert Qi_sqrts.shape == (T-1, D, D)
assert Ri_sqrts.shape == (T, D, D)
    # Construct the inverse covariance matrices
Qis = np.matmul(Qi_sqrts, np.swapaxes(Qi_sqrts, -1, -2))
Ris = np.matmul(Ri_sqrts, np.swapaxes(Ri_sqrts, -1, -2))
# Construct the joint, block-tridiagonal precision matrix
J_lower_diag = -np.matmul(Qis, As)
J_diag = np.concatenate([-np.matmul(np.swapaxes(As, -1, -2), J_lower_diag), np.zeros((1, D, D))]) \
+ np.concatenate([np.zeros((1, D, D)), Qis]) \
+ Ris
# Construct the linear term
h = np.concatenate([np.matmul(J_lower_diag, bs[:, :, None])[:, :, 0], np.zeros((1, D))]) \
+ np.concatenate([np.zeros((1, D)), np.matmul(Qis, bs[:, :, None])[:, :, 0]]) \
+ np.matmul(Ris, ms[:, :, None])[:, :, 0]
return J_diag, J_lower_diag, h | d16721ffb77f06cd55ca3c70238ca56fad76970d | 18,932 |
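# Shape-only usage sketch with simple stand-in parameters (dimensions are
# illustrative assumptions).
T, D = 10, 2
As = np.tile(np.eye(D) * 0.9, (T - 1, 1, 1))
bs = np.zeros((T - 1, D))
Qi_sqrts = np.tile(np.eye(D), (T - 1, 1, 1))
ms = np.zeros((T, D))
Ri_sqrts = np.tile(np.eye(D), (T, 1, 1))
J_diag, J_lower_diag, h = convert_lds_to_block_tridiag(As, bs, Qi_sqrts, ms, Ri_sqrts)
print(J_diag.shape, J_lower_diag.shape, h.shape)  # (10, 2, 2) (9, 2, 2) (10, 2)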
def extract_string_from_tensor(input_ids, mode="single", config=None, tokenizer=None):
"""
Args:
input_ids (Tensor): input sentences with shape [batch_size, seq_len].
mode (str): ["pair", "single"]
"pair" for tasks with paired inputs `<bos> A <eos> B <eos>`,
such as summarization task, the dataset format `<bos> Article <eos> Summary <eos>`,
reading comprehension task, the dataset format `<bos> Passage Question <eos> Answer <eos>`.
"single" for tasks with single input `<bos> A <eos>`, such as Language Modeling, Lambada task.
config: the configuration of GPT-2 model.
tokenizer: the tokenizer of GPT-2 model.
    Return:
        prompt_list (list): list of prompt_text
        reference_list (list): list of reference_text, or second part of text (returned only in "pair" mode)
"""
batch_size = config.batch_size
seq_length = config.seq_length
prompt_list = [""] * batch_size
reference_list = [""] * batch_size
eos_text = tokenizer.eos_token
len_eos_text = len(eos_text)
input_ids_np = input_ids.asnumpy()
input_ids_np = input_ids_np.reshape((batch_size, seq_length))
# input_ids = P.Reshape()(input_ids, (batch_size, seq_length))
if mode == "pair":
for batch_idx in range(batch_size):
sentence_tensor = input_ids_np[batch_idx]
            sentence_list = sentence_tensor.tolist()[1:]
sentence = tokenizer.decode(sentence_list)
prompt_start = 0
prompt_end = sentence.find(eos_text, 0)
reference_start = prompt_end + len_eos_text
reference_end = sentence[reference_start:].find(
eos_text, 0) + reference_start
prompt_list[batch_idx] = sentence[prompt_start:prompt_end]
reference_list[batch_idx] = sentence[reference_start:reference_end]
return prompt_list, reference_list
# For single output datasets such as WikiText, etc.
if mode == "single":
for batch_idx in range(batch_size):
sentence_tensor = input_ids_np[batch_idx]
            sentence_list = sentence_tensor.tolist()[1:]
sentence = tokenizer.decode(sentence_list)
prompt_start = 0
prompt_end = sentence.find(eos_text, 0)
prompt_list[batch_idx] = sentence[prompt_start:prompt_end]
else:
raise NotImplementedError('mode:{} not supported.'.format(mode))
return prompt_list | 27cf8905350db53ec908f3b8ef8674a7ac3a17eb | 18,933 |
def schema_validation_matching(source_fields, target_fields):
"""Compare schemas between two dictionary objects"""
results = []
# Go through each source and check if target exists and matches
for source_field_name, source_field_type in source_fields.items():
# target field exists
if source_field_name in target_fields:
# target data type matches
if source_field_type == target_fields[source_field_name]:
results.append(
[
source_field_name,
source_field_name,
"1",
"1",
consts.VALIDATION_STATUS_SUCCESS,
"Source_type:{} Target_type:{}".format(
source_field_type, target_fields[source_field_name]
),
]
)
# target data type mismatch
else:
results.append(
[
source_field_name,
source_field_name,
"1",
"1",
consts.VALIDATION_STATUS_FAIL,
"Data type mismatch between source and target. Source_type:{} Target_type:{}".format(
source_field_type, target_fields[source_field_name]
),
]
)
# target field doesn't exist
else:
results.append(
[
source_field_name,
"N/A",
"1",
"0",
consts.VALIDATION_STATUS_FAIL,
"Target doesn't have a matching field name",
]
)
# source field doesn't exist
for target_field_name, target_field_type in target_fields.items():
if target_field_name not in source_fields:
results.append(
[
"N/A",
target_field_name,
"0",
"1",
consts.VALIDATION_STATUS_FAIL,
"Source doesn't have a matching field name",
]
)
return results | 7af82c39462de09e326c6f4413a2d6be7fd6c977 | 18,934 |
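# Illustrative usage; `consts` above is assumed to provide the status strings.
source = {'id': 'int64', 'name': 'string', 'created_at': 'timestamp'}
target = {'id': 'int64', 'name': 'float64', 'updated_at': 'timestamp'}
for row in schema_validation_matching(source, target):
    print(row)
# Expect: a success row for 'id', a type-mismatch failure for 'name', and
# missing-field failures for 'created_at' and 'updated_at'.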
import pkg_resources
def find_thirdparty_marshaller_plugins():
""" Find, but don't load, all third party marshaller plugins.
Third party marshaller plugins declare the entry point
``'hdf5storage.marshallers.plugins'`` with the name being the
Marshaller API version and the target being a function that returns
a ``tuple`` or ``list`` of all the marshallers provided by that
plugin when given the hdf5storage version (``str``) as its only
argument.
.. versionadded:: 0.2
Returns
-------
plugins : dict
The marshaller obtaining entry points from third party
plugins. The keys are the Marshaller API versions (``str``) and
the values are ``dict`` of the entry points, with the module
names as the keys (``str``) and the values being the entry
points (``pkg_resources.EntryPoint``).
See Also
--------
supported_marshaller_api_versions
"""
all_plugins = tuple(pkg_resources.iter_entry_points(
'hdf5storage.marshallers.plugins'))
return {ver: {p.module_name: p
for p in all_plugins if p.name == ver}
for ver in supported_marshaller_api_versions()} | 7aad132f520b67d5b39e857175e4bc006fd3ad72 | 18,935 |
def justTransportResponse(transport):
"""
Helper function for creating a Response which uses the given transport.
All of the other parameters to L{Response.__init__} are filled with
arbitrary values. Only use this method if you don't care about any of
them.
"""
return Response((b'HTTP', 1, 1), 200, b'OK', _boringHeaders, transport) | 02a18a500cb9a623c287d4e2f3777237e3574ef6 | 18,936 |
def object_comparator_lookup(src_obj, dst_obj):
"""
Compare an object with another entry by entry
"""
dont_match = []
no_upstream = []
for i in dst_obj:
count_name = 0
count_value = 0
for j in src_obj:
if list(j.keys())[0] == list(i.keys())[0]:
count_name = 1
if j[list(j.keys())[0]] == i[list(i.keys())[0]]:
count_value = 1
if count_name == 0:
if list(i.keys())[0] != "last-modified":
print(i.keys(), list(i.keys())[0])
no_upstream.append(i)
else:
if count_value == 0:
dont_match.append(i)
if no_upstream or dont_match:
return 1
else:
return 0 | ba5767624255da915d9c07d25b62880c387f6f00 | 18,938 |
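# Illustrative usage with the single-key dictionaries this comparator expects.
src = [{'origin': 'upstream'}, {'ttl': 3600}]
dst = [{'origin': 'upstream'}, {'ttl': 7200}, {'last-modified': 'today'}]
print(object_comparator_lookup(src, dst))  # 1 -> at least one mismatch or missing entry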
def line(
data_frame=None,
x=None,
y=None,
line_group=None,
color=None,
line_dash=None,
hover_name=None,
hover_data=None,
custom_data=None,
text=None,
facet_row=None,
facet_row_weights=None,
facet_col=None,
facet_col_weights=None,
facet_col_wrap=0,
facet_row_spacing=None,
facet_col_spacing=None,
error_x=None,
error_x_minus=None,
error_y=None,
error_y_minus=None,
animation_frame=None,
animation_group=None,
category_orders=None,
labels=None,
orientation=None,
color_discrete_sequence=None,
color_discrete_map=None,
line_dash_sequence=None,
line_dash_map=None,
log_x=False,
log_y=False,
range_x=None,
range_y=None,
line_shape=None,
render_mode="auto",
title=None,
template=None,
width=None,
height=None,
):
"""
In a 2D line plot, each row of `data_frame` is represented as vertex of
a polyline mark in 2D space.
"""
return make_figure(args=locals(), constructor=go.Scatter) | bcedfe2c9297f4d3c049e500265f9ffbc0dde85a | 18,939 |
def is_primitive(v):
"""
Checks if v is of primitive type.
"""
return isinstance(v, (int, float, bool, str)) | d22607c0e2b93b82b1da6beb50de68668624dd71 | 18,940 |
def linkify_only_full_urls(attrs, new=False):
"""Linkify only full links, containing the scheme."""
if not new: # This is an existing <a> tag, leave it be.
return attrs
# If the original text doesn't contain the scheme, don't linkify.
if not attrs['_text'].startswith(('http:', 'https:')):
return None
return attrs | 89fcc7f3fc53353686260779ae8ddb4c0523c57b | 18,941 |
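# Hedged usage sketch: this function is written as a bleach linkify callback.
import bleach

text = 'See https://example.com and also example.org'
print(bleach.linkify(text, callbacks=[linkify_only_full_urls]))
# Only the https:// URL is wrapped in an <a> tag.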
def Precedence(op):
"""The numeric precedence of a binary operator."""
# Particularly convenient during layout of binary operators.
return float(sum(i * (op in grp[1:])
for i, grp in enumerate(precedence))) / len(precedence) | 0071d2972474c57376c334401c43673f1c4bde49 | 18,945 |
from typing import Dict
from typing import Any
def _FeastToExampleTransform(
pipeline: beam.Pipeline, exec_properties: Dict[str, Any], split_pattern: str
) -> beam.pvalue.PCollection:
"""Read from BigQuery and transform to TF examples.
Args:
pipeline: beam pipeline.
exec_properties: A dict of execution properties.
split_pattern: Split.pattern in Input config, a BigQuery sql string.
Returns:
PCollection of TF examples.
"""
# Load custom config dictionary
custom_config = _load_custom_config(exec_properties["custom_config"])
# Get Feast retrieval job
retrieval_job = _get_retrieval_job(
entity_query=split_pattern, custom_config=custom_config
)
# Setup datasource and converter.
if isinstance(retrieval_job, BigQueryRetrievalJob):
query = retrieval_job.to_sql()
# Internally Beam creates a temporary table and exports from the query.
datasource = utils.ReadFromBigQuery(query=query)
converter = converters._BigQueryConverter(
query, _get_gcp_project(exec_properties)
)
else:
raise NotImplementedError(
f"Support for {type(retrieval_job)} is not available yet. For now we only support BigQuery source."
)
# Setup converter from dictionary of str -> value to bytes
map_function = None
out_format = exec_properties.get(
"output_data_format", example_gen_pb2.FORMAT_TF_EXAMPLE
)
if out_format == example_gen_pb2.FORMAT_TF_EXAMPLE:
map_function = converter.RowToExampleBytes
elif out_format == example_gen_pb2.FORMAT_TF_SEQUENCE_EXAMPLE:
map_function = converter.RowToSequenceExampleBytes
else:
raise NotImplementedError(
f"Format {out_format} is not currently supported. Currently we only support tfexample"
)
# Setup pipeline
return (
pipeline
| "DataRetrieval" >> datasource
| f"To{out_format.capitalize()}Bytes" >> beam.Map(map_function)
) | 76451a98b9e11188eda42b5141e73647b87df94b | 18,946 |
import re
import logging
def parse_host_info(qhost_tree, queues_tree, queues_to_ignore=[]):
"""
:return: dictionary key: host, value HostInfo
"""
dctRet = {}
for host_node in qhost_tree.findall('host'):
host_name = host_node.get('name')
dct_hostvalues = dict([(hostvalue_node.get('name'), hostvalue_node.text) for hostvalue_node in host_node.findall('hostvalue')])
if dct_hostvalues['num_proc'] != '-':
slots = int(dct_hostvalues['num_proc'])
slots_used = sum([int(slots_used_node.text) for slots_used_node in host_node.findall(".//queuevalue[@name='slots_used']")])
memory = dehumanize_memory(dct_hostvalues['mem_total'])
mem_used = 0 if dct_hostvalues['mem_used'] == '-' else dehumanize_memory(dct_hostvalues['mem_used'])
dctRet[host_name] = HostInfo(host=host_name, slots=slots, memory=memory, state=None, slots_used=slots_used,
mem_used=mem_used, queues=set())
else:
dctRet[host_name] = HostInfo(host=host_name, slots=None, memory=None, state=None, slots_used=None,
mem_used=None, queues=set())
for queue_info in queues_tree.findall('*/Queue-List'):
state = queue_info.findtext('state')
if state is None: state = ''
# Ignore suspended state
state = re.sub('s', '', state)
# Ignore configuration ambiguous state
state = re.sub('c', '', state)
# If disabled, ignore other state flags, because they can vary between queues on a host
if 'd' in state:
state = 'd'
queue = queue_info.findtext('name')
queue_split = queue.split('@', 1)
host = queue_split[1]
queue_name = queue_split[0]
if queue_name in queues_to_ignore:
continue
        host_info = dctRet.get(host)
        if host_info is None:
            logging.warning(host + " found in qstat but not qhost")
            continue
        host_info.queues.add(queue_name)
        if len(state) > 0:
            if host_info.state is None:
                dctRet[host] = host_info._replace(state=state)
            elif not is_host_state_compatible(host_info.state, state):
                raise Exception("Conflicting states for %s: %s != %s" % (host, host_info.state, state))
return dctRet | a5f5154ac50d358b4a523872ffcaba3030d2f722 | 18,948 |
import typing as t
import click
import docstring_parser
def _get_param_type_from_str(
type_name: str = None,
param_doc: docstring_parser.DocstringParam = None,
) -> t.Tuple[_ParamArgs, t.Union[click.ParamType, None]]:
"""Guess parameter type from parameter type name."""
type_name = type_name or ""
desc = param_doc.description if param_doc else ""
if type_name == "int":
return _ParamArgs.single, int
elif type_name == "float":
return _ParamArgs.single, float
elif type_name == "bytes":
return _ParamArgs.single, bytes
elif type_name == "bool":
return _ParamArgs.flag, None
elif type_name[:4] == "list":
args, element = _get_param_type_from_str(type_name[5:-1], param_doc)
assert args is _ParamArgs.single
return _ParamArgs.multiple, element
elif type_name[:5] == "tuple":
els = (_get_param_type_from_str(n)[1] for n in type_name[6:-1].split(", "))
return _ParamArgs.single, click.Tuple(els)
elif type_name == "io.FileIO":
return _ParamArgs.single, _build_file_param_type(desc)
elif type_name == "pathlib.Path":
return _ParamArgs.single, _build_path_param_type(desc)
elif type_name == "datetime.datetime":
return _ParamArgs.single, click.DateTime()
elif type_name == "uuid.UUID":
return _ParamArgs.single, click.UUID
else:
logger.warning("Cannot guess parameter type from name: %s", type_name)
return _ParamArgs.single, None | a13621ffbed428fbc32f6285e2fc0a2b53097cad | 18,949 |
def solve(task: str) -> int:
"""How many differently colored bags can contain shiny gold?"""
parents = process_data(task)
seen = set()
candidates = parents["shiny gold"]
while candidates:
candidate = candidates.pop()
if candidate not in seen:
seen.add(candidate)
candidates.extend(parents[candidate])
return len(seen) | ea505c346a4482b9516ad22baa71d251b7e1dc41 | 18,950 |
import urllib
import base64
import hashlib
import requests
def cityDesc(codePostal):
"""
code de retour :
100 : tout est normal
200 : la requete n'a pas abouti
300 : pas de cine dans la ville
400 : la ville n'existe pas
"""
headersUA = init_connect()
YMDstr = getDate()
searchField = codePostal
filterField = ''
countField = '500'
pageField = '1'
url = 'q=' + searchField + '&filter=' + filterField + '&count=' + countField + '&page=' + pageField + '&format=json&partner=' + allocine_partner + '&sed=' + YMDstr
toEncrypt = allocine_secret_key + url
sig = urllib.parse.quote_plus(base64.b64encode(hashlib.sha1(toEncrypt.encode('utf-8')).digest()))
urlComplete = 'http://api.allocine.fr/rest/v3/search?' + url + "&sig=" + sig
codeRetour = 200
listeCine = []
try:
req = requests.get(urlComplete, headers=headersUA)
    except requests.RequestException:
return listeCine, codeRetour
# print(req.json())
if req.status_code == 200:
codeRetour = 100
if 'location' in req.json()['feed']:
if 'theater' in req.json()['feed']:
for theaterCity in req.json()['feed']['theater']:
listeCine.append(theaterCity)
else:
codeRetour = 300
else:
codeRetour = 400
return listeCine, codeRetour | 06da49d9afe5420869204a423a2db31df11cc58e | 18,951 |
async def get_reposet(request: AthenianWebRequest, id: int) -> web.Response:
"""List a repository set.
:param id: Numeric identifier of the repository set to list.
:type id: repository set ID.
"""
rs_cols = [
RepositorySet.name,
RepositorySet.items,
RepositorySet.precomputed,
RepositorySet.tracking_re,
]
rs, _ = await fetch_reposet(id, rs_cols, request.uid, request.sdb, request.cache)
return model_response(RepositorySetWithName(
name=rs.name, items=rs.items, precomputed=rs.precomputed)) | a3a2cf6cb1152aadb81798cfa3e1be214635edad | 18,952 |
def is_called_at_module_level() -> bool:
"""
Check if the current function is being called at the module level.
Raise `RuntimeError` if `is_called_at_module_level()` is not called in a function.
"""
if not (frame := getcallerframe().f_back):
raise RuntimeError(
"is_called_at_module_level() expects to be called in a function"
)
# There is currently no reliable and officially-provided way to determine whether a
# function is called from the module level or not.
#
# Therefore we use a try-best-effort heuristic approach here.
#
# This check could emit false positive in the case of some advanced dynamic-reflection
# inspection tricks, like `func.__code__ = func.__code__.replace(co_name="<module>")`.
#
# However such case is so unlikely and rare that we should not be concerned.
#
# We are good with the current approach as it works for most cases.
return frame.f_code.co_name == "<module>" | 0c807205472021b20c7b7bad27c8b5f7a634dd85 | 18,954 |