content (string, 35 to 762k chars) | sha1 (string, 40 chars) | id (int64, 0 to 3.66M)
---|---|---
def extract_dominant_keypoints2D(keypoint_2D, dominant_hand):
""" Extract keypoint 2D.
# Look Later with Octavio
# Arguments
keypoint_2D: Numpy array of shape (num_keypoints, 1).
dominant_hand: List of size (2) with booleans.
# Returns
keypoint_visibility_2D_21: Numpy array of shape (num_keypoints, 1).
"""
keypoint_visibility_left = keypoint_2D[:LEFT_PINKY_TIP, :]
keypoint_visibility_right = keypoint_2D[RIGHT_WRIST:RIGHT_PINKY_TIP, :]
keypoint_visibility_2D_21 = np.where(
dominant_hand[:, :2], keypoint_visibility_left,
keypoint_visibility_right)
return keypoint_visibility_2D_21 | 2581b4cb68d6dad2da3933259582f9160224eef9 | 18,955 |
import kwant

def translation_ev(h, t, tol=1e6):
"""Compute the eigenvalues of the translation operator of a lead.
Adapted from kwant.physics.leads.modes.
Parameters
----------
    h : numpy array, real or complex, shape (N, N)
        The Hamiltonian of the lead unit cell.
t : numpy array, real or complex, shape (N, M)
The hopping matrix from a lead cell to the one on which self-energy
has to be calculated (and any other hopping in the same direction).
tol : float
Numbers and differences are considered zero when they are smaller
than `tol` times the machine precision.
Returns
-------
ev : numpy array
Eigenvalues of the translation operator in the form lambda=r*exp(i*k),
for |r|=1 they are propagating modes.
"""
a, b = kwant.physics.leads.setup_linsys(h, t, tol, None).eigenproblem
ev = kwant.physics.leads.unified_eigenproblem(a, b, tol=tol)[0]
return ev | b5534b782b487ca195ab9b78438e68e81a62e74a | 18,956 |
def bsearch(n, pred):
"""
    Given a boolean function pred that takes index arguments in [0, n),
    assume pred is monotone: it returns False for all indices below some
    threshold and True for all indices at or above it. Return the index of
    the first True, or n if no index yields True.
"""
# invariant: last False lies in [l, r) and pred(l) is False
if pred(0):
return 0
l = 0
r = n
while r-l > 1:
m = l + (r-l)//2
result = pred(m)
if result:
r = m
else:
l = m
return l+1 | 9274732aa9e24a0d0f73399ff50c19a544ac06a7 | 18,957 |
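# Usage sketch for bsearch above (illustrative addition, not part of the original snippet):
# find the first index in [0, 10) whose square is at least 50.
assert bsearch(10, lambda i: i * i >= 50) == 8   # 8*8 = 64 is the first square >= 50
assert bsearch(10, lambda i: False) == 10        # pred is never True -> returns n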
def test_rotating_file_handler_interval(tmpdir, logger, monkeypatch):
"""Test the rotating file handler when the rollover return a time smaller
than the current time.
"""
def rollover(obj, current_time):
return current_time - 0.1
monkeypatch.setattr(DayRotatingTimeHandler, 'computeRollover', rollover)
handler = DayRotatingTimeHandler(str(tmpdir.join('test.log')))
handler.interval = 0.2
logger.addHandler(handler)
    # Probably because we give a negative time.
assert len(tmpdir.listdir()) == 1
logger.info('test')
sleep(1)
logger.info('test')
assert len(tmpdir.listdir()) == 3 | f6394f215e452fd7875b1fc624db9cb50cc19ed8 | 18,958 |
def compute_zero_crossing_wavelength(period, water_depth, gravity=GRAVITY):
"""Computes zero-crossing wavelength from given period.
This uses the dispersion relation for linear waves.
"""
return wavenumber_to_wavelength(
frequency_to_wavenumber(1. / period, water_depth, gravity)
) | 575ff3d251575fa7a232c120bde15f8f57c1dac9 | 18,959 |
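# Hedged sketch (not from the original source) of what the two helpers used above might
# look like under linear wave theory, where omega^2 = g * k * tanh(k * d); the actual
# implementations and the GRAVITY constant may differ in the real code base.
import numpy as np
from scipy.optimize import brentq

GRAVITY = 9.81  # assumed value of the module-level constant

def frequency_to_wavenumber(frequency, water_depth, gravity=GRAVITY):
    """Solve the linear dispersion relation omega^2 = g*k*tanh(k*d) for the wavenumber k."""
    omega = 2.0 * np.pi * frequency
    return brentq(lambda k: gravity * k * np.tanh(k * water_depth) - omega ** 2, 1e-10, 1e4)

def wavenumber_to_wavelength(wavenumber):
    return 2.0 * np.pi / wavenumber

# Example: a 10 s swell in 500 m of water gives roughly g*T^2/(2*pi), about 156 m:
# compute_zero_crossing_wavelength(period=10.0, water_depth=500.0)  -> ~156.1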
def launch_transport_listener(transport, bindaddr, role, remote_addrport, pt_config, ext_or_cookie_file=None):
"""
Launch a listener for 'transport' in role 'role' (socks/client/server/ext_server).
If 'bindaddr' is set, then listen on bindaddr. Otherwise, listen
on an ephemeral port on localhost.
'remote_addrport' is the TCP/IP address of the other end of the
circuit. It's not used if we are in 'socks' role.
'pt_config' contains configuration options (such as the state location)
which are of interest to the pluggable transport.
'ext_or_cookie_file' is the filesystem path where the Extended
ORPort Authentication cookie is stored. It's only used in
'ext_server' mode.
Return a tuple (addr, port) representing where we managed to bind.
Throws obfsproxy.transports.transports.TransportNotFound if the
transport could not be found.
Throws twisted.internet.error.CannotListenError if the listener
could not be set up.
"""
listen_host = bindaddr[0] if bindaddr else 'localhost'
listen_port = int(bindaddr[1]) if bindaddr else 0
if role == 'socks':
transport_class = FTETransportClient
if hasattr(socks, "OBFSSOCKSv5Factory"):
# obfsproxy >= 0.2.7 provides SOCKS5.
factory = socks.OBFSSOCKSv5Factory(transport_class, pt_config)
pt_config.fte_client_socks_version = 5
elif hasattr(socks, "SOCKSv4Factory"):
# obfsproxy < 0.2.7 provides SOCKS4.
factory = socks.SOCKSv4Factory(transport_class, pt_config)
pt_config.fte_client_socks_version = 4
else:
# This will only happen if the obfsproxy people change the socks
# code again. This really is a dependency issue, so raise an
# ImportError.
raise ImportError("Failed to setup an obfsproxy SOCKS server factory")
elif role == 'ext_server':
assert(remote_addrport and ext_or_cookie_file)
transport_class = FTETransportServer
factory = extended_orport.ExtORPortServerFactory(
remote_addrport, ext_or_cookie_file, transport, transport_class, pt_config)
elif role == 'client':
assert(remote_addrport)
transport_class = FTETransportClient
factory = network.StaticDestinationServerFactory(
remote_addrport, role, transport_class, pt_config)
elif role == 'server':
assert(remote_addrport)
transport_class = FTETransportServer
factory = network.StaticDestinationServerFactory(
remote_addrport, role, transport_class, pt_config)
else:
raise InvalidRoleException()
addrport = twisted.internet.reactor.listenTCP(
listen_port, factory, interface=listen_host)
return (addrport.getHost().host, addrport.getHost().port) | 98ba849b3adc58b14b8983d0e61a409bc5ce3af7 | 18,960 |
import socket
import zmq
import numpy as np

# NOTE: the calls below assume `socket` is (re)bound to a zmq socket object elsewhere in the
# module (e.g. zmq.Context().socket(zmq.PUB)); the stdlib socket module has no send_string.
def send_array(A, flags=0, copy=True, track=False):
"""send a numpy array with metadata
Inputs
------
A: (subplots,dim) np array to transmit
subplots - the amount of subplots that are
defined in the current plot
dim - the amount of data that you want to plot.
This is not fixed
"""
    # If you get a float value, convert it to a numpy array
    if isinstance(A, float):
        A = np.array(A).reshape(1, 1)
    # If the array is one dimensional, reshape it to two dimensions
    if len(A.shape) == 1:
        A = A.reshape(-1, 1)
#Create dict to reconstruct array
md = dict(
dtype = str(A.dtype),
shape = A.shape,
)
#Send category
socket.send_string(SENDING_DATA)
#Send json description
socket.send_json(md, flags|zmq.SNDMORE)
#Send array
return socket.send(A, flags, copy=copy, track=track) | c53a17918c12e3ccec9046aad7fc7fc2f498a8ea | 18,962 |
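# Hedged sketch of the matching receive side for send_array above (not part of the original
# code); it takes the zmq socket explicitly and mirrors the three-part send protocol.
def recv_array(socket, flags=0, copy=True, track=False):
    """Receive a numpy array sent by send_array: category string, JSON metadata, raw buffer."""
    category = socket.recv_string(flags)                  # e.g. SENDING_DATA
    md = socket.recv_json(flags)                          # dtype and shape metadata
    msg = socket.recv(flags, copy=copy, track=track)      # raw array bytes (zmq.Frame if copy=False)
    buf = msg if copy else msg.buffer
    A = np.frombuffer(buf, dtype=md['dtype'])
    return category, A.reshape(md['shape'])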
import json
def api_file_upload(request):
""" Upload a file to the storage system """
try:
fobj = request.FILES["file"]
checksum, ext = fobj._name.split(".")
try:
request.user.check_staged_space(fobj._size, checksum)
except Exception as e:
return HttpResponseForbidden(str(e))
write_file_to_storage(fobj, check_valid=True)
StagedFile.objects.get_or_create(
checksum=checksum,
file_size=fobj._size,
uploaded_by=request.user
)
return HttpResponse(json.dumps({
"success": True,
}))
except KeyError:
return HttpResponseBadRequest("Invalid file upload request")
except Exception as e:
handle_server_error(request)
return HttpResponseServerError(content=str(e), reason=str(e)) | 80b15b4d92b5ba2f3a247f1baf7900e73b18781f | 18,963 |
def set_vars(api, file: str, tess_profile: dict):
    """
    Reads the user-specific variables from the tess_profile and sets them on the Tesseract API handle.
    :param api: initialized Tesseract API object
    :param file: path of the image file to recognize
    :param tess_profile: profile dict; its 'variables' entry maps variable names to {'value': ...}
    :return: 0
    """
# Set necessary information
api.SetImageFile(file)
# Set Variable
api.SetVariable("save_blob_choices", "T")
if 'variables' in tess_profile:
for var in tess_profile['variables']:
api.SetVariable(var, str(tess_profile['variables'][var]['value']))
api.Recognize()
return 0 | a7fbe0c5bc584928623e2eadc36240ea3b0f37de | 18,966 |
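# Illustrative call for set_vars above (assumes the tesserocr PyTessBaseAPI; the profile
# layout simply mirrors what set_vars reads and is not taken from the original project):
# from tesserocr import PyTessBaseAPI
# api = PyTessBaseAPI()
# tess_profile = {'variables': {'tessedit_char_whitelist': {'value': '0123456789'}}}
# set_vars(api, 'page.png', tess_profile)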
def qtrfit(numpoints, defcoords, refcoords, nrot):
"""Find the quaternion, q, [and left rotation matrix, u] that minimizes
| qTXq - Y | ^ 2 [|uX - Y| ^ 2]
This is equivalent to maximizing Re (qTXTqY)
The left rotation matrix, u, is obtained from q by
u = qT1q
Parameters
numpoints: The number of points in each list (int)
defcoords: List of definition coordinates, with each set a list of form [x,y,z] (list)
refcoords: List of fitted coordinates, with each set a list of form [x,y,z] (list)
nrot: The maximum number of jacobi sweeps
Returns
quat: The best-fit quaternion
lrot: The best-fit left rotation matrix
"""
xxyx = 0.0
xxyy = 0.0
xxyz = 0.0
xyyx = 0.0
xyyy = 0.0
xyyz = 0.0
xzyx = 0.0
xzyy = 0.0
xzyz = 0.0
quat = []
cmat = []
for i in range(numpoints):
xxyx = xxyx + defcoords[i][0] * refcoords[i][0]
xxyy = xxyy + defcoords[i][0] * refcoords[i][1]
xxyz = xxyz + defcoords[i][0] * refcoords[i][2]
xyyx = xyyx + defcoords[i][1] * refcoords[i][0]
xyyy = xyyy + defcoords[i][1] * refcoords[i][1]
xyyz = xyyz + defcoords[i][1] * refcoords[i][2]
xzyx = xzyx + defcoords[i][2] * refcoords[i][0]
xzyy = xzyy + defcoords[i][2] * refcoords[i][1]
xzyz = xzyz + defcoords[i][2] * refcoords[i][2]
for i in range(4):
cmat.append([])
for _ in range(4):
cmat[i].append(0.0)
cmat[0][0] = xxyx + xyyy + xzyz
cmat[0][1] = xzyy - xyyz
cmat[0][2] = xxyz - xzyx
cmat[0][3] = xyyx - xxyy
cmat[1][1] = xxyx - xyyy - xzyz
cmat[1][2] = xxyy + xyyx
cmat[1][3] = xzyx + xxyz
cmat[2][2] = xyyy - xzyz - xxyx
cmat[2][3] = xyyz + xzyy
cmat[3][3] = xzyz - xxyx - xyyy
_, vmat = jacobi(cmat, nrot) # diagonalize c
for i in range(4):
quat.append(vmat[i][3])
lrot = q2mat(quat)
return quat, lrot | fdfde9deaf0b220bd468031264b029125c071fab | 18,967 |
import struct
def _embedded_bundles_partial_impl(
ctx,
bundle_embedded_bundles,
embeddable_targets,
frameworks,
plugins,
watch_bundles):
"""Implementation for the embedded bundles processing partial."""
_ignore = [ctx]
embeddable_providers = [
x[_AppleEmbeddableInfo]
for x in embeddable_targets
if _AppleEmbeddableInfo in x
]
transitive_frameworks = []
transitive_plugins = []
transitive_watch_bundles = []
for provider in embeddable_providers:
transitive_frameworks.append(provider.frameworks)
transitive_plugins.append(provider.plugins)
transitive_watch_bundles.append(provider.watch_bundles)
bundle_zips = []
if bundle_embedded_bundles:
bundle_zips.extend([
(processor.location.framework, None, depset(transitive = transitive_frameworks)),
(processor.location.plugin, None, depset(transitive = transitive_plugins)),
(processor.location.watch, None, depset(transitive = transitive_watch_bundles)),
])
# Clear the transitive lists to avoid propagating them, since they will be packaged in the
# bundle processing this partial and do not need to be propagated.
transitive_frameworks = []
transitive_plugins = []
transitive_watch_bundles = []
return struct(
bundle_zips = bundle_zips,
providers = [
_AppleEmbeddableInfo(
frameworks = depset(frameworks, transitive = transitive_frameworks),
plugins = depset(plugins, transitive = transitive_plugins),
watch_bundles = depset(watch_bundles, transitive = transitive_watch_bundles),
),
],
) | fe057887528a922ae89fe4c6d8066590d006e415 | 18,968 |
import yaml
def load_instrument(yml):
"""
Instantiate an instrument from YAML spec.
Parameters
----------
yml : str
filename for the instrument configuration in YAML format.
Returns
-------
hexrd.instrument.HEDMInstrument
Instrument instance.
"""
with open(yml, 'r') as f:
icfg = yaml.safe_load(f)
return instrument.HEDMInstrument(instrument_config=icfg) | a882b3b36def975b40124078c907a0f050b67a6f | 18,969 |
from typing import List
def serialize_model(self: models.Model, excludes: List[str] = None) -> dict:
"""
    Serialize a Django model instance. Serialization follows the results of select_related and
    prefetch_related queries, so you can use only/defer at query time to control which fields are serialized.
    It never issues extra database queries on its own; it only uses what you already fetched, avoiding the N+1 query problem.
# See:
https://aber.sh/articles/A-new-idea-of-serializing-Django-model/
"""
excludes = excludes or []
serialized = set()
if getattr(settings, "DSA_SERIALIZE_TO_CAMELCASE", False):
to_camel_case_func = string_convert
else:
to_camel_case_func = do_nothing
def _serialize_model(model) -> dict:
        # When the model has a one-to-one field, serialization can recurse forever, so the closure's
        # free variable stores models that were already serialized; the second time the same model is
        # reached we simply return model.pk instead of recursing again.
nonlocal serialized
if model in serialized:
return model.pk
else:
serialized.add(model)
        # When the model has a one-to-one or one-to-many field whose value is None, return an empty {}
        # directly; otherwise an error would be raised.
if model is None:
return {}
result = {
to_camel_case_func(name): _serialize_model(foreign_key)
for name, foreign_key in model.__dict__["_state"]
.__dict__.get("fields_cache", {})
.items()
}
buried_fields = getattr(model, "buried_fields", [])
for name, value in model.__dict__.items():
            # Sensitive (buried) fields are not serialized
if name in buried_fields:
continue
            # Private attributes are not serialized
if name.startswith("_"):
continue
result[to_camel_case_func(name)] = value
for name, queryset in model.__dict__.get(
"_prefetched_objects_cache", {}
).items():
result[to_camel_case_func(name)] = [_serialize_model(model) for model in queryset] # type: ignore
return result
results = _serialize_model(self)
    # Strip the excluded fields
for field_name in excludes:
del results[to_camel_case_func(field_name)]
return results | 2c9a95b4d0e671492eefc73800d43196b6daa604 | 18,970 |
def isint(x):
"""
For an ``mpf`` *x*, or any type that can be converted
to ``mpf``, determines whether *x* is exactly
integer-valued::
>>> from sympy.mpmath import *
>>> isint(3), isint(mpf(3)), isint(3.2)
(True, True, False)
"""
if isinstance(x, int_types):
return True
try:
x = mpmathify(x)
except:
return False
if isinstance(x, mpf):
if isnan(x) or isinf(x):
return False
return x == int(x)
return False | ec4516fa450cfc0e58c9a69d95f0f9b8aff2443c | 18,971 |
def update_header(file):
"""
Create a standard WCS header from the HDF5 header. To do this we clean up the
header data (which is initially stored in individual arrays). We then create
a new header dictionary with the old cleaned header info. Finally, we use
astropy.wcs.WCS to create an updated WCS header for the 2 spatial dimensions.
This is then saved to self.header while the header dictionary is saved
as self.hdr_dict.
Args:
file: hdf5 File object containing HDF5 file
"""
hdr_dict = {}
header_cols = [str(val[0]).replace("'b", '').replace("'", "").replace("b", '') for val in
list(file['header'][()])]
header_vals = [str(val[1]).replace("'b", '').replace("'", "").replace("b", '') for val in
list(file['header'][()])]
header_types = [val[3] for val in list(file['header'][()])]
for header_col, header_val, header_type in zip(header_cols, header_vals, header_types):
        if 'bool' in str(header_type):
            hdr_dict[header_col] = bool(header_val)
        elif 'float' in str(header_type):
            hdr_dict[header_col] = float(header_val)
        elif 'int' in str(header_type):
            hdr_dict[header_col] = int(header_val)
else:
try:
hdr_dict[header_col] = float(header_val)
except:
hdr_dict[header_col] = str(header_val)
hdr_dict['CTYPE3'] = 'WAVE-SIP'
hdr_dict['CUNIT3'] = 'm'
# hdr_dict['NAXIS1'] = 2064
# hdr_dict['NAXIS2'] = 2048
# Make WCS
wcs_data = WCS(hdr_dict, naxis=2)
header = wcs_data.to_header()
header.insert('WCSAXES', ('SIMPLE', 'T'))
header.insert('SIMPLE', ('NAXIS', 2), after=True)
# self.header.insert('NAXIS', ('NAXIS1', 2064), after=True)
# self.header.insert('NAXIS1', ('NAXIS2', 2048), after=True)
hdr_dict = hdr_dict
return header, hdr_dict | 1c46139a747acdf69ea6602ea123d20f540de30b | 18,972 |
def get_MD_psat():
""" MD data for saturation densities:
Thermodynamic properties of the 3D Lennard-Jones/spline model
Bjørn Hafskjold and Karl Patrick Travis and Amanda Bailey Hass and
Morten Hammer and Ailo Aasen and Øivind Wilhelmsen
doi: 10.1080/00268976.2019.1664780
"""
T = np.array([0.5501, 0.5499, 0.5496, 0.5997, 0.6500, 0.7000, 0.7504,
0.8000, 0.8202, 0.8407, 0.8596, 0.8688, 0.8771, 0.8775,
0.6898, 0.7723, 0.8070, 0.8407, 0.8437, 0.8570, 0.8687,
0.8723, 0.8762, 0.8770])
p = np.array([0.002158, 0.002084, 0.002123, 0.004656, 0.008804, 0.015332,
0.025052, 0.038927, 0.045588, 0.054326, 0.063949, 0.069529,
0.075501, 0.075752, 0.014112, 0.031532, 0.042154, 0.055300,
0.056660, 0.062675, 0.070558, 0.070944, 0.072616, 0.073748])
data = {}
data["T"] = T
data["P"] = P
return data | 363107962628ca9796397977f4f41e5b30bcfbc0 | 18,973 |
import logging
def get_reddit_client():
"""Utility to get a Reddit Client"""
reddit_username = redditUsername
reddit_password = redditPassword
reddit_user_agent = redditUserAgent
reddit_client_secret = redditClientSecret
reddit_client_id = redditClientID
logging.info("Logged in as user (%s).." % reddit_username)
reddit_client = praw.Reddit(client_id=reddit_client_id,
client_secret=reddit_client_secret,
password=reddit_password,
user_agent=reddit_user_agent,
username=reddit_username)
return reddit_client | fe3a783a3ceb27954c658bf9dc036a067ab103a8 | 18,974 |
import six
def get_member_id():
"""
Retrieve member if for the current process.
:rtype: ``bytes``
"""
proc_info = system_info.get_process_info()
member_id = six.b('%s_%d' % (proc_info['hostname'], proc_info['pid']))
return member_id | 1d1cc24ffa62cc8982a23ea986bdf3cbecca53ac | 18,975 |
def get_table_arn():
"""A method to get the DynamoDB table ARN string.
Returns
-------
dict
A dictionary with AWS ARN string for the table ARN.
"""
resp = dynamodb_client.describe_table(
TableName=table_name
)
return {
"table_arn": resp['Table']['TableArn']
} | ee6648048b1cabdbc04e6c3507cc4e1992f059b2 | 18,976 |
def replace_characters(request):
"""Function to process execute replace_characters function."""
keys = ['text', 'characters', 'replacement']
values = get_data(request, keys)
if not values[0]:
abort(400, 'missing text parameter')
if not values[2]:
values[2] = ''
return _call('replace_characters', keys, values) | 4765d7c3ace0a0069cac34adb04381efc7043355 | 18,977 |
def serialize_to_jsonable(obj):
"""
Serialize any object to a JSONable form
"""
return repr(obj) | c8632b8b475d49b56d47b29afa8b44676b7882a5 | 18,978 |
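# Behaviour sketch for serialize_to_jsonable above (illustrative): every object is reduced
# to its repr(), which is always representable as a JSON string.
# serialize_to_jsonable({'a': 1})  -> "{'a': 1}"
# serialize_to_jsonable(object())  -> "<object object at 0x...>"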
def compare(optimizers, problems, runs=20, all_kwargs={}):
"""Compare a set of optimizers.
Args:
optimizers: list/Optimizer; Either a list of optimizers to compare,
or a single optimizer to test on each problem.
problems: list/Problem; Either a problem instance or a list of problem instances,
one for each optimizer.
all_kwargs: dict/list<dict>; Either the Optimizer.optimize keyword arguments
for all optimizers, or a list of keyword arguments, one for each optimizer.
runs: int; How many times to run each optimizer (smoothness)
Returns:
dict; mapping optimizer identifier to stats.
"""
if not (isinstance(optimizers, collections.Iterable)
or isinstance(problems, collections.Iterable)):
raise TypeError('optimizers or problems must be iterable')
# If optimizers is not a list, repeat into list for each problem
if not isinstance(optimizers, collections.Iterable):
optimizers = [copy.deepcopy(optimizers) for _ in range(len(problems))]
# If problems is not a list, repeat into list for each optimizer
if not isinstance(problems, collections.Iterable):
problems = [copy.deepcopy(problems) for _ in range(len(optimizers))]
# If all_kwargs is not a list, repeat it into a list
if isinstance(all_kwargs, dict):
all_kwargs = [all_kwargs] * len(optimizers)
elif not isinstance(all_kwargs, collections.Iterable):
raise TypeError('all_kwargs must be dict or list of dict')
stats = {}
key_counts = {}
for optimizer, problem, kwargs in zip(optimizers, problems, all_kwargs):
# For nice human readable dictionaries, extract useful names from
# optimizer
class_name = optimizer.__class__.__name__
fitness_func_name = problem._fitness_function.__name__
key_name = '{} {}'.format(class_name, fitness_func_name)
# Keep track of how many optimizers of each class / fitness func
# for better keys in stats dict
try:
key_counts[key_name] += 1
except KeyError:
key_counts[key_name] = 1
# Foo 1, Foo 2, Bar 1, etc.
key = '{} {}'.format(key_name, key_counts[key_name])
print key + ': ',
# Finally, get the actual stats
stats[key] = benchmark(optimizer, problem, runs=runs, **kwargs)
print
return stats | c0f2ed52fe5de8b32e54ca6e3dc05be18145b1e8 | 18,979 |
def az_el2norm(az: float, el: float):
"""Return solar angle as normalized vector."""
theta = np.pi/2-el*np.pi/180
phi = az*np.pi/180
norm = np.asarray(
[
np.sin(theta)*np.cos(phi),
np.sin(theta)*np.sin(phi),
np.cos(theta)
])
return norm | 5d6e53b778846281a6d2944a52acc64c4386ade4 | 18,980 |
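# Quick check for az_el2norm above (illustrative addition; numpy as np is assumed, as in the
# function itself): an azimuth of 180 deg at 45 deg elevation points along -x and +z.
assert np.allclose(az_el2norm(180.0, 45.0),
                   [-np.sqrt(2) / 2, 0.0, np.sqrt(2) / 2], atol=1e-12)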
async def api_get_user(user_id: int, db: Session = Depends(get_db)):
"""
Gets user entity
- **user_id**: the user id
- **db**: current database session object
"""
try:
user = await User.get_by_id(id=user_id, db=db)
return user
except UserNotFoundException as e:
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=e.detail) | 2d89fed33e4fbc81e3461b4791f93772a4814ffe | 18,981 |
import re
def remove_comments(json_like):
"""
Removes C-style comments from *json_like* and returns the result. Example::
        >>> remove_comments('{"foo": "bar", // single-line comment\n"baz": "blah" /* multi-line\ncomment */}')
        '{"foo": "bar", \n"baz": "blah" }'
From: https://gist.github.com/liftoff/ee7b81659673eca23cd9fc0d8b8e68b7
"""
comments_re = re.compile(
r'//.*?$|/\*.*?\*/|\'(?:\\.|[^\\\'])*\'|"(?:\\.|[^\\"])*"',
re.DOTALL | re.MULTILINE,
)
def replacer(match):
s = match.group(0)
if s[0] == "/":
return ""
return s
return comments_re.sub(replacer, json_like) | 32ddf8dd19a8d1029b0d5f221aed05c3883d8ee5 | 18,982 |
def install_editable(projectroot, **kwargs):
"""Install the given project as an "editable" install."""
return run_pip('install', '-e', projectroot, **kwargs) | 70b4f5dc1da26beaae31bb1aeaed4457a0d055a3 | 18,983 |
import time
def retry_get(tap_stream_id, url, config, params=None):
"""Wrap certain streams in a retry wrapper for frequent 500s"""
retries = 20
delay = 120
backoff = 1.5
attempt = 1
while retries >= attempt:
r = authed_get(tap_stream_id, url, config, params)
if r.status_code != 200:
logger.info(f'Got a status code of {r.status_code}, attempt '
f'{attempt} of {retries}. Backing off for {delay} '
f'seconds')
time.sleep(delay)
delay *= backoff
attempt += 1
else:
return r
logger.error(f'Status code of latest attempt: {r.status_code}')
logger.error(f'Latest attempt response {r.content}')
raise ValueError(f'Failed {retries} times trying to hit endpoint {url}') | af9a0dd8d5c7022467562b7c339f8340b6d87d73 | 18,984 |
import datetime
import ahocorasick
from typing import Mapping, Tuple

def build_trie_from_to(template_dictionary: Mapping, from_timestamp: datetime.datetime, to_timestamp: datetime.datetime) -> Tuple[ahocorasick.Automaton, Mapping]:
    """Function which builds the trie from the first timestamp to the last one given."""
trie = ahocorasick.Automaton()
words_mapping = dict() # words mapping
word_templates = dict() # words template
# collect the words and the template associated (a list of them if multiple template is associated)
for template in template_dictionary:
# index first template to consider
index_first_timestamp = find_previous_timestamp(template_dictionary[template], from_timestamp) or 0
# for all the revisions of that template starting from the first date possible
for index in range(index_first_timestamp, len(template_dictionary[template])):
words_list, t_stamp = template_dictionary[template][index]
# stop the iteration because we overcome the to_timestamp limit
if t_stamp > to_timestamp:
break
if not template in words_mapping:
words_mapping[template] = list()
words_mapping[template].append(template_dictionary[template][index]) # word lists for that template
for word in words_list:
if not word in word_templates:
word_templates[word] = list()
word_templates[word].append(template)
for word in word_templates:
trie.add_word(word, (word_templates[word], word)) # key is the word to search, value is the template
trie.make_automaton()
if not word_templates:
return None, None
return trie, words_mapping | ddd655235a78834260ce1a46c19d0e251f62aede | 18,985 |
def check_monotonicity_at_split(
tree_df, tree_no, trend, variable, node, child_nodes_left, child_nodes_right
):
"""Function to check monotonic trend is in place at a given split in a single tree."""
if not isinstance(tree_df, pd.DataFrame):
raise TypeError("tree_df should be a pd.DataFrame")
if not isinstance(tree_no, int):
raise TypeError("tree_no should be an int")
if not isinstance(trend, int):
raise TypeError("trend should be an int")
if not isinstance(node, int):
raise TypeError("node should be an int")
    if not isinstance(child_nodes_left, list):
        raise TypeError("child_nodes_left should be a list")
    if not isinstance(child_nodes_right, list):
        raise TypeError("child_nodes_right should be a list")
all_child_nodes = child_nodes_left + child_nodes_right
tree_nodes = tree_df["nodeid"].tolist()
child_nodes_not_in_tree = list(set(all_child_nodes) - set(tree_nodes))
if len(child_nodes_not_in_tree) > 0:
raise ValueError(
"the following child nodes do not appear in tree; "
+ str(child_nodes_not_in_tree)
)
left_nodes_max_pred = tree_df.loc[
tree_df["nodeid"].isin(child_nodes_left), "weight"
].max()
right_nodes_min_pred = tree_df.loc[
tree_df["nodeid"].isin(child_nodes_right), "weight"
].min()
if trend == 1:
if left_nodes_max_pred <= right_nodes_min_pred:
monotonic = True
else:
monotonic = False
elif trend == -1:
if left_nodes_max_pred >= right_nodes_min_pred:
monotonic = True
else:
monotonic = False
else:
raise ValueError(
"unexpected value for trend; "
+ str(trend)
+ " variable; "
+ str(variable)
+ " node:"
+ str(node)
)
results = {
"variable": variable,
"tree": tree_no,
"nodeid": node,
"monotonic_trend": trend,
"monotonic": monotonic,
"child_nodes_left_max_prediction": left_nodes_max_pred,
"child_nodes_right_min_prediction": right_nodes_min_pred,
"child_nodes_left": str(child_nodes_left),
"child_nodes_right": str(child_nodes_right),
}
results_df = pd.DataFrame(results, index=[node])
return results_df | 97ed2422c6f85112e364e9c63ff0a54d18b6377d | 18,986 |
def allocate_usda_ers_mlu_land_in_urban_areas(df, attr, fbs_list):
"""
This function is used to allocate the USDA_ERS_MLU activity 'land in
urban areas' to NAICS 2012 sectors. Allocation is dependent on
assumptions defined in 'literature_values.py' as well as results from
allocating 'EIA_CBECS_Land' and 'EIA_MECS_Land' to land based sectors.
Methodology is based on the manuscript:
Lin Zeng and Anu Ramaswami
Impact of Locational Choices and Consumer Behaviors on Personal
Land Footprints: An Exploration Across the Urban–Rural Continuum in the
United States
Environmental Science & Technology 2020 54 (6), 3091-3102
DOI: 10.1021/acs.est.9b06024
:param df: df, USDA ERA MLU Land
:param attr: dictionary, attribute data from method yaml for activity set
:param fbs_list: list, FBS dfs for activities created prior
to the activity set that calls on this fxn
:return: df, allocated USDS ERS MLU Land, FBS format
"""
# define sector column to base calculations
sector_col = 'SectorConsumedBy'
vLogDetailed.info('Assuming total land use from MECS and CBECS included '
'in urban land area, so subtracting out calculated '
'MECS and CBECS land from MLU urban land area')
# read in the cbecs and mecs df from df_list
for df_i in fbs_list:
if (df_i['MetaSources'] == 'EIA_CBECS_Land').all():
cbecs = df_i
elif (df_i['MetaSources'] == 'EIA_MECS_Land').all():
mecs = df_i
# load the federal highway administration fees dictionary
fha_dict = get_transportation_sectors_based_on_FHA_fees()
df_fha = pd.DataFrame.from_dict(
fha_dict, orient='index').rename(
columns={'NAICS_2012_Code': sector_col})
# calculate total residential area from the American Housing Survey
residential_land_area = get_area_of_urban_land_occupied_by_houses_2013()
df_residential = df[df[sector_col] == 'F01000']
df_residential = df_residential.assign(FlowAmount=residential_land_area)
# make an assumption about the percent of urban area that is open space
openspace_multiplier = get_open_space_fraction_of_urban_area()
df_openspace = df[df[sector_col] == '712190']
df_openspace = df_openspace.assign(
FlowAmount=df_openspace['FlowAmount'] * openspace_multiplier)
# sum all uses of urban area that are NOT transportation
# first concat dfs for residential, openspace, commercial,
# and manufacturing land use
df_non_urban_transport_area = pd.concat(
[df_residential, df_openspace, cbecs, mecs], sort=False,
ignore_index=True)
df_non_urban_transport_area = \
df_non_urban_transport_area[['Location', 'Unit', 'FlowAmount']]
non_urban_transport_area_sum = df_non_urban_transport_area.groupby(
['Location', 'Unit'], as_index=False).agg(
{'FlowAmount': sum}).rename(columns={'FlowAmount': 'NonTransport'})
# compare units
compare_df_units(df, df_non_urban_transport_area)
# calculate total urban transportation by subtracting
# calculated areas from total urban land
df_transport = df.merge(non_urban_transport_area_sum, how='left')
df_transport = df_transport.assign(
FlowAmount=df_transport['FlowAmount'] - df_transport['NonTransport'])
df_transport.drop(columns=['NonTransport'], inplace=True)
# make an assumption about the percent of urban transport
# area used by airports
airport_multiplier = get_urban_land_use_for_airports()
df_airport = df_transport[df_transport[sector_col] == '488119']
df_airport = df_airport.assign(
FlowAmount=df_airport['FlowAmount'] * airport_multiplier)
# make an assumption about the percent of urban transport
# area used by railroads
railroad_multiplier = get_urban_land_use_for_railroads()
df_railroad = df_transport[df_transport[sector_col] == '482112']
df_railroad = df_railroad.assign(
FlowAmount=df_railroad['FlowAmount'] * railroad_multiplier)
# further allocate the remaining urban transportation area using
# Federal Highway Administration fees
# first subtract area for airports and railroads
air_rail_area = pd.concat([df_airport, df_railroad], sort=False)
air_rail_area = air_rail_area[['Location', 'Unit', 'FlowAmount']]
air_rail_area_sum = air_rail_area.groupby(
['Location', 'Unit'], as_index=False).agg(
{'FlowAmount': sum}).rename(columns={'FlowAmount': 'AirRail'})
df_highway = df_transport.merge(air_rail_area_sum, how='left')
df_highway = df_highway.assign(
FlowAmount=df_highway['FlowAmount'] - df_highway['AirRail'])
df_highway.drop(columns=['AirRail'], inplace=True)
# add fed highway administration fees
df_highway2 = df_highway.merge(df_fha, how='left')
df_highway2 = df_highway2[df_highway2['ShareOfFees'].notna()]
df_highway2 = df_highway2.assign(
FlowAmount=df_highway2['FlowAmount'] * df_highway2['ShareOfFees'])
df_highway2.drop(columns=['ShareOfFees'], inplace=True)
# concat all df subsets
allocated_urban_areas_df = pd.concat(
[df_residential, df_openspace, df_airport, df_railroad, df_highway2],
ignore_index=True, sort=False).reset_index(drop=True)
# aggregate because multiple rows to household data due to residential
# land area and highway fee shares
groupcols = list(df.select_dtypes(include=['object', 'int']).columns)
allocated_urban_areas_df_2 = aggregator(allocated_urban_areas_df,
groupcols)
return allocated_urban_areas_df_2 | 5fcb797b48b912595d722cde3ac0d499526ea899 | 18,987 |
def get_distances_between_points(ray_points3d, last_bin_width=1e10):
"""Estimates the distance between points in a ray.
Args:
ray_points3d: A tensor of shape `[A1, ..., An, M, 3]`,
where M is the number of points in a ray.
      last_bin_width: A scalar indicating the width of the last bin.
Returns:
A tensor of shape `[A1, ..., An, M]` containing the distances between
the M points, with the distance of the last element set to a high value.
"""
shape.check_static(
tensor=ray_points3d,
tensor_name="ray_points3d",
has_dim_equals=(-1, 3))
shape.check_static(
tensor=ray_points3d,
tensor_name="ray_points3d",
has_rank_greater_than=1)
dists = tf.norm(ray_points3d[..., 1:, :] - ray_points3d[..., :-1, :], axis=-1)
if last_bin_width > 0.0:
dists = tf.concat([dists, tf.broadcast_to([last_bin_width],
dists[..., :1].shape)], axis=-1)
return dists | 34752bd64bcf4582945006c4bd3489397aa7350d | 18,988 |
def reclassification_heavy_duty_trucks_to_light_commercial_vehicles(register_df: pd.DataFrame) -> pd.DataFrame:
"""
Replace Category to Light Commercial Vehicles for Heavy Duty Trucks of weight below 3500kg
    These are vehicles registered as TIPUS CAMIONS and therefore classified in the Heavy Duty Trucks
    category even though they do not belong there, because their weight is below 3500 kg.
"""
anti = register_df[(register_df['TIPUS'] == 'CAMIONS') &
(register_df['PES_BUIT'] < 3500) &
(register_df['Category'] == 'Heavy Duty Trucks')]
info_logger.info(f'Total number of Heavy Duty Trucks converted to Light Commercial Vehicles loaded: {anti.shape[0]}')
result = anti_join_all_cols(register_df, anti)
recategorized_rows = anti.assign(Category='Light Commercial Vehicles')
return result.append(recategorized_rows) | 638e3281c323c1dac4ddcde5e58e2eeac4131c04 | 18,989 |
def SpComp(rho, U, mesh, fea, penal):
"""Alias SpCompFunction class with the apply method"""
return SpCompFunction.apply(rho, U, mesh, fea, penal) | 4c0997fa5213e291376feb415f74e48e273ea071 | 18,990 |
def get_landmark_position_from_state(x, ind):
"""
Extract landmark position from state vector
"""
lm = x[STATE_SIZE + LM_SIZE * ind: STATE_SIZE + LM_SIZE * (ind + 1), :]
return lm | 048d31bbd6810663d51e9db34b5c87ebec9b6f27 | 18,992 |
def get_rectangle(roi):
"""
Get the rectangle that has changing colors in the roi.
Returns boolean success value and the four rectangle points in the image
"""
gaussian = cv2.GaussianBlur(roi, (9, 9), 10.0)
roi = cv2.addWeighted(roi, 1.5, gaussian, -0.5, 0, roi)
nh, nw, r = roi.shape
# cluster
Z = roi.reshape((-1, 3))
Z = np.float32(Z)
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
K = 7
ret, label, centers = cv2.kmeans(Z, K, criteria, 10, 0)
centers = np.uint8(centers)
image_as_centers = centers[label.flatten()]
image_as_centers = image_as_centers.reshape((roi.shape))
labels = label.reshape((roi.shape[:2]))
possible_clusters = list(np.arange(K))
whiteness = map(lambda x: npl.norm(x - np.array([255, 255, 255])), centers)
whitest = np.argmin(whiteness)
possible_clusters.remove(whitest)
energys = []
correct_masks = []
for num, p in enumerate(possible_clusters):
mask_clusters = ma.masked_equal(labels, p)
draw_mask = mask_clusters.mask.astype(np.uint8)
draw_mask *= 255
labeled_array, num_features = mes.label(draw_mask)
count = np.bincount(labeled_array.flatten())
count = count[1:]
val = np.argmax(count)
mask_obj = ma.masked_equal(labeled_array, val + 1)
draw_mask = mask_obj.mask.astype(np.uint8)
draw_mask *= 255
# cv2.imshow(str(num), draw_mask)
# cv2.waitKey(0)
top = np.count_nonzero(draw_mask)
valz = np.fliplr(np.transpose(draw_mask.nonzero()))
rect = cv2.minAreaRect(valz)
box = cv2.cv.BoxPoints(rect)
box = np.int0(box)
rect_mask = np.zeros((nh, nw))
cv2.drawContours(rect_mask, [box], 0, 255, -1)
bottom = np.count_nonzero(rect_mask)
l, w, vcost = _get_lw(box)
if w < .001:
print 'WIDTH TOO SMALL'
continue
valz = np.fliplr(np.transpose(draw_mask.nonzero()))
area = cv2.contourArea(box)
area /= (nh * nw)
if vcost > .5:
print "VCOST TOO HIGH"
continue
if area < .03:
print area
print "TOOOO SMALL"
continue
if top / bottom < .7:
print "TOO SPARSE", top / bottom
continue
energy = area + 1.5 * top / bottom - abs(2.5 - l / w) - .2 * vcost
if energy < 0:
"LOW ENERGY!"
continue
print num, "area: ", area, "filled:", top, "total:", bottom, 'rat', top / bottom, "l/w", abs(2.5 - l / w), "vcost",
vcost, "energy", energy
energys.append(energy)
correct_masks.append(mask_obj)
if len(energys) == 0:
print "EVERY ENERGY WRONG"
return False, None
correct_masks = [x for y, x in sorted(zip(energys, correct_masks), reverse=True)]
energys = sorted(energys, reverse=True)
if len(energys) > 1 and abs(energys[0] - energys[1]) < .2:
print "TOO CLOSE TO CALLS"
return False, None
correct_mask = correct_masks[0]
colors = roi[correct_mask.mask]
draw_mask = correct_mask.mask.astype(np.uint8)
draw_mask *= 255
return True, colors | da3e6311f6a598cf57cffd2c0bb06f0ca53fa108 | 18,993 |
async def challenge_process_fixture() -> Challenge:
"""
Populate challenge with:
- Default user
- Is open
- Challenge in process
"""
return await populate_challenge() | cf553c0697c824df4273838c7d87ffe6e8ab6ae7 | 18,994 |
import pkgutil
def _read_pdg_masswidth(filename):
"""Read the PDG mass and width table and return a dictionary.
Parameters
----------
    filename : string
Path to the PDG data file, e.g. 'data/pdg/mass_width_2015.mcd'
Returns
-------
particles : dict
A dictionary where the keys are the particle names with the charge
appended in case of a multiplet with different masses, e.g. 't'
for the top quark, 'K+' and 'K0' for kaons.
The value of the dictionary is again a dictionary with the following
keys:
- 'id': PDG particle ID
        - 'mass': list with the mass, positive and negative error in GeV
        - 'width': list with the width, positive and negative error in GeV
- 'name': same as the key
"""
data = pkgutil.get_data('flavio.physics', filename)
lines = data.decode('utf-8').splitlines()
particles_by_name = {}
for line in lines:
if line.strip()[0] == '*':
continue
mass = ((line[33:51]), (line[52:60]), (line[61:69]))
if mass[0].replace(' ', '') == '':
# if mass is empty, go to next line
            # (necessary for 2019 neutrino entries)
continue
mass = [float(m) for m in mass]
width = ((line[70:88]), (line[89:97]), (line[98:106]))
if width[0].strip() == '':
width = (0,0,0)
else:
width = [float(w) for w in width]
ids = line[0:32].split()
charges = line[107:128].split()[1].split(',')
if len(ids) != len(charges):
raise ValueError()
for i, id_ in enumerate(ids):
particle = {}
particle_charge = charges[i].strip()
particle[particle_charge] = {}
particle[particle_charge]['id'] = id_.strip()
particle[particle_charge]['mass'] = mass
particle[particle_charge]['charge'] = particle_charge
particle[particle_charge]['width'] = width
particle_name = line[107:128].split()[0]
particle[particle_charge]['name'] = particle_name
if particle_name in particles_by_name.keys():
particles_by_name[particle_name].update(particle)
else:
particles_by_name[particle_name] = particle
result = { k + kk: vv for k, v in particles_by_name.items() for kk, vv in v.items() if len(v) > 1}
result.update({ k: list(v.values())[0] for k, v in particles_by_name.items() if len(v) == 1})
return result | c61202aeb4ad36e22786457306de1ac5773b56d2 | 18,995 |
import torch
def fps_and_pred(model, batch, **kwargs):
"""
    Get fingerprints and predictions from the model.
Args:
model (nff.nn.models): original NFF model loaded
batch (dict): batch of data
Returns:
results (dict): model predictions and its predicted
fingerprints, conformer weights, etc.
"""
model.eval()
# make the fingerprints
outputs, xyz = model.make_embeddings(batch, xyz=None, **kwargs)
# pool to get the learned weights and pooled fingerprints
pooled_fp, learned_weights = model.pool(outputs)
# get the final results
results = model.readout(pooled_fp)
# add sigmoid if it's a classifier and not in training mode
if model.classifier:
keys = list(model.readout.readout.keys())
for key in keys:
results[key] = torch.sigmoid(results[key])
# add any required gradients
results = model.add_grad(batch=batch, results=results, xyz=xyz)
# put into a dictionary
conf_fps = [i.cpu().detach() for i in outputs["conf_fps_by_smiles"]]
energy = batch.get("energy")
boltz_weights = batch.get("weights")
# with operations to de-batch
n_confs = [(n // m).item()
for n, m in zip(batch['num_atoms'], batch['mol_size'])]
for key, val in results.items():
results[key] = [i for i in val]
results.update({"fp": [i for i in pooled_fp],
"conf_fps": conf_fps,
"learned_weights": learned_weights,
"boltz_weights": (list(torch.split
(boltz_weights, n_confs)))})
if energy is not None:
results.update({"energy": list(torch.split(energy, n_confs))})
return results | 1a8cca3ffe0d386e506ab42f6e77e00b1a5975d1 | 18,996 |
import re
def preprocess_text(text):
"""
Should return a list of words
"""
text = contract_words(text)
text = text.lower()
# text = text.replace('"', "").replace(",", "").replace("'", "")
text = text.replace('"', "").replace(",", "").replace("'", "").replace(".", " .") ## added by PAVAN
## To capture multiple # feature -- added by PAVAN
if re.search(r'[a-z]+\#', text):
tmp_ls = text.split()
text = ' '.join(
[re.sub(pattern=r'\#', repl=' #', string=str(i)) if re.search(r'[a-z]+\#', str(i)) else i for i in tmp_ls])
## To capture # feature -- added by PAVAN
if re.search(r'\#[a-z]+', text):
tmp_ls = text.split()
text = ' '.join(
[re.sub(pattern=r'\#', repl='hashtagfea ', string=str(i)) if re.search(r'\#[a-z]+', str(i)) else i for i in
tmp_ls])
return text.split() | 0b42b57629e68c2f0bf3831dc226a2ba823cfdb3 | 18,997 |
import unicodedata
def unicodeToAscii(s):
"""unicodeToAscii
Turn a Unicode string to plain ASCII, thanks to https://stackoverflow.com/a/518232/2809427
For example, 'Ślusàrski' -> 'Slusarski'
"""
return ''.join(
c for c in unicodedata.normalize('NFD', s)
if unicodedata.category(c) != 'Mn'
and c in all_letters
) | 7791ae244499b2448ac4bc5de0fde35057d1f5d5 | 18,998 |
def update_ip_table(nclicks, value):
"""
Function that updates the IP table in the Elasticsearch Database that
contains the frequency as well as the IP address of the machine querying
that particular domain.
Args:
nclicks: Contains the number of clicks registered by the submit button.
value: Contains the domain name corresponding to which the IP table has
to be returned.
Returns:
The IP address data regarding the number of times a particular domain
was queried by a particular machine.
"""
if value is None or value == '':
return []
else:
try:
count = es.get(index=value, id=1)['_source']['count']
domain_names = [key for (key, value) in sorted(count.items(),
key=lambda x: x[1],
reverse=True)]
data = [dict({'sl_no': j + 1, 'ip': i, 'count': count[i]})
for i, j in zip(domain_names, range(len(count)))]
except:
data = []
return data | ef0df6f1fb79d86a707990d0790dadabecbd22c1 | 18,999 |
def collect_subclasses(mod, cls, exclude=None):
"""Collecting all subclasses of `cls` in the module `mod`
@param mod: `ModuleType` The module to collect from.
@param cls: `type` or (`list` of `type`) The parent class(es).
@keyword exclude: (`list` of `type`) Classes to not include.
"""
out = []
for name in dir(mod):
attr = getattr(mod, name)
if (
isinstance(attr, type) and
(attr not in cls if isinstance(cls, (list, tuple)) else attr != cls) and
issubclass(attr, cls) and
(attr not in exclude if exclude else True)):
out.append(attr)
return out | 30e64b93fca4d68c3621cae54bb256350875eb77 | 19,000 |
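# Usage sketch for collect_subclasses above (illustrative): build a throwaway module
# object and collect the subclasses it contains.
import types

class _Base: pass
class _A(_Base): pass
class _B(_Base): pass

_mod = types.ModuleType("fake_mod")
_mod.Base, _mod.A, _mod.B = _Base, _A, _B
assert set(collect_subclasses(_mod, _Base)) == {_A, _B}
assert collect_subclasses(_mod, _Base, exclude=[_B]) == [_A]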
import typing
from typing import Counter
def check_collections_equivalent(a: typing.Collection, b: typing.Collection,
allow_duplicates: bool = False,
element_converter: typing.Callable = identity) -> typing.Tuple[str, list]:
"""
:param a: one collection to compare
:param b: other collection to compare
:param allow_duplicates: allow collections to contain multiple elements
:param element_converter: optional function to convert elements of collections to a different value
for comparison
:return: (message, differences)
"""
a = Counter(map(element_converter, a))
b = Counter(map(element_converter, b))
if not allow_duplicates:
duplicates = []
for name, counts in [['a', a], ['b', b]]:
for key, count in counts.items():
if count > 1:
duplicates.append([name, key, count])
if duplicates:
return 'Duplicate elements ', ['|'.join(map(str, dup)) for dup in duplicates]
diffs = []
for el in a | b:
ac = a.get(el, 0)
bc = b.get(el, 0)
if ac != bc:
            diffs.append(f'{el} a={ac} b={bc}')
if diffs:
return "Inconsistent element frequencies: ", diffs
return 'Collections equivalent', [] | 61d78f522a6e87927db6b32b46637f0bb6a10513 | 19,001 |
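# Usage sketch for check_collections_equivalent above (illustrative):
# check_collections_equivalent([1, 2, 3], [3, 2, 1])
#   -> ('Collections equivalent', [])
# check_collections_equivalent([1, 2, 2], [1, 2], allow_duplicates=True)
#   -> ('Inconsistent element frequencies: ', ['2 a=2 b=1'])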
def voting_classifier(*args, **kwargs):
"""
same as in gradient_boosting_from_scratch()
"""
return VotingClassifier(*args, **kwargs) | ed92138c23b699672197d1b436773a5250685250 | 19,002 |
import re
def replace_subject_with_object(sent, sub, obj):
"""Replace the subject with object and remove the original subject"""
    sent = re.sub(r'{}'.format(obj), r'', sent, flags=re.IGNORECASE)
    sent = re.sub(r'{}'.format(sub), r'{} '.format(obj), sent, flags=re.IGNORECASE)
    return re.sub(r'\s{2,}', r' ', sent, flags=re.IGNORECASE) | 1c7f8115968c4e4ef10dcc3b83f0f259433f5082 | 19,003 |
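# Behaviour sketch for replace_subject_with_object above (illustrative):
# replace_subject_with_object("John loves pizza", "John", "pizza")
#   -> "pizza loves "   (the object is removed, the subject is replaced by the object,
#                        and runs of whitespace are collapsed to a single space)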
def estimate_using_user_recent(list_type: str, username: str) -> int:
"""
Estimate the page number of a missing (entry which was just approved) entry
and choose the max page number
this requests a recent user's list, and uses checks if there are any
ids in that list which arent in the approved cache
"""
assert list_type in {"anime", "manga"}
logger.info(f"Estimating {list_type}list using {username}")
appr = approved_ids()
recently_updated_ids = user_recently_updated(
list_type=list_type, username=username, offset=0
)
ids = appr.anime if list_type == "anime" else appr.manga
sorted_approved = list(sorted(ids, reverse=True))
missing_approved = []
for aid in recently_updated_ids:
if aid not in ids:
missing_approved.append(aid)
estimate_pages = [_estimate_page(aid, sorted_approved) for aid in missing_approved]
max_page: int
if len(estimate_pages) == 0:
max_page = 0
else:
max_page = max(estimate_pages) + 1
logger.info(f"Estimated {max_page} {list_type} pages for {username}")
return max_page | b6a7a5bf6c0fa6e13021f10bf2fe613c4186f430 | 19,004 |
def codegen_reload_data():
"""Parameters to codegen used to generate the fn_html2pdf package"""
reload_params = {"package": u"fn_html2pdf",
"incident_fields": [],
"action_fields": [],
"function_params": [u"html2pdf_data", u"html2pdf_data_type", u"html2pdf_stylesheet"],
"datatables": [],
"message_destinations": [u"fn_html2pdf"],
"functions": [u"fn_html2pdf"],
"phases": [],
"automatic_tasks": [],
"scripts": [],
"workflows": [u"example_html2pdf"],
"actions": [u"Example: HTML2PDF"]
}
return reload_params | 45a5e974f3e02953a6d121e37c05022c448adae6 | 19,005 |
def instrument_keywords(instrument, caom=False):
"""Get the keywords for a given instrument service
Parameters
----------
instrument: str
The instrument name, i.e. one of ['niriss','nircam','nirspec',
'miri','fgs']
caom: bool
Query CAOM service
Returns
-------
pd.DataFrame
A DataFrame of the keywords
"""
# Retrieve one dataset to get header keywords
sample = instrument_inventory(instrument, return_data=True, caom=caom,
add_requests={'pagesize': 1, 'page': 1})
data = [[i['name'], i['type']] for i in sample['fields']]
keywords = pd.DataFrame(data, columns=('keyword', 'dtype'))
return keywords | 271f58615dbdbcde4fda9a5248d8ae3b40b90f6d | 19,006 |
from struct import unpack
from time import mktime, strftime, gmtime
def header_info(data_type, payload):
"""Report additional non-payload in network binary data.
These can be status, time, grapic or control structures"""
# Structures are defined in db_access.h.
if payload == None:
return ""
data_type = type_name(data_type)
if data_type.startswith("STS_"):
status, severity = unpack(">HH", payload[0:4])
# Expecting status = 0 (normal), severity = 1 (success)
return "{status:%d,severity:%d}" % (status, severity)
elif data_type.startswith("TIME_"):
status, severity = unpack(">HH", payload[0:4])
# The time stamp is represented as two uint32 values. The first is the
# number of seconds passed since 1 Jan 1990 00:00 GMT. The second is the
# number of nanoseconds within the second.
seconds, nanoseconds = unpack(">II", payload[4:12])
offset = mktime((1990, 1, 1, 0, 0, 0, 0, 0, 0)) - mktime(
(1970, 1, 1, 0, 0, 0, 0, 0, 0)
)
t = seconds + nanoseconds * 1e-9 + offset
timestamp = strftime("%Y-%m-%d %H:%M:%S GMT", gmtime(t))
return "{status:%d,severity:%d, timestamp:%s}" % (status, severity, timestamp)
elif data_type.startswith("GR_"):
status, severity = unpack(">HH", payload[0:4])
info = "status:%d,severity:%d, " % (status, severity)
if data_type.endswith("STRING"):
pass
elif data_type.endswith("SHORT"):
unit = payload[8:16].rstrip(b"\0")
limits = unpack("6h", payload[16 : 16 + 6 * 2])
info += "unit=%r,limits=%r" % (unit, limits)
elif data_type.endswith("FLOAT"):
precision, = unpack(">h", payload[4:6])
unit = payload[8:16].rstrip(b"\0")
limits = unpack(">6f", payload[16 : 16 + 6 * 4])
info += "precision=%r,unit=%r,limits=%r" % (precision, unit, limits)
elif data_type.endswith("ENUM"):
nstrings, = unpack(">h", payload[4:6])
strings = payload[6 : 6 + 16 * 26]
info += "nstrings=%r" % nstrings
elif data_type.endswith("CHAR"):
unit = payload[8:16].rstrip(b"\0")
limits = unpack("6b", payload[16 : 16 + 6 * 1])
info += "unit=%r,limits=%r" % (unit, limits)
elif data_type.endswith("LONG"):
unit = payload[8:16].rstrip(b"\0")
limits = unpack("6i", payload[16 : 16 + 6 * 4])
info += "unit=%r,limits=%r" % (unit, limits)
elif data_type.endswith("DOUBLE"):
precision, = unpack(">h", payload[4:6])
unit = payload[8:16].rstrip(b"\0")
limits = unpack(">6d", payload[16 : 16 + 6 * 8])
info += "precision=%r,unit=%r,limits=%r" % (precision, unit, limits)
else:
info += "?"
        info = info.rstrip(", ")
return "{" + info + "}"
elif data_type.startswith("CTRL_"):
status, severity = unpack(">HH", payload[0:4])
info = "status:%d,severity:%d, " % (status, severity)
if data_type.endswith("STRING"):
pass
elif data_type.endswith("SHORT"):
unit = payload[8:16].rstrip(b"\0")
limits = unpack("8h", payload[16 : 16 + 8 * 2])
info += "unit=%r,limits=%r" % (unit, limits)
elif data_type.endswith("FLOAT"):
precision, = unpack(">h", payload[4:6])
unit = payload[8:16].rstrip(b"\0")
limits = unpack(">8f", payload[16 : 16 + 8 * 4])
info += "precision=%r,unit=%r,limits=%r" % (precision, unit, limits)
elif data_type.endswith("ENUM"):
nstrings, = unpack(">h", payload[4:6])
strings = payload[6 : 6 + 16 * 26]
info += "nstrings=%r" % nstrings
elif data_type.endswith("CHAR"):
unit = payload[8:16].rstrip(b"\0")
limits = unpack("8b", payload[16 : 16 + 8 * 1])
info += "unit=%r,limits=%r" % (unit, limits)
elif data_type.endswith("LONG"):
unit = payload[8:16].rstrip(b"\0")
limits = unpack("8i", payload[16 : 16 + 8 * 4])
info += "unit=%r,limits=%r" % (unit, limits)
elif data_type.endswith("DOUBLE"):
precision, = unpack(">h", payload[4:6])
unit = payload[8:16].rstrip(b"\0")
limits = unpack(">8d", payload[16 : 16 + 8 * 8])
info += "precision=%r,unit=%r,limits=%r" % (precision, unit, limits)
else:
info += "?"
info = info.rstrip(", ")
return "{" + info + "}"
return "" | 6e83be0dff2d7f81a99419baf505e82957518c64 | 19,007 |
def dsa_verify(message, public, signature, constants=None):
"""Checks if the signature (r, s) is correct"""
r, s = signature
p, q, g = get_dsa_constants(constants)
if r <= 0 or r >= q or s <= 0 or s >= q:
return False
w = inverse_mod(s, q)
u1 = (bytes_to_num(sha1_hash(message)) * w) % q
u2 = (r * w) % q
v = ((pow(g, u1, p) * pow(public, u2, p)) % p) % q
return v == r | 9d6eeb9b5b2d84edd054cba01bdd47cb9bab120e | 19,008 |
def set_up_cgi():
"""
Return a configured instance of the CGI simulator on RST.
Sets up the Lyot stop and filter from the configfile, turns off science instrument (SI) internal WFE, and reads
the FPM setting from the configfile.
:return: CGI instrument instance
"""
webbpsf.setup_logging('ERROR')
    # Set the number of actuators
mode_in = CONFIG_PASTIS.get('RST', 'mode')
nbactuator = int(CONFIG_PASTIS.get('RST', 'nb_subapertures'))
nbactuator_in = int(np.sqrt(nbactuator))
if nbactuator_in**2 != nbactuator:
error_msg = f"The number of subapertures from config_pastis.ini is {nbactuator}, which is not the square of the actuators per row (={nbactuator_in})!"
log.error(error_msg)
raise ValueError(error_msg)
cgi = webbpsf.roman.CGI(mode=mode_in, nbactuator=int(nbactuator_in))
cgi.include_si_wfe = False
cgi.apodizer = CONFIG_PASTIS.get('RST', 'apodizer')
cgi.fpm = CONFIG_PASTIS.get('RST', 'fpm')
cgi.lyotstop = CONFIG_PASTIS.get('RST', 'lyotstop')
cgi.camera = CONFIG_PASTIS.get('RST', 'camera')
cgi.filter = CONFIG_PASTIS.get('RST', 'filter_name')
return cgi | 68559e88b9cebb5e2edb049f63d80c9545112804 | 19,009 |
def plot_line(
timstof_data, # alphatims.bruker.TimsTOF object
selected_indices: np.ndarray,
x_axis_label: str,
colorscale_qualitative: str,
title: str = "",
y_axis_label: str = "intensity",
remove_zeros: bool = False,
trim: bool = True,
height: int = 400
) -> go.Figure:
"""Plot an XIC, mobilogram or spectrum as a lineplot.
Parameters
----------
timstof_data : alphatims.bruker.TimsTOF object
An alphatims.bruker.TimsTOF data object.
selected_indices : np.ndarray
The raw indices that are selected for this plot. These are typically obtained by slicing the TimsTOF data object with e.g. data[..., "raw"].
x_axis_label : str
The label of the x-axis. Options are:
- mz
- rt
- mobility
y_axis_label : str
Should not be set for a 1D line plot. Default is "intensity".
title : str
The title of the plot. Default is "".
remove_zeros : bool
        If True, zeros are removed. Note that a line plot connects consecutive points, which can lead to misleading plots if zeros are removed. If False, use the full range of the appropriate dimension of the timstof_data. Default is False.
trim : bool
If True, zeros on the left and right are trimmed. Default is True.
height : int
Plot height. Default is 400.
Returns
-------
plotly.graph_objects.Figure object
        A line plot showing an XIC, mobilogram or spectrum.
"""
axis_dict = {
"mz": "m/z, Th",
"rt": "RT, min",
"mobility": "Inversed IM, V·s·cm\u207B\u00B2",
"intensity": "Intensity",
}
x_axis_label = axis_dict[x_axis_label]
y_axis_label = axis_dict[y_axis_label]
labels = {
'm/z, Th': "mz_values",
'RT, min': "rt_values",
'Inversed IM, V·s·cm\u207B\u00B2': "mobility_values",
}
x_dimension = labels[x_axis_label]
intensities = timstof_data.bin_intensities(selected_indices, [x_dimension])
if x_dimension == "mz_values":
x_ticks = timstof_data.mz_values
plot_title = "Spectrum"
elif x_dimension == "mobility_values":
x_ticks = timstof_data.mobility_values
plot_title = "Mobilogram"
elif x_dimension == "rt_values":
x_ticks = timstof_data.rt_values / 60
plot_title = "XIC"
non_zeros = np.flatnonzero(intensities)
if len(non_zeros) == 0:
x_ticks = np.empty(0, dtype=x_ticks.dtype)
intensities = np.empty(0, dtype=intensities.dtype)
else:
if remove_zeros:
x_ticks = x_ticks[non_zeros]
intensities = intensities[non_zeros]
elif trim:
start = max(0, non_zeros[0] - 1)
end = non_zeros[-1] + 2
x_ticks = x_ticks[start: end]
intensities = intensities[start: end]
fig = go.Figure()
fig.add_trace(
go.Scatter(
x=x_ticks,
y=intensities,
mode='lines',
text=[f'{x_axis_label}'.format(i + 1) for i in range(len(x_ticks))],
hovertemplate='<b>%{text}:</b> %{x};<br><b>Intensity:</b> %{y}.',
name=" ",
marker=dict(color=getattr(px.colors.qualitative, colorscale_qualitative)[0])
)
)
fig.update_layout(
title=dict(
text=plot_title,
font=dict(
size=16,
),
x=0.5,
xanchor='center',
yanchor='top'
),
xaxis=dict(
title=x_axis_label,
titlefont_size=14,
tickmode='auto',
tickfont_size=14,
),
yaxis=dict(
title=y_axis_label,
),
template="plotly_white",
height=height,
hovermode="x"
)
return fig | 5db0468710e49158c4b13fc56446a23949544e57 | 19,010 |
def all_gather(data):
"""
Run all_gather on arbitrary picklable data (not necessarily tensors)
Args:
data: any picklable object
Returns:
list[data]: list of data gathered from each rank
"""
world_size = get_world_size()
if world_size == 1:
return [data]
data_list = [None] * world_size
dist.all_gather_object(data_list, data)
return data_list | 46f34975d89766c842b6c20e312ad3dea4f3d7ff | 19,011 |
from ..utils import check_adata
def draw_graph(
adata,
layout=None,
color=None,
alpha=None,
groups=None,
components=None,
legend_loc='right margin',
legend_fontsize=None,
legend_fontweight=None,
color_map=None,
palette=None,
right_margin=None,
size=None,
title=None,
show=None,
save=None,
ax=None):
"""Scatter plot in graph-drawing basis.
Parameters
----------
adata : AnnData
Annotated data matrix.
layout : {'fr', 'drl', ...}, optional (default: last computed)
One of the `draw_graph` layouts, see sc.tl.draw_graph. By default,
the last computed layout is taken.
color : string or list of strings, optional (default: None)
Keys for sample/cell annotation either as list `["ann1", "ann2"]` or
string `"ann1,ann2,..."`.
groups : str, optional (default: all groups)
Restrict to a few categories in categorical sample annotation.
components : str or list of str, optional (default: '1,2')
String of the form '1,2' or ['1,2', '2,3'].
legend_loc : str, optional (default: 'right margin')
Location of legend, either 'on data', 'right margin' or valid keywords
for matplotlib.legend.
legend_fontsize : int (default: None)
Legend font size.
color_map : str (default: `matplotlib.rcParams['image.cmap']`)
String denoting matplotlib color map.
palette : list of str (default: None)
Colors to use for plotting groups (categorical annotation).
right_margin : float or list of floats (default: None)
Adjust the width of the space right of each plotting panel.
size : float (default: None)
Point size.
title : str, optional (default: None)
Provide title for panels either as `["title1", "title2", ...]` or
`"title1,title2,..."`.
show : bool, optional (default: None)
Show the plot.
save : bool or str, optional (default: None)
If True or a str, save the figure. A string is appended to the
default filename.
ax : matplotlib.Axes
A matplotlib axes object.
Returns
-------
matplotlib.Axes object
"""
adata = check_adata(adata)
if layout is None: layout = adata.add['draw_graph_layout'][-1]
if 'X_draw_graph_' + layout not in adata.smp_keys():
raise ValueError('Did not find {} in adata.smp. Did you compute layout {}?'
.format('draw_graph_' + layout, layout))
axs = scatter(
adata,
basis='draw_graph_' + layout,
color=color,
alpha=alpha,
groups=groups,
components=components,
projection='2d',
legend_loc=legend_loc,
legend_fontsize=legend_fontsize,
legend_fontweight=legend_fontweight,
color_map=color_map,
palette=palette,
right_margin=right_margin,
size=size,
title=title,
show=show,
save=save,
ax=ax)
return axs | 006121b0162afcf6b91703dac8c1ea3e6d1351bc | 19,013 |
def post_token():
"""
Receives authentication credentials in order to generate an access
token to be used to access protected models. Tokens generated
by this endpoint are JWT Tokens.
"""
# First we verify the request is an actual json request. If not, then we
    # respond with an HTTP 400 Bad Request result code.
if not request.is_json:
app.logger.warning('Request without JSON payload received on token endpoint')
return jsonify({"msg": "Only JSON request is supported"}), 400
# Read credentials from json request
params = request.get_json()
    # Try to read the username and password properties. If one of them is not found,
# then we generate an error and stop execution.
username = params.get('username', None)
password = params.get('password', None)
if not username:
app.logger.warning('Request without username parameter received on token endpoint')
return jsonify({"msg": "A username parameter must be provided"}), 400
if not password:
app.logger.warning('Request without password parameter received on token endpoint')
return jsonify({"msg": "A password parameter must be provided"}), 400
    # If we get here, it is because username and password credentials were
# provided, so now we must verify them.
user = get_user_by_username(username)
if user is not None:
if user.authenticate(password):
# ACCESS TOKEN
access_token_expires = app.config['JWT_ACCESS_TOKEN_VALIDITY_HOURS']
access_token = create_access_token(identity=user.user_id, expires_delta=access_token_expires)
# REFRESH TOKEN
refresh_token_expires = app.config['JWT_REFRESH_TOKEN_VALIDITY_DAYS']
refresh_token = create_refresh_token(identity=user.user_id, expires_delta=refresh_token_expires)
app.logger.info('A new token has been generated for user [' + user.user_id + "]")
return jsonify({
'access_token': access_token,
'expiration': access_token_expires.total_seconds(),
'refresh_token': refresh_token
}), 200
        else:
            app.logger.warning('Invalid password provided for user [' + username + ']')
            return jsonify({"msg": "Invalid username or password"}), 401
    else:
        app.logger.warning('Request with invalid username was received')
        return jsonify({"msg": "Unable to find user with [" + username + "] username"}), 404 | d51de9aa201fdb0d879190c5f08352b43f425be4 | 19,014
def judgement(seed_a, seed_b):
"""Return amount of times last 16 binary digits of generators match."""
sample = 0
count = 0
while sample <= 40000000:
new_a = seed_a * 16807 % 2147483647
new_b = seed_b * 48271 % 2147483647
bin_a = bin(new_a)
bin_b = bin(new_b)
last16_a = bin_a[-16:]
last16_b = bin_b[-16:]
if last16_a == last16_b:
count += 1
seed_a = new_a
seed_b = new_b
sample += 1
return count | 9d778909ba6b04e4ca3adbb542fce9ef89d7b2b7 | 19,015 |
def GJK(shape1, shape2):
""" Implementation of the GJK algorithm
PARAMETERS
----------
shape{1, 2}: Shape
RETURN
------
: bool
Signifies if the given shapes intersect or not.
"""
# Initialize algorithm parameters
direction = Vec(shape1.center, shape2.center).direction
A = support(shape1, shape2, direction)
simplex = [A]
direction = Vec(simplex[0], Point()).direction
while True: # while new valid support found. `direction` is updated each iteration.
B = support(shape1, shape2, direction)
AB = Vec(simplex[0], B)
if dot_vec_dir(AB, direction) <= 0: # No support past the origin
return False
else:
simplex.append(B)
if handle_simplex(simplex, direction):
return True | 4e3b24ec9fab1d2625c3d99ae3ffc2325c1dcaf8 | 19,016 |
def _set_int_config_parameter(value: OZWValue, new_value: int) -> int:
"""Set a ValueType.INT config parameter."""
try:
new_value = int(new_value)
except ValueError as err:
raise WrongTypeError(
(
f"Configuration parameter type {value.type} does not match "
f"the value type {type(new_value)}"
)
) from err
if (value.max is not None and new_value > value.max) or (
value.min is not None and new_value < value.min
):
raise InvalidValueError(
f"Value {new_value} out of range of parameter (Range: {value.min}-{value.max})"
)
value.send_value(new_value) # type: ignore
return new_value | e9e168aa1959dfab141622a0d0f3751a1e042dfd | 19,017 |
def split_dataset(dataset_file, trainpct):
"""
Split a file containing the full path to individual annotation files into
train and test datasets, with a split defined by trainpct.
Inputs:
- dataset_file - a .txt or .csv file containing file paths pointing to annotation files.
(Expects that these have no header)
- trainpct = 0.8 produces an 80:20 train:test split
"""
if type(dataset_file) is list:
full_dataset = pd.DataFrame(dataset_file, columns=["Filename"])
else:
full_dataset = pd.read_csv(dataset_file, names=["Filename"])
print(
"You've chosen a training percentage of: {} (this variable has type: {})".format(
trainpct, type(trainpct)
)
)
testsize = 1.0 - trainpct
train, test = train_test_split(
full_dataset, test_size=testsize, shuffle=True, random_state=42
) # set the random seed so we get reproducible results!
return train, test | 0f24d29efdf3645a743bbb6d9e2e27b9087552be | 19,018 |
def accession(data):
"""
Get the accession for the given data.
"""
return data["mgi_marker_accession_id"] | 132dcbdd0712ae30ce7929e58c4bc8cdf73aacb2 | 19,019 |
def get_phase_dir(self):
"""Get the phase rotating direction of stator flux stored in LUT
Parameters
----------
self : LUT
a LUT object
Returns
----------
phase_dir : int
rotating direction of phases +/-1
"""
if self.phase_dir not in [-1, 1]:
# recalculate phase_dir from Phi_wind
self.phase_dir = get_phase_dir_DataTime(self.Phi_wind[0])
return self.phase_dir | e335f78d6219f0db5a390cf47aaa7aa093f7c329 | 19,020 |
def atomic_number(request):
"""
An atomic number.
"""
return request.param | 6f1a868c94d0a1ee4c84a76f04b4cabc3e0356e0 | 19,021 |
def plot_metric(title = 'Plot of registration metric vs iterations'):
"""Plots the mutual information over registration iterations
Parameters
----------
title : str
Returns
-------
fig : matplotlib figure
"""
global metric_values, multires_iterations
fig, ax = plt.subplots()
ax.set_title(title)
ax.set_xlabel('Iteration Number', fontsize=12)
ax.set_ylabel('Mutual Information Cost', fontsize=12)
ax.plot(metric_values, 'r')
ax.plot(multires_iterations, [metric_values[index] for index in multires_iterations], 'b*', label = 'change in resolution')
ax.legend()
return fig | 488d96876a469522263f6c7118b94b35a25e36de | 19,022 |
import theano.tensor as T
def cross_entropy(model, _input, _target):
""" Compute Cross Entropy between target and output diversity.
Parameters
----------
model : Model
        Model used to generate the output that is compared with the target sample.
_input : theano.tensor.matrix
Input sample.
_target : theano.tensor.matrix
Target sample.
Returns
-------
theano.tensor.matrix
Return Cross Entropy.
"""
return T.nnet.categorical_crossentropy(model.output(_input), _target).mean() | c65efe3185269d8f7132e23abbd517ca9273d481 | 19,023 |
def get_groups_links(groups, tenant_id, rel='self', limit=None, marker=None):
"""
Get the links to groups along with 'next' link
"""
url = get_autoscale_links(tenant_id, format=None)
return get_collection_links(groups, url, rel, limit, marker) | 35188c3c6d01026153a6e18365ae0b4b596a8883 | 19,025 |
def over(expr: ir.ValueExpr, window: win.Window) -> ir.ValueExpr:
"""Construct a window expression.
Parameters
----------
expr
A value expression
window
Window specification
Returns
-------
ValueExpr
A window function expression
See Also
--------
ibis.window
"""
prior_op = expr.op()
if isinstance(prior_op, ops.WindowOp):
op = prior_op.over(window)
else:
op = ops.WindowOp(expr, window)
result = op.to_expr()
try:
name = expr.get_name()
except com.ExpressionError:
pass
else:
result = result.name(name)
return result | e9c8f656403520d5f3287de38c139b8fd8446d13 | 19,026 |
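# Usage sketch for `over` (illustrative, not part of the original module). The table and
# column names below are made up, and the example assumes the full ibis package is
# importable from user code.
import ibis

t = ibis.table([('region', 'string'), ('amount', 'double')], name='sales')
win = ibis.window(group_by=t.region)
# Per-region total attached to every row; the result keeps the name of the
# input expression when one is available.
regional_total = over(t.amount.sum().name('amount_total'), win)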
def node_value(node: Node) -> int:
"""
Computes the value of node
"""
if not node.children:
return sum(node.entries)
else:
value = 0
for entry in node.entries:
try:
# Entries start at 1 so subtract all entries by 1
value += node_value(node.children[entry - 1])
except IndexError:
pass
return value | c22ac3f73995e138f7eb329499caba3fc67175a5 | 19,027 |
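# Worked example for `node_value` (illustrative, not from the original source). `Node` is
# a hypothetical container with `children` and `entries` attributes. Leaf B is worth 33,
# C's only entry (2) points at a missing child so C is worth 0, and the root's entries
# [1, 1, 2] therefore sum to 33 + 33 + 0 = 66.
from collections import namedtuple

Node = namedtuple('Node', ['children', 'entries'])

B = Node(children=[], entries=[10, 11, 12])       # leaf: value 33
D = Node(children=[], entries=[99])               # leaf: value 99
C = Node(children=[D], entries=[2])               # entry 2 -> no second child -> 0
root = Node(children=[B, C], entries=[1, 1, 2])   # 33 + 33 + 0
assert node_value(root) == 66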
import re
def load_mac_vendors() :
""" parses wireshark mac address db and returns dict of mac : vendor """
entries = {}
f = open('mac_vendors.db', 'r')
for lines in f.readlines() :
entry = lines.split()
# match on first column being first six bytes
r = re.compile(r'^([0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2})$')
if len(entry) > 0 and r.match(entry[0]) :
# lowercase as convention
entries[entry[0].lower()] = entry[1]
return entries | 361e9c79de8b473c8757ae63384926d266b68bbf | 19,028 |
def parse_time(s):
"""
Parse time spec with optional s/m/h/d/w suffix
"""
if s[-1].lower() in secs:
return int(s[:-1]) * secs[s[-1].lower()]
else:
return int(s) | 213c601143e57b5fe6cd123631c6cd562f2947e9 | 19,029 |
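# Usage sketch for `parse_time` (illustrative). The module-level `secs` mapping is not
# shown in this snippet, so the definition below is an assumption about its shape.
secs = {'s': 1, 'm': 60, 'h': 3600, 'd': 86400, 'w': 604800}

assert parse_time('90') == 90       # bare integers are taken as seconds
assert parse_time('5m') == 300
assert parse_time('2H') == 7200     # the suffix is case-insensitive
assert parse_time('1w') == 604800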
def resize_labels(labels, size):
"""Helper function to resize labels.
Args:
labels: A long tensor of shape `[batch_size, height, width]`.
Returns:
A long tensor of shape `[batch_size, new_height, new_width]`.
"""
n, h, w = labels.shape
labels = F.interpolate(labels.view(n, 1, h, w).float(),
size=size,
mode='nearest')
labels = labels.squeeze_(1).long()
return labels | 87c7127643e9e46878bc526ebed4068d40f25ece | 19,030 |
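# Example usage sketch (not part of the original code); assumes PyTorch is installed and
# that `F` above is `torch.nn.functional`, as is conventional.
import torch

labels = torch.randint(0, 21, (2, 128, 128))   # batch of 2 segmentation maps with 21 classes
small = resize_labels(labels, size=(65, 65))
assert small.shape == (2, 65, 65)
assert small.dtype == torch.long               # nearest-neighbour resizing keeps class ids intact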
import re
def _extract_urls(html):
"""
Try to find all embedded links, whether external or internal
"""
# substitute real html symbols
html = _replace_ampersands(html)
urls = set()
hrefrx = re.compile("""href\s*\=\s*['"](.*?)['"]""")
for url in re.findall(hrefrx, html):
urls.add(str(url))
srcrx = re.compile("""src\s*\=\s*['"](.*?)['"]""")
for url in re.findall(srcrx, html):
urls.add(str(url))
html = re.sub('%20', ' ', html, flags=re.DOTALL)
# extract URLs that are not surrounded by quotes
urlrx = re.compile("""[^'"](http[s]?://[\.a-zA-Z0-9/]+?)\s""")
for url in re.findall(urlrx, html):
urls.add(str(url))
# extract URLs that are surrounded by quotes
# remove whitespace
html = re.sub('\s+', '', html)
    urlrx = re.compile("'(http[s]?://[\.a-zA-Z0-9/]+?)'", flags=re.DOTALL)
    for url in re.findall(urlrx, html):
        urls.add(url)
    urlrx = re.compile('"(http[s]?://[\.a-zA-Z0-9/]+?)"', flags=re.DOTALL)
    for url in re.findall(urlrx, html):
        urls.add(url)
# remove empty string if exists
try:
urls.remove('')
except KeyError:
pass
return sorted(urls) | 5303cf7b750926aa5919bbfa839bd227319aa9f7 | 19,031 |
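# Quick illustration of `_extract_urls` (not part of the original module). The HTML below
# is made up, and the call assumes the module's `_replace_ampersands` helper is available.
sample_html = (
    '<a href="https://example.com/a">a</a> '
    "<img src='https://example.com/img.png'/> "
    'see https://example.com/plain for details'
)
# Expected: ['https://example.com/a', 'https://example.com/img.png', 'https://example.com/plain']
print(_extract_urls(sample_html))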
def reorganize_data(texts):
"""
    Reorganize data to contain tuples of all signs combined and all trans combined
:param texts: sentences in format of tuples of (sign, tran)
:return: data reorganized
"""
data = []
for sentence in texts:
signs = []
trans = []
for sign, tran in sentence:
signs.append(sign)
trans.append(tran)
data.append((signs, trans))
return data | 27b4efd99bbf470a9f8f46ab3e34c93c606d0234 | 19,032 |
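# Worked example for `reorganize_data` (illustrative): each sentence of (sign, tran)
# pairs becomes a single (signs, trans) tuple.
sentences = [
    [('KA', 'ka'), ('BA', 'ba')],
    [('NI', 'ni')],
]
assert reorganize_data(sentences) == [(['KA', 'BA'], ['ka', 'ba']), (['NI'], ['ni'])]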
def client_new():
"""Create new client."""
form = ClientForm(request.form)
if form.validate_on_submit():
c = Client(user_id=current_user.get_id())
c.gen_salt()
form.populate_obj(c)
db.session.add(c)
db.session.commit()
return redirect(url_for('.client_view', client_id=c.client_id))
return render_template(
'invenio_oauth2server/settings/client_new.html',
form=form,
) | b355f43cd80e0f7fef3027f5f1d1832c4e4ece5a | 19,033 |
def query_schema_existence(conn, schema_name):
"""Function to verify whether the current database schema ownership is correct."""
with conn.cursor() as cur:
cur.execute('SELECT EXISTS(SELECT 1 FROM information_schema.schemata WHERE SCHEMA_NAME = %s)',
[schema_name])
return cur.fetchone().exists | 9c556283d255f580fc69a9e41a4d452d15e1eb17 | 19,034 |
def get_number_of_params(model, trainable_only=False):
"""
Get the number of parameters in a PyTorch Model
:param model(torch.nn.Model):
:param trainable_only(bool): If True, only count the trainable parameters
:return(int): The number of parameters in the model
"""
return int(np.sum([np.prod(param.size()) for param in model.parameters()
if param.requires_grad or (not trainable_only)])) | 4e02e977e9fc2949a62ce433c9ff6d732d74a746 | 19,035 |
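# Quick check of `get_number_of_params` (illustrative); assumes PyTorch is installed and
# that numpy is imported as `np` in this module, as the function body requires.
import torch.nn as nn

layer = nn.Linear(10, 5)                       # 10*5 weights + 5 biases = 55 parameters
assert get_number_of_params(layer) == 55
layer.bias.requires_grad = False               # freeze the bias
assert get_number_of_params(layer, trainable_only=True) == 50
assert get_number_of_params(layer) == 55       # frozen parameters still count by default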
import time
import requests
def chart1(request):
"""
This view tests the server speed for transferring JSON and XML objects.
:param request: The AJAX request
:return: JsonResponse of the dataset.
"""
full_url = HttpRequest.build_absolute_uri(request)
relative = HttpRequest.get_full_path(request)
base_url = full_url[:-len(relative)]
request_amount = ['10', '100', '200', '500', '1000']
json_urls = list()
xml_urls = list()
for x in request_amount:
json_urls.append(reverse('objects:leads_json', args=[x]))
xml_urls.append(reverse('objects:leads_xml', args=[x]))
json_data = list()
xml_data = list()
for x in json_urls:
start = time.perf_counter()
requests.get(base_url + x)
end = time.perf_counter()
json_data.append((end - start))
for x in xml_urls:
start = time.perf_counter()
requests.get(base_url + x)
end = time.perf_counter()
xml_data.append((end - start))
final_data = {
'labels': request_amount,
'datasets': [
{
'label': 'JSON',
'backgroundColor': 'rgba(255, 99, 132, 0.2)',
'borderColor': 'rgba(255,99,132,1)',
'data': json_data,
'borderWidth': 2,
'yAxisID': 'first-y-axis'
},
{
'label': 'XML',
'backgroundColor': 'rgba(54, 162, 235, 0.2)',
'borderColor': 'rgba(54, 162, 235, 1)',
'data': xml_data,
'borderWidth': 2,
'yAxisID': 'first-y-axis'
}
]
}
return JsonResponse(final_data) | 6eb88d3ef1aed85799832d5751ec4e30c54aaa07 | 19,036 |
def conv3x3(in_planes, out_planes, stride=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=dilation, bias=False) | 115dd9e8afdaa850293fa08c103ab2966eceedbf | 19,037 |
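# Shape check for `conv3x3` (illustrative); assumes PyTorch is installed and that `nn`
# above is `torch.nn`.
import torch

conv = conv3x3(16, 32, stride=2)
out = conv(torch.randn(1, 16, 64, 64))
assert out.shape == (1, 32, 32, 32)   # padding=1 keeps the 3x3 kernel centred, stride 2 halves H and W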
import mimetypes
def img_mime_type(img):
"""Returns image MIME type or ``None``.
Parameters
----------
img: `PIL.Image`
PIL Image object.
Returns
-------
mime_type : `str`
MIME string like "image/jpg" or ``None``.
"""
if img.format:
ext = "." + img.format
return mimetypes.types_map.get(ext.lower())
return None | fe46af6e5c03a1ae80cb809c81ab358ac5c085fa | 19,038 |
def check_satisfy_dataset(w, D, involved_predicates=[]):
"""
    Check whether every fact in ``D`` that holds over a ruler interval of the given
    Window ``w`` has also been installed in that ruler interval.
Args:
w (a Window instance):
D (dictionary of dictionary object): contain all facts
involved_predicates (a list of str): contain all predicates that are needed to be checked.
Returns:
boolean
"""
for ruler_interval in w.ruler_intervals:
for predicate in involved_predicates:
if type(D[predicate]) == list:
interval_list = D[predicate]
if interval_intesection_intervallist(ruler_interval, interval_list) and Atom(predicate) not \
in w.get_ruler_intervals_literals()[ruler_interval]:
return False
else:
for entity, interval_list in D[predicate].items():
if interval_intesection_intervallist(ruler_interval, interval_list) and Atom(predicate) not \
in w.get_ruler_intervals_literals()[ruler_interval]:
return False
return True | b2715ca1eba03bbcf0581fdfeb97177cde8d12d7 | 19,040 |
def interp_at(d, g, varargs=None, dim=None, dask="parallelized"):
"""
Interpolates a variable to another.
Example : varargs = [THETA, mld] : THETA(t, z, y, x) is interpolated with Z=mld(t, y, x)
"""
var, coordvar = varargs
dim = (
dim if dim is not None else set(d[var].dims).difference(d[coordvar].dims).pop()
)
X = d[dim].values
data = xr.apply_ufunc(
_interp1DAt,
d[var],
d[coordvar],
input_core_dims=[[dim], []],
dask=dask,
output_dtypes=[float],
kwargs={"X": X},
keep_attrs=True,
)
data.attrs.update(
long_name=d[var].attrs.get("long_name", var)
+ " interpolated to {} along {}".format(coordvar, dim),
name="{}_{}_{}".format(var, dim, coordvar),
)
return data | a31cccee447deb2fd2612460471598d74e347c53 | 19,041 |
def get_history():
"""Get command usage history from History.sublime-project"""
    with open('%s/%s/%s' % (sublime.packages_path(),
                            "TextTransmute",
                            "History.sublime-project"), 'r') as f:
        content = f.readlines()
return [x.strip() for x in content] | fab11f52c2b90d1fb29ace944c8f80f67fc9170e | 19,042 |
from typing import Dict
from typing import Callable
from typing import Any
import asyncio
def inprogress(metric: Gauge, labels: Dict[str, str] = None) -> Callable[..., Any]:
"""
This decorator provides a convenient way to track in-progress requests
(or other things) in a callable.
This decorator function wraps a function with code to track how many
of the measured items are in progress.
The metric is incremented before calling the wrapped function and
decremented when the wrapped function is complete.
:param metric: a metric to increment and decrement. The metric object
being updated is expected to be a Gauge metric object.
:param labels: a dict of extra labels to associate with the metric.
    :return: a coroutine function that wraps the decorated function
"""
if not isinstance(metric, Gauge):
        raise Exception(
            "inprogress decorator expects a Gauge metric but got: {}".format(metric)
        )
def track(func):
"""
        This function wraps a decorated callable with metric incrementing
and decrementing logic.
:param func: the callable to be tracked.
:returns: the return value from the decorated callable.
"""
@wraps(func)
async def func_wrapper(*args, **kwds):
metric.inc(labels)
rv = func(*args, **kwds)
if isinstance(rv, asyncio.Future) or asyncio.iscoroutine(rv):
rv = await rv
metric.dec(labels)
return rv
return func_wrapper
return track | c10adbb07796c26fd59d1038a786b25b6346fd97 | 19,043 |
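# Usage sketch for `inprogress` (illustrative, not part of the original module); assumes
# the Gauge class referenced above comes from aioprometheus, and `fetch_data` is a made-up
# coroutine. The gauge rises while the handler runs and falls back when it completes.
from aioprometheus import Gauge

IN_PROGRESS = Gauge("handler_in_progress", "Requests currently being handled")

@inprogress(IN_PROGRESS, labels={"route": "/data"})
async def fetch_data(request_id):
    ...  # do the actual work here
    return request_id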
from typing import Optional
import base64
def b58_wrapper_to_b64_public_address(b58_string: str) -> Optional[str]:
"""Convert a b58-encoded PrintableWrapper address into a b64-encoded PublicAddress protobuf"""
wrapper = b58_wrapper_to_protobuf(b58_string)
if wrapper:
public_address = wrapper.public_address
public_address_bytes = public_address.SerializeToString()
return base64.b64encode(public_address_bytes).decode("utf-8")
return None | a4c46800c0d22ef96d3fa622b6a37bf55e1960da | 19,044 |
def render_to_AJAX(status, messages):
"""return an HTTP response for an AJAX request"""
xmlc = Context({'status': status,
'messages': messages})
xmlt = loader.get_template("AJAXresponse.xml")
response = xmlt.render(xmlc)
return HttpResponse(response) | cb5ad3ead4d5bee9a710767d1f49c81b2b137441 | 19,045 |
def parse_params(environ, *include):
"""Parse out the filter, sort, etc., parameters from a request"""
if environ.get('QUERY_STRING'):
params = parse_qs(environ['QUERY_STRING'])
else:
params = {}
param_handlers = (
('embedded', params_serializer.unserialize_string, None),
('filter', params_serializer.unserialize_string, None),
('sort', params_serializer.unserialize_string, None),
('offset', int, 0),
('limit', int, 0),
('show_hidden', bool_field, False)
)
results = {}
if len(include) > 0:
include = set(include)
else:
include = None
for name, fn, default in param_handlers:
if include and name not in include:
continue
results[name] = parse_param(params, name, fn, default=default)
if not include or 'context' in include:
results['context'] = get_context(environ)
return results | ff8d263e4495804e1bb30d0c4da352b2437fc8f8 | 19,046 |
def create_dict_facade_for_object_vars_and_mapping_with_filters(cls, # type: Type[Mapping]
include, # type: Union[str, Tuple[str]]
exclude, # type: Union[str, Tuple[str]]
private_name_prefix=None # type: str
):
# type: (...) -> DictMethods
"""
    :param cls: the class being decorated; it is expected to be a Mapping subclass
    :param include: name or tuple of names of attributes to expose; if empty, all are eligible
    :param exclude: name or tuple of names of attributes to hide
:param private_name_prefix: if provided, only the fields not starting with this prefix will be exposed. Otherwise
all will be exposed
:return:
"""
public_fields_only = private_name_prefix is not None
def __iter__(self):
"""
Generated by @autodict.
Implements the __iter__ method from collections.Iterable by relying on a filtered vars(self)
:param self:
:return:
"""
myattrs = tuple(att_name for att_name in iterate_on_vars(self))
for att_name in chain(myattrs, (o for o in super(cls, self).__iter__() if o not in myattrs)):
# filter based on the name (include/exclude + private/public)
if is_attr_selected(att_name, include=include, exclude=exclude) and \
(not public_fields_only or not att_name.startswith(private_name_prefix)):
# use that name
yield att_name
def __getitem__(self, key):
"""
Generated by @autodict.
Implements the __getitem__ method from collections.Mapping by relying on a filtered getattr(self, key)
"""
if hasattr(self, key):
key = possibly_replace_with_property_name(self.__class__, key)
if is_attr_selected(key, include=include, exclude=exclude) and \
(not public_fields_only or not key.startswith(private_name_prefix)):
return getattr(self, key)
else:
try:
# noinspection PyUnresolvedReferences
return super(cls, self).__getitem__(key)
except Exception as e:
raise KeyError('@autodict generated dict view - {key} is a '
'hidden field and super[{key}] raises an exception: {etyp} {err}'
''.format(key=key, etyp=type(e).__name__, err=e))
else:
try:
# noinspection PyUnresolvedReferences
return super(cls, self).__getitem__(key)
except Exception as e:
raise KeyError('@autodict generated dict view - {key} is an '
'invalid field name (was the constructor called?). Delegating to '
'super[{key}] raises an exception: {etyp} {err}'
''.format(key=key, etyp=type(e).__name__, err=e))
return DictMethods(iter=__iter__, getitem=__getitem__) | cccdc19b43ca269cfedb4f7d6ad1d7b8abba78e1 | 19,047 |
import time
def now():
"""
    Current Unix timestamp (seconds since the epoch).
    :return: int
"""
return int(time.time()) | 39c05a695bfe4239ebb3fab6f3a5d0967bea6820 | 19,048 |
def get_yourContactINFO(rows2):
"""
Function that returns your personal contact info details
"""
yourcontactINFO = rows2[0]
return yourcontactINFO | beea815755a2e6817fb57a37ccc5aa479455bb81 | 19,049 |
def hafnian(
A, loop=False, recursive=True, rtol=1e-05, atol=1e-08, quad=True, approx=False, num_samples=1000
): # pylint: disable=too-many-arguments
"""Returns the hafnian of a matrix.
For more direct control, you may wish to call :func:`haf_real`,
:func:`haf_complex`, or :func:`haf_int` directly.
Args:
A (array): a square, symmetric array of even dimensions.
loop (bool): If ``True``, the loop hafnian is returned. Default is ``False``.
recursive (bool): If ``True``, the recursive algorithm is used. Note:
the recursive algorithm does not currently support the loop hafnian.
If ``loop=True``, then this keyword argument is ignored.
rtol (float): the relative tolerance parameter used in ``np.allclose``.
atol (float): the absolute tolerance parameter used in ``np.allclose``.
quad (bool): If ``True``, the hafnian algorithm is performed with quadruple precision.
approx (bool): If ``True``, an approximation algorithm is used to estimate the hafnian. Note that
the approximation algorithm can only be applied to matrices ``A`` that only have non-negative entries.
num_samples (int): If ``approx=True``, the approximation algorithm performs ``num_samples`` iterations
for estimation of the hafnian of the non-negative matrix ``A``.
Returns:
np.int64 or np.float64 or np.complex128: the hafnian of matrix A.
"""
# pylint: disable=too-many-return-statements,too-many-branches
input_validation(A, rtol=rtol, atol=atol)
matshape = A.shape
if matshape == (0, 0):
return 1
if matshape[0] % 2 != 0 and not loop:
return 0.0
if np.allclose(np.diag(np.diag(A)), A, rtol=rtol, atol=atol):
if loop:
return np.prod(np.diag(A))
return 0
if matshape[0] % 2 != 0 and loop:
A = np.pad(A, pad_width=((0, 1), (0, 1)), mode="constant")
A[-1, -1] = 1.0
matshape = A.shape
if matshape[0] == 2:
if loop:
return A[0, 1] + A[0, 0] * A[1, 1]
return A[0][1]
if matshape[0] == 4:
if loop:
result = (
A[0, 1] * A[2, 3]
+ A[0, 2] * A[1, 3]
+ A[0, 3] * A[1, 2]
+ A[0, 0] * A[1, 1] * A[2, 3]
+ A[0, 1] * A[2, 2] * A[3, 3]
+ A[0, 2] * A[1, 1] * A[3, 3]
+ A[0, 0] * A[2, 2] * A[1, 3]
+ A[0, 0] * A[3, 3] * A[1, 2]
+ A[0, 3] * A[1, 1] * A[2, 2]
+ A[0, 0] * A[1, 1] * A[2, 2] * A[3, 3]
)
return result
return A[0, 1] * A[2, 3] + A[0, 2] * A[1, 3] + A[0, 3] * A[1, 2]
if approx:
if np.any(np.iscomplex(A)):
raise ValueError("Input matrix must be real")
if np.any(A < 0):
raise ValueError("Input matrix must not have negative entries")
    if np.issubdtype(A.dtype, np.complexfloating):
# array data is complex type
if np.any(np.iscomplex(A)):
# array values contain non-zero imaginary parts
return haf_complex(A, loop=loop, recursive=recursive, quad=quad)
# all array values have zero imaginary parts
return haf_real(np.float64(A.real), loop=loop, recursive=recursive, quad=quad)
if np.issubdtype(A.dtype, np.integer) and not loop:
# array data is an integer type, and the user is not
# requesting the loop hafnian
return haf_int(np.int64(A))
if np.issubdtype(A.dtype, np.integer) and loop:
# array data is an integer type, and the user is
# requesting the loop hafnian. Currently no
# integer function for loop hafnians, have to instead
# convert to float and use haf_real
A = np.float64(A)
return haf_real(
A, loop=loop, recursive=recursive, quad=quad, approx=approx, nsamples=num_samples
) | 0e90f1d372bf7be636ae8eb333d2c59a387b58f1 | 19,050 |
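# Worked example (illustrative); relies on numpy being imported as `np` in this module.
# For the 4x4 all-ones matrix the hafnian counts the three perfect matchings of K4:
# A[0,1]*A[2,3] + A[0,2]*A[1,3] + A[0,3]*A[1,2] = 3.
A = np.ones((4, 4))
assert hafnian(A) == 3.0
# The loop hafnian adds the terms that use diagonal (self-loop) entries, giving 10 here.
assert hafnian(A, loop=True) == 10.0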
def filter_out_nones(data):
"""
Filter out any falsey values from data.
"""
return (l for l in data if l) | 39eb0fb7aafe799246d231c5a7ad8a150ed4341e | 19,051 |
def BytesToGb(size):
"""Converts a disk size in bytes to GB."""
if not size:
return None
if size % constants.BYTES_IN_ONE_GB != 0:
raise calliope_exceptions.ToolException(
'Disk size must be a multiple of 1 GB. Did you mean [{0}GB]?'
.format(size // constants.BYTES_IN_ONE_GB + 1))
return size // constants.BYTES_IN_ONE_GB | 49bc846ce6887fd47ac7be70631bddd8353c72ed | 19,053 |
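# Worked examples (illustrative, shown as comments); they assume constants.BYTES_IN_ONE_GB
# is 2**30, which is an assumption about the surrounding module.
# BytesToGb(None)              -> None  (falsy sizes pass through unchanged)
# BytesToGb(2 * 2**30)         -> 2
# BytesToGb(3 * 2**30 + 512)   -> raises ToolException suggesting [4GB]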
def build_report(drivers: dict, desc=False) -> [[str, str, str], ...]:
"""
Creates a race report: [[Driver.name, Driver.team, Driver.time], ...]
Default order of drivers from best time to worst.
"""
sorted_drivers = sort_drivers_dict(drivers, desc)
return [driver.get_stats for driver in sorted_drivers.values()] | c8c84319c0a14867b21d09c8b66ea434d13786aa | 19,054 |
def add_sites_sheet(ws, cols, lnth):
"""
"""
for col in cols:
cell = "{}1".format(col)
ws[cell] = "='Capacity_km2_MNO'!{}".format(cell)
for col in cols[:2]:
for i in range(2, lnth):
cell = "{}{}".format(col, i)
ws[cell] = "='Capacity_km2_MNO'!{}".format(cell)
for col in cols[2:]:
for i in range(2, lnth):
cell = "{}{}".format(col, i)
part1 = "=MIN(IF('Lookups'!$H$3:$H$250>'Data_km2'!{}".format(cell)
part2 = ",'Lookups'!$E$3:$E$250))*Area!{}".format(cell)
ws[cell] = part1 + part2
ws.formula_attributes[cell] = {'t': 'array', 'ref': "{}:{}".format(cell, cell)}
columns = ['C','D','E','F','G','H','I','J','K','L']
ws = format_numbers(ws, columns, (1, 200), 'Comma [0]', 0)
set_border(ws, 'A1:L{}'.format(lnth-1), "thin", "000000")
return ws | c1db2b585021e8eef445963f8c300d726600e12a | 19,055 |
import itertools
def testBinaryFile(filePath):
"""
Test if a file is in binary format
:param fileWithPath(str): File Path
:return:
"""
file = open(filePath, "rb")
#Read only a couple of lines in the file
binaryText = None
for line in itertools.islice(file, 20):
if b"\x00" in line:
#Return to the beginning of the binary file
file.seek(0)
#Read the file in one step
binaryText = file.read()
break
file.close()
#Return the result
return binaryText | 809a962881335ce0a3a05e341a13b413c381fedf | 19,056 |
def dup_max_norm(f, K):
"""
Returns maximum norm of a polynomial in ``K[x]``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x = ring("x", ZZ)
>>> R.dup_max_norm(-x**2 + 2*x - 3)
3
"""
if not f:
return K.zero
else:
return max(dup_abs(f, K)) | 1ba4744781a3f5cb8e71b59bc6588579f88a6d43 | 19,058 |
def greedy_algorithm(pieces, material_size):
"""Implementation of the First-Fit Greedy Algorithm
Inputs:
pieces - list[] of items to place optimally
material_size - length of Boards to cut from, assumes unlimited supply
Output:
Optimally laid out BoardCollection.contents, which is a list[] of Boards"""
bc = BoardCollection()
bc.append(Board(material_size))
    pieces.sort(reverse=True)  # sort in descending order (largest pieces first)
# we must copy pieces, else our actual list will get modified
for piece in pieces.copy():
piece_added = False # for recording state: did we add this piece to BoardCollection yet?
# if piece fits, add it on that Board, remove it from the list, mark it as such and break out of for loop
for board in bc.contents:
if board.space_remaining >= piece:
board.insert(piece)
pieces.remove(piece)
piece_added = True
break
# if it hasn't been added yet, make a new Board and put it there
if piece_added is False:
bc.append(Board(material_size))
bc.last.insert(piece)
pieces.remove(piece)
return bc.contents | f42b2372b50385c693765d65614d73a8b21f496b | 19,059 |
def start_tv_session(hypes):
"""
Run one evaluation against the full epoch of data.
Parameters
----------
hypes : dict
Hyperparameters
Returns
-------
tuple
(sess, saver, summary_op, summary_writer, threads)
"""
# Build the summary operation based on the TF collection of Summaries.
summary_op = tf.merge_all_summaries()
# Create a saver for writing training checkpoints.
if 'keep_checkpoint_every_n_hours' in hypes['solver']:
kc = hypes['solver']['keep_checkpoint_every_n_hours']
else:
kc = 10000.0
saver = tf.train.Saver(max_to_keep=utils.cfg.max_to_keep,
keep_checkpoint_every_n_hours=kc)
# Create a session for running Ops on the Graph.
sess = tf.Session()
# Run the Op to initialize the variables.
init = tf.initialize_all_variables()
sess.run(init)
# Start the queue runners.
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
# Instantiate a SummaryWriter to output summaries and the Graph.
summary_writer = tf.train.SummaryWriter(hypes['dirs']['output_dir'],
graph=sess.graph)
return sess, saver, summary_op, summary_writer, coord, threads | f54cfa17871badf2cbe92200144a758d0fc2dc41 | 19,060 |
def factor_size(value, factor):
"""
Factors the given thumbnail size. Understands both absolute dimensions
and percentages.
"""
if type(value) is int:
size = value * factor
return str(size) if size else ''
if value[-1] == '%':
value = int(value[:-1])
return '{0}%'.format(value * factor)
size = int(value) * factor
return str(size) if size else '' | 41b061fb368d56ba18b52cd7a6a3322292671d83 | 19,061 |
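# Worked examples for `factor_size` (illustrative): absolute sizes are multiplied,
# percentages keep their '%' suffix, and a zero result collapses to ''.
assert factor_size(100, 2) == '200'
assert factor_size('50%', 2) == '100%'
assert factor_size('40', 3) == '120'
assert factor_size(0, 5) == ''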
def categoryProfile(request, pk):
"""
Displays the profile of a :class:`gestion.models.Category`.
pk
The primary key of the :class:`gestion.models.Category` to display profile.
"""
category = get_object_or_404(Category, pk=pk)
return render(request, "gestion/category_profile.html", {"category": category}) | 87318332c7e317a49843f5becda8196951743ce5 | 19,062 |
def eoms(_x, t, _params):
"""Rigidy body equations of motion.
_x is an array/list in the following order:
q1: Yaw q2: Lean |-(Euler 3-1-2 angles used to orient A
q3: Pitch /
q4: N[1] displacement of mass center.
q5: N[2] displacement of mass center.
q6: N[3] displacement of mass center.
u1: A[1] measure number of angular velocity
u2: A[2] measure number of angular velocity
u3: A[3] measure number of angular velocity
u4: N[1] velocity of mass center.
u5: N[2] velocity of mass center.
u6: N[3] velocity of mass center.
_params is an array/list in the following order:
    m: Mass of the rigid body.
g: Gravitational constant.
I11: Principal moment of inertia about A[1]
I22: Principal moment of inertia about A[2]
I33: Principal moment of inertia about A[3]
"""
# Unpack function arguments
q1, q2, q3, q4, q5, q6, u1, u2, u3, u4, u5, u6 = _x
# Unpack function parameters
m, g, I11, I22, I33 = _params
# Trigonometric functions
c2 = cos(q2)
c3 = cos(q3)
s3 = sin(q3)
t2 = tan(q2)
# Calculate return values
q1d = c3*u3/c2 - s3*u1/c2
q2d = c3*u1 + s3*u3
q3d = s3*t2*u1 - c3*t2*u3 + u2
q4d = u4
q5d = u5
q6d = u6
u1d = (I22 - I33)*u2*u3/I11
u2d = (I33 - I11)*u1*u3/I22
u3d = -(I22 - I11)*u1*u2/I33
u4d = 0
u5d = 0
u6d = g
# Return calculated values
return [q1d, q2d, q3d, q4d, q5d, q6d, u1d, u2d, u3d, u4d, u5d, u6d] | 3868411e2c082617311f59f3b39deec9d3a370fa | 19,063 |
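# Integration sketch (not part of the original snippet); assumes scipy and numpy are
# available and that cos, sin and tan used above come from math or numpy in this module.
# The parameter values below are arbitrary.
import numpy as np
from scipy.integrate import odeint

params = (1.0, 9.81, 0.10, 0.20, 0.30)             # m, g, I11, I22, I33
x0 = [0.0] * 6 + [0.1, 0.2, 0.3, 0.0, 0.0, 0.0]    # at rest with small initial body rates
t = np.linspace(0.0, 2.0, 201)
states = odeint(eoms, x0, t, args=(params,))       # shape (201, 12): angles, positions, rates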