content (string, 35-762k chars) | sha1 (string, 40 chars) | id (int64, 0-3.66M)
---|---|---|
import requests
from django.conf import settings
from rest_framework.exceptions import ValidationError as DRFValidationError
from social_django.utils import load_strategy
def zoom_api_call(user, verb, url, *args, **kwargs):
"""
Perform an API call to Zoom with various checks.
If the call returns a token expired event,
refresh the token and try the call one more time.
"""
if not settings.SOCIAL_AUTH_ZOOM_OAUTH2_KEY:
raise DRFValidationError(
"Server is not configured with Zoom OAuth2 credentials."
)
if not user.is_authenticated:
raise DRFValidationError("You are not authenticated.")
social = user.social_auth.filter(provider="zoom-oauth2").first()
if social is None:
raise DRFValidationError("You have not linked your Zoom account yet.")
is_retry = "retry" in kwargs
if is_retry:
del kwargs["retry"]
out = requests.request(
verb,
url.format(uid=social.uid),
*args,
headers={"Authorization": f"Bearer {social.get_access_token(load_strategy())}"},
**kwargs,
)
if out.status_code == 204:
return out
# check for token expired event
data = out.json()
if data.get("code") == 124 and not is_retry:
social.refresh_token(load_strategy())
kwargs["retry"] = True
return zoom_api_call(user, verb, url, *args, **kwargs)
return out | 5c359a4a7acd69a942aedcb78fc156b8218ab239 | 10,725 |
from itertools import chain
def addHtmlImgTagExtension(notionPyRendererCls):
    """A decorator that adds the image tag extension to the argument list. The
decorator pattern allows us to chain multiple extensions. For example, we
can create a renderer with extension A, B, C by writing:
addAExtension(addBExtension(addCExtension(notionPyRendererCls)))
"""
def newNotionPyRendererCls(*extraExtensions):
new_extension = [HTMLBlock, HTMLSpan]
return notionPyRendererCls(*chain(new_extension, extraExtensions))
return newNotionPyRendererCls | 914d2395cdf9c5f52f94eef80e3f7469a70eb0ae | 10,727 |
def get_symmetry_projectors(character_table, conjugacy_classes, print_results=False):
"""
    :param character_table: each row gives the characters of a different irreducible rep. Each column
      corresponds to a different conjugacy class
:param conjugacy_classes: List of lists of conjugacy class elements
:param print_results:
:return projs:
"""
if not validate_char_table(character_table, conjugacy_classes):
raise Exception("invalid character table/conjugacy class combination")
# columns (or rows, since orthogonal mat) represent basis states that can be transformed into one another by symmetries
states_related_by_symm = sum([sum([np.abs(g) for g in cc]) for cc in conjugacy_classes])
# only need sums over conjugacy classes to build projectors
class_sums = [sum(cc) for cc in conjugacy_classes]
projs = [reduce_symm_projector(
sum([np.conj(ch) * cs for ch, cs in zip(chars, class_sums)]), chars[0], states_related_by_symm, print_results=print_results)
for chars in character_table]
# test projector size
proj_to_dims = np.asarray([p.shape[0] for p in projs]).sum()
proj_from_dims = projs[0].shape[1]
if proj_to_dims != proj_from_dims:
raise Exception("total span of all projectors was %d, but expected %d." % (proj_to_dims, proj_from_dims))
return projs | 8780ef1a9ebb3f6e6960d04d07677e323e7565b9 | 10,729 |
from typing import List
def is_permutation_matrix(matrix: List[List[bool]]) -> bool:
"""Returns whether the given boolean matrix is a permutation matrix."""
    # every row and every column must contain exactly one True entry
    return (all(sum(row) == 1 for row in matrix) and
            all(sum(col) == 1 for col in zip(*matrix))) | b53d6f4ba6e8e1ba445783350de831b614aa187e | 10,730 |
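A quick self-contained check of the predicate above (added for illustration, assuming is_permutation_matrix is in scope); the 3x3 cyclic shift is a permutation matrix, while a matrix whose rows hit the same column is not:
shift = [[False, True, False],
         [False, False, True],
         [True, False, False]]
broken = [[True, False],
          [True, False]]  # two rows map onto the same column
print(is_permutation_matrix(shift))   # expected: True
print(is_permutation_matrix(broken))  # expected: False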
import torch
def DPT_Hybrid(pretrained=True, **kwargs):
""" # This docstring shows up in hub.help()
MiDaS DPT-Hybrid model for monocular depth estimation
pretrained (bool): load pretrained weights into model
"""
model = DPTDepthModel(
path=None,
backbone="vitb_rn50_384",
non_negative=True,
)
if pretrained:
checkpoint = (
"https://github.com/intel-isl/MiDaS/releases/download/v3/dpt_hybrid-midas-501f0c75.pt"
)
state_dict = torch.hub.load_state_dict_from_url(
checkpoint, map_location=torch.device('cpu'), progress=True, check_hash=True
)
model.load_state_dict(state_dict)
return model | 0a5cb661e9e0f08daae73b8c49ba8324e0cfb3e9 | 10,731 |
def show_counts(input_dict):
"""Format dictionary count information into a string
Args:
input_dict (dictionary): input keys and their counts
Return:
string: formatted output string
"""
out_s = ''
in_dict_sorted = {k: v for k, v in sorted(input_dict.items(), key=lambda item: item[1], reverse=True)}
for idx, (k, v) in enumerate(in_dict_sorted.items()):
out_s += '\t{}:\t{} ({})\n'.format(idx, k, v)
out_s += '\n'
return out_s | 078d1f7599b22741f474c0e6d1b02f44edfc1f9b | 10,732 |
from itertools import cycle
def encipher_railfence(message, rails):
"""
Performs Railfence Encryption on plaintext and returns ciphertext
Examples
========
>>> from sympy.crypto.crypto import encipher_railfence
>>> message = "hello world"
>>> encipher_railfence(message,3)
'horel ollwd'
Parameters
==========
message : string, the message to encrypt.
rails : int, the number of rails.
Returns
=======
The Encrypted string message.
References
==========
.. [1] https://en.wikipedia.org/wiki/Rail_fence_cipher
"""
r = list(range(rails))
p = cycle(r + r[-2:0:-1])
return ''.join(sorted(message, key=lambda i: next(p))) | b1a56cdb255065b18caa4ba6da1fa11759f87152 | 10,733 |
import inspect
def format_signature(name: str, signature: inspect.Signature) -> str:
"""Formats a function signature as if it were source code.
Does not yet handle / and * markers.
"""
params = ', '.join(
format_parameter(arg) for arg in signature.parameters.values())
if signature.return_annotation is signature.empty:
return_annotation = ''
else:
return_annotation = ' -> ' + _annotation_name(
signature.return_annotation)
return f'{name}({params}){return_annotation}' | a14fde11850d420d15d2f9d7f3ac4cbe9aee03cc | 10,734 |
def extract_ratios_from_ddf(ddf):
"""The same as the df version, but works with
dask dataframes instead."""
    # we basically abuse map_partitions' ability to expand indexes for lack of a working
# groupby(level) in dask
return ddf.map_partitions(extract_ratios_from_df, meta={'path': str, 'ratio': str, 'url': str}).clear_divisions() | fcb816677d3d0816b2327d458a3fdd1b820bac9e | 10,735 |
def check_if_prime(number):
"""checks if number is prime
Args:
number (int):
Raises:
TypeError: if number of type float
Returns:
[bool]: if number prime returns ,True else returns False
"""
if type(number) == float:
raise TypeError("TypeError: entered float type")
if number > 1 :
for i in range( 2, int(number / 2) + 1 ):
if number % i == 0:
return False
return True
else:
return False | 0a15a4f133b12898b32b1f52a317939cf5e30d34 | 10,736 |
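A short usage sketch (added for illustration, assuming check_if_prime is in scope); the printed values follow directly from the function's own docstring:
print(check_if_prime(2))    # True
print(check_if_prime(15))   # False (3 * 5)
print(check_if_prime(97))   # True
try:
    check_if_prime(7.0)
except TypeError as exc:
    print(exc)              # floats are rejected with a TypeError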
import inspect
def get_signatures() -> dict:
"""
Helper method used to identify the valid arguments that can be passed
to any of the pandas IO functions used by the program
:return: Returns a dictionary containing the available arguments for each pandas IO method
"""
# Creates an empty dictionary to collect the function names and signatures
sigreturn = {}
# Loops over the functions that are used for IO operations
for io in PANDAS_IO:
# Gets the name of the function in question
funcname = io.__name__
# Gets the list of arguments that the function can take
args = list(inspect.signature(io).parameters.keys())
# Adds the arguments to the dictionary with the function name as the key
sigreturn[funcname] = args
# Returns the dictionary object
return sigreturn | 243b798e1c4c57a89749fff1d33be660ab4e973b | 10,737 |
from typing import List
def batch_answer_same_context(questions: List[str], context: str) -> List[str]:
"""Answers the questions with the given context.
:param questions: The questions to answer.
:type questions: List[str]
:param context: The context to answer the questions with.
:type context: str
:return: The answers.
:rtype: List[str]
"""
return _batch_answer_same_context[get_mode()](questions, context) | b58df72f1252427ea3e58e2f8379b8e77ea55273 | 10,740 |
import typing
def dynamic_embedding_lookup(keys: tf.Tensor,
config: de_config_pb2.DynamicEmbeddingConfig,
var_name: typing.Text,
service_address: typing.Text = "",
skip_gradient_update: bool = False,
timeout_ms: int = -1) -> tf.Tensor:
"""Returns the embeddings of from given keys.
Args:
keys: A string `Tensor` of shape [batch_size] or [batch_size,
max_sequence_length] where an empty string would be mapped to an all zero
embedding.
config: A DynamicEmbeddingConfig proto that configures the embedding.
var_name: A unique name for the given embedding.
service_address: The address of a knowledge bank service. If empty, the
value passed from --kbs_address flag will be used instead.
skip_gradient_update: A boolean indicating if gradient update is needed.
    timeout_ms: Timeout in milliseconds for the connection. If negative, never
      times out.
Returns:
A `Tensor` of shape with one of below:
- [batch_size, config.embedding_dimension] if the input Tensor is 1D, or
- [batch_size, max_sequence_length, config.embedding_dimension] if the
input is 2D.
Raises:
ValueError: If name is not specified.
"""
if not var_name:
raise ValueError("Must specify a valid var_name.")
  # If skip_gradient_update is true, create a dummy variable so that the
# gradients can be passed in.
if skip_gradient_update:
grad_placeholder = tf.constant(0.0)
else:
grad_placeholder = tf.Variable(0.0)
context.add_to_collection(var_name, config)
resource = gen_carls_ops.dynamic_embedding_manager_resource(
config.SerializeToString(), var_name, service_address, timeout_ms)
return gen_carls_ops.dynamic_embedding_lookup(keys, grad_placeholder,
resource,
config.embedding_dimension) | c1d69548e60ff00e55ab04fe83607cae31b6558c | 10,742 |
def register_unary_op(registered_name, operation):
"""Creates a `Transform` that wraps a unary tensorflow operation.
If `registered_name` is specified, the `Transform` is registered as a member
function of `Series`.
Args:
registered_name: the name of the member function of `Series` corresponding
to the returned `Transform`.
operation: a unary TensorFlow operation.
"""
doc = DOC_FORMAT_STRING.format(operation.__name__, operation.__doc__)
@property
def name(self):
return operation.__name__
@property
def input_valency(self):
return 1
@property
def _output_names(self):
return "output"
def _apply_transform(self, input_tensors):
input_tensor = input_tensors[0]
if isinstance(input_tensor, ops.SparseTensor):
result = ops.SparseTensor(input_tensor.indices,
operation(input_tensor.values),
input_tensor.shape)
else:
result = operation(input_tensor)
# pylint: disable=not-callable
return self.return_type(result)
cls = type(operation.__name__,
(transform.Transform,),
{"name": name,
"__doc__": doc,
"input_valency": input_valency,
"_output_names": _output_names,
"_apply_transform": _apply_transform})
series.Series.register_unary_op(registered_name)(cls) | c0fb56a8e93936a4c45e199e28889ccef67d19de | 10,743 |
def add_climatology(data, clim):
"""Add 12-month climatology to a data array with more times.
Suppose you have anomalies data and you want to add back its
climatology to it. In this sense, this function does the opposite
of `get_anomalies`. Though in this case there is no way to obtain
the climatology so it has to be provided.
Parameters
----------
data: xarray.DataArray
Input must have a named `time` coordinate.
clim: xarray.DataArray
The climatology must have the same spatial dimensions as
`data`. Naturally, the time dimension can differ. The values
of this array will be replicated as many times as `data` has.
Returns
-------
xarray.DataArray with both fields added.
""" # noqa
# make sure shapes are correct
ddims = len(data.dims)
cdims = len(clim.dims)
if ddims != cdims:
msg = 'both data arrays must have same dimensions'
raise ValueError(msg)
# get number of years in dataarray
years = np.unique(data.time.dt.year)
nyear = years.size
# get tiled shape
tshape = np.ones(ddims, dtype=int)
tshape[0] = nyear
# create tiled climatology
tclim = np.tile(clim.values, tshape)
# add climatology to data array
new = data.copy()
new.values = np.array(data.values) + tclim
return new | 28845fc1455bc317d158b503ed07a7d0c1af5655 | 10,744 |
from typing import List
def already_exists(statement: str, lines: List[str]) -> bool:
"""
Check if statement is in lines
"""
return any(statement in line.strip() for line in lines) | 194d8c6c48609f5a2accacdb2ed0857815d48d1d | 10,745 |
import random
def uniform(lower_list, upper_list, dimensions):
"""Fill array """
if hasattr(lower_list, '__iter__'):
return [random.uniform(lower, upper)
for lower, upper in zip(lower_list, upper_list)]
else:
return [random.uniform(lower_list, upper_list)
for _ in range(dimensions)] | 59bcb124f0d71fd6e5890cd1d6c200319ab5910e | 10,746 |
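A small usage sketch covering both branches of uniform above (added for illustration); the seed is only there to make the output reproducible:
import random
random.seed(0)
# per-dimension bounds: one sample per (lower, upper) pair
print(uniform([0.0, 10.0], [1.0, 20.0], dimensions=2))
# scalar bounds: `dimensions` samples drawn from the same interval
print(uniform(-1.0, 1.0, dimensions=3))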
import torch
def prepare_data(files, voxel_size, device='cuda'):
"""
Loads the data and prepares the input for the pairwise registration demo.
Args:
files (list): paths to the point cloud files
"""
feats = []
xyz = []
coords = []
n_pts = []
for pc_file in files:
pcd0 = o3d.io.read_point_cloud(pc_file)
xyz0 = np.array(pcd0.points)
# Voxelization
sel0 = ME.utils.sparse_quantize(xyz0 / voxel_size, return_index=True)
# Make point clouds using voxelized points
xyz0 = xyz0[sel0[1],:]
# Get features
npts0 = xyz0.shape[0]
xyz.append(to_tensor(xyz0))
n_pts.append(npts0)
feats.append(np.ones((npts0, 1)))
coords.append(np.floor(xyz0 / voxel_size))
coords_batch0, feats_batch0 = ME.utils.sparse_collate(coords, feats)
data = {'pcd0': torch.cat(xyz, 0).float(), 'sinput0_C': coords_batch0,
'sinput0_F': feats_batch0.float(), 'pts_list': torch.tensor(n_pts)}
return data | 1c11444d4f6ca66396651bb49b8c655bedf6b8fa | 10,747 |
def reshape(box, new_size):
"""
box: (N, 4) in y1x1y2x2 format
new_size: (N, 2) stack of (h, w)
"""
box[:, :2] = new_size * box[:, :2]
box[:, 2:] = new_size * box[:, 2:]
return box | 56fbeac7c785bd81c7964d7585686e11864ff034 | 10,748 |
import json
def sort_actions(request):
"""Sorts actions after drag 'n drop.
"""
action_list = request.POST.get("objs", "").split('&')
if len(action_list) > 0:
pos = 10
for action_str in action_list:
action_id = action_str.split('=')[1]
action_obj = Action.objects.get(pk=action_id)
action_obj.position = pos
action_obj.save()
pos = pos + 10
result = json.dumps({
"message": _(u"The actions have been sorted."),
}, cls=LazyEncoder)
return HttpResponse(result, content_type='application/json') | 80f2042858f7a0ecad3663ae4bf50ad73935be3b | 10,749 |
def fetch_file(parsed_url, config):
"""
Fetch a file from Github.
"""
if parsed_url.scheme != 'github':
        raise ValueError(f'URL scheme must be "github" but is "{parsed_url.scheme}"')
ghcfg = config.get('github')
if not ghcfg:
raise BuildRunnerConfigurationError('Missing configuration for github in buildrunner.yaml')
nlcfg = ghcfg.get(parsed_url.netloc)
if not nlcfg:
gh_cfgs = ', '.join(ghcfg.keys())
raise BuildRunnerConfigurationError(
f'Missing github configuration for {parsed_url.netloc} in buildrunner.yaml'
f' - known github configurations: {gh_cfgs}'
)
ver = nlcfg.get('version')
# NOTE: potentially the v3_fetch_file() works for other github API versions.
if ver == 'v3':
contents = v3_fetch_file(parsed_url, nlcfg)
else:
raise NotImplementedError(f'No version support for github API version {ver}')
return contents | c688a68aeaa4efa0cda21f3b58a94075e4555004 | 10,750 |
import calendar
def number_of_days(year: int, month: int) -> int:
"""
Gets the number of days in a given year and month
:param year:
:type year:
:param month:
:type month:
:return:
:rtype:
"""
assert isinstance(year, int) and 0 <= year
assert isinstance(month, int) and 0 < month <= 12
c = calendar.Calendar()
days = c.itermonthdays(year, month)
days = set(days)
days.remove(0)
return len(days) | d585f037292eef36ecc753fbf702035577513a15 | 10,751 |
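A quick check of number_of_days above against well-known month lengths (added for illustration):
print(number_of_days(2024, 2))   # 29 (leap year)
print(number_of_days(2023, 2))   # 28
print(number_of_days(2023, 12))  # 31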
from functools import reduce
def medstddev(data, mask=None, medi=False, axis=0):
"""
This function computes the stddev of an n-dimensional ndarray with
respect to the median along a given axis.
Parameters:
-----------
data: ndarray
        An n-dimensional array from which to calculate the median standard
        deviation.
mask: ndarray
Mask indicating the good and bad values of data.
medi: boolean
If True return a tuple with (stddev, median) of data.
axis: int
        The axis along which the median std deviation is calculated.
Examples:
--------
>>> import medstddev as m
>>> b = np.array([[1, 3, 4, 5, 6, 7, 7],
[4, 3, 4, 15, 6, 17, 7],
[9, 8, 7, 6, 5, 4, 3]])
>>> c = np.array([b, 1-b, 2+b])
>>> std, med = m.medstddev(c, medi=True, axis=2)
>>> print(median(c, axis=2))
[[ 5. 6. 6.]
[-4. -5. -5.]
[ 7. 8. 8.]]
>>> print(med)
[[ 5. 6. 6.]
[-4. -5. -5.]
[ 7. 8. 8.]]
>>> print(std)
[[ 2.23606798 6.05530071 2.1602469 ]
[ 2.23606798 6.05530071 2.1602469 ]
[ 2.23606798 6.05530071 2.1602469 ]]
>>> # take a look at the first element of std
>>> d = c[0,0,:]
>>> print(d)
[1, 3, 4, 5, 6, 7, 7]
>>> print(m.medstddev1d(d))
2.2360679775
>>> # See medstddev1d for masked examples
Modification history:
---------------------
2010-11-05 patricio Written by Patricio Cubillos
[email protected]
"""
# flag to return median value
retmed = medi
# get shape
shape = np.shape(data)
# default mask, all good.
if mask is None:
mask = np.ones(shape)
# base case: 1D
if len(shape) == 1:
return medstddev1d(data, mask, retmed)
newshape = np.delete(shape, axis)
# results
std = np.zeros(newshape)
medi = np.zeros(newshape)
# reduce dimensions until 1D case
reduce(medstddev1d, data, mask, std, medi, axis)
# return statement:
if retmed:
return (std, medi)
return std | bbab9eede714d7c64344af271f8b6e817723d837 | 10,753 |
def load_npz(filename: FileLike) -> JaggedArray:
""" Load a jagged array in numpy's `npz` format from disk.
Args:
filename: The file to read.
See Also:
save_npz
"""
with np.load(filename) as f:
try:
data = f["data"]
shape = f["shape"]
return JaggedArray(data, shape)
except KeyError:
msg = "The file {!r} does not contain a valid jagged array".format(filename)
raise RuntimeError(msg) | 640add32dab0b7bd12784a7a29331b59521a0f8a | 10,754 |
import re
def _egg_link_name(raw_name: str) -> str:
"""
Convert a Name metadata value to a .egg-link name, by applying
the same substitution as pkg_resources's safe_name function.
Note: we cannot use canonicalize_name because it has a different logic.
"""
return re.sub("[^A-Za-z0-9.]+", "-", raw_name) + ".egg-link" | 923ff815b600b95ccb5750a8c1772ee9156e53b2 | 10,755 |
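For illustration, the substitution above collapses any run of characters outside [A-Za-z0-9.] into a single dash before appending the suffix:
print(_egg_link_name("My_Package"))     # My-Package.egg-link
print(_egg_link_name("foo.bar baz!!"))  # foo.bar-baz-.egg-link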
def my_view(request):
"""Displays info details from nabuco user"""
owner, c = User.objects.get_or_create(username='nabuco')
# Owner of the object has full permissions, otherwise check RBAC
if request.user != owner:
# Get roles
roles = get_user_roles(request.user, owner)
# Get operation
op, c = RBACOperation.objects.get_or_create(name='display')
# Per-model permission:
# Has user permission to display groups that nabuco belongs to?
if not RBACGenericPermission.objects.get_permission(owner, Group, op, roles):
return HttpResponseForbidden("Sorry, you are not allowed to see nabuco groups")
# Per-object permission:
# Has user permission to see this group which nabuco belong to?
group_inst = get_object_or_404(Group, name='punks')
if not RBACPermission.objects.get_permission(owner, owner, op, roles):
return HttpResponseForbidden("Sorry, you are not allowed to see this group details")
return render_to_response("base.html",
{'owner': owner,
'model': Group,
'model_inst': owner,
'operation': op,
'roles': roles},
context_instance=RequestContext(request)) | 55c3443f24d56b6ea22e02c9685d6057dfc79c7e | 10,756 |
def handler500(request):
"""
Custom 500 view
:param request:
:return:
"""
return server_error(request, template_name='base/500.html') | 91db9daeaac6f7f6b2207a3c8be7fa09f932b50f | 10,757 |
import numpy as np
def get_badpixel_mask(shape, bins):
"""Get the mask of bad pixels and columns.
Args:
shape (tuple): Shape of image.
bins (tuple): CCD bins.
Returns:
:class:`numpy.ndarray`: 2D binary mask, where bad pixels are marked with
*True*, others *False*.
The bad pixels are found *empirically*.
"""
    mask = np.zeros(shape, dtype=bool)
if bins == (1, 1) and shape == (4136, 4096):
ny, nx = shape
mask[349:352, 627:630] = True
mask[349:ny//2, 628] = True
mask[1604:ny//2, 2452] = True
mask[280:284,3701] = True
mask[274:ny//2, 3702] = True
mask[272:ny//2, 3703] = True
mask[274:282, 3704] = True
mask[1720:1722, 3532:3535] = True
mask[1720, 3535] = True
mask[1722, 3532] = True
mask[1720:ny//2,3533] = True
mask[347:349, 4082:4084] = True
mask[347:ny//2,4083] = True
mask[ny//2:2631, 1909] = True
    else:
        raise ValueError('No bad pixel information for this CCD size.')
return mask | 2e636aef86d2462815683a975ef99fbcdeeaee19 | 10,758 |
import six
import yaml
def maybe_load_yaml(item):
"""Parses `item` only if it is a string. If `item` is a dictionary
it is returned as-is.
Args:
item:
Returns: A dictionary.
Raises:
ValueError: if unknown type of `item`.
"""
if isinstance(item, six.string_types):
return yaml.load(item)
elif isinstance(item, dict):
return item
else:
raise ValueError("Got {}, expected string or dict", type(item)) | 9288012f0368e2b087c9ef9cd9ffaca483b4f11b | 10,760 |
import numpy as np
def histeq(im,nbr_bins=256):
"""histogram equalize an image"""
#get image histogram
im = np.abs(im)
    imhist,bins = np.histogram(im.flatten(),nbr_bins,density=True)
cdf = imhist.cumsum() #cumulative distribution function
cdf = 255 * cdf / cdf[-1] #normalize
#use linear interpolation of cdf to find new pixel values
im2 = np.interp(im.flatten(),bins[:-1],cdf)
return im2.reshape(im.shape) | bbb0e758e519a7cfcc866e3193cd1ff26bf5efbc | 10,761 |
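A minimal smoke test for the equalization above (added for illustration), using a synthetic low-contrast image; the exact pixel values are not meaningful, only the stretched range:
import numpy as np
np.random.seed(0)
low_contrast = np.random.uniform(100, 120, size=(64, 64))  # values bunched in a narrow band
equalized = histeq(low_contrast)
print(low_contrast.min(), low_contrast.max())  # roughly 100 .. 120
print(equalized.min(), equalized.max())        # spread out towards 0 .. 255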
def txgamma(v, t, gamma, H0):
"""
Takes in:
v = values at z=0;
t = list of redshifts to integrate over;
gamma = interaction term.
Returns a function f = [dt/dz, d(a)/dz,
d(e'_m)/dz, d(e'_de)/dz,
d(z)/dz,
d(dl)/dz]
"""
(t, a, ombar_m, ombar_de, z, dl) = v #omegam, omegade, z, dl) = v
Hz = H0 * (ombar_m + ombar_de)**(1/2)
if np.isnan(Hz):
print('txgamma')
print('z = %s, Hz = %s, gamma = %s, ombar_m = %s, ombar_de = %s'
%(z, Hz, gamma, ombar_m, ombar_de))
irate = (gamma/(-t+0.0001))*(1-ombar_de/(ombar_de+ombar_m)) /(1+z)/Hz
# first derivatives of functions I want to find:
f = [# dt/dz (= f.d wrt z of time)
-1/((1+z) * Hz),
# d(a)/dz (= f.d wrt z of scale factor)
-(1+z)**(-2),
# d(ombar_m)/dz (= f.d wrt z of density_m(t) / crit density(t0))
3*ombar_m /(1+z) - irate,
         # d(ombar_de)/dz (= f.d wrt z of density_de(t) / crit density(t0))
irate,
# d(z)/dz (= f.d wrt z of redshift)
1,
         # d(dl)/dz (= f.d wrt z of luminosity distance)
1/Hz] # H + Hdz*(1+z)
return f | a1506ea0b54f468fd63cd2a8bd96e8a9c46a92f3 | 10,762 |
def text_pb(tag, data, description=None):
"""Create a text tf.Summary protobuf.
Arguments:
tag: String tag for the summary.
data: A Python bytestring (of type bytes), a Unicode string, or a numpy data
array of those types.
description: Optional long-form description for this summary, as a `str`.
Markdown is supported. Defaults to empty.
Raises:
TypeError: If the type of the data is unsupported.
Returns:
A `tf.Summary` protobuf object.
"""
try:
        tensor = tensor_util.make_tensor_proto(data, dtype=object)
except TypeError as e:
raise TypeError("tensor must be of type string", e)
summary_metadata = metadata.create_summary_metadata(
display_name=None, description=description
)
summary = summary_pb2.Summary()
summary.value.add(tag=tag, metadata=summary_metadata, tensor=tensor)
return summary | 43d652ebb9ab1d52c0514407a3c47d56816cbb65 | 10,763 |
def sanitize_input(args: dict) -> dict:
"""
Gets a dictionary for url params and makes sure it doesn't contain any illegal keywords.
:param args:
:return:
"""
if "mode" in args:
del args["mode"] # the mode should always be detailed
trans = str.maketrans(ILLEGAL_CHARS, ' ' * len(ILLEGAL_CHARS))
for k, v in args.copy().items():
if isinstance(v, str): # we only need to verify v because k will never be entered by a user
args[k] = v.translate(trans)
return args | 063d314cb3800d24606b56480ce63b7dda3e8e51 | 10,764 |
def sum_to(containers, goal, values_in_goal=0):
"""
Find all sets of containers which sum to goal, store the number of
containers used to reach the goal in the sizes variable.
"""
if len(containers) == 0:
return 0
first = containers[0]
remain = containers[1:]
if first > goal:
with_first = 0
elif first == goal:
sizes.append(values_in_goal + 1)
with_first = 1
else:
with_first = sum_to(remain, goal-first, values_in_goal + 1)
return with_first + sum_to(remain, goal, values_in_goal) | db5297929332a05606dec033318ca0d7c9661b1d | 10,765 |
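A short usage sketch; `sizes` is the module-level list the docstring refers to, declared here as an assumption so the example is self-contained:
sizes = []  # collects the number of containers in each combination that hits the goal
combinations = sum_to([1, 2, 3], goal=3)
print(combinations)  # 2 -> the subsets {1, 2} and {3}
print(sizes)         # [2, 1] -> their respective sizes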
def rt2add_enc_v1(rt, grid):
"""
:param rt: n, k, 2 | log[d, tau] for each ped (n,) to each vic (k,)
modifies rt during clipping to grid
:param grid: (lx, ly, dx, dy, nx, ny)
lx, ly | lower bounds for x and y coordinates of the n*k (2,) in rt
dx, dy | step sizes of the regular grid
nx, ny | number of grid points in each coordinate (so nx*ny total)
:return: n, m | m = nx*ny, encoding for each ped
uses row-major indexing for the flattened (2d) indices
for nx 'rows' and ny 'columns'
"""
n, k = rt.shape[:2]
nx, ny = np.array(grid[-2:]).astype(np.int32)
m = nx * ny
Z = np.zeros((n, m), dtype=np.float32)
clip2grid(rt, grid)
# n, k
a_x = np.empty((n, k), dtype=np.int32)
r_x = np.empty((n, k), dtype=np.float32)
np.divmod(rt[..., 0] - grid[0], grid[2], a_x, r_x, casting='unsafe')
th_x = 1 - r_x / grid[2]
a_y = np.empty((n, k), dtype=np.int32)
r_y = np.empty((n, k), dtype=np.float32)
np.divmod(rt[..., 1] - grid[1], grid[3], a_y, r_y, casting='unsafe')
th_y = 1 - r_y / grid[3]
# 1d inds for m, | n, k
c_x = ny * a_x + a_y
offsets = np.array([0, ny, 1, ny+1], dtype=np.int32)
# n, k, 4
inds = c_x[..., np.newaxis] + offsets[np.newaxis, :]
vals = np.dstack((th_x*th_y, (1-th_x)*th_y, th_x*(1-th_y), (1-th_x)*(1-th_y)))
row_inds = np.repeat(np.arange(n, dtype=np.int32), 4*k)
np.add.at(Z, (row_inds, inds.ravel()), vals.ravel())
return Z | 3af0b8e15fdcc4d9bbeb604faffbd45cf013e86b | 10,766 |
import heapq
def draw_with_replacement(heap):
"""Return ticket drawn with replacement from given heap of tickets.
Args:
heap (list): an array of Tickets, arranged into a heap using heapq.
Such a heap is also known as a 'priority queue'.
Returns:
the Ticket with the least ticket number in the heap.
Side-effects:
the heap maintains its size, as the drawn ticket is replaced
by the next ticket for that id.
Example:
>>> x = Ticket('0.234', 'x', 2)
>>> y = Ticket('0.354', 'y', 1)
>>> z = Ticket('0.666', 'z', 2)
>>> heap = []
>>> heapq.heappush(heap, x)
>>> heapq.heappush(heap, y)
>>> heapq.heappush(heap, z)
>>> heap
[Ticket(ticket_number='0.234', id='x', generation=2),
Ticket(ticket_number='0.354', id='y', generation=1),
Ticket(ticket_number='0.666', id='z', generation=2)]
>>> draw_with_replacement(heap)
Ticket(ticket_number='0.234', id='x', generation=2)
>>> heap
[Ticket(ticket_number='0.354', id='y', generation=1),
Ticket(ticket_number='0.666', id='z', generation=2),
Ticket(ticket_number='0.54783080274940261636464668679572\
2512609112766306951592422621788875312684400211',
id='x', generation=3)]
"""
ticket = heapq.heappop(heap)
heapq.heappush(heap, next_ticket(ticket))
return ticket | 06eb982ecf32090da51f02356a6996429773e233 | 10,767 |
def get_string(entry):
"""
    Return the portion of `entry` before the " / " separator, with trailing
    whitespace stripped.
    :param entry: the raw entry string, e.g. "value / comment"
    :return: the value part of the entry
"""
value = entry.split(" / ")[0].rstrip()
return value | 38a1dc41fd06b49aa8724cc783466b485c9017fb | 10,769 |
import collections
def get_top_words(words):
"""
    Get a list of the most frequently occurring words, with their counts
    :param words: list of words to analyse
    :return: [(word1, number of occurrences of word1), ...]
"""
return collections.Counter(words).most_common() | 632317f57e734a93b6f3f20dfef001028b40c6b3 | 10,771 |
def get_slope(x, y, L):
"""
    Return the slope of a linear fit to the first L points of the time series (x, y).
"""
    try:
        x = np.array(x).reshape(-1, 1)
        y = np.array(y).reshape(-1, 1)
        lr = LinearRegression()
        lr.fit(x[:L], y[:L])
        return lr.coef_[0][0]
    except Exception:
        return 0 | 23f3419049ee1372d5963823e2f52b895bc766e8 | 10,772 |
from typing import Dict
from typing import Any
def _minimize_price(price: Dict[str, Any]) -> Price:
"""
Return only the keys and values of a price the end user would be interested in.
"""
keys = ['id', 'recurring', 'type', 'currency', 'unit_amount', 'unit_amount_decimal', 'nickname',
'product', 'metadata']
return {k: price[k] for k in keys} | 7414e0f3e5ae11f55b5781a679e593294122aed2 | 10,773 |
import numpy as np
def project(signals, q_matrix):
"""
Project the given signals on the given space.
Parameters
----------
signals : array_like
Matrix with the signals in its rows
q_matrix : array_like
Matrix with an orthonormal basis of the space in its rows
Returns
-------
proj_signals : ndarray
Matrix with the projected signals in its rows
"""
signals = np.asarray(signals)
q_matrix = np.asarray(q_matrix)
return q_matrix.T.dot(q_matrix.dot(signals.T)).T | 0d6aa780d0d424260df5f8391821c806e12c81e5 | 10,774 |
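A small self-contained illustration of project above (added for illustration): projecting 3-D signals onto the xy-plane, whose orthonormal basis is supplied as the rows of q_matrix:
import numpy as np
q_matrix = np.array([[1.0, 0.0, 0.0],
                     [0.0, 1.0, 0.0]])  # orthonormal basis of the xy-plane
signals = np.array([[1.0, 2.0, 3.0],
                    [4.0, 5.0, 6.0]])   # one signal per row
print(project(signals, q_matrix))
# expected: the z-component is removed
# [[1. 2. 0.]
#  [4. 5. 0.]]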
def all_movies():
"""
Returns all movie in the database for Movies
service
"""
movies = ut.get_movies()
if len(movies) == 0:
abort(404)
return make_response(jsonify({"movies":movies}),200) | d8b2e3a66adf52830d7027953c22071449d2b29a | 10,775 |
def format_map(mapping, st):
"""
Format string st with given map.
"""
return st.format_map(mapping) | 462e0a744177d125db50739eac1f2e7a62128010 | 10,776 |
def communities_greedy_modularity(G,f):
"""
Adds a column to the dataframe f with the community of each node.
    The communities are detected using greedy modularity.
G: a networkx graph.
f: a pandas dataframe.
    It works with networkx version: '2.4rc1.dev_20190610203526'
"""
if not(set(f.name) == set(G.nodes()) and len(f.name) == len(G.nodes())):
        raise ValueError('The number of nodes and the length of the dataframe should be the same.')
communities_dic = nx.algorithms.community.greedy_modularity_communities(G)
communities_df = pd.DataFrame(data = {'name': [i for j in range(len(communities_dic)) for i in list(communities_dic[j])], 'communities_greedy_modularity': [j for j in range(len(communities_dic)) for i in list(communities_dic[j])] })
f = pd.merge(f, communities_df, on='name')
return f | cfca6ef66730f3a6ef467f1c51c66c5d46296351 | 10,777 |
import json
def load_loglin_stats(infile_path):
"""read in data in json format"""
# convert all 'stats' to pandas data frames
with open(infile_path) as infile:
data = json.load(infile)
new_data = {}
for position_set in data:
try:
new_key = eval(position_set)
except NameError:
new_key = position_set
new_data[new_key] = {}
for key, value in list(data[position_set].items()):
if key == "stats":
value = read_json(value)
new_data[new_key][key] = value
return new_data | c307ff2cf4e07bbb7843971cceaf74744422276c | 10,778 |
def _simple_logistic_regression(x,y,beta_start=None,verbose=False,
CONV_THRESH=1.e-3,MAXIT=500):
"""
Faster than logistic_regression when there is only one predictor.
"""
if len(x) != len(y):
raise ValueError, "x and y should be the same length!"
if beta_start is None:
beta_start = NA.zeros(2,x.dtype.char)
iter = 0; diff = 1.; beta = beta_start # initial values
if verbose:
        print('iteration  beta  log-likelihood  |beta-beta_old|')
while iter < MAXIT:
beta_old = beta
p = NA.exp(beta[0]+beta[1]*x)/(1.+NA.exp(beta[0]+beta[1]*x))
l = NA.sum(y*NA.log(p) + (1.-y)*NA.log(1.-p)) # log-likliehood
s = NA.array([NA.sum(y-p), NA.sum((y-p)*x)]) # scoring function
# information matrix
J_bar = NA.array([[NA.sum(p*(1-p)),NA.sum(p*(1-p)*x)],
[NA.sum(p*(1-p)*x),NA.sum(p*(1-p)*x*x)]])
beta = beta_old + NA.dot(LA.inverse(J_bar),s) # new value of beta
diff = NA.sum(NA.fabs(beta-beta_old)) # sum of absolute differences
if verbose:
            print(iter+1, beta, l, diff)
if diff <= CONV_THRESH: break
iter = iter + 1
return beta, J_bar, l | c37190b167e634df31127f79163aaeb56bac217e | 10,779 |
import numpy as np
def preemphasis(signal,coeff=0.95):
"""perform preemphasis on the input signal.
:param signal: The signal to filter.
:param coeff: The preemphasis coefficient. 0 is no filter, default is 0.95.
:returns: the filtered signal.
"""
return np.append(signal[0],signal[1:]-coeff*signal[:-1]) | c5173708e7b349decd34ac886493103eaadb023d | 10,780 |
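A tiny numeric example of the pre-emphasis filter above, which computes y[0] = x[0] and y[n] = x[n] - coeff * x[n-1]:
import numpy as np
x = np.array([1.0, 2.0, 3.0, 4.0])
print(preemphasis(x, coeff=0.5))
# expected: [1.  1.5 2.  2.5]  (e.g. 2 - 0.5*1 = 1.5, 3 - 0.5*2 = 2, ...)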
from sacremoses import MosesTokenizer
from sacremoses import MosesPunctNormalizer
def build_moses_tokenizer(tokenizer: MosesTokenizerSpans,
normalizer: MosesPunctNormalizer = None) -> Callable[[str], List[Token]]:
"""
    Wrap a sacremoses tokenizer to build a tokenizer for the Sentence class.
    :param tokenizer: a Moses tokenizer instance
    :param normalizer: an optional Moses punctuation normalizer
    :return: a tokenizer function to provide to the Sentence class constructor
"""
    try:
        from sacremoses import MosesTokenizer
        from sacremoses import MosesPunctNormalizer
    except ImportError:
raise ImportError(
"Please install sacremoses or better before using the Spacy tokenizer, otherwise you can use segtok_tokenizer as advanced tokenizer."
)
moses_tokenizer: MosesTokenizerSpans = tokenizer
if normalizer:
normalizer: MosesPunctNormalizer = normalizer
def tokenizer(text: str) -> List[Token]:
if normalizer:
text = normalizer.normalize(text=text)
doc = moses_tokenizer.span_tokenize(text=text, escape=False)
previous_token = None
tokens: List[Token] = []
for word, (start_pos, end_pos) in doc:
word: str = word
token = Token(
text=word, start_position=start_pos, whitespace_after=True
)
tokens.append(token)
if (previous_token is not None) and (
token.start_pos - 1
== previous_token.start_pos + len(previous_token.text)
):
previous_token.whitespace_after = False
previous_token = token
return tokens
return tokenizer | 0dee31ab9030e387dd6907efad60c188eb0241b2 | 10,781 |
from typing import Callable
from typing import Hashable
from typing import Union
def horizontal_block_reduce(
obj: T_DataArray_or_Dataset,
coarsening_factor: int,
reduction_function: Callable,
x_dim: Hashable = "xaxis_1",
y_dim: Hashable = "yaxis_1",
coord_func: Union[str, CoordFunc] = coarsen_coords_coord_func,
) -> T_DataArray_or_Dataset:
"""A generic horizontal block reduce function for xarray data structures.
This is a convenience wrapper around block_reduce for applying coarsening
over n x n patches of array elements. It should only be used if a dask
implementation of the reduction method has not been implemented (e.g. for
median) or if a custom reduction method is used that is not implemented in
xarray. Otherwise, block_coarsen should be used.
Args:
obj: Input Dataset or DataArray.
coarsening_factor: Integer coarsening factor to use.
reduction_function: Array reduction function which accepts a tuple of
axes to reduce along.
x_dim: x dimension name (default 'xaxis_1').
y_dim: y dimension name (default 'yaxis_1').
coord_func: function that is applied to the coordinates, or a
mapping from coordinate name to function. See `xarray's coarsen
method for details
<http://xarray.pydata.org/en/stable/generated/xarray.DataArray.coarsen.html>`_.
Returns:
xr.Dataset or xr.DataArray.
"""
block_sizes = {x_dim: coarsening_factor, y_dim: coarsening_factor}
return xarray_block_reduce(
obj, block_sizes, reduction_function, coord_func=coord_func,
) | 07fc497ae8c5cd90699bc73bfbeab705c13ed0c6 | 10,782 |
def statements_api(context, request):
"""List all the statements for a period."""
dbsession = request.dbsession
owner = request.owner
owner_id = owner.id
period = context.period
inc_case = case([(AccountEntry.delta > 0, AccountEntry.delta)], else_=None)
dec_case = case([(AccountEntry.delta < 0, AccountEntry.delta)], else_=None)
statement_rows = (
dbsession.query(
Statement.id,
Statement.source,
Statement.filename,
func.count(inc_case).label('inc_count'),
func.count(dec_case).label('dec_count'),
func.sum(inc_case).label('inc_total'),
func.sum(dec_case).label('dec_total'),
)
.outerjoin(AccountEntry, AccountEntry.statement_id == Statement.id)
.filter(
Statement.owner_id == owner_id,
Statement.file_id == period.file_id,
Statement.period_id == period.id,
)
.group_by(Statement.id)
.order_by(Statement.id)
.all()
)
statements = [{
'id': str(row.id),
'source': row.source,
'filename': row.filename,
'inc_count': row.inc_count,
'dec_count': row.dec_count,
'inc_total': row.inc_total,
'dec_total': row.dec_total,
} for row in statement_rows]
now = dbsession.query(now_func).scalar()
return {
'now': now,
'statements': statements,
} | 87a1ec3e5fc5730eda30367a5f9f34aef6cf7339 | 10,783 |
import numpy as np
import scipy.special as sp
def fp(x):
"""Function used in **v(a, b, th, nu, dimh, k)** for **analytic_solution_slope()**
    :param x: real number or array of real numbers
    :type x: float or numpy.ndarray
    :return: fp value
    :rtype: complex or numpy.ndarray
"""
rx = np.sqrt(x * 2 / np.pi)
s_fresnel, c_fresnel = sp.fresnel(rx)
return - 2 * 1j * np.sqrt(x) * np.exp(-1j * x) * np.sqrt(np.pi / 2.) \
* (.5 - c_fresnel + 1j * (.5 - s_fresnel)) | 202000557fb239e589ffd4d7b9709b60678ab784 | 10,784 |
def get_truck_locations(given_address):
"""
Get the location of the food trucks in Boston TODAY within 1 mile
of a given_address
:param given_address: a pair of coordinates
:return: a list of features with unique food truck locations
"""
formatted_address = '{x_coordinate}, {y_coordinate}'.format(
x_coordinate=given_address['x'],
y_coordinate=given_address['y']
)
QUERY["geometry"] = formatted_address
trucks = gis_utils.get_features_from_feature_server(BASE_URL, QUERY)
truck_unique_locations = []
for t in trucks:
if t['attributes']['Day'] == DAY:
truck_unique_locations.append(t)
return truck_unique_locations | f1d5e290c5c46e1587a2f98c2e82edee3890fc05 | 10,785 |
import inspect
import warnings
def _getRelevantKwds(method, kwds):
"""return kwd args for the given method, and remove them from the given kwds"""
    argspec = inspect.getfullargspec(method)
d = dict()
for a in kwds:
if a not in argspec.args:
warnings.warn("Unrecognized kwd: {!r}".format(a))
for a in argspec.args:
if a in kwds:
d[a] = kwds[a]
del kwds[a]
return d | bca410b99e750f233a5e4476413e6bacfa52dcb9 | 10,786 |
import requests
def find_overview_details(park_code):
""" Find overview details from park code """
global API_KEY
fields = "&fields=images,entranceFees,entrancePasses,operatingHours,exceptions"
url = "https://developer.nps.gov/api/v1/parks?parkCode=" + park_code + "&api_key=" + API_KEY + fields
response = requests.get(url)
json_object = response.json()
overview = json_object['data']
return {'overview': overview} | 95cf281828154c45eae1e239f33d2de8bcf9e7fa | 10,787 |
import torch
def embed_nomenclature(
D,
embedding_dimension,
loss="rank",
n_steps=1000,
lr=10,
momentum=0.9,
weight_decay=1e-4,
ignore_index=None,
):
"""
Embed a finite metric into a target embedding space
Args:
D (tensor): 2D-cost matrix of the finite metric
embedding_dimension (int): dimension of the target embedding space
        loss (str): embedding loss to use: distortion-based (loss='disto') or rank-based (loss='rank')
n_steps (int): number of gradient iterations
lr (float): learning rate
momentum (float): momentum
weight_decay (float): weight decay
Returns:
embedding (tensor): embedding of each vertex of the finite metric space, shape n_vertex x embedding_dimension
"""
n_vertex = D.shape[0]
mapping = torch.rand(
(n_vertex, embedding_dimension), requires_grad=True, device=D.device
)
if loss == "rank":
crit = RankLoss(D, n_triplets=1000)
elif loss == "disto":
crit = DistortionLoss(D, scale_free=False)
else:
raise ValueError
optimizer = torch.optim.SGD(
[mapping], lr=lr, momentum=momentum, weight_decay=weight_decay
)
print("Embedding nomenclature . . .")
for i in range(n_steps):
loss = crit(mapping)
optimizer.zero_grad()
loss.backward()
optimizer.step()
        print(
            "Step {}: loss {:.4f} ".format(i + 1, loss.cpu().detach().numpy()),
            end="\r",
        )
print("Final loss {:.4f}".format(crit(mapping).cpu().detach().numpy()))
return mapping.detach() | 1e9ca98dec0c3e42af0af483b6e9ef9efa11b225 | 10,788 |
def raw_env():
"""
To support the AEC API, the raw_env() function just uses the from_parallel
function to convert from a ParallelEnv to an AEC env
"""
env = parallel_env()
env = parallel_to_aec(env)
return env | dcb491c2beb50f73ba0fdab96bcd069916ce9b6d | 10,789 |
def cmd_line(preprocessor: Preprocessor, args: str) -> str:
"""the line command - prints the current line number"""
if args.strip() != "":
preprocessor.send_warning("extra-arguments", "the line command takes no arguments")
context = preprocessor.context.top
pos = context.true_position(preprocessor.current_position.begin)
return str(context.file.line_number(pos)[0]) | 061bcf2ced6c22c77d81bb30ec00a5c1964c3624 | 10,790 |
from xml.dom import expatbuilder
from xml.dom import pulldom
def parse(file, parser=None, bufsize=None):
"""Parse a file into a DOM by filename or file object."""
if parser is None and not bufsize:
return expatbuilder.parse(file)
else:
return _do_pulldom_parse(pulldom.parse, (file,),
{'parser': parser, 'bufsize': bufsize}) | 0d4bc592143ecb7c093eceaf4f5fe0d18869ea9c | 10,792 |
import hashlib
def create_hash256(max_length=None):
"""
Generate a hash that can be used as an application secret
    Warning: this is not sufficiently secure for tasks like encryption
Currently, this is just meant to create sufficiently random tokens
"""
hash_object = hashlib.sha256(force_bytes(get_random_string(32)))
hash_object.update(force_bytes(settings.SECRET_KEY))
output_hash = hash_object.hexdigest()
if max_length is not None and len(output_hash) > max_length:
return output_hash[:max_length]
return output_hash | 4856be59c475bcfc07137b62511de4d5c7531eb3 | 10,793 |
from io import StringIO
def assert_content_in_file(file_name, expected_content):
"""
Fabric assertion: Check if some text is in the specified file (result of installing a test product)
Provision dir: PROVISION_ROOT_PATH
:param file_name: File name
:param expected_content: String to be found in file
:return: True if given content is in file (dir: PROVISION_ROOT_PATH).
"""
file_path = PROVISION_ROOT_PATH.format(file_name)
fd = StringIO()
get(file_path, fd)
file_content = fd.getvalue()
return expected_content in file_content | eba68222d39c55902da1c4c4ae7055b7edc170e0 | 10,794 |
import math
import numpy
def _calculate_hwp_storage_fut(
hwp_shapes, base_dataset_uri, c_hwp_uri, bio_hwp_uri, vol_hwp_uri,
yr_cur, yr_fut, process_pool=None):
"""Calculates carbon storage, hwp biomassPerPixel and volumePerPixel due to
harvested wood products in parcels on current landscape.
hwp_shapes - a dictionary containing the current and/or future harvest
maps (or nothing)
hwp_shapes['cur'] - oal shapefile indicating harvest map from the
current landscape
hwp_shapes['fut'] - oal shapefile indicating harvest map from the
future landscape
c_hwp - an output GDAL rasterband representing carbon stored in
harvested wood products for current calculation
bio_hwp - an output GDAL rasterband representing carbon stored in
harvested wood products for land cover under interest
vol_hwp - an output GDAL rasterband representing carbon stored in
harvested wood products for land cover under interest
yr_cur - year of the current landcover map
yr_fut - year of the current landcover map
process_pool - a process pool for parallel processing (can be None)
No return value"""
############### Start
pixel_area = pygeoprocessing.geoprocessing.get_cell_size_from_uri(base_dataset_uri) ** 2 / 10000.0 #convert to Ha
nodata = -5.0
c_hwp_cur_uri = pygeoprocessing.geoprocessing.temporary_filename()
bio_hwp_cur_uri = pygeoprocessing.geoprocessing.temporary_filename()
vol_hwp_cur_uri = pygeoprocessing.geoprocessing.temporary_filename()
pygeoprocessing.geoprocessing.new_raster_from_base_uri(base_dataset_uri, c_hwp_uri, 'GTiff', nodata, gdal.GDT_Float32, fill_value=nodata)
pygeoprocessing.geoprocessing.new_raster_from_base_uri(base_dataset_uri, bio_hwp_uri, 'GTiff', nodata, gdal.GDT_Float32, fill_value=nodata)
pygeoprocessing.geoprocessing.new_raster_from_base_uri(base_dataset_uri, vol_hwp_uri, 'GTiff', nodata, gdal.GDT_Float32, fill_value=nodata)
#Create a temporary shapefile to hold values of per feature carbon pools
#HWP biomassPerPixel and volumePerPixel, will be used later to rasterize
#those values to output rasters
calculatedAttributeNames = ['c_hwp_pool', 'bio_hwp', 'vol_hwp']
if 'cur' in hwp_shapes:
hwp_shape = ogr.Open(hwp_shapes['cur'])
hwp_shape_copy = \
ogr.GetDriverByName('Memory').CopyDataSource(hwp_shape, '')
hwp_shape_layer_copy = \
hwp_shape_copy.GetLayer()
#Create fields in the layers to hold hardwood product pools,
#biomassPerPixel and volumePerPixel
for fieldName in calculatedAttributeNames:
field_def = ogr.FieldDefn(fieldName, ogr.OFTReal)
hwp_shape_layer_copy.CreateField(field_def)
#Visit each feature and calculate the carbon pool, biomassPerPixel,
#and volumePerPixel of that parcel
for feature in hwp_shape_layer_copy:
#This makes a helpful dictionary to access fields in the feature
#later in the code
field_args = _get_fields(feature)
#If start date and/or the amount of carbon per cut is zero, it
#doesn't make sense to do any calculation on carbon pools or
#biomassPerPixel/volumePerPixel
if field_args['start_date'] != 0 and field_args['cut_cur'] != 0:
time_span = (yr_fut + yr_cur) / 2.0 - field_args['start_date']
start_years = yr_fut - field_args['start_date']
#Calculate the carbon pool due to decaying HWP over the
#time_span
feature_carbon_storage_per_pixel = (
pixel_area * _carbon_pool_in_hwp_from_parcel(
field_args['cut_cur'], time_span, start_years,
field_args['freq_cur'], field_args['decay_cur']))
                #Calculate biomassPerPixel and volumePerPixel of harvested wood
numberOfHarvests = \
math.ceil(time_span / float(field_args['freq_cur']))
#The measure of biomass is in terms of Mg/ha
biomassInFeaturePerArea = field_args['cut_cur'] * \
numberOfHarvests / float(field_args['c_den_cur'])
biomassPerPixel = biomassInFeaturePerArea * pixel_area
volumePerPixel = biomassPerPixel / field_args['bcef_cur']
#Copy biomassPerPixel and carbon pools to the temporary
#feature for rasterization of the entire layer later
for field, value in zip(calculatedAttributeNames,
[feature_carbon_storage_per_pixel,
biomassPerPixel, volumePerPixel]):
feature.SetField(feature.GetFieldIndex(field), value)
#This saves the changes made to feature back to the shape layer
hwp_shape_layer_copy.SetFeature(feature)
#burn all the attribute values to a raster
for attributeName, raster_uri in zip(calculatedAttributeNames,
[c_hwp_cur_uri, bio_hwp_cur_uri, vol_hwp_cur_uri]):
nodata = -1.0
pygeoprocessing.geoprocessing.new_raster_from_base_uri(base_dataset_uri, raster_uri, 'GTiff', nodata, gdal.GDT_Float32, fill_value=nodata)
raster = gdal.Open(raster_uri, gdal.GA_Update)
gdal.RasterizeLayer(raster, [1], hwp_shape_layer_copy, options=['ATTRIBUTE=' + attributeName])
raster.FlushCache()
raster = None
#handle the future term
if 'fut' in hwp_shapes:
hwp_shape = ogr.Open(hwp_shapes['fut'])
hwp_shape_copy = \
ogr.GetDriverByName('Memory').CopyDataSource(hwp_shape, '')
hwp_shape_layer_copy = \
hwp_shape_copy.GetLayer()
#Create fields in the layers to hold hardwood product pools,
#biomassPerPixel and volumePerPixel
for fieldName in calculatedAttributeNames:
field_def = ogr.FieldDefn(fieldName, ogr.OFTReal)
hwp_shape_layer_copy.CreateField(field_def)
#Visit each feature and calculate the carbon pool, biomassPerPixel,
#and volumePerPixel of that parcel
for feature in hwp_shape_layer_copy:
#This makes a helpful dictionary to access fields in the feature
#later in the code
field_args = _get_fields(feature)
#If start date and/or the amount of carbon per cut is zero, it
#doesn't make sense to do any calculation on carbon pools or
#biomassPerPixel/volumePerPixel
if field_args['cut_fut'] != 0:
time_span = yr_fut - (yr_fut + yr_cur) / 2.0
start_years = time_span
#Calculate the carbon pool due to decaying HWP over the
#time_span
feature_carbon_storage_per_pixel = pixel_area * \
_carbon_pool_in_hwp_from_parcel(
field_args['cut_fut'], time_span, start_years,
field_args['freq_fut'], field_args['decay_fut'])
                #Calculate biomassPerPixel and volumePerPixel of harvested wood
numberOfHarvests = \
math.ceil(time_span / float(field_args['freq_fut']))
biomassInFeaturePerArea = field_args['cut_fut'] * \
numberOfHarvests / float(field_args['c_den_fut'])
biomassPerPixel = biomassInFeaturePerArea * pixel_area
volumePerPixel = biomassPerPixel / field_args['bcef_fut']
#Copy biomassPerPixel and carbon pools to the temporary
#feature for rasterization of the entire layer later
for field, value in zip(calculatedAttributeNames,
[feature_carbon_storage_per_pixel,
biomassPerPixel, volumePerPixel]):
feature.SetField(feature.GetFieldIndex(field), value)
#This saves the changes made to feature back to the shape layer
hwp_shape_layer_copy.SetFeature(feature)
#burn all the attribute values to a raster
for attributeName, (raster_uri, cur_raster_uri) in zip(
calculatedAttributeNames, [(c_hwp_uri, c_hwp_cur_uri), (bio_hwp_uri, bio_hwp_cur_uri), (vol_hwp_uri, vol_hwp_cur_uri)]):
temp_filename = pygeoprocessing.geoprocessing.temporary_filename()
pygeoprocessing.geoprocessing.new_raster_from_base_uri(
base_dataset_uri, temp_filename, 'GTiff',
nodata, gdal.GDT_Float32, fill_value=nodata)
temp_raster = gdal.Open(temp_filename, gdal.GA_Update)
gdal.RasterizeLayer(temp_raster, [1], hwp_shape_layer_copy,
options=['ATTRIBUTE=' + attributeName])
temp_raster.FlushCache()
temp_raster = None
#add temp_raster and raster cur raster into the output raster
nodata = -1.0
base_nodata = pygeoprocessing.geoprocessing.get_nodata_from_uri(
raster_uri)
cur_nodata = pygeoprocessing.geoprocessing.get_nodata_from_uri(
cur_raster_uri)
def add_op(base, current):
"""add two rasters"""
nodata_mask = (base == base_nodata) | (current == cur_nodata)
return numpy.where(nodata_mask, nodata, base+current)
pixel_size_out = (
pygeoprocessing.geoprocessing.get_cell_size_from_uri(
raster_uri))
pygeoprocessing.geoprocessing.vectorize_datasets(
[cur_raster_uri, temp_filename], add_op, raster_uri,
gdal.GDT_Float32, nodata,
pixel_size_out, "intersection", dataset_to_align_index=0,
vectorize_op=False) | 71b597c62014c120a3deb99ceea14d84612e3b19 | 10,796 |
def wcxf2arrays_symmetrized(d):
"""Convert a dictionary with a Wilson coefficient
name followed by underscore and numeric indices as keys and numbers as
values to a dictionary with Wilson coefficient names as keys and
numbers or numpy arrays as values.
In contrast to `wcxf2arrays`, here the numpy arrays fulfill the same
symmetry relations as the operators (i.e. they contain redundant entries)
and they do not contain undefined indices.
Zero arrays are added for missing coefficients."""
C = wcxf2arrays(d)
C = symmetrize_nonred(C)
C = add_missing(C)
return C | 6cca03761b9799a3af7b933877ff70d6d68f7644 | 10,799 |
def gather_inputs(headers, test_suites, inputs_class=Inputs):
"""Read the list of inputs to test psa_constant_names with."""
inputs = inputs_class()
for header in headers:
inputs.parse_header(header)
for test_cases in test_suites:
inputs.parse_test_cases(test_cases)
inputs.gather_arguments()
return inputs | 18300cab225f817a7a09f73e4b957713ee45d0c8 | 10,800 |
def key_create(adapter_id):
"""Creates a key using a certain adapter."""
adapter = get_adapter(adapter_id)
if not adapter:
return output.failure("That adapter doesn't (yet) exist. Please check the adapter name and try again.", 501)
if not adapter.do_verify(request.headers):
return output.failure("Credential verification failed. Please check your credentials and try again.", 401)
result = adapter.do_key_create(request.headers, request.json)
if 'error' in result:
return output.failure(result['error'], result['status'])
return output.success(result['data'], result['status']) | ec07091f3bb96f469338643f36b63ade50de3205 | 10,801 |
import math
def is_right(side1, side2, side3):
    """
    Takes three side lengths and returns true if triangle is right
    :param side1: int or float
    :param side2: int or float
    :param side3: int or float
    :return: bool
    """
    # treat the largest side as the hypotenuse and apply the Pythagorean theorem
    hypotenuse = max(side1, side2, side3)
    legs_squared = side1 ** 2 + side2 ** 2 + side3 ** 2 - hypotenuse ** 2
    return math.isclose(legs_squared, hypotenuse ** 2) | 2d22bbc7d0d363b360f578002a6380a4ae5f5b63 | 10,802 |
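A quick sanity check of the predicate above, using the classic 3-4-5 right triangle and an equilateral counter-example (outputs assume the Pythagorean-theorem implementation sketched here):
print(is_right(3, 4, 5))  # True  (3**2 + 4**2 == 5**2)
print(is_right(5, 3, 4))  # True  (order of the sides does not matter)
print(is_right(2, 2, 2))  # False (equilateral triangle)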
def parser_electron_number(electron_line):
"""
    Parse electron information from a line.
Args:
electron_line (str): line
Returns:
list: electron information
"""
electron_list = parser_split_line_by_length(electron_line.rstrip(), CPF_FORMAT["ELECTRON"]["length"], "int")
return electron_list | 3a444aa0cb062ea5cfaac3e7686ff762e42ebf4c | 10,803 |
def summary(t, rtol=1e-5, atol=1e-8):
"""
Parameters
----------
    t : array-like of time values
    rtol : relative tolerance passed to np.allclose when testing for constant steps
    atol : absolute tolerance passed to np.allclose when testing for constant steps
    Returns
    -------
    (dt, deltas, message) : the (modal) time step, the step sizes found, and a
        message that is empty when the steps are constant.
"""
deltas = np.diff(t)
if np.allclose(deltas, deltas[0], rtol, atol):
# constant time steps
return deltas[0], deltas, ''
# non-constant time steps!
unqdt = np.unique(deltas)
mode = stats.mode(deltas)
dt = mode.mode
if len(unqdt) > 5:
        info = f'{len(unqdt)} unique values between {deltas.min()} and {deltas.max()}'
else:
info = str(unqdt)
return dt, unqdt, f'Non-constant time steps: {info}' | 0f1a5a65d832be8db35b8bdf145e6240d6072f71 | 10,804 |
def masked_huber(input, target, lengths):
"""
Always mask the first (non-batch dimension) -> usually time
:param input:
:param target:
:param lengths:
:return:
"""
m = mask(input.shape, lengths, dim=1).float().to(input.device)
return F.smooth_l1_loss(input * m, target * m, reduction='sum') / m.sum() | c4eab136b73ffc92034a217252ac290848f77982 | 10,805 |
def calcProbabilisticResiduals(
coords_actual,
coords_desired,
covariances_actual
):
"""
Calculate the probabilistic residual.
Parameters
----------
coords_actual : `~numpy.ndarray` (N, M)
Actual N coordinates in M dimensions.
coords_desired : `~numpy.ndarray` (N, M)
The desired N coordinates in M dimensions.
covariances_actual : list of N `~numpy.ndarray`s (M, M)
The covariance matrix in M dimensions for each
actual observation if available.
Returns
-------
p : `~numpy.ndarray` (N)
The probability that the actual coordinates given their uncertainty
belong to the same multivariate normal distribution as the desired
coordinates.
d : `~numpy.ndarray` (N)
The Mahalanobis distance of each coordinate compared to the desired
coordinates.
"""
d = np.zeros(len(coords_actual))
p = np.zeros(len(coords_actual))
for i, (actual, desired, covar) in enumerate(zip(coords_actual, coords_desired, covariances_actual)):
# Calculate the degrees of freedom
k = len(actual)
# Calculate the mahalanobis distance between the two coordinates
d_i = mahalanobis(
actual,
desired,
np.linalg.inv(covar)
)
# Calculate the probability that both sets of coordinates are drawn from
# the same multivariate normal
p_i = 1 - chi2.cdf(d_i, k)
# Append results
d[i] = d_i
p[i] = p_i
return p, d | c5bdc4048d9fef2e6b40e3bc48c80e6f6e2fcca7 | 10,806 |
import re
def split_words_and_quoted_text(text):
"""Split string text by space unless it is
wrapped inside double quotes, returning a list
of the elements.
For example
if text =
'Should give "3 elements only"'
the resulting list would be:
['Should', 'give', '3 elements only']
"""
# using shlex
# return shlex.split(text)
# using re
result = list()
pattern = re.findall(r'\w+\s*|\".+?\"', text)
for char in pattern:
result.append(char.strip().replace('"', ''))
return result | befb31949d4c52fac96765fd78bc1b9d644282ba | 10,807 |
def scheduler(epoch):
"""Generating learning rate value for a given epoch.
inputs:
epoch = number of current epoch
outputs:
learning_rate = float learning rate value
"""
if epoch < 100:
return 1e-3
elif epoch < 125:
return 1e-4
else:
return 1e-5 | 916cbc12ff76b8d022a96c89083b8bd2a3078c69 | 10,808 |
def external_search(query, feature_type, url):
""" Makes an external search request to a specified URL. The url will have the search
text appended to it. Returns geojson matches with extra data for the geocoder.
"""
logger.info("using external API for feature lookup: %s", url + query)
req = ExternalAPIRequest(
url=url + query,
layer=feature_type,
q={},
paginate=False
)
# Fetch features.
feature_collection = fetch_geojson_features([req])
features = feature_collection[0].geojson['features']
geocoder_features = []
for feature in features:
feature['layer'] = feature_type
feature['center'] = (feature.geometry.coordinates[0],
feature.geometry.coordinates[1])
feature['place_name'] = str(feature.properties['well_tag_number'])
geocoder_features.append(feature)
return geocoder_features | f90ea54dd8036b4237a74dd398cf3f2698ab4d0f | 10,809 |
def joinpath(base, end):
"""Like Path.joinpath(), but ensures the result is inside `base`.
Should be used for user-supplied `end`.
"""
result = (base / end).resolve()
if base not in result.parents:
print(base, end, result)
raise ValueError(end)
return result | 1b4f5afcdca21ceb6e676385602dd07b252db3ad | 10,812 |
def multicolored_line_collection(x, y, z, colors):
""" Color a 2D line based on which state it is in
:param x: data x-axis values
:param y: data y-axis values
    :param z: values that determine the color of each (x, y) pair
    :param colors: array of colors, one row per state
    :return: LineCollection with one color per segment
    """
nstates = colors.shape[0]
# come up with color map and normalization (i.e. boundaries of colors)
cmap = ListedColormap(colors)
bounds = np.arange(-1, nstates) + 0.1
    norm = BoundaryNorm(bounds, cmap.N)
# create line segments to color individually
points = np.array([x, y]).T.reshape(-1, 1, 2)
segments = np.concatenate([points[:-1], points[1:]], axis=1)
# Set the values used for colormapping
lc = LineCollection(segments, cmap=cmap, norm=norm)
lc.set_array(z)
lc.set_linewidth(2)
return lc | 6d9438a58547d4be253ca2a505e05da259c73118 | 10,813 |
def featurise_distances(diagram):
"""Create feature vector by distance-to-diagonal calculation.
Creates a feature vector by calculating distances to the diagonal
for every point in the diagram and returning a sorted vector. The
representation is *stable* but might not be discriminative.
Parameters
----------
diagram : `PersistenceDiagram`
Persistence diagram to featurise. Can also be a generic 2D
container for iterating over tuples.
Returns
-------
Sorted vector of distances to diagonal. The vector is sorted in
descending order, such that high persistence points precede the
ones of low persistence.
"""
distances = [_persistence(x, y) for x, y in diagram]
return sorted(distances, reverse=True) | 9c4f20be1deb2ed5073015939d48615f3b04c21b | 10,815 |
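The helper `_persistence` is not included in the snippet; the sketch below assumes the common convention of using the persistence (death minus birth) of each pair, which is proportional to the distance from the diagonal:
def _persistence(x, y):
    # assumed helper: persistence of a (birth, death) pair
    return y - x
diagram = [(0.0, 1.2), (0.3, 0.4), (0.1, 2.0)]
print(featurise_distances(diagram))  # roughly [1.9, 1.2, 0.1]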
def resize(source, width=None, height=None, filter=None, radius=1,
wrapx=False, wrapy=False):
"""Create a new numpy image with the desired size.
Either width or height can be null, in which case its value
is inferred from the aspect ratio of the source image.
Filter can be HERMITE, TRIANGLE, GAUSSIAN, NEAREST, LANCZOS, or
MITCHELL.
"""
assert len(source.shape) == 3, 'Shape is not rows x cols x channels'
    assert width is not None or height is not None, 'Missing target size'
    aspect = source.shape[1] / source.shape[0]
    if width is None: width = height * aspect
    if height is None: height = width / aspect
    magnifying = width > source.shape[1]
    if filter is None: filter = MITCHELL if magnifying else LANCZOS
return resample(source, width, height, filter, radius, wrapx, wrapy) | 08fdc077dcea013fd8b0be4a195a860e6d5291ec | 10,816 |
def load_and_classify_payload(config, service, entity, raw_record):
"""Return a loaded and classified payload."""
# prepare the payloads
payload = load_stream_payload(service, entity, raw_record)
payload = list(payload.pre_parse())[0]
classifier = StreamClassifier(config=config)
classifier.load_sources(service, entity)
classifier.classify_record(payload)
return payload | 1931804b1535ba00b495879061492e25a43f91e8 | 10,817 |
def render_text(self, block: str, block_type: str, y: int) -> int:
"""
:param self: MarkdownRenderer
:param block: string of text
:param block_type: type of the text (e.g. headers, ordered/unordered lists, blockquotes, code etc)
:param y: y-coordinate to start rendering on
:return: y-coordinate after rendering is finished
"""
start_of_line_x = self.x
if block_type == 'blockquote':
start_of_line_x += self.indentation_quote
quote_y_start = y
x = start_of_line_x
# Cleanup and stripping
block = block \
.replace('\n', ' ') \
.strip(' ')
if block[:3] == '<p>':
block = block[3:]
if block[-4:] == '</p>':
block = block[:-4]
code_flag = False
bold_flag = False
italic_flag = False
position = None
    if block_type in ('h1', 'h2', 'h3'): # insert additional gap in front of h1, h2 and h3 headers
y += self.gap_line
for word in block.split(" "):
# _________ PREPARATION _________ #
# inline code, bold and italic formatting
word, position, code_flag, bold_flag, italic_flag = self.inline_formatting_preparation(word, position, code_flag, bold_flag, italic_flag)
# _________ TEXT BLITTING _________ #
# create surface to get width of the word to identify necessary linebreaks
word = word + " "
        word = word.replace("&gt;", ">").replace("&lt;", "<")  # unescape HTML entities back to angle brackets
if code_flag:
if position == 'first' or position == 'single':
x += self.code_padding
surface = self.get_surface(word, 'code', bold_flag, italic_flag)
else:
surface = self.get_surface(word, block_type, bold_flag, italic_flag)
text_height = surface.get_height() # update for next line
if not(x + surface.get_width() < self.x + self.w): # new line necessary
y = y + text_height + self.gap_line
x = start_of_line_x
if self.is_visible(y) and self.is_visible(y + text_height):
if block_type == 'blockquote': # draw quote-rectangle in front of text
self.draw_quote_rect(y, y + self.get_surface(word, 'blockquote').get_height())
self.draw_code_background(code_flag, word, x, y, position)
self.screen.blit(surface, (x, y))
# Update x for the next word
x = x + surface.get_width()
if code_flag and position in ('single', 'last'):
x -= self.code_padding # reduce empty space by padding.
# _________ FORMATTING RESET FOR NEXT WORD _________ #
bold_flag = False if bold_flag and position == 'last' else bold_flag
code_flag = False if code_flag and (position == 'last' or position == 'single') else code_flag
italic_flag = False if italic_flag and position == 'last' else italic_flag
position = 'Middle' if position == 'first' else position
if block_type in ('h1', 'h2'):
y = y + text_height * 0.5 # add an additional margin below h1 and h2 headers
if block_type == 'h1': # insert subline below h1 headers
y = y + text_height * 0.5 # add an additional margin below h1 headers for the subheader line
y = self.draw_subheader_line(y)
return y | ed3e18d9988d612f911d9f6c647cbdf7dfbf7b07 | 10,818 |
def tvadam_reconstructor(dataset='ellipses', name=None):
"""
:param dataset: Can be 'ellipses' or 'lodopab'
:return: TV reconstructor for the specified dataset
"""
try:
params = Params.load('{}_tvadam'.format(dataset))
standard_dataset = load_standard_dataset(dataset)
if name is None:
name = 'TV-Adam'
reconstructor = TVAdamReconstructor(standard_dataset.ray_trafo,
hyper_params=params.dict,
name=name)
return reconstructor
    except Exception as e:
        raise Exception('The reconstructor doesn\'t exist') from e | 0b69d0ce60f05dc522449af66f70ee655389e13c | 10,819
import re
from netaddr import IPNetwork  # assumed: IPNetwork used below comes from netaddr
def process_spf_data(res, data):
"""
This function will take the text info of a TXT or SPF record, extract the
IPv4, IPv6 addresses and ranges, request process include records and return
a list of IP Addresses for the records specified in the SPF Record.
"""
# Declare lists that will be used in the function.
ipv4 = []
ipv6 = []
includes = []
ip_list = []
# check first if it is a sfp record
if not re.search(r'v\=spf', data):
return
# Parse the record for IPv4 Ranges, individual IPs and include TXT Records.
    ipv4.extend(re.findall(r'ip4:(\S*) ', "".join(data)))
    ipv6.extend(re.findall(r'ip6:(\S*)', "".join(data)))
# Create a list of IPNetwork objects.
for ip in ipv4:
for i in IPNetwork(ip):
ip_list.append(i)
for ip in ipv6:
for i in IPNetwork(ip):
ip_list.append(i)
# Extract and process include values.
    includes.extend(re.findall(r'include:(\S*)', "".join(data)))
for inc_ranges in includes:
for spr_rec in res.get_txt(inc_ranges):
spf_data = process_spf_data(res, spr_rec[2])
if spf_data is not None:
ip_list.extend(spf_data)
# Return a list of IP Addresses
return [str(ip) for ip in ip_list] | 537a59dd9091df35ac2502e8b03f87e625b74b76 | 10,820 |
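A hedged demo with a stub resolver: the record below has no include: directives, so `res` only has to satisfy the signature, and `IPNetwork` is assumed to come from netaddr as noted above:
class _StubResolver:
    def get_txt(self, domain):
        return []
spf = 'v=spf1 ip4:192.0.2.0/30 ip6:2001:db8::/127 -all'
print(process_spf_data(_StubResolver(), spf))
# six addresses: 192.0.2.0 through 192.0.2.3, plus 2001:db8:: and 2001:db8::1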
def create_knight():
"""
Creates a new knight according to player input.
Checks the knights module for how many points are to spend,
and which attributes are available. It then asks the player
for a name for the knight and to spend their points on the
available attributes.
Returns:
A knight instance with the player's values
"""
knight_class = get_class()
# get the constants from the knights module
max_attr_points = knights.MAX_ATTRIBUTE_POINTS
attributes = knights.ATTRIBUTES
knight = None # this will be the instance to be returned
name = input("What is your name?\n")
    # repeat until the input is correct and a knight has been created
while not knight:
# display the attributes and how many points are to be spent
spent_points = input(
f"You have {max_attr_points} points to spend on "
f"the attributes: { ', '.join(attributes) }.\n"
"Submit your points separated either by commas or by spaces, "
"like the list above with numbers instead of attribute names. "
"Points must be integers.\n"
)
try:
# we allow to use commas or spaces, so we check what was used
# we cast all input attribute points to integer since
# attribute points are integer numbers
if "," in spent_points:
points = [int(val) for val in spent_points.split(",")]
else:
points = [int(val) for val in spent_points.split(" ")]
# if not enough attributes were inputted, repeat the loop
if len(points) != len(attributes): continue
# knight the knight! Since knights take attributes as
# one parameter each, we unzip the input list into the call
knight = knight_class(name, *points)
except ValueError:
# When the casting to integer fails
print("Could not parse. Were the points all integer?")
continue
except knights.KnightError as e:
# a special error from the knights module that occurs when
# there are errors in knighting a new knight
print(f"Could not knight the knight: {str(e)}")
continue
return knight | 8feed9cd71b68868d14cd1bcfe14ff9291cf2abd | 10,821 |
async def get_bank_name(guild: discord.Guild = None) -> str:
"""Get the current bank name.
Parameters
----------
guild : `discord.Guild`, optional
The guild to get the bank name for (required if bank is
guild-specific).
Returns
-------
str
The bank's name.
Raises
------
RuntimeError
If the bank is guild-specific and guild was not provided.
"""
return await bank.get_bank_name(guild) | 1e0e3f1a1de7925daf5810ac3bcc75508993a642 | 10,822 |
from typing import Optional
from typing import Dict
from typing import Any
import tempfile
from jinja2 import Environment, FileSystemLoader, select_autoescape
def build_cli_lib(to_save_location: Optional[str] = None, render_kwargs: Optional[Dict[str, Any]] = None) -> str:
"""Create project-specific cli.fif lib"""
if not to_save_location:
to_save_location: str = tempfile.mkstemp(suffix='.fif')[1]
logger.info(f"👽 Save ton-cli to {to_save_location}")
loader = FileSystemLoader(f"{project_root}/modules/fift")
env = Environment(
loader=loader,
autoescape=select_autoescape()
)
    template = env.get_template("cli.fif.template")
render_kwargs = {} if render_kwargs is None else render_kwargs
if 'is_project' not in render_kwargs:
render_kwargs['is_project'] = 0
rendered = template.render(**render_kwargs)
with open(to_save_location, 'w', encoding='utf-8') as f:
f.write(rendered)
return to_save_location | 0231433f94b129213de95ac50b406ede88860f23 | 10,823 |
def match(A, S, trueS):
"""Rearranges columns of S to best fit the components they likely represent (maximizes sum of correlations)"""
cov = np.cov(trueS, S)
k = S.shape[0]
corr = np.zeros([k, k])
for i in range(k):
for j in range(k):
corr[i][j] = cov[i + k][j] / np.sqrt(cov[i + k][i + k] * cov[j][j])
arrangement = linear_sum_assignment(-corr)
resS = np.zeros_like(S)
resAT = np.zeros_like(A.T)
for t in range(k):
resS[arrangement[1][t]] = S[arrangement[0][t]]
resAT[arrangement[1][t]] = A.T[arrangement[0][t]]
return resAT.T, resS | a0ec70ec768a1dfc610e8a5050d190a94266b307 | 10,824 |
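A hedged sanity check, assuming `match` lives in a module that imports numpy as np and scipy's linear_sum_assignment: shuffle the rows of trueS and confirm they are matched back into place:
import numpy as np
from scipy.optimize import linear_sum_assignment  # required by match()
rng = np.random.default_rng(0)
trueS = rng.normal(size=(3, 500))
perm = [2, 0, 1]
S = trueS[perm]              # components in shuffled order
A = rng.normal(size=(4, 3))  # arbitrary mixing matrix
A_matched, S_matched = match(A, S, trueS)
print(np.allclose(S_matched, trueS))  # True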
def image_field_data(request, include_empty_option=False):
"""Returns a list of tuples of all images.
    Generates a sorted list of the available images and returns a list of
    (id, name) tuples.
    :param request: django http request object
    :param include_empty_option: flag to include an empty tuple at the front of
the list
:return: list of (id, name) tuples
"""
try:
images = get_available_images(request, request.user.project_id)
except Exception:
exceptions.handle(request, _('Unable to retrieve images'))
images.sort(key=lambda c: c.name)
images_list = [('', _('Select Image'))]
for image in images:
image_label = u"{} ({})".format(image.name,
sizeformat.diskgbformat(image.size))
images_list.append((image.id, image_label))
if not images:
return [("", _("No images available")), ]
return images_list | f209cbc9ae9aa18fd22e320fdc96ba97690f8a7d | 10,825 |
def posts_completed(scraped_posts, limit):
"""Returns true if the amount of posts scraped from
profile has reached its limit.
"""
if len(scraped_posts) == limit:
return True
else:
return False | ff72474349a32f326b63b95070927c4b379be800 | 10,826 |
def mag(x):
"""Returns the absolute value squared of the input"""
return np.abs(x)**2 | bd081775a0b99e050287160cf3369faa819e20cf | 10,827 |
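Two quick checks of the helper above; note that it also handles complex input:
import numpy as np
print(mag(3 + 4j))                 # 25.0
print(mag(np.array([1.0, -2.0])))  # [1. 4.]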
def get_zero_columns(matrix):
""" Returns a list of the columns which are all 0 """
rows = matrix.shape[0]
columns = matrix.shape[1]
result = []
for j in range(columns):
is_zero_column = True
for i in range(rows):
is_zero_column = is_zero_column and matrix[i, j] == 0.0
result.append(is_zero_column)
return result | 35694592f4155f710e5ed3c2148a138591cd683f | 10,828 |
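A small worked example (a numpy array is assumed, since the function indexes matrix.shape and matrix[i, j]):
import numpy as np
m = np.array([[0.0, 1.0, 0.0],
              [0.0, 2.0, 0.0]])
print(get_zero_columns(m))  # [True, False, True]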
def traditional_constants_icr_equation_empty_fixed(fixed_params, X_col):
""" Traditional ICR equation with constants from ACE consensus """
a = 450
tdd = X_col[0]
return a / tdd | 2931e4b3592a94690d98b0cb4cb90f712ff4a449 | 10,829 |
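A worked example of the 450 rule above: a total daily dose of 45 units gives an insulin-to-carb ratio of 10 grams of carbohydrate per unit (`fixed_params` is unused by the equation, so None is passed):
print(traditional_constants_icr_equation_empty_fixed(None, [45]))  # 450 / 45 = 10.0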
def sort_completions_key(completion):
"""
sort completions according to their type
Args:
completion (jedi.api.classes.Completion): completion
Returns:
int: sorting order
"""
if completion.type == "function":
return 2
elif completion.type == "instance":
return 1
else:
return 3 | 7bf767d908c83c11dafa5e0fd694bbb31a98c404 | 10,830 |
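A hedged demo with stand-in objects (real callers pass jedi Completion instances): instances sort first, then functions, then everything else:
from types import SimpleNamespace
completions = [SimpleNamespace(type="keyword"),
               SimpleNamespace(type="function"),
               SimpleNamespace(type="instance")]
ordered = sorted(completions, key=sort_completions_key)
print([c.type for c in ordered])  # ['instance', 'function', 'keyword']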
def _is_git_url_mismatch(mismatch_item):
"""Returns whether the given mismatch item is for a GitHub URL."""
_, (required, _) = mismatch_item
return required.startswith('git') | b1c3cec3d8cf3c7d3ffa5c405522b1a08754223b | 10,831 |
def from_url(url, output_path=None, options=None):
"""
    Convert a file or files from URLs to a PDF document
:param url: URL or list of URLs to be saved
:param output_path: (optional) path to output PDF file. If not provided, PDF will be returned as string
:param options: (optional) dict to configure pyppeteer page.pdf action
Returns: output_path if provided else PDF Binary
"""
return async_to_sync(api_async.from_url)(url, output_path, options) | 8543410dcfba9d44adc8939f3dc8be702f5e922b | 10,832 |
def parse_identifier(stream: TokenStream) -> expression.Identifier:
"""Read an identifier from the token stream.
<ident>.<ident>
<ident>["<ident>"]
<ident>["<ident>"].<ident>
<ident>[<ident --> int/str>]
<ident>[<ident>.<ident --> int/str>]
<ident>[<int>]
<ident>[<int>].<ident>
"""
path: expression.IdentifierPath = []
while stream.current.type in IDENTIFIER_TOKENS:
if stream.current.type == TOKEN_IDENTIFIER:
path.append(IdentifierPathElement(stream.current.value))
elif stream.current.type == TOKEN_INTEGER:
path.append(IdentifierPathElement(int(stream.current.value)))
elif stream.current.type == TOKEN_LBRACKET:
stream.next_token() # Eat open bracket
if stream.current.type == TOKEN_STRING:
path.append(IdentifierPathElement(stream.current.value))
elif stream.current.type == TOKEN_NEGATIVE:
expect_peek(stream, TOKEN_INTEGER)
stream.next_token()
path.append(IdentifierPathElement(-int(stream.current.value)))
elif stream.current.type == TOKEN_INTEGER:
path.append(IdentifierPathElement(int(stream.current.value)))
elif stream.current.type == TOKEN_IDENTIFIER:
# Recursive call to parse_identifier. If it's not a string or
# integer, anything inside a pair of square brackets could be
# another identifier that resolves to a string or integer.
path.append(parse_identifier(stream))
else:
raise LiquidSyntaxError(
f"invalid identifier, found {stream.current.type}"
)
expect_peek(stream, TOKEN_RBRACKET)
stream.next_token() # Eat close bracket
elif stream.current.type == TOKEN_DOT:
pass
else:
raise LiquidSyntaxError(f"invalid identifier, found {stream.current.type}")
stream.next_token()
stream.push(stream.current)
return expression.Identifier(path) | 0679a112a841d90d51806d83cd381aad7632c77b | 10,834 |
from typing import List
def cubemap_projection_matrices(from_point: Vector3D, far_plane: float) -> List[np.ndarray]:
"""
Create the required Cubemap projection matrices.
This method is suitable for generating a Shadow Map.
Simply speaking, this method generates 6 different camera matrices from the center of
an imaginary cube and covers all surfaces without conflicting.
Keyword arguments;
from_point -- Imaginary camera location
far_plane -- How far the camera is capable of seeing. (Effects performance!)
"""
def a2np(a: List[float]) -> np.ndarray:
return np.array(a, dtype=np.float32)
shadow_proj = pyrr.matrix44.create_perspective_projection(90.0, 1.0, 0.01, far_plane, np.float32)
lightpos = np.array(list(from_point), dtype=np.float32)[:3]
nx = pyrr.matrix44.create_look_at(
lightpos,
np.array(
lightpos + a2np([-1.0, 0, 0]),
dtype=np.float32,
),
a2np([0, -1.0, 0]),
dtype=np.float32,
)
px = pyrr.matrix44.create_look_at(
lightpos,
np.array(
lightpos + a2np([1, 0, 0]),
dtype=np.float32,
),
a2np([0, -1.0, 0]),
dtype=np.float32,
)
ny = pyrr.matrix44.create_look_at(
lightpos,
np.array(
lightpos + a2np([0, -1, 0]),
dtype=np.float32,
),
a2np([0, 0, -1.0]),
dtype=np.float32,
)
py = pyrr.matrix44.create_look_at(
lightpos,
np.array(
lightpos + a2np([0, 1, 0]),
dtype=np.float32,
),
a2np([0, 0, 1.0]),
dtype=np.float32,
)
pz = pyrr.matrix44.create_look_at(
lightpos,
np.array(
lightpos + a2np([0, 0, 1]),
dtype=np.float32,
),
a2np([0, -1.0, 0]),
dtype=np.float32,
)
nz = pyrr.matrix44.create_look_at(
lightpos,
np.array(
lightpos + a2np([0, 0, -1]),
dtype=np.float32,
),
a2np([0, -1.0, 0]),
dtype=np.float32,
)
return [
px.dot(shadow_proj),
nx.dot(shadow_proj),
py.dot(shadow_proj),
ny.dot(shadow_proj),
pz.dot(shadow_proj),
nz.dot(shadow_proj),
] | e576aceec831df8267bff1c4de3cb7f0a58c3be7 | 10,835 |
from win32com.shell import shellcon, shell
def loadOptionsFile():
"""Find the .buildbot/FILENAME file. Crawl from the current directory up
towards the root, and also look in ~/.buildbot . The first directory
that's owned by the user and has the file we're looking for wins. Windows
skips the owned-by-user test.
@rtype: dict
@return: a dictionary of names defined in the options file. If no options
file was found, return an empty dict.
"""
here = os.path.abspath(os.getcwd())
if runtime.platformType == 'win32':
# never trust env-vars, use the proper API
appdata = shell.SHGetFolderPath(0, shellcon.CSIDL_APPDATA, 0, 0)
home = os.path.join(appdata, "buildbot")
else:
home = os.path.expanduser("~/.buildbot")
searchpath = []
toomany = 20
while True:
searchpath.append(os.path.join(here, ".buildbot"))
next = os.path.dirname(here)
if next == here:
break # we've hit the root
here = next
toomany -= 1 # just in case
if toomany == 0:
raise ValueError("Hey, I seem to have wandered up into the "
"infinite glories of the heavens. Oops.")
searchpath.append(home)
localDict = {}
for d in searchpath:
if os.path.isdir(d):
if runtime.platformType != 'win32':
if os.stat(d)[stat.ST_UID] != os.getuid():
print "skipping %s because you don't own it" % d
continue # security, skip other people's directories
optfile = os.path.join(d, "options")
if os.path.exists(optfile):
                try:
                    with open(optfile, "r") as f:
                        options = f.read()
                    exec(options, localDict)
                except:
                    print("error while reading %s" % optfile)
                    raise
break
    for k in list(localDict.keys()):
if k.startswith("__"):
del localDict[k]
return localDict | 2674c6e37de32f673e4fb9aeb6bb11981bee23d0 | 10,836 |
def get_high_accuracy_voronoi_nodes(structure, rad_dict, probe_rad=0.1):
"""
Analyze the void space in the input structure using high accuracy
voronoi decomposition.
Calls Zeo++ for Voronoi decomposition.
Args:
structure: pymatgen.core.structure.Structure
rad_dict (optional): Dictionary of radii of elements in structure.
If not given, Zeo++ default values are used.
Note: Zeo++ uses atomic radii of elements.
For ionic structures, pass rad_dict with ionic radii
probe_rad (optional): Sampling probe radius in Angstroms.
Default is 0.1 A
Returns:
        voronoi nodes as pymatgen.core.structure.Structure within the
        unit cell defined by the lattice of input structure
        voronoi face centers as pymatgen.core.structure.Structure within the
unit cell defined by the lattice of input structure
"""
with ScratchDir('.'):
name = "temp_zeo1"
zeo_inp_filename = name + ".cssr"
ZeoCssr(structure).write_file(zeo_inp_filename)
rad_flag = True
rad_file = name + ".rad"
with open(rad_file, 'w+') as fp:
for el in rad_dict.keys():
print("{} {}".format(el, rad_dict[el].real), file=fp)
atmnet = AtomNetwork.read_from_CSSR(
zeo_inp_filename, rad_flag=rad_flag, rad_file=rad_file)
# vornet, vor_edge_centers, vor_face_centers = \
# atmnet.perform_voronoi_decomposition()
red_ha_vornet = \
prune_voronoi_network_close_node(atmnet)
# generate_simplified_highaccuracy_voronoi_network(atmnet)
# get_nearest_largest_diameter_highaccuracy_vornode(atmnet)
red_ha_vornet.analyze_writeto_XYZ(name, probe_rad, atmnet)
voro_out_filename = name + '_voro.xyz'
voro_node_mol = ZeoVoronoiXYZ.from_file(voro_out_filename).molecule
species = ["X"] * len(voro_node_mol.sites)
coords = []
prop = []
for site in voro_node_mol.sites:
coords.append(list(site.coords))
prop.append(site.properties['voronoi_radius'])
lattice = Lattice.from_parameters(structure.lattice.parameters)
vor_node_struct = Structure(
lattice, species, coords, coords_are_cartesian=True,
to_unit_cell=True, site_properties={"voronoi_radius": prop})
return vor_node_struct | 2f671f9c8a357bd82f364f767cd387fae2661979 | 10,837 |
def setUpBlobDetector():
"""
Configure parameters for a cv2 blob detector, and returns the detector.
"""
params = cv2.SimpleBlobDetector_Params()
params.minThreshold = 0
params.maxThreshold = 255
params.filterByArea = True
params.minArea = 1500
params.maxArea = 25000
params.filterByCircularity = False
params.filterByColor = False
params.filterByConvexity = False
params.filterByInertia = False
detector = cv2.SimpleBlobDetector_create(params)
return detector | d311f46d9b87d759edae0f15583c66dc31f80602 | 10,838 |
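A hedged usage sketch: run the configured detector on a synthetic grayscale image containing one filled circle whose area falls inside the 1500-25000 px range set above:
import cv2
import numpy as np
img = np.zeros((200, 200), dtype=np.uint8)
cv2.circle(img, (100, 100), 40, 255, -1)  # one filled blob, area ~5000 px
detector = setUpBlobDetector()
keypoints = detector.detect(img)
print(len(keypoints))                     # typically 1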
def raise_keymap():
"""
! @ # $ % || ^ & * ( )
DEL ESC || PGDN PGUP PSCR
CAPS volup ENT reset || UP
voldn super shift space bspc|| alt ent LEFT DOWN RGHT
"""
left = [
[KC.N1, KC.N2, KC.N3, KC.N4, KC.N5],
[KC.F1, KC.F2, KC.F3, KC.F4, KC.F5],
[KC.F11, KC.F12, KC.LPRN, KC.RPRN, KC.AMPR],
[KC.NO, KC.INS, KC.LGUI, KC.LSFT, KC.SPC, KC.BSPC],
]
right = [
[ KC.N6, KC.N7, KC.N8, KC.N9, KC.N0],
[ KC.F6, KC.F7, KC.F8, KC.F9, KC.F10],
[ KC.GRV, KC.LBRC, KC.RBRC, KC.PSLS, KC.BSLS],
[KC.LALT, KC.ENT, KC.TRNS, KC.DOT, KC.PMNS, KC.EQL],
]
return [left, right] | 94beda8275f65f16353b12b22809138d0342f512 | 10,839 |
import click
from typing import cast
def sample_cmd() -> Command:
"""Useful for testing constraints against a variety of parameter kinds.
Parameters have names that should make easy to remember their "kind"
without the need for looking up this code."""
@cloup.command()
# Optional arguments
@click.argument('arg1', required=False)
@click.argument('arg2', required=False)
# Plain options without default
@cloup.option('--str-opt')
@cloup.option('--int-opt', type=int)
@cloup.option('--bool-opt', type=bool)
# Flags
@cloup.option('--flag / --no-flag')
@cloup.option('--flag2', is_flag=True)
# Options with default
@cloup.option('--def1', default=1)
@cloup.option('--def2', default=2)
# Options that take a tuple
@cloup.option('--tuple', nargs=2, type=int)
# Options that can be specified multiple times
@cloup.option('--mul1', type=int, multiple=True)
@cloup.option('--mul2', type=int, multiple=True)
def f(**kwargs):
print('It works')
return cast(Command, f) | c5a8ed369d910872e52ef080707c8f0ae7436487 | 10,840 |
import inspect
def get_linenos(obj):
"""Get an object’s line numbers in its source code file"""
try:
lines, start = inspect.getsourcelines(obj)
except TypeError: # obj is an attribute or None
return None, None
except OSError: # obj listing cannot be found
# This happens for methods that are not explicitly defined
# such as the __init__ method for a dataclass
return None, None
else:
return start, start + len(lines) - 1 | 248ad7e377995e03969d3f7e1ded88670d8b08ea | 10,841 |
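A quick check (run from a file so inspect can locate the source); objects it cannot inspect fall back to (None, None):
def sample():
    return 42
print(get_linenos(sample))  # e.g. (1, 2), depending on where sample sits in the file
print(get_linenos(None))    # (None, None)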