content (string, 35 to 762k chars) | sha1 (string, 40 chars) | id (int64, 0 to 3.66M)
---|---|---|
import requests
def move_release_to_another_collection_folder(user: UserWithUserTokenBasedAuthentication,
username: str,
source_folder_id: int,
destination_folder_id: int,
release_id: int,
instance_id: int
) -> requests.models.Response:
"""
    Move the instance of a release to another folder.
User Authentication needed.
Parameters:
user: user object (required)
username: string (required)
-> The username of the collection you are trying to retrieve.
source_folder_id: number (required)
-> The ID of the source folder.
destination_folder_id: number (required)
-> The ID of the destination folder.
release_id: number (required)
-> The ID of the release you are modifying.
instance_id: number (required)
-> The ID of the instance.
"""
url = f"{USERS_URL}/{username}/collection/folders/{source_folder_id}/releases/{release_id}/instances/{instance_id}"
params = user.params
headers = user.headers
data = {"folder_id": destination_folder_id}
return requests.post(url, headers=headers, params=params, json=data) | 47434146219d323b176db76905a6b5ffb4c25955 | 11,998 |
def load_region_maps(region_file):
"""Extracts creates a map from PHI region id to a continuous region id."""
region_ids = [] # Used mainly for eval
region_ids_inv = {} # Used in data loader
region_names_inv = {} # Used in eval
for l in region_file.read().strip().split('\n'):
tok_name_id, _ = l.strip().split(';') # second field is frequency, unused
region_name, region_id = tok_name_id.split('_')
region_name = region_name.strip()
region_id = int(region_id)
# Ignore unknown regions:
if ((region_name == 'Unknown Provenances' and region_id == 884) or
(region_name == 'unspecified subregion' and region_id == 885) or
(region_name == 'unspecified subregion' and region_id == 1439)):
continue
region_ids.append(region_id)
region_ids_inv[region_id] = len(region_ids_inv)
region_names_inv[len(region_names_inv)] = region_name
return {
'ids': region_ids,
'ids_inv': region_ids_inv,
'names_inv': region_names_inv
} | 201240ce485b4039b12741bb03c547de7976c99a | 11,999 |
import numpy as np
def _find_nearest_idx(a, a0):
"""Element idx in nd array `a` closest to the scalar value `a0`."""
if isinstance(a, list):
a = np.array(a)
idx = np.abs(a - a0).argmin()
return idx | c681b96ee8d3629daacd15650428a36041189739 | 12,000 |
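A quick illustrative check of _find_nearest_idx; plain lists and ndarrays behave the same:
print(_find_nearest_idx([0.0, 1.5, 3.0, 4.2], 2.9))   # 2, since 3.0 is closest to 2.9
print(_find_nearest_idx(np.array([10, 20, 30]), 24))  # 1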
def does_user_have_product(product, username):
"""Return True/False if a user has the specified product."""
try:
instance = adobe_api.AdobeAPIObject(username)
except adobe_api.AdobeAPINoUserException:
return False
return instance.has_product(product) | eda5a4a983fac8fa089c575788982685836b4b87 | 12,002 |
import datetime
def items_for_result(cl, result, form):
"""
Generate the actual list of data.
"""
def link_in_col(is_first, field_name, cl):
if cl.list_display_links is None:
return False
if is_first and not cl.list_display_links:
return True
return field_name in cl.list_display_links
first = True
pk = cl.lookup_opts.pk.attname
for field_index, field_name in enumerate(cl.list_display):
empty_value_display = cl.model_admin.get_empty_value_display()
row_classes = ['field-%s' % _coerce_field_name(field_name, field_index)]
try:
f, attr, value = lookup_field(field_name, result, cl.model_admin)
except ObjectDoesNotExist:
result_repr = empty_value_display
else:
empty_value_display = getattr(attr, 'empty_value_display', empty_value_display)
if f is None or f.auto_created:
if field_name == 'action_checkbox':
row_classes = ['action-checkbox']
boolean = getattr(attr, 'boolean', False)
result_repr = display_for_value(value, empty_value_display, boolean)
if isinstance(value, (datetime.date, datetime.time)):
row_classes.append('nowrap')
else:
if isinstance(f.remote_field, models.ManyToOneRel):
field_val = getattr(result, f.name)
if field_val is None:
result_repr = empty_value_display
else:
result_repr = field_val
else:
result_repr = display_for_field(value, f, empty_value_display)
if isinstance(f, (models.DateField, models.TimeField, models.ForeignKey)):
row_classes.append('nowrap')
elif isinstance(f, (models.BooleanField)):
row_classes.append('checkmark-td')
if value:
row_classes.append('positive')
else:
row_classes.append('negative')
elif isinstance(f, models.FileField):
row_classes.append('file-td')
row_class = mark_safe(' class="%s"' % ' '.join(row_classes))
# If list_display_links not defined, add the link tag to the first field
if link_in_col(first, field_name, cl):
table_tag = 'td'
first = False
# Display link to the result's change_view if the url exists, else
# display just the result's representation.
try:
url = cl.url_for_result(result)
except NoReverseMatch:
link_or_text = result_repr
else:
url = add_preserved_filters({'preserved_filters': cl.preserved_filters, 'opts': cl.opts}, url)
# Convert the pk to something that can be used in Javascript.
# Problem cases are non-ASCII strings.
if cl.to_field:
attr = str(cl.to_field)
else:
attr = pk
value = result.serializable_value(attr)
link_or_text = result_repr
# format_html(
# '<a href="{}"{}>{}</a>',
# url,
# format_html(
# ' data-popup-opener="{}"', value
# ) if cl.is_popup else '',
# result_repr)
yield format_html('<{}{}>{}</{}>', table_tag, row_class, link_or_text, table_tag)
else:
# By default the fields come from ModelAdmin.list_editable, but if we pull
# the fields out of the form instead of list_editable custom admins
# can provide fields on a per request basis
if (form and field_name in form.fields and not (
field_name == cl.model._meta.pk.name and
form[cl.model._meta.pk.name].is_hidden)):
bf = form[field_name]
result_repr = mark_safe(str(bf.errors) + str(bf))
yield format_html('<td{}>{}</td>', row_class, result_repr)
info = (result._meta.app_label, result._meta.model_name)
admin_url = reverse('admin:%s_%s_change' % info, args=(result.pk,))
yield format_html(f'<td><a href={admin_url}></a></td>')
if form and not form[cl.model._meta.pk.name].is_hidden:
yield format_html('<td>{}</td>', form[cl.model._meta.pk.name]) | 99ec81a16f833d095de880f459dcd97dce056ccd | 12,003 |
def system_to_ntp_time(timestamp):
"""Convert a system time to a NTP time.
Parameters:
timestamp -- timestamp in system time
Returns:
corresponding NTP time
"""
return timestamp + NTP_DELTA | 2f0081e6c473b05302a5c08dc1818cea0c500caa | 12,004 |
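NTP_DELTA is not defined in this snippet; the conventional value is the number of seconds between the NTP epoch (1900-01-01) and the Unix epoch (1970-01-01). A hedged sketch under that assumption:
import time

NTP_DELTA = 2208988800  # assumed: seconds from 1900-01-01 to 1970-01-01
print(system_to_ntp_time(time.time()))  # current time expressed on the NTP timescale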
from torch import nn
def bcewithlogits_loss(weight=None, size_average=None, reduce=None, reduction='mean', pos_weight=None):
"""Creates a criterion that combines a `Sigmoid` layer and the `BCELoss` in one single
class
Arguments:
        weight (Tensor, optional) : A manual rescaling weight given to the loss of each batch element.
size_average (bool, optional) : By default, the losses are averaged over each loss element in the batch. Note that for some losses, there are multiple
elements per sample. If the field size_average is set to False, the losses are instead summed for each minibatch. Ignored when reduce is False.
(default: True)
reduce (bool, optional) : By default, the losses are averaged or summed over observations for each minibatch depending on size_average. When reduce is
False, returns a loss per batch element instead and ignores size_average.
(default: True)
reduction (string, optional) : Specifies the reduction to apply to the output: 'none' | 'mean' | 'sum'.
(default: 'mean')
pos_weight (Tensor, optional) : a weight of positive examples. Must be a vector with length equal to the number of classes.
Returns:
BCEWithLogitsLoss
"""
return nn.BCEWithLogitsLoss(weight, size_average, reduce, reduction, pos_weight) | 57c125abb1df39d6e8b77bc741b31aa9bbaba9fc | 12,005 |
from typing import Optional
def transition_matrix(
adata: AnnData,
vkey: str = "velocity",
backward: bool = False,
weight_connectivities: Optional[float] = None,
sigma_corr: Optional[float] = None,
scale_by_variances: bool = False,
var_key: Optional[str] = "velocity_graph_uncertainties",
var_min: float = 0.1,
use_negative_cosines: bool = True,
self_transitions: bool = False,
perc: Optional[float] = None,
threshold: Optional[float] = None,
density_normalize: bool = True,
) -> KernelExpression:
"""
Compute a transition matrix based on a combination of RNA Velocity and transcriptomic similarity.
To learn more about the way in which the transition matrices are computed, see
:class:`cellrank.tl.kernels.VelocityKernel` for the velocity-based transition matrix and
:class:`cellrank.tl.kernels.ConnectivityKernel` for the transcriptomic-similarity-based transition matrix.
Params
------
adata: :class:`anndata.AnnData`
Annotated data object.
vkey
Key from :paramref:`adata` `.layers` to access the velocities.
backward
Direction of the process.
weight_connectivities
Weight given to transcriptomic similarities as opposed to velocities. Must be in `[0, 1]`.
use_negative_cosines
Whether to use correlations with cells that have an angle > 90 degree with :math:`v_i`.
sigma_corr
Scaling parameter for the softmax. Larger values will lead to a more concentrated distribution (more peaked).
Default is to use 1 / median_velocity_correlation.
scale_by_variances
Use velocity variances to scale the softmax.
var_key
        Key from `adata.uns` to access velocity variances.
var_min
Variances are clipped to this value at the lower end.
self_transitions
Assigns elements to the diagonal of the velocity-graph based on a confidence measure
perc
Quantile of the distribution of exponentiated velocity correlations. This is used as a threshold to set
smaller values to zero.
threshold
Set a threshold to remove exponentiated velocity correlations smaller than :paramref:`threshold`.
density_normalize
Whether to use density correction when computing the transition probabilities.
Density correction is done as by [Haghverdi16]_.
Returns
-------
:class:`cellrank.tl.KernelExpression`
A kernel expression object.
"""
# initialise the velocity kernel and compute transition matrix
vk = VelocityKernel(
adata,
backward=backward,
vkey=vkey,
use_negative_cosines=use_negative_cosines,
var_key=var_key,
)
vk.compute_transition_matrix(
sigma_corr=sigma_corr,
scale_by_variances=scale_by_variances,
var_min=var_min,
self_transitions=self_transitions,
perc=perc,
threshold=threshold,
density_normalize=density_normalize,
)
if weight_connectivities is not None:
if 0 < weight_connectivities < 1:
logg.info(
f"Using a connectivity kernel with weight `{weight_connectivities}`"
)
ck = ConnectivityKernel(adata, backward=backward).compute_transition_matrix(
density_normalize=density_normalize
)
final = (1 - weight_connectivities) * vk + weight_connectivities * ck
elif weight_connectivities == 0:
final = vk
elif weight_connectivities == 1:
final = ConnectivityKernel(
adata, backward=backward
).compute_transition_matrix(density_normalize=density_normalize)
else:
raise ValueError(
f"The parameter `weight_connectivities` must be in range `[0, 1]`, found `{weight_connectivities}`."
)
else:
final = vk
final.write_to_adata()
return final | fde82fe71e6e63a5842cf041de60afa10316442d | 12,006 |
from typing import Union
from typing import Any
from datetime import datetime, timedelta
def create_access_token(
subject: Union[str, Any], expires_delta: timedelta = None, is_superuser: bool = False
) -> str:
"""
generate jwt token
:param subject: subject need to save in token
:param expires_delta: expires time
:return: token
"""
if expires_delta:
expire = datetime.utcnow() + expires_delta
else:
expire = datetime.utcnow() + timedelta(minutes=settings.ACCESS_TOKEN_EXPIRE)
to_encode = {"exp": expire, "sub": str(subject)}
# superuser token can always access
if is_superuser:
to_encode.pop("exp")
encoded_jwt = jwt.encode(
to_encode, settings.SECRET_KEY, algorithm=settings.TOKEN_ALGORITHMS
)
return encoded_jwt | b07c94ae32311f71737daec6e168ef9deb12ef38 | 12,007 |
import numpy
def transparency(wavelength):
"""Returns the sky transparency in [0, 1] for wavelength in [m]"""
wavelength = wavelength / 10**-9
idx = numpy.argmin(numpy.abs(
data_transparency['wavelength'] * 1000 - wavelength))
return data_transparency['fraction'][idx] | 168691af8b8772203493df146b4b9629100e1002 | 12,008 |
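data_transparency is not defined in this snippet; judging from the "* 1000" conversion it presumably tabulates wavelengths in micrometres alongside a transmitted fraction. A purely hypothetical stand-in to show the call:
data_transparency = {
    'wavelength': numpy.array([0.4, 0.55, 0.7]),  # assumed unit: micrometres
    'fraction': numpy.array([0.6, 0.8, 0.75]),
}
print(transparency(550e-9))  # 0.8, nearest tabulated wavelength is 0.55 um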
def parse_all_moves(moves_string):
""" Parse a move string """
moves = []
if not moves_string:
raise ValueError("No Moves Given")
moves_strings = moves_string.split(" ")
for move_string in moves_strings:
move = CubeMove.parse(move_string)
moves.append(move)
return moves | ddc063574b8cfe5a2a7bb604033976e505f85df2 | 12,010 |
import itertools
def merge_samples(samples, nchannels, weight_table=None):
"""
    Merges multiple samples into a single sample
:param samples: the samples, must have the same sample rate and channel count
:param nchannels: the number of channels
:param weight_table: adds a specific weight to each sample when merging the sound
:return: the merged sample
"""
zipped = itertools.zip_longest(*samples, fillvalue=(0 for _ in range(nchannels)))
mapped = map(lambda x:
(__weighted_avg(itertools.islice(itertools.chain(*x), c, len(samples), nchannels), weight_table,
len(samples)) for c in range(nchannels)),
zipped)
return mapped | dcae84c506ea13cc130c3846cbe250d59b6f5115 | 12,011 |
def get_spam_info(msg: Message, max_score=None) -> (bool, str):
"""parse SpamAssassin header to detect whether a message is classified as spam.
Return (is spam, spam status detail)
The header format is
```X-Spam-Status: No, score=-0.1 required=5.0 tests=DKIM_SIGNED,DKIM_VALID,
DKIM_VALID_AU,RCVD_IN_DNSWL_BLOCKED,RCVD_IN_MSPIKE_H2,SPF_PASS,
URIBL_BLOCKED autolearn=unavailable autolearn_force=no version=3.4.2```
"""
spamassassin_status = msg["X-Spam-Status"]
if not spamassassin_status:
return False, ""
return get_spam_from_header(spamassassin_status, max_score=max_score) | e11f806a8b8007d25898545b9b16f9ec2207fa89 | 12,012 |
def _check(isamAppliance, id=None):
"""
    Check whether a user with the given id exists
:param isamAppliance:
    :param id:
:return:
"""
ret_obj = get_all(isamAppliance)
    if id is not None:
for users in ret_obj['data']:
if users['id'] == id:
return True
return False | 36ccb79bf303a6cf0c3eb7a33e7b2c8c1d1090d7 | 12,013 |
def plot_boxes_on_image(image, boxes, color=(0,255,255), thickness=2):
"""
Plot the boxes onto the image.
For the boxes a center, size representation is expected: [cx, cy, w, h].
:param image: The image onto which to draw.
:param boxes: The boxes which shall be plotted.
:return: An image with the boxes overlayed over the image.
"""
for box in boxes:
start_point = tuple([int(x) for x in box[:2] - box[2:] // 2])
end_point = tuple([int(x) for x in box[:2] + box[2:] // 2])
image = cv2.rectangle(image, start_point, end_point, color, thickness)
return image | 9bd58bb72e8b37b446f342eb145203af50175732 | 12,015 |
import re
def get_text(markup: str) -> str:
"""Remove html tags, URLs and spaces using regexp"""
text = re.sub(r"<.*?>", "", markup)
url_pattern = r"(http|ftp)s?://(?:[a-zA-Z]|[0-9]|[$-_@.&#+]|[!*\(\),]|\
(?:%[0-9a-fA-F][0-9a-fA-F]))+"
text = re.sub(url_pattern, "", text)
text = re.sub(r"\s+", " ", text)
return text.strip()
def preprocess_token(token: Token) -> str:
"""Remove grave accents and return lemmatized token lower case"""
result = remplace_accents(token.lemma_.strip().lower())
return result
def is_token_allowed(token: Token) -> bool:
"""No Stop words, No Punctuations or len token >= 3"""
# Avoid token: inmiscuyéndose lemma_ "inmiscuir el"
if (
not token
or token.is_space
or token.is_stop
or token.is_punct
or len(token) < 3
or " " in token.lemma_.strip()
):
return False
return True | 76b1c86cd852f82fecaa6011aa5e828bae0b4b45 | 12,016 |
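For example, stripping markup and a URL with get_text (the output shown is what the regexes above produce):
sample = 'Visit https://example.com/docs   for <b>more</b> info'
print(get_text(sample))  # 'Visit for more info'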
def intercept_channel(channel, *interceptors):
"""Intercepts a channel through a set of interceptors.
This is an EXPERIMENTAL API.
Args:
channel: A Channel.
interceptors: Zero or more objects of type
UnaryUnaryClientInterceptor,
UnaryStreamClientInterceptor,
StreamUnaryClientInterceptor, or
StreamStreamClientInterceptor.
Interceptors are given control in the order they are listed.
Returns:
A Channel that intercepts each invocation via the provided interceptors.
Raises:
TypeError: If interceptor does not derive from any of
UnaryUnaryClientInterceptor,
UnaryStreamClientInterceptor,
StreamUnaryClientInterceptor, or
StreamStreamClientInterceptor.
"""
from grpc import _interceptor # pylint: disable=cyclic-import
return _interceptor.intercept_channel(channel, *interceptors) | 0f56be58125afcad9afd7aefd5ba55c6ca3b2970 | 12,017 |
def create_static_ip(compute, project, region, name):
"""Create global static IP
:param compute: GCE compute resource object using googleapiclient.discovery
:param project: string, GCE Project Id
:param region: string, GCE region
:param name: string, Static IP name
:return: Operation information
:rtype: dict
"""
return compute.addresses().insert(project=project, region=region, body={
'name': name,
}).execute() | 9f2f608ab3878c1534b3af47421866411d1ca523 | 12,018 |
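A hedged usage sketch: it assumes Google Cloud application-default credentials are configured and uses googleapiclient.discovery to build the compute resource object the docstring refers to; the project, region and address names are placeholders.
from googleapiclient import discovery

compute = discovery.build('compute', 'v1')  # requires valid GCP credentials
operation = create_static_ip(compute, 'my-project', 'us-central1', 'my-static-ip')
print(operation.get('status'))  # e.g. 'PENDING' or 'RUNNING'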
import torch
import torch.nn.functional as F
def ctc_loss(encoder_outputs, labels, frame_lens, label_lens, reduction, device):
"""
All sorts of stupid restrictions from documentation:
In order to use CuDNN, the following must be satisfied:
1. targets must be in concatenated format,
2. all input_lengths must be T.
3. blank=0
4. target_lengths \leq 256,
5. the integer arguments must be of dtype torch.int32.
"""
assert (frame_lens[1:] - frame_lens[:-1] >= 0).all() # assert in increasing len
# req (5)
labels, frame_lens, label_lens = transform_data(lambda data: torch.tensor(data, dtype=torch.int32).to(device),
labels, frame_lens, label_lens)
# req (4)
skipped_indices, working_indices = filter_data_on_len(label_lens, max_len=256)
if len(skipped_indices) > 0:
print('some labels too long, unable to compute CTC...')
if len(working_indices) == 0:
print('skipping entire batch')
return None
print('skipping indices in batch: ' + str(skipped_indices))
working_indices = torch.LongTensor(working_indices).to(device)
(encoder_outputs, labels, frame_lens,
label_lens) = transform_data(lambda data: data.index_select(0, working_indices),
encoder_outputs, labels, frame_lens, label_lens)
# frame_lens 1, 1, 2, 3, 3, 3, 4
# frame_len[1:] 1, 2, 3, 3, 3, 4
# frame_lebs[:-1] 1, 1, 2, 3, 3, 3
# diff 0, 1, 1, 0, 0, 1
# nonzero_idx 1, 2, 5
# change_points 2, 3, 6
change_points = (frame_lens[1:] - frame_lens[:-1]).nonzero().squeeze(dim=-1) + 1
change_points = torch.cat([change_points, torch.LongTensor([len(frame_lens)]).to(device)]) # add last portion
# req 2
prev_change_point = 0
total_loss = 0
count = 0
global_encoder_outputs, global_labels, global_frame_lens, global_label_lens = encoder_outputs, labels, frame_lens, label_lens
for change_point in change_points:
# we call this a minibatch
minibatch_size = len(frame_lens)
(encoder_outputs, labels, frame_lens,
label_lens) = transform_data(lambda data: data[prev_change_point:change_point],
global_encoder_outputs, global_labels, global_frame_lens, global_label_lens)
# req 3; moves up so that we leave idx=0 to blank
labels = labels + 1
# req 1
concat_labels = torch.cat([label[:label_len] for label, label_len in zip(labels, label_lens)])
loss = F.ctc_loss(encoder_outputs.transpose(0, 1).cpu(), concat_labels.cpu(), frame_lens.cpu(), label_lens.cpu(), blank=0, reduction=reduction)
if torch.isinf(loss):
print('inf CTC loss occurred...')
skipped_indices, working_indices = ctc_fallback(encoder_outputs, labels, frame_lens, label_lens, 0)
if len(working_indices) == 0:
print('skipping the entire minibatch')
continue
print('skipping indices in minibatch: ' + str(skipped_indices))
working_indices = torch.LongTensor(working_indices).to(device)
(encoder_outputs, labels, frame_lens,
label_lens) = transform_data(lambda data: data.index_select(0, working_indices),
encoder_outputs, labels, frame_lens, label_lens)
concat_labels = torch.cat([label[:label_len] for label, label_len in zip(labels, label_lens)])
loss = F.ctc_loss(encoder_outputs.transpose(0, 1).cpu(), concat_labels.cpu(), frame_lens.cpu(), label_lens.cpu(), blank=0, reduction=reduction)
minibatch_size = len(working_indices)
if reduction == 'mean':
loss *= minibatch_size
count += minibatch_size
total_loss += loss
prev_change_point = change_point
if total_loss == 0:
# all data points failed
return None
return total_loss / count if reduction == 'mean' else total_loss | 9907e1890669da7843efea080f5da51f64a82c1a | 12,019 |
import re
def expand_parameters(host, params):
"""Expand parameters in hostname.
Examples:
* "target{N}" => "target1"
* "{host}.{domain} => "host01.example.com"
"""
pattern = r"\{(.*?)\}"
def repl(match):
param_name = match.group(1)
return params[param_name]
return re.sub(pattern, repl, host) | 04f62924fdc77b02f3a393e5cc0c5382d1d4279a | 12,020 |
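Running the examples from the docstring:
print(expand_parameters("target{N}", {"N": "1"}))  # target1
print(expand_parameters("{host}.{domain}", {"host": "host01", "domain": "example.com"}))  # host01.example.com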
import math
import time
import numpy as np
import matplotlib.pyplot as plt
def search_s1(saturation, size, startTime):
"""
First stage for sequential adsorption.
Returns list of circles, current saturation, list of times and list
of saturations.
Keyword arguments:
size -- radius of single circle
saturation -- max saturation
startTime -- start time of algorithm
"""
D = size*2
rC = size*5
com_sat = 0
N = 0
ntimeList = []
satList = []
circles = [plt.Circle((np.random.rand(),np.random.rand()), size)]
while(com_sat < saturation and N <= 1000):
N += 1
newX = np.random.rand()
newY = np.random.rand()
neighborList = neighbors(newX, newY, circles, rC)
if len(neighborList) != 0:
for e in neighborList:
circleX = circles[e].get_center()[0]
circleY = circles[e].get_center()[1]
if (math.sqrt((newX - circleX)**2 + (newY - circleY)**2) < D or
math.sqrt((newX - circleX-V)**2 + (newY - circleY)**2) < D or
math.sqrt((newX - circleX+V)**2 + (newY - circleY)**2) < D or
math.sqrt((newX - circleX)**2 + (newY - circleY-V)**2) < D or
math.sqrt((newX - circleX)**2 + (newY - circleY+V)**2) < D or
math.sqrt((newX - circleX+V)**2 + (newY - circleY+V)**2) < D or
math.sqrt((newX - circleX-V)**2 + (newY - circleY+V)**2) < D or
math.sqrt((newX - circleX+V)**2 + (newY - circleY-V)**2) < D or
math.sqrt((newX - circleX-V)**2 + (newY - circleY-V)**2) < D):
collision = 1
break
else:
collision = 0
if (collision == 0):
circles.append(plt.Circle((newX, newY), size))
com_sat = math.pi * size**2 * len(circles) * 100
ntimeList.append(time.time() - startTime)
satList.append(com_sat)
N = 0
else:
circles.append(plt.Circle((newX, newY), size))
return circles, com_sat, satList, ntimeList | 386909b285a5504a075f496edeb6e45bb41b6bc3 | 12,021 |
def draw_labeled_bounding_boxes(img, labeled_frame, num_objects):
"""
Starting from labeled regions, draw enclosing rectangles in the original color frame.
"""
# Iterate through all detected cars
for car_number in range(1, num_objects + 1):
# Find pixels with each car_number label value
rows, cols = np.where(labeled_frame == car_number)
# Find minimum enclosing rectangle
x_min, y_min = np.min(cols), np.min(rows)
x_max, y_max = np.max(cols), np.max(rows)
cv2.rectangle(img, (x_min, y_min), (x_max, y_max), color=(255, 0, 0), thickness=6)
return img | 3eeacafb08a15a98ec70567361c2b7d807af156c | 12,022 |
import re
def _skip_comments_and_whitespace(lines, idx):
###############################################################################
"""
Starting at idx, return next valid idx of lines that contains real data
"""
if (idx == len(lines)):
return idx
comment_re = re.compile(r'^[#!]')
lines_slice = lines[idx:]
for line in lines_slice:
line = line.strip()
if (comment_re.match(line) is not None or line == ""):
idx += 1
else:
return idx
return idx | b2b794681859eaa22dfc1807211bf050423cd107 | 12,023 |
def named_payload(name, parser_fn):
"""Wraps a parser result in a dictionary under given name."""
return lambda obj: {name: parser_fn(obj)} | 259525b93d056e045b0f8d5355d4028d67bfac45 | 12,024 |
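For instance, wrapping int as a parser:
parse_count = named_payload("count", int)
print(parse_count("42"))  # {'count': 42}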
def prod_finished(job):
"""Check if prod stage is finished."""
try:
step = "prod" + str(job.doc.prod_replicates_done - 1)
except (KeyError, AttributeError):
step = "prod" + "0"
run_file = job.ws + "/run.{}".format(step)
if job.isfile("run.{}".format(step)):
with open(run_file) as myfile:
return "Program ended" in myfile.read()
else:
return False | 4ce509bb6656555a26384f733ccc91b974636d5f | 12,026 |
def PrimaryCaps(layer_input, name, dim_capsule, channels, kernel_size=9, strides=2, padding='valid'):
""" PrimaryCaps layer can be seen as a convolutional layer with a different
activation function (squashing)
:param layer_input
:param name
:param dim_capsule
:param channels
:param kernel_size
"""
assert channels % dim_capsule == 0, "Invalid size of channels and dim_capsule"
    # I.e. each primary capsule contains 8 convolutional units with a 9x9 kernel and a stride of 2.
num_filters = channels * dim_capsule
conv_layer = layers.Conv2D(
name=name,
filters=num_filters,
kernel_size=kernel_size,
strides=strides,
        activation=None,  # We apply squashing later, therefore no activation function is needed here
padding=padding)(layer_input)
    # In total PrimaryCapsules has [32x6x6] capsule outputs (each output is an 8D vector) and each
# capsule in the [6x6] grid is sharing their weights with each other
# See https://keras.io/layers/core/#reshape
reshaped_conv = layers.Reshape(target_shape=(-1, dim_capsule))(conv_layer)
# Now lets apply the squashing function
return layers.Lambda(squashing)(reshaped_conv) | 41bb066e6b2fdac74f3002ae63055096228bd386 | 12,027 |
from PIL import ImageFont
def get_font(args):
"""
Gets a font.
:param args: Arguments (ttf and ttfsize).
:return: Font.
"""
try:
return ImageFont.truetype(args.ttf, args.ttfsize)
    except Exception:
return ImageFont.load_default() | aee4761c93ef177f26b1bfd81ad5149186e32d47 | 12,028 |
def zeros(shape):
"""
Creates and returns a new array with the given shape which is filled with zeros.
"""
mat = empty(shape)
return fill(mat, 0.0) | 7cbe68c9928094e3588560643b8029867fa51ab7 | 12,029 |
def unpack_puzzle_input(dir_file: str) -> tuple[list, list]:
"""
Args:
dir_file (str): location of .txt file to pull data from
Returns:
bingo numbers and bingo cards in list format
"""
with open(dir_file, "r") as file:
content = file.read().splitlines()
bingo_numbers = [int(i) for i in content[0].split(",")]
bingo_cards = []
for index in range(2, len(content)):
if content[index-1] == '':
bingo_cards.append([[int(i) for i in content[index].split()]])
elif content[index] != '':
bingo_cards[-1].append([int(i) for i in content[index].split()])
return bingo_numbers, bingo_cards | 47ea8846233aabf1bc8e07f22e9993b7a5a328e1 | 12,030 |
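A small, self-contained round-trip check, assuming the usual format of draw numbers on the first line followed by blank-line-separated boards; the sample data is made up:
import os
import tempfile

sample = "7,4,9\n\n22 13 17\n 8  2 23\n21  9 14\n"
with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as fh:
    fh.write(sample)
numbers, cards = unpack_puzzle_input(fh.name)
print(numbers)   # [7, 4, 9]
print(cards[0])  # [[22, 13, 17], [8, 2, 23], [21, 9, 14]]
os.remove(fh.name)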
def blank_response():
"""Fixture that constructs a response with a blank body."""
return build_response(data="") | 5ffe8c5b0b775db68c626f1e9cce8a83c66978ff | 12,031 |
import einops
import torch
def dense_image_warp(image:torch.Tensor, flow:torch.Tensor) -> torch.Tensor:
"""Image warping using per-pixel flow vectors.
See [1] for the original reference (Note that the tensor shape is different, etc.).
[1] https://www.tensorflow.org/addons/api_docs/python/tfa/image/dense_image_warp
Parameters
----------
image : torch.Tensor [shape=(batch, channels, height, width)]
flow : torch.Tensor [shape=(batch, 2, height, width)]
Returns
-------
warped_image : torch.Tensor [shape=(batch, channels, height, width)]
"""
batch_size, channels, height, width = image.shape
# The flow is defined on the image grid. Turn the flow into a list of query
# points in the grid space.
y_range = torch.arange(0., height, device=image.device, requires_grad=False)
x_range = torch.arange(0., width, device=image.device, requires_grad=False)
y_grid, x_grid = torch.meshgrid(y_range, x_range)
stacked_grid = torch.stack((y_grid, x_grid), dim=0) # shape=(2, height, width)
batched_grid = stacked_grid.unsqueeze(0) # shape=(1, 2, height, width)
query_points_on_grid = batched_grid - flow # shape=(batch_size, 2, height, width)
query_points_flattened = einops.rearrange(query_points_on_grid, 'b x h w -> b (h w) x') # shape=(batch_size, height * width, 2)
# Compute values at the query points, then reshape the result back to the
# image grid.
interpolated = interpolate_bilinear(image, query_points_flattened) # shape=(batch_size, channels, n_queries)
interpolated = einops.rearrange(interpolated, 'b c (h w) -> b c h w', h=height, w=width)
return interpolated | 324be2c28dceeeb8079b4a89d94908e1a208c0a1 | 12,032 |
def validateRange(rangeStr : str) -> bool:
"""Validates the range argument"""
# type cast and compare
try:
# get range indices
ranges = rangeStr.split(",", 1)
rangeFrom = 0 if ranges[0] == "" else int(ranges[0])
rangeTo = 0 if ranges[1] == "" else int(ranges[1])
# check first if both ranges are not set
# using the -r , hack
if ranges == ["", ""]:
return False
# check if any of the range param is set
# and do testing per side
# if either range start/end is set and is <= 0:
if (ranges[0] != "" and rangeFrom < 0) or\
(ranges[1] != "" and rangeTo < 0):
return False
elif (ranges[0] != "") and (ranges[1] != ""):
# if both are set, do conditions here
# if from == to or from > to or from,to <=0, fail
if (rangeFrom == rangeTo) or\
(rangeFrom > rangeTo) or\
((rangeFrom <= 0) or (rangeTo <= 0)):
return False
except (ValueError, IndexError, AttributeError):
return False
return True | 375d80ef61c429a4e22df7321d223fe18939f597 | 12,033 |
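Illustrative calls, each traced against the checks above:
print(validateRange("1,5"))  # True
print(validateRange("5,1"))  # False, start is after end
print(validateRange(","))    # False, neither bound given
print(validateRange("abc"))  # False, not parseable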
from operator import add
def update_log_ip_dict_per_ingress_egress_point(flow_ingress_asn, flow_ip, origin_asn, ip_prefix, country_code, flow_bytes, flow_packets, d_ipsrc_level_analysis_perpoint):
"""
Account for unique IPAddresses, BGP prefixes, origin_asn per ingress/egress points.
:param flow_ingress_asn:
:param flow_ip:
:param origin_asn:
:param ip_prefix:
    :param country_code:
    :param flow_bytes:
    :param flow_packets:
    :param d_ipsrc_level_analysis_perpoint:
:return: dict of dict {'1234': {('10.10.10.1', 23456, '10.0.0.0/8'): [1]},
'5678': {('181.3.50.1', 98765, '181.3.50.0/20'): [1]}, ...}
"""
k = (flow_ip, origin_asn, ip_prefix, country_code)
values = [1, flow_bytes, flow_packets]
flow_ingress_asn = frozenset(flow_ingress_asn)
if flow_ingress_asn not in d_ipsrc_level_analysis_perpoint.keys():
d_ipsrc_level_analysis_perpoint[flow_ingress_asn] = dict()
d_ipsrc_level_analysis_perpoint[flow_ingress_asn][k] = values
else:
if k not in d_ipsrc_level_analysis_perpoint[flow_ingress_asn]:
d_ipsrc_level_analysis_perpoint[flow_ingress_asn][k] = values
else:
            d_ipsrc_level_analysis_perpoint[flow_ingress_asn][k] = list(map(add, d_ipsrc_level_analysis_perpoint[flow_ingress_asn][k], values))
return d_ipsrc_level_analysis_perpoint | ad6ccefd62b11f3cf1a7b5e452789ddf22fcad55 | 12,034 |
import yaml
def _config_from_file(configfile):
"""Return a dict containing all of the config values found in the given
configfile.
"""
conf = {}
# set from config if possible
if configfile:
with open(configfile, 'r') as fp:
            config_yaml = yaml.safe_load(fp)
conf = config_yaml
# in the config yaml, 'years' is a map of years to styles; in the config
# dict used in this module, 'year_styles' is that map and 'years' is
# simply a list of the years to graph
conf['year_styles'] = conf.pop('years', {})
conf['years'] = list(conf['year_styles'].keys())
return conf | 2ab4d779fd18c13054e3c40ae411106856fae9ae | 12,035 |
def odr_planar_fit(points, rand_3_estimate=False):
"""
Fit a plane to 3d points.
Orthogonal distance regression is performed using the odrpack.
Parameters
----------
points : list of [x, y, z] points
rand_3_estimate : bool, optional
First estimation of the plane using 3 random points from the input points list.
Default is False which implies a regular least square fit for the first estimation.
Returns
-------
ndarray
"""
def f_3(beta, xyz):
""" implicit definition of the plane"""
return beta[0] * xyz[0] + beta[1] * xyz[1] + beta[2] * xyz[2] + beta[3]
    # Coordinates of the 3D points
x = points[:, 0]
y = points[:, 1]
z = points[:, 2]
# x = np.r_[9, 35, -13, 10, 23, 0]
# y = np.r_[34, 10, 6, -14, 27, -10]
# z = np.r_[100, 101, 101, 100, 101, 101]
if rand_3_estimate:
# initial guess for parameters
# select 3 random points
i = np.random.choice(len(x), size=3, replace=False)
# Form the 3 points
r_point_1 = np.r_[x[i[0]], y[i[0]], z[i[0]]]
r_point_2 = np.r_[x[i[1]], y[i[1]], z[i[1]]]
r_point_3 = np.r_[x[i[2]], y[i[2]], z[i[2]]]
# Two vectors on the plane
v_1 = r_point_1 - r_point_2
v_2 = r_point_1 - r_point_3
# normal to the 3-point-plane
u_1 = np.cross(v_1, v_2)
# Construct the first estimation, beta0
d_0 = u_1[0] * r_point_1[0] + u_1[1] * r_point_1[1] + u_1[2] * r_point_1[2]
beta0 = np.r_[u_1[0], u_1[1], u_1[2], d_0]
else:
beta0 = lstsq_planar_fit(points)
# Create the data object for the odr. The equation is given in the implicit form 'a*x + b*y + c*z + d = 0' and
# beta=[a, b, c, d] (beta is the vector to be fitted). The positional argument y=1 means that the dimensionality
# of the fitting is 1.
lsc_data = odr.Data(np.row_stack([x, y, z]), y=1)
# Create the odr model
lsc_model = odr.Model(f_3, implicit=True)
# Create the odr object based on the data, the model and the first estimation vector.
lsc_odr = odr.ODR(lsc_data, lsc_model, beta0)
# run the regression.
lsc_out = lsc_odr.run()
return lsc_out.beta / lsc_out.beta[3] | d4b542f1527fc89937be92e3ff1ffc827cb5cced | 12,036 |
import numpy as np
def adjust_learning_rate_lrstep(epoch, opt):
    """Sets the learning rate to the initial LR decayed by the decay rate at every decay step"""
steps = np.sum(epoch > np.asarray(opt.lr_decay_epochs))
if steps > 0:
new_lr = opt.lr_init * (opt.lr_decay_rate ** steps)
return new_lr
return opt.lr_init | e3af0a5e654595f309a2a202d0620b57e5968530 | 12,037 |
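A sketch of the schedule's behaviour, using types.SimpleNamespace as a stand-in for the opt object; the attribute names are taken from the function, the numbers are arbitrary:
from types import SimpleNamespace

opt = SimpleNamespace(lr_init=0.1, lr_decay_rate=0.5, lr_decay_epochs=[30, 60, 90])
for epoch in (10, 31, 61, 91):
    print(epoch, adjust_learning_rate_lrstep(epoch, opt))
# 10 -> 0.1, 31 -> 0.05, 61 -> 0.025, 91 -> 0.0125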
def subplot(n, m, k):
"""
Create a subplot command
Example::
import numpy as np
x = np.linspace(-5, 5, 1000)
figure(1)
subplot(2, 1, 1)
plot(x, np.sin(x), "r+")
subplot(2, 1, 2)
plot(x, np.cos(x), "g-")
show()
"""
global _current_axes
    lig = (k - 1) // m
col = (k - 1) % m
fig = gcf()
axe = fig.get_axes(lig, col)
_current_axes = axe
return axe | d7793d099a57ad8d825e16e737dc72f28dd0456c | 12,038 |
from typing import Any
from typing import cast
def _create_gdcm_image(src: bytes, **kwargs: Any) -> "gdcm.Image":
"""Return a gdcm.Image from the `src`.
Parameters
----------
src : bytes
The raw image frame data to be encoded.
**kwargs
Required parameters:
* `rows`: int
* `columns`: int
* `samples_per_pixel`: int
* `number_of_frames`: int
* `bits_allocated`: int
* `bits_stored`: int
* `pixel_representation`: int
* `photometric_interpretation`: str
Returns
-------
gdcm.Image
An Image containing the `src` as a single uncompressed frame.
"""
rows = kwargs['rows']
columns = kwargs['columns']
samples_per_pixel = kwargs['samples_per_pixel']
number_of_frames = kwargs['number_of_frames']
pixel_representation = kwargs['pixel_representation']
bits_allocated = kwargs['bits_allocated']
bits_stored = kwargs['bits_stored']
photometric_interpretation = kwargs['photometric_interpretation']
pi = gdcm.PhotometricInterpretation.GetPIType(
photometric_interpretation
)
# GDCM's null photometric interpretation gets used for invalid values
if pi == gdcm.PhotometricInterpretation.PI_END:
raise ValueError(
"An error occurred with the 'gdcm' plugin: invalid photometric "
f"interpretation '{photometric_interpretation}'"
)
# `src` uses little-endian byte ordering
ts = gdcm.TransferSyntax.ImplicitVRLittleEndian
image = gdcm.Image()
image.SetNumberOfDimensions(2)
image.SetDimensions((columns, rows, 1))
image.SetPhotometricInterpretation(
gdcm.PhotometricInterpretation(pi)
)
image.SetTransferSyntax(gdcm.TransferSyntax(ts))
pixel_format = gdcm.PixelFormat(
samples_per_pixel,
bits_allocated,
bits_stored,
bits_stored - 1,
pixel_representation
)
image.SetPixelFormat(pixel_format)
if samples_per_pixel > 1:
# Default `src` is planar configuration 0 (i.e. R1 G1 B1 R2 G2 B2)
image.SetPlanarConfiguration(0)
# Add the Pixel Data element and set the value to `src`
elem = gdcm.DataElement(gdcm.Tag(0x7FE0, 0x0010))
elem.SetByteStringValue(src)
image.SetDataElement(elem)
return cast("gdcm.Image", image) | d0844d92d3b0c6b62164019c56864cc498c51334 | 12,039 |
def _4_graphlet_contains_3star(adj_mat):
"""Check if a given graphlet of size 4 contains a 3-star"""
return (4 in [a.sum() for a in adj_mat]) | 307f03707d1a7032df0ccb4f7951eec0c75832fe | 12,040 |
from pathlib import Path
def af4_path() -> Path:
"""Return the abspath of Go bio-target-rna-fusion binary. Builds the binary if necessary"""
global AF4_PATH
if not AF4_PATH:
af4_label = "//go/src/github.com/grailbio/bio/cmd/bio-fusion"
build([af4_label])
AF4_PATH = go_executable(af4_label)
return AF4_PATH | f70b97661bf17eb4e3b67af20c5437ebd6123266 | 12,042 |
def get_sentence_content(sentence_token):
"""Extrac sentence string from list of token in present in sentence
Args:
sentence_token (tuple): contains length of sentence and list of all the token in sentence
Returns:
        str: sentence string
"""
sentence_content = ''
for word in sentence_token[1]:
sentence_content += word.text
return sentence_content | 4f6f1bb557bb508e823704fc645c2901e5f8f03f | 12,043 |
def sequence_generator(data, look_back = 50):
"""\
Description:
------------
Input data for LSTM: Convert to user trajectory (maximum length: look back)
"""
train,test, valid = [],[],[]
unique_users = set(data[:,0])
items_per_user = {int(user):[0 for i in range(look_back)] for user in unique_users}
for (idx,row) in enumerate(data):
user,item,time = int(row[0]),int(row[1]),row[2]
items_per_user[user] = items_per_user[user][1:]+[item+1]
current_items = items_per_user[user]
if row[3]==0:
train.append([current_items[:-1],current_items[-1]])
elif row[3]==2:
test.append([current_items[:-1],current_items[-1]])
else:
valid.append([current_items[:-1],current_items[-1]])
return train,test | 688e572edf1b6d2dea2f069742b01c10ec36f928 | 12,045 |
def prefit_clf__svm(gamma: float = 0.001) -> base.ClassifierMixin:
"""Returns an unfitted SVM classifier object.
:param gamma: ...
:return:
"""
return svm.SVC(gamma=gamma) | 481409fdd7b2970d3595a7f60e411e71ebb00ac0 | 12,046 |
def option_not_exist_msg(option_name, existing_options):
""" Someone is referencing an option that is not available in the current package
options
"""
result = ["'options.%s' doesn't exist" % option_name]
result.append("Possible options are %s" % existing_options or "none")
return "\n".join(result) | 7ffa0afa81483d78a1ed0d40d68831e09710b7e1 | 12,047 |
def elslib_CylinderD2(*args):
"""
:param U:
:type U: float
:param V:
:type V: float
:param Pos:
:type Pos: gp_Ax3
:param Radius:
:type Radius: float
:param P:
:type P: gp_Pnt
:param Vu:
:type Vu: gp_Vec
:param Vv:
:type Vv: gp_Vec
:param Vuu:
:type Vuu: gp_Vec
:param Vvv:
:type Vvv: gp_Vec
:param Vuv:
:type Vuv: gp_Vec
:rtype: void
"""
return _ElSLib.elslib_CylinderD2(*args) | af11eee7a0a429ead43d40ed678f932abd2313f7 | 12,048 |
def get_version_message(version: str):
"""Get the message for the zygrader version from the changelog"""
changelog = load_changelog()
msg = [f"zygrader version {version}", ""]
version_index = 0
for line in changelog:
if line == version:
version_index = changelog.index(line) + 1
line = changelog[version_index]
while line:
msg.append(line)
version_index += 1
line = changelog[version_index]
return msg | 8530180c9d2dc413a1057c2ca255aa0e3dddd72c | 12,049 |
def get_arity(p, b_addr):
"""
    Retrieves the arity by inspecting a function call
:param p: angr project
:param b_addr: basic block address
:return: arity of the function
"""
return len(get_ord_arguments_call(p, b_addr)) | 47b7721421a226d969aada8d873c43f8f58810e9 | 12,050 |
def draw_des3_plot():
"""
This function is to draw the plot of DES 3.
"""
objects = ('Singapore', 'Uruguay', 'Chile', 'Belgium', 'Denmark', 'Qatar', 'Portugal', 'Canada', 'Spain', 'Ireland')
y_pos = np.arange(len(objects))
performance = [71, 69, 68, 66, 65, 65, 64, 63, 63, 62]
plt.xkcd()
fig = plt.figure(figsize=(9, 6), dpi=35)
fig.suptitle('Number of people fully vaccinated by country')
ax = fig.add_subplot(111)
ax.barh(y_pos, performance, align='center', alpha=0.5)
plt.yticks(y_pos, objects)
ax.set_xlabel('Share of people fully vaccinated')
return fig | 6cc1bb3331e9eed57a700596d133d2bf00c3398e | 12,051 |
def get_pi_id(rc):
"""
Gets the database id of the group PI
Parameters
----------
rc: runcontrol object
The runcontrol object. It must contain the 'groups' and 'people'
collections in the needed databases
Returns
-------
The database '_id' of the group PI
"""
groupiter = list(all_docs_from_collection(rc.client, "groups"))
peoplecoll = all_docs_from_collection(rc.client, "people")
pi_ref = [i.get("pi_name") for i in groupiter if
i.get("name").casefold() == rc.groupname.casefold()]
pi = fuzzy_retrieval(peoplecoll, ["_id", "aka", "name"], pi_ref[0])
return pi.get("_id") | 12a1e9c0805e8549be7861247b97f52defd576d9 | 12,052 |
def gather_from_processes(chunk, split_sizes, displacements, comm=MPI.COMM_WORLD):
"""Gather data chunks on rank zero
:param chunk: Data chunks, living on ranks 0, 1, ..., comm.size-1
:type chunk: np.ndarray
    :param split_sizes: Chunk lengths on individual ranks
:type split_sizes: np.ndarray
:param displacements: Chunk displacements (compare scatter_to_processes)
:type displacements: np.ndarray
:return: Dataset gathered again, living on rank 0
:type return: np.ndarray
Inspired by: https://stackoverflow.com/a/36082684
Licensed under the Academic Free License version 3.0
"""
comm.Barrier()
total_length = np.array(chunk.shape[0])
gathered = np.empty((comm.allreduce(total_length), chunk.shape[1]), dtype=chunk.dtype)
comm.Gatherv(chunk, [gathered, split_sizes, displacements, MPI.DOUBLE], root=0)
return gathered | 8f696241e0dc61bbb5dd9867f307e29e8358d69e | 12,053 |
import json
def edit_collab() :
"""
Endpoint to edit a specified collaboration's member variables. This endpoint requires the requesting user to be an
authenticated user to properly function.
Request Body Parameters:
id: string, JSON, required
owner: string, JSON, optional
size: int, JSON, optional
members: array of strings, JSON, optional
date: int, JSON, optional
duration: int, JSON, optional
location, string, JSON, optional
status: bool, JSON, optional
title: string, JSON, optional
description: string, JSON, optional
classes: array of strings, JSON, optional
skills: array of strings, JSON, optional
applicants: array of strings, JSON, optional
This endpoint queries the database for the specified collaboration. If the collaboration is found, other variables
included, if any, are updated. If the search fails, an appropriate error message is returned.
"""
data = request.get_json()
collab_id = data['id']
    record = collabDB.find_one({'_id': ObjectId(collab_id)})
if record is None:
return json.dumps({'error': "No collaborations to update matched id", 'code': 996})
else:
try:
if 'owner' in data and isinstance(data['owner'], str):
record = collabDB.update_one(
{"_id": ObjectId(collab_id)},
{
"$set": {
"owner": data['owner']
}
}
)
if 'size' in data and isinstance(data['size'], int):
record = collabDB.update_one(
{"_id": ObjectId(collab_id)},
{
"$set": {
"size": data['size']
}
}
)
if 'members' in data and isinstance(data['members'], list):
record = collabDB.update_one(
{"_id": ObjectId(collab_id)},
{
"$set": {
"members": data['members']
}
}
)
if 'date' in data and isinstance(data['date'], int):
record = collabDB.update_one(
{"_id": ObjectId(collab_id)},
{
"$set": {
"date": data['date']
}
}
)
if 'duration' in data and isinstance(data['duration'], int):
record = collabDB.update_one(
{"_id": ObjectId(collab_id)},
{
"$set": {
"duration": data['duration']
}
}
)
if 'location' in data and isinstance(data['location'], str):
record = collabDB.update_one(
{"_id": ObjectId(collab_id)},
{
"$set": {
"location": data['location']
}
}
)
if 'status' in data and isinstance(data['status'], bool):
record = collabDB.update_one(
{"_id": ObjectId(collab_id)},
{
"$set": {
"status": data['status']
}
}
)
if 'title' in data and isinstance(data['title'], str):
record = collabDB.update_one(
{"_id": ObjectId(collab_id)},
{
"$set": {
"title": data['title']
}
}
)
if 'description' in data and isinstance(data['description'], str):
record = collabDB.update_one(
{"_id": ObjectId(collab_id)},
{
"$set": {
"description": data['description']
}
}
)
if 'classes' in data and isinstance(data['classes'], list):
record = collabDB.update_one(
{"_id": ObjectId(collab_id)},
{
"$set": {
"classes": data['classes']
}
}
)
if 'skills' in data and isinstance(data['skills'], list):
record = collabDB.update_one(
{"_id": ObjectId(collab_id)},
{
"$set": {
"skills": data['skills']
}
}
)
if 'applicants' in data and isinstance(data['applicants'], list):
record = collabDB.update_one(
{"_id": ObjectId(collab_id)},
{
"$set": {
"applicants": data['applicants']
}
}
)
if record.modified_count > 0:
return json.dumps({'success': True})
else:
return json.dumps({'success': True})
except Exception as e:
print(e)
return json.dumps({'error': "Error while trying to update existing doc.", 'code': 997}) | f205302c6f1368542c870f3ab89bbde374e0957b | 12,055 |
def create_lkas_ui(packer, main_on, enabled, steer_alert, defog, ahbc, ahbcramping, config, noipma, stats, persipma, dasdsply, x30, daschime, lines):
"""Creates a CAN message for the Ford Steer Ui."""
values = {
"PersIndexIpma_D_Actl": persipma,
"DasStats_D_Dsply": dasdsply,
"Set_Me_X30": x30,
"Lines_Hud": lines,
"Hands_Warning_W_Chime": steer_alert,
"CamraDefog_B_Req": defog,
"AhbHiBeam_D_Rq": ahbc,
"AhbcRampingV_D_Rq": ahbcramping,
"FeatConfigIpmaActl": config,
"FeatNoIpmaActl": noipma,
"CamraStats_D_Dsply": stats,
"DasWarn_D_Dsply": daschime,
}
return packer.make_can_msg("Lane_Keep_Assist_Ui", 0, values) | b9c855b6210b34fde8943eaa8293d691ee291527 | 12,056 |
import torch
def _contextual_loss(x, y, reduction='mean'):
"""Contextual loss
"""
loss = -torch.log(_contextual_similarity(x, y))
if reduction == 'mean':
loss = loss.mean()
return loss | 3dee81131c0b1468e822cdc36f1817204ec9eba3 | 12,057 |
def _actual_center(pos, angle):
"""
Calculate the position of the geometric center of the agent
The value of self.cur_pos is the center of rotation.
"""
dir_vec = get_dir_vec(angle)
return pos + (CAMERA_FORWARD_DIST - (ROBOT_LENGTH / 2)) * dir_vec | bd354e1ef64cd14132944da9e436df2a4696d0fe | 12,058 |
def load_one_batch_mnist(batch_size=64, shuffle=True):
"""Return a single batch (inputs, labels) of MNIST data."""
dataloader = get_mnist_dataloder(batch_size, shuffle)
X, y = next(iter(dataloader))
return X, y | 2a2036a72cecf957fd7c8a5344c85a59935fb442 | 12,059 |
def KmeansInterCompare(k, data, nbTests):
"""Réalisation d'un nombre donné de classification Kmeans.
Le meilleur résultat selon le critère d'inertie inter-groupe est affiché"""
KmeansResults = []
for i in range(0, nbTests):
KmeansResults.append(Kmeans(k, data))
    # we maximise the between-group inertia, i.e. we favour separation between the groups
best_kmeans = 0
for i in range(1, nbTests):
if inerInter(KmeansResults[best_kmeans][0], KmeansResults[best_kmeans][1]) < inerInter(KmeansResults[i][0], KmeansResults[i][1]):
best_kmeans = i
return KmeansResults[best_kmeans] | 42d8d900ad5e7d6e80bbd16736178fe5a14f878c | 12,061 |
from bs4 import BeautifulSoup
def get_all_reports_url(url_1,url_2, headers=None):
""" Returns all reports URLs on a single 'url' """
if headers == None:
header = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.131 Safari/537.36'}
else:
header = headers
url = urljoin(url_1, url_2)
# initialize the session
session = HTMLSession()
# make the HTTP request and retrieve response
response = session.get(url, headers=header)
# execute Javascript with a timeout of 20 seconds
# response.html.render(timeout=20) ## pyppeteer.errors.TimeoutError: Navigation Timeout Exceeded: 20000 ms exceeded.
# construct the soup parser
soup = BeautifulSoup(response.html.html, "html.parser")
urls = []
table = soup.find("table", class_="ms-rteTable-5")
for report, name in zip(table.find_all("td", class_="ms-rteTableEvenCol-5"), table.find_all("td", class_="ms-rteTableOddCol-5")) :
report_url = report.find("a").attrs.get("href")
name = ((''.join(name.text.split())).replace("/", "-")).replace(" ", "").replace("\u200b", "")
if not report_url:
        # if the row does not contain a report link, just skip
continue
# make the URL absolute by joining domain with the URL that is just extracted
report_url = urljoin(url_1, report_url)
try:
pos = report_url.index("?")
report_url = report_url[:pos]
except ValueError:
pass
# finally, if the url is valid
if is_valid(report_url):
urls.append({'url':report_url, 'name':name})
# close the session to end browser process
session.close()
    # print total reports found at the URL
print(f"Total {len(urls)} Reports Found!")
return urls | 705047c4d21167f9ecde23f163e383e9f6aa00b2 | 12,062 |
def left_shift(k, n=32):
"""
Returns the n*n matrix corresponding to the operation
lambda v: vec_from_int(int_from_vec(v) << k, n)
>>> print_mat(left_shift(2, 6))
000000
000000
100000
010000
001000
000100
>>> int_from_vec(left_shift(2) * vec_from_int(42)) == 42 << 2
True
"""
D = set(range(n))
return Mat((D, D), {(j + k, j): one for j in range(n - k)}) | 216011184a8b6a02675524fd2906f092553396c6 | 12,064 |
from typing import List
from datetime import datetime
def get_tensorboard_logger(
trainer: Engine, evaluators: ThreeEvaluators, metric_names: List[str]
) -> TensorboardLogger:
"""
    creates a ``tensorboard`` logger which reads metrics from the given evaluators and attaches it to the given trainer
:param trainer: an ``ignite`` trainer to attach to
:param evaluators: a triple of train, validation, and test evaluators to get metrics from
:param metric_names: a list of metrics to log during validation and testing
"""
tb_logger = TensorboardLogger(
log_dir=f"runs/{datetime.now()}", flush_secs=1
)
training_loss = OutputHandler(
"training",
["running_loss"],
global_step_transform=global_step_from_engine(trainer),
)
tb_logger.attach(trainer, training_loss, Events.EPOCH_COMPLETED)
validation_loss = OutputHandler(
"validation",
metric_names,
global_step_transform=global_step_from_engine(trainer),
)
tb_logger.attach(evaluators.validation, validation_loss, Events.COMPLETED)
test_loss = OutputHandler(
"test",
metric_names,
global_step_transform=global_step_from_engine(trainer),
)
tb_logger.attach(evaluators.test, test_loss, Events.COMPLETED)
return tb_logger | 30a920c056cf10df4656cbda2a1e92fb28388f06 | 12,065 |
def get_vocabs(datasets):
"""Build vocabulary from an iteration of dataset objects
Args:
dataset: a list of dataset objects
Returns:
two sets of all the words and tags respectively in the dataset
"""
print("Building vocabulary...")
vocab_words = set()
vocab_tags = set()
for dataset in datasets:
for words, tags in dataset:
vocab_words.update(words)
vocab_tags.update(tags)
print("- done. {} tokens".format(len(vocab_words)))
return vocab_words, vocab_tags | 1fce7fe7b9dbdd3216802d0816ac5fabc542b859 | 12,066 |
def check_row_uniqueness(board: list) -> bool:
"""
Return True if each row has no repeated digits.
Return False otherwise.
>>> check_row_uniqueness([\
"**** ****",\
"***1 ****",\
"** 3****",\
"* 4 1****",\
" 9 5 ",\
" 6 83 *",\
"3 1 **",\
" 8 2***",\
" 2 ****"\
])
True
>>> check_row_uniqueness([\
"**** ****",\
"***1 ****",\
"** 3****",\
"* 4 1****",\
" 5 9 5 ",\
" 6 83 *",\
"3 1 **",\
" 8 2***",\
" 2 ****"\
])
False
"""
global NUMBER
for row in board:
count = 0
row_set = set()
for char in row:
if char.isdigit():
if int(char) in range(1, NUMBER + 1):
count += 1
row_set.add(char)
if len(row_set) != count:
return False
return True | b4d937f14de0da90e694621fb2c70e9a59e80f0a | 12,067 |
def calculate_distance_to_divide(
grid, longest_path=True, add_to_grid=False, clobber=False
):
"""Calculate the along flow distance from drainage divide to point.
This utility calculates the along flow distance based on the results of
running flow accumulation on the grid. It will use the connectivity
used by the FlowAccumulator (e.g. D4, D8, Dinf).
Parameters
----------
grid : ModelGrid
longest_path : bool, optional
Take the longest (or shortest) path to a drainage divide. Default is
true.
add_to_grid : boolean, optional
Flag to indicate if the stream length field should be added to the
grid. Default is False. The field name used is ``distance_to_divide``.
clobber : boolean, optional
Flag to indicate if adding the field to the grid should not clobber an
existing field with the same name. Default is False.
Returns
-------
distance_to_divide : float ndarray
The distance that has to be covered from an imaginary flow, located in
each node of the grid, to reach the watershed's outlet.
Examples
--------
>>> import numpy as np
>>> from landlab import RasterModelGrid
>>> from landlab.components import FlowAccumulator
>>> from landlab.utils.distance_to_divide import (
... calculate_distance_to_divide)
>>> mg = RasterModelGrid((5, 4))
>>> elev = np.array([0., 0., 0., 0.,
... 0., 10., 10., 0.,
... 0., 20., 20., 0.,
... 0., 30., 30., 0.,
... 0., 0., 0., 0.])
>>> _ = mg.add_field("topographic__elevation", elev, at="node")
>>> mg.set_closed_boundaries_at_grid_edges(
... bottom_is_closed=False,
... left_is_closed=True,
... right_is_closed=True,
... top_is_closed=True)
>>> fr = FlowAccumulator(mg, flow_director = 'D8')
>>> fr.run_one_step()
>>> distance_to_divide = calculate_distance_to_divide(
... mg,
... add_to_grid=True,
... clobber=True,
... )
>>> mg.at_node['distance_to_divide']
array([ 0., 3., 3., 0.,
0., 2., 2., 0.,
0., 1., 1., 0.,
0., 0., 0., 0.,
0., 0., 0., 0.])
Now, let's change to MFD the flow_director method, which routes flow to
multiple nodes.
>>> from landlab import RasterModelGrid
>>> from landlab.components import FlowAccumulator
>>> from landlab.utils.distance_to_divide import (
... calculate_distance_to_divide)
>>> mg = RasterModelGrid((5, 4), xy_spacing=(1, 1))
>>> elev = np.array([0., 0., 0., 0.,
... 0., 10., 10., 0.,
... 0., 20., 20., 0.,
... 0., 30., 30., 0.,
... 0., 0., 0., 0.])
>>> _ = mg.add_field("topographic__elevation", elev, at="node")
>>> mg.set_closed_boundaries_at_grid_edges(
... bottom_is_closed=False,
... left_is_closed=True,
... right_is_closed=True,
... top_is_closed=True)
>>> fr = FlowAccumulator(mg, flow_director = 'MFD')
>>> fr.run_one_step()
>>> distance_to_divide = calculate_distance_to_divide(
... mg,
... add_to_grid=True,
... clobber=True,
... )
>>> mg.at_node['distance_to_divide']
array([ 0., 3., 3., 0.,
0., 2., 2., 0.,
0., 1., 1., 0.,
0., 0., 0., 0.,
0., 0., 0., 0.])
The distance_to_divide utility can also work on irregular grids. For the
example we will use a Hexagonal Model Grid, a special type of Voroni Grid
that has regularly spaced hexagonal cells.
>>> from landlab import HexModelGrid
>>> from landlab.components import FlowAccumulator
>>> from landlab.utils.distance_to_divide import (
... calculate_distance_to_divide)
>>> dx = 1
>>> hmg = HexModelGrid((5, 3), dx)
>>> _ = hmg.add_field(
... "topographic__elevation",
... hmg.node_x + np.round(hmg.node_y),
... at="node",
... )
>>> hmg.status_at_node[hmg.boundary_nodes] = hmg.BC_NODE_IS_CLOSED
>>> hmg.status_at_node[0] = hmg.BC_NODE_IS_FIXED_VALUE
>>> fr = FlowAccumulator(hmg, flow_director = 'D4')
>>> fr.run_one_step()
>>> distance_to_divide = calculate_distance_to_divide(
... hmg,
... add_to_grid=True,
... clobber=True,
... )
>>> hmg.at_node['distance_to_divide']
array([ 3., 0., 0.,
0., 2., 1., 0.,
0., 1., 1., 0., 0.,
0., 0., 0., 0.,
0., 0., 0.])
"""
# check that flow__receiver nodes exists
if "flow__receiver_node" not in grid.at_node:
raise FieldError(
"A 'flow__receiver_node' field is required at the "
"nodes of the input grid."
)
if "flow__upstream_node_order" not in grid.at_node:
raise FieldError(
"A 'flow__upstream_node_order' field is required at the "
"nodes of the input grid."
)
if "drainage_area" not in grid.at_node:
raise FieldError(
"A 'flow__upstream_node_order' field is required at the "
"nodes of the input grid."
)
# get the reciever nodes, depending on if this is to-one, or to-multiple,
# we'll need to get a different at-node field.
if grid.at_node["flow__receiver_node"].size != grid.size("node"):
to_one = False
else:
to_one = True
flow__receiver_node = grid.at_node["flow__receiver_node"]
drainage_area = grid.at_node["drainage_area"]
# get the upstream node order
flow__upstream_node_order = grid.at_node["flow__upstream_node_order"]
# get downstream flow link lengths, result depends on type of grid.
if isinstance(grid, RasterModelGrid):
flow_link_lengths = grid.length_of_d8[
grid.at_node["flow__link_to_receiver_node"]
]
else:
flow_link_lengths = grid.length_of_link[
grid.at_node["flow__link_to_receiver_node"]
]
    # create an array that represents the distance to the divide.
distance_to_divide = np.zeros(grid.nodes.size)
if not longest_path:
distance_to_divide[:] = 2 * grid.size("node") * np.max(flow_link_lengths)
# iterate through the flow__upstream_node_order backwards.
for node in reversed(flow__upstream_node_order):
        # if drainage area is equal to the node cell area, set distance to zero
# this should handle the drainage divide cells as boundary cells have
# their area set to zero.
if drainage_area[node] == grid.cell_area_at_node[node]:
distance_to_divide[node] = 0
        # get flow receivers
reciever = flow__receiver_node[node]
if to_one:
# if not processing an outlet node.
if reciever != node:
if longest_path:
cond = (
distance_to_divide[reciever]
< distance_to_divide[node] + flow_link_lengths[node]
)
else:
cond = (
distance_to_divide[reciever]
> distance_to_divide[node] + flow_link_lengths[node]
)
if cond:
distance_to_divide[reciever] = (
distance_to_divide[node] + flow_link_lengths[node]
)
else:
            # non-existent links are coded with -1
useable_receivers = np.where(reciever != grid.BAD_INDEX)[0]
for idx in range(len(useable_receivers)):
r = reciever[useable_receivers][idx]
fll = flow_link_lengths[node][useable_receivers][idx]
# if not processing an outlet node.
if r != node:
if longest_path:
cond = distance_to_divide[r] < distance_to_divide[node] + fll
else:
cond = distance_to_divide[r] > distance_to_divide[node] + fll
if cond:
distance_to_divide[r] = distance_to_divide[node] + fll
# store on the grid
if add_to_grid:
grid.add_field(
"distance_to_divide", distance_to_divide, at="node", clobber=clobber
)
return distance_to_divide | 99afc09e88aee3ea7486a9dfddc45d98358d0bd7 | 12,068 |
def string_unquote(value: str):
"""
Method to unquote a string
Args:
value: the value to unquote
Returns:
unquoted string
"""
if not isinstance(value, str):
return value
return value.replace('"', "").replace("'", "") | e062c012fc43f9b41a224f168de31732d885b21f | 12,070 |
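# Usage sketch for string_unquote: replace() removes every quote character,
# not just an outer pair, and non-string inputs pass through untouched.
print(string_unquote('"hello"'))  # hello
print(string_unquote("it's"))     # its
print(string_unquote(42))         # 42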
def translate(tx, ty, tz):
"""Translate."""
return affine(t=[tx, ty, tz]) | f3b11f6bcf0f77b39423d8aac3d34149ae8b93a7 | 12,071 |
import numpy as np
import torch
from sklearn.utils import check_random_state
def randomized_svd_gpu(M, n_components, n_oversamples=10, n_iter='auto',
transpose='auto', random_state=0, lib='pytorch',tocpu=True):
"""Computes a truncated randomized SVD on GPU. Adapted from Sklearn.
Parameters
----------
M : ndarray or sparse matrix
Matrix to decompose
n_components : int
Number of singular values and vectors to extract.
n_oversamples : int (default is 10)
Additional number of random vectors to sample the range of M so as
to ensure proper conditioning. The total number of random vectors
used to find the range of M is n_components + n_oversamples. Smaller
number can improve speed but can negatively impact the quality of
approximation of singular vectors and singular values.
n_iter : int or 'auto' (default is 'auto')
Number of power iterations. It can be used to deal with very noisy
        problems. When 'auto', it is set to 4, unless `n_components` is small
        (< .1 * min(X.shape)), in which case `n_iter` is set to 7.
This improves precision with few components.
transpose : True, False or 'auto' (default)
Whether the algorithm should be applied to M.T instead of M. The
result should approximately be the same. The 'auto' mode will
trigger the transposition if M.shape[1] > M.shape[0] since this
        implementation of randomized SVD tends to be a little faster in that
case.
random_state : int, RandomState instance or None, optional (default=None)
The seed of the pseudo random number generator to use when shuffling
the data. If int, random_state is the seed used by the random number
generator; If RandomState instance, random_state is the random number
generator; If None, the random number generator is the RandomState
instance used by `np.random`.
lib : {'cupy', 'pytorch'}, str optional
Chooses the GPU library to be used.
Notes
-----
This algorithm finds a (usually very good) approximate truncated
singular value decomposition using randomization to speed up the
computations. It is particularly fast on large matrices on which
you wish to extract only a small number of components. In order to
obtain further speed up, `n_iter` can be set <=2 (at the cost of
loss of precision).
References
----------
* Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
Halko, et al., 2009 http://arxiv.org/abs/arXiv:0909.4061
* A randomized algorithm for the decomposition of matrices
Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert
* An implementation of a randomized algorithm for principal component
analysis
A. Szlam et al. 2014
"""
random_state = check_random_state(random_state)
n_random = n_components + n_oversamples
n_samples, n_features = M.shape
if n_iter == 'auto':
# Checks if the number of iterations is explicitly specified
n_iter = 7 if n_components < .1 * min(M.shape) else 4
if transpose == 'auto':
transpose = n_samples < n_features
if transpose:
M = M.T # this implementation is a bit faster with smaller shape[1]
if lib == 'pytorch':
M_gpu = torch.Tensor.cuda(torch.from_numpy(M.astype('float32')))
# Generating normal random vectors with shape: (M.shape[1], n_random)
Q = torch.cuda.FloatTensor(M_gpu.shape[1], n_random).normal_()
# Perform power iterations with Q to further 'imprint' the top
# singular vectors of M in Q
for i in range(n_iter):
Q = torch.mm(M_gpu, Q)
Q = torch.mm(torch.transpose(M_gpu, 0, 1), Q)
        # Sample the range of M by linear projection of Q and extract an orthonormal basis
Q, _ = torch.qr(torch.mm(M_gpu, Q))
# project M to the (k + p) dimensional space using the basis vectors
B = torch.mm(torch.transpose(Q, 0, 1), M_gpu)
# compute the SVD on the thin matrix: (k + p) wide
Uhat, s, V = torch.svd(B)
del B
U = torch.mm(Q, Uhat)
if transpose:
# transpose back the results according to the input convention
U, s, V=(torch.transpose(V[:n_components, :], 0, 1),s[:n_components],torch.transpose(U[:, :n_components], 0, 1))
else:
U, s, V=( U[:, :n_components], s[:n_components], V[:n_components, :])
if tocpu is True:
return np.array(U.cpu()).astype('float'), np.array(s.cpu()).astype('float'), np.array(V.cpu()).astype('float')
else:
return U, s, V | 31483d2f83b66d8cd4bc15a929ecf657c67d0af2 | 12,072 |
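# Minimal usage sketch for randomized_svd_gpu, assuming a CUDA-capable GPU and an older
# PyTorch release where torch.qr/torch.svd are still available; the matrix shape and rank
# below are purely illustrative.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    M = rng.rand(1000, 50) @ rng.rand(50, 300) + 0.01 * rng.rand(1000, 300)  # low rank + noise
    U, s, V = randomized_svd_gpu(M, n_components=10, n_iter=4, tocpu=True)
    print(U.shape, s.shape, V.shape)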
import re
import pandas as pd
from janitor import clean_names
def clean_column_names(df: pd.DataFrame) -> pd.DataFrame:
"""Cleans the column names of the given dataframe by applying the following steps
after using the janitor `clean_names` function:
* strips any 'unnamed' field, for example 'Unnamed: 0'
* replaces the first missing name with 'is_away'
    * converts '#' to '_nbr'
* converts '%' to '_pct'
Args:
df (pd.DataFrame): The dataframe to clean the column names of.
Returns:
pd.DataFrame: The dataframe with cleaned column names.
"""
df = clean_names(df)
cols = df.columns
cols = [re.sub("unnamed_[0-9]+_level_[0-9]", "", x).strip("_") for x in cols]
# away will always be the first empty string following cleaning step above
cols[cols.index("")] = "is_away"
cols = [x.replace("#", "_nbr") for x in cols]
cols = [x.replace("%", "_pct") for x in cols]
cols = ["is_active" if x == "status" else x for x in cols]
cols = ["is_start" if x == "gs" else x for x in cols]
df.columns = cols
return df | 37c41b02846cacab1e412d402b50d94e18e1e20e | 12,073 |
def cos(x):
"""Return the cosine.
INPUTS
x (Variable object or real number)
RETURNS
if x is a Variable, then return a Variable with val and der.
if x is a real number, then return the value of np.cos(x).
EXAMPLES
>>> x = Variable(0, name='x')
>>> t = cos(x)
>>> print(t.val, t.der['x'])
1.0 0.0
"""
try:
val = np.cos(x.val)
ders = defaultdict(float)
sec_ders = defaultdict(float)
for key in x.der:
ders[key] += -np.sin(x.val) * (x.der[key])
sec_ders[key] += -x.sec_der[key]*np.sin(x.val)+(x.der[key]**2)*(-np.cos(x.val))
return Variable(val, ders, sec_ders)
except AttributeError:
return np.cos(x) | 472bfb53345c14545a8f1bf75deb5679fe1916f8 | 12,074 |
def _get_regions(connection):
""" Get list of regions in database excluding GB. If no regions are found,
a ValueError is raised.
"""
query_regions = connection.execute(
db.select([models.Region.code]).where(models.Region.code != "GB")
)
regions = [r[0] for r in query_regions]
if not regions:
raise ValueError("NPTG data not populated yet.")
return regions | ffb58c5c695a8dd669497f8dccdf5ef8202e5a21 | 12,075 |
def retrieve_from_stream(iden, interval=60):
"""
Return messages from a stream.
:param iden: Identifier of the stream.
:param interval: defaults to messages of last 60 seconds.
"""
return stm_str.get_messages(str(UID), str(TOKEN), interval, iden) | 166918258ff3bc8ff972164027df4bf93b4a280e | 12,077 |
def train_op(tot_loss, lr, var_opt, name):
"""
When only the discriminator is trained, the learning rate is set to be 0.0008
When the generator model is also trained, the learning rate is set to be 0.0004
Since there are batch_normalization layers in the model, we need to use update_op for keeping train and test moving average
of the batch_norm parameters
"""
# optimizer = tf.train.RMSPropOptimizer(learning_rate = lr)
epsilon = 1e-4 # added on 18th of July
optimizer = tf.train.AdamOptimizer(learning_rate=lr, epsilon=epsilon, name=name)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
grads = optimizer.compute_gradients(tot_loss, var_list=var_opt)
print("================================================")
print("I am printing the non gradient")
for grad, var in grads:
if grad is None:
print("no gradient", grad, var)
print("================================================")
opt = optimizer.apply_gradients(grads)
return opt | 52dc967b9adac561f210fe4305d69b8724841607 | 12,078 |
import math
def logadd(x, y):
"""Adds two log values.
Ensures accuracy even when the difference between values is large.
"""
if x < y:
temp = x
x = y
y = temp
z = math.exp(y - x)
logProb = x + math.log(1.0 + z)
if logProb < _MinLogExp:
return _MinLogExp
else:
return logProb | d1993f9e3d9fd5f44938509df12d818fb5eb7f3d | 12,079 |
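# Worked example for logadd: log(0.5) "plus" log(0.25) should equal log(0.75).
# _MinLogExp is a module-level floor in the original code; a stand-in value is used here.
_MinLogExp = -700.0  # stand-in lower bound for log values
print(logadd(math.log(0.5), math.log(0.25)))  # ~ -0.2877
print(math.log(0.75))                         # ~ -0.2877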
def getResourceNameString(ro_config, rname, base=None):
"""
    Returns a string value corresponding to a URI indicated by the supplied parameter.
Relative references are assumed to be paths relative to the supplied base URI or,
    if no base is supplied, relative to the current directory.
"""
rsplit = rname.split(":")
if len(rsplit) == 2:
# Try to interpret name as CURIE
for rpref in ro_config["annotationPrefixes"]:
if rsplit[0] == rpref:
rname = ro_config["annotationPrefixes"][rpref]+rsplit[1]
if urlparse.urlsplit(rname).scheme == "":
if base:
rname = resolveUri(rname, base)
else:
rname = resolveFileAsUri(rname)
return rname | f5840a7effb4ac91a77810531f10d323a03490ce | 12,080 |
def get_mean_from_protobin(filename):
"""Get image mean from protobinary and return ndarray with skimage format.
"""
img = read_caffe_protobin(filename)
size = (img.channels, img.height, img.width)
img = caffe.io.blobproto_to_array(img).reshape(size)
img = img.transpose([1, 2, 0])
return img | ba03cdeb534d00885c1c6bee22bee15af3880a85 | 12,083 |
def has_key(key):
"""
Check if key is in the minion datastore
.. versionadded:: 2015.8.0
CLI Example:
.. code-block:: bash
salt '*' data.has_key <mykey>
"""
store = load()
return key in store | 7d551773da4c7d98f090d7ff9b489bcbecbec32e | 12,084 |
def __create_setting(ingest):
"""Creates the setting for a particular family"""
signer, addresser, auth_keys, threshold = ingest
settings = Settings(
auth_list=','.join(auth_keys),
threshold=threshold)
return (
signer,
addresser,
SettingPayload(
action=SettingPayload.CREATE,
dimension=addresser.family,
data=settings.SerializeToString())) | a74d7c5a109555de591d26f5e123c31a5d0b4a7a | 12,085 |
def paser_bs(sent):
"""Convert compacted bs span to triple list
Ex:
"""
sent=sent.strip('<sos_b>').strip('<eos_b>')
sent = sent.split()
belief_state = []
domain_idx = [idx for idx,token in enumerate(sent) if token in all_domain]
for i,d_idx in enumerate(domain_idx):
next_d_idx = len(sent) if i+1 == len(domain_idx) else domain_idx[i+1]
domain = sent[d_idx]
sub_span = sent[d_idx+1:next_d_idx]
sub_s_idx = [idx for idx,token in enumerate(sub_span) if token in all_slots]
for j,s_idx in enumerate(sub_s_idx):
next_s_idx = len(sub_span) if j == len(sub_s_idx) - 1 else sub_s_idx[j+1]
slot = sub_span[s_idx]
value = ' '.join(sub_span[s_idx+1:next_s_idx])
bs = " ".join([domain,slot,value])
belief_state.append(bs)
return list(set(belief_state)) | b517d58e7a7958b9186c7f9216cb33e506c148f7 | 12,086 |
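# Worked example for paser_bs. all_domain and all_slots are module-level vocabularies in
# the original code base; the tiny stand-ins below exist purely for illustration.
all_domain = ['[hotel]', '[restaurant]']
all_slots = ['area', 'pricerange', 'food']
span = '<sos_b> [hotel] area centre pricerange cheap [restaurant] food italian <eos_b>'
print(sorted(paser_bs(span)))
# ['[hotel] area centre', '[hotel] pricerange cheap', '[restaurant] food italian']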
def app_factory(global_conf, **local_conf):
"""paste.deploy app factory for creating WSGI container server apps."""
conf = global_conf.copy()
conf.update(local_conf)
return ContainerController(conf) | d493112d714e0303b807304b8be11d0d8b8c5b37 | 12,087 |
import traceback
def retrieve_succinct_traceback() -> str:
"""
    A utility that retrieves a succinct traceback digest from a complete traceback string.
"""
tb = traceback.format_exc()
return "\n".join(pg.splitlines()[-1] for pg in split_paragraphs(tb)) | 882e190138fd51be807d37f014c22aead57f88ba | 12,088 |
def get_canonical_format_name(format_name):
"""
Get the canonical format name for a possible abbreviation
Args:
format_name (str): Format name or abbreviation
Returns:
The canonical name from CANONICAL_FORMATS, or None if the format is
not recognized.
"""
try:
return CANONICAL_FORMATS[format_name.lower()]
except KeyError:
return None | ae95d0321e2f8880ccbd7710e48666991208a470 | 12,089 |
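# Usage sketch: CANONICAL_FORMATS is defined elsewhere in the original module; a tiny
# stand-in mapping is used here for illustration.
CANONICAL_FORMATS = {"md": "markdown", "markdown": "markdown", "rst": "restructuredtext"}
print(get_canonical_format_name("MD"))       # markdown
print(get_canonical_format_name("unknown"))  # None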
def build_protoc_args(
ctx,
plugin,
proto_infos,
out_arg,
extra_options = [],
extra_protoc_args = [],
short_paths = False,
resolve_tools = True):
"""
Build the args for a protoc invocation.
This does not include the paths to the .proto files, which should be done external to this function.
Args:
ctx: The Bazel rule execution context object.
plugin: The ProtoPluginInfo for the plugin to use.
proto_infos: The list of ProtoInfo providers.
out_arg: The path to provide as the output arg to protoc, usually the generation root dir.
extra_options: An optional list of extra options to pass to the plugin.
extra_protoc_args: An optional list of extra args to add to the command.
short_paths: Whether to use the .short_path instead of .path when creating paths. The short_path is used when
making a test/executable and referencing the runfiles.
resolve_tools: Whether to resolve and add the tools to returned inputs.
Returns:
- The list of args.
- The inputs required for the command.
- The input manifests required for the command.
"""
# Specify path getter
get_path = _short_path if short_paths else _path
# Build inputs and manifests list
inputs = []
input_manifests = []
if plugin.tool and resolve_tools:
plugin_runfiles, plugin_input_manifests = ctx.resolve_tools(tools = [plugin.tool])
inputs += plugin_runfiles.to_list()
input_manifests += plugin_input_manifests
inputs += plugin.data
# Get plugin name
plugin_name = plugin.name
if plugin.protoc_plugin_name:
plugin_name = plugin.protoc_plugin_name
# Build args
args_list = []
# Load all descriptors (direct and transitive) and remove dupes
descriptor_sets = depset([
descriptor_set
for proto_info in proto_infos
for descriptor_set in proto_info.transitive_descriptor_sets.to_list()
]).to_list()
inputs += descriptor_sets
# Add descriptors
pathsep = ctx.configuration.host_path_separator
args_list.append("--descriptor_set_in={}".format(pathsep.join(
[get_path(f) for f in descriptor_sets],
)))
# Add --plugin if not a built-in plugin
if plugin.tool_executable:
# If Windows, mangle the path. It's done a bit awkwardly with
        # `host_path_separator` as there is no simple way to figure out what's
# the current OS.
if ctx.configuration.host_path_separator == ";":
plugin_tool_path = get_path(plugin.tool_executable).replace("/", "\\")
else:
plugin_tool_path = get_path(plugin.tool_executable)
args_list.append("--plugin=protoc-gen-{}={}".format(plugin_name, plugin_tool_path))
# Add plugin --*_out/--*_opt args
plugin_options = list(plugin.options)
plugin_options.extend(extra_options)
if plugin_options:
opts_str = ",".join(
[option.replace("{name}", ctx.label.name) for option in plugin_options],
)
if plugin.separate_options_flag:
args_list.append("--{}_opt={}".format(plugin_name, opts_str))
else:
out_arg = "{}:{}".format(opts_str, out_arg)
args_list.append("--{}_out={}".format(plugin_name, out_arg))
# Add any extra protoc args provided or that plugin has
args_list.extend(extra_protoc_args)
if plugin.extra_protoc_args:
args_list.extend(plugin.extra_protoc_args)
return args_list, inputs, input_manifests | 24796ea7d817dd3a11ec2c1b54de23573c6da275 | 12,090 |
def make_rare_deleterious_variants_filter(sample_ids_list=None):
""" Function for retrieving rare, deleterious variants """
and_list = [
{
"$or":
[
{"cadd.esp.af": {"$lt": 0.051}},
{"cadd.esp.af": {"$exists": False}}
]
},
{
"$or":
[
{"func_knowngene": "exonic"},
{"func_knowngene": "splicing"}
]
},
{"cadd.phred": {"$gte": 10}},
{"exonicfunc_knowngene": {"$ne": "synonymous SNV"}},
{"1000g2015aug_all": {"$lt": 0.051}}
]
result = _append_sample_id_constraint_if_needed(and_list, sample_ids_list)
return result | 2a97e5a0aa96b2a221c32639dde20fcf8de09bce | 12,092 |
def PluginCompleter(unused_self, event_object):
"""Completer function that returns a list of available plugins."""
ret_list = []
if not IsLoaded():
return ret_list
    if '-h' not in event_object.line:
ret_list.append('-h')
plugins_list = parsers_manager.ParsersManager.GetWindowsRegistryPlugins()
for plugin_cls in plugins_list.GetKeyPlugins(RegCache.hive_type):
plugins_list = plugin_cls(reg_cache=RegCache.reg_cache)
plugin_name = plugins_list.plugin_name
if plugin_name.startswith('winreg'):
plugin_name = plugin_name[7:]
if plugin_name == 'default':
continue
ret_list.append(plugin_name)
return ret_list | c03b765ed39c1e43d3b67aaf9486c0f015b7bc4e | 12,093 |
def train_model(item_user_data) -> []:
""""Returns trained model"""
model = implicit.als.AlternatingLeastSquares(factors=50)
model.fit(item_user_data)
return model | d3917cb422707bebdd519bab04b9332c1056ec7a | 12,094 |
def refresh_blind_balances(wallet, balances, storeback=True):
""" Given a list of (supposedly) unspent balances, iterate over each one
        and verify its status on the blockchain. Each balance failing
        this verification updates its status in the database (if storeback is True).
Returns a list of TRULY unspent balances.
"""
rpc = wallet.rpc
unspent = [ ]
for balance in balances:
result = rpc.get_blinded_balances([balance["commitment"]])
if len(result) == 0:
if storeback:
wallet.modifyBlindBalance(balance["commitment"], used=True)
else:
unspent.append(balance)
return unspent | 2d468827ae32d359b323921d5933796ada22d627 | 12,095 |
def hull_area(par, llhs, above_min=1):
"""Estimate projected area of llh minimum for single parameter
Parameters
----------
par : np.ndarray
the parameter values
llhs : np.ndarray
the llh values
Returns
-------
float
"""
min_llh = llhs.min()
try:
Hull = ConvexHull(np.stack([par, llhs]).T[llhs < min_llh+above_min])
return Hull.volume
except QhullError:
return np.inf | 3284a9742dfd9889fff67fd6f68ab9435858a521 | 12,097 |
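# Usage sketch for hull_area on a parabolic likelihood profile. The snippet assumes
# numpy, ConvexHull and QhullError are available at module level, so they are imported
# here; the QhullError import path depends on the scipy version.
import numpy as np
from scipy.spatial import ConvexHull
try:
    from scipy.spatial import QhullError        # scipy >= 1.8
except ImportError:
    from scipy.spatial.qhull import QhullError  # older scipy releases
par = np.linspace(-2.0, 2.0, 201)
llhs = par ** 2  # likelihood profile with its minimum (0) at par = 0
print(hull_area(par, llhs, above_min=1))  # area of the hull of points with llh < min + 1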
def assemble_chain(leaf, store):
"""Assemble the trust chain.
This assembly method uses the certificates subject and issuer common name and
should be used for informational purposes only. It does *not*
cryptographically verify the chain!
:param OpenSSL.crypto.X509 leaf: The leaf certificate from which to build the
chain.
:param list[OpenSSL.crypto.X509] store: A list of certificates to use to
resolve the chain.
:return: The trust chain.
:rtype: list[OpenSSL.crypto.X509]
"""
store_dict = {}
for cert in store:
store_dict[cert.get_subject().CN] = cert
chain = [leaf]
current = leaf
try:
while current.get_issuer().CN != current.get_subject().CN:
chain.append(store_dict[current.get_issuer().CN])
current = store_dict[current.get_issuer().CN]
except KeyError:
invalid = crypto.X509()
patch_certificate(invalid)
invalid.set_subject(current.get_issuer())
chain.append(invalid)
chain.reverse()
return chain | c59025ddcbb777f4f5358f8d89e05191c22eb780 | 12,098 |
def dilate( data, iterations=1, structure=None ):
"""Dilate a binary ND array by a number of iterations."""
# Convert to binary, just in case.
mask = binarise(data).astype(int)
if not structure:
structure = ndimage.generate_binary_structure(3,1)
# Check we have positive iterations - no header available here to convert from mm.
iterations = np.abs(iterations)
# Slightly awkward as I'm not certain iterations == voxels
print (" Dilating {0} iterations ...".format(iterations))
if iterations > 0:
dilated_mask = ndimage.binary_dilation( mask, structure, iterations )
return dilated_mask
# End of dilate() definition | 724b10f0c1d0d417f4ca693a5322349f390da17c | 12,100 |
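# Usage sketch for dilate on a single voxel. binarise() is defined elsewhere in the
# original module; a simple thresholding stand-in is provided here, together with the
# numpy/scipy imports the snippet relies on.
import numpy as np
from scipy import ndimage
def binarise(data, threshold=0):
    """Stand-in for the project's binarise helper: values above threshold become 1."""
    return (np.asarray(data) > threshold).astype(int)
vol = np.zeros((5, 5, 5), dtype=int)
vol[2, 2, 2] = 1
grown = dilate(vol, iterations=1)
print(int(grown.sum()))  # 7: the voxel plus its six face neighbours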
from PyPDF4 import PdfFileReader
def text_from_pdf(file_name : str) -> str:
"""
Extract text from PDF file
==========================
Parameters
----------
file_name : str
Name of the file to extract text from.
Returns
-------
str
The extracted text.
"""
text = ''
with open(file_name, 'rb') as instream:
reader = PdfFileReader(instream)
for i in range(reader.numPages):
text += '{}\n'.format(reader.getPage(i).extractText())
return text | a1b0077b143b4fee211dd38b6beabf58c7692177 | 12,101 |
import configparser
def read_section(section, fname):
"""Read the specified section of an .ini file."""
conf = configparser.ConfigParser()
conf.read(fname)
val = {}
try:
val = dict((v, k) for v, k in conf.items(section))
return val
except configparser.NoSectionError:
return None | 65d6b81b45fc7b75505dd6ee4dda19d13ebf7095 | 12,102 |
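# Usage sketch for read_section: write a small .ini file and read one section back.
import os
import tempfile
with tempfile.NamedTemporaryFile("w", suffix=".ini", delete=False) as tmp:
    tmp.write("[server]\nhost = localhost\nport = 8080\n")
    path = tmp.name
print(read_section("server", path))   # {'host': 'localhost', 'port': '8080'}
print(read_section("missing", path))  # None
os.remove(path)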
import torch
def freqz(
b, a=1, worN=512, whole=False, fs=2 * np.pi, log=False, include_nyquist=False
):
"""Compute the frequency response of a digital filter."""
h = None
lastpoint = 2 * np.pi if whole else np.pi
if log:
w = np.logspace(0, lastpoint, worN, endpoint=include_nyquist and not whole)
else:
w = np.linspace(0, lastpoint, worN, endpoint=include_nyquist and not whole)
w = torch.tensor(w, device=b.device)
if a.size() == 1:
n_fft = worN if whole else worN * 2
h = torch.fft.rfft(b, n=n_fft)[:worN]
h /= a
if h is None:
zm1 = torch.exp(-1j * w)
h = polyval(b, zm1) / (polyval(a, zm1) + 1e-16)
# need to catch NaNs here
w = w * fs / (2 * np.pi)
return w, h | d6950acc8535791968d34edf8c4ebd557000b72e | 12,103 |
def _helper_fit_partition(self, pnum, endog, exog, fit_kwds,
init_kwds_e={}):
"""handles the model fitting for each machine. NOTE: this
is primarily handled outside of DistributedModel because
joblib cannot handle class methods.
Parameters
----------
self : DistributedModel class instance
An instance of DistributedModel.
pnum : scalar
index of current partition.
endog : array_like
endogenous data for current partition.
exog : array_like
exogenous data for current partition.
fit_kwds : dict-like
Keywords needed for the model fitting.
init_kwds_e : dict-like
Additional init_kwds to add for each partition.
Returns
-------
estimation_method result. For the default,
_est_regularized_debiased, a tuple.
"""
temp_init_kwds = self.init_kwds.copy()
temp_init_kwds.update(init_kwds_e)
model = self.model_class(endog, exog, **temp_init_kwds)
results = self.estimation_method(model, pnum, self.partitions,
fit_kwds=fit_kwds,
**self.estimation_kwds)
return results | 30b7e6d48c2f0fa3eb2d2486fee9a87dad609886 | 12,104 |
def generate_2d_scatter(data, variables, class_data=None, class_names=None,
nrows=None, ncols=None, sharex=False, sharey=False,
show_legend=True, xy_line=False, trendline=False,
cmap_class=None, shorten_variables=False,
**kwargs):
"""Generate 2D scatter plots from the given data and variables.
This method will generate 2D scatter plots for all combinations
of the given variables.
Parameters
----------
data : object like :class:`pandas.core.frame.DataFrame`
The data we will plot here.
variables : list of strings
The variables we will generate scatter plots for.
class_data : object like :class:`pandas.core.series.Series`, optional
Class information for the points (if available).
class_names : dict of strings, optional
A mapping from the class data to labels/names.
nrows : integer, optional
The number of rows to use in a figure.
ncols : integer, optional
The number of columns to use in a figure.
sharex : boolean, optional
If True, the scatter plots will share the x-axis.
sharey : boolean, optional
If True, the scatter plots will share the y-axis.
show_legend : boolean, optional
If True, we will create a legend here and show it.
xy_line : boolean, optional
If True, we will add a x=y line to the plot.
trendline : boolean, optional
If True, we will add a trend line to the plot.
cmap_class : string or object like :class:`matplotlib.colors.Colormap`, optional
A color map to use for classes.
kwargs : dict, optional
Additional arguments used for the plotting.
Returns
-------
figures : list of objects like :class:`matplotlib.figure.Figure`
The figures containing the plots.
axes : list of objects like :class:`matplotlib.axes.Axes`
The axes containing the plots.
"""
nplots = comb(len(variables), 2, exact=True)
figures, axes = create_fig_and_axes(
nplots, nrows=nrows, ncols=ncols, sharex=sharex, sharey=sharey,
**kwargs,
)
fig = None
for i, (xvar, yvar) in enumerate(combinations(variables, 2)):
# We do not want to repeat the legend in all subplots:
show_legend_ax = False
if axes[i].figure != fig:
fig = axes[i].figure
show_legend_ax = True
xlabel = None
ylabel = None
if shorten_variables:
if len(xvar) > 5:
xlabel = xvar[:3] + '...'
if len(yvar) > 5:
ylabel = yvar[:3] + '...'
_, _, patches, labels = plot_scatter(
data,
xvar,
yvar,
axi=axes[i],
xlabel=xlabel,
ylabel=ylabel,
class_data=class_data,
class_names=class_names,
cmap_class=cmap_class,
**kwargs,
)
if xy_line:
line_xy = add_xy_line(axes[i], alpha=0.7, color='black')
patches.append(line_xy)
labels.append('x = y')
if trendline:
line_trend = add_trendline(axes[i], data[xvar], data[yvar],
alpha=0.7, ls='--', color='black')
patches.append(line_trend)
labels.append('y = a + bx')
if show_legend and show_legend_ax and patches and labels:
axes[i].legend(patches, labels)
return figures, axes | 9d2a843f07cbfed921831a64ad39b4c13a947500 | 12,105 |
def getOptions(options):
"""translate command line options to PAML options."""
codeml_options = {}
if options.analysis == "branch-specific-kaks":
codeml_options["seqtype"] = "1"
codeml_options["model"] = "1"
elif options.analysis == "branch-fixed-kaks":
codeml_options["seqtype"] = "1"
codeml_options["model"] = "0"
elif options.analysis == "branch-all-but-one-fixed-kaks":
codeml_options["seqtype"] = "1"
codeml_options["model"] = "2"
if not tree:
raise ValueError("please supply a tree for this mode.")
if not options.filename_output_tree:
raise ValueError(
"please speficy filename-output-tree as location "
"(relative to this script) for trees.")
elif options.analysis == "site-specific-kaks":
codeml_options["ncatG"] = "10"
codeml_options["getSE"] = "1"
codeml_options["seqtype"] = "1"
codeml_options["NSsites"] = "0 3 1 2 7 8"
codeml_options["model"] = "0"
codeml_options["CodonFreq"] = "2"
elif options.analysis == "pairwise":
codeml_options["seqtype"] = "1"
codeml_options["model"] = "0"
codeml_options["runmode"] = "-2"
if options.multiple_genes:
codeml_options["Malpha"] = "0"
codeml_options["Mgene"] = "0"
if options.omega is not None:
codeml_options["omega"] = str(options.omega)
if options.estimate_ancestors:
codeml_options["RateAncestor"] = "1"
if options.codon_frequencies is not None:
c = options.codon_frequencies.upper()
if c in ("UNIFORM", "FEQUAL"):
a = "0"
elif c == "F1X4":
a = "1"
elif c == "F3X4":
a = "2"
elif c == "F61":
a = "3"
else:
a = options.codon_frequencies
codeml_options["CodonFreq"] = a
if options.method is not None:
codeml_options["method"] = str(options.method)
if options.optimization_threshold is not None:
codeml_options["Small_Diff"] = str(options.optimization_threshold)
if options.clean_data:
codeml_options["cleandata"] = options.clean_data
return codeml_options | 86b0fa157e9a9c48d7bf683e57f24f49e32f15e7 | 12,106 |
def get_invocations(benchmark: Benchmark):
"""
Returns a list of invocations that invoke the tool for the given benchmark.
It can be assumed that the current directory is the directory from which execute_invocations.py is executed.
    For QCOMP 2020, this should return a list of invocations for all tracks in which the
    tool can take part. For each track, an invocation with default settings has to be
    provided; in addition, an optimized setting (e.g., the fastest engine and/or solution
    technique for this benchmark) can be specified. Only information about the model type,
    the property type and the state space size is allowed to be used to tweak the parameters.
If this benchmark is not supported, an empty list has to be returned.
"""
if not is_benchmark_supported(benchmark):
return []
prec = dict()
prec["epsilon-correct"] = "0.000001"
prec["probably-epsilon-correct"] = "0.05"
prec["often-epsilon-correct"] = "0.001"
prec["often-epsilon-correct-10-min"] = "0.001"
result = []
for track in prec.keys():
benchmark_settings = "./pet.sh reachability --precision {} --relative-error --only-result -m {} -p {} --property {}".format(
prec[track],
benchmark.get_prism_program_filename(),
benchmark.get_prism_property_filename(),
benchmark.get_property_name(),
)
if benchmark.get_open_parameter_def_string() != "":
benchmark_settings += " --const {}".format(
benchmark.get_open_parameter_def_string()
)
if (
"haddad" in benchmark.get_prism_program_filename()
or "gathering" in benchmark.get_prism_program_filename()
):
benchmark_settings = "./fix-syntax " + benchmark_settings
# default settings PET eps-corr
default_inv = Invocation()
default_inv.identifier = "default"
default_inv.note = "Default settings."
default_inv.track_id = track
default_inv.add_command(benchmark_settings)
result += [default_inv]
if (
track == "epsilon-correct"
or benchmark.get_model_type() == "ctmc"
or "haddad" in benchmark.get_prism_program_filename()
or "csma" in benchmark.get_prism_program_filename()
or "wlan" in benchmark.get_prism_program_filename()
or "gathering" in benchmark.get_prism_program_filename()
):
# smc is prob eps correct, cannot handle ctmc and haddad monmege cannot be parsed by it
continue
if benchmark.get_num_states_tweak() is None:
# need this info
continue
smc_settings = "./smc.sh {} {} -prop {} -heuristic RTDP_ADJ -RTDP_ADJ_OPTS 1 -colourParams S:{},Av:10,e:{},d:0.05,p:0.05,post:64".format(
benchmark.get_prism_program_filename(),
benchmark.get_prism_property_filename(),
benchmark.get_property_name(),
benchmark.get_num_states_tweak(),
prec[track],
)
if benchmark.get_open_parameter_def_string() != "":
smc_settings += " -const {}".format(
benchmark.get_open_parameter_def_string()
)
if (
"haddad" in benchmark.get_prism_program_filename()
or "gathering" in benchmark.get_prism_program_filename()
):
smc_settings = "./fix-syntax " + smc_settings
# SMC invocations
SMC_inv = Invocation()
SMC_inv.identifier = "specific"
SMC_inv.note = "Statistical model checking with limited information (no transition probabilities)"
SMC_inv.track_id = track
SMC_inv.add_command(smc_settings)
result += [SMC_inv]
return result | 4afeaa62cecd2ae21f6e112fe5c87dff54310a95 | 12,108 |
def see(node: "Position", move: Move = None) -> float:
"""Static-Exchange-Evaluation
Args:
node: The current position to see
move (Move, optional): The capture move to play. Defaults to None.
Returns:
float: The score associated with this capture. Positive is good.
"""
c = node.state.turn
bitboards = node.boards
if move is None:
return 0
if not move.is_capture:
return 0
i = 0
gain = [0] * 32
target = bitboards.piece_at(move._to)
if target is None:
return 0
occ = bitboards.occupancy
from_bb = Bitboard(1 << move._from)
attack_defend_bb = bitboards.attack_defend_to(move._to, c)
xrays = bitboards.xrays_bb
gain[i] = PIECE_VALUES[target._type]
assert target is not None
pt = (bitboards.piece_at(move._from))._type
while True:
i += 1
gain[i] = PIECE_VALUES[pt] - gain[i-1]
if max(-gain[i-1], gain[i]) < 0:
break
attack_defend_bb ^= from_bb
occ ^= from_bb
from_bb, pt = least_valuable_attacker(~c, bitboards, attack_defend_bb)
if not from_bb:
break
i -= 1
while i:
gain[i-1] = -max(-gain[i-1], gain[i])
i -= 1
return gain[0] | e6062b7cd09e9b2dca514e5be23b7fa870ff923f | 12,109 |
import numpy as np
def rk4(f, t0, y0, h, N):
""""Solve IVP given by y' = f(t, y), y(t_0) = y_0 with step size h > 0, for N steps,
using the Runge-Kutta 4 method.
Also works if y is an n-vector and f is a vector-valued function."""
t = t0 + np.array([i * h for i in range(N+1)])
m = len(y0)
y = np.zeros((N+1, m))
y[0] = y0
# Repeatedly approximate next value.
for n in range(N):
k1 = f(t[n], y[n])
k2 = f(t[n] + h/2, y[n] + k1 * h/2)
k3 = f(t[n] + h/2, y[n] + k2 * h/2)
k4 = f(t[n] + h, y[n] + k3 * h)
y[n+1] = y[n] + h * (k1 + 2 * k2 + 2 * k3 + k4) / 6
return t, y | e6b7c3d1ac0ea765a3ac9ebac69159dd2c2eab78 | 12,111 |
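# Worked example for rk4: solve y' = y with y(0) = 1 on [0, 1]; the final value should be
# close to e (~2.71828). Assumes numpy is imported as np, as the function itself requires.
def exponential_growth(t, y):
    return y  # dy/dt = y
t, y = rk4(exponential_growth, t0=0.0, y0=[1.0], h=0.1, N=10)
print(t[-1], y[-1, 0])  # 1.0, ~2.718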
import torch
def parse_predictions(est_data, gt_data, config_dict):
""" Parse predictions to OBB parameters and suppress overlapping boxes
Args:
est_data, gt_data: dict
{point_clouds, center, heading_scores, heading_residuals,
size_scores, size_residuals, sem_cls_scores}
config_dict: dict
{dataset_config, remove_empty_box, use_3d_nms, nms_iou,
use_old_type_nms, conf_thresh, per_class_proposal}
Returns:
batch_pred_map_cls: a list of len == batch size (BS)
[pred_list_i], i = 0, 1, ..., BS-1
where pred_list_i = [(pred_sem_cls, box_params, box_score)_j]
where j = 0, ..., num of valid detections - 1 from sample input i
"""
eval_dict = {}
pred_center = est_data['center'] # B,num_proposal,3
pred_heading_class = torch.argmax(est_data['heading_scores'], -1) # B,num_proposal
heading_residuals = est_data['heading_residuals_normalized'] * (
np.pi / config_dict['dataset_config'].num_heading_bin) # Bxnum_proposalxnum_heading_bin
pred_heading_residual = torch.gather(heading_residuals, 2,
pred_heading_class.unsqueeze(-1)) # B,num_proposal,1
pred_heading_residual.squeeze_(2)
pred_size_class = torch.argmax(est_data['size_scores'], -1) # B,num_proposal
size_residuals = est_data['size_residuals_normalized'] * torch.from_numpy(
config_dict['dataset_config'].mean_size_arr.astype(np.float32)).cuda().unsqueeze(0).unsqueeze(0)
pred_size_residual = torch.gather(size_residuals, 2,
pred_size_class.unsqueeze(-1).unsqueeze(-1).repeat(1, 1, 1,
3)) # B,num_proposal,1,3
pred_size_residual.squeeze_(2)
pred_sem_cls = torch.argmax(est_data['sem_cls_scores'], -1) # B,num_proposal
sem_cls_probs = softmax(est_data['sem_cls_scores'].detach().cpu().numpy()) # B,num_proposal,10
pred_sem_cls_prob = np.max(sem_cls_probs, -1) # B,num_proposal
num_proposal = pred_center.shape[1]
# Since we operate in upright_depth coord for points, while util functions
# assume upright_camera coord.
bsize = pred_center.shape[0]
pred_corners_3d_upright_camera = np.zeros((bsize, num_proposal, 8, 3))
pred_center_upright_camera = flip_axis_to_camera(pred_center.detach().cpu().numpy())
for i in range(bsize):
for j in range(num_proposal):
heading_angle = config_dict['dataset_config'].class2angle( \
pred_heading_class[i, j].detach().cpu().numpy(), pred_heading_residual[i, j].detach().cpu().numpy())
box_size = config_dict['dataset_config'].class2size( \
int(pred_size_class[i, j].detach().cpu().numpy()), pred_size_residual[i, j].detach().cpu().numpy())
corners_3d_upright_camera = get_3d_box(box_size, -heading_angle, pred_center_upright_camera[i, j, :])
pred_corners_3d_upright_camera[i, j] = corners_3d_upright_camera
K = pred_center.shape[1] # K==num_proposal
nonempty_box_mask = np.ones((bsize, K))
if config_dict['remove_empty_box']:
# -------------------------------------
# Remove predicted boxes without any point within them..
batch_pc = gt_data['point_clouds'].cpu().numpy()[:, :, 0:3] # B,N,3
for i in range(bsize):
pc = batch_pc[i, :, :] # (N,3)
for j in range(K):
box3d = pred_corners_3d_upright_camera[i, j, :, :] # (8,3)
box3d = flip_axis_to_depth(box3d)
pc_in_box, inds = extract_pc_in_box3d(pc, box3d)
if len(pc_in_box) < 5:
nonempty_box_mask[i, j] = 0
# -------------------------------------
obj_logits = est_data['objectness_scores'].detach().cpu().numpy()
obj_prob = softmax(obj_logits)[:, :, 1] # (B,K)
if not config_dict['use_3d_nms']:
# ---------- NMS input: pred_with_prob in (B,K,7) -----------
pred_mask = np.zeros((bsize, K), dtype=np.uint8)
for i in range(bsize):
boxes_2d_with_prob = np.zeros((K, 5))
for j in range(K):
boxes_2d_with_prob[j, 0] = np.min(pred_corners_3d_upright_camera[i, j, :, 0])
boxes_2d_with_prob[j, 2] = np.max(pred_corners_3d_upright_camera[i, j, :, 0])
boxes_2d_with_prob[j, 1] = np.min(pred_corners_3d_upright_camera[i, j, :, 2])
boxes_2d_with_prob[j, 3] = np.max(pred_corners_3d_upright_camera[i, j, :, 2])
boxes_2d_with_prob[j, 4] = obj_prob[i, j]
nonempty_box_inds = np.where(nonempty_box_mask[i, :] == 1)[0]
pick = nms_2d_faster(boxes_2d_with_prob[nonempty_box_mask[i, :] == 1, :],
config_dict['nms_iou'], config_dict['use_old_type_nms'])
assert (len(pick) > 0)
pred_mask[i, nonempty_box_inds[pick]] = 1
eval_dict['pred_mask'] = pred_mask
# ---------- NMS output: pred_mask in (B,K) -----------
elif config_dict['use_3d_nms'] and (not config_dict['cls_nms']):
# ---------- NMS input: pred_with_prob in (B,K,7) -----------
pred_mask = np.zeros((bsize, K), dtype=np.uint8)
for i in range(bsize):
boxes_3d_with_prob = np.zeros((K, 7))
for j in range(K):
boxes_3d_with_prob[j, 0] = np.min(pred_corners_3d_upright_camera[i, j, :, 0])
boxes_3d_with_prob[j, 1] = np.min(pred_corners_3d_upright_camera[i, j, :, 1])
boxes_3d_with_prob[j, 2] = np.min(pred_corners_3d_upright_camera[i, j, :, 2])
boxes_3d_with_prob[j, 3] = np.max(pred_corners_3d_upright_camera[i, j, :, 0])
boxes_3d_with_prob[j, 4] = np.max(pred_corners_3d_upright_camera[i, j, :, 1])
boxes_3d_with_prob[j, 5] = np.max(pred_corners_3d_upright_camera[i, j, :, 2])
boxes_3d_with_prob[j, 6] = obj_prob[i, j]
nonempty_box_inds = np.where(nonempty_box_mask[i, :] == 1)[0]
pick = nms_3d_faster(boxes_3d_with_prob[nonempty_box_mask[i, :] == 1, :],
config_dict['nms_iou'], config_dict['use_old_type_nms'])
assert (len(pick) > 0)
pred_mask[i, nonempty_box_inds[pick]] = 1
eval_dict['pred_mask'] = pred_mask
# ---------- NMS output: pred_mask in (B,K) -----------
elif config_dict['use_3d_nms'] and config_dict['cls_nms']:
# ---------- NMS input: pred_with_prob in (B,K,8) -----------
pred_mask = np.zeros((bsize, K), dtype=np.uint8)
for i in range(bsize):
boxes_3d_with_prob = np.zeros((K, 8))
for j in range(K):
boxes_3d_with_prob[j, 0] = np.min(pred_corners_3d_upright_camera[i, j, :, 0])
boxes_3d_with_prob[j, 1] = np.min(pred_corners_3d_upright_camera[i, j, :, 1])
boxes_3d_with_prob[j, 2] = np.min(pred_corners_3d_upright_camera[i, j, :, 2])
boxes_3d_with_prob[j, 3] = np.max(pred_corners_3d_upright_camera[i, j, :, 0])
boxes_3d_with_prob[j, 4] = np.max(pred_corners_3d_upright_camera[i, j, :, 1])
boxes_3d_with_prob[j, 5] = np.max(pred_corners_3d_upright_camera[i, j, :, 2])
boxes_3d_with_prob[j, 6] = obj_prob[i, j]
boxes_3d_with_prob[j, 7] = pred_sem_cls[i, j] # only suppress if the two boxes are of the same class!!
nonempty_box_inds = np.where(nonempty_box_mask[i, :] == 1)[0]
pick = nms_3d_faster_samecls(boxes_3d_with_prob[nonempty_box_mask[i, :] == 1, :],
config_dict['nms_iou'], config_dict['use_old_type_nms'])
assert (len(pick) > 0)
pred_mask[i, nonempty_box_inds[pick]] = 1
eval_dict['pred_mask'] = pred_mask
# ---------- NMS output: pred_mask in (B,K) -----------
return eval_dict, {'pred_corners_3d_upright_camera': pred_corners_3d_upright_camera,
'sem_cls_probs': sem_cls_probs,
'obj_prob': obj_prob,
'pred_sem_cls': pred_sem_cls} | 9d31b44e37e7af458084b29927eaa29d2e1889af | 12,113 |
def benchmark_op(op, burn_iters: int = 2, min_iters: int = 10):
"""Final endpoint for all kb.benchmarks functions."""
assert not tf.executing_eagerly()
with tf.compat.v1.Session() as sess:
sess.run(tf.compat.v1.global_variables_initializer())
bm = tf.test.Benchmark()
result = bm.run_op_benchmark(
sess, op, burn_iters=burn_iters, min_iters=min_iters
)
summarize(result)
return result | 4b46cbb4f487332b43e1c06daef294e9e01f13a5 | 12,114 |
def run_truncated_sprt(list_alpha, list_beta, logits_concat, labels_concat, verbose=False):
""" Calculate confusion matrix, mean hitting time, and truncate rate of a batch.
Args:
list_alpha: A list of floats.
list_beta: A list of floats with the same length as list_alpha's.
logits_concat: A logit Tensor with shape (batch, (duration - order_sprt), order_sprt + 1, 2). This is the output of datasets.data_processing.sequential_concat(logit_slice, labels_slice)
labels_concat: A binary label Tensor with shape (batch size,) with label = 0 or 1. This is the output of datasets.data_processing.sequential_concat(logit_slice, labels_slice).
Returns:
dict_confmx_sprt: A dictionary with keys like "thresh=0.2342,-0.2342". Value is a confusion matrix Tensor.
dict_mean_hittimes: A dictionary with keys like "thresh=0.2342,-0.2342". Value is a mean hitting time.
        dict_truncate_rates: A dictionary with keys like "thresh=0.2342,-0.2342". Value is a truncate rate.
"""
dict_confmx_sprt = dict()
dict_mean_hittimes = dict()
dict_var_hittimes = dict()
dict_truncate_rates = dict()
batch_size_tmp = labels_concat.shape[0]
for alpha, beta in zip(list_alpha, list_beta):
# Calc thresholds
alpha = float(alpha)
beta = float(beta)
thresh = [np.log(beta/(1-alpha)), np.log((1-beta)/alpha)]
key = "thresh={:6.4f},{:7.4f}".format(thresh[0], thresh[1])
# Run truncated sprt
confmx, mean_hittime, var_hittime, truncate_rate = binary_truncated_sprt(logits_concat, labels_concat, alpha, beta)
dict_confmx_sprt[key] = confmx
dict_mean_hittimes[key] = mean_hittime
dict_var_hittimes[key] = var_hittime
dict_truncate_rates[key] = truncate_rate
if verbose:
print("====================================")
print("SPRT w/ alpha={}, beta={}".format(alpha, beta))
print("Thresholds = {}".format(thresh))
print("Confusion Matrix")
print(confmx)
print("Mean Hitting Time: {} +- {}".format(mean_hittime, tf.sqrt(var_hittime)))
print("truncate: {} / {} = {}".format(tf.round(truncate_rate*batch_size_tmp), batch_size_tmp, truncate_rate))
print("====================================")
return dict_confmx_sprt, dict_mean_hittimes, dict_var_hittimes, dict_truncate_rates | 7934b1de29c60a59df056cbb1e4dce42e76ca540 | 12,115 |
def get_users(metadata):
"""
Pull users, handles hidden user errors
Parameters:
metadata: sheet of metadata from mwclient
Returns:
the list of users
"""
users = []
for rev in metadata:
try:
users.append(rev["user"])
except (KeyError):
users.append(None)
return users | 48dbae6a63019b0e4c2236a97e147102fe4d8758 | 12,116 |
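# Worked example for get_users: a revision without a 'user' key (e.g. a hidden user)
# contributes None to the list.
metadata = [{"user": "Alice"}, {"revid": 2}, {"user": "Bob"}]
print(get_users(metadata))  # ['Alice', None, 'Bob']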