content (string, 35–762k chars) | sha1 (string, 40 chars) | id (int64, 0–3.66M)
---|---|---|
def build_tree(train, max_depth, min_size, n_features):
"""build_tree(创建一个决策树)
Args:
train 训练数据集
max_depth 决策树深度不能太深,不然容易导致过拟合
min_size 叶子节点的大小
n_features 选取的特征的个数
Returns:
root 返回决策树
"""
# 返回最优列和相关的信息
root = get_split(train, n_features)
# 对左右2边的数据 进行递归的调用,由于最优特征使用过,所以在后面进行使用的时候,就没有意义了
# 例如: 性别-男女,对男使用这一特征就没任何意义了
split(root, max_depth, min_size, n_features, 1)
return root | 5afd343436f14d9ab704636eb480d92a31d59f04 | 5,000 |
import pprint
from timeit import default_timer as timer
import boto3
from botocore.exceptions import ClientError
def delete_bucket(bucket_name: str, location: str, verbose: bool) -> bool:
"""Delete the specified S3 bucket
Args:
bucket_name (str): name of the S3 bucket
location (str): the location (region) the S3 bucket resides in
verbose (bool): enable verbose output
Returns:
bool: True if the specified S3 bucket was successfully deleted,
False otherwise
"""
try:
print(f'Deleting S3 bucket {bucket_name} in location {location} ...')
start = timer()
s3_client = boto3.client('s3', region_name=location)
response = s3_client.delete_bucket(Bucket=bucket_name)
end = timer()
elapsed_time = round(end - start, 3)
print(f'Deleted bucket in {elapsed_time} seconds')
if verbose:
print('delete_bucket() response:')
pprint.pprint(response)
print()
        if response['ResponseMetadata']['HTTPStatusCode'] == 204:
            print(f'S3 bucket {bucket_name} successfully deleted')
            return True
        return False
except ClientError as e:
print(f'S3 ClientError occurred while trying to delete bucket:')
print(f"\t{e.response['Error']['Code']}: {e.response['Error']['Message']}")
return False | 79c225c9f8caa0d8c3431709d3f08ccaefe3fc1c | 5,001 |
import re
import networkx as nx
def generate_ordered_match_str_from_subseqs(r1,
subseqs_to_track,
rc_component_dict,
allow_overlaps=False):
"""Generates an ordered subsequences match string for the input sequence.
Args:
r1: (str) R1 sequence to scan for subsequence matches.
subseqs_to_track: (list) Subsequences to look for in R1.
rc_component_dict: (dict) Dict mapping DNA sequence to label.
allow_overlaps: (boolean) Whether to allow matches that overlap on R1. If
False, then it will identify a maximal non-overlapping set of matches.
Returns:
(str) labeled components for r1 in the form: 'label_1;label_2;...;label_n'
"""
# Generate ordered set of subseq matches to r1 sequence.
match_tups = []
for mer_label in subseqs_to_track:
mer = rc_component_dict[mer_label]
for match in re.finditer(mer, r1):
xstart = match.start()
xend = xstart + len(mer)
match_tups.append((xstart, xend, mer_label))
match_tups.sort(reverse=True)
# Create a maximal independent set that does not allow overlapping subseqs.
if not allow_overlaps and len(match_tups) > 0:
mer_graph = nx.Graph()
mer_graph.add_nodes_from(match_tups)
for i in range(len(match_tups)):
for j in range(i + 1, len(match_tups)):
# Check if the end of match_tups[j] overlaps the start of match_tups[i].
if match_tups[i][0] < match_tups[j][1]:
mer_graph.add_edge(match_tups[i], match_tups[j])
# Generate a non-overlapping list of subseqs.
match_tups = nx.maximal_independent_set(mer_graph)
match_tups.sort(reverse=True)
match_str = BCS_SEP.join([match_tup[-1] for match_tup in match_tups])
return match_str | 202f228b40b73518342b1cc2419ca466626fc166 | 5,002 |
def combination(n: int, r: int) -> int:
""":return nCr = nPr / r!"""
return permutation(n, r) // factorial(r) | 6cf58428cacd0e09cc1095fb120208aaeee7cb7c | 5,003 |
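combination() above relies on permutation() and factorial() helpers from its module; the minimal sketch below (assumed implementations, not the originals) makes the row runnable and adds a quick check.
from math import factorial

def permutation(n: int, r: int) -> int:
    # nPr = n! / (n - r)!  (assumed helper, matching the docstring above)
    return factorial(n) // factorial(n - r)

def combination(n: int, r: int) -> int:
    """:return nCr = nPr / r!"""
    return permutation(n, r) // factorial(r)

assert combination(5, 2) == 10   # 5C2
assert combination(6, 0) == 1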
import numpy as np
# _assemble_kernel is MNE-Python's private kernel assembler; the import path
# below (mne.minimum_norm.inverse) may vary across MNE versions.
from mne.minimum_norm.inverse import _assemble_kernel
def _extract_operator_data(fwd, inv_prep, labels, method='dSPM'):
"""Function for extracting forward and inverse operator matrices from
the MNE-Python forward and inverse data structures, and assembling the
source identity map.
Input arguments:
================
fwd : ForwardOperator
The fixed_orientation forward operator.
Instance of the MNE-Python class Forward.
inv_prep : Inverse
The prepared inverse operator.
Instance of the MNE-Python class InverseOperator.
labels : list
List of labels belonging to the used parcellation, e.g. the
Desikan-Killiany, Destrieux, or Schaefer parcellation.
May not contain 'trash' labels/parcels (unknown or medial wall), those
should be deleted from the labels array!
method : str
The inversion method. Default 'dSPM'.
Other methods ('MNE', 'sLORETA', 'eLORETA') have not been tested.
Output arguments:
=================
source_identities : ndarray
Vector mapping sources to parcels or labels.
fwd_mat : ndarray [sensors x sources]
The forward operator matrix.
inv_mat : ndarray [sources x sensors]
The prepared inverse operator matrix.
"""
# counterpart to forwardOperator, [sources x sensors]. ### pick_ori None for free, 'normal' for fixed orientation.
K, noise_norm, vertno, source_nn = _assemble_kernel(
inv=inv_prep, label=None, method=method, pick_ori='normal')
# get source space
src = inv_prep.get('src')
vert_lh, vert_rh = src[0].get('vertno'), src[1].get('vertno')
# get labels, vertices and src-identities
src_ident_lh = np.full(len(vert_lh), -1, dtype='int')
src_ident_rh = np.full(len(vert_rh), -1, dtype='int')
# find sources that belong to the left hemisphere labels
n_labels = len(labels)
for la, label in enumerate(labels[:n_labels//2]):
for v in label.vertices:
src_ident_lh[np.where(vert_lh == v)] = la
# find sources that belong to the right hemisphere labels. Add by n left.
for la, label in enumerate(labels[n_labels//2:n_labels]):
for v in label.vertices:
src_ident_rh[np.where(vert_rh == v)] = la
src_ident_rh[np.where(src_ident_rh<0)] = src_ident_rh[np.where(
src_ident_rh<0)] -n_labels/2
src_ident_rh = src_ident_rh + (n_labels // 2)
source_identities = np.concatenate((src_ident_lh,src_ident_rh))
# extract fwd and inv matrices
fwd_mat = fwd['sol']['data'] # sensors x sources
"""If there are bad channels the corresponding rows can be missing
from the forward matrix. Not sure if the same can occur for the
inverse. This is not a problem if bad channels are interpolated.""" ### MOVED from weight_inverse_operator, just before """Compute the weighted operator."""
ind = np.asarray([i for i, ch in enumerate(fwd['info']['ch_names'])
if ch not in fwd['info']['bads']])
fwd_mat = fwd_mat[ind, :]
# noise_norm is used with dSPM and sLORETA. Other methods return null.
    if method not in ('dSPM', 'sLORETA'):
noise_norm = 1.
inv_mat = K * noise_norm # sources x sensors
return source_identities, fwd_mat, inv_mat | 6daded6f6df4abbd3dea105927ca39e02e64b970 | 5,004 |
def create_new_containers(module, intended, facts):
"""
Create missing container to CVP Topology.
Parameters
----------
module : AnsibleModule
Object representing Ansible module structure with a CvpClient connection
intended : list
List of expected containers based on following structure:
facts : dict
Facts from CVP collected by cv_facts module
"""
count_container_creation = 0
# Get root container of topology
topology_root = tools_tree.get_root_container(containers_fact=facts['containers'])
# Build ordered list of containers to create: from Tenant to leaves.
container_intended_tree = tools_tree.tree_build_from_dict(containers=intended, root=topology_root)
MODULE_LOGGER.debug("The ordered dict is: %s", str(container_intended_tree))
container_intended_ordered_list = tools_tree.tree_to_list(json_data=container_intended_tree, myList=list())
MODULE_LOGGER.debug("The ordered list is: %s", str(container_intended_ordered_list))
# Parse ordered list of container and check if they are configured on CVP.
# If not, then call container creation process.
for container_name in container_intended_ordered_list:
found = False
# Check if container name is found in CVP Facts.
for fact_container in facts['containers']:
if container_name == fact_container['name']:
found = True
break
# If container has not been found, we create it
if not found:
# module.fail_json(msg='** Create container'+container_name+' attached to '+intended[container_name]['parent_container'])
MODULE_LOGGER.debug('sent process_container request with %s / %s', str(
container_name), str(intended[container_name]['parent_container']))
response = process_container(module=module,
container=container_name,
parent=intended[container_name]['parent_container'],
action='add')
MODULE_LOGGER.debug('sent process_container request with %s / %s and response is : %s', str(
container_name), str(intended[container_name]['parent_container']), str(response))
# If a container has been created, increment creation counter
if response[0]:
count_container_creation += 1
# Build module message to return for creation.
if count_container_creation > 0:
return [True, {'containers_created': "" + str(count_container_creation) + ""}]
return [False, {'containers_created': "0"}] | d173c49a40e6a7a71588618e18378260c05018c6 | 5,005 |
from prompt_toolkit.interface import CommandLineInterface
from .containers import Window
from .controls import BufferControl
def find_window_for_buffer_name(cli, buffer_name):
"""
Look for a :class:`~prompt_toolkit.layout.containers.Window` in the Layout
that contains the :class:`~prompt_toolkit.layout.controls.BufferControl`
for the given buffer and return it. If no such Window is found, return None.
"""
assert isinstance(cli, CommandLineInterface)
for l in cli.layout.walk(cli):
if isinstance(l, Window) and isinstance(l.content, BufferControl):
if l.content.buffer_name == buffer_name:
return l | 7912cc96365744c3a4daa44a72f272b083121e3c | 5,006 |
from django.contrib.auth.models import Group, User
def create_pilot(username='kimpilot', first_name='Kim', last_name='Pilot', email='[email protected]', password='secret'):
"""Returns a new Pilot (User) with the given properties."""
pilot_group, _ = Group.objects.get_or_create(name='Pilots')
pilot = User.objects.create_user(username, email, password, first_name=first_name, last_name=last_name)
pilot.groups.add(pilot_group)
return pilot | 6c173a94a97d64182dcb28b0cef510c0838a545f | 5,007 |
def dict_to_datasets(data_list, components):
"""add models and backgrounds to datasets
Parameters
----------
datasets : `~gammapy.modeling.Datasets`
Datasets
components : dict
dict describing model components
"""
models = dict_to_models(components)
datasets = []
for data in data_list["datasets"]:
dataset = DATASETS.get_cls(data["type"]).from_dict(data, components, models)
datasets.append(dataset)
return datasets | e021317aae6420833d46782b3a611d17fb7156dc | 5,008 |
def of(*args: _TSource) -> Seq[_TSource]:
"""Create sequence from iterable.
Enables fluent dot chaining on the created sequence object.
"""
return Seq(args) | eb8ea24c057939cf82f445099c953a84a4b51895 | 5,009 |
def find_anagrams(word_list: list) -> dict:
"""Finds all anagrams in a word list and returns it in a dictionary
with the letters as a key.
"""
d = dict()
for word in word_list:
unique_key = single(word)
if unique_key in d:
d[unique_key].append(word)
else:
d[unique_key] = [word]
return d | 5e3514344d396d11e8a540b5faa0c31ae3ee6dab | 5,010 |
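find_anagrams() relies on a single() helper that is not part of the row above; a common choice (assumed here, not necessarily the original) is to key each word by its sorted letters, which the usage sketch below demonstrates.
def single(word: str) -> str:
    # Assumed canonical key: the word's letters sorted alphabetically.
    return ''.join(sorted(word))

words = ['listen', 'silent', 'enlist', 'google']
groups = find_anagrams(words)
# e.g. {'eilnst': ['listen', 'silent', 'enlist'], 'eggloo': ['google']}
print(groups)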
import itertools
def resolver(state_sets, event_map):
"""Given a set of state return the resolved state.
Args:
state_sets(list[dict[tuple[str, str], str]]): A list of dicts from
type/state_key tuples to event_id
event_map(dict[str, FrozenEvent]): Map from event_id to event
Returns:
dict[tuple[str, str], str]: The resolved state map.
"""
# First split up the un/conflicted state
unconflicted_state, conflicted_state = _seperate(state_sets)
# Also fetch all auth events that appear in only some of the state sets'
# auth chains.
auth_diff = _get_auth_chain_difference(state_sets, event_map)
# Now order the conflicted state and auth_diff by power level (falling
# back to event_id to tie break consistently).
event_id_to_level = [
(_get_power_level_for_sender(event_id, event_map), event_id)
for event_id in set(itertools.chain(
itertools.chain.from_iterable(conflicted_state.values()),
auth_diff,
))
]
event_id_to_level.sort()
events_sorted_by_power = [eid for _, eid in event_id_to_level]
# Now we reorder the list to ensure that auth dependencies of an event
# appear before the event in the list
sorted_events = []
def add_to_list(event_id):
event = event_map[event_id]
for aid, _ in event.auth_events:
if aid in events_sorted_by_power:
events_sorted_by_power.remove(aid)
add_to_list(aid)
sorted_events.append(event_id)
# First, lets pick out all the events that (probably) require power
leftover_events = []
while events_sorted_by_power:
event_id = events_sorted_by_power.pop()
if _is_power_event(event_map[event_id]):
add_to_list(event_id)
else:
leftover_events.append(event_id)
# Now we go through the sorted events and auth each one in turn, using any
# previously successfully auth'ed events (falling back to their auth events
# if they don't exist)
overridden_state = {}
event_id_to_auth = {}
for event_id in sorted_events:
event = event_map[event_id]
auth_events = {}
for aid, _ in event.auth_events:
aev = event_map[aid]
auth_events[(aev.type, aev.state_key)] = aev
for key, eid in overridden_state.items():
auth_events[key] = event_map[eid]
try:
event_auth.check(
event, auth_events,
do_sig_check=False,
do_size_check=False
)
allowed = True
overridden_state[(event.type, event.state_key)] = event_id
except AuthError:
allowed = False
event_id_to_auth[event_id] = allowed
resolved_state = {}
# Now for each conflicted state type/state_key, pick the latest event that
# has passed auth above, falling back to the first one if none passed auth.
for key, conflicted_ids in conflicted_state.items():
sorted_conflicts = []
for eid in sorted_events:
if eid in conflicted_ids:
sorted_conflicts.append(eid)
sorted_conflicts.reverse()
for eid in sorted_conflicts:
if event_id_to_auth[eid]:
resolved_eid = eid
resolved_state[key] = resolved_eid
break
resolved_state.update(unconflicted_state)
# OK, so we've now resolved the power events. Now mainline them.
sorted_power_resolved = sorted(resolved_state.values())
mainline = []
def add_to_list_two(event_id):
ev = event_map[event_id]
for aid, _ in ev.auth_events:
if aid not in mainline and event_id_to_auth.get(aid, True):
add_to_list_two(aid)
if event_id not in mainline:
mainline.append(event_id)
while sorted_power_resolved:
ev_id = sorted_power_resolved.pop()
ev = event_map[ev_id]
if _is_power_event(ev):
add_to_list_two(ev_id)
mainline_map = {ev_id: i + 1 for i, ev_id in enumerate(mainline)}
def get_mainline_depth(event_id):
if event_id in mainline_map:
return mainline_map[event_id]
ev = event_map[event_id]
if not ev.auth_events:
return 0
depth = max(
get_mainline_depth(aid)
for aid, _ in ev.auth_events
)
return depth
leftover_events_map = {
ev_id: get_mainline_depth(ev_id)
for ev_id in leftover_events
}
leftover_events.sort(key=lambda ev_id: (leftover_events_map[ev_id], ev_id))
for event_id in leftover_events:
event = event_map[event_id]
auth_events = {}
for aid, _ in event.auth_events:
aev = event_map[aid]
auth_events[(aev.type, aev.state_key)] = aev
for key, eid in overridden_state.items():
auth_events[key] = event_map[eid]
try:
event_auth.check(
event, auth_events,
do_sig_check=False,
do_size_check=False
)
allowed = True
overridden_state[(event.type, event.state_key)] = event_id
except AuthError:
allowed = False
event_id_to_auth[event_id] = allowed
for key, conflicted_ids in conflicted_state.items():
sorted_conflicts = []
for eid in leftover_events:
if eid in conflicted_ids:
sorted_conflicts.append(eid)
sorted_conflicts.reverse()
for eid in sorted_conflicts:
if event_id_to_auth[eid]:
resolved_eid = eid
resolved_state[key] = resolved_eid
break
resolved_state.update(unconflicted_state)
return resolved_state | 90b8f78e46e13904a9c898cda417378964667ff8 | 5,011 |
def parse_study(study):
"""Parse study
Args:
study (object): object from DICOMDIR level 1 object (children of patient_record)
    Returns:
        children: child records of the study
        study_date, study_time, study_des: study date, time, and description
    """
#study_id = study.StudyID
study_date = study.StudyDate
study_time = study.StudyTime
study_des = study.StudyDescription
return study.children, study_date, study_time, study_des | d0e85d991e4f2f13e6f2bd87c0823858ea9c83bc | 5,012 |
def list_organizational_units_for_parent_single_page(self, **kwargs):
"""
This will continue to call list_organizational_units_for_parent until there are no more pages left to retrieve.
It will return the aggregated response in the same structure as list_organizational_units_for_parent does.
:param self: organizations client
:param kwargs: these are passed onto the list_organizational_units_for_parent method call
:return: organizations_client.list_organizational_units_for_parent.response
"""
return slurp(
'list_organizational_units_for_parent',
self.list_organizational_units_for_parent,
'OrganizationalUnits',
'NextToken', 'NextToken',
**kwargs
) | 73e942d59026830aac528b9dd358f08ebe8a66b3 | 5,013 |
def daemon(target, name=None, args=None, kwargs=None, after=None):
"""
Create and start a daemon thread.
It is same as `start()` except that it sets argument `daemon=True`.
"""
return start(target, name=name, args=args, kwargs=kwargs,
daemon=True, after=after) | 27d608c9cc5be1ab45abe9666e52bbbf89a1f066 | 5,014 |
import threading
def add_image_to_obj(obj, img, *args, **kwargs):
"""
"""
# skip everything if there is no image
    if img is None:
return None
# find out of the object is an artist or an album
# then add the artist or the album to the objects
objs = {}
if isinstance(obj, Artist):
objs['artist'] = obj
t = 'artist'
elif isinstance(obj, Album):
objs['album'] = obj
t = 'album'
# delete old objects in S3 if editing:
reprocess = kwargs.pop('reprocess', False)
editing = kwargs.pop('edit', False)
if editing:
prefix = f"images/{obj.__class__.__name__}/{str(obj.uri)}/"
# delete the old objects from the database and S3
if settings.USE_S3:
s3 = boto3.resource('s3')
image_mngr = getattr(obj, f"{t}_image")
images = image_mngr.all()
for item in images:
if item.file:
if settings.USE_S3:
s3.Object(settings.AWS_STORAGE_BUCKET_NAME, item.file.name)#.delete()
# else: # delete file locally... who cares...
if reprocess:
if not item.is_original:
item.delete()
else:
item.delete()
def process_image(image_obj):
width, height = get_image_dimensions(image_obj.file.file)
image_obj.width = width
image_obj.height = height
image_obj.save()
# post processing, creating duplicates, etc...
# create new thread...
        thread = threading.Thread(target=resize_image_async, args=[image_obj])
        thread.daemon = True
        thread.start()
return image_obj
# create the object
if type(img) == str:
image_obj = Image.objects.create(reference=img, is_original=True, height=1, width=1, **objs)
elif type(img) == dict:
        image_obj = Image.objects.create(reference=img['image'], is_original=True, height=img['height'], width=img['width'], **objs)
else: # image is the file
if reprocess:
image_obj = Image.objects.filter(**{f"{t}": obj, 'is_original': True}).first()
else:
image_obj = Image.objects.create(file=img, is_original=True, height=1, width=1, **objs) # image is stored in S3
image_obj = process_image(image_obj)
return image_obj | 60b2f9eb871e5b4943b4ab68c817afdd8cf47cab | 5,015 |
from datetime import datetime
from random import randint
def rr_category_ad(context, ad_zone, ad_category, index=0):
"""
Returns a rr advert from the specified category based on index.
Usage:
{% load adzone_tags %}
{% rr_category_ad 'zone_slug' 'my_category_slug' 1 %}
"""
to_return = {'random_int': randint(1000000, 10000000)}
# Retrieve a rr ad for the category and zone
ad = AdBase.objects.get_rr_ad(ad_zone, ad_category, index)
to_return['ad'] = ad
# Record a impression for the ad
if settings.ADZONE_LOG_AD_IMPRESSIONS and 'from_ip' in context and ad:
from_ip = context.get('from_ip')
try:
AdImpression.objects.create(
ad=ad, impression_date=datetime.now(), source_ip=from_ip)
except Exception:
pass
return to_return | db78853ebdf64267e2cca217589ac309706333a1 | 5,016 |
# Assuming the layers come from tf.keras (plain `keras` exposes the same names).
from tensorflow.keras.layers import BatchNormalization, Conv2DTranspose, ReLU
def decoderCNN(x, layers):
""" Construct the Decoder
x : input to decoder
layers : the number of filters per layer (in encoder)
"""
# Feature unpooling by 2H x 2W
    for i in range(len(layers) - 1, 0, -1):
        n_filters = layers[i]
x = Conv2DTranspose(n_filters, (3, 3), strides=(2, 2), padding='same', use_bias=False,
kernel_initializer='he_normal')(x)
x = BatchNormalization()(x)
x = ReLU()(x)
# Last unpooling, restore number of channels
x = Conv2DTranspose(1, (3, 3), strides=(2, 2), padding='same', use_bias=False, kernel_initializer='he_normal')(x)
x = BatchNormalization()(x)
x = ReLU()(x)
return x | 9c48c5242b0793c71385f9137729393d17d2db06 | 5,017 |
import numpy as np
import torch
def binary_dice_iou_score(
y_pred: torch.Tensor,
y_true: torch.Tensor,
mode="dice",
threshold=None,
nan_score_on_empty=False,
eps=1e-7,
ignore_index=None,
) -> float:
"""
Compute IoU score between two image tensors
:param y_pred: Input image tensor of any shape
:param y_true: Target image of any shape (must match size of y_pred)
:param mode: Metric to compute (dice, iou)
:param threshold: Optional binarization threshold to apply on @y_pred
:param nan_score_on_empty: If true, return np.nan if target has no positive pixels;
If false, return 1. if both target and input are empty, and 0 otherwise.
:param eps: Small value to add to denominator for numerical stability
:param ignore_index:
:return: Float scalar
"""
assert mode in {"dice", "iou"}
# Make binary predictions
if threshold is not None:
y_pred = (y_pred > threshold).to(y_true.dtype)
if ignore_index is not None:
mask = (y_true != ignore_index).to(y_true.dtype)
y_true = y_true * mask
y_pred = y_pred * mask
intersection = torch.sum(y_pred * y_true).item()
cardinality = (torch.sum(y_pred) + torch.sum(y_true)).item()
if mode == "dice":
score = (2.0 * intersection) / (cardinality + eps)
else:
score = intersection / (cardinality - intersection + eps)
has_targets = torch.sum(y_true) > 0
has_predicted = torch.sum(y_pred) > 0
if not has_targets:
if nan_score_on_empty:
score = np.nan
else:
score = float(not has_predicted)
return score | 9d4b751dbdd9c3b7e5f2490c7f7cd8ac08868233 | 5,018 |
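A small usage sketch for binary_dice_iou_score() on a toy binary mask (values and threshold chosen purely for illustration).
import torch

y_true = torch.tensor([0.0, 1.0, 1.0, 0.0])
y_pred = torch.tensor([0.1, 0.9, 0.8, 0.2])
# Both scores are ~1.0 because thresholding at 0.5 recovers the target mask exactly.
print(binary_dice_iou_score(y_pred, y_true, mode="dice", threshold=0.5))
print(binary_dice_iou_score(y_pred, y_true, mode="iou", threshold=0.5))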
def get_uname_arch():
"""
Returns arch of the current host as the kernel would interpret it
"""
global _uname_arch # pylint: disable=global-statement
if not _uname_arch:
_uname_arch = detect_uname_arch()
return _uname_arch | b30946675f6cad155eab3f81b711618551b49f44 | 5,019 |
from typing import Tuple
def _getSTSToken() -> Tuple[str, BosClient, str]:
"""
Get the token to upload the file
:return:
"""
if not Define.hubToken:
raise Error.ArgumentError('Please provide a valid token', ModuleErrorCode, FileErrorCode, 4)
config = _invokeBackend("circuit/genSTS", {"token": Define.hubToken})
bosClient = BosClient(
BceClientConfiguration(
credentials=BceCredentials(
str(
config['accessKeyId']),
str(
config['secretAccessKey'])),
endpoint='http://bd.bcebos.com',
security_token=str(
config['sessionToken'])))
return Define.hubToken, bosClient, config['dest'] | 553844ce8530911bab70fc823bdec65b058b70a4 | 5,020 |
import os
from io import BytesIO
from PIL import Image
async def get_thumb_file(mass: MusicAssistant, url, size: int = 150):
"""Get path to (resized) thumbnail image for given image url."""
assert url
cache_folder = os.path.join(mass.config.data_path, ".thumbs")
cache_id = await mass.database.get_thumbnail_id(url, size)
cache_file = os.path.join(cache_folder, f"{cache_id}.png")
if os.path.isfile(cache_file):
# return file from cache
return cache_file
# no file in cache so we should get it
os.makedirs(cache_folder, exist_ok=True)
# download base image
async with mass.http_session.get(url, verify_ssl=False) as response:
assert response.status == 200
img_data = BytesIO(await response.read())
# save resized image
if size:
basewidth = size
img = Image.open(img_data)
wpercent = basewidth / float(img.size[0])
hsize = int((float(img.size[1]) * float(wpercent)))
img = img.resize((basewidth, hsize), Image.ANTIALIAS)
img.save(cache_file, format="png")
else:
with open(cache_file, "wb") as _file:
_file.write(img_data.getvalue())
# return file from cache
return cache_file | fe9a10e6460453b44e8509f2b2b42b30639be48b | 5,021 |
import pickle
def load_pickle(filename):
"""Load Pickfle file"""
filehandler = open(filename, 'rb')
return pickle.load(filehandler) | f93b13616f94c31bc2673232de14b834a8163c5f | 5,022 |
import json
from pprint import pformat
def columnize(s, header=None, width=40):
"""Dump an object and make each line the given width
The input data will run though `json.loads` in case it is a JSON object
Args:
s (str): Data to format
header (optional[str]): Header to prepend to formatted results
width (optional[int]): Max width of the resulting lines
Returns:
list[str]: List of formatted lines
"""
    try:
        j = json.loads(s)
    except (TypeError, ValueError):  # Assume that the value is a string
        j = s
    s = pformat(j, width=width)
ls = [l.ljust(width) for l in s.splitlines()]
if header is not None:
ls.insert(0, header.ljust(width))
ls.insert(1, '-' * width)
return ls | 36343f682677f04d0b3670882539e58b48146c46 | 5,023 |
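A usage sketch for columnize(): JSON input is parsed and pretty-printed, plain strings pass through, and each output line is left-justified to the requested width.
lines = columnize('{"a": 1, "b": [1, 2, 3]}', header='payload', width=20)
for line in lines:
    print(repr(line))
# The first two lines are the padded header and a '----' separator,
# followed by the pretty-printed dict, one padded line per pformat line.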
def create_eeg_epochs(config):
"""Create the data with each subject data in a dictionary.
Parameter
----------
subject : string of subject ID e.g. 7707
trial : HighFine, HighGross, LowFine, LowGross
Returns
----------
eeg_epoch_dataset : dataset of all the subjects with different conditions
"""
eeg_epoch_dataset = {}
for subject in config['subjects']:
data = nested_dict()
for trial in config['trials']:
epochs = eeg_epochs_dataset(subject, trial, config)
data['eeg'][trial] = epochs
eeg_epoch_dataset[subject] = data
return eeg_epoch_dataset | a33abcb056b9e94a637e58a42936b886e90a94f2 | 5,024 |
def to_newick(phylo):
"""
Returns a string representing the simplified Newick code of the input.
:param: `PhyloTree` instance.
:return: `str` instance.
"""
return phylo_to_newick_node(phylo).newick | 814610413223e37a6417ff8525262f0beb2e8091 | 5,025 |
import functools
def pipe(*functions):
"""
pipes functions one by one in the provided order
i.e. applies arg1, then arg2, then arg3, and so on
if any arg is None, just skips it
"""
return functools.reduce(
lambda f, g: lambda x: f(g(x)) if g else f(x),
functions[::-1],
lambda x: x) if functions else None | f58afedd5c7fe83edd605b12ca0e468657a78b56 | 5,026 |
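A usage sketch for pipe(): functions are applied left to right, None entries are skipped, and calling pipe() with no arguments returns None.
double = lambda x: x * 2
increment = lambda x: x + 1

f = pipe(double, None, increment)   # increment(double(x)), with the None skipped
assert f(10) == 21
assert pipe() is None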
import logging
def remove_news_update(removed_update_name: str, expired: bool) -> None:
"""Removes any expired news articles or any articles that have been
manuallyremoved by the user.
If an update has expired, a loop is used to find the update and remove it
from the global list of updates. Otherwise, updates need to be removed
manually, this is done by searching for the removed udpate in the list
of updates and removing it from the scheduler queue (the event id is
assigned to a variable and then used to remove the update) and global list.
A try accept is used to catch any repeat upates that have already expired
(not in the update queue) but are still manually removed.
Args:
removed_update_name (str): The name of the update to be removed, given
as a string. This enables the update to be removed from the
scheduler queue (Allows for the event ID to be found) and is used
to ensure the correct update is removed, regardless of whether it
had expired or was manually removed.
expired (bool): A boolean value indicating whether or not a scheduled
update currently being displayed has already expired. Consequently
, this is used to help remove any expired updates.
Returns:
None: Global variables are the only thing altered during the execution
of the function, so nothing needs to be returned.
"""
logging.debug("Entering the remove_news_update function.")
# Expired updates are removed from the global list of articles.
if expired is True:
for update in news_queue_info:
if update['event_name'] == removed_update_name:
logging.info("Expired update removed from global list.")
news_queue_info.remove(update)
# Iterates through the global list of events, if the removed event.
# Is in the global list, it is removed from the queue and the list.
for update in news_queue_info:
if update["event_name"] == removed_update_name:
event_to_remove = update['event_id']
# Events must be in (and removed from) both the global list
# And the queue if the event has not expired.
news_queue_info.remove(update)
try:
news_scheduler.cancel(event_to_remove)
logging.info("Update removed from queue and list.")
except ValueError:
logging.warning("Repeat update removed from list.")
logging.debug("Exiting the remove_news_update function.")
return None | a8a01af22ef2377265f49f35f288f6eebabef3f0 | 5,027 |
import torch
def initialize_graph_batch(batch_size):
""" Initialize a batch of empty graphs to begin the generation process.
Args:
batch_size (int) : Batch size.
Returns:
generated_nodes (torch.Tensor) : Empty node features tensor (batch).
generated_edges (torch.Tensor) : Empty edge features tensor (batch).
generated_n_nodes (torch.Tensor) : Number of nodes per graph in `nodes` and `edges`
(batch), currently all 0.
"""
# define tensor shapes
node_shape = ([batch_size + 1] + C.dim_nodes)
edge_shape = ([batch_size + 1] + C.dim_edges)
# initialize tensors
nodes = torch.zeros(node_shape, dtype=torch.float32, device="cuda")
edges = torch.zeros(edge_shape, dtype=torch.float32, device="cuda")
n_nodes = torch.zeros(batch_size + 1, dtype=torch.int64, device="cuda")
# add a dummy non-empty graph at top, since models cannot receive as input
# purely empty graphs
nodes[0] = torch.ones(([1] + C.dim_nodes), device="cuda")
edges[0, 0, 0, 0] = 1
n_nodes[0] = 1
return nodes, edges, n_nodes | f7ae56b3a0d728dd0fd4b40a3e45e960f65bcf31 | 5,028 |
def TimestampFromTicks(ticks):
"""Construct an object holding a timestamp value from the given ticks value
(number of seconds since the epoch).
This function is part of the `DBAPI 2.0 specification
<http://www.python.org/dev/peps/pep-0249/>`_.
:rtype: :class:`datetime.datetime`
"""
return Timestamp(*localtime(ticks)[:6]) | ccb377b793d600d1a98363e35fc6cd041517b50a | 5,029 |
from typing import Counter
def extract_object_token(data, num_tokens, obj_list=[], verbose=True):
""" Builds a set that contains the object names. Filters infrequent tokens. """
token_counter = Counter()
for img in data:
for region in img['objects']:
for name in region['names']:
if not obj_list or name in obj_list:
token_counter.update([name])
tokens = set()
# pick top N tokens
token_counter_return = {}
for token, count in token_counter.most_common():
tokens.add(token)
token_counter_return[token] = count
if len(tokens) == num_tokens:
break
if verbose:
print(('Keeping %d / %d objects'
% (len(tokens), len(token_counter))))
return tokens, token_counter_return | c35ea7a9eaa2f259c9b38b47e3c982b9ee11682b | 5,030 |
def test_lambda_expressions():
"""Lambda 表达式"""
# 这个函数返回两个参数的和:lambda a, b: a+b
# 与嵌套函数定义一样,lambda函数可以引用包含范围内的变量。
def make_increment_function(delta):
"""本例使用 lambda 表达式返回函数"""
return lambda number: number + delta
increment_function = make_increment_function(42)
assert increment_function(0) == 42
assert increment_function(1) == 43
assert increment_function(2) == 44
# lambda 的另一种用法是将一个小函数作为参数传递。
pairs = [(1, 'one'), (2, 'two'), (3, 'three'), (4, 'four')]
# 按文本键对排序。
pairs.sort(key=lambda pair: pair[1])
assert pairs == [(4, 'four'), (1, 'one'), (3, 'three'), (2, 'two')] | e727df25b2165bb0cd7c9cce47700e86d37a2a1a | 5,031 |
def _generate_with_relative_time(initial_state, condition, iterate, time_mapper) -> Observable:
"""Generates an observable sequence by iterating a state from an
initial state until the condition fails.
Example:
res = source.generate_with_relative_time(0, lambda x: True, lambda x: x + 1, lambda x: 0.5)
Args:
initial_state: Initial state.
condition: Condition to terminate generation (upon returning
false).
iterate: Iteration step function.
time_mapper: Time mapper function to control the speed of
values being produced each iteration, returning relative times, i.e.
either floats denoting seconds or instances of timedelta.
Returns:
The generated sequence.
"""
def subscribe(observer, scheduler=None):
scheduler = scheduler or timeout_scheduler
mad = MultipleAssignmentDisposable()
state = [initial_state]
has_result = [False]
result = [None]
first = [True]
time = [None]
def action(scheduler, _):
if has_result[0]:
observer.on_next(result[0])
try:
if first[0]:
first[0] = False
else:
state[0] = iterate(state[0])
has_result[0] = condition(state[0])
if has_result[0]:
result[0] = state[0]
time[0] = time_mapper(state[0])
except Exception as e:
observer.on_error(e)
return
if has_result[0]:
mad.disposable = scheduler.schedule_relative(time[0], action)
else:
observer.on_completed()
mad.disposable = scheduler.schedule_relative(0, action)
return mad
return Observable(subscribe) | d3f5549f94125065b387515299014b5701411be8 | 5,032 |
def is_prime(num):
"""判断一个数是不是素数"""
for factor in range(2, int(num ** 0.5) + 1):
if num % factor == 0:
return False
return True if num != 1 else False | c0e8435b046a87dd15278149f5e1af7258634a01 | 5,033 |
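A quick sanity check for is_prime() (trial division up to the square root).
assert [n for n in range(2, 20) if is_prime(n)] == [2, 3, 5, 7, 11, 13, 17, 19]
assert not is_prime(1)
assert not is_prime(25)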
def ptcorr(y1, y2, dim=-1, eps=1e-8, **kwargs):
"""
Compute the correlation between two PyTorch tensors along the specified dimension(s).
Args:
y1: first PyTorch tensor
y2: second PyTorch tensor
dim: dimension(s) along which the correlation is computed. Any valid PyTorch dim spec works here
eps: offset to the standard deviation to avoid exploding the correlation due to small division (default 1e-8)
        **kwargs: passed to the final mean operation over the standardized y1 * y2
Returns: correlation tensor
"""
y1 = (y1 - y1.mean(dim=dim, keepdim=True)) / (y1.std(dim=dim, keepdim=True) + eps)
y2 = (y2 - y2.mean(dim=dim, keepdim=True)) / (y2.std(dim=dim, keepdim=True) + eps)
return (y1 * y2).mean(dim=dim, **kwargs) | 140cad4de4452edeb5ea0fb3e50267c66df948c1 | 5,034 |
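A usage sketch for ptcorr(): correlating a tensor with itself along the last dimension gives values near 1, and with its negation near -1 (slightly off due to the eps term).
import torch

a = torch.randn(4, 100)
print(ptcorr(a, a, dim=-1))    # ~ tensor([1., 1., 1., 1.])
print(ptcorr(a, -a, dim=-1))   # ~ tensor([-1., -1., -1., -1.])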
import typing
def discretize_time_difference(
times, initial_time, frequency, integer_timestamps=False
) -> typing.Sequence[int]:
"""method that discretizes sequence of datetimes (for prediction slices)
Arguments:
times {Sequence[datetime] or Sequence[float]} -- sequence of datetime objects
initial_time {datetime or float} -- last datetime instance from training set
(to offset test datetimes)
frequency {str} -- string alias representing granularity of pd.datetime object
Keyword Arguments:
integer_timestamps {bool} -- whether timestamps are integers or datetime values
Returns:
typing.Sequence[int] -- prediction intervals expressed at specific time granularity
"""
# take differences to convert to deltas
time_differences = times - initial_time
# edge case for integer timestamps
if integer_timestamps:
return time_differences.values.astype(int)
# convert to seconds representation
if type(time_differences.iloc[0]) is pd._libs.tslibs.timedeltas.Timedelta:
time_differences = time_differences.apply(lambda t: t.total_seconds())
if frequency == "YS":
return [round(x / S_PER_YEAR_0) for x in time_differences]
elif frequency == "MS" or frequency == 'M':
return [round(x / S_PER_MONTH_31) for x in time_differences]
elif frequency == "W":
return [round(x / S_PER_WEEK) for x in time_differences]
elif frequency == "D":
return [round(x / S_PER_DAY) for x in time_differences]
elif frequency == "H":
return [round(x / S_PER_HR) for x in time_differences]
else:
return [round(x / SECONDS_PER_MINUTE) for x in time_differences] | 871726102dbdedfbc92570bff4f73faf1054e986 | 5,035 |
def pathlines(u_netcdf_filename,v_netcdf_filename,w_netcdf_filename,
startx,starty,startz,startt,
t,
grid_object,
t_max,delta_t,
u_netcdf_variable='UVEL',
v_netcdf_variable='VVEL',
w_netcdf_variable='WVEL',
u_grid_loc='U',v_grid_loc='V',w_grid_loc='W',
u_bias_field=None,
v_bias_field=None,
w_bias_field=None):
"""!A three-dimensional lagrangian particle tracker. The velocity fields must be four dimensional (three spatial, one temporal) and have units of m/s.
It should work to track particles forwards or backwards in time (set delta_t <0 for backwards in time). But, be warned, backwards in time hasn't been thoroughly tested yet.
Because this is a very large amount of data, the fields are passed as netcdffile handles.
The variables are:
* ?_netcdf_filename = name of the netcdf file with ?'s data in it.
* start? = intial value for x, y, z, or t.
* t = vector of time levels that are contained in the velocity data.
* grid_object is m.grid if you followed the standard naming conventions.
* ?_netcdf_variable = name of the "?" variable field in the netcdf file.
* t_max = length of time to track particles for, in seconds. This is always positive
* delta_t = timestep for particle tracking algorithm, in seconds. This can be positive or negative.
* ?_grid_loc = where the field "?" is located on the C-grid. Possibles options are, U, V, W, T and Zeta.
    * ?_bias_field = bias to add to that velocity field component. If set to -mean(velocity component), then only the time varying portion of that field will be used.
"""
if u_grid_loc == 'U':
x_u = grid_object['Xp1'][:]
y_u = grid_object['Y'][:]
z_u = grid_object['Z'][:]
elif u_grid_loc == 'V':
x_u = grid_object['X'][:]
y_u = grid_object['Yp1'][:]
z_u = grid_object['Z'][:]
elif u_grid_loc == 'W':
x_u = grid_object['X'][:]
y_u = grid_object['Y'][:]
z_u = grid_object['Zl'][:]
elif u_grid_loc == 'T':
x_u = grid_object['X'][:]
y_u = grid_object['Y'][:]
z_u = grid_object['Z'][:]
elif u_grid_loc == 'Zeta':
x_u = grid_object['Xp1'][:]
y_u = grid_object['Yp1'][:]
z_u = grid_object['Z'][:]
else:
        print('u_grid_loc not set correctly. Possible options are: U,V,W,T, and Zeta')
return
if v_grid_loc == 'U':
x_v = grid_object['Xp1'][:]
y_v = grid_object['Y'][:]
z_v = grid_object['Z'][:]
elif v_grid_loc == 'V':
x_v = grid_object['X'][:]
y_v = grid_object['Yp1'][:]
z_v = grid_object['Z'][:]
elif v_grid_loc == 'W':
x_v = grid_object['X'][:]
y_v = grid_object['Y'][:]
z_v = grid_object['Zl'][:]
elif v_grid_loc == 'T':
x_v = grid_object['X'][:]
y_v = grid_object['Y'][:]
z_v = grid_object['Z'][:]
elif v_grid_loc == 'Zeta':
x_v = grid_object['Xp1'][:]
y_v = grid_object['Yp1'][:]
z_v = grid_object['Z'][:]
else:
        print('v_grid_loc not set correctly. Possible options are: U,V,W,T, and Zeta')
return
if w_grid_loc == 'U':
x_w = grid_object['Xp1'][:]
y_w = grid_object['Y'][:]
z_w = grid_object['Z'][:]
elif w_grid_loc == 'V':
x_w = grid_object['X'][:]
y_w = grid_object['Yp1'][:]
z_w = grid_object['Z'][:]
elif w_grid_loc == 'W':
x_w = grid_object['X'][:]
y_w = grid_object['Y'][:]
z_w = grid_object['Zl'][:]
elif w_grid_loc == 'T':
x_w = grid_object['X'][:]
y_w = grid_object['Y'][:]
z_w = grid_object['Z'][:]
elif w_grid_loc == 'Zeta':
x_w = grid_object['Xp1'][:]
y_w = grid_object['Yp1'][:]
z_w = grid_object['Z'][:]
else:
        print('w_grid_loc not set correctly. Possible options are: U,V,W,T, and Zeta')
return
len_x_u = len(x_u)
len_y_u = len(y_u)
len_z_u = len(z_u)
len_x_v = len(x_v)
len_y_v = len(y_v)
len_z_v = len(z_v)
len_x_w = len(x_w)
len_y_w = len(y_w)
len_z_w = len(z_w)
len_t = len(t)
if u_bias_field is None:
u_bias_field = np.zeros_like(grid_object['wet_mask_U'][:])
if v_bias_field is None:
v_bias_field = np.zeros_like(grid_object['wet_mask_V'][:])
if w_bias_field is None:
w_bias_field = np.zeros_like(grid_object['wet_mask_W'][:])
x_stream = np.ones((int(np.fabs(t_max/delta_t))+2))*startx
y_stream = np.ones((int(np.fabs(t_max/delta_t))+2))*starty
z_stream = np.ones((int(np.fabs(t_max/delta_t))+2))*startz
t_stream = np.ones((int(np.fabs(t_max/delta_t))+2))*startt
t_RK = startt #set the initial time to be the given start time
z_RK = startz
y_RK = starty
x_RK = startx
i=0
u_netcdf_filehandle = netCDF4.Dataset(u_netcdf_filename)
v_netcdf_filehandle = netCDF4.Dataset(v_netcdf_filename)
w_netcdf_filehandle = netCDF4.Dataset(w_netcdf_filename)
t_index = np.searchsorted(t,t_RK)
t_index_new = np.searchsorted(t,t_RK) # this is later used to test if new data needs to be read in.
if t_index == 0:
raise ValueError('Given time value is outside the given velocity fields - too small')
elif t_index == len_t:
raise ValueError('Given time value is outside the given velocity fields - too big')
# load fields in ready for the first run through the loop
# u
u_field,x_index_u,y_index_u,z_index_u = indices_and_field(x_u,y_u,z_u,
x_RK,y_RK,z_RK,t_index,
len_x_u,len_y_u,len_z_u,len_t,
u_netcdf_filehandle,u_netcdf_variable,u_bias_field)
u_field,x_index_u_new,y_index_u_new,z_index_u_new = indices_and_field(x_u,y_u,z_u,
x_RK,y_RK,z_RK,t_index,
len_x_u,len_y_u,len_z_u,len_t,
u_netcdf_filehandle,u_netcdf_variable,u_bias_field)
# v
v_field,x_index_v,y_index_v,z_index_v = indices_and_field(x_v,y_v,z_v,
x_RK,y_RK,z_RK,t_index,
len_x_v,len_y_v,len_z_v,len_t,
v_netcdf_filehandle,v_netcdf_variable,v_bias_field)
v_field,x_index_v_new,y_index_v_new,z_index_v_new = indices_and_field(x_v,y_v,z_v,
x_RK,y_RK,z_RK,t_index,
len_x_v,len_y_v,len_z_v,len_t,
v_netcdf_filehandle,v_netcdf_variable,v_bias_field)
# w
w_field,x_index_w,y_index_w,z_index_w = indices_and_field(x_w,y_w,z_w,
x_RK,y_RK,z_RK,t_index,
len_x_w,len_y_w,len_z_w,len_t,
w_netcdf_filehandle,w_netcdf_variable,w_bias_field)
w_field,x_index_w_new,y_index_w_new,z_index_w_new = indices_and_field(x_w,y_w,z_w,
x_RK,y_RK,z_RK,t_index,
len_x_w,len_y_w,len_z_w,len_t,
w_netcdf_filehandle,w_netcdf_variable,w_bias_field)
# Prepare for spherical polar grids
deg_per_m = np.array([1,1])
# Runge-Kutta fourth order method to estimate next position.
while i < np.fabs(t_max/delta_t):
#t_RK < t_max + startt:
if grid_object['grid_type']=='polar':
# use degrees per metre and convert all the velocities to degrees / second# calculate degrees per metre at current location - used to convert the m/s velocities in to degrees/s
deg_per_m = np.array([1./(1852.*60.),np.cos(starty*np.pi/180.)/(1852.*60.)])
# Compute indices at location given
if (y_index_u_new==y_index_u and
x_index_u_new==x_index_u and
z_index_u_new==z_index_u and
y_index_v_new==y_index_v and
x_index_v_new==x_index_v and
z_index_v_new==z_index_v and
y_index_w_new==y_index_w and
x_index_w_new==x_index_w and
z_index_w_new==z_index_w and
t_index_new == t_index):
# the particle hasn't moved out of the grid cell it was in.
# So the loaded field is fine; there's no need to reload it.
pass
else:
t_index = np.searchsorted(t,t_RK)
if t_index == 0:
raise ValueError('Given time value is outside the given velocity fields - too small')
elif t_index == len_t:
raise ValueError('Given time value is outside the given velocity fields - too big')
# for u
u_field,x_index_u,y_index_u,z_index_u = indices_and_field(x_u,y_u,z_u,
x_RK,y_RK,z_RK,t_index,
len_x_u,len_y_u,len_z_u,len_t,
u_netcdf_filehandle,u_netcdf_variable,u_bias_field)
# for v
v_field,x_index_v,y_index_v,z_index_v = indices_and_field(x_v,y_v,z_v,
x_RK,y_RK,z_RK,t_index,
len_x_v,len_y_v,len_z_v,len_t,
v_netcdf_filehandle,v_netcdf_variable,v_bias_field)
# for w
w_field,x_index_w,y_index_w,z_index_w = indices_and_field(x_w,y_w,z_w,
x_RK,y_RK,z_RK,t_index,
len_x_w,len_y_w,len_z_w,len_t,
w_netcdf_filehandle,w_netcdf_variable,w_bias_field)
# Interpolate velocities to initial location
u_loc = quadralinear_interp(x_RK,y_RK,z_RK,t_RK,
u_field,
x_u,y_u,z_u,t,
len_x_u,len_y_u,len_z_u,len_t,
x_index_u,y_index_u,z_index_u,t_index)
v_loc = quadralinear_interp(x_RK,y_RK,z_RK,t_RK,
v_field,
x_v,y_v,z_v,t,len_x_v,len_y_v,len_z_v,len_t,
x_index_v,y_index_v,z_index_v,t_index)
w_loc = quadralinear_interp(x_RK,y_RK,z_RK,t_RK,
w_field,
x_w,y_w,z_w,t,len_x_w,len_y_w,len_z_w,len_t,
x_index_w,y_index_w,z_index_w,t_index)
u_loc = u_loc*deg_per_m[1]
v_loc = v_loc*deg_per_m[0]
dx1 = delta_t*u_loc
dy1 = delta_t*v_loc
dz1 = delta_t*w_loc
u_loc1 = quadralinear_interp(x_RK + 0.5*dx1,y_RK + 0.5*dy1,z_RK + 0.5*dz1,t_RK + 0.5*delta_t,
u_field,
x_u,y_u,z_u,t,len_x_u,len_y_u,len_z_u,len_t,
x_index_u,y_index_u,z_index_u,t_index)
v_loc1 = quadralinear_interp(x_RK + 0.5*dx1,y_RK + 0.5*dy1,z_RK + 0.5*dz1,t_RK + 0.5*delta_t,
v_field,
x_v,y_v,z_v,t,len_x_v,len_y_v,len_z_v,len_t,
x_index_v,y_index_v,z_index_v,t_index)
w_loc1 = quadralinear_interp(x_RK + 0.5*dx1,y_RK + 0.5*dy1,z_RK + 0.5*dz1,t_RK + 0.5*delta_t,
w_field,
x_w,y_w,z_w,t,len_x_w,len_y_w,len_z_w,len_t,
x_index_w,y_index_w,z_index_w,t_index)
u_loc1 = u_loc1*deg_per_m[1]
v_loc1 = v_loc1*deg_per_m[0]
dx2 = delta_t*u_loc1
dy2 = delta_t*v_loc1
dz2 = delta_t*w_loc1
u_loc2 = quadralinear_interp(x_RK + 0.5*dx2,y_RK + 0.5*dy2,z_RK + 0.5*dz2,t_RK + 0.5*delta_t,
u_field,
x_u,y_u,z_u,t,len_x_u,len_y_u,len_z_u,len_t,
x_index_u,y_index_u,z_index_u,t_index)
v_loc2 = quadralinear_interp(x_RK + 0.5*dx2,y_RK + 0.5*dy2,z_RK + 0.5*dz2,t_RK + 0.5*delta_t,
v_field,
x_v,y_v,z_v,t,len_x_v,len_y_v,len_z_v,len_t,
x_index_v,y_index_v,z_index_v,t_index)
w_loc2 = quadralinear_interp(x_RK + 0.5*dx2,y_RK + 0.5*dy2,z_RK + 0.5*dz2,t_RK + 0.5*delta_t,
w_field,
x_w,y_w,z_w,t,len_x_w,len_y_w,len_z_w,len_t,
x_index_w,y_index_w,z_index_w,t_index)
u_loc2 = u_loc2*deg_per_m[1]
v_loc2 = v_loc2*deg_per_m[0]
dx3 = delta_t*u_loc2
dy3 = delta_t*v_loc2
dz3 = delta_t*w_loc2
u_loc3 = quadralinear_interp(x_RK + dx3,y_RK + dy3,z_RK + dz3,t_RK + delta_t,
u_field,
x_u,y_u,z_u,t,len_x_u,len_y_u,len_z_u,len_t,
x_index_u,y_index_u,z_index_u,t_index)
v_loc3 = quadralinear_interp(x_RK + dx3,y_RK + dy3,z_RK + dz3,t_RK + delta_t,
v_field,
x_v,y_v,z_v,t,len_x_v,len_y_v,len_z_v,len_t,
x_index_v,y_index_v,z_index_v,t_index)
w_loc3 = quadralinear_interp(x_RK + dx3,y_RK + dy3,z_RK + dz3,t_RK + delta_t,
w_field,
x_w,y_w,z_w,t,len_x_w,len_y_w,len_z_w,len_t,
x_index_w,y_index_w,z_index_w,t_index)
u_loc3 = u_loc3*deg_per_m[1]
v_loc3 = v_loc3*deg_per_m[0]
dx4 = delta_t*u_loc3
dy4 = delta_t*v_loc3
dz4 = delta_t*w_loc3
#recycle the variables to keep the code clean
x_RK = x_RK + (dx1 + 2*dx2 + 2*dx3 + dx4)/6
y_RK = y_RK + (dy1 + 2*dy2 + 2*dy3 + dy4)/6
z_RK = z_RK + (dz1 + 2*dz2 + 2*dz3 + dz4)/6
t_RK += delta_t
i += 1
x_stream[i] = x_RK
y_stream[i] = y_RK
z_stream[i] = z_RK
t_stream[i] = t_RK
t_index_new = np.searchsorted(t,t_RK)
x_index_w_new = np.searchsorted(x_w,x_RK)
y_index_w_new = np.searchsorted(y_w,y_RK)
if z_RK < 0:
z_index_w_new = np.searchsorted(-z_w,-z_RK)
else:
z_index_w_new = np.searchsorted(z_w,z_RK)
x_index_v_new = np.searchsorted(x_v,x_RK)
y_index_v_new = np.searchsorted(y_v,y_RK)
if z_RK < 0:
z_index_v_new = np.searchsorted(-z_v,-z_RK)
else:
z_index_v_new = np.searchsorted(z_v,z_RK)
x_index_u_new = np.searchsorted(x_u,x_RK)
y_index_u_new = np.searchsorted(y_u,y_RK)
if z_RK < 0:
z_index_u_new = np.searchsorted(-z_u,-z_RK)
else:
z_index_u_new = np.searchsorted(z_u,z_RK)
u_netcdf_filehandle.close()
v_netcdf_filehandle.close()
w_netcdf_filehandle.close()
return x_stream,y_stream,z_stream,t_stream | 2c7da1a6de8157c690fb6ea57e30906108728711 | 5,036 |
import numpy as np
def firing_rate(x, theta=0.5, alpha=0.12):
""" Sigmoidal firing rate function
Parameters
----------
x : float
Mean membrane potential.
    theta : float
        Inflection point (mean firing activity) of the sigmoidal curve (default
        value 0.5)
alpha : float
Steepness of sigmoidal curve (default value 0.12)
Returns
-------
f : float
Firing rate of x.
"""
expo = np.exp((theta - x) / alpha)
f = 1 / (1 + expo)
return f | ddb4ce078f8613a088971d4ed0a4a71d746772b5 | 5,037 |
def map_points(pois, sample_size=-1, kwd=None, show_bbox=False, tiles='OpenStreetMap', width='100%', height='100%'):
"""Returns a Folium Map displaying the provided points. Map center and zoom level are set automatically.
Args:
pois (GeoDataFrame): A GeoDataFrame containing the POIs to be displayed.
sample_size (int): Sample size (default: -1; show all).
kwd (string): A keyword to filter by (optional).
show_bbox (bool): Whether to show the bounding box of the GeoDataFrame (default: False).
tiles (string): The tiles to use for the map (default: `OpenStreetMap`).
width (integer or percentage): Width of the map in pixels or percentage (default: 100%).
height (integer or percentage): Height of the map in pixels or percentage (default: 100%).
Returns:
A Folium Map object displaying the given POIs.
"""
# Set the crs to WGS84
pois = to_wgs84(pois)
# Filter by keyword
if kwd is None:
pois_filtered = pois
else:
pois_filtered = filter_by_kwd(pois, kwd)
# Pick a sample
if sample_size > 0 and sample_size < len(pois_filtered.index):
pois_filtered = pois_filtered.sample(sample_size)
# Automatically center the map at the center of the bounding box enclosing the POIs.
bb = bbox(pois_filtered)
map_center = [bb.centroid.y, bb.centroid.x]
# Initialize the map
m = folium.Map(location=map_center, tiles=tiles, width=width, height=height)
# Automatically set the zoom level
m.fit_bounds(([bb.bounds[1], bb.bounds[0]], [bb.bounds[3], bb.bounds[2]]))
# Create the marker cluster
locations = list(zip(pois_filtered.geometry.y.tolist(),
pois_filtered.geometry.x.tolist(),
pois_filtered.id.tolist(),
pois_filtered.name.tolist(),
pois_filtered.kwds.tolist()))
callback = """\
function (row) {
var icon, marker;
icon = L.AwesomeMarkers.icon({
icon: 'map-marker', markerColor: 'blue'});
marker = L.marker(new L.LatLng(row[0], row[1]));
marker.setIcon(icon);
var popup = L.popup({height: '300'});
popup.setContent(row[2] + '<br/>' + row[3] + '<br/>' + row[4]);
marker.bindPopup(popup);
return marker;
};
"""
m.add_child(folium.plugins.FastMarkerCluster(locations, callback=callback))
# Add pois to a marker cluster
#coords, popups = [], []
#for idx, poi in pois.iterrows():
# coords.append([poi.geometry.y, poi.geometry.x)]
# label = str(poi['id']) + '<br>' + str(poi['name']) + '<br>' + ' '.join(poi['kwds'])
# popups.append(folium.IFrame(label, width=300, height=100))
#poi_layer = folium.FeatureGroup(name='pois')
#poi_layer.add_child(MarkerCluster(locations=coords, popups=popups))
#m.add_child(poi_layer)
# folium.GeoJson(pois, tooltip=folium.features.GeoJsonTooltip(fields=['id', 'name', 'kwds'],
# aliases=['ID:', 'Name:', 'Keywords:'])).add_to(m)
if show_bbox:
folium.GeoJson(bb).add_to(m)
folium.LatLngPopup().add_to(m)
return m | cb8e2a32b62ca364e54c90b94d2d4c3da74fc12a | 5,038 |
def read_data_file():
"""
Reads Data file from datafilename given name
"""
datafile = open(datafilename, 'r')
old = datafile.read()
datafile.close()
return old | 5aa6aa7cbf0305ca51c026f17e29188e472e61f3 | 5,039 |
def squared_loss(y_hat, y):
"""均方损失。"""
return (y_hat - y.reshape(y_hat.shape))**2 / 2 | 4f796ed753de6ed77de50578271a4eca04fc1ffb | 5,040 |
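A usage sketch for squared_loss(); shown with numpy arrays here, though any array type supporting reshape and broadcasting (e.g. torch tensors) works.
import numpy as np

y_hat = np.array([2.5, 0.0, 2.0])
y = np.array([3.0, -0.5, 2.0])
print(squared_loss(y_hat, y))   # element-wise (y_hat - y)^2 / 2 -> [0.125, 0.125, 0.]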
import re
import unidecode
def normalize_string(string, lowercase=True, convert_arabic_numerals=True):
"""
Normalize the given string for matching.
Example::
>>> normalize_string("tétéà 14ème-XIV, foobar")
'tetea XIVeme xiv, foobar'
>>> normalize_string("tétéà 14ème-XIV, foobar", False)
'tetea 14eme xiv, foobar'
:param string: The string to normalize.
:param lowercase: Whether to convert string to lowercase or not. Defaults
to ``True``.
:param convert_arabic_numerals: Whether to convert arabic numerals to roman
ones. Defaults to ``True``.
:return: The normalized string.
"""
# ASCIIfy the string
string = unidecode.unidecode(string)
# Replace any non-alphanumeric character by space
# Keep some basic punctuation to keep syntaxic units
string = re.sub(r"[^a-zA-Z0-9,;:]", " ", string)
# Convert to lowercase
if lowercase:
string = string.lower()
# Convert arabic numbers to roman numbers
if convert_arabic_numerals:
string = convert_arabic_to_roman_in_text(string)
# Collapse multiple spaces, replace tabulations and newlines by space
string = re.sub(r"\s+", " ", string)
# Trim whitespaces
string = string.strip()
return string | b6772b47f4cc049e09d37c97710a4f37e5a50a7c | 5,041 |
from typing import Sequence
def find_sub_expression(
expression: Sequence[SnailfishElement],
) -> Sequence[SnailfishElement]:
"""Finds the outermost closed sub-expression in a subsequence."""
num_open_braces = 1
pos = 0
while num_open_braces > 0:
pos += 1
if expression[pos] == "[":
num_open_braces += 1
elif expression[pos] == "]":
num_open_braces -= 1
return expression[: pos + 1] | 11d91c38c66fc8c9ce1e58297fcfbb290c18b968 | 5,042 |
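A usage sketch for find_sub_expression(): given a subsequence whose first element is an opening '[', it returns everything up to and including the matching ']' (SnailfishElement is assumed here to be an int-or-string union).
# The tail of the flattened expression [[1,2],3] starting at the inner '['.
expr = ["[", 1, ",", 2, "]", ",", 3, "]"]
print(find_sub_expression(expr))   # ['[', 1, ',', 2, ']']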
import tempfile
import os
import logging
import subprocess
def run_tha_test(manifest, cache_dir, remote, max_cache_size, min_free_space):
"""Downloads the dependencies in the cache, hardlinks them into a temporary
directory and runs the executable.
"""
cache = Cache(cache_dir, remote, max_cache_size, min_free_space)
outdir = tempfile.mkdtemp(prefix='run_tha_test')
try:
    for filepath, properties in manifest['files'].items():
infile = properties['sha-1']
outfile = os.path.join(outdir, filepath)
cache.retrieve(infile)
outfiledir = os.path.dirname(outfile)
if not os.path.isdir(outfiledir):
os.makedirs(outfiledir)
link_file(outfile, cache.path(infile), HARDLINK)
os.chmod(outfile, properties['mode'])
cwd = os.path.join(outdir, manifest['relative_cwd'])
if not os.path.isdir(cwd):
os.makedirs(cwd)
if manifest.get('read_only'):
make_writable(outdir, True)
cmd = manifest['command']
logging.info('Running %s, cwd=%s' % (cmd, cwd))
return subprocess.call(cmd, cwd=cwd)
finally:
# Save first, in case an exception occur in the following lines, then clean
# up.
cache.save()
rmtree(outdir)
cache.trim() | 340bbc2daf4f28b1574ca0729575d5abdb5848d4 | 5,043 |
import importlib
def get_rec_attr(obj, attrstr):
"""Get attributes and do so recursively if needed"""
if attrstr is None:
return None
if "." in attrstr:
attrs = attrstr.split('.', maxsplit=1)
if hasattr(obj, attrs[0]):
obj = get_rec_attr(getattr(obj, attrs[0]), attrs[1])
else:
try:
obj = get_rec_attr(importlib.import_module(obj.__name__ + "." + attrs[0]), attrs[1])
except ImportError:
raise
else:
if hasattr(obj, attrstr):
obj = getattr(obj, attrstr)
return obj | a6831d48c79b8c58542032385a5c56373fd45321 | 5,044 |
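A usage sketch for get_rec_attr(): dotted attribute lookup that falls back to importing submodules when an attribute is missing.
import os

assert get_rec_attr(os, 'path.join') is os.path.join
assert get_rec_attr(os, None) is None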
def _get_message_mapping(types: dict) -> dict:
"""
Return a mapping with the type as key, and the index number.
:param types: a dictionary of types with the type name, and the message type
:type types: dict
:return: message mapping
:rtype: dict
"""
message_mapping = {}
entry_index = 2 # based on the links found, they normally start with 2?
for _type, message in types.items():
message_mapping[_type] = entry_index
entry_index += 1
return message_mapping | a098e0386aa92c41d4d404154b0b2a87ce9365ce | 5,045 |
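A usage sketch for _get_message_mapping(): each type name is assigned an index, starting at 2 (the type names below are hypothetical).
types = {'temperature': 'float', 'humidity': 'float', 'status': 'str'}
print(_get_message_mapping(types))
# {'temperature': 2, 'humidity': 3, 'status': 4}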
import sys
import os
def _get_default_config_files_location():
"""Get the locations of the standard configuration files. These are
Unix/Linux:
1. `/etc/pywps.cfg`
2. `$HOME/.pywps.cfg`
Windows:
1. `pywps\\etc\\default.cfg`
Both:
1. `$PYWPS_CFG environment variable`
:returns: configuration files
:rtype: list of strings
"""
is_win32 = sys.platform == 'win32'
if is_win32:
LOGGER.debug('Windows based environment')
else:
LOGGER.debug('UNIX based environment')
if os.getenv("PYWPS_CFG"):
LOGGER.debug('using PYWPS_CFG environment variable')
# Windows or Unix
        if is_win32:
            PYWPS_INSTALL_DIR = os.path.abspath(os.path.join(os.getcwd(), os.path.dirname(sys.argv[0])))
            cfgfiles = (os.getenv("PYWPS_CFG"),)
        else:
            cfgfiles = (os.getenv("PYWPS_CFG"),)
else:
LOGGER.debug('trying to estimate the default location')
# Windows or Unix
if is_win32:
PYWPS_INSTALL_DIR = os.path.abspath(os.path.join(os.getcwd(), os.path.dirname(sys.argv[0])))
            cfgfiles = (os.path.join(PYWPS_INSTALL_DIR, "pywps", "etc", "pywps.cfg"),)
else:
homePath = os.getenv("HOME")
if homePath:
cfgfiles = (os.path.join(pywps.__path__[0], "etc", "pywps.cfg"), "/etc/pywps.cfg",
os.path.join(os.getenv("HOME"), ".pywps.cfg"))
else:
cfgfiles = (os.path.join(pywps.__path__[0], "etc",
"pywps.cfg"), "/etc/pywps.cfg")
return cfgfiles | a359c0ac95092200cf346768e6db8ea7e2753416 | 5,046 |
def cd(path):
"""
Change location to the provided path.
:param path: wlst directory to which to change location
:return: cmo object reference of the new location
:raises: PyWLSTException: if a WLST error occurs
"""
_method_name = 'cd'
_logger.finest('WLSDPLY-00001', path, class_name=_class_name, method_name=_method_name)
try:
result = wlst.cd(path)
except (wlst.WLSTException, offlineWLSTException), e:
raise exception_helper.create_pywlst_exception('WLSDPLY-00002', path, _get_exception_mode(e),
_format_exception(e), error=e)
_logger.finest('WLSDPLY-00003', path, result, class_name=_class_name, method_name=_method_name)
return result | fbb8d9ac0a9a4c393d06d0c15bfd15154b0a5c0a | 5,047 |
import matplotlib.pyplot as plt
from sklearn.metrics import auc, roc_curve
def plt_roc_curve(y_true, y_pred, classes, writer, total_iters):
    """
    :param y_true: one-hot ground-truth labels, e.g. [[1,0,0,0,0], [0,1,0,0,0], ...]
    :param y_pred: predicted per-class probabilities, e.g. [[0.34,0.2,0.1,...], ...]
    :param classes: list of class names (here 5 classes)
    :param writer: summary writer used to log the figures (e.g. a TensorBoard SummaryWriter)
    :param total_iters: global step at which the figures are logged
    :return: list of per-class AUC values
    """
fpr = {}
tpr = {}
roc_auc = {}
roc_auc_res = []
n_classes = len(classes)
for i in range(n_classes):
fpr[classes[i]], tpr[classes[i]], _ = roc_curve(y_true[:, i], y_pred[:, i])
roc_auc[classes[i]] = auc(fpr[classes[i]], tpr[classes[i]])
roc_auc_res.append(roc_auc[classes[i]])
fig = plt.figure()
lw = 2
plt.plot(fpr[classes[i]], tpr[classes[i]], color='darkorange',
lw=lw, label='ROC curve (area = %0.2f)' % roc_auc[classes[i]])
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic beat {}'.format(classes[i]))
plt.legend(loc="lower right")
writer.add_figure('test/roc_curve_beat_{}'.format(classes[i]), fig, total_iters)
plt.close()
fig.clf()
fig.clear()
return roc_auc_res | 5e02d83f5a7cd4e8c8abbc3afe89c25271e3944e | 5,048 |
from itertools import chain
from nltk.parse import DependencyGraph  # assuming the NLTK dependency graph type
def get_deps(sentence_idx: int, graph: DependencyGraph):
"""Get the indices of the dependants of the word at index sentence_idx
from the provided DependencyGraph"""
return list(chain(*graph.nodes[sentence_idx]['deps'].values())) | 9eb00fc5719cc1fddb22ea457cc6b49a385eb51d | 5,049 |
import functools
def incr(func):
"""
Increment counter
"""
@functools.wraps(func)
def wrapper(self):
# Strip off the "test_" from the function name
name = func.__name__[5:]
def _incr(counter, num):
salt.utils.process.appendproctitle("test_{}".format(name))
for _ in range(0, num):
counter.value += 1
attrname = "incr_" + name
setattr(self, attrname, _incr)
self.addCleanup(delattr, self, attrname)
return wrapper | 036ae9eac9d223a34871737a7294fad027c2d3c9 | 5,050 |
import operator
def index(a: protocols.SupportsIndex) -> int:
"""
Return _a_ converted to an integer. Equivalent to a.__index__().
Example:
>>> class Index:
... def __index__(self) -> int:
... return 0
>>> [1][Index()]
1
Args:
a:
"""
return operator.index(a) | 52f2fbdb8d65b12cb53761647b2c13d3cb368272 | 5,051 |
def col_to_num(col_str):
""" Convert base26 column string to number. """
expn = 0
col_num = 0
for char in reversed(col_str):
col_num += (ord(char) - ord('A') + 1) * (26 ** expn)
expn += 1
return col_num | d6bb00d3ef77c48338df635a254cf3ca5503bb73 | 5,052 |
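# Illustrative usage sketch for col_to_num (uppercase spreadsheet-style column labels):
assert col_to_num('A') == 1
assert col_to_num('Z') == 26
assert col_to_num('AA') == 27
assert col_to_num('AB') == 28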
def process_chunk(chunk, verbose=False):
"""Return a tuple of chunk kind, task-create links, task-create times, task-leave times and the chunk's graph"""
# Make function for looking up event attributes
get_attr = attr_getter(chunk.attr)
# Unpack events from chunk
(_, (first_event, *events, last_event)), = chunk.items()
if verbose and len(events) > 0:
print(chunk)
# Make the graph representing this chunk
g = ig.Graph(directed=True)
prior_node = g.add_vertex(event=first_event)
# Used to save taskgroup-enter event to match to taskgroup-leave event
taskgroup_enter_event = None
# Match master-enter event to corresponding master-leave
master_enter_event = first_event if get_attr(first_event, 'region_type') == 'master' else None
if chunk.kind == 'parallel':
parallel_id = get_attr(first_event, 'unique_id')
prior_node["parallel_sequence_id"] = (parallel_id, get_attr(first_event, 'endpoint'))
task_create_nodes = deque()
task_links = deque()
task_crt_ts = deque()
task_leave_ts = deque()
if type(first_event) is Enter and get_attr(first_event, 'region_type') in ['initial_task']:
task_crt_ts.append((get_attr(first_event, 'unique_id'), first_event.time))
k = 1
for event in chain(events, (last_event,)):
if get_attr(event, 'region_type') in ['implicit_task']:
if type(event) is Enter:
task_links.append((get_attr(event, 'encountering_task_id'), get_attr(event, 'unique_id')))
task_crt_ts.append((get_attr(event, 'unique_id'), event.time))
elif type(event) is Leave:
task_leave_ts.append((get_attr(event, 'unique_id'), event.time))
continue
# The node representing this event
node = g.add_vertex(event=event)
# Add task-leave time
if type(event) is Leave and get_attr(event, 'region_type') == 'explicit_task':
task_leave_ts.append((get_attr(event, 'unique_id'), event.time))
# Add task links and task crt ts
if (type(event) is Enter and get_attr(event, 'region_type') == 'implicit_task') \
or (type(event) is ThreadTaskCreate):
task_links.append((get_attr(event, 'encountering_task_id'), get_attr(event, 'unique_id')))
task_crt_ts.append((get_attr(event, 'unique_id'), event.time))
# Match taskgroup-enter/-leave events
if get_attr(event, 'region_type') in ['taskgroup']:
if type(event) is Enter:
taskgroup_enter_event = event
elif type(event) is Leave:
if taskgroup_enter_event is None:
raise ValueError("taskgroup-enter event was None")
node['taskgroup_enter_event'] = taskgroup_enter_event
taskgroup_enter_event = None
# Match master-enter/-leave events
if get_attr(event, 'region_type') in ['master']:
if type(event) is Enter:
master_enter_event = event
elif type(event) is Leave:
if master_enter_event is None:
raise ValueError("master-enter event was None")
node['master_enter_event'] = master_enter_event
master_enter_event = None
# Label nodes in a parallel chunk by their position for easier merging
if (chunk.kind == 'parallel'
and type(event) is not ThreadTaskCreate
and get_attr(event, 'region_type') != 'master'):
node["parallel_sequence_id"] = (parallel_id, k)
k += 1
if get_attr(event, 'region_type') == 'parallel':
# Label nested parallel regions for easier merging...
if event is not last_event:
node["parallel_sequence_id"] = (get_attr(event, 'unique_id'), get_attr(event, 'endpoint'))
# ... but distinguish from a parallel chunk's terminating parallel-end event
else:
node["parallel_sequence_id"] = (parallel_id, get_attr(event, 'endpoint'))
# Add edge except for (single begin -> single end) and (parallel N begin -> parallel N end)
if events_bridge_region(prior_node['event'], node['event'], ['single_executor', 'single_other', 'master'], get_attr) \
or (events_bridge_region(prior_node['event'], node['event'], ['parallel'], get_attr)
and get_attr(node['event'], 'unique_id') == get_attr(prior_node['event'], 'unique_id')):
pass
else:
g.add_edge(prior_node, node)
# For task_create add dummy nodes for easier merging
if type(event) is ThreadTaskCreate:
node['task_cluster_id'] = (get_attr(event, 'unique_id'), 'enter')
dummy_node = g.add_vertex(event=event, task_cluster_id=(get_attr(event, 'unique_id'), 'leave'))
task_create_nodes.append(dummy_node)
continue
elif len(task_create_nodes) > 0:
task_create_nodes = deque()
prior_node = node
if chunk.kind == 'explicit_task' and len(events) == 0:
g.delete_edges([0])
# Require at least 1 edge between start and end nodes if there are no internal nodes, except for empty explicit
# task chunks
if chunk.kind != "explicit_task" and len(events) == 0 and g.ecount() == 0:
g.add_edge(g.vs[0], g.vs[1])
return chunk.kind, task_links, task_crt_ts, task_leave_ts, g | f2430377bc592b2a317b6db627cc39c185f64177 | 5,053 |
def delazi_wgs84(lat1, lon1, lat2, lon2):
"""delazi_wgs84(double lat1, double lon1, double lat2, double lon2)"""
return _Math.delazi_wgs84(lat1, lon1, lat2, lon2) | 3ced7e7dc3fd8dd7ced621a536c43bfb9062d89d | 5,054 |
def clipped_zoom(x: np.ndarray, zoom_factor: float) -> np.ndarray:
"""
Helper function for zoom blur.
Parameters
----------
x
Instance to be perturbed.
zoom_factor
Zoom strength.
Returns
-------
Cropped and zoomed instance.
"""
h = x.shape[0]
ch = int(np.ceil(h / float(zoom_factor))) # ceil crop height(= crop width)
top = (h - ch) // 2
x = zoom(x[top:top + ch, top:top + ch], (zoom_factor, zoom_factor, 1), order=1)
trim_top = (x.shape[0] - h) // 2 # trim off any extra pixels
return x[trim_top:trim_top + h, trim_top:trim_top + h] | befbe10493bd4acc63c609432c1c00ac3eeab652 | 5,055 |
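# Illustrative usage sketch for clipped_zoom; assumes the snippet's module already has
# numpy as np and scipy.ndimage.zoom in scope (as the function body requires), and a
# square HxWxC image with zoom_factor >= 1.
import numpy as np

img = np.random.rand(64, 64, 3).astype(np.float32)
zoomed = clipped_zoom(img, zoom_factor=1.5)
assert zoomed.shape == img.shape  # the zoomed-in image is cropped back to the input size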
def NextFchunk(ea):
"""
Get next function chunk
@param ea: any address
@return: the starting address of the next function chunk or BADADDR
@note: This function enumerates all chunks of all functions in the database
"""
func = idaapi.get_next_fchunk(ea)
if func:
return func.startEA
else:
return BADADDR | b70136da02d6b689fdc3ce7e946aeff87841cb46 | 5,056 |
def share_to_group(request, repo, group, permission):
"""Share repo to group with given permission.
"""
repo_id = repo.id
group_id = group.id
from_user = request.user.username
if is_org_context(request):
org_id = request.user.org.org_id
group_repo_ids = seafile_api.get_org_group_repoids(org_id, group.id)
else:
group_repo_ids = seafile_api.get_group_repoids(group.id)
if repo.id in group_repo_ids:
return False
try:
if is_org_context(request):
org_id = request.user.org.org_id
seafile_api.add_org_group_repo(repo_id, org_id, group_id,
from_user, permission)
else:
seafile_api.set_group_repo(repo_id, group_id, from_user,
permission)
return True
except Exception as e:
logger.error(e)
return False | 20cdd294692a71e44a635f5fb9dd8ab3a77f95c4 | 5,057 |
def change_config(python, backend, cheatsheet, asciiart):
"""
Show/update configuration (Python, Backend, Cheatsheet, ASCIIART).
"""
asciiart_file = "suppress_asciiart"
cheatsheet_file = "suppress_cheatsheet"
python_file = 'PYTHON_MAJOR_MINOR_VERSION'
backend_file = 'BACKEND'
if asciiart is not None:
if asciiart:
delete_cache(asciiart_file)
console.print('[bright_blue]Enable ASCIIART![/]')
else:
touch_cache_file(asciiart_file)
console.print('[bright_blue]Disable ASCIIART![/]')
    if cheatsheet is not None:
        if cheatsheet:
            delete_cache(cheatsheet_file)
            console.print('[bright_blue]Enable Cheatsheet[/]')
        else:
            touch_cache_file(cheatsheet_file)
            console.print('[bright_blue]Disable Cheatsheet[/]')
if python is not None:
write_to_cache_file(python_file, python)
console.print(f'[bright_blue]Python default value set to: {python}[/]')
if backend is not None:
write_to_cache_file(backend_file, backend)
console.print(f'[bright_blue]Backend default value set to: {backend}[/]')
def get_status(file: str):
return "disabled" if check_if_cache_exists(file) else "enabled"
console.print()
console.print("[bright_blue]Current configuration:[/]")
console.print()
console.print(f"[bright_blue]* Python: {read_from_cache_file(python_file)}[/]")
console.print(f"[bright_blue]* Backend: {read_from_cache_file(backend_file)}[/]")
console.print(f"[bright_blue]* ASCIIART: {get_status(asciiart_file)}[/]")
console.print(f"[bright_blue]* Cheatsheet: {get_status(cheatsheet_file)}[/]")
console.print() | b76f40546981c66f8b068c12fa1c1701b532ee7f | 5,058 |
def get_memcached_usage(socket=None):
"""
Returns memcached statistics.
:param socket: Path to memcached's socket file.
"""
cmd = 'echo \'stats\' | nc -U {0}'.format(socket)
output = getoutput(cmd)
curr_items = None
bytes_ = None
rows = output.split('\n')[:-1]
for row in rows:
row = row.split()
if row[1] == 'curr_items':
curr_items = int(row[2])
if row[1] == 'bytes':
bytes_ = int(row[2])
return (bytes_, curr_items) | fcabd77bbf0186498753a4630c50ed7fd900cf96 | 5,059 |
def dataset_config():
"""Return a DatasetConfig for testing."""
return hubs.DatasetConfig(factory=Dataset, flag=True) | 15d8c33e5706c07c03589adb945bdaee3b1dd18a | 5,060 |
import itertools
def combinations():
"""Produce all the combinations for different items."""
combined = itertools.combinations('ABC', r=2)
combined = [''.join(possibility) for possibility in combined]
return combined | 501060cf9c7de9b4b4453940e017ad30cec2f84f | 5,061 |
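# Illustrative usage sketch: with the fixed input 'ABC' above, every 2-item pairing
# is returned as a joined string.
assert combinations() == ['AB', 'AC', 'BC']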
from evohomeclient2 import EvohomeClient
import logging
def setup(hass, config):
"""Create a Honeywell (EMEA/EU) evohome CH/DHW system.
One controller with 0+ heating zones (e.g. TRVs, relays) and, optionally, a
DHW controller. Does not work for US-based systems.
"""
evo_data = hass.data[DATA_EVOHOME] = {}
evo_data['timers'] = {}
evo_data['params'] = dict(config[DOMAIN])
evo_data['params'][CONF_SCAN_INTERVAL] = SCAN_INTERVAL_DEFAULT
_LOGGER.debug("setup(): API call [4 request(s)]: client.__init__()...")
try:
# There's a bug in evohomeclient2 v0.2.7: the client.__init__() sets
# the root loglevel when EvohomeClient(debug=?), so remember it now...
log_level = logging.getLogger().getEffectiveLevel()
client = EvohomeClient(
evo_data['params'][CONF_USERNAME],
evo_data['params'][CONF_PASSWORD],
debug=False
)
# ...then restore it to what it was before instantiating the client
logging.getLogger().setLevel(log_level)
except HTTPError as err:
if err.response.status_code == HTTP_BAD_REQUEST:
_LOGGER.error(
"Failed to establish a connection with evohome web servers, "
"Check your username (%s), and password are correct."
"Unable to continue. Resolve any errors and restart HA.",
evo_data['params'][CONF_USERNAME]
)
return False # unable to continue
raise # we dont handle any other HTTPErrors
finally: # Redact username, password as no longer needed.
evo_data['params'][CONF_USERNAME] = 'REDACTED'
evo_data['params'][CONF_PASSWORD] = 'REDACTED'
evo_data['client'] = client
# Redact any installation data we'll never need.
if client.installation_info[0]['locationInfo']['locationId'] != 'REDACTED':
for loc in client.installation_info:
loc['locationInfo']['streetAddress'] = 'REDACTED'
loc['locationInfo']['city'] = 'REDACTED'
loc['locationInfo']['locationOwner'] = 'REDACTED'
loc[GWS][0]['gatewayInfo'] = 'REDACTED'
# Pull down the installation configuration.
loc_idx = evo_data['params'][CONF_LOCATION_IDX]
try:
evo_data['config'] = client.installation_info[loc_idx]
except IndexError:
_LOGGER.warning(
"setup(): Parameter '%s' = %s , is outside its range (0-%s)",
CONF_LOCATION_IDX,
loc_idx,
len(client.installation_info) - 1
)
return False # unable to continue
evo_data['status'] = {}
if _LOGGER.isEnabledFor(logging.DEBUG):
tmp_loc = dict(evo_data['config'])
tmp_loc['locationInfo']['postcode'] = 'REDACTED'
tmp_tcs = tmp_loc[GWS][0][TCS][0]
if 'zones' in tmp_tcs:
tmp_tcs['zones'] = '...'
if 'dhw' in tmp_tcs:
tmp_tcs['dhw'] = '...'
_LOGGER.debug("setup(), location = %s", tmp_loc)
load_platform(hass, 'climate', DOMAIN)
return True | 2baf2286e4f08ac0ffb452c8d089951fcede8688 | 5,062 |
def geo_exps_MD(n_nodes, radius, l_0, l_1, K=40, thinRatio=1,
gammas=10, max_iter=100, nSamp=50, Niid=1, seed=0):
"""Solves the Connected Subgraph Detection problem and calculates AUC using
Mirror Descent Optimisation for a random geometric graph.
Parameters
----------
n_nodes : int
Number of nodes for the random graph.
radius : float
Distance threshold value.
l_0 : float
Base rate.
l_1 : float
Anomalous rate.
K : int
Anomaly size.
thinRatio : float
Ratio of max semi axis length to min semi axis length. Determines if graph is an ellipsoid or a sphere.
gammas : int or np.array
Conductance rates.
max_iter : int
Number of iterations.
nSamp : int
Number of samples.
Niid : int
Number of iid runs.
seed : int
Random seed.
Returns
-------
scores_noise : np.array
List of shape (nSamp, gammas_n) with AUC scores of optimisation.
"""
graph = Geo_graph_3d(n_nodes=n_nodes, radius=radius, seed=seed)
A, pts = graph.Adj, graph.pos_array
if type(gammas) == int:
gammas = np.logspace(-3, np.log10(2), gammas)
gammas_n = gammas.shape[0]
yy, S = genMeasurements(pts, K, l_0, l_1, nSamp, thinRatio)
s = S[0]
scores_noise = np.zeros((Niid, nSamp, gammas_n), dtype='float32')
for niid in range(Niid):
print('No of iid run: {}'.format(niid+1))
scores = np.zeros((nSamp, gammas_n))
with trange(nSamp, ncols=100) as tqdm:
for ns in tqdm:
ys = yy[:,ns]
c = ys / np.linalg.norm(ys) * np.sqrt(ys.shape[0])
C = c.reshape(-1,1) @ c.reshape(1,-1)
for gind in range(gammas_n):
tqdm.set_description('MD || Run = {} gamma = {:2f}'.format(niid+1, gammas[gind]))
M = runOpt_md(A=A, C=C, gamma=gammas[gind], s=s, max_iter=max_iter)
scores[ns, gind] = np.trace(ys.reshape(-1,1) @ ys.reshape(1,-1) @ M)
tqdm.set_postfix(Loss='{:8f}'.format(np.trace(C.T @ M)))
scores_noise[niid] = scores
return scores_noise.mean(0) | 9e3831975915b6dffecbb1142dbd01cd26a255ca | 5,063 |
from typing import Tuple
def validate_sig_integrity(signer_info: cms.SignedData,
cert: x509.Certificate,
expected_content_type: str,
actual_digest: bytes) -> Tuple[bool, bool]:
"""
Validate the integrity of a signature for a particular signerInfo object
inside a CMS signed data container.
.. warning::
This function does not do any trust checks, and is considered
"dangerous" API because it is easy to misuse.
:param signer_info:
A :class:`cms.SignerInfo` object.
:param cert:
The signer's certificate.
.. note::
This function will not attempt to extract certificates from
the signed data.
:param expected_content_type:
The expected value for the content type attribute (as a Python string,
see :class:`cms.ContentType`).
:param actual_digest:
The actual digest to be matched to the message digest attribute.
:return:
A tuple of two booleans. The first indicates whether the provided
digest matches the value in the signed attributes.
The second indicates whether the signature of the digest is valid.
"""
signature_algorithm: cms.SignedDigestAlgorithm = \
signer_info['signature_algorithm']
digest_algorithm_obj = signer_info['digest_algorithm']
md_algorithm = digest_algorithm_obj['algorithm'].native
signature = signer_info['signature'].native
# signed_attrs comes with some context-specific tagging.
# We need to re-tag it with a universal SET OF tag.
signed_attrs = signer_info['signed_attrs'].untag()
if not signed_attrs:
embedded_digest = None
prehashed = True
signed_data = actual_digest
else:
prehashed = False
# check the CMSAlgorithmProtection attr, if present
try:
cms_algid_protection, = find_cms_attribute(
signed_attrs, 'cms_algorithm_protection'
)
signed_digest_algorithm = \
cms_algid_protection['digest_algorithm'].native
if signed_digest_algorithm != digest_algorithm_obj.native:
raise SignatureValidationError(
"Digest algorithm does not match CMS algorithm protection "
"attribute."
)
signed_sig_algorithm = \
cms_algid_protection['signature_algorithm'].native
if signed_sig_algorithm is None:
raise SignatureValidationError(
"CMS algorithm protection attribute not valid for signed "
"data"
)
elif signed_sig_algorithm != signature_algorithm.native:
raise SignatureValidationError(
"Signature mechanism does not match CMS algorithm "
"protection attribute."
)
except KeyError:
pass
except SignatureValidationError:
raise
except ValueError:
raise SignatureValidationError(
'Multiple CMS protection attributes present'
)
try:
content_type, = find_cms_attribute(signed_attrs, 'content_type')
content_type = content_type.native
if content_type != expected_content_type:
raise SignatureValidationError(
f'Content type {content_type} did not match expected value '
f'{expected_content_type}'
)
except SignatureValidationError:
raise
except (KeyError, ValueError):
raise SignatureValidationError(
'Content type not found in signature, or multiple content-type '
'attributes present.'
)
try:
embedded_digest, = find_cms_attribute(
signed_attrs, 'message_digest'
)
embedded_digest = embedded_digest.native
except (KeyError, ValueError):
raise SignatureValidationError(
'Message digest not found in signature, or multiple message '
'digest attributes present.'
)
signed_data = signed_attrs.dump()
try:
_validate_raw(
signature, signed_data, cert, signature_algorithm, md_algorithm,
prehashed=prehashed
)
valid = True
except InvalidSignature:
valid = False
intact = (
actual_digest == embedded_digest
if embedded_digest is not None else valid
)
return intact, valid | 36e64173d8612c9ca3e95cb0566222140c56c17d | 5,064 |
def linemod_dpt(path):
"""
read a depth image
@return uint16 image of distance in [mm]"""
dpt = open(path, "rb")
rows = np.frombuffer(dpt.read(4), dtype=np.int32)[0]
cols = np.frombuffer(dpt.read(4), dtype=np.int32)[0]
return (np.fromfile(dpt, dtype=np.uint16).reshape((rows, cols)) / 1000.).astype(np.float32) | e2538520ba3bd82ada339b816c4d1a067bbd4000 | 5,065 |
from typing import Iterable
from typing import Optional
def findNode(nodes: Iterable[AstNode], name: str) -> Optional[SExpr]:
"""
Finds a node with given name in a list of nodes
"""
for node in nodes:
if isinstance(node, Atom):
continue
if len(node.items) == 0:
continue
nameNode = node.items[0]
if isinstance(nameNode, Atom) and nameNode.value == name:
return node
return None | 5b3f53e98269e6d00cb2dc11dd75d81dfed98f30 | 5,066 |
def search(keywords=None, servicetype=None, waveband=None):
"""
execute a simple query to the RegTAP registry.
Parameters
----------
keywords : list of str
keyword terms to match to registry records.
Use this parameter to find resources related to a
particular topic.
servicetype : str
the service type to restrict results to.
Allowed values include
'conesearch',
'sia' ,
'ssa',
'slap',
'tap'
waveband : str
the name of a desired waveband; resources returned
will be restricted to those that indicate as having
data in that waveband. Allowed values include
'radio',
'millimeter',
'infrared',
'optical',
'uv',
'euv',
'x-ray'
'gamma-ray'
Returns
-------
RegistryResults
a container holding a table of matching resource (e.g. services)
See Also
--------
RegistryResults
"""
if not any((keywords, servicetype, waveband)):
raise dalq.DALQueryError(
"No search parameters passed to registry search")
joins = set(["rr.interface"])
joins = set(["rr.resource"])
wheres = list()
if keywords:
joins.add("rr.res_subject")
joins.add("rr.resource")
wheres.extend(["({})".format(" AND ".join("""
(
1=ivo_nocasematch(res_subject, '%{0}%') OR
1=ivo_hasword(res_description, '{0}') OR
1=ivo_hasword(res_title, '{0}')
)""".format(tap.escape(keyword)) for keyword in keywords
))])
if servicetype:
servicetype = _service_type_map.get(servicetype, servicetype)
joins.add("rr.interface")
wheres.append("standard_id LIKE 'ivo://ivoa.net/std/{}%'".format(
tap.escape(servicetype)))
wheres.append("intf_type = 'vs:paramhttp'")
else:
wheres.append("""(
standard_id LIKE 'ivo://ivoa.net/std/conesearch%' OR
standard_id LIKE 'ivo://ivoa.net/std/sia%' OR
standard_id LIKE 'ivo://ivoa.net/std/ssa%' OR
standard_id LIKE 'ivo://ivoa.net/std/slap%' OR
standard_id LIKE 'ivo://ivoa.net/std/tap%'
)""")
if waveband:
joins.add("rr.resource")
wheres.append("1 = ivo_hashlist_has('{}', waveband)".format(
tap.escape(waveband)))
query = """SELECT DISTINCT rr.interface.*, rr.capability.*, rr.resource.*
FROM rr.capability
{}
{}
""".format(
''.join("NATURAL JOIN {} ".format(j) for j in joins),
("WHERE " if wheres else "") + " AND ".join(wheres)
)
service = tap.TAPService(REGISTRY_BASEURL)
query = tap.TAPQuery(service.baseurl, query, maxrec=service.hardlimit)
query.RESULTS_CLASS = RegistryResults
return query.execute() | 855e00f2a001995de40beddb6334bdd8ddb8be77 | 5,067 |
import os
def _get_credentials(vcap_services, service_name=None):
"""Retrieves the credentials of the VCAP Service of the specified `service_name`. If
`service_name` is not specified, it takes the information from STREAMING_ANALYTICS_SERVICE_NAME environment
variable.
Args:
vcap_services (dict): A dict representation of the VCAP Services information.
service_name (str): One of the service name stored in `vcap_services`
Returns:
dict: A dict representation of the credentials.
Raises:
ValueError: Cannot find `service_name` in `vcap_services`
"""
service_name = service_name or os.environ.get('STREAMING_ANALYTICS_SERVICE_NAME', None)
# Get the service corresponding to the SERVICE_NAME
services = vcap_services['streaming-analytics']
creds = None
for service in services:
if service['name'] == service_name:
creds = service['credentials']
break
# If no corresponding service is found, error
if creds is None:
raise ValueError("Streaming Analytics service " + str(service_name) + " was not found in VCAP_SERVICES")
return creds | a72f3e7b6be56ab6c66252cd8063fd0207aac02b | 5,068 |
def all_are_independent_from_all(program, xs, ys):
"""
Returns true iff all xs are statistially independent from all ys, where the xs are from the current iteration
and the ys are from the previous iteration.
"""
for x in xs:
if not is_independent_from_all(program, x, ys):
return False
return True | 50f091530e322b741465b222da70080463e4f142 | 5,069 |
import sys
import traceback
import string
def get_exc_short():
"""Print only error type and error value.
"""
exType, exValue, exTb = sys.exc_info()
resL1 = traceback.format_exception_only(exType, exValue)
return string.join(resL1, "") | 5dc21b813fd3317544f06512e8c1160599aa6955 | 5,070 |
def is_str_str_dict(x):
"""Tests if something is a str:str dictionary"""
return isinstance(x, dict) and all(
isinstance(k, str) and isinstance(v, str) for k, v in x.items()
) | ce6230714c0526764f2cc67e4dedf598acd28169 | 5,071 |
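# Illustrative usage sketch for is_str_str_dict:
assert is_str_str_dict({'a': 'b', 'c': 'd'})
assert not is_str_str_dict({'a': 1})
assert not is_str_str_dict(['a', 'b'])
assert is_str_str_dict({})  # vacuously true for an empty dict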
def _ensureListLike(item):
"""
Return the item if it is a list or tuple, otherwise add it to a list and
return that.
"""
return item if (isinstance(item, list) or isinstance(item, tuple)) \
else [item] | 1c602a1fcf8dd6a5b4583264e63e38747f5b0d50 | 5,072 |
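# Illustrative usage sketch for _ensureListLike:
assert _ensureListLike([1, 2]) == [1, 2]    # lists pass through unchanged
assert _ensureListLike((1, 2)) == (1, 2)    # tuples pass through unchanged
assert _ensureListLike('abc') == ['abc']    # anything else is wrapped in a list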
import io
def get_file_from_gitlab(gitpkg, path, ref="master"):
"""Retrieves a file from a Gitlab repository, returns a (StringIO) file."""
return io.StringIO(gitpkg.files.get(file_path=path, ref=ref).decode()) | 7eccad01a538bdd99651b0792aff150f73e82cdd | 5,073 |
def tsne(x, no_dims=2, initial_dims=50, perplexity=30.0, max_iter=1000):
"""Runs t-SNE on the dataset in the NxD array x
to reduce its dimensionality to no_dims dimensions.
The syntaxis of the function is Y = tsne.tsne(x, no_dims, perplexity),
where x is an NxD NumPy array.
"""
# Check inputs
    if isinstance(no_dims, float):
        print("Error: number of dimensions (no_dims) should not be a float.")
        return -1
if round(no_dims) != no_dims:
print("Error: number of dimensions should be an integer.")
return -1
    # Initialize parameters and variables
x = pca(x, initial_dims).real
(n, d) = x.shape
initial_momentum = 0.5
final_momentum = 0.8
eta = 500
min_gain = 0.01
y = np.random.randn(n, no_dims)
dy = np.zeros((n, no_dims))
iy = np.zeros((n, no_dims))
gains = np.ones((n, no_dims))
    # Symmetrize the pairwise affinities
P = seach_prob(x, 1e-5, perplexity)
P = P + np.transpose(P)
P = P / np.sum(P)
# early exaggeration
P = P * 4
P = np.maximum(P, 1e-12)
# Run iterations
for iter in range(max_iter):
# Compute pairwise affinities
sum_y = np.sum(np.square(y), 1)
num = 1 / (1 + np.add(np.add(-2 * np.dot(y, y.T), sum_y).T, sum_y))
num[range(n), range(n)] = 0
Q = num / np.sum(num)
Q = np.maximum(Q, 1e-12)
# Compute gradient
PQ = P - Q
for i in range(n):
dy[i,:] = np.sum(np.tile(PQ[:,i] * num[:,i], (no_dims, 1)).T * (y[i,:] - y), 0)
# Perform the update
if iter < 20:
momentum = initial_momentum
else:
momentum = final_momentum
gains = (gains + 0.2) * ((dy > 0) != (iy > 0)) + (gains * 0.8) * ((dy > 0) == (iy > 0))
gains[gains < min_gain] = min_gain
iy = momentum * iy - eta * (gains * dy)
y = y + iy
y = y - np.tile(np.mean(y, 0), (n, 1))
# Compute current value of cost function
if (iter + 1) % 100 == 0:
if iter > 100:
C = np.sum(P * np.log(P / Q))
else:
C = np.sum( P/4 * np.log( P/4 / Q))
print("Iteration ", (iter + 1), ": error is ", C)
# Stop lying about P-values
if iter == 100:
P = P / 4
print("finished training!")
return y | 348c83048190830dd10982e9fa1426db06e983fc | 5,074 |
def add_corp():
"""
    Page for adding investment-advisor information; lets the user add an advisor manually.
:by zhoushaobo
:return:
"""
if request.method == 'GET':
fof_list = cache.get(str(current_user.id))
return render_template("add_corp.html", fof_list=fof_list)
if request.method == 'POST':
name = request.form['name']
alias = request.form['alias']
register_capital = request.form['register_capital']
status = request.form['status']
site = request.form['site']
desc = request.form['description']
corp = Invest_corp(name=name, alias=alias, review_status=int(status), address=site, description=desc,
registered_capital=register_capital)
db.session.add(corp)
db.session.commit()
return redirect(url_for('f_app.invest_corp')) | 9d70ac010bdf5a3102635eaf1acf75f43689f82e | 5,075 |
def nll_loss(output: Tensor, target: Tensor):
"""
Negative log likelihood loss function.
## Parameters
output: `Tensor` - model's prediction
    target: `Tensor` - training sample targets
## Example usage
```python
from beacon.tensor import Tensor
from beacon.functional import functions as F
output = Tensor([[0.2, 0.7, 0.1], [0.4, 0.45, 0.15]], requires_grad=True)
target = Tensor([[0, 1, 0], [1, 0, 0]], requires_grad=True)
loss = F.nll_loss(output, target)
```
"""
output, target = fn.to_tensor(output), fn.to_tensor(target)
output = fn.clip(output, 1e-7, 1 - 1e-7)
return -target * fn.log(output) | 339ef1300c42ad6923e044e7011615b934923e23 | 5,076 |
from typing import List
from typing import Optional
async def album_upload(sessionid: str = Form(...),
files: List[UploadFile] = File(...),
caption: str = Form(...),
usertags: Optional[List[Usertag]] = Form([]),
location: Optional[Location] = Form(None),
clients: ClientStorage = Depends(get_clients)
) -> Media:
"""Upload album to feed
"""
cl = clients.get(sessionid)
return await album_upload_post(
cl, files, caption=caption,
usertags=usertags,
location=location) | 303aba7ee57e61082197fe18330663c5c0c51c76 | 5,077 |
def mocked_requests_get(*args, **kwargs):
"""Mock requests.get invocations."""
class MockResponse:
"""Class to represent a mocked response."""
def __init__(self, json_data, status_code):
"""Initialize the mock response class."""
self.json_data = json_data
self.status_code = status_code
def json(self):
"""Return the json of the response."""
return self.json_data
if str(args[0]).startswith('https://api.ring.com/clients_api/session'):
return MockResponse({
"profile": {
"authentication_token": "12345678910",
"email": "[email protected]",
"features": {
"chime_dnd_enabled": False,
"chime_pro_enabled": True,
"delete_all_enabled": True,
"delete_all_settings_enabled": False,
"device_health_alerts_enabled": True,
"floodlight_cam_enabled": True,
"live_view_settings_enabled": True,
"lpd_enabled": True,
"lpd_motion_announcement_enabled": False,
"multiple_calls_enabled": True,
"multiple_delete_enabled": True,
"nw_enabled": True,
"nw_larger_area_enabled": False,
"nw_user_activated": False,
"owner_proactive_snoozing_enabled": True,
"power_cable_enabled": False,
"proactive_snoozing_enabled": False,
"reactive_snoozing_enabled": False,
"remote_logging_format_storing": False,
"remote_logging_level": 1,
"ringplus_enabled": True,
"starred_events_enabled": True,
"stickupcam_setup_enabled": True,
"subscriptions_enabled": True,
"ujet_enabled": False,
"video_search_enabled": False,
"vod_enabled": False},
"first_name": "Home",
"id": 999999,
"last_name": "Assistant"}
}, 201)
elif str(args[0])\
.startswith("https://api.ring.com/clients_api/ring_devices"):
return MockResponse({
"authorized_doorbots": [],
"chimes": [
{
"address": "123 Main St",
"alerts": {"connection": "online"},
"description": "Downstairs",
"device_id": "abcdef123",
"do_not_disturb": {"seconds_left": 0},
"features": {"ringtones_enabled": True},
"firmware_version": "1.2.3",
"id": 999999,
"kind": "chime",
"latitude": 12.000000,
"longitude": -70.12345,
"owned": True,
"owner": {
"email": "[email protected]",
"first_name": "Marcelo",
"id": 999999,
"last_name": "Assistant"},
"settings": {
"ding_audio_id": None,
"ding_audio_user_id": None,
"motion_audio_id": None,
"motion_audio_user_id": None,
"volume": 2},
"time_zone": "America/New_York"}],
"doorbots": [
{
"address": "123 Main St",
"alerts": {"connection": "online"},
"battery_life": 4081,
"description": "Front Door",
"device_id": "aacdef123",
"external_connection": False,
"features": {
"advanced_motion_enabled": False,
"motion_message_enabled": False,
"motions_enabled": True,
"people_only_enabled": False,
"shadow_correction_enabled": False,
"show_recordings": True},
"firmware_version": "1.4.26",
"id": 987652,
"kind": "lpd_v1",
"latitude": 12.000000,
"longitude": -70.12345,
"motion_snooze": None,
"owned": True,
"owner": {
"email": "[email protected]",
"first_name": "Home",
"id": 999999,
"last_name": "Assistant"},
"settings": {
"chime_settings": {
"duration": 3,
"enable": True,
"type": 0},
"doorbell_volume": 1,
"enable_vod": True,
"live_view_preset_profile": "highest",
"live_view_presets": [
"low",
"middle",
"high",
"highest"],
"motion_announcement": False,
"motion_snooze_preset_profile": "low",
"motion_snooze_presets": [
"none",
"low",
"medium",
"high"]},
"subscribed": True,
"subscribed_motions": True,
"time_zone": "America/New_York"}]
}, 200)
elif str(args[0]).startswith("https://api.ring.com/clients_api/doorbots"):
return MockResponse([{
"answered": False,
"created_at": "2017-03-05T15:03:40.000Z",
"events": [],
"favorite": False,
"id": 987654321,
"kind": "motion",
"recording": {"status": "ready"},
"snapshot_url": ""
}], 200) | 41a54452593cd23e8ea86f1fbdc0c5e92845482f | 5,078 |
def count_disordered(arr, size):
"""Counts the number of items that are out of the expected
order (monotonous increase) in the given list."""
counter = 0
state = {
"expected": next(item for item in range(size) if item in arr),
"checked": []
}
def advance_state():
state["expected"] += 1
while True:
in_arr = state["expected"] in arr
is_overflow = state["expected"] > size
not_checked = state["expected"] not in state["checked"]
if not_checked and (in_arr or is_overflow):
return
state["expected"] += 1
for val in arr:
if val == state["expected"]:
advance_state()
else:
counter += 1
state["checked"].append(val)
return counter | bb708e7d862ea55e81207cd7ee85e634675b3992 | 5,079 |
from typing import Iterable
import collections
from absl import flags
def lookup_flag_values(flag_list: Iterable[str]) -> collections.OrderedDict:
"""Returns a dictionary of (flag_name, flag_value) pairs for an iterable of flag names."""
flag_odict = collections.OrderedDict()
for flag_name in flag_list:
if not isinstance(flag_name, str):
raise ValueError(
'All flag names must be strings. Flag {} was of type {}.'.format(
flag_name, type(flag_name)))
if flag_name not in flags.FLAGS:
raise ValueError('"{}" is not a defined flag.'.format(flag_name))
flag_odict[flag_name] = flags.FLAGS[flag_name].value
return flag_odict | 591f0e877a71388de8ad0111f624246ea7a2dc5b | 5,080 |
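# Illustrative usage sketch for lookup_flag_values (assumes absl-py; the flag names
# below are hypothetical and must be defined and parsed before the helper is called):
from absl import flags as _example_flags

_example_flags.DEFINE_integer('batch_size', 32, 'Batch size.')
_example_flags.DEFINE_string('optimizer', 'sgd', 'Optimizer name.')
_example_flags.FLAGS(['prog'])  # parse with defaults so .value is readable
print(lookup_flag_values(['batch_size', 'optimizer']))
# -> OrderedDict with batch_size=32 and optimizer='sgd'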
def test_alignment():
"""Ensure A.M. cosine's peaks are aligned across joint slices."""
if skip_all:
return None if run_without_pytest else pytest.skip()
N = 1025
J = 7
Q = 16
Q_fr = 2
F = 4
# generate A.M. cosine ###################################################
f1, f2 = 8, 256
t = np.linspace(0, 1, N, 1)
a = (np.cos(2*np.pi * f1 * t) + 1) / 2
c = np.cos(2*np.pi * f2 * t)
x = a * c
# scatter ################################################################
for out_3D in (True, False):
for sampling_psi_fr in ('resample', 'exclude'):
if sampling_psi_fr == 'exclude' and out_3D:
continue # incompatible
for J_fr in (3, 5):
out_type = ('dict:array' if out_3D else
'dict:list') # for convenience
test_params = dict(out_3D=out_3D,
sampling_filters_fr=(sampling_psi_fr, 'resample'))
test_params_str = '\n'.join(f'{k}={v}' for k, v in
test_params.items())
jtfs = TimeFrequencyScattering1D(
J, N, Q, J_fr=J_fr, Q_fr=Q_fr, F=F, average=True, average_fr=True,
aligned=True, out_type=out_type, frontend=default_backend,
pad_mode='zero', pad_mode_fr='zero', **pad_kw, **test_params)
Scx = jtfs(x)
Scx = drop_batch_dim_jtfs(Scx)
Scx = jtfs_to_numpy(Scx)
# assert peaks share an index #################################
def max_row_idx(c):
coef = c['coef'] if 'list' in out_type else c
return np.argmax(np.sum(coef**2, axis=-1))
first_coef = Scx['psi_t * psi_f_up'][0]
mx_idx = max_row_idx(first_coef)
for pair in Scx:
if pair in ('S0', 'S1'): # joint only
continue
for i, c in enumerate(Scx[pair]):
mx_idx_i = max_row_idx(c)
assert abs(mx_idx_i - mx_idx) < 2, (
"{} != {} -- Scx[{}][{}]\n{}").format(
mx_idx_i, mx_idx, pair, i, test_params_str)
if J_fr == 3:
# assert not all J_pad_frs are same so test covers this case
assert_pad_difference(jtfs, test_params_str) | a0f664a153c1af5942d39d54c75bcb8a3b3b660a | 5,081 |
import base64
def request_text(photo_file, max_results=5):
"""
Request the Google service to find text in an image
:param photo_file: The filename (or path) of the image in a local directory
:param max_results: The requested maximum number of results
:return: A list of text entries found in the image
Note: The argument max_results does not modify the number of results for text detection
"""
credentials = GoogleCredentials.get_application_default()
service = discovery.build('vision', 'v1', credentials=credentials)
with open(photo_file, 'rb') as phf:
image_content = base64.b64encode(phf.read())
service_request = service.images().annotate(body={
'requests': [{'image': {'content': image_content.decode('UTF-8')},
'features': [{'type': 'TEXT_DETECTION', 'maxResults': max_results}]
}]
})
response = service_request.execute()
text_list = response['responses'][0].get('textAnnotations', None)
if text_list is None:
return []
else:
text_vec = map(lambda s: s['description'].strip().strip('\n'), text_list)
return text_vec | 3af646e81fb71f89ffab2a9f20f979cdbaaf29a6 | 5,082 |
def config(request):
"""render a ProsperConfig object for testing"""
return p_config.ProsperConfig(request.config.getini('app_cfg')) | 4222d7d2a56020883e0a196f4c531b44d2f50dd5 | 5,083 |
def flatatt(attrs):
"""
Convert a dictionary of attributes to a single string.
The returned string will contain a leading space followed by key="value",
XML-style pairs. It is assumed that the keys do not need
to be XML-escaped.
If the passed dictionary is empty, then return an empty string.
If the value passed is None writes only the attribute (eg. required)
"""
ret_arr = []
for k, v in attrs.items():
if v is None:
ret_arr.append(u' %s' % k)
else:
ret_arr.append(u' %s="%s"' % (k, conditional_escape(v)))
return u''.join(ret_arr) | 03175fd9b2d0574bd3634d43821e38177924ed0e | 5,084 |
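# Illustrative usage sketch for flatatt (conditional_escape is assumed to come from
# django.utils.html, as in Django widget rendering); dict insertion order is preserved:
#   flatatt({'id': 'main', 'class': 'btn', 'required': None})
#   # -> ' id="main" class="btn" required'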
import json
def json_io_dump(filename, data):
""" Dumps the the JSON data and returns it as a dictionary from filename
:arg filename <string> - Filename of json to point to
:arg data - The already formatted data to dump to JSON
"""
with open(filename, encoding='utf-8', mode='w') as json_file:
json.dump(data, json_file)
return True | e0ae7187ac29669330109ae39ebcac33c1e30ab6 | 5,085 |
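# Illustrative usage sketch for json_io_dump (the file name here is hypothetical):
if json_io_dump('example_output.json', {'status': 'ok', 'count': 3}):
    with open('example_output.json', encoding='utf-8') as fh:
        assert json.load(fh) == {'status': 'ok', 'count': 3}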
import requests
import json
def get_restaurants(_lat, _lng):
"""緯度: lat 経度: lng"""
response = requests.get(URL.format(API_KEY, _lat, _lng))
result = json.loads(response.text)
lat_lng = []
for restaurant in result['results']['shop']:
lat = float(restaurant['lat'])
lng = float(restaurant['lng'])
lat_lng.append((lat, lng, restaurant['name']))
r = []
for lat, lng, name in lat_lng:
r2 = []
difference = (_lat - lat) * 3600
r2.append(int(difference * byou))
difference = (lng - _lng) * 3600
r2.append(int(difference * byou))
r2.append(name)
r.append(r2)
return r | 35884258210174cca0ffcf73dc3451dae07d5712 | 5,086 |
def geomfill_GetCircle(*args):
"""
:param TConv:
:type TConv: Convert_ParameterisationType
:param ns1:
:type ns1: gp_Vec
:param ns2:
:type ns2: gp_Vec
:param nplan:
:type nplan: gp_Vec
:param pt1:
:type pt1: gp_Pnt
:param pt2:
:type pt2: gp_Pnt
:param Rayon:
:type Rayon: float
:param Center:
:type Center: gp_Pnt
:param Poles:
:type Poles: TColgp_Array1OfPnt
:param Weigths:
:type Weigths: TColStd_Array1OfReal &
:rtype: void
:param TConv:
:type TConv: Convert_ParameterisationType
:param ns1:
:type ns1: gp_Vec
:param ns2:
:type ns2: gp_Vec
:param dn1w:
:type dn1w: gp_Vec
:param dn2w:
:type dn2w: gp_Vec
:param nplan:
:type nplan: gp_Vec
:param dnplan:
:type dnplan: gp_Vec
:param pts1:
:type pts1: gp_Pnt
:param pts2:
:type pts2: gp_Pnt
:param tang1:
:type tang1: gp_Vec
:param tang2:
:type tang2: gp_Vec
:param Rayon:
:type Rayon: float
:param DRayon:
:type DRayon: float
:param Center:
:type Center: gp_Pnt
:param DCenter:
:type DCenter: gp_Vec
:param Poles:
:type Poles: TColgp_Array1OfPnt
:param DPoles:
:type DPoles: TColgp_Array1OfVec
:param Weigths:
:type Weigths: TColStd_Array1OfReal &
:param DWeigths:
:type DWeigths: TColStd_Array1OfReal &
:rtype: bool
:param TConv:
:type TConv: Convert_ParameterisationType
:param ns1:
:type ns1: gp_Vec
:param ns2:
:type ns2: gp_Vec
:param dn1w:
:type dn1w: gp_Vec
:param dn2w:
:type dn2w: gp_Vec
:param d2n1w:
:type d2n1w: gp_Vec
:param d2n2w:
:type d2n2w: gp_Vec
:param nplan:
:type nplan: gp_Vec
:param dnplan:
:type dnplan: gp_Vec
:param d2nplan:
:type d2nplan: gp_Vec
:param pts1:
:type pts1: gp_Pnt
:param pts2:
:type pts2: gp_Pnt
:param tang1:
:type tang1: gp_Vec
:param tang2:
:type tang2: gp_Vec
:param Dtang1:
:type Dtang1: gp_Vec
:param Dtang2:
:type Dtang2: gp_Vec
:param Rayon:
:type Rayon: float
:param DRayon:
:type DRayon: float
:param D2Rayon:
:type D2Rayon: float
:param Center:
:type Center: gp_Pnt
:param DCenter:
:type DCenter: gp_Vec
:param D2Center:
:type D2Center: gp_Vec
:param Poles:
:type Poles: TColgp_Array1OfPnt
:param DPoles:
:type DPoles: TColgp_Array1OfVec
:param D2Poles:
:type D2Poles: TColgp_Array1OfVec
:param Weigths:
:type Weigths: TColStd_Array1OfReal &
:param DWeigths:
:type DWeigths: TColStd_Array1OfReal &
:param D2Weigths:
:type D2Weigths: TColStd_Array1OfReal &
:rtype: bool
"""
return _GeomFill.geomfill_GetCircle(*args) | f00ade1b203e819c6ae946c31c8c9821f2a79744 | 5,087 |
def dwconv3x3_block(in_channels,
out_channels,
stride,
padding=1,
dilation=1,
bias=False,
activation=(lambda: nn.ReLU(inplace=True)),
activate=True):
"""
3x3 depthwise version of the standard convolution block with ReLU6 activation.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int
Strides of the convolution.
padding : int or tuple/list of 2 int, default 1
Padding value for convolution layer.
dilation : int or tuple/list of 2 int, default 1
Dilation value for convolution layer.
bias : bool, default False
Whether the layer uses a bias vector.
activation : function or str or None, default nn.ReLU(inplace=True)
Activation function or name of activation function.
activate : bool, default True
Whether activate the convolution block.
"""
return conv3x3_block(
in_channels=in_channels,
out_channels=out_channels,
stride=stride,
padding=padding,
dilation=dilation,
groups=out_channels,
bias=bias,
activation=activation,
activate=activate) | eb2330206510369f8d81d0fc58d2578cf212a1df | 5,088 |
import json
def predict() -> str:
"""
Creates route for model prediction for given number of inputs.
:return: predicted price
"""
try:
input_params = process_input(request.data)
print(input_params)
predictions = regressor.predict(input_params)
return json.dumps({"predicted_price": predictions.tolist()})
except (KeyError, json.JSONDecodeError, AssertionError):
return json.dumps({"error": "CHECK INPUT"}), 400
except:
return json.dumps({"error": "PREDICTION FAILED"}), 500 | ee58cbecf6d44a65f94cb3becd4d6cfe2d30ef30 | 5,089 |
def all_h2h_pairs_all_lanes(matches_df, file_name=''):
"""Produces all head to head win rates for all lane matchups -- even across different lanes
(eg. TOP_SOLO Renekton vs MID_SOLO Xerath)."""
df = pd.DataFrame()
lanes = dc.get_lanes_roles()
for lane1 in lanes:
print(lane1)
for lane2 in lanes:
print(lane1 + '_' + lane2)
temp = all_h2h_pairs_fixed_lane(matches_df, lane1, lane2)
df[lane1 + '_' + lane2 + '_wr'] = temp['win_rate']
df[lane1 + '_' + lane2 + '_gp'] = temp['games_played']
df[lane1 + '_' + lane2 + '_wins'] = temp['wins']
if file_name != '':
df.to_csv(file_name)
return df | f6ffd40985455515767c1aa6286dc9998b7bdb7d | 5,090 |
import requests
def reload_rules(testcase, rest_url):
"""
:param TestCase self: TestCase object
:param str rest_url: http://host:port
:rtype: dict
"""
resp = requests.get(rest_url + "/rest/reload").json()
print("Reload rules response: {}".format(resp))
testcase.assertEqual(resp.get("success"), True)
return resp | e747668ba8ad5f58f0307194b0008469dd3593c1 | 5,091 |
def encryptMessage(key: str, message: str) -> str:
"""Vigenère cipher encryption
Wrapper function that encrypts given message with given key using the Vigenère cipher.
Args:
key: String encryption key to encrypt with Vigenère cipher.
message: Message string to encrypt.
Returns:
Encrypted message string.
"""
return translateMessage(key, message, 'encrypt') | 428372d8443579ac691d43a5542de850b49966ce | 5,092 |
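# Illustrative usage sketch for encryptMessage; the exact treatment of case and
# non-letter characters depends on the translateMessage helper defined elsewhere:
#   ciphertext = encryptMessage('LEMON', 'ATTACKATDAWN')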
def rae(label, pred):
"""computes the relative absolute error
(condensed using standard deviation formula)"""
#compute the root of the sum of the squared error
numerator = np.mean(np.abs(label - pred), axis=None)
#numerator = np.sum(np.abs(label - pred), axis = None)
#compute AE if we were to simply predict the average of the previous values
denominator = np.mean(np.abs(label - np.mean(label, axis=None)), axis=None)
#denominator = np.sum(np.abs(label - np.mean(label, axis = None)), axis=None)
return numerator / denominator | bff280ba243fd494347643233870524c008c7473 | 5,093 |
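# Illustrative numeric sketch for rae: a perfect prediction scores 0.0, while always
# predicting the label mean scores 1.0 (assumes numpy as np, as the function body does).
import numpy as np

label = np.array([1.0, 2.0, 3.0, 4.0])
assert rae(label, label) == 0.0
assert np.isclose(rae(label, np.full_like(label, label.mean())), 1.0)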
def subset_sum(arr, target_sum, i, cache):
"""
Returns whether any subset(not contiguous) of the array has sum equal to target sum.
"""
if target_sum == 0:
return True, {}
if i < 0:
return False, {}
if target_sum in cache[i]:
return cache[i][target_sum]
# Either include this element or not!
sub_ans, sub_ans_indices = subset_sum(arr, target_sum, i - 1, cache)
if not sub_ans and target_sum >= arr[i]:
sub_ans, sub_ans_indices = subset_sum(arr, target_sum - arr[i], i - 1, cache)
sub_ans_indices = set(sub_ans_indices)
sub_ans_indices.add(i)
if not sub_ans:
sub_ans_indices = {}
cache[i][target_sum] = sub_ans, sub_ans_indices
return cache[i][target_sum] | aa90d7eb4ffa3a457a5f27733de56a82df450861 | 5,094 |
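# Illustrative usage sketch for subset_sum; the cache the function expects is one dict
# per index, keyed by the remaining target sum:
arr = [3, 34, 4, 12, 5, 2]
cache = [{} for _ in arr]
found, indices = subset_sum(arr, 9, len(arr) - 1, cache)
assert found and sum(arr[i] for i in indices) == 9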
from typing import Type
import types
from typing import Optional
def set_runtime_parameter_pb(
pb: pipeline_pb2.RuntimeParameter,
name: Text,
ptype: Type[types.Property],
default_value: Optional[types.Property] = None
) -> pipeline_pb2.RuntimeParameter:
"""Helper function to fill a RuntimeParameter proto.
Args:
pb: A RuntimeParameter proto to be filled in.
name: Name to be set at pb.name.
ptype: The Python type to be set at pb.type.
default_value: Optional. If provided, it will be pb.default_value.
Returns:
A RuntimeParameter proto filled with provided values.
"""
pb.name = name
if ptype == int:
pb.type = pipeline_pb2.RuntimeParameter.Type.INT
if default_value:
pb.default_value.int_value = default_value
elif ptype == float:
pb.type = pipeline_pb2.RuntimeParameter.Type.DOUBLE
if default_value:
pb.default_value.double_value = default_value
elif ptype == str:
pb.type = pipeline_pb2.RuntimeParameter.Type.STRING
if default_value:
pb.default_value.string_value = default_value
else:
raise ValueError("Got unsupported runtime parameter type: {}".format(ptype))
return pb | 4c6394f60774c42b0a6be8d55b57a67b8fc6b1d5 | 5,095 |
def get_loader(content_type):
"""Returns loader class for specified content type.
:type content_type: constants.ContentType
:param content_type: Content type.
:returns: Loader class for specified content type.
:raise ValueError: If no loader found for specified content type.
"""
for loader_cls in ALL_LOADERS:
content_types = loader_cls.content_types
if not isinstance(loader_cls.content_types, (list, tuple)):
content_types = [content_types]
if content_type in content_types:
return loader_cls
raise ValueError('Loader for content type "{0}" not found'
.format(content_type)) | 0d7e37ff17a48e8bed3a4abb7ce9734579fe9100 | 5,096 |
from typing import List
from typing import Dict
import torch
def get_basis_script(max_degree: int,
use_pad_trick: bool,
spherical_harmonics: List[Tensor],
clebsch_gordon: List[List[Tensor]],
amp: bool) -> Dict[str, Tensor]:
"""
Compute pairwise bases matrices for degrees up to max_degree
:param max_degree: Maximum input or output degree
:param use_pad_trick: Pad some of the odd dimensions for a better use of Tensor Cores
:param spherical_harmonics: List of computed spherical harmonics
:param clebsch_gordon: List of computed CB-coefficients
:param amp: When true, return bases in FP16 precision
"""
basis = {}
idx = 0
# Double for loop instead of product() because of JIT script
for d_in in range(max_degree + 1):
for d_out in range(max_degree + 1):
key = f'{d_in},{d_out}'
K_Js = []
for freq_idx, J in enumerate(range(abs(d_in - d_out), d_in + d_out + 1)):
Q_J = clebsch_gordon[idx][freq_idx]
K_Js.append(torch.einsum('n f, k l f -> n l k', spherical_harmonics[J].float(), Q_J.float()))
basis[key] = torch.stack(K_Js, 2) # Stack on second dim so order is n l f k
if amp:
basis[key] = basis[key].half()
if use_pad_trick:
basis[key] = F.pad(basis[key], (0, 1)) # Pad the k dimension, that can be sliced later
idx += 1
return basis | 9afbe8973541b8b1562f2d336d13b19dae9245fc | 5,097 |
def get_iterative_process_for_minimal_sum_example():
"""Returns an iterative process for a sum example.
This iterative process contains the fewest components required to compile to
`forms.MapReduceForm`.
"""
@computations.federated_computation
def init_fn():
"""The `init` function for `tff.templates.IterativeProcess`."""
zero = computations.tf_computation(lambda: [0, 0])
return intrinsics.federated_eval(zero, placements.SERVER)
@computations.tf_computation(tf.int32)
def work(client_data):
del client_data # Unused
return 1, 1
@computations.federated_computation([
computation_types.FederatedType([tf.int32, tf.int32], placements.SERVER),
computation_types.FederatedType(tf.int32, placements.CLIENTS),
])
def next_fn(server_state, client_data):
"""The `next` function for `tff.templates.IterativeProcess`."""
del server_state # Unused
# No call to `federated_map` with prepare.
# No call to `federated_broadcast`.
client_updates = intrinsics.federated_map(work, client_data)
unsecure_update = intrinsics.federated_sum(client_updates[0])
secure_update = intrinsics.federated_secure_sum_bitwidth(
client_updates[1], 8)
new_server_state = intrinsics.federated_zip(
[unsecure_update, secure_update])
# No call to `federated_map` with an `update` function.
server_output = intrinsics.federated_value([], placements.SERVER)
return new_server_state, server_output
return iterative_process.IterativeProcess(init_fn, next_fn) | 40ea1b07f2eeccaaff3cc0657207ef445985f795 | 5,098 |
def get_r_port_p_d_t(p):
"""玄関ポーチに設置された照明設備の使用時間率
Args:
p(int): 居住人数
Returns:
ndarray: r_port_p_d_t 日付dの時刻tにおける居住人数がp人の場合の玄関ポーチに設置された照明設備の使用時間率
"""
return get_r_i_p_d_t(19, p) | abdc6f9201594ca946ff2deb25cdf8c1e1d98839 | 5,099 |